author     David S. Miller <davem@davemloft.net>    2008-10-11 15:39:35 -0400
committer  David S. Miller <davem@davemloft.net>    2008-10-11 15:39:35 -0400
commit     56c5d900dbb8e042bfad035d18433476931d8f93 (patch)
tree       00b793965beeef10db03e0ff021d2d965c410759 /drivers/scsi
parent     4dd95b63ae25c5cad6986829b5e8788e9faa0330 (diff)
parent     ead9d23d803ea3a73766c3cb27bf7563ac8d7266 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
sound/core/memalloc.c
Diffstat (limited to 'drivers/scsi')
51 files changed, 1442 insertions(+), 1015 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e0322b1c1ea..d3b211af4e1c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
       To compile this driver as a module, choose M here: the
       module will be called qlogicfas.
 
-config SCSI_QLOGIC_FC_FIRMWARE
-    bool "Include loadable firmware in driver"
-    depends on SCSI_QLOGIC_FC
-    help
-      Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
-      expanded LUN addressing and FcTape (FCP-2) support, in the
-      qlogicfc driver. This is required on some platforms.
-
 config SCSI_QLOGIC_1280
     tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
     depends on PCI && SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
     srbcmd->id = cpu_to_le32(scmd_id(cmd));
     srbcmd->lun = cpu_to_le32(cmd->device->lun);
     srbcmd->flags = cpu_to_le32(flag);
-    timeout = cmd->timeout_per_command/HZ;
+    timeout = cmd->request->timeout/HZ;
     if (timeout == 0)
         timeout = 1;
     srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index ef693e8412e9..8f45570a8a01 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
     /*
      * I/O buffer for both MODE_SELECT and INQUIRY commands.
      */
-    char buffer[CLARIION_BUFFER_SIZE];
+    unsigned char buffer[CLARIION_BUFFER_SIZE];
     /*
      * SCSI sense buffer for commands -- assumes serial issuance
      * and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
         err = SCSI_DH_DEV_TEMP_BUSY;
         goto out;
     }
-    if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
+    if (csdev->buffer[4] > 2) {
         /* Invalid buffer format */
         sdev_printk(KERN_NOTICE, sdev,
                     "%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
         return NULL;
     }
 
-    memset(rq->cmd, 0, BLK_MAX_CDB);
     rq->cmd_len = COMMAND_SIZE(cmd);
     rq->cmd[0] = cmd;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index a6a4ef3ad51c..5e93c88ad66b 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
     req->cmd_type = REQ_TYPE_BLOCK_PC;
     req->cmd_flags |= REQ_FAILFAST;
     req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
-    memset(req->cmd, 0, MAX_COMMAND_SIZE);
     req->cmd[0] = TEST_UNIT_READY;
     req->timeout = HP_SW_TIMEOUT;
     req->sense = h->sense;
@@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
     req->cmd_type = REQ_TYPE_BLOCK_PC;
     req->cmd_flags |= REQ_FAILFAST;
     req->cmd_len = COMMAND_SIZE(START_STOP);
-    memset(req->cmd, 0, MAX_COMMAND_SIZE);
     req->cmd[0] = START_STOP;
     req->cmd[4] = 1;    /* Start spin cycle */
     req->timeout = HP_SW_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 6e2f130d56de..50bf95f3b5c4 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
         return NULL;
     }
 
-    memset(rq->cmd, 0, BLK_MAX_CDB);
-
     rq->cmd_type = REQ_TYPE_BLOCK_PC;
     rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
     rq->retries = RDAC_RETRIES;
@@ -590,6 +588,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
     {"STK", "OPENstorage D280"},
     {"SUN", "CSM200_R"},
     {"SUN", "LCSM100_F"},
+    {"DELL", "MD3000"},
+    {"DELL", "MD3000i"},
     {NULL, NULL},
 };
 
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
 
     /* use request field to save the ptr. to completion struct. */
     scp->request = (struct request *)&wait;
-    scp->timeout_per_command = timeout*HZ;
     scp->cmd_len = 12;
     scp->cmnd = cmnd;
     cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
     register Scsi_Cmnd *pscp;
     register Scsi_Cmnd *nscp;
     ulong flags;
-    unchar b, t;
 
     TRACE(("gdth_putq() priority %d\n",priority));
     spin_lock_irqsave(&ha->smp_lock, flags);
 
-    if (!cmndinfo->internal_command) {
+    if (!cmndinfo->internal_command)
         cmndinfo->priority = priority;
-        b = scp->device->channel;
-        t = scp->device->id;
-        if (priority >= DEFAULT_PRI) {
-            if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
-                (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
-                TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
-                cmndinfo->timeout = gdth_update_timeout(scp, 0);
-            }
-        }
-    }
 
     if (ha->req_first==NULL) {
         ha->req_first = scp;    /* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
     return ((const char *)ha->binfo.type_string);
 }
 
+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+    gdth_ha_str *ha = shost_priv(scp->device->host);
+    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+    unchar b, t;
+    ulong flags;
+    enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+    TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
+    b = scp->device->channel;
+    t = scp->device->id;
+
+    /*
+     * We don't really honor the command timeout, but we try to
+     * honor 6 times of the actual command timeout! So reset the
+     * timer if this is less than 6th timeout on this command!
+     */
+    if (++cmndinfo->timeout_count < 6)
+        retval = BLK_EH_RESET_TIMER;
+
+    /* Reset the timeout if it is locked IO */
+    spin_lock_irqsave(&ha->smp_lock, flags);
+    if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+        (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+        TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+        retval = BLK_EH_RESET_TIMER;
+    }
+    spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+    return retval;
+}
+
+
 static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
 {
     gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
     BUG_ON(!cmndinfo);
 
     scp->scsi_done = done;
-    gdth_update_timeout(scp, scp->timeout_per_command * 6);
+    cmndinfo->timeout_count = 0;
     cmndinfo->priority = DEFAULT_PRI;
 
     return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
             ha->hdr[j].lock = 1;
             spin_unlock_irqrestore(&ha->smp_lock, flags);
             gdth_wait_completion(ha, ha->bus_cnt, j);
-            gdth_stop_timeout(ha, ha->bus_cnt, j);
         } else {
             spin_lock_irqsave(&ha->smp_lock, flags);
             ha->hdr[j].lock = 0;
             spin_unlock_irqrestore(&ha->smp_lock, flags);
-            gdth_start_timeout(ha, ha->bus_cnt, j);
             gdth_next(ha);
         }
     }
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
             spin_lock_irqsave(&ha->smp_lock, flags);
             ha->raw[i].lock = 1;
             spin_unlock_irqrestore(&ha->smp_lock, flags);
-            for (j = 0; j < ha->tid_cnt; ++j) {
+            for (j = 0; j < ha->tid_cnt; ++j)
                 gdth_wait_completion(ha, i, j);
-                gdth_stop_timeout(ha, i, j);
-            }
         } else {
             spin_lock_irqsave(&ha->smp_lock, flags);
             ha->raw[i].lock = 0;
             spin_unlock_irqrestore(&ha->smp_lock, flags);
-            for (j = 0; j < ha->tid_cnt; ++j) {
-                gdth_start_timeout(ha, i, j);
+            for (j = 0; j < ha->tid_cnt; ++j)
                 gdth_next(ha);
-            }
         }
     }
     break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
     .slave_configure        = gdth_slave_configure,
     .bios_param             = gdth_bios_param,
     .proc_info              = gdth_proc_info,
+    .eh_timed_out           = gdth_timed_out,
     .proc_name              = "gdth",
     .can_queue              = GDTH_MAXCMDS,
     .this_id                = -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
     gdth_cmd_str *internal_cmd_str;     /* crier for internal messages*/
     dma_addr_t sense_paddr;             /* sense dma-addr */
     unchar priority;
-    int timeout;
+    int timeout_count;                  /* # of timeout calls */
     volatile int wait_for_completion;
     ushort status;
     ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
     }
     spin_unlock_irqrestore(&ha->smp_lock, flags);
 }
-
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-    ulong flags;
-    Scsi_Cmnd *scp;
-    unchar b, t;
-
-    spin_lock_irqsave(&ha->smp_lock, flags);
-
-    for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-        struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-        if (!cmndinfo->internal_command) {
-            b = scp->device->channel;
-            t = scp->device->id;
-            if (t == (unchar)id && b == (unchar)busnum) {
-                TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
-                cmndinfo->timeout = gdth_update_timeout(scp, 0);
-            }
-        }
-    }
-    spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-    ulong flags;
-    Scsi_Cmnd *scp;
-    unchar b, t;
-
-    spin_lock_irqsave(&ha->smp_lock, flags);
-
-    for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-        struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-        if (!cmndinfo->internal_command) {
-            b = scp->device->channel;
-            t = scp->device->id;
-            if (t == (unchar)id && b == (unchar)busnum) {
-                TRACE2(("gdth_start_timeout(): update_timeout()\n"));
-                gdth_update_timeout(scp, cmndinfo->timeout);
-            }
-        }
-    }
-    spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
-{
-    int oldto;
-
-    oldto = scp->timeout_per_command;
-    scp->timeout_per_command = timeout;
-
-    if (timeout == 0) {
-        del_timer(&scp->eh_timeout);
-        scp->eh_timeout.data = (unsigned long) NULL;
-        scp->eh_timeout.expires = 0;
-    } else {
-        if (scp->eh_timeout.data != (unsigned long) NULL)
-            del_timer(&scp->eh_timeout);
-        scp->eh_timeout.data = (unsigned long) scp;
-        scp->eh_timeout.expires = jiffies + timeout;
-        add_timer(&scp->eh_timeout);
-    }
-
-    return oldto;
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
     ulong64 *paddr);
 static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
 
 #endif
 
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1d..3fdbb13e80a8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
 struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
 {
     struct device *cdev;
-    struct Scsi_Host *shost = ERR_PTR(-ENXIO);
+    struct Scsi_Host *shost = NULL;
 
     cdev = class_find_device(&shost_class, NULL, &hostnum,
                              __scsi_host_match);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6e..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
     init_event_struct(evt_struct,
                       handle_cmd_rsp,
                       VIOSRP_SRP_FORMAT,
-                      cmnd->timeout_per_command/HZ);
+                      cmnd->request->timeout/HZ);
 
     evt_struct->cmnd = cmnd;
     evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc45..81c16cba5417 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
     pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
     pc->scsi_cmd = cmd;
     pc->done = done;
-    pc->timeout = jiffies + cmd->timeout_per_command;
+    pc->timeout = jiffies + cmd->request->timeout;
 
     if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
         printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..d30eb7ba018e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
             sdev->no_uld_attach = 1;
         }
         if (ipr_is_vset_device(res)) {
-            sdev->timeout = IPR_VSET_RW_TIMEOUT;
+            blk_queue_rq_timeout(sdev->request_queue,
+                                 IPR_VSET_RW_TIMEOUT);
             blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
         }
         if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41df..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
         scb->cmd.dcdb.segment_4G = 0;
         scb->cmd.dcdb.enhanced_sg = 0;
 
-        TimeOut = scb->scsi_cmd->timeout_per_command;
+        TimeOut = scb->scsi_cmd->request->timeout;
 
         if (ha->subsys->param[4] & 0x00100000) {    /* If NEW Tape DCDB is Supported */
             if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..da7b67d30d9a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
         if (lun == task->sc->device->lun || lun == -1) {
             debug_scsi("failing in progress sc %p itt 0x%x\n",
                        task->sc, task->itt);
-            fail_command(conn, task, DID_BUS_BUSY << 16);
+            fail_command(conn, task, error << 16);
         }
     }
 }
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
         scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 {
     struct iscsi_cls_session *cls_session;
     struct iscsi_session *session;
     struct iscsi_conn *conn;
-    enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+    enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
 
     cls_session = starget_to_session(scsi_target(scmd->device));
     session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
      * We are probably in the middle of iscsi recovery so let
      * that complete and handle the error.
      */
-    rc = EH_RESET_TIMER;
+    rc = BLK_EH_RESET_TIMER;
     goto done;
     }
 
     conn = session->leadconn;
     if (!conn) {
         /* In the middle of shuting down */
-        rc = EH_RESET_TIMER;
+        rc = BLK_EH_RESET_TIMER;
         goto done;
     }
 
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
      */
     if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
                         (conn->ping_timeout * HZ), jiffies))
-        rc = EH_RESET_TIMER;
+        rc = BLK_EH_RESET_TIMER;
     /*
      * if we are about to check the transport then give the command
      * more time
      */
     if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
                        jiffies))
-        rc = EH_RESET_TIMER;
+        rc = BLK_EH_RESET_TIMER;
     /* if in the middle of checking the transport then give us more time */
     if (conn->ping_task)
-        rc = EH_RESET_TIMER;
+        rc = BLK_EH_RESET_TIMER;
 done:
     spin_unlock(&session->lock);
-    debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+    debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
+               "timer reset" : "nh");
     return rc;
 }
 
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bdd..e15501170698 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
     }
 }
 
-static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
                              u32 val)
 {
-    struct domain_device *dev = ap->private_data;
+    struct domain_device *dev = link->ap->private_data;
 
     SAS_DPRINTK("STUB %s\n", __func__);
     switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
     return 0;
 }
 
-static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
                             u32 *val)
 {
-    struct domain_device *dev = ap->private_data;
+    struct domain_device *dev = link->ap->private_data;
 
     SAS_DPRINTK("STUB %s\n", __func__);
     switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
 
     /* Bounce SCSI-initiated commands to the SCSI EH */
     if (qc->scsicmd) {
-        scsi_req_abort_cmd(qc->scsicmd);
+        blk_abort_request(qc->scsicmd->request);
         scsi_schedule_eh(qc->scsicmd->device->host);
         return;
     }
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
 int sas_register_ports(struct sas_ha_struct *sas_ha);
 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
 
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
 
 int sas_init_queue(struct sas_ha_struct *sas_ha);
 int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
     return;
 }
 
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
     struct sas_task *task = TO_SAS_TASK(cmd);
     unsigned long flags;
 
     if (!task) {
-        cmd->timeout_per_command /= 2;
+        cmd->request->timeout /= 2;
         SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
-                    cmd, task, (cmd->timeout_per_command ?
-                    "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
-        if (!cmd->timeout_per_command)
-            return EH_NOT_HANDLED;
-        return EH_RESET_TIMER;
+                    cmd, task, (cmd->request->timeout ?
+                    "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
+        if (!cmd->request->timeout)
+            return BLK_EH_NOT_HANDLED;
+        return BLK_EH_RESET_TIMER;
     }
 
     spin_lock_irqsave(&task->task_state_lock, flags);
     BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
     if (task->task_state_flags & SAS_TASK_STATE_DONE) {
         spin_unlock_irqrestore(&task->task_state_lock, flags);
-        SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
-                    cmd, task);
-        return EH_HANDLED;
+        SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
+                    "BLK_EH_HANDLED\n", cmd, task);
+        return BLK_EH_HANDLED;
     }
     if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
         spin_unlock_irqrestore(&task->task_state_lock, flags);
         SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
-                    "EH_RESET_TIMER\n",
+                    "BLK_EH_RESET_TIMER\n",
                     cmd, task);
-        return EH_RESET_TIMER;
+        return BLK_EH_RESET_TIMER;
     }
     task->task_state_flags |= SAS_TASK_STATE_ABORTED;
     spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-    SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
+    SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
                 cmd, task);
 
-    return EH_NOT_HANDLED;
+    return BLK_EH_NOT_HANDLED;
 }
 
 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
         return;
     }
 
-    scsi_req_abort_cmd(sc);
+    blk_abort_request(sc->request);
     scsi_schedule_eh(sc->device->host);
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7d..afe1de998763 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
  * cmd has not been completed within the timeout period.
  */
 static enum
-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 {
     struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
     struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 
     if (time_after(jiffies, scmd->jiffies_at_alloc +
                    (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
-        return EH_NOT_HANDLED;
+        return BLK_EH_NOT_HANDLED;
     }
 
     instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 
         spin_unlock_irqrestore(instance->host->host_lock, flags);
     }
-    return EH_RESET_TIMER;
+    return BLK_EH_RESET_TIMER;
 }
 
 /**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
     **
     **----------------------------------------------------
     */
-    if (np->settle_time && cmd->timeout_per_command >= HZ) {
-        u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
+    if (np->settle_time && cmd->request->timeout >= HZ) {
+        u_long tlimit = jiffies + cmd->request->timeout - HZ;
         if (time_after(np->settle_time, tlimit))
             np->settle_time = tlimit;
     }
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
     memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
     /* Set ISP command timeout. */
-    pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+    pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
 
     /* Set device target ID and LUN */
     pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
     memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
     /* Set ISP command timeout. */
-    pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+    pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
 
     /* Set device target ID and LUN */
     pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 45e7dcb4b34d..0ddfe7106b3b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
         valid = 0;
         if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
             valid = 1;
-        else if (start == (FA_BOOT_CODE_ADDR*4) ||
-            start == (FA_RISC_CODE_ADDR*4))
+        else if (start == (ha->flt_region_boot * 4) ||
+            start == (ha->flt_region_fw * 4))
             valid = 1;
-        else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4))
+        else if (IS_QLA25XX(ha) &&
+            start == (ha->flt_region_vpd_nvram * 4))
             valid = 1;
         if (!valid) {
             qla_printk(KERN_WARNING, ha,
@@ -1065,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
         pfc_host_stat->dumped_frames = stats->dumped_frames;
         pfc_host_stat->nos_count = stats->nos_rcvd;
     }
+    pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
+    pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
 
 done_free:
     dma_pool_free(ha->s_dma_pool, stats, stats_dma);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 94a720eabfd8..83c819216771 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
 #include <linux/firmware.h>
 #include <linux/aer.h>
 #include <linux/mutex.h>
-#include <linux/semaphore.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
 
 struct qla_statistics {
     uint32_t total_isp_aborts;
+    uint64_t input_bytes;
+    uint64_t output_bytes;
 };
 
 /*
@@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
 #define FCPORT_UPDATE_NEEDED    27
 #define VP_DPC_NEEDED           28      /* wake up for VP dpc handling */
 #define UNLOADING               29
+#define NPIV_CONFIG_NEEDED      30
 
     uint32_t    device_flags;
 #define DFLG_LOCAL_DEVICES      BIT_0
@@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
     uint64_t fce_wr, fce_rd;
     struct mutex fce_mutex;
 
-    uint32_t    hw_event_start;
     uint32_t    hw_event_ptr;
     uint32_t    hw_event_pause_errors;
 
@@ -2553,6 +2554,14 @@ typedef struct scsi_qla_host {
     uint32_t    fdt_unprotect_sec_cmd;
     uint32_t    fdt_protect_sec_cmd;
 
+    uint32_t    flt_region_flt;
+    uint32_t    flt_region_fdt;
+    uint32_t    flt_region_boot;
+    uint32_t    flt_region_fw;
+    uint32_t    flt_region_vpd_nvram;
+    uint32_t    flt_region_hw_event;
+    uint32_t    flt_region_npiv_conf;
+
     /* Needed for BEACON */
     uint16_t    beacon_blink_led;
     uint8_t     beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400d..d1d14202575a 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
 #define FA_RISC_CODE_ADDR       0x20000
 #define FA_RISC_CODE_SEGMENTS   2
 
+#define FA_FLASH_DESCR_ADDR_24  0x11000
+#define FA_FLASH_LAYOUT_ADDR_24 0x11400
+#define FA_NPIV_CONF0_ADDR_24   0x16000
+#define FA_NPIV_CONF1_ADDR_24   0x17000
+
 #define FA_FW_AREA_ADDR         0x40000
 #define FA_VPD_NVRAM_ADDR       0x48000
 #define FA_FEATURE_ADDR         0x4C000
 #define FA_FLASH_DESCR_ADDR     0x50000
+#define FA_FLASH_LAYOUT_ADDR    0x50400
 #define FA_HW_EVENT0_ADDR       0x54000
-#define FA_HW_EVENT1_ADDR       0x54200
+#define FA_HW_EVENT1_ADDR       0x54400
 #define FA_HW_EVENT_SIZE        0x200
 #define FA_HW_EVENT_ENTRY_SIZE  4
+#define FA_NPIV_CONF0_ADDR      0x5C000
+#define FA_NPIV_CONF1_ADDR      0x5D000
+
 /*
  * Flash Error Log Event Codes.
  */
@@ -806,10 +815,6 @@ struct device_reg_24xx {
 #define HW_EVENT_NVRAM_CHKSUM_ERR       0xF023
 #define HW_EVENT_FLASH_FW_ERR   0xF024
 
-#define FA_BOOT_LOG_ADDR        0x58000
-#define FA_FW_DUMP0_ADDR        0x60000
-#define FA_FW_DUMP1_ADDR        0x70000
-
     uint32_t flash_data;                /* Flash/NVRAM BIOS data. */
 
     uint32_t ctrl_status;               /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
     uint8_t unused2[65];
 };
 
+/* Flash Layout Table ********************************************************/
+
+struct qla_flt_location {
+    uint8_t sig[4];
+    uint32_t start_lo;
+    uint32_t start_hi;
+    uint16_t unused;
+    uint16_t checksum;
+};
+
+struct qla_flt_header {
+    uint16_t version;
+    uint16_t length;
+    uint16_t checksum;
+    uint16_t unused;
+};
+
+#define FLT_REG_FW              0x01
+#define FLT_REG_BOOT_CODE       0x07
+#define FLT_REG_VPD_0           0x14
+#define FLT_REG_NVRAM_0         0x15
+#define FLT_REG_VPD_1           0x16
+#define FLT_REG_NVRAM_1         0x17
+#define FLT_REG_FDT             0x1a
+#define FLT_REG_FLT             0x1c
+#define FLT_REG_HW_EVENT_0      0x1d
+#define FLT_REG_HW_EVENT_1      0x1f
+#define FLT_REG_NPIV_CONF_0     0x29
+#define FLT_REG_NPIV_CONF_1     0x2a
+
+struct qla_flt_region {
+    uint32_t code;
+    uint32_t size;
+    uint32_t start;
+    uint32_t end;
+};
+
+/* Flash NPIV Configuration Table ********************************************/
+
+struct qla_npiv_header {
+    uint8_t sig[2];
+    uint16_t version;
+    uint16_t entries;
+    uint16_t unused[4];
+    uint16_t checksum;
+};
+
+struct qla_npiv_entry {
+    uint16_t flags;
+    uint16_t vf_id;
+    uint16_t qos;
+    uint16_t unused1;
+    uint8_t port_name[WWN_SIZE];
+    uint8_t node_name[WWN_SIZE];
+};
+
 /* 84XX Support **************************************************************/
 
 #define MBA_ISP84XX_ALERT       0x800f /* Alert Notification. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a6..753dbe6cce6e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
 extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
     uint16_t, uint16_t);
 
-extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
 extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
 
+extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+
 /*
  * Global Function Prototypes in qla_dbg.c source file.
  */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ee89ddd64aae..a470f2d3270d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 
     ha->isp_ops->reset_chip(ha);
 
+    rval = qla2xxx_get_flash_info(ha);
+    if (rval) {
+        DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
+            ha->host_no));
+        return (rval);
+    }
+
     ha->isp_ops->get_flash_version(ha, ha->request_ring);
 
     qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
         rval = qla2x00_setup_chip(ha);
         if (rval)
             return (rval);
-        qla2xxx_get_flash_info(ha);
     }
     if (IS_QLA84XX(ha)) {
         ha->cs84xx = qla84xx_get_chip(ha);
@@ -2016,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
         DEBUG3(printk("%s: exiting normally\n", __func__));
     }
 
-    /* Restore state if a resync event occured during processing */
+    /* Restore state if a resync event occurred during processing */
     if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
         if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
             set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2561,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
     rval = QLA_SUCCESS;
 
     /* Try GID_PT to get device list, else GAN. */
-    swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC);
+    swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
     if (!swl) {
         /*EMPTY*/
         DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3751,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
     rval = QLA_SUCCESS;
 
     segments = FA_RISC_CODE_SEGMENTS;
-    faddr = FA_RISC_CODE_ADDR;
+    faddr = ha->flt_region_fw;
     dcode = (uint32_t *)ha->request_ring;
     *srisc_addr = 0;
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaab..e90afad120ee 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
  * @ha: HA context
  * @ha_locked: is function called with the hardware lock
  *
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
  */
 static inline int
 qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa4615..85bc0a48598b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
  * Returns the proper CF_* direction based on CDB.
  */
 static inline uint16_t
-qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
+qla2x00_get_cmd_direction(srb_t *sp)
 {
     uint16_t cflags;
 
     cflags = 0;
 
     /* Set transfer direction */
-    if (cmd->sc_data_direction == DMA_TO_DEVICE)
+    if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
         cflags = CF_WRITE;
-    else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+        sp->fcport->ha->qla_stats.output_bytes +=
+            scsi_bufflen(sp->cmd);
+    } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
         cflags = CF_READ;
+        sp->fcport->ha->qla_stats.input_bytes +=
+            scsi_bufflen(sp->cmd);
+    }
     return (cflags);
 }
 
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 
     ha = sp->ha;
 
-    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
+    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
     /* Three DSDs are available in the Command Type 2 IOCB */
     avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 
     ha = sp->ha;
 
-    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
+    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
     /* Two DSDs are available in the Command Type 3 IOCB */
     avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
 */
 int
 qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
 *
 * Can be called from both normal and interrupt context.
 *
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
 */
 int
 __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     ha = sp->ha;
 
     /* Set transfer direction */
-    if (cmd->sc_data_direction == DMA_TO_DEVICE)
+    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
         cmd_pkt->task_mgmt_flags =
             __constant_cpu_to_le16(TMF_WRITE_DATA);
-    else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+        sp->fcport->ha->qla_stats.output_bytes +=
+            scsi_bufflen(sp->cmd);
+    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
         cmd_pkt->task_mgmt_flags =
             __constant_cpu_to_le16(TMF_READ_DATA);
+        sp->fcport->ha->qla_stats.input_bytes +=
+            scsi_bufflen(sp->cmd);
+    }
 
     /* One DSD is available in the Command Type 3 IOCB */
     avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
 */
 int
 qla24xx_start_scsi(srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 45a3b93eed57..fc4bfa7f839c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
         break;
 
     case MBA_LIP_OCCURRED:      /* Loop Initialization Procedure */
-        DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
+        DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
             mb[1]));
-        qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);
+        qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
 
         if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
             atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
         DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
             ha->host_no, mb[1]));
         qla_printk(KERN_INFO, ha,
-            "LIP reset occured (%x).\n", mb[1]);
+            "LIP reset occurred (%x).\n", mb[1]);
 
         if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
             atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 
     case MBA_PORT_UPDATE:       /* Port database update */
         /*
-         * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
+         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
          * event etc. earlier indicating loop is down) then process
          * it. Otherwise ignore it and Wait for RSCN to come in.
          */
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
             "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
             ha->host_no, mb[1], mb[2], mb[3]));
 
-        rscn_entry = (mb[1] << 16) | mb[2];
+        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
         host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
             ha->d_id.b.al_pa;
         if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
             break;
         }
 
+        /* Ignore reserved bits from RSCN-payload. */
+        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
         rscn_queue_index = ha->rscn_in_ptr + 1;
         if (rscn_queue_index == MAX_RSCN_COUNT)
             rscn_queue_index = 0;
@@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
                 resid = resid_len;
             /* Use F/W calculated residual length. */
             if (IS_FWI2_CAPABLE(ha)) {
-                if (scsi_status & SS_RESIDUAL_UNDER &&
-                    resid != fw_resid_len) {
+                if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+                    lscsi_status = 0;
+                } else if (resid != fw_resid_len) {
                     scsi_status &= ~SS_RESIDUAL_UNDER;
                     lscsi_status = 0;
1067 | } | 1070 | } |
@@ -1834,7 +1837,6 @@ clear_risc_ints: | |||
1834 | WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); | 1837 | WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); |
1835 | } | 1838 | } |
1836 | spin_unlock_irq(&ha->hardware_lock); | 1839 | spin_unlock_irq(&ha->hardware_lock); |
1837 | ha->isp_ops->enable_intrs(ha); | ||
1838 | 1840 | ||
1839 | fail: | 1841 | fail: |
1840 | return ret; | 1842 | return ret; |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 813bc7784c0a..36bc6851e23d 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp) | |||
233 | DEBUG2_3_11(printk("%s(%ld): timeout schedule " | 233 | DEBUG2_3_11(printk("%s(%ld): timeout schedule " |
234 | "isp_abort_needed.\n", __func__, ha->host_no)); | 234 | "isp_abort_needed.\n", __func__, ha->host_no)); |
235 | qla_printk(KERN_WARNING, ha, | 235 | qla_printk(KERN_WARNING, ha, |
236 | "Mailbox command timeout occured. Scheduling ISP " | 236 | "Mailbox command timeout occurred. Scheduling ISP " |
237 | "abort.\n"); | 237 | "abort.\n"); |
238 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); | 238 | set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); |
239 | qla2xxx_wake_dpc(ha); | 239 | qla2xxx_wake_dpc(ha); |
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp) | |||
244 | DEBUG2_3_11(printk("%s(%ld): timeout calling " | 244 | DEBUG2_3_11(printk("%s(%ld): timeout calling " |
245 | "abort_isp\n", __func__, ha->host_no)); | 245 | "abort_isp\n", __func__, ha->host_no)); |
246 | qla_printk(KERN_WARNING, ha, | 246 | qla_printk(KERN_WARNING, ha, |
247 | "Mailbox command timeout occured. Issuing ISP " | 247 | "Mailbox command timeout occurred. Issuing ISP " |
248 | "abort.\n"); | 248 | "abort.\n"); |
249 | 249 | ||
250 | set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); | 250 | set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); |
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map) | |||
1995 | char *pmap; | 1995 | char *pmap; |
1996 | dma_addr_t pmap_dma; | 1996 | dma_addr_t pmap_dma; |
1997 | 1997 | ||
1998 | pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma); | 1998 | pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); |
1999 | if (pmap == NULL) { | 1999 | if (pmap == NULL) { |
2000 | DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", | 2000 | DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", |
2001 | __func__, ha->host_no)); | 2001 | __func__, ha->host_no)); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 26afe44265c7..3433441b956a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1517,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost) | |||
1517 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | 1517 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); |
1518 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); | 1518 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); |
1519 | set_bit(RSCN_UPDATE, &ha->dpc_flags); | 1519 | set_bit(RSCN_UPDATE, &ha->dpc_flags); |
1520 | set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); | ||
1520 | } | 1521 | } |
1521 | 1522 | ||
1522 | static int | 1523 | static int |
@@ -1663,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1663 | ha->gid_list_info_size = 8; | 1664 | ha->gid_list_info_size = 8; |
1664 | ha->optrom_size = OPTROM_SIZE_25XX; | 1665 | ha->optrom_size = OPTROM_SIZE_25XX; |
1665 | ha->isp_ops = &qla25xx_isp_ops; | 1666 | ha->isp_ops = &qla25xx_isp_ops; |
1666 | ha->hw_event_start = PCI_FUNC(pdev->devfn) ? | ||
1667 | FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR; | ||
1668 | } | 1667 | } |
1669 | host->can_queue = ha->request_q_length + 128; | 1668 | host->can_queue = ha->request_q_length + 128; |
1670 | 1669 | ||
@@ -1740,6 +1739,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1740 | if (ret) | 1739 | if (ret) |
1741 | goto probe_failed; | 1740 | goto probe_failed; |
1742 | 1741 | ||
1742 | ha->isp_ops->enable_intrs(ha); | ||
1743 | |||
1743 | scsi_scan_host(host); | 1744 | scsi_scan_host(host); |
1744 | 1745 | ||
1745 | qla2x00_alloc_sysfs_attr(ha); | 1746 | qla2x00_alloc_sysfs_attr(ha); |
@@ -2431,6 +2432,12 @@ qla2x00_do_dpc(void *data) | |||
2431 | ha->host_no)); | 2432 | ha->host_no)); |
2432 | } | 2433 | } |
2433 | 2434 | ||
2435 | if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && | ||
2436 | atomic_read(&ha->loop_state) == LOOP_READY) { | ||
2437 | clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); | ||
2438 | qla2xxx_flash_npiv_conf(ha); | ||
2439 | } | ||
2440 | |||
2434 | if (!ha->interrupts_on) | 2441 | if (!ha->interrupts_on) |
2435 | ha->isp_ops->enable_intrs(ha); | 2442 | ha->isp_ops->enable_intrs(ha); |
2436 | 2443 | ||
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 1bca74474935..90a13211717f 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, | |||
543 | } | 543 | } |
544 | } | 544 | } |
545 | 545 | ||
546 | void | 546 | static int |
547 | qla2xxx_get_flash_info(scsi_qla_host_t *ha) | 547 | qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start) |
548 | { | ||
549 | const char *loc, *locations[] = { "DEF", "PCI" }; | ||
550 | uint32_t pcihdr, pcids; | ||
551 | uint32_t *dcode; | ||
552 | uint8_t *buf, *bcode, last_image; | ||
553 | uint16_t cnt, chksum, *wptr; | ||
554 | struct qla_flt_location *fltl; | ||
555 | |||
556 | /* | ||
557 | * FLT-location structure resides after the last PCI region. | ||
558 | */ | ||
559 | |||
560 | /* Begin with sane defaults. */ | ||
561 | loc = locations[0]; | ||
562 | *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24: | ||
563 | FA_FLASH_LAYOUT_ADDR; | ||
564 | |||
565 | /* Begin with first PCI expansion ROM header. */ | ||
566 | buf = (uint8_t *)ha->request_ring; | ||
567 | dcode = (uint32_t *)ha->request_ring; | ||
568 | pcihdr = 0; | ||
569 | last_image = 1; | ||
570 | do { | ||
571 | /* Verify PCI expansion ROM header. */ | ||
572 | qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); | ||
573 | bcode = buf + (pcihdr % 4); | ||
574 | if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) | ||
575 | goto end; | ||
576 | |||
577 | /* Locate PCI data structure. */ | ||
578 | pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); | ||
579 | qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); | ||
580 | bcode = buf + (pcihdr % 4); | ||
581 | |||
582 | /* Validate signature of PCI data structure. */ | ||
583 | if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || | ||
584 | bcode[0x2] != 'I' || bcode[0x3] != 'R') | ||
585 | goto end; | ||
586 | |||
587 | last_image = bcode[0x15] & BIT_7; | ||
588 | |||
589 | /* Locate next PCI expansion ROM. */ | ||
590 | pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; | ||
591 | } while (!last_image); | ||
592 | |||
593 | /* Now verify FLT-location structure. */ | ||
594 | fltl = (struct qla_flt_location *)ha->request_ring; | ||
595 | qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, | ||
596 | sizeof(struct qla_flt_location) >> 2); | ||
597 | if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' || | ||
598 | fltl->sig[2] != 'L' || fltl->sig[3] != 'T') | ||
599 | goto end; | ||
600 | |||
601 | wptr = (uint16_t *)ha->request_ring; | ||
602 | cnt = sizeof(struct qla_flt_location) >> 1; | ||
603 | for (chksum = 0; cnt; cnt--) | ||
604 | chksum += le16_to_cpu(*wptr++); | ||
605 | if (chksum) { | ||
606 | qla_printk(KERN_ERR, ha, | ||
607 | "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); | ||
608 | qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location)); | ||
609 | return QLA_FUNCTION_FAILED; | ||
610 | } | ||
611 | |||
612 | /* Good data. Use specified location. */ | ||
613 | loc = locations[1]; | ||
614 | *start = le16_to_cpu(fltl->start_hi) << 16 | | ||
615 | le16_to_cpu(fltl->start_lo); | ||
616 | end: | ||
617 | DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); | ||
618 | return QLA_SUCCESS; | ||
619 | } | ||
620 | |||
621 | static void | ||
622 | qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr) | ||
623 | { | ||
624 | const char *loc, *locations[] = { "DEF", "FLT" }; | ||
625 | uint16_t *wptr; | ||
626 | uint16_t cnt, chksum; | ||
627 | uint32_t start; | ||
628 | struct qla_flt_header *flt; | ||
629 | struct qla_flt_region *region; | ||
630 | |||
631 | ha->flt_region_flt = flt_addr; | ||
632 | wptr = (uint16_t *)ha->request_ring; | ||
633 | flt = (struct qla_flt_header *)ha->request_ring; | ||
634 | region = (struct qla_flt_region *)&flt[1]; | ||
635 | ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, | ||
636 | flt_addr << 2, OPTROM_BURST_SIZE); | ||
637 | if (*wptr == __constant_cpu_to_le16(0xffff)) | ||
638 | goto no_flash_data; | ||
639 | if (flt->version != __constant_cpu_to_le16(1)) { | ||
640 | DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: " | ||
641 | "version=0x%x length=0x%x checksum=0x%x.\n", | ||
642 | le16_to_cpu(flt->version), le16_to_cpu(flt->length), | ||
643 | le16_to_cpu(flt->checksum))); | ||
644 | goto no_flash_data; | ||
645 | } | ||
646 | |||
647 | cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1; | ||
648 | for (chksum = 0; cnt; cnt--) | ||
649 | chksum += le16_to_cpu(*wptr++); | ||
650 | if (chksum) { | ||
651 | DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: " | ||
652 | "version=0x%x length=0x%x checksum=0x%x.\n", | ||
653 | le16_to_cpu(flt->version), le16_to_cpu(flt->length), | ||
654 | chksum)); | ||
655 | goto no_flash_data; | ||
656 | } | ||
657 | |||
658 | loc = locations[1]; | ||
659 | cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); | ||
660 | for ( ; cnt; cnt--, region++) { | ||
661 | /* Store addresses as DWORD offsets. */ | ||
662 | start = le32_to_cpu(region->start) >> 2; | ||
663 | |||
664 | DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " | ||
665 | "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, | ||
666 | le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); | ||
667 | |||
668 | switch (le32_to_cpu(region->code)) { | ||
669 | case FLT_REG_FW: | ||
670 | ha->flt_region_fw = start; | ||
671 | break; | ||
672 | case FLT_REG_BOOT_CODE: | ||
673 | ha->flt_region_boot = start; | ||
674 | break; | ||
675 | case FLT_REG_VPD_0: | ||
676 | ha->flt_region_vpd_nvram = start; | ||
677 | break; | ||
678 | case FLT_REG_FDT: | ||
679 | ha->flt_region_fdt = start; | ||
680 | break; | ||
681 | case FLT_REG_HW_EVENT_0: | ||
682 | if (!PCI_FUNC(ha->pdev->devfn)) | ||
683 | ha->flt_region_hw_event = start; | ||
684 | break; | ||
685 | case FLT_REG_HW_EVENT_1: | ||
686 | if (PCI_FUNC(ha->pdev->devfn)) | ||
687 | ha->flt_region_hw_event = start; | ||
688 | break; | ||
689 | case FLT_REG_NPIV_CONF_0: | ||
690 | if (!PCI_FUNC(ha->pdev->devfn)) | ||
691 | ha->flt_region_npiv_conf = start; | ||
692 | break; | ||
693 | case FLT_REG_NPIV_CONF_1: | ||
694 | if (PCI_FUNC(ha->pdev->devfn)) | ||
695 | ha->flt_region_npiv_conf = start; | ||
696 | break; | ||
697 | } | ||
698 | } | ||
699 | goto done; | ||
700 | |||
701 | no_flash_data: | ||
702 | /* Use hardcoded defaults. */ | ||
703 | loc = locations[0]; | ||
704 | ha->flt_region_fw = FA_RISC_CODE_ADDR; | ||
705 | ha->flt_region_boot = FA_BOOT_CODE_ADDR; | ||
706 | ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR; | ||
707 | ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24: | ||
708 | FA_FLASH_DESCR_ADDR; | ||
709 | ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ? | ||
710 | FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR; | ||
711 | ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ? | ||
712 | (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR): | ||
713 | (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR); | ||
714 | done: | ||
715 | DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " | ||
716 | "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc, | ||
717 | ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram, | ||
718 | ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event, | ||
719 | ha->flt_region_npiv_conf)); | ||
720 | } | ||
721 | |||
722 | static void | ||
723 | qla2xxx_get_fdt_info(scsi_qla_host_t *ha) | ||
548 | { | 724 | { |
549 | #define FLASH_BLK_SIZE_32K 0x8000 | 725 | #define FLASH_BLK_SIZE_32K 0x8000 |
550 | #define FLASH_BLK_SIZE_64K 0x10000 | 726 | #define FLASH_BLK_SIZE_64K 0x10000 |
727 | const char *loc, *locations[] = { "MID", "FDT" }; | ||
551 | uint16_t cnt, chksum; | 728 | uint16_t cnt, chksum; |
552 | uint16_t *wptr; | 729 | uint16_t *wptr; |
553 | struct qla_fdt_layout *fdt; | 730 | struct qla_fdt_layout *fdt; |
554 | uint8_t man_id, flash_id; | 731 | uint8_t man_id, flash_id; |
555 | 732 | uint16_t mid, fid; | |
556 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) | ||
557 | return; | ||
558 | 733 | ||
559 | wptr = (uint16_t *)ha->request_ring; | 734 | wptr = (uint16_t *)ha->request_ring; |
560 | fdt = (struct qla_fdt_layout *)ha->request_ring; | 735 | fdt = (struct qla_fdt_layout *)ha->request_ring; |
561 | ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, | 736 | ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, |
562 | FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE); | 737 | ha->flt_region_fdt << 2, OPTROM_BURST_SIZE); |
563 | if (*wptr == __constant_cpu_to_le16(0xffff)) | 738 | if (*wptr == __constant_cpu_to_le16(0xffff)) |
564 | goto no_flash_data; | 739 | goto no_flash_data; |
565 | if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || | 740 | if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || |
@@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha) | |||
577 | goto no_flash_data; | 752 | goto no_flash_data; |
578 | } | 753 | } |
579 | 754 | ||
580 | ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f; | 755 | loc = locations[1]; |
756 | mid = le16_to_cpu(fdt->man_id); | ||
757 | fid = le16_to_cpu(fdt->id); | ||
758 | ha->fdt_odd_index = mid == 0x1f; | ||
581 | ha->fdt_wrt_disable = fdt->wrt_disable_bits; | 759 | ha->fdt_wrt_disable = fdt->wrt_disable_bits; |
582 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); | 760 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); |
583 | ha->fdt_block_size = le32_to_cpu(fdt->block_size); | 761 | ha->fdt_block_size = le32_to_cpu(fdt->block_size); |
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha) | |||
588 | flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): | 766 | flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): |
589 | flash_conf_to_access_addr(0x0336); | 767 | flash_conf_to_access_addr(0x0336); |
590 | } | 768 | } |
591 | 769 | goto done; | |
592 | DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x " | ||
593 | "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", | ||
594 | le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd, | ||
595 | ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd, | ||
596 | ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size)); | ||
597 | return; | ||
598 | |||
599 | no_flash_data: | 770 | no_flash_data: |
771 | loc = locations[0]; | ||
600 | qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); | 772 | qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); |
773 | mid = man_id; | ||
774 | fid = flash_id; | ||
601 | ha->fdt_wrt_disable = 0x9c; | 775 | ha->fdt_wrt_disable = 0x9c; |
602 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); | 776 | ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); |
603 | switch (man_id) { | 777 | switch (man_id) { |
@@ -625,14 +799,117 @@ no_flash_data: | |||
625 | ha->fdt_block_size = FLASH_BLK_SIZE_64K; | 799 | ha->fdt_block_size = FLASH_BLK_SIZE_64K; |
626 | break; | 800 | break; |
627 | } | 801 | } |
628 | 802 | done: | |
629 | DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x " | 803 | DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " |
630 | "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id, | 804 | "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid, |
631 | ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, | 805 | ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, |
632 | ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, | 806 | ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, |
633 | ha->fdt_block_size)); | 807 | ha->fdt_block_size)); |
634 | } | 808 | } |
635 | 809 | ||
810 | int | ||
811 | qla2xxx_get_flash_info(scsi_qla_host_t *ha) | ||
812 | { | ||
813 | int ret; | ||
814 | uint32_t flt_addr; | ||
815 | |||
816 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) | ||
817 | return QLA_SUCCESS; | ||
818 | |||
819 | ret = qla2xxx_find_flt_start(ha, &flt_addr); | ||
820 | if (ret != QLA_SUCCESS) | ||
821 | return ret; | ||
822 | |||
823 | qla2xxx_get_flt_info(ha, flt_addr); | ||
824 | qla2xxx_get_fdt_info(ha); | ||
825 | |||
826 | return QLA_SUCCESS; | ||
827 | } | ||
828 | |||
829 | void | ||
830 | qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha) | ||
831 | { | ||
832 | #define NPIV_CONFIG_SIZE (16*1024) | ||
833 | void *data; | ||
834 | uint16_t *wptr; | ||
835 | uint16_t cnt, chksum; | ||
836 | struct qla_npiv_header hdr; | ||
837 | struct qla_npiv_entry *entry; | ||
838 | |||
839 | if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) | ||
840 | return; | ||
841 | |||
842 | ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr, | ||
843 | ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); | ||
844 | if (hdr.version == __constant_cpu_to_le16(0xffff)) | ||
845 | return; | ||
846 | if (hdr.version != __constant_cpu_to_le16(1)) { | ||
847 | DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config " | ||
848 | "detected: version=0x%x entries=0x%x checksum=0x%x.\n", | ||
849 | le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), | ||
850 | le16_to_cpu(hdr.checksum))); | ||
851 | return; | ||
852 | } | ||
853 | |||
854 | data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); | ||
855 | if (!data) { | ||
856 | DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to " | ||
857 | "allocate memory.\n")); | ||
858 | return; | ||
859 | } | ||
860 | |||
861 | ha->isp_ops->read_optrom(ha, (uint8_t *)data, | ||
862 | ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); | ||
863 | |||
864 | cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) * | ||
865 | sizeof(struct qla_npiv_entry)) >> 1; | ||
866 | for (wptr = data, chksum = 0; cnt; cnt--) | ||
867 | chksum += le16_to_cpu(*wptr++); | ||
868 | if (chksum) { | ||
869 | DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config " | ||
870 | "detected: version=0x%x entries=0x%x checksum=0x%x.\n", | ||
871 | le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), | ||
872 | chksum)); | ||
873 | goto done; | ||
874 | } | ||
875 | |||
876 | entry = data + sizeof(struct qla_npiv_header); | ||
877 | cnt = le16_to_cpu(hdr.entries); | ||
878 | for ( ; cnt; cnt--, entry++) { | ||
879 | uint16_t flags; | ||
880 | struct fc_vport_identifiers vid; | ||
881 | struct fc_vport *vport; | ||
882 | |||
883 | flags = le16_to_cpu(entry->flags); | ||
884 | if (flags == 0xffff) | ||
885 | continue; | ||
886 | if ((flags & BIT_0) == 0) | ||
887 | continue; | ||
888 | |||
889 | memset(&vid, 0, sizeof(vid)); | ||
890 | vid.roles = FC_PORT_ROLE_FCP_INITIATOR; | ||
891 | vid.vport_type = FC_PORTTYPE_NPIV; | ||
892 | vid.disable = false; | ||
893 | vid.port_name = wwn_to_u64(entry->port_name); | ||
894 | vid.node_name = wwn_to_u64(entry->node_name); | ||
895 | |||
896 | DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " | ||
897 | "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt, | ||
898 | (unsigned long long)vid.port_name, | ||
899 | (unsigned long long)vid.node_name, | ||
900 | le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos))); | ||
901 | |||
902 | vport = fc_vport_create(ha->host, 0, &vid); | ||
903 | if (!vport) | ||
904 | qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to " | ||
905 | "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt, | ||
906 | (unsigned long long)vid.port_name, | ||
907 | (unsigned long long)vid.node_name); | ||
908 | } | ||
909 | done: | ||
910 | kfree(data); | ||
911 | } | ||
912 | |||
636 | static void | 913 | static void |
637 | qla24xx_unprotect_flash(scsi_qla_host_t *ha) | 914 | qla24xx_unprotect_flash(scsi_qla_host_t *ha) |
638 | { | 915 | { |
@@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | |||
920 | dwptr = (uint32_t *)buf; | 1197 | dwptr = (uint32_t *)buf; |
921 | for (i = 0; i < bytes >> 2; i++, naddr++) | 1198 | for (i = 0; i < bytes >> 2; i++, naddr++) |
922 | dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, | 1199 | dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, |
923 | flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr))); | 1200 | flash_data_to_access_addr(ha->flt_region_vpd_nvram | |
1201 | naddr))); | ||
924 | 1202 | ||
925 | return buf; | 1203 | return buf; |
926 | } | 1204 | } |
@@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, | |||
935 | dbuf = vmalloc(RMW_BUFFER_SIZE); | 1213 | dbuf = vmalloc(RMW_BUFFER_SIZE); |
936 | if (!dbuf) | 1214 | if (!dbuf) |
937 | return QLA_MEMORY_ALLOC_FAILED; | 1215 | return QLA_MEMORY_ALLOC_FAILED; |
938 | ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, | 1216 | ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, |
939 | RMW_BUFFER_SIZE); | 1217 | RMW_BUFFER_SIZE); |
940 | memcpy(dbuf + (naddr << 2), buf, bytes); | 1218 | memcpy(dbuf + (naddr << 2), buf, bytes); |
941 | ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, | 1219 | ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, |
942 | RMW_BUFFER_SIZE); | 1220 | RMW_BUFFER_SIZE); |
943 | vfree(dbuf); | 1221 | vfree(dbuf); |
944 | 1222 | ||
@@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf) | |||
2166 | memset(dbyte, 0, 8); | 2444 | memset(dbyte, 0, 8); |
2167 | dcode = (uint16_t *)dbyte; | 2445 | dcode = (uint16_t *)dbyte; |
2168 | 2446 | ||
2169 | qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10, | 2447 | qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, |
2170 | 8); | 2448 | 8); |
2171 | DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", | 2449 | DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", |
2172 | __func__, ha->host_no)); | 2450 | __func__, ha->host_no)); |
@@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf) | |||
2177 | (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && | 2455 | (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && |
2178 | dcode[3] == 0)) { | 2456 | dcode[3] == 0)) { |
2179 | DEBUG2(printk("%s(): Unrecognized fw revision at " | 2457 | DEBUG2(printk("%s(): Unrecognized fw revision at " |
2180 | "%x.\n", __func__, FA_RISC_CODE_ADDR * 4)); | 2458 | "%x.\n", __func__, ha->flt_region_fw * 4)); |
2181 | } else { | 2459 | } else { |
2182 | /* values are in big endian */ | 2460 | /* values are in big endian */ |
2183 | ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; | 2461 | ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; |
@@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) | |||
2212 | dcode = mbuf; | 2490 | dcode = mbuf; |
2213 | 2491 | ||
2214 | /* Begin with first PCI expansion ROM header. */ | 2492 | /* Begin with first PCI expansion ROM header. */ |
2215 | pcihdr = 0; | 2493 | pcihdr = ha->flt_region_boot; |
2216 | last_image = 1; | 2494 | last_image = 1; |
2217 | do { | 2495 | do { |
2218 | /* Verify PCI expansion ROM header. */ | 2496 | /* Verify PCI expansion ROM header. */ |
@@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) | |||
2282 | memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); | 2560 | memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); |
2283 | dcode = mbuf; | 2561 | dcode = mbuf; |
2284 | 2562 | ||
2285 | qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4); | 2563 | qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4); |
2286 | for (i = 0; i < 4; i++) | 2564 | for (i = 0; i < 4; i++) |
2287 | dcode[i] = be32_to_cpu(dcode[i]); | 2565 | dcode[i] = be32_to_cpu(dcode[i]); |
2288 | 2566 | ||
@@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) | |||
2291 | (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && | 2569 | (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && |
2292 | dcode[3] == 0)) { | 2570 | dcode[3] == 0)) { |
2293 | DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", | 2571 | DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", |
2294 | __func__, FA_RISC_CODE_ADDR)); | 2572 | __func__, ha->flt_region_fw)); |
2295 | } else { | 2573 | } else { |
2296 | ha->fw_revision[0] = dcode[0]; | 2574 | ha->fw_revision[0] = dcode[0]; |
2297 | ha->fw_revision[1] = dcode[1]; | 2575 | ha->fw_revision[1] = dcode[1]; |
@@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) | |||
2355 | /* Locate first empty entry. */ | 2633 | /* Locate first empty entry. */ |
2356 | for (;;) { | 2634 | for (;;) { |
2357 | if (ha->hw_event_ptr >= | 2635 | if (ha->hw_event_ptr >= |
2358 | ha->hw_event_start + FA_HW_EVENT_SIZE) { | 2636 | ha->flt_region_hw_event + FA_HW_EVENT_SIZE) { |
2359 | DEBUG2(qla_printk(KERN_WARNING, ha, | 2637 | DEBUG2(qla_printk(KERN_WARNING, ha, |
2360 | "HW event -- Log Full!\n")); | 2638 | "HW event -- Log Full!\n")); |
2361 | return QLA_MEMORY_ALLOC_FAILED; | 2639 | return QLA_MEMORY_ALLOC_FAILED; |
@@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, | |||
2391 | int rval; | 2669 | int rval; |
2392 | uint32_t marker[2], fdata[4]; | 2670 | uint32_t marker[2], fdata[4]; |
2393 | 2671 | ||
2394 | if (ha->hw_event_start == 0) | 2672 | if (ha->flt_region_hw_event == 0) |
2395 | return QLA_FUNCTION_FAILED; | 2673 | return QLA_FUNCTION_FAILED; |
2396 | 2674 | ||
2397 | DEBUG2(qla_printk(KERN_WARNING, ha, | 2675 | DEBUG2(qla_printk(KERN_WARNING, ha, |
@@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, | |||
2406 | QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); | 2684 | QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); |
2407 | 2685 | ||
2408 | /* Locate marker. */ | 2686 | /* Locate marker. */ |
2409 | ha->hw_event_ptr = ha->hw_event_start; | 2687 | ha->hw_event_ptr = ha->flt_region_hw_event; |
2410 | for (;;) { | 2688 | for (;;) { |
2411 | qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, | 2689 | qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, |
2412 | 4); | 2690 | 4); |
@@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, | |||
2415 | break; | 2693 | break; |
2416 | ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; | 2694 | ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; |
2417 | if (ha->hw_event_ptr >= | 2695 | if (ha->hw_event_ptr >= |
2418 | ha->hw_event_start + FA_HW_EVENT_SIZE) { | 2696 | ha->flt_region_hw_event + FA_HW_EVENT_SIZE) { |
2419 | DEBUG2(qla_printk(KERN_WARNING, ha, | 2697 | DEBUG2(qla_printk(KERN_WARNING, ha, |
2420 | "HW event -- Log Full!\n")); | 2698 | "HW event -- Log Full!\n")); |
2421 | return QLA_MEMORY_ALLOC_FAILED; | 2699 | return QLA_MEMORY_ALLOC_FAILED; |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 4160e4caa7b9..be5e299df528 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.01-k7" | 10 | #define QLA2XXX_VERSION "8.02.01-k8" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 88bebb13bc52..de8279ad7d89 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
1542 | DEBUG2(printk(KERN_INFO | 1542 | DEBUG2(printk(KERN_INFO |
1543 | "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," | 1543 | "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," |
1544 | "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, | 1544 | "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, |
1545 | cmd, jiffies, cmd->timeout_per_command / HZ, | 1545 | cmd, jiffies, cmd->request->timeout / HZ, |
1546 | ha->dpc_flags, cmd->result, cmd->allowed)); | 1546 | ha->dpc_flags, cmd->result, cmd->allowed)); |
1547 | 1547 | ||
1548 | /* FIXME: wait for hba to go online */ | 1548 | /* FIXME: wait for hba to go online */ |
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | |||
1598 | DEBUG2(printk(KERN_INFO | 1598 | DEBUG2(printk(KERN_INFO |
1599 | "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " | 1599 | "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " |
1600 | "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", | 1600 | "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", |
1601 | ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, | 1601 | ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, |
1602 | ha->dpc_flags, cmd->result, cmd->allowed)); | 1602 | ha->dpc_flags, cmd->result, cmd->allowed)); |
1603 | 1603 | ||
1604 | stat = qla4xxx_reset_target(ha, ddb_entry); | 1604 | stat = qla4xxx_reset_target(ha, ddb_entry); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 42807671512b..69d6ad862b60 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -918,6 +918,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
918 | ds[i].d_count = sg_dma_len(s); | 918 | ds[i].d_count = sg_dma_len(s); |
919 | } | 919 | } |
920 | sg_count -= n; | 920 | sg_count -= n; |
921 | sg = s; | ||
921 | } | 922 | } |
922 | } else { | 923 | } else { |
923 | cmd->dataseg[0].d_base = 0; | 924 | cmd->dataseg[0].d_base = 0; |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index ee6be596503d..2ac3cb2b9081 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) | |||
291 | unsigned long flags; | 291 | unsigned long flags; |
292 | 292 | ||
293 | cmd->device = dev; | 293 | cmd->device = dev; |
294 | init_timer(&cmd->eh_timeout); | ||
295 | INIT_LIST_HEAD(&cmd->list); | 294 | INIT_LIST_HEAD(&cmd->list); |
296 | spin_lock_irqsave(&dev->list_lock, flags); | 295 | spin_lock_irqsave(&dev->list_lock, flags); |
297 | list_add_tail(&cmd->list, &dev->cmd_list); | 296 | list_add_tail(&cmd->list, &dev->cmd_list); |
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
652 | unsigned long timeout; | 651 | unsigned long timeout; |
653 | int rtn = 0; | 652 | int rtn = 0; |
654 | 653 | ||
654 | /* | ||
655 | * We will use a queued command if possible, otherwise we will | ||
656 | * emulate the queuing and calling of completion function ourselves. | ||
657 | */ | ||
658 | atomic_inc(&cmd->device->iorequest_cnt); | ||
659 | |||
655 | /* check if the device is still usable */ | 660 | /* check if the device is still usable */ |
656 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { | 661 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { |
657 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT | 662 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT |
658 | * returns an immediate error upwards, and signals | 663 | * returns an immediate error upwards, and signals |
659 | * that the device is no longer present */ | 664 | * that the device is no longer present */ |
660 | cmd->result = DID_NO_CONNECT << 16; | 665 | cmd->result = DID_NO_CONNECT << 16; |
661 | atomic_inc(&cmd->device->iorequest_cnt); | 666 | scsi_done(cmd); |
662 | __scsi_done(cmd); | ||
663 | /* return 0 (because the command has been processed) */ | 667 | /* return 0 (because the command has been processed) */ |
664 | goto out; | 668 | goto out; |
665 | } | 669 | } |
666 | 670 | ||
667 | /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ | 671 | /* Check to see if the scsi lld made this device blocked. */ |
668 | if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { | 672 | if (unlikely(scsi_device_blocked(cmd->device))) { |
669 | /* | 673 | /* |
670 | * in SDEV_BLOCK, the command is just put back on the device | 674 | * in blocked state, the command is just put back on |
671 | * queue. The suspend state has already blocked the queue so | 675 | * the device queue. The suspend state has already |
672 | * future requests should not occur until the device | 676 | * blocked the queue so future requests should not |
673 | * transitions out of the suspend state. | 677 | * occur until the device transitions out of the |
678 | * suspend state. | ||
674 | */ | 679 | */ |
680 | |||
675 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); | 681 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); |
676 | 682 | ||
677 | SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); | 683 | SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); |
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
714 | host->resetting = 0; | 720 | host->resetting = 0; |
715 | } | 721 | } |
716 | 722 | ||
717 | /* | ||
718 | * AK: unlikely race here: for some reason the timer could | ||
719 | * expire before the serial number is set up below. | ||
720 | */ | ||
721 | scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out); | ||
722 | |||
723 | scsi_log_send(cmd); | 723 | scsi_log_send(cmd); |
724 | 724 | ||
725 | /* | 725 | /* |
726 | * We will use a queued command if possible, otherwise we will | ||
727 | * emulate the queuing and calling of completion function ourselves. | ||
728 | */ | ||
729 | atomic_inc(&cmd->device->iorequest_cnt); | ||
730 | |||
731 | /* | ||
732 | * Before we queue this command, check if the command | 726 | * Before we queue this command, check if the command |
733 | * length exceeds what the host adapter can handle. | 727 | * length exceeds what the host adapter can handle. |
734 | */ | 728 | */ |
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
744 | } | 738 | } |
745 | 739 | ||
746 | spin_lock_irqsave(host->host_lock, flags); | 740 | spin_lock_irqsave(host->host_lock, flags); |
741 | /* | ||
742 | * AK: unlikely race here: for some reason the timer could | ||
743 | * expire before the serial number is set up below. | ||
744 | * | ||
745 | * TODO: kill serial or move to blk layer | ||
746 | */ | ||
747 | scsi_cmd_get_serial(host, cmd); | 747 | scsi_cmd_get_serial(host, cmd); |
748 | 748 | ||
749 | if (unlikely(host->shost_state == SHOST_DEL)) { | 749 | if (unlikely(host->shost_state == SHOST_DEL)) { |
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
754 | } | 754 | } |
755 | spin_unlock_irqrestore(host->host_lock, flags); | 755 | spin_unlock_irqrestore(host->host_lock, flags); |
756 | if (rtn) { | 756 | if (rtn) { |
757 | if (scsi_delete_timer(cmd)) { | 757 | scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? |
758 | atomic_inc(&cmd->device->iodone_cnt); | 758 | rtn : SCSI_MLQUEUE_HOST_BUSY); |
759 | scsi_queue_insert(cmd, | ||
760 | (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? | ||
761 | rtn : SCSI_MLQUEUE_HOST_BUSY); | ||
762 | } | ||
763 | SCSI_LOG_MLQUEUE(3, | 759 | SCSI_LOG_MLQUEUE(3, |
764 | printk("queuecommand : request rejected\n")); | 760 | printk("queuecommand : request rejected\n")); |
765 | } | 761 | } |
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
770 | } | 766 | } |
771 | 767 | ||
772 | /** | 768 | /** |
773 | * scsi_req_abort_cmd -- Request command recovery for the specified command | ||
774 | * @cmd: pointer to the SCSI command of interest | ||
775 | * | ||
776 | * This function requests that SCSI Core start recovery for the | ||
777 | * command by deleting the timer and adding the command to the eh | ||
778 | * queue. It can be called by either LLDDs or SCSI Core. LLDDs who | ||
779 | * implement their own error recovery MAY ignore the timeout event if | ||
780 | * they generated scsi_req_abort_cmd. | ||
781 | */ | ||
782 | void scsi_req_abort_cmd(struct scsi_cmnd *cmd) | ||
783 | { | ||
784 | if (!scsi_delete_timer(cmd)) | ||
785 | return; | ||
786 | scsi_times_out(cmd); | ||
787 | } | ||
788 | EXPORT_SYMBOL(scsi_req_abort_cmd); | ||
789 | |||
790 | /** | ||
791 | * scsi_done - Enqueue the finished SCSI command into the done queue. | 769 | * scsi_done - Enqueue the finished SCSI command into the done queue. |
792 | * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives | 770 | * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives |
793 | * ownership back to SCSI Core -- i.e. the LLDD has finished with it. | 771 | * ownership back to SCSI Core -- i.e. the LLDD has finished with it. |
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd); | |||
802 | */ | 780 | */ |
803 | static void scsi_done(struct scsi_cmnd *cmd) | 781 | static void scsi_done(struct scsi_cmnd *cmd) |
804 | { | 782 | { |
805 | /* | 783 | blk_complete_request(cmd->request); |
806 | * We don't have to worry about this one timing out anymore. | ||
807 | * If we are unable to remove the timer, then the command | ||
808 | * has already timed out. In which case, we have no choice but to | ||
809 | * let the timeout function run, as we have no idea where in fact | ||
810 | * that function could really be. It might be on another processor, | ||
811 | * etc, etc. | ||
812 | */ | ||
813 | if (!scsi_delete_timer(cmd)) | ||
814 | return; | ||
815 | __scsi_done(cmd); | ||
816 | } | ||
817 | |||
818 | /* Private entry to scsi_done() to complete a command when the timer | ||
819 | * isn't running --- used by scsi_times_out */ | ||
820 | void __scsi_done(struct scsi_cmnd *cmd) | ||
821 | { | ||
822 | struct request *rq = cmd->request; | ||
823 | |||
824 | /* | ||
825 | * Set the serial numbers back to zero | ||
826 | */ | ||
827 | cmd->serial_number = 0; | ||
828 | |||
829 | atomic_inc(&cmd->device->iodone_cnt); | ||
830 | if (cmd->result) | ||
831 | atomic_inc(&cmd->device->ioerr_cnt); | ||
832 | |||
833 | BUG_ON(!rq); | ||
834 | |||
835 | /* | ||
836 | * The uptodate/nbytes values don't matter, as we allow partial | ||
837 | * completes and thus will check this in the softirq callback | ||
838 | */ | ||
839 | rq->completion_data = cmd; | ||
840 | blk_complete_request(rq); | ||
841 | } | 784 | } |
842 | 785 | ||
843 | /* Move this to a header if it becomes more generally useful */ | 786 | /* Move this to a header if it becomes more generally useful */ |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 39ce3aba1dac..fecefa05cb62 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | /** | 114 | /** |
115 | * scsi_add_timer - Start timeout timer for a single scsi command. | ||
116 | * @scmd: scsi command that is about to start running. | ||
117 | * @timeout: amount of time to allow this command to run. | ||
118 | * @complete: timeout function to call if timer isn't canceled. | ||
119 | * | ||
120 | * Notes: | ||
121 | * This should be turned into an inline function. Each scsi command | ||
122 | * has its own timer, and as it is added to the queue, we set up the | ||
123 | * timer. When the command completes, we cancel the timer. | ||
124 | */ | ||
125 | void scsi_add_timer(struct scsi_cmnd *scmd, int timeout, | ||
126 | void (*complete)(struct scsi_cmnd *)) | ||
127 | { | ||
128 | |||
129 | /* | ||
130 | * If the clock was already running for this command, then | ||
131 | * first delete the timer. The timer handling code gets rather | ||
132 | * confused if we don't do this. | ||
133 | */ | ||
134 | if (scmd->eh_timeout.function) | ||
135 | del_timer(&scmd->eh_timeout); | ||
136 | |||
137 | scmd->eh_timeout.data = (unsigned long)scmd; | ||
138 | scmd->eh_timeout.expires = jiffies + timeout; | ||
139 | scmd->eh_timeout.function = (void (*)(unsigned long)) complete; | ||
140 | |||
141 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" | ||
142 | " %d, (%p)\n", __func__, | ||
143 | scmd, timeout, complete)); | ||
144 | |||
145 | add_timer(&scmd->eh_timeout); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * scsi_delete_timer - Delete/cancel timer for a given function. | ||
150 | * @scmd: Cmd that we are canceling timer for | ||
151 | * | ||
152 | * Notes: | ||
153 | * This should be turned into an inline function. | ||
154 | * | ||
155 | * Return value: | ||
156 | * 1 if we were able to detach the timer. 0 if we blew it, and the | ||
157 | * timer function has already started to run. | ||
158 | */ | ||
159 | int scsi_delete_timer(struct scsi_cmnd *scmd) | ||
160 | { | ||
161 | int rtn; | ||
162 | |||
163 | rtn = del_timer(&scmd->eh_timeout); | ||
164 | |||
165 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," | ||
166 | " rtn: %d\n", __func__, | ||
167 | scmd, rtn)); | ||
168 | |||
169 | scmd->eh_timeout.data = (unsigned long)NULL; | ||
170 | scmd->eh_timeout.function = NULL; | ||
171 | |||
172 | return rtn; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * scsi_times_out - Timeout function for normal scsi commands. | 115 | * scsi_times_out - Timeout function for normal scsi commands. |
177 | * @scmd: Cmd that is timing out. | 116 | * @req: request that is timing out. |
178 | * | 117 | * |
179 | * Notes: | 118 | * Notes: |
180 | * We do not need to lock this. There is the potential for a race | 119 | * We do not need to lock this. There is the potential for a race |
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd) | |||
182 | * normal completion function determines that the timer has already | 121 | * normal completion function determines that the timer has already |
183 | * fired, then it mustn't do anything. | 122 | * fired, then it mustn't do anything. |
184 | */ | 123 | */ |
185 | void scsi_times_out(struct scsi_cmnd *scmd) | 124 | enum blk_eh_timer_return scsi_times_out(struct request *req) |
186 | { | 125 | { |
187 | enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); | 126 | struct scsi_cmnd *scmd = req->special; |
127 | enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); | ||
128 | enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; | ||
188 | 129 | ||
189 | scsi_log_completion(scmd, TIMEOUT_ERROR); | 130 | scsi_log_completion(scmd, TIMEOUT_ERROR); |
190 | 131 | ||
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd) | |||
196 | eh_timed_out = NULL; | 137 | eh_timed_out = NULL; |
197 | 138 | ||
198 | if (eh_timed_out) | 139 | if (eh_timed_out) |
199 | switch (eh_timed_out(scmd)) { | 140 | rtn = eh_timed_out(scmd); |
200 | case EH_HANDLED: | 141 | switch (rtn) { |
201 | __scsi_done(scmd); | 142 | case BLK_EH_NOT_HANDLED: |
202 | return; | ||
203 | case EH_RESET_TIMER: | ||
204 | scsi_add_timer(scmd, scmd->timeout_per_command, | ||
205 | scsi_times_out); | ||
206 | return; | ||
207 | case EH_NOT_HANDLED: | ||
208 | break; | 143 | break; |
144 | default: | ||
145 | return rtn; | ||
209 | } | 146 | } |
210 | 147 | ||
211 | if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { | 148 | if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { |
212 | scmd->result |= DID_TIME_OUT << 16; | 149 | scmd->result |= DID_TIME_OUT << 16; |
213 | __scsi_done(scmd); | 150 | return BLK_EH_HANDLED; |
214 | } | 151 | } |
152 | |||
153 | return BLK_EH_NOT_HANDLED; | ||
215 | } | 154 | } |
216 | 155 | ||
217 | /** | 156 | /** |
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
1793 | 1732 | ||
1794 | blk_rq_init(NULL, &req); | 1733 | blk_rq_init(NULL, &req); |
1795 | scmd->request = &req; | 1734 | scmd->request = &req; |
1796 | memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout)); | ||
1797 | 1735 | ||
1798 | scmd->cmnd = req.cmd; | 1736 | scmd->cmnd = req.cmd; |
1799 | 1737 | ||
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
1804 | 1742 | ||
1805 | scmd->sc_data_direction = DMA_BIDIRECTIONAL; | 1743 | scmd->sc_data_direction = DMA_BIDIRECTIONAL; |
1806 | 1744 | ||
1807 | init_timer(&scmd->eh_timeout); | ||
1808 | |||
1809 | spin_lock_irqsave(shost->host_lock, flags); | 1745 | spin_lock_irqsave(shost->host_lock, flags); |
1810 | shost->tmf_in_progress = 1; | 1746 | shost->tmf_in_progress = 1; |
1811 | spin_unlock_irqrestore(shost->host_lock, flags); | 1747 | spin_unlock_irqrestore(shost->host_lock, flags); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ff5d56b3ee4d..98ee55ced592 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd) | |||
852 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | 852 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) |
853 | { | 853 | { |
854 | int result = cmd->result; | 854 | int result = cmd->result; |
855 | int this_count = scsi_bufflen(cmd); | 855 | int this_count; |
856 | struct request_queue *q = cmd->device->request_queue; | 856 | struct request_queue *q = cmd->device->request_queue; |
857 | struct request *req = cmd->request; | 857 | struct request *req = cmd->request; |
858 | int error = 0; | 858 | int error = 0; |
@@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
908 | */ | 908 | */ |
909 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) | 909 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) |
910 | return; | 910 | return; |
911 | this_count = blk_rq_bytes(req); | ||
911 | 912 | ||
912 | /* good_bytes = 0, or (inclusive) there were leftovers and | 913 | /* good_bytes = 0, or (inclusive) there were leftovers and |
913 | * result = 0, so scsi_end_request couldn't retry. | 914 | * result = 0, so scsi_end_request couldn't retry. |
@@ -1180,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | |||
1180 | 1181 | ||
1181 | cmd->transfersize = req->data_len; | 1182 | cmd->transfersize = req->data_len; |
1182 | cmd->allowed = req->retries; | 1183 | cmd->allowed = req->retries; |
1183 | cmd->timeout_per_command = req->timeout; | ||
1184 | return BLKPREP_OK; | 1184 | return BLKPREP_OK; |
1185 | } | 1185 | } |
1186 | EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); | 1186 | EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); |
@@ -1250,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) | |||
1250 | break; | 1250 | break; |
1251 | case SDEV_QUIESCE: | 1251 | case SDEV_QUIESCE: |
1252 | case SDEV_BLOCK: | 1252 | case SDEV_BLOCK: |
1253 | case SDEV_CREATED_BLOCK: | ||
1253 | /* | 1254 | /* |
1254 | * If the devices is blocked we defer normal commands. | 1255 | * If the devices is blocked we defer normal commands. |
1255 | */ | 1256 | */ |
@@ -1415,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
1415 | spin_unlock(shost->host_lock); | 1416 | spin_unlock(shost->host_lock); |
1416 | spin_lock(sdev->request_queue->queue_lock); | 1417 | spin_lock(sdev->request_queue->queue_lock); |
1417 | 1418 | ||
1418 | __scsi_done(cmd); | 1419 | blk_complete_request(req); |
1419 | } | 1420 | } |
1420 | 1421 | ||
1421 | static void scsi_softirq_done(struct request *rq) | 1422 | static void scsi_softirq_done(struct request *rq) |
1422 | { | 1423 | { |
1423 | struct scsi_cmnd *cmd = rq->completion_data; | 1424 | struct scsi_cmnd *cmd = rq->special; |
1424 | unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; | 1425 | unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; |
1425 | int disposition; | 1426 | int disposition; |
1426 | 1427 | ||
1427 | INIT_LIST_HEAD(&cmd->eh_entry); | 1428 | INIT_LIST_HEAD(&cmd->eh_entry); |
1428 | 1429 | ||
1430 | /* | ||
1431 | * Set the serial numbers back to zero | ||
1432 | */ | ||
1433 | cmd->serial_number = 0; | ||
1434 | |||
1435 | atomic_inc(&cmd->device->iodone_cnt); | ||
1436 | if (cmd->result) | ||
1437 | atomic_inc(&cmd->device->ioerr_cnt); | ||
1438 | |||
1429 | disposition = scsi_decide_disposition(cmd); | 1439 | disposition = scsi_decide_disposition(cmd); |
1430 | if (disposition != SUCCESS && | 1440 | if (disposition != SUCCESS && |
1431 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { | 1441 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { |
@@ -1674,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |||
1674 | 1684 | ||
1675 | blk_queue_prep_rq(q, scsi_prep_fn); | 1685 | blk_queue_prep_rq(q, scsi_prep_fn); |
1676 | blk_queue_softirq_done(q, scsi_softirq_done); | 1686 | blk_queue_softirq_done(q, scsi_softirq_done); |
1687 | blk_queue_rq_timed_out(q, scsi_times_out); | ||
1677 | return q; | 1688 | return q; |
1678 | } | 1689 | } |
1679 | 1690 | ||
@@ -2063,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |||
2063 | 2074 | ||
2064 | switch (state) { | 2075 | switch (state) { |
2065 | case SDEV_CREATED: | 2076 | case SDEV_CREATED: |
2066 | /* There are no legal states that come back to | 2077 | switch (oldstate) { |
2067 | * created. This is the manually initialised start | 2078 | case SDEV_CREATED_BLOCK: |
2068 | * state */ | 2079 | break; |
2069 | goto illegal; | 2080 | default: |
2081 | goto illegal; | ||
2082 | } | ||
2083 | break; | ||
2070 | 2084 | ||
2071 | case SDEV_RUNNING: | 2085 | case SDEV_RUNNING: |
2072 | switch (oldstate) { | 2086 | switch (oldstate) { |
@@ -2104,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |||
2104 | 2118 | ||
2105 | case SDEV_BLOCK: | 2119 | case SDEV_BLOCK: |
2106 | switch (oldstate) { | 2120 | switch (oldstate) { |
2107 | case SDEV_CREATED: | ||
2108 | case SDEV_RUNNING: | 2121 | case SDEV_RUNNING: |
2122 | case SDEV_CREATED_BLOCK: | ||
2123 | break; | ||
2124 | default: | ||
2125 | goto illegal; | ||
2126 | } | ||
2127 | break; | ||
2128 | |||
2129 | case SDEV_CREATED_BLOCK: | ||
2130 | switch (oldstate) { | ||
2131 | case SDEV_CREATED: | ||
2109 | break; | 2132 | break; |
2110 | default: | 2133 | default: |
2111 | goto illegal; | 2134 | goto illegal; |
@@ -2393,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev) | |||
2393 | int err = 0; | 2416 | int err = 0; |
2394 | 2417 | ||
2395 | err = scsi_device_set_state(sdev, SDEV_BLOCK); | 2418 | err = scsi_device_set_state(sdev, SDEV_BLOCK); |
2396 | if (err) | 2419 | if (err) { |
2397 | return err; | 2420 | err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); |
2421 | |||
2422 | if (err) | ||
2423 | return err; | ||
2424 | } | ||
2398 | 2425 | ||
2399 | /* | 2426 | /* |
2400 | * The device has transitioned to SDEV_BLOCK. Stop the | 2427 | * The device has transitioned to SDEV_BLOCK. Stop the |
@@ -2437,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev) | |||
2437 | * and goose the device queue if successful. | 2464 | * and goose the device queue if successful. |
2438 | */ | 2465 | */ |
2439 | err = scsi_device_set_state(sdev, SDEV_RUNNING); | 2466 | err = scsi_device_set_state(sdev, SDEV_RUNNING); |
2440 | if (err) | 2467 | if (err) { |
2441 | return err; | 2468 | err = scsi_device_set_state(sdev, SDEV_CREATED); |
2469 | |||
2470 | if (err) | ||
2471 | return err; | ||
2472 | } | ||
2442 | 2473 | ||
2443 | spin_lock_irqsave(q->queue_lock, flags); | 2474 | spin_lock_irqsave(q->queue_lock, flags); |
2444 | blk_start_queue(q); | 2475 | blk_start_queue(q); |
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index ae7ed9a22662..b37e133de805 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/time.h> | 21 | #include <linux/time.h> |
22 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
23 | #include <linux/security.h> | 23 | #include <linux/security.h> |
24 | #include <linux/delay.h> | ||
24 | #include <net/sock.h> | 25 | #include <net/sock.h> |
25 | #include <net/netlink.h> | 26 | #include <net/netlink.h> |
26 | 27 | ||
@@ -30,6 +31,39 @@ | |||
30 | struct sock *scsi_nl_sock = NULL; | 31 | struct sock *scsi_nl_sock = NULL; |
31 | EXPORT_SYMBOL_GPL(scsi_nl_sock); | 32 | EXPORT_SYMBOL_GPL(scsi_nl_sock); |
32 | 33 | ||
34 | static DEFINE_SPINLOCK(scsi_nl_lock); | ||
35 | static struct list_head scsi_nl_drivers; | ||
36 | |||
37 | static u32 scsi_nl_state; | ||
38 | #define STATE_EHANDLER_BSY 0x00000001 | ||
39 | |||
40 | struct scsi_nl_transport { | ||
41 | int (*msg_handler)(struct sk_buff *); | ||
42 | void (*event_handler)(struct notifier_block *, unsigned long, void *); | ||
43 | unsigned int refcnt; | ||
44 | int flags; | ||
45 | }; | ||
46 | |||
47 | /* flags values (bit flags) */ | ||
48 | #define HANDLER_DELETING 0x1 | ||
49 | |||
50 | static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] = | ||
51 | { {NULL, }, }; | ||
52 | |||
53 | |||
54 | struct scsi_nl_drvr { | ||
55 | struct list_head next; | ||
56 | int (*dmsg_handler)(struct Scsi_Host *shost, void *payload, | ||
57 | u32 len, u32 pid); | ||
58 | void (*devt_handler)(struct notifier_block *nb, | ||
59 | unsigned long event, void *notify_ptr); | ||
60 | struct scsi_host_template *hostt; | ||
61 | u64 vendor_id; | ||
62 | unsigned int refcnt; | ||
63 | int flags; | ||
64 | }; | ||
65 | |||
66 | |||
33 | 67 | ||
34 | /** | 68 | /** |
35 | * scsi_nl_rcv_msg - Receive message handler. | 69 | * scsi_nl_rcv_msg - Receive message handler. |
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb) | |||
45 | { | 79 | { |
46 | struct nlmsghdr *nlh; | 80 | struct nlmsghdr *nlh; |
47 | struct scsi_nl_hdr *hdr; | 81 | struct scsi_nl_hdr *hdr; |
48 | uint32_t rlen; | 82 | unsigned long flags; |
49 | int err; | 83 | u32 rlen; |
84 | int err, tport; | ||
50 | 85 | ||
51 | while (skb->len >= NLMSG_SPACE(0)) { | 86 | while (skb->len >= NLMSG_SPACE(0)) { |
52 | err = 0; | 87 | err = 0; |
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) | |||
65 | 100 | ||
66 | if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { | 101 | if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { |
67 | err = -EBADMSG; | 102 | err = -EBADMSG; |
68 | return; | 103 | goto next_msg; |
69 | } | 104 | } |
70 | 105 | ||
71 | hdr = NLMSG_DATA(nlh); | 106 | hdr = NLMSG_DATA(nlh); |
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb) | |||
83 | if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { | 118 | if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { |
84 | printk(KERN_WARNING "%s: discarding partial message\n", | 119 | printk(KERN_WARNING "%s: discarding partial message\n", |
85 | __func__); | 120 | __func__); |
86 | return; | 121 | goto next_msg; |
87 | } | 122 | } |
88 | 123 | ||
89 | /* | 124 | /* |
90 | * We currently don't support anyone sending us a message | 125 | * Deliver message to the appropriate transport |
91 | */ | 126 | */ |
127 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
128 | |||
129 | tport = hdr->transport; | ||
130 | if ((tport < SCSI_NL_MAX_TRANSPORTS) && | ||
131 | !(transports[tport].flags & HANDLER_DELETING) && | ||
132 | (transports[tport].msg_handler)) { | ||
133 | transports[tport].refcnt++; | ||
134 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
135 | err = transports[tport].msg_handler(skb); | ||
136 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
137 | transports[tport].refcnt--; | ||
138 | } else | ||
139 | err = -ENOENT; | ||
140 | |||
141 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
92 | 142 | ||
93 | next_msg: | 143 | next_msg: |
94 | if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) | 144 | if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) |
@@ -110,14 +160,42 @@ static int | |||
110 | scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) | 160 | scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) |
111 | { | 161 | { |
112 | struct netlink_notify *n = ptr; | 162 | struct netlink_notify *n = ptr; |
163 | struct scsi_nl_drvr *driver; | ||
164 | unsigned long flags; | ||
165 | int tport; | ||
113 | 166 | ||
114 | if (n->protocol != NETLINK_SCSITRANSPORT) | 167 | if (n->protocol != NETLINK_SCSITRANSPORT) |
115 | return NOTIFY_DONE; | 168 | return NOTIFY_DONE; |
116 | 169 | ||
170 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
171 | scsi_nl_state |= STATE_EHANDLER_BSY; | ||
172 | |||
117 | /* | 173 | /* |
118 | * Currently, we are not tracking PID's, etc. There is nothing | 174 | * Pass event on to any transports that may be listening |
119 | * to handle. | ||
120 | */ | 175 | */ |
176 | for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) { | ||
177 | if (!(transports[tport].flags & HANDLER_DELETING) && | ||
178 | (transports[tport].event_handler)) { | ||
179 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
180 | transports[tport].event_handler(this, event, ptr); | ||
181 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
182 | } | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * Pass event on to any drivers that may be listening | ||
187 | */ | ||
188 | list_for_each_entry(driver, &scsi_nl_drivers, next) { | ||
189 | if (!(driver->flags & HANDLER_DELETING) && | ||
190 | (driver->devt_handler)) { | ||
191 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
192 | driver->devt_handler(this, event, ptr); | ||
193 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
194 | } | ||
195 | } | ||
196 | |||
197 | scsi_nl_state &= ~STATE_EHANDLER_BSY; | ||
198 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
121 | 199 | ||
122 | return NOTIFY_DONE; | 200 | return NOTIFY_DONE; |
123 | } | 201 | } |
@@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = { | |||
128 | 206 | ||
129 | 207 | ||
130 | /** | 208 | /** |
131 | * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface | 209 | * GENERIC SCSI transport receive and event handlers |
210 | **/ | ||
211 | |||
212 | /** | ||
213 | * scsi_generic_msg_handler - receive message handler for GENERIC transport | ||
214 | * messages | ||
215 | * | ||
216 | * @skb: socket receive buffer | ||
217 | * | ||
218 | **/ | ||
219 | static int | ||
220 | scsi_generic_msg_handler(struct sk_buff *skb) | ||
221 | { | ||
222 | struct nlmsghdr *nlh = nlmsg_hdr(skb); | ||
223 | struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh); | ||
224 | struct scsi_nl_drvr *driver; | ||
225 | struct Scsi_Host *shost; | ||
226 | unsigned long flags; | ||
227 | int err = 0, match, pid; | ||
228 | |||
229 | pid = NETLINK_CREDS(skb)->pid; | ||
230 | |||
231 | switch (snlh->msgtype) { | ||
232 | case SCSI_NL_SHOST_VENDOR: | ||
233 | { | ||
234 | struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh); | ||
235 | |||
236 | /* Locate the driver that corresponds to the message */ | ||
237 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
238 | match = 0; | ||
239 | list_for_each_entry(driver, &scsi_nl_drivers, next) { | ||
240 | if (driver->vendor_id == msg->vendor_id) { | ||
241 | match = 1; | ||
242 | break; | ||
243 | } | ||
244 | } | ||
245 | |||
246 | if ((!match) || (!driver->dmsg_handler)) { | ||
247 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
248 | err = -ESRCH; | ||
249 | goto rcv_exit; | ||
250 | } | ||
251 | |||
252 | if (driver->flags & HANDLER_DELETING) { | ||
253 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
254 | err = -ESHUTDOWN; | ||
255 | goto rcv_exit; | ||
256 | } | ||
257 | |||
258 | driver->refcnt++; | ||
259 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
260 | |||
261 | |||
262 | /* if successful, scsi_host_lookup takes a shost reference */ | ||
263 | shost = scsi_host_lookup(msg->host_no); | ||
264 | if (!shost) { | ||
265 | err = -ENODEV; | ||
266 | goto driver_exit; | ||
267 | } | ||
268 | |||
269 | /* is this host owned by the vendor ? */ | ||
270 | if (shost->hostt != driver->hostt) { | ||
271 | err = -EINVAL; | ||
272 | goto vendormsg_put; | ||
273 | } | ||
274 | |||
275 | /* pass message on to the driver */ | ||
276 | err = driver->dmsg_handler(shost, (void *)&msg[1], | ||
277 | msg->vmsg_datalen, pid); | ||
278 | |||
279 | vendormsg_put: | ||
280 | /* release reference by scsi_host_lookup */ | ||
281 | scsi_host_put(shost); | ||
282 | |||
283 | driver_exit: | ||
284 | /* release our own reference on the registration object */ | ||
285 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
286 | driver->refcnt--; | ||
287 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
288 | break; | ||
289 | } | ||
290 | |||
291 | default: | ||
292 | err = -EBADR; | ||
293 | break; | ||
294 | } | ||
295 | |||
296 | rcv_exit: | ||
297 | if (err) | ||
298 | printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n", | ||
299 | __func__, snlh->msgtype, err); | ||
300 | return err; | ||
301 | } | ||
302 | |||
303 | |||
304 | /** | ||
305 | * scsi_nl_add_transport - | ||
306 | * Registers message and event handlers for a transport. Enables | ||
307 | * receipt of netlink messages and events to a transport. | ||
308 | * | ||
309 | * @tport: transport registering handlers | ||
310 | * @msg_handler: receive message handler callback | ||
311 | * @event_handler: receive event handler callback | ||
312 | **/ | ||
313 | int | ||
314 | scsi_nl_add_transport(u8 tport, | ||
315 | int (*msg_handler)(struct sk_buff *), | ||
316 | void (*event_handler)(struct notifier_block *, unsigned long, void *)) | ||
317 | { | ||
318 | unsigned long flags; | ||
319 | int err = 0; | ||
320 | |||
321 | if (tport >= SCSI_NL_MAX_TRANSPORTS) | ||
322 | return -EINVAL; | ||
323 | |||
324 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
325 | |||
326 | if (scsi_nl_state & STATE_EHANDLER_BSY) { | ||
327 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
328 | msleep(1); | ||
329 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
330 | } | ||
331 | |||
332 | if (transports[tport].msg_handler || transports[tport].event_handler) { | ||
333 | err = -EALREADY; | ||
334 | goto register_out; | ||
335 | } | ||
336 | |||
337 | transports[tport].msg_handler = msg_handler; | ||
338 | transports[tport].event_handler = event_handler; | ||
339 | transports[tport].flags = 0; | ||
340 | transports[tport].refcnt = 0; | ||
341 | |||
342 | register_out: | ||
343 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
344 | |||
345 | return err; | ||
346 | } | ||
347 | EXPORT_SYMBOL_GPL(scsi_nl_add_transport); | ||
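
As a usage sketch (not part of the patch): a transport class registers its receive handler once at module init and tears it down on exit. SCSI_NL_TRANSPORT_FC and the handler body are assumptions for illustration; any index the transport owns below SCSI_NL_MAX_TRANSPORTS would do.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <scsi/scsi_netlink.h>

/* Hypothetical FC-transport receive handler (illustration only). */
static int example_fc_nl_msg(struct sk_buff *skb)
{
        /* decode the scsi_nl_hdr payload carried in skb here */
        return 0;
}

static int __init example_nl_init(void)
{
        return scsi_nl_add_transport(SCSI_NL_TRANSPORT_FC,
                                     example_fc_nl_msg, NULL);
}

static void __exit example_nl_exit(void)
{
        scsi_nl_remove_transport(SCSI_NL_TRANSPORT_FC);
}
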
348 | |||
349 | |||
350 | /** | ||
351 | * scsi_nl_remove_transport - | ||
352 | * Disable transport reception of messages and events | ||
353 | * | ||
354 | * @tport: transport deregistering handlers | ||
355 | * | ||
356 | **/ | ||
357 | void | ||
358 | scsi_nl_remove_transport(u8 tport) | ||
359 | { | ||
360 | unsigned long flags; | ||
361 | |||
362 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
363 | if (scsi_nl_state & STATE_EHANDLER_BSY) { | ||
364 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
365 | msleep(1); | ||
366 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
367 | } | ||
368 | |||
369 | if (tport < SCSI_NL_MAX_TRANSPORTS) { | ||
370 | transports[tport].flags |= HANDLER_DELETING; | ||
371 | |||
372 | while (transports[tport].refcnt != 0) { | ||
373 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
374 | schedule_timeout_uninterruptible(HZ/4); | ||
375 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
376 | } | ||
377 | transports[tport].msg_handler = NULL; | ||
378 | transports[tport].event_handler = NULL; | ||
379 | transports[tport].flags = 0; | ||
380 | } | ||
381 | |||
382 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
383 | |||
384 | return; | ||
385 | } | ||
386 | EXPORT_SYMBOL_GPL(scsi_nl_remove_transport); | ||
387 | |||
388 | |||
389 | /** | ||
390 | * scsi_nl_add_driver - | ||
391 | * A driver is registering its interfaces for SCSI netlink messages | ||
392 | * | ||
393 | * @vendor_id: A unique identification value for the driver. | ||
394 | * @hostt: address of the driver's host template. Used | ||
395 | * to verify an shost is bound to the driver | ||
396 | * @nlmsg_handler: receive message handler callback | ||
397 | * @nlevt_handler: receive event handler callback | ||
398 | * | ||
399 | * Returns: | ||
400 | * 0 on Success | ||
401 | * error result otherwise | ||
402 | **/ | ||
403 | int | ||
404 | scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt, | ||
405 | int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload, | ||
406 | u32 len, u32 pid), | ||
407 | void (*nlevt_handler)(struct notifier_block *nb, | ||
408 | unsigned long event, void *notify_ptr)) | ||
409 | { | ||
410 | struct scsi_nl_drvr *driver; | ||
411 | unsigned long flags; | ||
412 | |||
413 | driver = kzalloc(sizeof(*driver), GFP_KERNEL); | ||
414 | if (unlikely(!driver)) { | ||
415 | printk(KERN_ERR "%s: allocation failure\n", __func__); | ||
416 | return -ENOMEM; | ||
417 | } | ||
418 | |||
419 | driver->dmsg_handler = nlmsg_handler; | ||
420 | driver->devt_handler = nlevt_handler; | ||
421 | driver->hostt = hostt; | ||
422 | driver->vendor_id = vendor_id; | ||
423 | |||
424 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
425 | if (scsi_nl_state & STATE_EHANDLER_BSY) { | ||
426 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
427 | msleep(1); | ||
428 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
429 | } | ||
430 | list_add_tail(&driver->next, &scsi_nl_drivers); | ||
431 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | EXPORT_SYMBOL_GPL(scsi_nl_add_driver); | ||
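
For completeness, a hedged sketch of the driver-side counterpart: an LLDD registers a vendor-unique message handler keyed by its vendor_id and host template. EXAMPLE_VENDOR_ID and the handler names are illustrative, not defined by this patch.

#include <scsi/scsi_host.h>
#include <scsi/scsi_netlink.h>

#define EXAMPLE_VENDOR_ID 0x0000000100000001ULL /* illustrative value */

static int example_vendor_msg(struct Scsi_Host *shost, void *payload,
                              u32 len, u32 pid)
{
        /* interpret the vendor-unique payload aimed at this shost */
        return 0;
}

static int example_driver_nl_init(struct scsi_host_template *sht)
{
        return scsi_nl_add_driver(EXAMPLE_VENDOR_ID, sht,
                                  example_vendor_msg, NULL);
}

static void example_driver_nl_exit(void)
{
        scsi_nl_remove_driver(EXAMPLE_VENDOR_ID);
}
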
436 | |||
437 | |||
438 | /** | ||
439 | * scsi_nl_remove_driver - | ||
440 | * A driver is unregistering from the SCSI netlink interface | ||
441 | * | ||
442 | * @vendor_id: The unique identification value for the driver. | ||
443 | **/ | ||
444 | void | ||
445 | scsi_nl_remove_driver(u64 vendor_id) | ||
446 | { | ||
447 | struct scsi_nl_drvr *driver; | ||
448 | unsigned long flags; | ||
449 | |||
450 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
451 | if (scsi_nl_state & STATE_EHANDLER_BSY) { | ||
452 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
453 | msleep(1); | ||
454 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
455 | } | ||
456 | |||
457 | list_for_each_entry(driver, &scsi_nl_drivers, next) { | ||
458 | if (driver->vendor_id == vendor_id) { | ||
459 | driver->flags |= HANDLER_DELETING; | ||
460 | while (driver->refcnt != 0) { | ||
461 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
462 | schedule_timeout_uninterruptible(HZ/4); | ||
463 | spin_lock_irqsave(&scsi_nl_lock, flags); | ||
464 | } | ||
465 | list_del(&driver->next); | ||
466 | kfree(driver); | ||
467 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
468 | return; | ||
469 | } | ||
470 | } | ||
471 | |||
472 | spin_unlock_irqrestore(&scsi_nl_lock, flags); | ||
473 | |||
474 | printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n", | ||
475 | __func__, (unsigned long long)vendor_id); | ||
476 | return; | ||
477 | } | ||
478 | EXPORT_SYMBOL_GPL(scsi_nl_remove_driver); | ||
479 | |||
480 | |||
481 | /** | ||
482 | * scsi_netlink_init - Called by SCSI subsystem to initialize | ||
483 | * the SCSI transport netlink interface | ||
132 | * | 484 | * |
133 | **/ | 485 | **/ |
134 | void | 486 | void |
@@ -136,6 +488,8 @@ scsi_netlink_init(void) | |||
136 | { | 488 | { |
137 | int error; | 489 | int error; |
138 | 490 | ||
491 | INIT_LIST_HEAD(&scsi_nl_drivers); | ||
492 | |||
139 | error = netlink_register_notifier(&scsi_netlink_notifier); | 493 | error = netlink_register_notifier(&scsi_netlink_notifier); |
140 | if (error) { | 494 | if (error) { |
141 | printk(KERN_ERR "%s: register of event handler failed - %d\n", | 495 | printk(KERN_ERR "%s: register of event handler failed - %d\n", |
@@ -150,8 +504,15 @@ scsi_netlink_init(void) | |||
150 | printk(KERN_ERR "%s: register of receive handler failed\n", | 504 | printk(KERN_ERR "%s: register of receive handler failed\n", |
151 | __func__); | 505 | __func__); |
152 | netlink_unregister_notifier(&scsi_netlink_notifier); | 506 | netlink_unregister_notifier(&scsi_netlink_notifier); |
507 | return; | ||
153 | } | 508 | } |
154 | 509 | ||
510 | /* Register the entry points for the generic SCSI transport */ | ||
511 | error = scsi_nl_add_transport(SCSI_NL_TRANSPORT, | ||
512 | scsi_generic_msg_handler, NULL); | ||
513 | if (error) | ||
514 | printk(KERN_ERR "%s: register of GENERIC transport handler" | ||
515 | " failed - %d\n", __func__, error); | ||
155 | return; | 516 | return; |
156 | } | 517 | } |
157 | 518 | ||
@@ -163,6 +524,8 @@ scsi_netlink_init(void) | |||
163 | void | 524 | void |
164 | scsi_netlink_exit(void) | 525 | scsi_netlink_exit(void) |
165 | { | 526 | { |
527 | scsi_nl_remove_transport(SCSI_NL_TRANSPORT); | ||
528 | |||
166 | if (scsi_nl_sock) { | 529 | if (scsi_nl_sock) { |
167 | netlink_kernel_release(scsi_nl_sock); | 530 | netlink_kernel_release(scsi_nl_sock); |
168 | netlink_unregister_notifier(&scsi_netlink_notifier); | 531 | netlink_unregister_notifier(&scsi_netlink_notifier); |
@@ -172,3 +535,147 @@ scsi_netlink_exit(void) | |||
172 | } | 535 | } |
173 | 536 | ||
174 | 537 | ||
538 | /* | ||
539 | * Exported Interfaces | ||
540 | */ | ||
541 | |||
542 | /** | ||
543 | * scsi_nl_send_transport_msg - | ||
544 | * Generic function to send a single message from a SCSI transport to | ||
545 | * a single process | ||
546 | * | ||
547 | * @pid: receiving pid | ||
548 | * @hdr: message payload | ||
549 | * | ||
550 | **/ | ||
551 | void | ||
552 | scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr) | ||
553 | { | ||
554 | struct sk_buff *skb; | ||
555 | struct nlmsghdr *nlh; | ||
556 | const char *fn; | ||
557 | char *datab; | ||
558 | u32 len, skblen; | ||
559 | int err; | ||
560 | |||
561 | if (!scsi_nl_sock) { | ||
562 | err = -ENOENT; | ||
563 | fn = "netlink socket"; | ||
564 | goto msg_fail; | ||
565 | } | ||
566 | |||
567 | len = NLMSG_SPACE(hdr->msglen); | ||
568 | skblen = NLMSG_SPACE(len); | ||
569 | |||
570 | skb = alloc_skb(skblen, GFP_KERNEL); | ||
571 | if (!skb) { | ||
572 | err = -ENOBUFS; | ||
573 | fn = "alloc_skb"; | ||
574 | goto msg_fail; | ||
575 | } | ||
576 | |||
577 | nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0); | ||
578 | if (!nlh) { | ||
579 | err = -ENOBUFS; | ||
580 | fn = "nlmsg_put"; | ||
581 | goto msg_fail_skb; | ||
582 | } | ||
583 | datab = NLMSG_DATA(nlh); | ||
584 | memcpy(datab, hdr, hdr->msglen); | ||
585 | |||
586 | err = nlmsg_unicast(scsi_nl_sock, skb, pid); | ||
587 | if (err < 0) { | ||
588 | fn = "nlmsg_unicast"; | ||
589 | /* nlmsg_unicast already kfree_skb'd */ | ||
590 | goto msg_fail; | ||
591 | } | ||
592 | |||
593 | return; | ||
594 | |||
595 | msg_fail_skb: | ||
596 | kfree_skb(skb); | ||
597 | msg_fail: | ||
598 | printk(KERN_WARNING | ||
599 | "%s: Dropped Message : pid %d Transport %d, msgtype x%x, " | ||
600 | "msglen %d: %s : err %d\n", | ||
601 | __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen, | ||
602 | fn, err); | ||
603 | return; | ||
604 | } | ||
605 | EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg); | ||
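
A sketch of how a transport would use this: embed a struct scsi_nl_hdr at the front of its message, fill it with INIT_SCSI_NL_HDR() exactly as scsi_nl_send_vendor_msg() does below, and pass the header pointer. struct example_msg and EXAMPLE_MSGTYPE are illustrative; msglen must cover the whole message, since that is how many bytes get copied into the skb.

#define EXAMPLE_MSGTYPE 0x80 /* illustrative, transport-private msgtype */

struct example_msg {
        struct scsi_nl_hdr snlh;        /* must come first */
        u32 some_payload;
};

static void example_send(u32 pid, u32 value)
{
        struct example_msg msg;

        INIT_SCSI_NL_HDR(&msg.snlh, SCSI_NL_TRANSPORT,
                         EXAMPLE_MSGTYPE, sizeof(msg));
        msg.some_payload = value;

        scsi_nl_send_transport_msg(pid, &msg.snlh);
}
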
606 | |||
607 | |||
608 | /** | ||
609 | * scsi_nl_send_vendor_msg - called to send a shost vendor unique message | ||
610 | * to a specific process id. | ||
611 | * | ||
612 | * @pid: process id of the receiver | ||
613 | * @host_no: host # sending the message | ||
614 | * @vendor_id: unique identifier for the driver's vendor | ||
615 | * @data_len: amount, in bytes, of vendor unique payload data | ||
616 | * @data_buf: pointer to vendor unique data buffer | ||
617 | * | ||
618 | * Returns: | ||
619 | * 0 on successful return | ||
620 | * otherwise, failing error code | ||
621 | * | ||
622 | * Notes: | ||
623 | * This routine assumes no locks are held on entry. | ||
624 | */ | ||
625 | int | ||
626 | scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id, | ||
627 | char *data_buf, u32 data_len) | ||
628 | { | ||
629 | struct sk_buff *skb; | ||
630 | struct nlmsghdr *nlh; | ||
631 | struct scsi_nl_host_vendor_msg *msg; | ||
632 | u32 len, skblen; | ||
633 | int err; | ||
634 | |||
635 | if (!scsi_nl_sock) { | ||
636 | err = -ENOENT; | ||
637 | goto send_vendor_fail; | ||
638 | } | ||
639 | |||
640 | len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len); | ||
641 | skblen = NLMSG_SPACE(len); | ||
642 | |||
643 | skb = alloc_skb(skblen, GFP_KERNEL); | ||
644 | if (!skb) { | ||
645 | err = -ENOBUFS; | ||
646 | goto send_vendor_fail; | ||
647 | } | ||
648 | |||
649 | nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, | ||
650 | skblen - sizeof(*nlh), 0); | ||
651 | if (!nlh) { | ||
652 | err = -ENOBUFS; | ||
653 | goto send_vendor_fail_skb; | ||
654 | } | ||
655 | msg = NLMSG_DATA(nlh); | ||
656 | |||
657 | INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT, | ||
658 | SCSI_NL_SHOST_VENDOR, len); | ||
659 | msg->vendor_id = vendor_id; | ||
660 | msg->host_no = host_no; | ||
661 | msg->vmsg_datalen = data_len; /* bytes */ | ||
662 | memcpy(&msg[1], data_buf, data_len); | ||
663 | |||
664 | err = nlmsg_unicast(scsi_nl_sock, skb, pid); | ||
665 | if (err) | ||
666 | /* nlmsg_unicast already kfree_skb'd */ | ||
667 | goto send_vendor_fail; | ||
668 | |||
669 | return 0; | ||
670 | |||
671 | send_vendor_fail_skb: | ||
672 | kfree_skb(skb); | ||
673 | send_vendor_fail: | ||
674 | printk(KERN_WARNING | ||
675 | "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n", | ||
676 | __func__, host_no, err); | ||
677 | return err; | ||
678 | } | ||
679 | EXPORT_SYMBOL(scsi_nl_send_vendor_msg); | ||
680 | |||
681 | |||
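
Driver-side usage sketch for the routine above: push a vendor-unique blob to a userspace listener whose netlink pid was learned from an earlier request. app_pid and the blob are illustrative, and EXAMPLE_VENDOR_ID is the same illustrative constant used in the registration sketch earlier.

static void example_notify_app(struct Scsi_Host *shost, u32 app_pid)
{
        char blob[16] = "adapter-event";

        scsi_nl_send_vendor_msg(app_pid, shost->host_no,
                                EXAMPLE_VENDOR_ID, blob, sizeof(blob));
}
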
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 79f0f7511204..6cddd5dd323c 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
5 | 5 | ||
6 | struct request_queue; | 6 | struct request_queue; |
7 | struct request; | ||
7 | struct scsi_cmnd; | 8 | struct scsi_cmnd; |
8 | struct scsi_device; | 9 | struct scsi_device; |
9 | struct scsi_host_template; | 10 | struct scsi_host_template; |
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void); | |||
27 | extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); | 28 | extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); |
28 | extern int scsi_setup_command_freelist(struct Scsi_Host *shost); | 29 | extern int scsi_setup_command_freelist(struct Scsi_Host *shost); |
29 | extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); | 30 | extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); |
30 | extern void __scsi_done(struct scsi_cmnd *cmd); | ||
31 | #ifdef CONFIG_SCSI_LOGGING | 31 | #ifdef CONFIG_SCSI_LOGGING |
32 | void scsi_log_send(struct scsi_cmnd *cmd); | 32 | void scsi_log_send(struct scsi_cmnd *cmd); |
33 | void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); | 33 | void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); |
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void); | |||
49 | extern void scsi_exit_devinfo(void); | 49 | extern void scsi_exit_devinfo(void); |
50 | 50 | ||
51 | /* scsi_error.c */ | 51 | /* scsi_error.c */ |
52 | extern void scsi_add_timer(struct scsi_cmnd *, int, | 52 | extern enum blk_eh_timer_return scsi_times_out(struct request *req); |
53 | void (*)(struct scsi_cmnd *)); | ||
54 | extern int scsi_delete_timer(struct scsi_cmnd *); | ||
55 | extern void scsi_times_out(struct scsi_cmnd *cmd); | ||
56 | extern int scsi_error_handler(void *host); | 53 | extern int scsi_error_handler(void *host); |
57 | extern int scsi_decide_disposition(struct scsi_cmnd *cmd); | 54 | extern int scsi_decide_disposition(struct scsi_cmnd *cmd); |
58 | extern void scsi_eh_wakeup(struct Scsi_Host *shost); | 55 | extern void scsi_eh_wakeup(struct Scsi_Host *shost); |
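
The scsi_priv.h hunk reflects the block-layer timeout conversion running through this series: the midlayer no longer owns per-command timers (scsi_add_timer/scsi_delete_timer are gone) and scsi_times_out() becomes a callback invoked by the block layer per request. A rough sketch of the wiring, assuming blk_queue_rq_timed_out() is the registration hook introduced alongside blk_queue_rq_timeout():

#include <linux/blkdev.h>

/* Sketch only: in-tree, this setup happens when the SCSI midlayer
 * allocates the device's request_queue. */
static void example_setup_queue(struct request_queue *q)
{
        blk_queue_rq_timeout(q, 30 * HZ);          /* default per-request timeout */
        blk_queue_rq_timed_out(q, scsi_times_out); /* fires when a request expires */
}
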
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index c6a904a45bf9..82f7b2dd08a2 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) | |||
259 | int error = -ENXIO; | 259 | int error = -ENXIO; |
260 | 260 | ||
261 | shost = scsi_host_lookup(host); | 261 | shost = scsi_host_lookup(host); |
262 | if (IS_ERR(shost)) | 262 | if (!shost) |
263 | return PTR_ERR(shost); | 263 | return error; |
264 | 264 | ||
265 | if (shost->transportt->user_scan) | 265 | if (shost->transportt->user_scan) |
266 | error = shost->transportt->user_scan(shost, channel, id, lun); | 266 | error = shost->transportt->user_scan(shost, channel, id, lun); |
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun) | |||
287 | int error = -ENXIO; | 287 | int error = -ENXIO; |
288 | 288 | ||
289 | shost = scsi_host_lookup(host); | 289 | shost = scsi_host_lookup(host); |
290 | if (IS_ERR(shost)) | 290 | if (!shost) |
291 | return PTR_ERR(shost); | 291 | return error; |
292 | sdev = scsi_device_lookup(shost, channel, id, lun); | 292 | sdev = scsi_device_lookup(shost, channel, id, lun); |
293 | if (sdev) { | 293 | if (sdev) { |
294 | scsi_remove_device(sdev); | 294 | scsi_remove_device(sdev); |
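
The scsi_proc.c change reflects a calling-convention switch: scsi_host_lookup() now returns NULL on failure rather than an ERR_PTR, and the same conversion repeats in scsi_tgt_lib.c and the iSCSI transport further down. A minimal sketch of the new pattern:

static int example_use_host(unsigned short host_no)
{
        struct Scsi_Host *shost = scsi_host_lookup(host_no);

        if (!shost)
                return -ENXIO;  /* no IS_ERR()/PTR_ERR() dance any more */

        /* ... operate on shost; the lookup took a reference ... */

        scsi_host_put(shost);
        return 0;
}
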
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 34d0de6cd511..334862e26a1b 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
730 | static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | 730 | static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, |
731 | int *bflags, int async) | 731 | int *bflags, int async) |
732 | { | 732 | { |
733 | int ret; | ||
734 | |||
733 | /* | 735 | /* |
734 | * XXX do not save the inquiry, since it can change underneath us, | 736 | * XXX do not save the inquiry, since it can change underneath us, |
735 | * save just vendor/model/rev. | 737 | * save just vendor/model/rev. |
@@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
885 | 887 | ||
886 | /* set the device running here so that slave configure | 888 | /* set the device running here so that slave configure |
887 | * may do I/O */ | 889 | * may do I/O */ |
888 | scsi_device_set_state(sdev, SDEV_RUNNING); | 890 | ret = scsi_device_set_state(sdev, SDEV_RUNNING); |
891 | if (ret) { | ||
892 | ret = scsi_device_set_state(sdev, SDEV_BLOCK); | ||
893 | |||
894 | if (ret) { | ||
895 | sdev_printk(KERN_ERR, sdev, | ||
896 | "in wrong state %s to complete scan\n", | ||
897 | scsi_device_state_name(sdev->sdev_state)); | ||
898 | return SCSI_SCAN_NO_RESPONSE; | ||
899 | } | ||
900 | } | ||
889 | 901 | ||
890 | if (*bflags & BLIST_MS_192_BYTES_FOR_3F) | 902 | if (*bflags & BLIST_MS_192_BYTES_FOR_3F) |
891 | sdev->use_192_bytes_for_3f = 1; | 903 | sdev->use_192_bytes_for_3f = 1; |
@@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
899 | transport_configure_device(&sdev->sdev_gendev); | 911 | transport_configure_device(&sdev->sdev_gendev); |
900 | 912 | ||
901 | if (sdev->host->hostt->slave_configure) { | 913 | if (sdev->host->hostt->slave_configure) { |
902 | int ret = sdev->host->hostt->slave_configure(sdev); | 914 | ret = sdev->host->hostt->slave_configure(sdev); |
903 | if (ret) { | 915 | if (ret) { |
904 | /* | 916 | /* |
905 | * if LLDD reports slave not present, don't clutter | 917 | * if LLDD reports slave not present, don't clutter |
@@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, | |||
994 | */ | 1006 | */ |
995 | sdev = scsi_device_lookup_by_target(starget, lun); | 1007 | sdev = scsi_device_lookup_by_target(starget, lun); |
996 | if (sdev) { | 1008 | if (sdev) { |
997 | if (rescan || sdev->sdev_state != SDEV_CREATED) { | 1009 | if (rescan || !scsi_device_created(sdev)) { |
998 | SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO | 1010 | SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO |
999 | "scsi scan: device exists on %s\n", | 1011 | "scsi scan: device exists on %s\n", |
1000 | sdev->sdev_gendev.bus_id)); | 1012 | sdev->sdev_gendev.bus_id)); |
@@ -1467,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1467 | kfree(lun_data); | 1479 | kfree(lun_data); |
1468 | out: | 1480 | out: |
1469 | scsi_device_put(sdev); | 1481 | scsi_device_put(sdev); |
1470 | if (sdev->sdev_state == SDEV_CREATED) | 1482 | if (scsi_device_created(sdev)) |
1471 | /* | 1483 | /* |
1472 | * the sdev we used didn't appear in the report luns scan | 1484 | * the sdev we used didn't appear in the report luns scan |
1473 | */ | 1485 | */ |
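
The scan code now asks a helper whether a device is still "created" instead of comparing against SDEV_CREATED directly, so the new SDEV_CREATED_BLOCK state is treated the same way. The helper itself is not shown in this diff; a plausible shape, stated as an assumption, is:

/* Assumed implementation (likely in include/scsi/scsi_device.h):
 * both created states count as "not yet scanned/configured". */
static inline int scsi_device_created(struct scsi_device *sdev)
{
        return sdev->sdev_state == SDEV_CREATED ||
               sdev->sdev_state == SDEV_CREATED_BLOCK;
}
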
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index ab3c71869be5..93c28f30bbd7 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -34,6 +34,7 @@ static const struct { | |||
34 | { SDEV_QUIESCE, "quiesce" }, | 34 | { SDEV_QUIESCE, "quiesce" }, |
35 | { SDEV_OFFLINE, "offline" }, | 35 | { SDEV_OFFLINE, "offline" }, |
36 | { SDEV_BLOCK, "blocked" }, | 36 | { SDEV_BLOCK, "blocked" }, |
37 | { SDEV_CREATED_BLOCK, "created-blocked" }, | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | const char *scsi_device_state_name(enum scsi_device_state state) | 40 | const char *scsi_device_state_name(enum scsi_device_state state) |
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n"); | |||
560 | sdev_rd_attr (model, "%.16s\n"); | 561 | sdev_rd_attr (model, "%.16s\n"); |
561 | sdev_rd_attr (rev, "%.4s\n"); | 562 | sdev_rd_attr (rev, "%.4s\n"); |
562 | 563 | ||
564 | /* | ||
565 | * TODO: can we make these symlinks to the block layer ones? | ||
566 | */ | ||
563 | static ssize_t | 567 | static ssize_t |
564 | sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) | 568 | sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) |
565 | { | 569 | { |
566 | struct scsi_device *sdev; | 570 | struct scsi_device *sdev; |
567 | sdev = to_scsi_device(dev); | 571 | sdev = to_scsi_device(dev); |
568 | return snprintf (buf, 20, "%d\n", sdev->timeout / HZ); | 572 | return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); |
569 | } | 573 | } |
570 | 574 | ||
571 | static ssize_t | 575 | static ssize_t |
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr, | |||
576 | int timeout; | 580 | int timeout; |
577 | sdev = to_scsi_device(dev); | 581 | sdev = to_scsi_device(dev); |
578 | sscanf (buf, "%d\n", &timeout); | 582 | sscanf (buf, "%d\n", &timeout); |
579 | sdev->timeout = timeout * HZ; | 583 | blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); |
580 | return count; | 584 | return count; |
581 | } | 585 | } |
582 | static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); | 586 | static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); |
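
With the timeout now stored on the request_queue, drivers that used to poke sdev->timeout adjust the queue instead. A hedged driver-side sketch (the value is illustrative):

static int example_slave_configure(struct scsi_device *sdev)
{
        /* was: sdev->timeout = 60 * HZ; */
        blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
        return 0;
}
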
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 257e097c39af..48ba413f7f6a 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, | |||
362 | int err; | 362 | int err; |
363 | 363 | ||
364 | dprintk("%lx %u\n", uaddr, len); | 364 | dprintk("%lx %u\n", uaddr, len); |
365 | err = blk_rq_map_user(q, rq, (void *)uaddr, len); | 365 | err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL); |
366 | if (err) { | 366 | if (err) { |
367 | /* | 367 | /* |
368 | * TODO: need to fixup sg_tablesize, max_segment_size, | 368 | * TODO: need to fixup sg_tablesize, max_segment_size, |
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag, | |||
460 | 460 | ||
461 | /* TODO: replace with a O(1) alg */ | 461 | /* TODO: replace with a O(1) alg */ |
462 | shost = scsi_host_lookup(host_no); | 462 | shost = scsi_host_lookup(host_no); |
463 | if (IS_ERR(shost)) { | 463 | if (!shost) { |
464 | printk(KERN_ERR "Could not find host no %d\n", host_no); | 464 | printk(KERN_ERR "Could not find host no %d\n", host_no); |
465 | return -EINVAL; | 465 | return -EINVAL; |
466 | } | 466 | } |
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result) | |||
550 | dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); | 550 | dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); |
551 | 551 | ||
552 | shost = scsi_host_lookup(host_no); | 552 | shost = scsi_host_lookup(host_no); |
553 | if (IS_ERR(shost)) { | 553 | if (!shost) { |
554 | printk(KERN_ERR "Could not find host no %d\n", host_no); | 554 | printk(KERN_ERR "Could not find host no %d\n", host_no); |
555 | return err; | 555 | return err; |
556 | } | 556 | } |
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result) | |||
603 | dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); | 603 | dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); |
604 | 604 | ||
605 | shost = scsi_host_lookup(host_no); | 605 | shost = scsi_host_lookup(host_no); |
606 | if (IS_ERR(shost)) { | 606 | if (!shost) { |
607 | printk(KERN_ERR "Could not find host no %d\n", host_no); | 607 | printk(KERN_ERR "Could not find host no %d\n", host_no); |
608 | return err; | 608 | return err; |
609 | } | 609 | } |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 56823fd1fb84..d5f7653bb94b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -40,31 +40,7 @@ | |||
40 | 40 | ||
41 | static int fc_queue_work(struct Scsi_Host *, struct work_struct *); | 41 | static int fc_queue_work(struct Scsi_Host *, struct work_struct *); |
42 | static void fc_vport_sched_delete(struct work_struct *work); | 42 | static void fc_vport_sched_delete(struct work_struct *work); |
43 | 43 | static int fc_vport_setup(struct Scsi_Host *shost, int channel, | |
44 | /* | ||
45 | * This is a temporary carrier for creating a vport. It will eventually | ||
46 | * be replaced by a real message definition for sgio or netlink. | ||
47 | * | ||
48 | * fc_vport_identifiers: This set of data contains all elements | ||
49 | * to uniquely identify and instantiate a FC virtual port. | ||
50 | * | ||
51 | * Notes: | ||
52 | * symbolic_name: The driver is to append the symbolic_name string data | ||
53 | * to the symbolic_node_name data that it generates by default. | ||
54 | * the resulting combination should then be registered with the switch. | ||
55 | * It is expected that things like Xen may stuff a VM title into | ||
56 | * this field. | ||
57 | */ | ||
58 | struct fc_vport_identifiers { | ||
59 | u64 node_name; | ||
60 | u64 port_name; | ||
61 | u32 roles; | ||
62 | bool disable; | ||
63 | enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */ | ||
64 | char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN]; | ||
65 | }; | ||
66 | |||
67 | static int fc_vport_create(struct Scsi_Host *shost, int channel, | ||
68 | struct device *pdev, struct fc_vport_identifiers *ids, | 44 | struct device *pdev, struct fc_vport_identifiers *ids, |
69 | struct fc_vport **vport); | 45 | struct fc_vport **vport); |
70 | 46 | ||
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr, | |||
1760 | vid.disable = false; /* always enabled */ | 1736 | vid.disable = false; /* always enabled */ |
1761 | 1737 | ||
1762 | /* we only allow support on Channel 0 !!! */ | 1738 | /* we only allow support on Channel 0 !!! */ |
1763 | stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport); | 1739 | stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport); |
1764 | return stat ? stat : count; | 1740 | return stat ? stat : count; |
1765 | } | 1741 | } |
1766 | static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, | 1742 | static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, |
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont, | |||
1950 | * Notes: | 1926 | * Notes: |
1951 | * This routine assumes no locks are held on entry. | 1927 | * This routine assumes no locks are held on entry. |
1952 | */ | 1928 | */ |
1953 | static enum scsi_eh_timer_return | 1929 | static enum blk_eh_timer_return |
1954 | fc_timed_out(struct scsi_cmnd *scmd) | 1930 | fc_timed_out(struct scsi_cmnd *scmd) |
1955 | { | 1931 | { |
1956 | struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); | 1932 | struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); |
1957 | 1933 | ||
1958 | if (rport->port_state == FC_PORTSTATE_BLOCKED) | 1934 | if (rport->port_state == FC_PORTSTATE_BLOCKED) |
1959 | return EH_RESET_TIMER; | 1935 | return BLK_EH_RESET_TIMER; |
1960 | 1936 | ||
1961 | return EH_NOT_HANDLED; | 1937 | return BLK_EH_NOT_HANDLED; |
1962 | } | 1938 | } |
1963 | 1939 | ||
1964 | /* | 1940 | /* |
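
The fc_timed_out() conversion shows the new contract for transport and LLDD eh_timed_out hooks: they now return the block layer's enum blk_eh_timer_return instead of enum scsi_eh_timer_return. A minimal sketch for a hypothetical transport; example_port_is_recovering() is an invented predicate, not a real API.

static enum blk_eh_timer_return
example_timed_out(struct scsi_cmnd *scmd)
{
        if (example_port_is_recovering(scsi_target(scmd->device)))
                return BLK_EH_RESET_TIMER;      /* give the port more time */

        return BLK_EH_NOT_HANDLED;              /* fall into normal error handling */
}
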
@@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work) | |||
3103 | 3079 | ||
3104 | 3080 | ||
3105 | /** | 3081 | /** |
3106 | * fc_vport_create - allocates and creates a FC virtual port. | 3082 | * fc_vport_setup - allocates and creates a FC virtual port. |
3107 | * @shost: scsi host the virtual port is connected to. | 3083 | * @shost: scsi host the virtual port is connected to. |
3108 | * @channel: Channel on shost port connected to. | 3084 | * @channel: Channel on shost port connected to. |
3109 | * @pdev: parent device for vport | 3085 | * @pdev: parent device for vport |
@@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work) | |||
3118 | * This routine assumes no locks are held on entry. | 3094 | * This routine assumes no locks are held on entry. |
3119 | */ | 3095 | */ |
3120 | static int | 3096 | static int |
3121 | fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev, | 3097 | fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, |
3122 | struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) | 3098 | struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) |
3123 | { | 3099 | { |
3124 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | 3100 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); |
@@ -3231,6 +3207,28 @@ delete_vport: | |||
3231 | return error; | 3207 | return error; |
3232 | } | 3208 | } |
3233 | 3209 | ||
3210 | /** | ||
3211 | * fc_vport_create - Admin App or LLDD requests creation of a vport | ||
3212 | * @shost: scsi host the virtual port is connected to. | ||
3213 | * @channel: channel on shost port connected to. | ||
3214 | * @ids: The world wide names, FC4 port roles, etc for | ||
3215 | * the virtual port. | ||
3216 | * | ||
3217 | * Notes: | ||
3218 | * This routine assumes no locks are held on entry. | ||
3219 | */ | ||
3220 | struct fc_vport * | ||
3221 | fc_vport_create(struct Scsi_Host *shost, int channel, | ||
3222 | struct fc_vport_identifiers *ids) | ||
3223 | { | ||
3224 | int stat; | ||
3225 | struct fc_vport *vport; | ||
3226 | |||
3227 | stat = fc_vport_setup(shost, channel, &shost->shost_gendev, | ||
3228 | ids, &vport); | ||
3229 | return stat ? NULL : vport; | ||
3230 | } | ||
3231 | EXPORT_SYMBOL(fc_vport_create); | ||
3234 | 3232 | ||
3235 | /** | 3233 | /** |
3236 | * fc_vport_terminate - Admin App or LLDD requests termination of a vport | 3234 | * fc_vport_terminate - Admin App or LLDD requests termination of a vport |
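
The old fc_vport_create() becomes the internal fc_vport_setup(), and the new exported fc_vport_create() lets an NPIV-capable LLDD (or management code) request a vport directly. A usage sketch, assuming struct fc_vport_identifiers now lives in the FC transport header with the fields documented in the block removed above; the WWNs are illustrative:

static struct fc_vport *example_make_vport(struct Scsi_Host *shost)
{
        struct fc_vport_identifiers vid = {
                .node_name  = 0x2000000000000001ULL,    /* illustrative WWNN */
                .port_name  = 0x1000000000000001ULL,    /* illustrative WWPN */
                .roles      = FC_PORT_ROLE_FCP_INITIATOR,
                .vport_type = FC_PORTTYPE_NPIV,
                .disable    = false,
        };

        /* returns the new vport, or NULL if fc_vport_setup() failed */
        return fc_vport_create(shost, 0, &vid);
}
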
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 043c3921164f..0ce5f7cdfe2a 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport, | |||
1361 | return -EINVAL; | 1361 | return -EINVAL; |
1362 | 1362 | ||
1363 | shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); | 1363 | shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); |
1364 | if (IS_ERR(shost)) { | 1364 | if (!shost) { |
1365 | printk(KERN_ERR "target discovery could not find host no %u\n", | 1365 | printk(KERN_ERR "target discovery could not find host no %u\n", |
1366 | ev->u.tgt_dscvr.host_no); | 1366 | ev->u.tgt_dscvr.host_no); |
1367 | return -ENODEV; | 1367 | return -ENODEV; |
@@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport, | |||
1387 | return -ENOSYS; | 1387 | return -ENOSYS; |
1388 | 1388 | ||
1389 | shost = scsi_host_lookup(ev->u.set_host_param.host_no); | 1389 | shost = scsi_host_lookup(ev->u.set_host_param.host_no); |
1390 | if (IS_ERR(shost)) { | 1390 | if (!shost) { |
1391 | printk(KERN_ERR "set_host_param could not find host no %u\n", | 1391 | printk(KERN_ERR "set_host_param could not find host no %u\n", |
1392 | ev->u.set_host_param.host_no); | 1392 | ev->u.set_host_param.host_no); |
1393 | return -ENODEV; | 1393 | return -ENODEV; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e5e7d7856454..a7b53be63367 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/blkpg.h> | 47 | #include <linux/blkpg.h> |
48 | #include <linux/delay.h> | 48 | #include <linux/delay.h> |
49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
50 | #include <linux/string_helpers.h> | ||
50 | #include <asm/uaccess.h> | 51 | #include <asm/uaccess.h> |
51 | 52 | ||
52 | #include <scsi/scsi.h> | 53 | #include <scsi/scsi.h> |
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK); | |||
86 | MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); | 87 | MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); |
87 | MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); | 88 | MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); |
88 | 89 | ||
90 | #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) | ||
91 | #define SD_MINORS 16 | ||
92 | #else | ||
93 | #define SD_MINORS 0 | ||
94 | #endif | ||
95 | |||
89 | static int sd_revalidate_disk(struct gendisk *); | 96 | static int sd_revalidate_disk(struct gendisk *); |
90 | static int sd_probe(struct device *); | 97 | static int sd_probe(struct device *); |
91 | static int sd_remove(struct device *); | 98 | static int sd_remove(struct device *); |
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, | |||
159 | sd_print_sense_hdr(sdkp, &sshdr); | 166 | sd_print_sense_hdr(sdkp, &sshdr); |
160 | return -EINVAL; | 167 | return -EINVAL; |
161 | } | 168 | } |
162 | sd_revalidate_disk(sdkp->disk); | 169 | revalidate_disk(sdkp->disk); |
163 | return count; | 170 | return count; |
164 | } | 171 | } |
165 | 172 | ||
@@ -377,7 +384,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
377 | sector_t block = rq->sector; | 384 | sector_t block = rq->sector; |
378 | sector_t threshold; | 385 | sector_t threshold; |
379 | unsigned int this_count = rq->nr_sectors; | 386 | unsigned int this_count = rq->nr_sectors; |
380 | unsigned int timeout = sdp->timeout; | ||
381 | int ret; | 387 | int ret; |
382 | 388 | ||
383 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 389 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
@@ -578,7 +584,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
578 | SCpnt->transfersize = sdp->sector_size; | 584 | SCpnt->transfersize = sdp->sector_size; |
579 | SCpnt->underflow = this_count << 9; | 585 | SCpnt->underflow = this_count << 9; |
580 | SCpnt->allowed = SD_MAX_RETRIES; | 586 | SCpnt->allowed = SD_MAX_RETRIES; |
581 | SCpnt->timeout_per_command = timeout; | ||
582 | 587 | ||
583 | /* | 588 | /* |
584 | * This indicates that the command is ready from our end to be | 589 | * This indicates that the command is ready from our end to be |
@@ -910,7 +915,7 @@ static void sd_rescan(struct device *dev) | |||
910 | struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); | 915 | struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); |
911 | 916 | ||
912 | if (sdkp) { | 917 | if (sdkp) { |
913 | sd_revalidate_disk(sdkp->disk); | 918 | revalidate_disk(sdkp->disk); |
914 | scsi_disk_put(sdkp); | 919 | scsi_disk_put(sdkp); |
915 | } | 920 | } |
916 | } | 921 | } |
@@ -1429,27 +1434,21 @@ got_data: | |||
1429 | */ | 1434 | */ |
1430 | sector_size = 512; | 1435 | sector_size = 512; |
1431 | } | 1436 | } |
1437 | blk_queue_hardsect_size(sdp->request_queue, sector_size); | ||
1438 | |||
1432 | { | 1439 | { |
1433 | /* | 1440 | char cap_str_2[10], cap_str_10[10]; |
1434 | * The msdos fs needs to know the hardware sector size | 1441 | u64 sz = sdkp->capacity << ffz(~sector_size); |
1435 | * So I have created this table. See ll_rw_blk.c | ||
1436 | * Jacques Gelinas (Jacques@solucorp.qc.ca) | ||
1437 | */ | ||
1438 | int hard_sector = sector_size; | ||
1439 | sector_t sz = (sdkp->capacity/2) * (hard_sector/256); | ||
1440 | struct request_queue *queue = sdp->request_queue; | ||
1441 | sector_t mb = sz; | ||
1442 | 1442 | ||
1443 | blk_queue_hardsect_size(queue, hard_sector); | 1443 | string_get_size(sz, STRING_UNITS_2, cap_str_2, |
1444 | /* avoid 64-bit division on 32-bit platforms */ | 1444 | sizeof(cap_str_2)); |
1445 | sector_div(sz, 625); | 1445 | string_get_size(sz, STRING_UNITS_10, cap_str_10, |
1446 | mb -= sz - 974; | 1446 | sizeof(cap_str_10)); |
1447 | sector_div(mb, 1950); | ||
1448 | 1447 | ||
1449 | sd_printk(KERN_NOTICE, sdkp, | 1448 | sd_printk(KERN_NOTICE, sdkp, |
1450 | "%llu %d-byte hardware sectors (%llu MB)\n", | 1449 | "%llu %d-byte hardware sectors: (%s/%s)\n", |
1451 | (unsigned long long)sdkp->capacity, | 1450 | (unsigned long long)sdkp->capacity, |
1452 | hard_sector, (unsigned long long)mb); | 1451 | sector_size, cap_str_10, cap_str_2); |
1453 | } | 1452 | } |
1454 | 1453 | ||
1455 | /* Rescale capacity to 512-byte units */ | 1454 | /* Rescale capacity to 512-byte units */ |
@@ -1764,6 +1763,52 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
1764 | } | 1763 | } |
1765 | 1764 | ||
1766 | /** | 1765 | /** |
1766 | * sd_format_disk_name - format disk name | ||
1767 | * @prefix: name prefix - ie. "sd" for SCSI disks | ||
1768 | * @index: index of the disk to format name for | ||
1769 | * @buf: output buffer | ||
1770 | * @buflen: length of the output buffer | ||
1771 | * | ||
1772 | * SCSI disk names start at sda. The 26th device is sdz and the | ||
1773 | * 27th is sdaa. The last one with a two-letter suffix is sdzz, | ||
1774 | * which is followed by sdaaa. | ||
1775 | * | ||
1776 | * This is basically base-26 counting with one extra 'nil' entry | ||
1777 | * at the beginning from the second digit on and can be | ||
1778 | * determined using a similar method to base-26 conversion with the | ||
1779 | * index shifted -1 after each digit is computed. | ||
1780 | * | ||
1781 | * CONTEXT: | ||
1782 | * Don't care. | ||
1783 | * | ||
1784 | * RETURNS: | ||
1785 | * 0 on success, -errno on failure. | ||
1786 | */ | ||
1787 | static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen) | ||
1788 | { | ||
1789 | const int base = 'z' - 'a' + 1; | ||
1790 | char *begin = buf + strlen(prefix); | ||
1791 | char *end = buf + buflen; | ||
1792 | char *p; | ||
1793 | int unit; | ||
1794 | |||
1795 | p = end - 1; | ||
1796 | *p = '\0'; | ||
1797 | unit = base; | ||
1798 | do { | ||
1799 | if (p == begin) | ||
1800 | return -EINVAL; | ||
1801 | *--p = 'a' + (index % unit); | ||
1802 | index = (index / unit) - 1; | ||
1803 | } while (index >= 0); | ||
1804 | |||
1805 | memmove(begin, p, end - p); | ||
1806 | memcpy(buf, prefix, strlen(prefix)); | ||
1807 | |||
1808 | return 0; | ||
1809 | } | ||
1810 | |||
1811 | /** | ||
1767 | * sd_probe - called during driver initialization and whenever a | 1812 | * sd_probe - called during driver initialization and whenever a |
1768 | * new scsi device is attached to the system. It is called once | 1813 | * new scsi device is attached to the system. It is called once |
1769 | * for each scsi device (not just disks) present. | 1814 | * for each scsi device (not just disks) present. |
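
sd_format_disk_name() replaces the old three-branch sprintf logic with open-ended base-26 naming. A quick user-space re-implementation of the same loop, only to show the mapping; the kernel function above is the authoritative version.

#include <stdio.h>

static void name_for(int index, char *buf)
{
        char tmp[8];
        char *p = tmp + sizeof(tmp) - 1;

        *p = '\0';
        do {
                *--p = 'a' + (index % 26);
                index = (index / 26) - 1;
        } while (index >= 0);
        sprintf(buf, "sd%s", p);
}

int main(void)
{
        int idx[] = { 0, 25, 26, 701, 702 };
        char buf[16];
        int i;

        for (i = 0; i < 5; i++) {
                name_for(idx[i], buf);
                printf("%d -> %s\n", idx[i], buf);  /* sda, sdz, sdaa, sdzz, sdaaa */
        }
        return 0;
}
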
@@ -1801,7 +1846,7 @@ static int sd_probe(struct device *dev) | |||
1801 | if (!sdkp) | 1846 | if (!sdkp) |
1802 | goto out; | 1847 | goto out; |
1803 | 1848 | ||
1804 | gd = alloc_disk(16); | 1849 | gd = alloc_disk(SD_MINORS); |
1805 | if (!gd) | 1850 | if (!gd) |
1806 | goto out_free; | 1851 | goto out_free; |
1807 | 1852 | ||
@@ -1815,8 +1860,8 @@ static int sd_probe(struct device *dev) | |||
1815 | if (error) | 1860 | if (error) |
1816 | goto out_put; | 1861 | goto out_put; |
1817 | 1862 | ||
1818 | error = -EBUSY; | 1863 | error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); |
1819 | if (index >= SD_MAX_DISKS) | 1864 | if (error) |
1820 | goto out_free_index; | 1865 | goto out_free_index; |
1821 | 1866 | ||
1822 | sdkp->device = sdp; | 1867 | sdkp->device = sdp; |
@@ -1826,11 +1871,12 @@ static int sd_probe(struct device *dev) | |||
1826 | sdkp->openers = 0; | 1871 | sdkp->openers = 0; |
1827 | sdkp->previous_state = 1; | 1872 | sdkp->previous_state = 1; |
1828 | 1873 | ||
1829 | if (!sdp->timeout) { | 1874 | if (!sdp->request_queue->rq_timeout) { |
1830 | if (sdp->type != TYPE_MOD) | 1875 | if (sdp->type != TYPE_MOD) |
1831 | sdp->timeout = SD_TIMEOUT; | 1876 | blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); |
1832 | else | 1877 | else |
1833 | sdp->timeout = SD_MOD_TIMEOUT; | 1878 | blk_queue_rq_timeout(sdp->request_queue, |
1879 | SD_MOD_TIMEOUT); | ||
1834 | } | 1880 | } |
1835 | 1881 | ||
1836 | device_initialize(&sdkp->dev); | 1882 | device_initialize(&sdkp->dev); |
@@ -1843,24 +1889,12 @@ static int sd_probe(struct device *dev) | |||
1843 | 1889 | ||
1844 | get_device(&sdp->sdev_gendev); | 1890 | get_device(&sdp->sdev_gendev); |
1845 | 1891 | ||
1846 | gd->major = sd_major((index & 0xf0) >> 4); | 1892 | if (index < SD_MAX_DISKS) { |
1847 | gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); | 1893 | gd->major = sd_major((index & 0xf0) >> 4); |
1848 | gd->minors = 16; | 1894 | gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); |
1849 | gd->fops = &sd_fops; | 1895 | gd->minors = SD_MINORS; |
1850 | |||
1851 | if (index < 26) { | ||
1852 | sprintf(gd->disk_name, "sd%c", 'a' + index % 26); | ||
1853 | } else if (index < (26 + 1) * 26) { | ||
1854 | sprintf(gd->disk_name, "sd%c%c", | ||
1855 | 'a' + index / 26 - 1,'a' + index % 26); | ||
1856 | } else { | ||
1857 | const unsigned int m1 = (index / 26 - 1) / 26 - 1; | ||
1858 | const unsigned int m2 = (index / 26 - 1) % 26; | ||
1859 | const unsigned int m3 = index % 26; | ||
1860 | sprintf(gd->disk_name, "sd%c%c%c", | ||
1861 | 'a' + m1, 'a' + m2, 'a' + m3); | ||
1862 | } | 1896 | } |
1863 | 1897 | gd->fops = &sd_fops; | |
1864 | gd->private_data = &sdkp->driver; | 1898 | gd->private_data = &sdkp->driver; |
1865 | gd->queue = sdkp->device->request_queue; | 1899 | gd->queue = sdkp->device->request_queue; |
1866 | 1900 | ||
@@ -1869,7 +1903,7 @@ static int sd_probe(struct device *dev) | |||
1869 | blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); | 1903 | blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); |
1870 | 1904 | ||
1871 | gd->driverfs_dev = &sdp->sdev_gendev; | 1905 | gd->driverfs_dev = &sdp->sdev_gendev; |
1872 | gd->flags = GENHD_FL_DRIVERFS; | 1906 | gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS; |
1873 | if (sdp->removable) | 1907 | if (sdp->removable) |
1874 | gd->flags |= GENHD_FL_REMOVABLE; | 1908 | gd->flags |= GENHD_FL_REMOVABLE; |
1875 | 1909 | ||
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 661f9f21650a..ba9b9bbd4e73 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */ | |||
47 | #include <linux/seq_file.h> | 47 | #include <linux/seq_file.h> |
48 | #include <linux/blkdev.h> | 48 | #include <linux/blkdev.h> |
49 | #include <linux/delay.h> | 49 | #include <linux/delay.h> |
50 | #include <linux/scatterlist.h> | ||
51 | #include <linux/blktrace_api.h> | 50 | #include <linux/blktrace_api.h> |
52 | #include <linux/smp_lock.h> | 51 | #include <linux/smp_lock.h> |
53 | 52 | ||
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void); | |||
69 | #endif | 68 | #endif |
70 | 69 | ||
71 | #define SG_ALLOW_DIO_DEF 0 | 70 | #define SG_ALLOW_DIO_DEF 0 |
72 | #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */ | ||
73 | 71 | ||
74 | #define SG_MAX_DEVS 32768 | 72 | #define SG_MAX_DEVS 32768 |
75 | 73 | ||
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ | |||
118 | unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ | 116 | unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ |
119 | unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ | 117 | unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ |
120 | unsigned bufflen; /* Size of (aggregate) data buffer */ | 118 | unsigned bufflen; /* Size of (aggregate) data buffer */ |
121 | unsigned b_malloc_len; /* actual len malloc'ed in buffer */ | 119 | struct page **pages; |
122 | struct scatterlist *buffer;/* scatter list */ | 120 | int page_order; |
123 | char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ | 121 | char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ |
124 | unsigned char cmd_opcode; /* first byte of command */ | 122 | unsigned char cmd_opcode; /* first byte of command */ |
125 | } Sg_scatter_hold; | 123 | } Sg_scatter_hold; |
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ | |||
137 | char orphan; /* 1 -> drop on sight, 0 -> normal */ | 135 | char orphan; /* 1 -> drop on sight, 0 -> normal */ |
138 | char sg_io_owned; /* 1 -> packet belongs to SG_IO */ | 136 | char sg_io_owned; /* 1 -> packet belongs to SG_IO */ |
139 | volatile char done; /* 0->before bh, 1->before read, 2->read */ | 137 | volatile char done; /* 0->before bh, 1->before read, 2->read */ |
138 | struct request *rq; | ||
139 | struct bio *bio; | ||
140 | } Sg_request; | 140 | } Sg_request; |
141 | 141 | ||
142 | typedef struct sg_fd { /* holds the state of a file descriptor */ | 142 | typedef struct sg_fd { /* holds the state of a file descriptor */ |
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */ | |||
175 | 175 | ||
176 | static int sg_fasync(int fd, struct file *filp, int mode); | 176 | static int sg_fasync(int fd, struct file *filp, int mode); |
177 | /* tasklet or soft irq callback */ | 177 | /* tasklet or soft irq callback */ |
178 | static void sg_cmd_done(void *data, char *sense, int result, int resid); | 178 | static void sg_rq_end_io(struct request *rq, int uptodate); |
179 | static int sg_start_req(Sg_request * srp); | 179 | static int sg_start_req(Sg_request *srp, unsigned char *cmd); |
180 | static void sg_finish_rem_req(Sg_request * srp); | 180 | static void sg_finish_rem_req(Sg_request * srp); |
181 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); | 181 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); |
182 | static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, | 182 | static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, |
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, | |||
188 | int read_only, Sg_request **o_srp); | 188 | int read_only, Sg_request **o_srp); |
189 | static int sg_common_write(Sg_fd * sfp, Sg_request * srp, | 189 | static int sg_common_write(Sg_fd * sfp, Sg_request * srp, |
190 | unsigned char *cmnd, int timeout, int blocking); | 190 | unsigned char *cmnd, int timeout, int blocking); |
191 | static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, | ||
192 | int wr_xf, int *countp, unsigned char __user **up); | ||
193 | static int sg_write_xfer(Sg_request * srp); | ||
194 | static int sg_read_xfer(Sg_request * srp); | ||
195 | static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); | 191 | static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); |
196 | static void sg_remove_scat(Sg_scatter_hold * schp); | 192 | static void sg_remove_scat(Sg_scatter_hold * schp); |
197 | static void sg_build_reserve(Sg_fd * sfp, int req_size); | 193 | static void sg_build_reserve(Sg_fd * sfp, int req_size); |
198 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); | 194 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); |
199 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); | 195 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); |
200 | static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp); | ||
201 | static void sg_page_free(struct page *page, int size); | ||
202 | static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); | 196 | static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); |
203 | static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); | 197 | static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); |
204 | static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); | 198 | static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); |
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); | |||
206 | static Sg_request *sg_add_request(Sg_fd * sfp); | 200 | static Sg_request *sg_add_request(Sg_fd * sfp); |
207 | static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); | 201 | static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); |
208 | static int sg_res_in_use(Sg_fd * sfp); | 202 | static int sg_res_in_use(Sg_fd * sfp); |
209 | static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); | ||
210 | static Sg_device *sg_get_dev(int dev); | 203 | static Sg_device *sg_get_dev(int dev); |
211 | #ifdef CONFIG_SCSI_PROC_FS | 204 | #ifdef CONFIG_SCSI_PROC_FS |
212 | static int sg_last_dev(void); | 205 | static int sg_last_dev(void); |
@@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) | |||
529 | err = -EFAULT; | 522 | err = -EFAULT; |
530 | goto err_out; | 523 | goto err_out; |
531 | } | 524 | } |
532 | err = sg_read_xfer(srp); | 525 | err_out: |
533 | err_out: | ||
534 | sg_finish_rem_req(srp); | 526 | sg_finish_rem_req(srp); |
535 | return (0 == err) ? count : err; | 527 | return (0 == err) ? count : err; |
536 | } | 528 | } |
@@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) | |||
612 | else | 604 | else |
613 | hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; | 605 | hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; |
614 | hp->dxfer_len = mxsize; | 606 | hp->dxfer_len = mxsize; |
615 | hp->dxferp = (char __user *)buf + cmd_size; | 607 | if (hp->dxfer_direction == SG_DXFER_TO_DEV) |
608 | hp->dxferp = (char __user *)buf + cmd_size; | ||
609 | else | ||
610 | hp->dxferp = NULL; | ||
616 | hp->sbp = NULL; | 611 | hp->sbp = NULL; |
617 | hp->timeout = old_hdr.reply_len; /* structure abuse ... */ | 612 | hp->timeout = old_hdr.reply_len; /* structure abuse ... */ |
618 | hp->flags = input_size; /* structure abuse ... */ | 613 | hp->flags = input_size; /* structure abuse ... */ |
@@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
732 | SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", | 727 | SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", |
733 | (int) cmnd[0], (int) hp->cmd_len)); | 728 | (int) cmnd[0], (int) hp->cmd_len)); |
734 | 729 | ||
735 | if ((k = sg_start_req(srp))) { | 730 | k = sg_start_req(srp, cmnd); |
731 | if (k) { | ||
736 | SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); | 732 | SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); |
737 | sg_finish_rem_req(srp); | 733 | sg_finish_rem_req(srp); |
738 | return k; /* probably out of space --> ENOMEM */ | 734 | return k; /* probably out of space --> ENOMEM */ |
739 | } | 735 | } |
740 | if ((k = sg_write_xfer(srp))) { | ||
741 | SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n")); | ||
742 | sg_finish_rem_req(srp); | ||
743 | return k; | ||
744 | } | ||
745 | if (sdp->detached) { | 736 | if (sdp->detached) { |
746 | sg_finish_rem_req(srp); | 737 | sg_finish_rem_req(srp); |
747 | return -ENODEV; | 738 | return -ENODEV; |
@@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
763 | break; | 754 | break; |
764 | } | 755 | } |
765 | hp->duration = jiffies_to_msecs(jiffies); | 756 | hp->duration = jiffies_to_msecs(jiffies); |
766 | /* Now send everything of to mid-level. The next time we hear about this | 757 | |
767 | packet is when sg_cmd_done() is called (i.e. a callback). */ | 758 | srp->rq->timeout = timeout; |
768 | if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, | 759 | blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, |
769 | hp->dxfer_len, srp->data.k_use_sg, timeout, | 760 | srp->rq, 1, sg_rq_end_io); |
770 | SG_DEFAULT_RETRIES, srp, sg_cmd_done, | 761 | return 0; |
771 | GFP_ATOMIC)) { | ||
772 | SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n")); | ||
773 | /* | ||
774 | * most likely out of mem, but could also be a bad map | ||
775 | */ | ||
776 | sg_finish_rem_req(srp); | ||
777 | return -ENOMEM; | ||
778 | } else | ||
779 | return 0; | ||
780 | } | 762 | } |
781 | 763 | ||
782 | static int | 764 | static int |
@@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1192 | Sg_fd *sfp; | 1174 | Sg_fd *sfp; |
1193 | unsigned long offset, len, sa; | 1175 | unsigned long offset, len, sa; |
1194 | Sg_scatter_hold *rsv_schp; | 1176 | Sg_scatter_hold *rsv_schp; |
1195 | struct scatterlist *sg; | 1177 | int k, length; |
1196 | int k; | ||
1197 | 1178 | ||
1198 | if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) | 1179 | if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) |
1199 | return VM_FAULT_SIGBUS; | 1180 | return VM_FAULT_SIGBUS; |
@@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1203 | return VM_FAULT_SIGBUS; | 1184 | return VM_FAULT_SIGBUS; |
1204 | SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", | 1185 | SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", |
1205 | offset, rsv_schp->k_use_sg)); | 1186 | offset, rsv_schp->k_use_sg)); |
1206 | sg = rsv_schp->buffer; | ||
1207 | sa = vma->vm_start; | 1187 | sa = vma->vm_start; |
1208 | for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); | 1188 | length = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
1209 | ++k, sg = sg_next(sg)) { | 1189 | for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { |
1210 | len = vma->vm_end - sa; | 1190 | len = vma->vm_end - sa; |
1211 | len = (len < sg->length) ? len : sg->length; | 1191 | len = (len < length) ? len : length; |
1212 | if (offset < len) { | 1192 | if (offset < len) { |
1213 | struct page *page; | 1193 | struct page *page = nth_page(rsv_schp->pages[k], |
1214 | page = virt_to_page(page_address(sg_page(sg)) + offset); | 1194 | offset >> PAGE_SHIFT); |
1215 | get_page(page); /* increment page count */ | 1195 | get_page(page); /* increment page count */ |
1216 | vmf->page = page; | 1196 | vmf->page = page; |
1217 | return 0; /* success */ | 1197 | return 0; /* success */ |
@@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
1233 | Sg_fd *sfp; | 1213 | Sg_fd *sfp; |
1234 | unsigned long req_sz, len, sa; | 1214 | unsigned long req_sz, len, sa; |
1235 | Sg_scatter_hold *rsv_schp; | 1215 | Sg_scatter_hold *rsv_schp; |
1236 | int k; | 1216 | int k, length; |
1237 | struct scatterlist *sg; | ||
1238 | 1217 | ||
1239 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) | 1218 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) |
1240 | return -ENXIO; | 1219 | return -ENXIO; |
@@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
1248 | return -ENOMEM; /* cannot map more than reserved buffer */ | 1227 | return -ENOMEM; /* cannot map more than reserved buffer */ |
1249 | 1228 | ||
1250 | sa = vma->vm_start; | 1229 | sa = vma->vm_start; |
1251 | sg = rsv_schp->buffer; | 1230 | length = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
1252 | for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); | 1231 | for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { |
1253 | ++k, sg = sg_next(sg)) { | ||
1254 | len = vma->vm_end - sa; | 1232 | len = vma->vm_end - sa; |
1255 | len = (len < sg->length) ? len : sg->length; | 1233 | len = (len < length) ? len : length; |
1256 | sa += len; | 1234 | sa += len; |
1257 | } | 1235 | } |
1258 | 1236 | ||
@@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) | |||
1263 | return 0; | 1241 | return 0; |
1264 | } | 1242 | } |
1265 | 1243 | ||
1266 | /* This function is a "bottom half" handler that is called by the | 1244 | /* |
1267 | * mid level when a command is completed (or has failed). */ | 1245 | * This function is a "bottom half" handler that is called by the mid |
1268 | static void | 1246 | * level when a command is completed (or has failed). |
1269 | sg_cmd_done(void *data, char *sense, int result, int resid) | 1247 | */ |
1248 | static void sg_rq_end_io(struct request *rq, int uptodate) | ||
1270 | { | 1249 | { |
1271 | Sg_request *srp = data; | 1250 | struct sg_request *srp = rq->end_io_data; |
1272 | Sg_device *sdp = NULL; | 1251 | Sg_device *sdp = NULL; |
1273 | Sg_fd *sfp; | 1252 | Sg_fd *sfp; |
1274 | unsigned long iflags; | 1253 | unsigned long iflags; |
1275 | unsigned int ms; | 1254 | unsigned int ms; |
1255 | char *sense; | ||
1256 | int result, resid; | ||
1276 | 1257 | ||
1277 | if (NULL == srp) { | 1258 | if (NULL == srp) { |
1278 | printk(KERN_ERR "sg_cmd_done: NULL request\n"); | 1259 | printk(KERN_ERR "sg_cmd_done: NULL request\n"); |
@@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid) | |||
1286 | return; | 1267 | return; |
1287 | } | 1268 | } |
1288 | 1269 | ||
1270 | sense = rq->sense; | ||
1271 | result = rq->errors; | ||
1272 | resid = rq->data_len; | ||
1289 | 1273 | ||
1290 | SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", | 1274 | SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", |
1291 | sdp->disk->disk_name, srp->header.pack_id, result)); | 1275 | sdp->disk->disk_name, srp->header.pack_id, result)); |
@@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid) | |||
1296 | if (0 != result) { | 1280 | if (0 != result) { |
1297 | struct scsi_sense_hdr sshdr; | 1281 | struct scsi_sense_hdr sshdr; |
1298 | 1282 | ||
1299 | memcpy(srp->sense_b, sense, sizeof (srp->sense_b)); | ||
1300 | srp->header.status = 0xff & result; | 1283 | srp->header.status = 0xff & result; |
1301 | srp->header.masked_status = status_byte(result); | 1284 | srp->header.masked_status = status_byte(result); |
1302 | srp->header.msg_status = msg_byte(result); | 1285 | srp->header.msg_status = msg_byte(result); |
@@ -1634,37 +1617,79 @@ exit_sg(void) | |||
1634 | idr_destroy(&sg_index_idr); | 1617 | idr_destroy(&sg_index_idr); |
1635 | } | 1618 | } |
1636 | 1619 | ||
1637 | static int | 1620 | static int sg_start_req(Sg_request *srp, unsigned char *cmd) |
1638 | sg_start_req(Sg_request * srp) | ||
1639 | { | 1621 | { |
1640 | int res; | 1622 | int res; |
1623 | struct request *rq; | ||
1641 | Sg_fd *sfp = srp->parentfp; | 1624 | Sg_fd *sfp = srp->parentfp; |
1642 | sg_io_hdr_t *hp = &srp->header; | 1625 | sg_io_hdr_t *hp = &srp->header; |
1643 | int dxfer_len = (int) hp->dxfer_len; | 1626 | int dxfer_len = (int) hp->dxfer_len; |
1644 | int dxfer_dir = hp->dxfer_direction; | 1627 | int dxfer_dir = hp->dxfer_direction; |
1628 | unsigned int iov_count = hp->iovec_count; | ||
1645 | Sg_scatter_hold *req_schp = &srp->data; | 1629 | Sg_scatter_hold *req_schp = &srp->data; |
1646 | Sg_scatter_hold *rsv_schp = &sfp->reserve; | 1630 | Sg_scatter_hold *rsv_schp = &sfp->reserve; |
1631 | struct request_queue *q = sfp->parentdp->device->request_queue; | ||
1632 | struct rq_map_data *md, map_data; | ||
1633 | int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; | ||
1634 | |||
1635 | SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n", | ||
1636 | dxfer_len)); | ||
1637 | |||
1638 | rq = blk_get_request(q, rw, GFP_ATOMIC); | ||
1639 | if (!rq) | ||
1640 | return -ENOMEM; | ||
1641 | |||
1642 | memcpy(rq->cmd, cmd, hp->cmd_len); | ||
1643 | |||
1644 | rq->cmd_len = hp->cmd_len; | ||
1645 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | ||
1646 | |||
1647 | srp->rq = rq; | ||
1648 | rq->end_io_data = srp; | ||
1649 | rq->sense = srp->sense_b; | ||
1650 | rq->retries = SG_DEFAULT_RETRIES; | ||
1647 | 1651 | ||
1648 | SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len)); | ||
1649 | if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) | 1652 | if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) |
1650 | return 0; | 1653 | return 0; |
1651 | if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && | 1654 | |
1652 | (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && | 1655 | if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && |
1653 | (!sfp->parentdp->device->host->unchecked_isa_dma)) { | 1656 | dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && |
1654 | res = sg_build_direct(srp, sfp, dxfer_len); | 1657 | !sfp->parentdp->device->host->unchecked_isa_dma && |
1655 | if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ | 1658 | blk_rq_aligned(q, hp->dxferp, dxfer_len)) |
1656 | return res; | 1659 | md = NULL; |
1657 | } | 1660 | else |
1658 | if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) | 1661 | md = &map_data; |
1659 | sg_link_reserve(sfp, srp, dxfer_len); | 1662 | |
1660 | else { | 1663 | if (md) { |
1661 | res = sg_build_indirect(req_schp, sfp, dxfer_len); | 1664 | if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) |
1662 | if (res) { | 1665 | sg_link_reserve(sfp, srp, dxfer_len); |
1663 | sg_remove_scat(req_schp); | 1666 | else { |
1664 | return res; | 1667 | res = sg_build_indirect(req_schp, sfp, dxfer_len); |
1668 | if (res) | ||
1669 | return res; | ||
1665 | } | 1670 | } |
1671 | |||
1672 | md->pages = req_schp->pages; | ||
1673 | md->page_order = req_schp->page_order; | ||
1674 | md->nr_entries = req_schp->k_use_sg; | ||
1666 | } | 1675 | } |
1667 | return 0; | 1676 | |
1677 | if (iov_count) | ||
1678 | res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count, | ||
1679 | hp->dxfer_len, GFP_ATOMIC); | ||
1680 | else | ||
1681 | res = blk_rq_map_user(q, rq, md, hp->dxferp, | ||
1682 | hp->dxfer_len, GFP_ATOMIC); | ||
1683 | |||
1684 | if (!res) { | ||
1685 | srp->bio = rq->bio; | ||
1686 | |||
1687 | if (!md) { | ||
1688 | req_schp->dio_in_use = 1; | ||
1689 | hp->info |= SG_INFO_DIRECT_IO; | ||
1690 | } | ||
1691 | } | ||
1692 | return res; | ||
1668 | } | 1693 | } |
1669 | 1694 | ||
1670 | static void | 1695 | static void |
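Reviewer note: the rewritten sg_start_req() above, together with the sg_common_write() hunk earlier, is essentially the generic BLOCK_PC submission pattern the block layer exports (blk_get_request + blk_rq_map_user + blk_execute_rq_nowait, teardown via blk_rq_unmap_user/blk_put_request in process context). The fragment below is a minimal sketch of that pattern under the 2.6.28-era API shown in these hunks; it is not the sg code itself, and the names my_end_io/my_issue, the retry count and the 60 s timeout are illustrative only.

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/genhd.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>

/* Runs in softirq context when the request finishes; only signal the waiter.
 * Copying data back to user space (blk_rq_unmap_user) must not happen here. */
static void my_end_io(struct request *rq, int uptodate)
{
	complete(rq->end_io_data);
}

/* Issue one SCSI CDB with a user-space data buffer attached, then wait.
 * cdb_len is assumed to fit in rq->cmd (BLK_MAX_CDB). */
static int my_issue(struct scsi_device *sdev, struct gendisk *disk,
		    unsigned char *cdb, unsigned int cdb_len,
		    void __user *ubuf, unsigned int len, int to_dev)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct request_queue *q = sdev->request_queue;
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, to_dev ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI pass-through request */
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->retries = 1;
	rq->timeout = 60 * HZ;
	rq->end_io_data = &done;

	/* NULL rq_map_data: let the block layer pin (or bounce) the user
	 * pages itself instead of supplying a caller-owned page array. */
	err = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (err)
		goto out_put;

	bio = rq->bio;	/* save it: rq->bio is not stable across completion */
	blk_execute_rq_nowait(q, disk, rq, 1, my_end_io);
	wait_for_completion(&done);

	err = blk_rq_unmap_user(bio);	/* copy-back/unpin, in process context */
	if (!err && rq->errors)
		err = -EIO;
out_put:
	blk_put_request(rq);
	return err;
}

sg itself keeps the request alive across the user-visible read()/ioctl() cycle instead of waiting inline, which is why it stashes rq->bio in srp->bio and defers blk_rq_unmap_user()/blk_put_request() to sg_finish_rem_req().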
@@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp) | |||
1678 | sg_unlink_reserve(sfp, srp); | 1703 | sg_unlink_reserve(sfp, srp); |
1679 | else | 1704 | else |
1680 | sg_remove_scat(req_schp); | 1705 | sg_remove_scat(req_schp); |
1706 | |||
1707 | if (srp->rq) { | ||
1708 | if (srp->bio) | ||
1709 | blk_rq_unmap_user(srp->bio); | ||
1710 | |||
1711 | blk_put_request(srp->rq); | ||
1712 | } | ||
1713 | |||
1681 | sg_remove_request(sfp, srp); | 1714 | sg_remove_request(sfp, srp); |
1682 | } | 1715 | } |
1683 | 1716 | ||
1684 | static int | 1717 | static int |
1685 | sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) | 1718 | sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) |
1686 | { | 1719 | { |
1687 | int sg_bufflen = tablesize * sizeof(struct scatterlist); | 1720 | int sg_bufflen = tablesize * sizeof(struct page *); |
1688 | gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; | 1721 | gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; |
1689 | 1722 | ||
1690 | /* | 1723 | schp->pages = kzalloc(sg_bufflen, gfp_flags); |
1691 | * TODO: test without low_dma, we should not need it since | 1724 | if (!schp->pages) |
1692 | * the block layer will bounce the buffer for us | ||
1693 | * | ||
1694 | * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list. | ||
1695 | */ | ||
1696 | if (sfp->low_dma) | ||
1697 | gfp_flags |= GFP_DMA; | ||
1698 | schp->buffer = kzalloc(sg_bufflen, gfp_flags); | ||
1699 | if (!schp->buffer) | ||
1700 | return -ENOMEM; | 1725 | return -ENOMEM; |
1701 | sg_init_table(schp->buffer, tablesize); | ||
1702 | schp->sglist_len = sg_bufflen; | 1726 | schp->sglist_len = sg_bufflen; |
1703 | return tablesize; /* number of scat_gath elements allocated */ | 1727 | return tablesize; /* number of scat_gath elements allocated */ |
1704 | } | 1728 | } |
1705 | 1729 | ||
1706 | #ifdef SG_ALLOW_DIO_CODE | ||
1707 | /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */ | ||
1708 | /* TODO: hopefully we can use the generic block layer code */ | ||
1709 | |||
1710 | /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if | ||
1711 | - mapping of all pages not successful | ||
1712 | (i.e., either completely successful or fails) | ||
1713 | */ | ||
1714 | static int | ||
1715 | st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, | ||
1716 | unsigned long uaddr, size_t count, int rw) | ||
1717 | { | ||
1718 | unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
1719 | unsigned long start = uaddr >> PAGE_SHIFT; | ||
1720 | const int nr_pages = end - start; | ||
1721 | int res, i, j; | ||
1722 | struct page **pages; | ||
1723 | |||
1724 | /* User attempted Overflow! */ | ||
1725 | if ((uaddr + count) < uaddr) | ||
1726 | return -EINVAL; | ||
1727 | |||
1728 | /* Too big */ | ||
1729 | if (nr_pages > max_pages) | ||
1730 | return -ENOMEM; | ||
1731 | |||
1732 | /* Hmm? */ | ||
1733 | if (count == 0) | ||
1734 | return 0; | ||
1735 | |||
1736 | if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL) | ||
1737 | return -ENOMEM; | ||
1738 | |||
1739 | /* Try to fault in all of the necessary pages */ | ||
1740 | down_read(¤t->mm->mmap_sem); | ||
1741 | /* rw==READ means read from drive, write into memory area */ | ||
1742 | res = get_user_pages( | ||
1743 | current, | ||
1744 | current->mm, | ||
1745 | uaddr, | ||
1746 | nr_pages, | ||
1747 | rw == READ, | ||
1748 | 0, /* don't force */ | ||
1749 | pages, | ||
1750 | NULL); | ||
1751 | up_read(¤t->mm->mmap_sem); | ||
1752 | |||
1753 | /* Errors and no page mapped should return here */ | ||
1754 | if (res < nr_pages) | ||
1755 | goto out_unmap; | ||
1756 | |||
1757 | for (i=0; i < nr_pages; i++) { | ||
1758 | /* FIXME: flush superflous for rw==READ, | ||
1759 | * probably wrong function for rw==WRITE | ||
1760 | */ | ||
1761 | flush_dcache_page(pages[i]); | ||
1762 | /* ?? Is locking needed? I don't think so */ | ||
1763 | /* if (!trylock_page(pages[i])) | ||
1764 | goto out_unlock; */ | ||
1765 | } | ||
1766 | |||
1767 | sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK); | ||
1768 | if (nr_pages > 1) { | ||
1769 | sgl[0].length = PAGE_SIZE - sgl[0].offset; | ||
1770 | count -= sgl[0].length; | ||
1771 | for (i=1; i < nr_pages ; i++) | ||
1772 | sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0); | ||
1773 | } | ||
1774 | else { | ||
1775 | sgl[0].length = count; | ||
1776 | } | ||
1777 | |||
1778 | kfree(pages); | ||
1779 | return nr_pages; | ||
1780 | |||
1781 | out_unmap: | ||
1782 | if (res > 0) { | ||
1783 | for (j=0; j < res; j++) | ||
1784 | page_cache_release(pages[j]); | ||
1785 | res = 0; | ||
1786 | } | ||
1787 | kfree(pages); | ||
1788 | return res; | ||
1789 | } | ||
1790 | |||
1791 | |||
1792 | /* And unmap them... */ | ||
1793 | static int | ||
1794 | st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, | ||
1795 | int dirtied) | ||
1796 | { | ||
1797 | int i; | ||
1798 | |||
1799 | for (i=0; i < nr_pages; i++) { | ||
1800 | struct page *page = sg_page(&sgl[i]); | ||
1801 | |||
1802 | if (dirtied) | ||
1803 | SetPageDirty(page); | ||
1804 | /* unlock_page(page); */ | ||
1805 | /* FIXME: cache flush missing for rw==READ | ||
1806 | * FIXME: call the correct reference counting function | ||
1807 | */ | ||
1808 | page_cache_release(page); | ||
1809 | } | ||
1810 | |||
1811 | return 0; | ||
1812 | } | ||
1813 | |||
1814 | /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */ | ||
1815 | #endif | ||
1816 | |||
1817 | |||
1818 | /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */ | ||
1819 | static int | ||
1820 | sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) | ||
1821 | { | ||
1822 | #ifdef SG_ALLOW_DIO_CODE | ||
1823 | sg_io_hdr_t *hp = &srp->header; | ||
1824 | Sg_scatter_hold *schp = &srp->data; | ||
1825 | int sg_tablesize = sfp->parentdp->sg_tablesize; | ||
1826 | int mx_sc_elems, res; | ||
1827 | struct scsi_device *sdev = sfp->parentdp->device; | ||
1828 | |||
1829 | if (((unsigned long)hp->dxferp & | ||
1830 | queue_dma_alignment(sdev->request_queue)) != 0) | ||
1831 | return 1; | ||
1832 | |||
1833 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); | ||
1834 | if (mx_sc_elems <= 0) { | ||
1835 | return 1; | ||
1836 | } | ||
1837 | res = st_map_user_pages(schp->buffer, mx_sc_elems, | ||
1838 | (unsigned long)hp->dxferp, dxfer_len, | ||
1839 | (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0); | ||
1840 | if (res <= 0) { | ||
1841 | sg_remove_scat(schp); | ||
1842 | return 1; | ||
1843 | } | ||
1844 | schp->k_use_sg = res; | ||
1845 | schp->dio_in_use = 1; | ||
1846 | hp->info |= SG_INFO_DIRECT_IO; | ||
1847 | return 0; | ||
1848 | #else | ||
1849 | return 1; | ||
1850 | #endif | ||
1851 | } | ||
1852 | |||
1853 | static int | 1730 | static int |
1854 | sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | 1731 | sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) |
1855 | { | 1732 | { |
1856 | struct scatterlist *sg; | 1733 | int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; |
1857 | int ret_sz = 0, k, rem_sz, num, mx_sc_elems; | ||
1858 | int sg_tablesize = sfp->parentdp->sg_tablesize; | 1734 | int sg_tablesize = sfp->parentdp->sg_tablesize; |
1859 | int blk_size = buff_size; | 1735 | int blk_size = buff_size, order; |
1860 | struct page *p = NULL; | 1736 | gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; |
1861 | 1737 | ||
1862 | if (blk_size < 0) | 1738 | if (blk_size < 0) |
1863 | return -EFAULT; | 1739 | return -EFAULT; |
@@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1881 | } else | 1757 | } else |
1882 | scatter_elem_sz_prev = num; | 1758 | scatter_elem_sz_prev = num; |
1883 | } | 1759 | } |
1884 | for (k = 0, sg = schp->buffer, rem_sz = blk_size; | 1760 | |
1885 | (rem_sz > 0) && (k < mx_sc_elems); | 1761 | if (sfp->low_dma) |
1886 | ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { | 1762 | gfp_mask |= GFP_DMA; |
1887 | 1763 | ||
1764 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) | ||
1765 | gfp_mask |= __GFP_ZERO; | ||
1766 | |||
1767 | order = get_order(num); | ||
1768 | retry: | ||
1769 | ret_sz = 1 << (PAGE_SHIFT + order); | ||
1770 | |||
1771 | for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; | ||
1772 | k++, rem_sz -= ret_sz) { | ||
1773 | |||
1888 | num = (rem_sz > scatter_elem_sz_prev) ? | 1774 | num = (rem_sz > scatter_elem_sz_prev) ? |
1889 | scatter_elem_sz_prev : rem_sz; | 1775 | scatter_elem_sz_prev : rem_sz; |
1890 | p = sg_page_malloc(num, sfp->low_dma, &ret_sz); | 1776 | |
1891 | if (!p) | 1777 | schp->pages[k] = alloc_pages(gfp_mask, order); |
1892 | return -ENOMEM; | 1778 | if (!schp->pages[k]) |
1779 | goto out; | ||
1893 | 1780 | ||
1894 | if (num == scatter_elem_sz_prev) { | 1781 | if (num == scatter_elem_sz_prev) { |
1895 | if (unlikely(ret_sz > scatter_elem_sz_prev)) { | 1782 | if (unlikely(ret_sz > scatter_elem_sz_prev)) { |
@@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1897 | scatter_elem_sz_prev = ret_sz; | 1784 | scatter_elem_sz_prev = ret_sz; |
1898 | } | 1785 | } |
1899 | } | 1786 | } |
1900 | sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0); | ||
1901 | 1787 | ||
1902 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " | 1788 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " |
1903 | "ret_sz=%d\n", k, num, ret_sz)); | 1789 | "ret_sz=%d\n", k, num, ret_sz)); |
1904 | } /* end of for loop */ | 1790 | } /* end of for loop */ |
1905 | 1791 | ||
1792 | schp->page_order = order; | ||
1906 | schp->k_use_sg = k; | 1793 | schp->k_use_sg = k; |
1907 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " | 1794 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " |
1908 | "rem_sz=%d\n", k, rem_sz)); | 1795 | "rem_sz=%d\n", k, rem_sz)); |
@@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1910 | schp->bufflen = blk_size; | 1797 | schp->bufflen = blk_size; |
1911 | if (rem_sz > 0) /* must have failed */ | 1798 | if (rem_sz > 0) /* must have failed */ |
1912 | return -ENOMEM; | 1799 | return -ENOMEM; |
1913 | |||
1914 | return 0; | 1800 | return 0; |
1915 | } | 1801 | out: |
1916 | 1802 | for (i = 0; i < k; i++) | |
1917 | static int | 1803 | __free_pages(schp->pages[i], order); |
1918 | sg_write_xfer(Sg_request * srp) | ||
1919 | { | ||
1920 | sg_io_hdr_t *hp = &srp->header; | ||
1921 | Sg_scatter_hold *schp = &srp->data; | ||
1922 | struct scatterlist *sg = schp->buffer; | ||
1923 | int num_xfer = 0; | ||
1924 | int j, k, onum, usglen, ksglen, res; | ||
1925 | int iovec_count = (int) hp->iovec_count; | ||
1926 | int dxfer_dir = hp->dxfer_direction; | ||
1927 | unsigned char *p; | ||
1928 | unsigned char __user *up; | ||
1929 | int new_interface = ('\0' == hp->interface_id) ? 0 : 1; | ||
1930 | |||
1931 | if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) || | ||
1932 | (SG_DXFER_TO_FROM_DEV == dxfer_dir)) { | ||
1933 | num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags); | ||
1934 | if (schp->bufflen < num_xfer) | ||
1935 | num_xfer = schp->bufflen; | ||
1936 | } | ||
1937 | if ((num_xfer <= 0) || (schp->dio_in_use) || | ||
1938 | (new_interface | ||
1939 | && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags))) | ||
1940 | return 0; | ||
1941 | |||
1942 | SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", | ||
1943 | num_xfer, iovec_count, schp->k_use_sg)); | ||
1944 | if (iovec_count) { | ||
1945 | onum = iovec_count; | ||
1946 | if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum)) | ||
1947 | return -EFAULT; | ||
1948 | } else | ||
1949 | onum = 1; | ||
1950 | |||
1951 | ksglen = sg->length; | ||
1952 | p = page_address(sg_page(sg)); | ||
1953 | for (j = 0, k = 0; j < onum; ++j) { | ||
1954 | res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up); | ||
1955 | if (res) | ||
1956 | return res; | ||
1957 | |||
1958 | for (; p; sg = sg_next(sg), ksglen = sg->length, | ||
1959 | p = page_address(sg_page(sg))) { | ||
1960 | if (usglen <= 0) | ||
1961 | break; | ||
1962 | if (ksglen > usglen) { | ||
1963 | if (usglen >= num_xfer) { | ||
1964 | if (__copy_from_user(p, up, num_xfer)) | ||
1965 | return -EFAULT; | ||
1966 | return 0; | ||
1967 | } | ||
1968 | if (__copy_from_user(p, up, usglen)) | ||
1969 | return -EFAULT; | ||
1970 | p += usglen; | ||
1971 | ksglen -= usglen; | ||
1972 | break; | ||
1973 | } else { | ||
1974 | if (ksglen >= num_xfer) { | ||
1975 | if (__copy_from_user(p, up, num_xfer)) | ||
1976 | return -EFAULT; | ||
1977 | return 0; | ||
1978 | } | ||
1979 | if (__copy_from_user(p, up, ksglen)) | ||
1980 | return -EFAULT; | ||
1981 | up += ksglen; | ||
1982 | usglen -= ksglen; | ||
1983 | } | ||
1984 | ++k; | ||
1985 | if (k >= schp->k_use_sg) | ||
1986 | return 0; | ||
1987 | } | ||
1988 | } | ||
1989 | |||
1990 | return 0; | ||
1991 | } | ||
1992 | 1804 | ||
1993 | static int | 1805 | if (--order >= 0) |
1994 | sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, | 1806 | goto retry; |
1995 | int wr_xf, int *countp, unsigned char __user **up) | ||
1996 | { | ||
1997 | int num_xfer = (int) hp->dxfer_len; | ||
1998 | unsigned char __user *p = hp->dxferp; | ||
1999 | int count; | ||
2000 | 1807 | ||
2001 | if (0 == sg_num) { | 1808 | return -ENOMEM; |
2002 | if (wr_xf && ('\0' == hp->interface_id)) | ||
2003 | count = (int) hp->flags; /* holds "old" input_size */ | ||
2004 | else | ||
2005 | count = num_xfer; | ||
2006 | } else { | ||
2007 | sg_iovec_t iovec; | ||
2008 | if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC)) | ||
2009 | return -EFAULT; | ||
2010 | p = iovec.iov_base; | ||
2011 | count = (int) iovec.iov_len; | ||
2012 | } | ||
2013 | if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count)) | ||
2014 | return -EFAULT; | ||
2015 | if (up) | ||
2016 | *up = p; | ||
2017 | if (countp) | ||
2018 | *countp = count; | ||
2019 | return 0; | ||
2020 | } | 1809 | } |
2021 | 1810 | ||
2022 | static void | 1811 | static void |
2023 | sg_remove_scat(Sg_scatter_hold * schp) | 1812 | sg_remove_scat(Sg_scatter_hold * schp) |
2024 | { | 1813 | { |
2025 | SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); | 1814 | SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); |
2026 | if (schp->buffer && (schp->sglist_len > 0)) { | 1815 | if (schp->pages && schp->sglist_len > 0) { |
2027 | struct scatterlist *sg = schp->buffer; | 1816 | if (!schp->dio_in_use) { |
2028 | |||
2029 | if (schp->dio_in_use) { | ||
2030 | #ifdef SG_ALLOW_DIO_CODE | ||
2031 | st_unmap_user_pages(sg, schp->k_use_sg, TRUE); | ||
2032 | #endif | ||
2033 | } else { | ||
2034 | int k; | 1817 | int k; |
2035 | 1818 | ||
2036 | for (k = 0; (k < schp->k_use_sg) && sg_page(sg); | 1819 | for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { |
2037 | ++k, sg = sg_next(sg)) { | ||
2038 | SCSI_LOG_TIMEOUT(5, printk( | 1820 | SCSI_LOG_TIMEOUT(5, printk( |
2039 | "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", | 1821 | "sg_remove_scat: k=%d, pg=0x%p\n", |
2040 | k, sg_page(sg), sg->length)); | 1822 | k, schp->pages[k])); |
2041 | sg_page_free(sg_page(sg), sg->length); | 1823 | __free_pages(schp->pages[k], schp->page_order); |
2042 | } | 1824 | } |
2043 | } | ||
2044 | kfree(schp->buffer); | ||
2045 | } | ||
2046 | memset(schp, 0, sizeof (*schp)); | ||
2047 | } | ||
2048 | 1825 | ||
2049 | static int | 1826 | kfree(schp->pages); |
2050 | sg_read_xfer(Sg_request * srp) | ||
2051 | { | ||
2052 | sg_io_hdr_t *hp = &srp->header; | ||
2053 | Sg_scatter_hold *schp = &srp->data; | ||
2054 | struct scatterlist *sg = schp->buffer; | ||
2055 | int num_xfer = 0; | ||
2056 | int j, k, onum, usglen, ksglen, res; | ||
2057 | int iovec_count = (int) hp->iovec_count; | ||
2058 | int dxfer_dir = hp->dxfer_direction; | ||
2059 | unsigned char *p; | ||
2060 | unsigned char __user *up; | ||
2061 | int new_interface = ('\0' == hp->interface_id) ? 0 : 1; | ||
2062 | |||
2063 | if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir) | ||
2064 | || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) { | ||
2065 | num_xfer = hp->dxfer_len; | ||
2066 | if (schp->bufflen < num_xfer) | ||
2067 | num_xfer = schp->bufflen; | ||
2068 | } | ||
2069 | if ((num_xfer <= 0) || (schp->dio_in_use) || | ||
2070 | (new_interface | ||
2071 | && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags))) | ||
2072 | return 0; | ||
2073 | |||
2074 | SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n", | ||
2075 | num_xfer, iovec_count, schp->k_use_sg)); | ||
2076 | if (iovec_count) { | ||
2077 | onum = iovec_count; | ||
2078 | if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum)) | ||
2079 | return -EFAULT; | ||
2080 | } else | ||
2081 | onum = 1; | ||
2082 | |||
2083 | p = page_address(sg_page(sg)); | ||
2084 | ksglen = sg->length; | ||
2085 | for (j = 0, k = 0; j < onum; ++j) { | ||
2086 | res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up); | ||
2087 | if (res) | ||
2088 | return res; | ||
2089 | |||
2090 | for (; p; sg = sg_next(sg), ksglen = sg->length, | ||
2091 | p = page_address(sg_page(sg))) { | ||
2092 | if (usglen <= 0) | ||
2093 | break; | ||
2094 | if (ksglen > usglen) { | ||
2095 | if (usglen >= num_xfer) { | ||
2096 | if (__copy_to_user(up, p, num_xfer)) | ||
2097 | return -EFAULT; | ||
2098 | return 0; | ||
2099 | } | ||
2100 | if (__copy_to_user(up, p, usglen)) | ||
2101 | return -EFAULT; | ||
2102 | p += usglen; | ||
2103 | ksglen -= usglen; | ||
2104 | break; | ||
2105 | } else { | ||
2106 | if (ksglen >= num_xfer) { | ||
2107 | if (__copy_to_user(up, p, num_xfer)) | ||
2108 | return -EFAULT; | ||
2109 | return 0; | ||
2110 | } | ||
2111 | if (__copy_to_user(up, p, ksglen)) | ||
2112 | return -EFAULT; | ||
2113 | up += ksglen; | ||
2114 | usglen -= ksglen; | ||
2115 | } | ||
2116 | ++k; | ||
2117 | if (k >= schp->k_use_sg) | ||
2118 | return 0; | ||
2119 | } | 1827 | } |
2120 | } | 1828 | } |
2121 | 1829 | memset(schp, 0, sizeof (*schp)); | |
2122 | return 0; | ||
2123 | } | 1830 | } |
2124 | 1831 | ||
2125 | static int | 1832 | static int |
2126 | sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) | 1833 | sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) |
2127 | { | 1834 | { |
2128 | Sg_scatter_hold *schp = &srp->data; | 1835 | Sg_scatter_hold *schp = &srp->data; |
2129 | struct scatterlist *sg = schp->buffer; | ||
2130 | int k, num; | 1836 | int k, num; |
2131 | 1837 | ||
2132 | SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", | 1838 | SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", |
@@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) | |||
2134 | if ((!outp) || (num_read_xfer <= 0)) | 1840 | if ((!outp) || (num_read_xfer <= 0)) |
2135 | return 0; | 1841 | return 0; |
2136 | 1842 | ||
2137 | for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { | 1843 | num = 1 << (PAGE_SHIFT + schp->page_order); |
2138 | num = sg->length; | 1844 | for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { |
2139 | if (num > num_read_xfer) { | 1845 | if (num > num_read_xfer) { |
2140 | if (__copy_to_user(outp, page_address(sg_page(sg)), | 1846 | if (__copy_to_user(outp, page_address(schp->pages[k]), |
2141 | num_read_xfer)) | 1847 | num_read_xfer)) |
2142 | return -EFAULT; | 1848 | return -EFAULT; |
2143 | break; | 1849 | break; |
2144 | } else { | 1850 | } else { |
2145 | if (__copy_to_user(outp, page_address(sg_page(sg)), | 1851 | if (__copy_to_user(outp, page_address(schp->pages[k]), |
2146 | num)) | 1852 | num)) |
2147 | return -EFAULT; | 1853 | return -EFAULT; |
2148 | num_read_xfer -= num; | 1854 | num_read_xfer -= num; |
@@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) | |||
2177 | { | 1883 | { |
2178 | Sg_scatter_hold *req_schp = &srp->data; | 1884 | Sg_scatter_hold *req_schp = &srp->data; |
2179 | Sg_scatter_hold *rsv_schp = &sfp->reserve; | 1885 | Sg_scatter_hold *rsv_schp = &sfp->reserve; |
2180 | struct scatterlist *sg = rsv_schp->buffer; | ||
2181 | int k, num, rem; | 1886 | int k, num, rem; |
2182 | 1887 | ||
2183 | srp->res_used = 1; | 1888 | srp->res_used = 1; |
2184 | SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); | 1889 | SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); |
2185 | rem = size; | 1890 | rem = size; |
2186 | 1891 | ||
2187 | for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { | 1892 | num = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
2188 | num = sg->length; | 1893 | for (k = 0; k < rsv_schp->k_use_sg; k++) { |
2189 | if (rem <= num) { | 1894 | if (rem <= num) { |
2190 | sfp->save_scat_len = num; | ||
2191 | sg->length = rem; | ||
2192 | req_schp->k_use_sg = k + 1; | 1895 | req_schp->k_use_sg = k + 1; |
2193 | req_schp->sglist_len = rsv_schp->sglist_len; | 1896 | req_schp->sglist_len = rsv_schp->sglist_len; |
2194 | req_schp->buffer = rsv_schp->buffer; | 1897 | req_schp->pages = rsv_schp->pages; |
2195 | 1898 | ||
2196 | req_schp->bufflen = size; | 1899 | req_schp->bufflen = size; |
2197 | req_schp->b_malloc_len = rsv_schp->b_malloc_len; | 1900 | req_schp->page_order = rsv_schp->page_order; |
2198 | break; | 1901 | break; |
2199 | } else | 1902 | } else |
2200 | rem -= num; | 1903 | rem -= num; |
@@ -2208,22 +1911,13 @@ static void | |||
2208 | sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) | 1911 | sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) |
2209 | { | 1912 | { |
2210 | Sg_scatter_hold *req_schp = &srp->data; | 1913 | Sg_scatter_hold *req_schp = &srp->data; |
2211 | Sg_scatter_hold *rsv_schp = &sfp->reserve; | ||
2212 | 1914 | ||
2213 | SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", | 1915 | SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", |
2214 | (int) req_schp->k_use_sg)); | 1916 | (int) req_schp->k_use_sg)); |
2215 | if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) { | ||
2216 | struct scatterlist *sg = rsv_schp->buffer; | ||
2217 | |||
2218 | if (sfp->save_scat_len > 0) | ||
2219 | (sg + (req_schp->k_use_sg - 1))->length = | ||
2220 | (unsigned) sfp->save_scat_len; | ||
2221 | else | ||
2222 | SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n")); | ||
2223 | } | ||
2224 | req_schp->k_use_sg = 0; | 1917 | req_schp->k_use_sg = 0; |
2225 | req_schp->bufflen = 0; | 1918 | req_schp->bufflen = 0; |
2226 | req_schp->buffer = NULL; | 1919 | req_schp->pages = NULL; |
1920 | req_schp->page_order = 0; | ||
2227 | req_schp->sglist_len = 0; | 1921 | req_schp->sglist_len = 0; |
2228 | sfp->save_scat_len = 0; | 1922 | sfp->save_scat_len = 0; |
2229 | srp->res_used = 0; | 1923 | srp->res_used = 0; |
@@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp) | |||
2481 | return srp ? 1 : 0; | 2175 | return srp ? 1 : 0; |
2482 | } | 2176 | } |
2483 | 2177 | ||
2484 | /* The size fetched (value output via retSzp) set when non-NULL return */ | ||
2485 | static struct page * | ||
2486 | sg_page_malloc(int rqSz, int lowDma, int *retSzp) | ||
2487 | { | ||
2488 | struct page *resp = NULL; | ||
2489 | gfp_t page_mask; | ||
2490 | int order, a_size; | ||
2491 | int resSz; | ||
2492 | |||
2493 | if ((rqSz <= 0) || (NULL == retSzp)) | ||
2494 | return resp; | ||
2495 | |||
2496 | if (lowDma) | ||
2497 | page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN; | ||
2498 | else | ||
2499 | page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; | ||
2500 | |||
2501 | for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; | ||
2502 | order++, a_size <<= 1) ; | ||
2503 | resSz = a_size; /* rounded up if necessary */ | ||
2504 | resp = alloc_pages(page_mask, order); | ||
2505 | while ((!resp) && order) { | ||
2506 | --order; | ||
2507 | a_size >>= 1; /* divide by 2, until PAGE_SIZE */ | ||
2508 | resp = alloc_pages(page_mask, order); /* try half */ | ||
2509 | resSz = a_size; | ||
2510 | } | ||
2511 | if (resp) { | ||
2512 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) | ||
2513 | memset(page_address(resp), 0, resSz); | ||
2514 | *retSzp = resSz; | ||
2515 | } | ||
2516 | return resp; | ||
2517 | } | ||
2518 | |||
2519 | static void | ||
2520 | sg_page_free(struct page *page, int size) | ||
2521 | { | ||
2522 | int order, a_size; | ||
2523 | |||
2524 | if (!page) | ||
2525 | return; | ||
2526 | for (order = 0, a_size = PAGE_SIZE; a_size < size; | ||
2527 | order++, a_size <<= 1) ; | ||
2528 | __free_pages(page, order); | ||
2529 | } | ||
2530 | |||
2531 | #ifdef CONFIG_SCSI_PROC_FS | 2178 | #ifdef CONFIG_SCSI_PROC_FS |
2532 | static int | 2179 | static int |
2533 | sg_idr_max_id(int id, void *p, void *data) | 2180 | sg_idr_max_id(int id, void *p, void *data) |
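Reviewer note: with the hunks above, sg's scatterlist-based reserve/indirect buffers become a bare array of page pointers plus a single page_order, which is exactly the shape struct rq_map_data expects. The sketch below illustrates that allocation pattern (higher-order chunks with fallback to a smaller order, then handing the array to rq_map_data) under the same 2.6.28-era API; struct page_pool, pool_alloc and the 32 KiB chunk cap are illustrative names and values, not taken from sg.c.

#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct page_pool {
	struct page **pages;	/* one entry per allocated chunk */
	int nr;			/* entries actually allocated */
	int order;		/* every chunk is 1 << (PAGE_SHIFT + order) bytes */
};

/* Allocate 'size' bytes as equally sized higher-order chunks, retrying with
 * a smaller order on failure (same idea as the new sg_build_indirect). */
static int pool_alloc(struct page_pool *p, int max_chunks, size_t size)
{
	int order = get_order(min_t(size_t, size, 32768));
	int i, chunk;
	size_t rem;

retry:
	chunk = 1 << (PAGE_SHIFT + order);
	p->pages = kcalloc(max_chunks, sizeof(*p->pages), GFP_KERNEL);
	if (!p->pages)
		return -ENOMEM;

	for (i = 0, rem = size; rem > 0 && i < max_chunks; i++) {
		p->pages[i] = alloc_pages(GFP_KERNEL | __GFP_COMP |
					  __GFP_NOWARN, order);
		if (!p->pages[i])
			goto undo;
		rem -= min_t(size_t, rem, chunk);
	}
	if (rem)		/* ran out of table entries at this order */
		goto undo;

	p->nr = i;
	p->order = order;
	return 0;

undo:
	while (--i >= 0)
		__free_pages(p->pages[i], order);
	kfree(p->pages);
	p->pages = NULL;
	if (--order >= 0)
		goto retry;	/* try again with smaller chunks */
	return -ENOMEM;
}

/* Hand the pool to the block layer so blk_rq_map_user() copies through it. */
static void pool_to_map_data(struct page_pool *p, struct rq_map_data *md)
{
	md->pages = p->pages;
	md->page_order = p->order;
	md->nr_entries = p->nr;
}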
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 27f5bfd1def3..0f17009c99d2 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt) | |||
331 | 331 | ||
332 | static int sr_prep_fn(struct request_queue *q, struct request *rq) | 332 | static int sr_prep_fn(struct request_queue *q, struct request *rq) |
333 | { | 333 | { |
334 | int block=0, this_count, s_size, timeout = SR_TIMEOUT; | 334 | int block = 0, this_count, s_size; |
335 | struct scsi_cd *cd; | 335 | struct scsi_cd *cd; |
336 | struct scsi_cmnd *SCpnt; | 336 | struct scsi_cmnd *SCpnt; |
337 | struct scsi_device *sdp = q->queuedata; | 337 | struct scsi_device *sdp = q->queuedata; |
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq) | |||
461 | SCpnt->transfersize = cd->device->sector_size; | 461 | SCpnt->transfersize = cd->device->sector_size; |
462 | SCpnt->underflow = this_count << 9; | 462 | SCpnt->underflow = this_count << 9; |
463 | SCpnt->allowed = MAX_RETRIES; | 463 | SCpnt->allowed = MAX_RETRIES; |
464 | SCpnt->timeout_per_command = timeout; | ||
465 | 464 | ||
466 | /* | 465 | /* |
467 | * This indicates that the command is ready from our end to be | 466 | * This indicates that the command is ready from our end to be |
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev) | |||
620 | disk->fops = &sr_bdops; | 619 | disk->fops = &sr_bdops; |
621 | disk->flags = GENHD_FL_CD; | 620 | disk->flags = GENHD_FL_CD; |
622 | 621 | ||
622 | blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); | ||
623 | |||
623 | cd->device = sdev; | 624 | cd->device = sdev; |
624 | cd->disk = disk; | 625 | cd->disk = disk; |
625 | cd->driver = &sr_template; | 626 | cd->driver = &sr_template; |
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref) | |||
878 | struct gendisk *disk = cd->disk; | 879 | struct gendisk *disk = cd->disk; |
879 | 880 | ||
880 | spin_lock(&sr_index_lock); | 881 | spin_lock(&sr_index_lock); |
881 | clear_bit(disk->first_minor, sr_index_bits); | 882 | clear_bit(MINOR(disk_devt(disk)), sr_index_bits); |
882 | spin_unlock(&sr_index_lock); | 883 | spin_unlock(&sr_index_lock); |
883 | 884 | ||
884 | unregister_cdrom(&cd->cdi); | 885 | unregister_cdrom(&cd->cdi); |
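Reviewer note: the sr.c hunk above and the sym53c8xx hunk below show the same API move: scsi_cmnd->timeout_per_command is gone, the default timeout now lives on the request queue (set once with blk_queue_rq_timeout()), and per-command values travel in request->timeout. A minimal sketch of what that looks like for a hypothetical LLD (mydrv_* names and the 30 s value are illustrative):

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#define MYDRV_DEFAULT_TIMEOUT	(30 * HZ)	/* illustrative default */

/* Configure time: the default command timeout is a queue property now. */
static int mydrv_slave_configure(struct scsi_device *sdev)
{
	blk_queue_rq_timeout(sdev->request_queue, MYDRV_DEFAULT_TIMEOUT);
	return 0;
}

/* Issue time: read the effective timeout from the request backing the
 * command, not from the removed cmd->timeout_per_command field. */
static unsigned long mydrv_deadline(struct scsi_cmnd *cmd)
{
	return jiffies + cmd->request->timeout;
}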
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index d39107b7669b..f4e6cde1fd0d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | |||
519 | * Shorten our settle_time if needed for | 519 | * Shorten our settle_time if needed for |
520 | * this command not to time out. | 520 | * this command not to time out. |
521 | */ | 521 | */ |
522 | if (np->s.settle_time_valid && cmd->timeout_per_command) { | 522 | if (np->s.settle_time_valid && cmd->request->timeout) { |
523 | unsigned long tlimit = jiffies + cmd->timeout_per_command; | 523 | unsigned long tlimit = jiffies + cmd->request->timeout; |
524 | tlimit -= SYM_CONF_TIMER_INTERVAL*2; | 524 | tlimit -= SYM_CONF_TIMER_INTERVAL*2; |
525 | if (time_after(np->s.settle_time, tlimit)) { | 525 | if (time_after(np->s.settle_time, tlimit)) { |
526 | np->s.settle_time = tlimit; | 526 | np->s.settle_time = tlimit; |
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 1723d71cbf3f..69ac6e590f1d 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c | |||
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = { | |||
2573 | static int __init dc390_module_init(void) | 2573 | static int __init dc390_module_init(void) |
2574 | { | 2574 | { |
2575 | if (!disable_clustering) | 2575 | if (!disable_clustering) |
2576 | printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n" | 2576 | printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"); |
2577 | "\twith \"disable_clustering=1\" and report to maintainers\n"); | 2577 | printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n"); |
2578 | 2578 | ||
2579 | if (tmscsim[0] == -1 || tmscsim[0] > 15) { | 2579 | if (tmscsim[0] == -1 || tmscsim[0] > 15) { |
2580 | tmscsim[0] = 7; | 2580 | tmscsim[0] = 7; |