author     Linus Torvalds <torvalds@linux-foundation.org>  2008-10-10 13:52:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-10 13:52:45 -0400
commit     e26feff647ef34423b048b940540a0059001ddb0 (patch)
tree       acafe68602ee2f6f1a438c113073ffcc0040e949 /drivers/scsi
parent     d403a6484f0341bf0624d17ece46f24f741b6a92 (diff)
parent     b911e473d24633c19414b54b82b9ff0b1a2419d7 (diff)
Merge branch 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block: (132 commits)
  doc/cdrom: Trvial documentation error, file not present
  block_dev: fix kernel-doc in new functions
  block: add some comments around the bio read-write flags
  block: mark bio_split_pool static
  block: Find bio sector offset given idx and offset
  block: gendisk integrity wrapper
  block: Switch blk_integrity_compare from bdev to gendisk
  block: Fix double put in blk_integrity_unregister
  block: Introduce integrity data ownership flag
  block: revert part of d7533ad0e132f92e75c1b2eb7c26387b25a583c1
  bio.h: Remove unused conditional code
  block: remove end_{queued|dequeued}_request()
  block: change elevator to use __blk_end_request()
  gdrom: change to use __blk_end_request()
  memstick: change to use __blk_end_request()
  virtio_blk: change to use __blk_end_request()
  blktrace: use BLKTRACE_BDEV_SIZE as the name size for setup structure
  block: add lld busy state exporting interface
  block: Fix blk_start_queueing() to not kick a stopped queue
  include blktrace_api.h in headers_install
  ...
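The drivers/scsi part of this merge is dominated by the block-layer timeout rework: the per-command scsi_cmnd timer (timeout_per_command, eh_timeout, scsi_add_timer/scsi_delete_timer) is replaced by the request timer owned by the block layer, and low-level drivers' timeout hooks now return blk_eh_timer_return values. A minimal, illustrative sketch of the converted pattern follows; it is not taken from this tree, and the example_* names are made up.

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Placeholder predicate -- a real driver would query its own hardware state. */
static int example_device_still_working(struct scsi_cmnd *scmd)
{
	return 0;
}

/*
 * Per-host timeout hook: called by the SCSI midlayer from the block
 * layer's request timer instead of a per-command eh_timeout timer.
 */
static enum blk_eh_timer_return example_timed_out(struct scsi_cmnd *scmd)
{
	if (example_device_still_working(scmd))
		return BLK_EH_RESET_TIMER;	/* rearm cmd->request->timeout */

	return BLK_EH_NOT_HANDLED;		/* fall through to SCSI EH */
}

static struct scsi_host_template example_template = {
	.name		= "example",
	.eh_timed_out	= example_timed_out,
};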
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/aachba.c         |   2
-rw-r--r--  drivers/scsi/gdth.c                   |  60
-rw-r--r--  drivers/scsi/gdth.h                   |   2
-rw-r--r--  drivers/scsi/gdth_proc.c              |  66
-rw-r--r--  drivers/scsi/gdth_proc.h              |   3
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c      |   2
-rw-r--r--  drivers/scsi/ide-scsi.c               |   2
-rw-r--r--  drivers/scsi/ipr.c                    |   3
-rw-r--r--  drivers/scsi/ips.c                    |   2
-rw-r--r--  drivers/scsi/libiscsi.c               |  17
-rw-r--r--  drivers/scsi/libsas/sas_ata.c         |   2
-rw-r--r--  drivers/scsi/libsas/sas_internal.h    |   2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c   |  30
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c  |   6
-rw-r--r--  drivers/scsi/ncr53c8xx.c              |   4
-rw-r--r--  drivers/scsi/qla1280.c                |   4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c         |   4
-rw-r--r--  drivers/scsi/scsi.c                   |  92
-rw-r--r--  drivers/scsi/scsi_error.c             |  90
-rw-r--r--  drivers/scsi/scsi_lib.c               |  17
-rw-r--r--  drivers/scsi/scsi_priv.h              |   7
-rw-r--r--  drivers/scsi/scsi_sysfs.c             |   7
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c           |   2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c      |   6
-rw-r--r--  drivers/scsi/sd.c                     |  95
-rw-r--r--  drivers/scsi/sg.c                     | 667
-rw-r--r--  drivers/scsi/sr.c                     |   7
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c   |   4
28 files changed, 364 insertions, 841 deletions
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
 	srbcmd->id       = cpu_to_le32(scmd_id(cmd));
 	srbcmd->lun      = cpu_to_le32(cmd->device->lun);
 	srbcmd->flags    = cpu_to_le32(flag);
-	timeout = cmd->timeout_per_command/HZ;
+	timeout = cmd->request->timeout/HZ;
 	if (timeout == 0)
 		timeout = 1;
 	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
 
 	/* use request field to save the ptr. to completion struct. */
 	scp->request = (struct request *)&wait;
-	scp->timeout_per_command = timeout*HZ;
 	scp->cmd_len = 12;
 	scp->cmnd = cmnd;
 	cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
 	register Scsi_Cmnd *pscp;
 	register Scsi_Cmnd *nscp;
 	ulong flags;
-	unchar b, t;
 
 	TRACE(("gdth_putq() priority %d\n",priority));
 	spin_lock_irqsave(&ha->smp_lock, flags);
 
-	if (!cmndinfo->internal_command) {
+	if (!cmndinfo->internal_command)
 		cmndinfo->priority = priority;
-		b = scp->device->channel;
-		t = scp->device->id;
-		if (priority >= DEFAULT_PRI) {
-			if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
-			    (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
-				TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
-				cmndinfo->timeout = gdth_update_timeout(scp, 0);
-			}
-		}
-	}
 
 	if (ha->req_first==NULL) {
 		ha->req_first = scp;	/* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
 	return ((const char *)ha->binfo.type_string);
 }
 
+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+	gdth_ha_str *ha = shost_priv(scp->device->host);
+	struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+	unchar b, t;
+	ulong flags;
+	enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+	TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
+	b = scp->device->channel;
+	t = scp->device->id;
+
+	/*
+	 * We don't really honor the command timeout, but we try to
+	 * honor 6 times of the actual command timeout! So reset the
+	 * timer if this is less than 6th timeout on this command!
+	 */
+	if (++cmndinfo->timeout_count < 6)
+		retval = BLK_EH_RESET_TIMER;
+
+	/* Reset the timeout if it is locked IO */
+	spin_lock_irqsave(&ha->smp_lock, flags);
+	if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+	    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+		TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+		retval = BLK_EH_RESET_TIMER;
+	}
+	spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+	return retval;
+}
+
+
 static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
 {
 	gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
 	BUG_ON(!cmndinfo);
 
 	scp->scsi_done = done;
-	gdth_update_timeout(scp, scp->timeout_per_command * 6);
+	cmndinfo->timeout_count = 0;
 	cmndinfo->priority = DEFAULT_PRI;
 
 	return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
 			ha->hdr[j].lock = 1;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
 			gdth_wait_completion(ha, ha->bus_cnt, j);
-			gdth_stop_timeout(ha, ha->bus_cnt, j);
 		} else {
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->hdr[j].lock = 0;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			gdth_start_timeout(ha, ha->bus_cnt, j);
 			gdth_next(ha);
 		}
 	}
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->raw[i].lock = 1;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			for (j = 0; j < ha->tid_cnt; ++j) {
+			for (j = 0; j < ha->tid_cnt; ++j)
 				gdth_wait_completion(ha, i, j);
-				gdth_stop_timeout(ha, i, j);
-			}
 		} else {
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->raw[i].lock = 0;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			for (j = 0; j < ha->tid_cnt; ++j) {
-				gdth_start_timeout(ha, i, j);
+			for (j = 0; j < ha->tid_cnt; ++j)
 				gdth_next(ha);
-			}
 		}
 	}
 	break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
 	.slave_configure	= gdth_slave_configure,
 	.bios_param		= gdth_bios_param,
 	.proc_info		= gdth_proc_info,
+	.eh_timed_out		= gdth_timed_out,
 	.proc_name		= "gdth",
 	.can_queue		= GDTH_MAXCMDS,
 	.this_id		= -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
 	gdth_cmd_str *internal_cmd_str;		/* crier for internal messages*/
 	dma_addr_t sense_paddr;			/* sense dma-addr */
 	unchar priority;
-	int timeout;
+	int timeout_count;			/* # of timeout calls */
 	volatile int wait_for_completion;
 	ushort status;
 	ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
 	}
 	spin_unlock_irqrestore(&ha->smp_lock, flags);
 }
-
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-	ulong flags;
-	Scsi_Cmnd *scp;
-	unchar b, t;
-
-	spin_lock_irqsave(&ha->smp_lock, flags);
-
-	for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-		struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-		if (!cmndinfo->internal_command) {
-			b = scp->device->channel;
-			t = scp->device->id;
-			if (t == (unchar)id && b == (unchar)busnum) {
-				TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
-				cmndinfo->timeout = gdth_update_timeout(scp, 0);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-	ulong flags;
-	Scsi_Cmnd *scp;
-	unchar b, t;
-
-	spin_lock_irqsave(&ha->smp_lock, flags);
-
-	for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-		struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-		if (!cmndinfo->internal_command) {
-			b = scp->device->channel;
-			t = scp->device->id;
-			if (t == (unchar)id && b == (unchar)busnum) {
-				TRACE2(("gdth_start_timeout(): update_timeout()\n"));
-				gdth_update_timeout(scp, cmndinfo->timeout);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
-{
-	int oldto;
-
-	oldto = scp->timeout_per_command;
-	scp->timeout_per_command = timeout;
-
-	if (timeout == 0) {
-		del_timer(&scp->eh_timeout);
-		scp->eh_timeout.data = (unsigned long) NULL;
-		scp->eh_timeout.expires = 0;
-	} else {
-		if (scp->eh_timeout.data != (unsigned long) NULL)
-			del_timer(&scp->eh_timeout);
-		scp->eh_timeout.data = (unsigned long) scp;
-		scp->eh_timeout.expires = jiffies + timeout;
-		add_timer(&scp->eh_timeout);
-	}
-
-	return oldto;
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
 				ulong64 *paddr);
 static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
 
 #endif
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6e..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 	init_event_struct(evt_struct,
 			  handle_cmd_rsp,
 			  VIOSRP_SRP_FORMAT,
-			  cmnd->timeout_per_command/HZ);
+			  cmnd->request->timeout/HZ);
 
 	evt_struct->cmnd = cmnd;
 	evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc45..81c16cba5417 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
 	pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;
 	pc->done = done;
-	pc->timeout = jiffies + cmd->timeout_per_command;
+	pc->timeout = jiffies + cmd->request->timeout;
 
 	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
 		printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..d30eb7ba018e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
 		sdev->no_uld_attach = 1;
 	}
 	if (ipr_is_vset_device(res)) {
-		sdev->timeout = IPR_VSET_RW_TIMEOUT;
+		blk_queue_rq_timeout(sdev->request_queue,
+				     IPR_VSET_RW_TIMEOUT);
 		blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
 	}
 	if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41df..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
 	scb->cmd.dcdb.segment_4G = 0;
 	scb->cmd.dcdb.enhanced_sg = 0;
 
-	TimeOut = scb->scsi_cmd->timeout_per_command;
+	TimeOut = scb->scsi_cmd->request->timeout;
 
 	if (ha->subsys->param[4] & 0x00100000) {	/* If NEW Tape DCDB is Supported */
 		if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..1eca82420aab 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
 	scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
 
 	cls_session = starget_to_session(scsi_target(scmd->device));
 	session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 		 * We are probably in the middle of iscsi recovery so let
 		 * that complete and handle the error.
 		 */
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
 	}
 
 	conn = session->leadconn;
 	if (!conn) {
 		/* In the middle of shuting down */
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
 	}
 
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
 			    (conn->ping_timeout * HZ), jiffies))
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 	/*
 	 * if we are about to check the transport then give the command
 	 * more time
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
 			   jiffies))
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 	/* if in the middle of checking the transport then give us more time */
 	if (conn->ping_task)
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 done:
 	spin_unlock(&session->lock);
-	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
+					"timer reset" : "nh");
 	return rc;
 }
 
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e1872989710a..e15501170698 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
 
 	/* Bounce SCSI-initiated commands to the SCSI EH */
 	if (qc->scsicmd) {
-		scsi_req_abort_cmd(qc->scsicmd);
+		blk_abort_request(qc->scsicmd->request);
 		scsi_schedule_eh(qc->scsicmd->device->host);
 		return;
 	}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
 int  sas_register_ports(struct sas_ha_struct *sas_ha);
 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
 
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
 
 int  sas_init_queue(struct sas_ha_struct *sas_ha);
 int  sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
 	return;
 }
 
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct sas_task *task = TO_SAS_TASK(cmd);
 	unsigned long flags;
 
 	if (!task) {
-		cmd->timeout_per_command /= 2;
+		cmd->request->timeout /= 2;
 		SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
-			    cmd, task, (cmd->timeout_per_command ?
-			    "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
-		if (!cmd->timeout_per_command)
-			return EH_NOT_HANDLED;
-		return EH_RESET_TIMER;
+			    cmd, task, (cmd->request->timeout ?
+			    "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
+		if (!cmd->request->timeout)
+			return BLK_EH_NOT_HANDLED;
+		return BLK_EH_RESET_TIMER;
 	}
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
-			    cmd, task);
-		return EH_HANDLED;
+		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
+			    "BLK_EH_HANDLED\n", cmd, task);
+		return BLK_EH_HANDLED;
 	}
 	if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
-			    "EH_RESET_TIMER\n",
+			    "BLK_EH_RESET_TIMER\n",
 			    cmd, task);
-		return EH_RESET_TIMER;
+		return BLK_EH_RESET_TIMER;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-	SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
+	SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
 		    cmd, task);
 
-	return EH_NOT_HANDLED;
+	return BLK_EH_NOT_HANDLED;
 }
 
 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
 		return;
 	}
 
-	scsi_req_abort_cmd(sc);
+	blk_abort_request(sc->request);
 	scsi_schedule_eh(sc->device->host);
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7d..afe1de998763 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
  * cmd has not been completed within the timeout period.
  */
 static enum
-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 {
 	struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
 	struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 
 	if (time_after(jiffies, scmd->jiffies_at_alloc +
 				(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
-		return EH_NOT_HANDLED;
+		return BLK_EH_NOT_HANDLED;
 	}
 
 	instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 
 		spin_unlock_irqrestore(instance->host->host_lock, flags);
 	}
-	return EH_RESET_TIMER;
+	return BLK_EH_RESET_TIMER;
 }
 
 /**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
 	**
 	**----------------------------------------------------
 	*/
-	if (np->settle_time && cmd->timeout_per_command >= HZ) {
-		u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
+	if (np->settle_time && cmd->request->timeout >= HZ) {
+		u_long tlimit = jiffies + cmd->request->timeout - HZ;
 		if (time_after(np->settle_time, tlimit))
 			np->settle_time = tlimit;
 	}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
 	/* Set ISP command timeout. */
-	pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
 
 	/* Set device target ID and LUN */
 	pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
 	/* Set ISP command timeout. */
-	pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
 
 	/* Set device target ID and LUN */
 	pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc52..de8279ad7d89 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
 	DEBUG2(printk(KERN_INFO
 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
-		      cmd, jiffies, cmd->timeout_per_command / HZ,
+		      cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
 	/* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
 	DEBUG2(printk(KERN_INFO
 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
-		      ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
+		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
 	stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..dbeb86cafc0d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 	unsigned long flags;
 
 	cmd->device = dev;
-	init_timer(&cmd->eh_timeout);
 	INIT_LIST_HEAD(&cmd->list);
 	spin_lock_irqsave(&dev->list_lock, flags);
 	list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,14 +651,19 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	unsigned long timeout;
 	int rtn = 0;
 
+	/*
+	 * We will use a queued command if possible, otherwise we will
+	 * emulate the queuing and calling of completion function ourselves.
+	 */
+	atomic_inc(&cmd->device->iorequest_cnt);
+
 	/* check if the device is still usable */
 	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
 		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
 		 * returns an immediate error upwards, and signals
 		 * that the device is no longer present */
 		cmd->result = DID_NO_CONNECT << 16;
-		atomic_inc(&cmd->device->iorequest_cnt);
-		__scsi_done(cmd);
+		scsi_done(cmd);
 		/* return 0 (because the command has been processed) */
 		goto out;
 	}
@@ -672,6 +676,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	 * future requests should not occur until the device
 	 * transitions out of the suspend state.
 	 */
+
 	scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
 
 	SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +719,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 		host->resetting = 0;
 	}
 
-	/*
-	 * AK: unlikely race here: for some reason the timer could
-	 * expire before the serial number is set up below.
-	 */
-	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
-
 	scsi_log_send(cmd);
 
 	/*
-	 * We will use a queued command if possible, otherwise we will
-	 * emulate the queuing and calling of completion function ourselves.
-	 */
-	atomic_inc(&cmd->device->iorequest_cnt);
-
-	/*
 	 * Before we queue this command, check if the command
 	 * length exceeds what the host adapter can handle.
 	 */
@@ -744,6 +737,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	}
 
 	spin_lock_irqsave(host->host_lock, flags);
+	/*
+	 * AK: unlikely race here: for some reason the timer could
+	 * expire before the serial number is set up below.
+	 *
+	 * TODO: kill serial or move to blk layer
+	 */
 	scsi_cmd_get_serial(host, cmd);
 
 	if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +753,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	}
 	spin_unlock_irqrestore(host->host_lock, flags);
 	if (rtn) {
-		if (scsi_delete_timer(cmd)) {
-			atomic_inc(&cmd->device->iodone_cnt);
-			scsi_queue_insert(cmd,
-					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
-					  rtn : SCSI_MLQUEUE_HOST_BUSY);
-		}
+		scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+						rtn : SCSI_MLQUEUE_HOST_BUSY);
 		SCSI_LOG_MLQUEUE(3,
 			printk("queuecommand : request rejected\n"));
 	}
@@ -770,24 +765,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 }
 
 /**
- * scsi_req_abort_cmd -- Request command recovery for the specified command
- * @cmd: pointer to the SCSI command of interest
- *
- * This function requests that SCSI Core start recovery for the
- * command by deleting the timer and adding the command to the eh
- * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
- * implement their own error recovery MAY ignore the timeout event if
- * they generated scsi_req_abort_cmd.
- */
-void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
-{
-	if (!scsi_delete_timer(cmd))
-		return;
-	scsi_times_out(cmd);
-}
-EXPORT_SYMBOL(scsi_req_abort_cmd);
-
-/**
  * scsi_done - Enqueue the finished SCSI command into the done queue.
  * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
  *       ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +779,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
  */
 static void scsi_done(struct scsi_cmnd *cmd)
 {
-	/*
-	 * We don't have to worry about this one timing out anymore.
-	 * If we are unable to remove the timer, then the command
-	 * has already timed out.  In which case, we have no choice but to
-	 * let the timeout function run, as we have no idea where in fact
-	 * that function could really be.  It might be on another processor,
-	 * etc, etc.
-	 */
-	if (!scsi_delete_timer(cmd))
-		return;
-	__scsi_done(cmd);
-}
-
-/* Private entry to scsi_done() to complete a command when the timer
- * isn't running --- used by scsi_times_out */
-void __scsi_done(struct scsi_cmnd *cmd)
-{
-	struct request *rq = cmd->request;
-
-	/*
-	 * Set the serial numbers back to zero
-	 */
-	cmd->serial_number = 0;
-
-	atomic_inc(&cmd->device->iodone_cnt);
-	if (cmd->result)
-		atomic_inc(&cmd->device->ioerr_cnt);
-
-	BUG_ON(!rq);
-
-	/*
-	 * The uptodate/nbytes values don't matter, as we allow partial
-	 * completes and thus will check this in the softirq callback
-	 */
-	rq->completion_data = cmd;
-	blk_complete_request(rq);
+	blk_complete_request(cmd->request);
 }
 
 /* Move this to a header if it becomes more generally useful */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 39ce3aba1dac..fecefa05cb62 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
 }
 
 /**
- * scsi_add_timer - Start timeout timer for a single scsi command.
- * @scmd:	scsi command that is about to start running.
- * @timeout:	amount of time to allow this command to run.
- * @complete:	timeout function to call if timer isn't canceled.
- *
- * Notes:
- *    This should be turned into an inline function.  Each scsi command
- *    has its own timer, and as it is added to the queue, we set up the
- *    timer.  When the command completes, we cancel the timer.
- */
-void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
-		    void (*complete)(struct scsi_cmnd *))
-{
-
-	/*
-	 * If the clock was already running for this command, then
-	 * first delete the timer.  The timer handling code gets rather
-	 * confused if we don't do this.
-	 */
-	if (scmd->eh_timeout.function)
-		del_timer(&scmd->eh_timeout);
-
-	scmd->eh_timeout.data = (unsigned long)scmd;
-	scmd->eh_timeout.expires = jiffies + timeout;
-	scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
-
-	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
-					  " %d, (%p)\n", __func__,
-					  scmd, timeout, complete));
-
-	add_timer(&scmd->eh_timeout);
-}
-
-/**
- * scsi_delete_timer - Delete/cancel timer for a given function.
- * @scmd:	Cmd that we are canceling timer for
- *
- * Notes:
- *     This should be turned into an inline function.
- *
- * Return value:
- *     1 if we were able to detach the timer.  0 if we blew it, and the
- *     timer function has already started to run.
- */
-int scsi_delete_timer(struct scsi_cmnd *scmd)
-{
-	int rtn;
-
-	rtn = del_timer(&scmd->eh_timeout);
-
-	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
-					 " rtn: %d\n", __func__,
-					 scmd, rtn));
-
-	scmd->eh_timeout.data = (unsigned long)NULL;
-	scmd->eh_timeout.function = NULL;
-
-	return rtn;
-}
-
-/**
  * scsi_times_out - Timeout function for normal scsi commands.
- * @scmd:	Cmd that is timing out.
+ * @req:	request that is timing out.
  *
  * Notes:
  *     We do not need to lock this.  There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
  *     normal completion function determines that the timer has already
  *     fired, then it mustn't do anything.
  */
-void scsi_times_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
-	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
+	struct scsi_cmnd *scmd = req->special;
+	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 
 	scsi_log_completion(scmd, TIMEOUT_ERROR);
 
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
 		eh_timed_out = NULL;
 
 	if (eh_timed_out)
-		switch (eh_timed_out(scmd)) {
-		case EH_HANDLED:
-			__scsi_done(scmd);
-			return;
-		case EH_RESET_TIMER:
-			scsi_add_timer(scmd, scmd->timeout_per_command,
-					scsi_times_out);
-			return;
-		case EH_NOT_HANDLED:
+		rtn = eh_timed_out(scmd);
+	switch (rtn) {
+	case BLK_EH_NOT_HANDLED:
 			break;
+	default:
+		return rtn;
 	}
 
 	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
 		scmd->result |= DID_TIME_OUT << 16;
-		__scsi_done(scmd);
+		return BLK_EH_HANDLED;
 	}
+
+	return BLK_EH_NOT_HANDLED;
 }
 
 /**
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 
 	blk_rq_init(NULL, &req);
 	scmd->request = &req;
-	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
 
 	scmd->cmnd = req.cmd;
 
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 
 	scmd->sc_data_direction = DMA_BIDIRECTIONAL;
 
-	init_timer(&scmd->eh_timeout);
-
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->tmf_in_progress = 1;
 	spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62307bd794a9..e7686500e9dd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1181,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 
 	cmd->transfersize = req->data_len;
 	cmd->allowed = req->retries;
-	cmd->timeout_per_command = req->timeout;
 	return BLKPREP_OK;
 }
 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1416,17 +1415,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);
 
-	__scsi_done(cmd);
+	blk_complete_request(req);
 }
 
 static void scsi_softirq_done(struct request *rq)
 {
-	struct scsi_cmnd *cmd = rq->completion_data;
-	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+	struct scsi_cmnd *cmd = rq->special;
+	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
 	int disposition;
 
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
+	/*
+	 * Set the serial numbers back to zero
+	 */
+	cmd->serial_number = 0;
+
+	atomic_inc(&cmd->device->iodone_cnt);
+	if (cmd->result)
+		atomic_inc(&cmd->device->ioerr_cnt);
+
 	disposition = scsi_decide_disposition(cmd);
 	if (disposition != SUCCESS &&
 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1675,6 +1683,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 
 	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_softirq_done(q, scsi_softirq_done);
+	blk_queue_rq_timed_out(q, scsi_times_out);
 	return q;
 }
 
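The scsi_lib.c hunk above is where every SCSI request queue gets wired into the block-layer timeout machinery. For orientation only, a hedged sketch of the same three blk_queue_* calls for a generic request-based driver; the mydrv_* names are hypothetical and not part of this commit.

#include <linux/blkdev.h>

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	/* rq->special carries the driver's command, as in scsi_times_out() */
	return BLK_EH_RESET_TIMER;
}

static void mydrv_softirq_done(struct request *rq)
{
	/* completion work runs here after the driver calls blk_complete_request(rq) */
}

static void mydrv_init_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);	/* completion bottom half */
	blk_queue_rq_timed_out(q, mydrv_timed_out);	/* timeout callback */
	blk_queue_rq_timeout(q, 30 * HZ);		/* per-queue default timeout */
}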
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f7511204..6cddd5dd323c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
 #include <linux/device.h>
 
 struct request_queue;
+struct request;
 struct scsi_cmnd;
 struct scsi_device;
 struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
 extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
-extern void __scsi_done(struct scsi_cmnd *cmd);
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd);
 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
 extern void scsi_exit_devinfo(void);
 
 /* scsi_error.c */
-extern void scsi_add_timer(struct scsi_cmnd *, int,
-		void (*)(struct scsi_cmnd *));
-extern int scsi_delete_timer(struct scsi_cmnd *);
-extern void scsi_times_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return scsi_times_out(struct request *req);
 extern int scsi_error_handler(void *host);
 extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
 extern void scsi_eh_wakeup(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be5..7f618ee5ecea 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -560,12 +560,15 @@ sdev_rd_attr (vendor, "%.8s\n");
 sdev_rd_attr (model, "%.16s\n");
 sdev_rd_attr (rev, "%.4s\n");
 
+/*
+ * TODO: can we make these symlinks to the block layer ones?
+ */
 static ssize_t
 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct scsi_device *sdev;
 	sdev = to_scsi_device(dev);
-	return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
+	return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
 }
 
 static ssize_t
@@ -576,7 +579,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
 	int timeout;
 	sdev = to_scsi_device(dev);
 	sscanf (buf, "%d\n", &timeout);
-	sdev->timeout = timeout * HZ;
+	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
 	return count;
 }
 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39af..3117bb106b5d 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
 	int err;
 
 	dprintk("%lx %u\n", uaddr, len);
-	err = blk_rq_map_user(q, rq, (void *)uaddr, len);
+	err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
 	if (err) {
 		/*
 		 * TODO: need to fixup sg_tablesize, max_segment_size,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb84..9168883d0dfe 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1950,15 +1950,15 @@ static int fc_vport_match(struct attribute_container *cont,
  * Notes:
  *	This routine assumes no locks are held on entry.
  */
-static enum scsi_eh_timer_return
+static enum blk_eh_timer_return
 fc_timed_out(struct scsi_cmnd *scmd)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
 
 	if (rport->port_state == FC_PORTSTATE_BLOCKED)
-		return EH_RESET_TIMER;
+		return BLK_EH_RESET_TIMER;
 
-	return EH_NOT_HANDLED;
+	return BLK_EH_NOT_HANDLED;
 }
 
 /*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d7856454..c0cf4acda7de 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -86,6 +86,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
 
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define SD_MINORS	16
+#else
+#define SD_MINORS	0
+#endif
+
 static int  sd_revalidate_disk(struct gendisk *);
 static int  sd_probe(struct device *);
 static int  sd_remove(struct device *);
@@ -159,7 +165,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
 		sd_print_sense_hdr(sdkp, &sshdr);
 		return -EINVAL;
 	}
-	sd_revalidate_disk(sdkp->disk);
+	revalidate_disk(sdkp->disk);
 	return count;
 }
 
@@ -377,7 +383,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	sector_t block = rq->sector;
 	sector_t threshold;
 	unsigned int this_count = rq->nr_sectors;
-	unsigned int timeout = sdp->timeout;
 	int ret;
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -578,7 +583,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	SCpnt->transfersize = sdp->sector_size;
 	SCpnt->underflow = this_count << 9;
 	SCpnt->allowed = SD_MAX_RETRIES;
-	SCpnt->timeout_per_command = timeout;
 
 	/*
 	 * This indicates that the command is ready from our end to be
@@ -910,7 +914,7 @@ static void sd_rescan(struct device *dev)
 	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
 
 	if (sdkp) {
-		sd_revalidate_disk(sdkp->disk);
+		revalidate_disk(sdkp->disk);
 		scsi_disk_put(sdkp);
 	}
 }
@@ -1764,6 +1768,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
 }
 
 /**
+ *	sd_format_disk_name - format disk name
+ *	@prefix: name prefix - ie. "sd" for SCSI disks
+ *	@index: index of the disk to format name for
+ *	@buf: output buffer
+ *	@buflen: length of the output buffer
+ *
+ *	SCSI disk names starts at sda.  The 26th device is sdz and the
+ *	27th is sdaa.  The last one for two lettered suffix is sdzz
+ *	which is followed by sdaaa.
+ *
+ *	This is basically 26 base counting with one extra 'nil' entry
+ *	at the beggining from the second digit on and can be
+ *	determined using similar method as 26 base conversion with the
+ *	index shifted -1 after each digit is computed.
+ *
+ *	CONTEXT:
+ *	Don't care.
+ *
+ *	RETURNS:
+ *	0 on success, -errno on failure.
+ */
+static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
+{
+	const int base = 'z' - 'a' + 1;
+	char *begin = buf + strlen(prefix);
+	char *end = buf + buflen;
+	char *p;
+	int unit;
+
+	p = end - 1;
+	*p = '\0';
+	unit = base;
+	do {
+		if (p == begin)
+			return -EINVAL;
+		*--p = 'a' + (index % unit);
+		index = (index / unit) - 1;
+	} while (index >= 0);
+
+	memmove(begin, p, end - p);
+	memcpy(buf, prefix, strlen(prefix));
+
+	return 0;
+}
+
+/**
  *	sd_probe - called during driver initialization and whenever a
  *	new scsi device is attached to the system. It is called once
  *	for each scsi device (not just disks) present.
@@ -1801,7 +1851,7 @@ static int sd_probe(struct device *dev)
 	if (!sdkp)
 		goto out;
 
-	gd = alloc_disk(16);
+	gd = alloc_disk(SD_MINORS);
 	if (!gd)
 		goto out_free;
 
@@ -1815,8 +1865,8 @@ static int sd_probe(struct device *dev)
 	if (error)
 		goto out_put;
 
-	error = -EBUSY;
-	if (index >= SD_MAX_DISKS)
+	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
+	if (error)
 		goto out_free_index;
 
 	sdkp->device = sdp;
@@ -1826,11 +1876,12 @@ static int sd_probe(struct device *dev)
 	sdkp->openers = 0;
 	sdkp->previous_state = 1;
 
-	if (!sdp->timeout) {
+	if (!sdp->request_queue->rq_timeout) {
 		if (sdp->type != TYPE_MOD)
-			sdp->timeout = SD_TIMEOUT;
+			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
 		else
-			sdp->timeout = SD_MOD_TIMEOUT;
+			blk_queue_rq_timeout(sdp->request_queue,
+					     SD_MOD_TIMEOUT);
 	}
 
 	device_initialize(&sdkp->dev);
@@ -1843,24 +1894,12 @@ static int sd_probe(struct device *dev)
 
 	get_device(&sdp->sdev_gendev);
 
-	gd->major = sd_major((index & 0xf0) >> 4);
-	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-	gd->minors = 16;
-	gd->fops = &sd_fops;
-
-	if (index < 26) {
-		sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
-	} else if (index < (26 + 1) * 26) {
-		sprintf(gd->disk_name, "sd%c%c",
-			'a' + index / 26 - 1,'a' + index % 26);
-	} else {
-		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
-		const unsigned int m2 = (index / 26 - 1) % 26;
-		const unsigned int m3 =  index % 26;
-		sprintf(gd->disk_name, "sd%c%c%c",
-			'a' + m1, 'a' + m2, 'a' + m3);
-	}
-
+	if (index < SD_MAX_DISKS) {
+		gd->major = sd_major((index & 0xf0) >> 4);
+		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+		gd->minors = SD_MINORS;
+	}
+	gd->fops = &sd_fops;
 	gd->private_data = &sdkp->driver;
 	gd->queue = sdkp->device->request_queue;
 
@@ -1869,7 +1908,7 @@ static int sd_probe(struct device *dev)
 	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
 
 	gd->driverfs_dev = &sdp->sdev_gendev;
-	gd->flags = GENHD_FL_DRIVERFS;
+	gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
 	if (sdp->removable)
 		gd->flags |= GENHD_FL_REMOVABLE;
 
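As a side note, the sd_format_disk_name() helper added above implements the base-26 naming described in its comment: index 0 maps to sda, 25 to sdz, 26 to sdaa and 701 to sdzz. Below is a small stand-alone, user-space restatement of the same loop, for illustration only; it is not part of the patch.

#include <stdio.h>
#include <string.h>

/* user-space copy of the loop in sd_format_disk_name() above */
static int format_disk_name(const char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p = end - 1;

	*p = '\0';
	do {
		if (p == begin)
			return -1;
		*--p = 'a' + (index % base);
		index = (index / base) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);		/* slide suffix up against the prefix */
	memcpy(buf, prefix, strlen(prefix));	/* then fill in the prefix */
	return 0;
}

int main(void)
{
	char name[32];
	int samples[] = { 0, 25, 26, 701, 702 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		format_disk_name("sd", samples[i], name, sizeof(name));
		printf("%d -> %s\n", samples[i], name);	/* 0 -> sda, 26 -> sdaa, 702 -> sdaaa */
	}
	return 0;
}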
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 661f9f21650a..ba9b9bbd4e73 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
 #include <linux/seq_file.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
-#include <linux/scatterlist.h>
 #include <linux/blktrace_api.h>
 #include <linux/smp_lock.h>
 
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
 #endif
 
 #define SG_ALLOW_DIO_DEF 0
-#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
 
 #define SG_MAX_DEVS 32768
 
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
 	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
 	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
 	unsigned bufflen;	/* Size of (aggregate) data buffer */
-	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
-	struct scatterlist *buffer;/* scatter list */
+	struct page **pages;
+	int page_order;
 	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
 	unsigned char cmd_opcode; /* first byte of command */
 } Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
 	char orphan;		/* 1 -> drop on sight, 0 -> normal */
 	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
 	volatile char done;	/* 0->before bh, 1->before read, 2->read */
+	struct request *rq;
+	struct bio *bio;
 } Sg_request;
 
 typedef struct sg_fd {		/* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 
 static int sg_fasync(int fd, struct file *filp, int mode);
 /* tasklet or soft irq callback */
-static void sg_cmd_done(void *data, char *sense, int result, int resid);
-static int sg_start_req(Sg_request * srp);
+static void sg_rq_end_io(struct request *rq, int uptodate);
+static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static void sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
 			   int read_only, Sg_request **o_srp);
 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
 			   unsigned char *cmnd, int timeout, int blocking);
-static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
-		      int wr_xf, int *countp, unsigned char __user **up);
-static int sg_write_xfer(Sg_request * srp);
194static int sg_read_xfer(Sg_request * srp);
195static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
196static void sg_remove_scat(Sg_scatter_hold * schp); 192static void sg_remove_scat(Sg_scatter_hold * schp);
197static void sg_build_reserve(Sg_fd * sfp, int req_size); 193static void sg_build_reserve(Sg_fd * sfp, int req_size);
198static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
199static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
200static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
201static void sg_page_free(struct page *page, int size);
202static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
203static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 197static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
204static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 198static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
206static Sg_request *sg_add_request(Sg_fd * sfp); 200static Sg_request *sg_add_request(Sg_fd * sfp);
207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 201static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
208static int sg_res_in_use(Sg_fd * sfp); 202static int sg_res_in_use(Sg_fd * sfp);
209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
210static Sg_device *sg_get_dev(int dev); 203static Sg_device *sg_get_dev(int dev);
211#ifdef CONFIG_SCSI_PROC_FS 204#ifdef CONFIG_SCSI_PROC_FS
212static int sg_last_dev(void); 205static int sg_last_dev(void);
@@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
529 err = -EFAULT; 522 err = -EFAULT;
530 goto err_out; 523 goto err_out;
531 } 524 }
532 err = sg_read_xfer(srp); 525err_out:
533 err_out:
534 sg_finish_rem_req(srp); 526 sg_finish_rem_req(srp);
535 return (0 == err) ? count : err; 527 return (0 == err) ? count : err;
536} 528}
@@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
612 else 604 else
613 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 605 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
614 hp->dxfer_len = mxsize; 606 hp->dxfer_len = mxsize;
615 hp->dxferp = (char __user *)buf + cmd_size; 607 if (hp->dxfer_direction == SG_DXFER_TO_DEV)
608 hp->dxferp = (char __user *)buf + cmd_size;
609 else
610 hp->dxferp = NULL;
616 hp->sbp = NULL; 611 hp->sbp = NULL;
617 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 612 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
618 hp->flags = input_size; /* structure abuse ... */ 613 hp->flags = input_size; /* structure abuse ... */
@@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
732 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 727 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
733 (int) cmnd[0], (int) hp->cmd_len)); 728 (int) cmnd[0], (int) hp->cmd_len));
734 729
735 if ((k = sg_start_req(srp))) { 730 k = sg_start_req(srp, cmnd);
731 if (k) {
736 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); 732 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
737 sg_finish_rem_req(srp); 733 sg_finish_rem_req(srp);
738 return k; /* probably out of space --> ENOMEM */ 734 return k; /* probably out of space --> ENOMEM */
739 } 735 }
740 if ((k = sg_write_xfer(srp))) {
741 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
742 sg_finish_rem_req(srp);
743 return k;
744 }
745 if (sdp->detached) { 736 if (sdp->detached) {
746 sg_finish_rem_req(srp); 737 sg_finish_rem_req(srp);
747 return -ENODEV; 738 return -ENODEV;
@@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
763 break; 754 break;
764 } 755 }
765 hp->duration = jiffies_to_msecs(jiffies); 756 hp->duration = jiffies_to_msecs(jiffies);
766/* Now send everything of to mid-level. The next time we hear about this 757
767 packet is when sg_cmd_done() is called (i.e. a callback). */ 758 srp->rq->timeout = timeout;
768 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, 759 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
769 hp->dxfer_len, srp->data.k_use_sg, timeout, 760 srp->rq, 1, sg_rq_end_io);
770 SG_DEFAULT_RETRIES, srp, sg_cmd_done, 761 return 0;
771 GFP_ATOMIC)) {
772 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
773 /*
774 * most likely out of mem, but could also be a bad map
775 */
776 sg_finish_rem_req(srp);
777 return -ENOMEM;
778 } else
779 return 0;
780} 762}
781 763
782static int 764static int
@@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1192 Sg_fd *sfp; 1174 Sg_fd *sfp;
1193 unsigned long offset, len, sa; 1175 unsigned long offset, len, sa;
1194 Sg_scatter_hold *rsv_schp; 1176 Sg_scatter_hold *rsv_schp;
1195 struct scatterlist *sg; 1177 int k, length;
1196 int k;
1197 1178
1198 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) 1179 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1199 return VM_FAULT_SIGBUS; 1180 return VM_FAULT_SIGBUS;
@@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1203 return VM_FAULT_SIGBUS; 1184 return VM_FAULT_SIGBUS;
1204 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", 1185 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1205 offset, rsv_schp->k_use_sg)); 1186 offset, rsv_schp->k_use_sg));
1206 sg = rsv_schp->buffer;
1207 sa = vma->vm_start; 1187 sa = vma->vm_start;
1208 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1188 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1209 ++k, sg = sg_next(sg)) { 1189 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1210 len = vma->vm_end - sa; 1190 len = vma->vm_end - sa;
1211 len = (len < sg->length) ? len : sg->length; 1191 len = (len < length) ? len : length;
1212 if (offset < len) { 1192 if (offset < len) {
1213 struct page *page; 1193 struct page *page = nth_page(rsv_schp->pages[k],
1214 page = virt_to_page(page_address(sg_page(sg)) + offset); 1194 offset >> PAGE_SHIFT);
1215 get_page(page); /* increment page count */ 1195 get_page(page); /* increment page count */
1216 vmf->page = page; 1196 vmf->page = page;
1217 return 0; /* success */ 1197 return 0; /* success */
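
Note: with the scatter list replaced by an array of equally sized page blocks, sg_vma_fault() no longer walks struct scatterlist entries; every entry spans 1 << (PAGE_SHIFT + page_order) bytes, so the faulting offset can be turned into an array index plus an nth_page() step arithmetically. A minimal sketch of that arithmetic, assuming 4 KiB pages and a uniform segment order (locate() is illustrative, not driver code):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages, for illustration only */

/* Illustrative: find the page backing a byte offset when the buffer is
 * built from segments that all share the same page_order. */
static void locate(unsigned long offset, int page_order,
		   int *segment, int *page_in_segment)
{
	unsigned long seg_len = 1UL << (PAGE_SHIFT + page_order);

	*segment = offset / seg_len;
	*page_in_segment = (offset % seg_len) >> PAGE_SHIFT;
}

int main(void)
{
	int seg, pg;

	locate(150000, 2, &seg, &pg);	/* 16 KiB (order-2) segments */
	printf("offset 150000 -> segment %d, page %d\n", seg, pg);
	return 0;
}

For a 150000-byte offset into order-2 segments this reports segment 9, page 0, mirroring the effect of the fault loop above, which advances one fixed-size segment per iteration until the offset falls inside the current entry.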
@@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1233 Sg_fd *sfp; 1213 Sg_fd *sfp;
1234 unsigned long req_sz, len, sa; 1214 unsigned long req_sz, len, sa;
1235 Sg_scatter_hold *rsv_schp; 1215 Sg_scatter_hold *rsv_schp;
1236 int k; 1216 int k, length;
1237 struct scatterlist *sg;
1238 1217
1239 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1218 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1240 return -ENXIO; 1219 return -ENXIO;
@@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1248 return -ENOMEM; /* cannot map more than reserved buffer */ 1227 return -ENOMEM; /* cannot map more than reserved buffer */
1249 1228
1250 sa = vma->vm_start; 1229 sa = vma->vm_start;
1251 sg = rsv_schp->buffer; 1230 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1252 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1231 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1253 ++k, sg = sg_next(sg)) {
1254 len = vma->vm_end - sa; 1232 len = vma->vm_end - sa;
1255 len = (len < sg->length) ? len : sg->length; 1233 len = (len < length) ? len : length;
1256 sa += len; 1234 sa += len;
1257 } 1235 }
1258 1236
@@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1263 return 0; 1241 return 0;
1264} 1242}
1265 1243
1266/* This function is a "bottom half" handler that is called by the 1244/*
1267 * mid level when a command is completed (or has failed). */ 1245 * This function is a "bottom half" handler that is called by the mid
1268static void 1246 * level when a command is completed (or has failed).
1269sg_cmd_done(void *data, char *sense, int result, int resid) 1247 */
1248static void sg_rq_end_io(struct request *rq, int uptodate)
1270{ 1249{
1271 Sg_request *srp = data; 1250 struct sg_request *srp = rq->end_io_data;
1272 Sg_device *sdp = NULL; 1251 Sg_device *sdp = NULL;
1273 Sg_fd *sfp; 1252 Sg_fd *sfp;
1274 unsigned long iflags; 1253 unsigned long iflags;
1275 unsigned int ms; 1254 unsigned int ms;
1255 char *sense;
1256 int result, resid;
1276 1257
1277 if (NULL == srp) { 1258 if (NULL == srp) {
1278 printk(KERN_ERR "sg_cmd_done: NULL request\n"); 1259 printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1286 return; 1267 return;
1287 } 1268 }
1288 1269
1270 sense = rq->sense;
1271 result = rq->errors;
1272 resid = rq->data_len;
1289 1273
1290 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1274 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1291 sdp->disk->disk_name, srp->header.pack_id, result)); 1275 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1296 if (0 != result) { 1280 if (0 != result) {
1297 struct scsi_sense_hdr sshdr; 1281 struct scsi_sense_hdr sshdr;
1298 1282
1299 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1300 srp->header.status = 0xff & result; 1283 srp->header.status = 0xff & result;
1301 srp->header.masked_status = status_byte(result); 1284 srp->header.masked_status = status_byte(result);
1302 srp->header.msg_status = msg_byte(result); 1285 srp->header.msg_status = msg_byte(result);
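
Note: sg_rq_end_io() now recovers the completion data straight from the struct request: the sense buffer was attached via rq->sense in sg_start_req(), the SCSI result comes from rq->errors and the residual from rq->data_len, so the old memcpy of the sense data is gone. The header fields are then filled by slicing bytes out of that 32-bit result word. A small userspace sketch of the conventional byte layout, offered as an assumption for illustration rather than a restatement of the scsi.h macros:

#include <stdio.h>

/* Assumed layout for illustration: the 32-bit SCSI result word packs
 * driver | host | message | status bytes, highest to lowest. */
static void unpack_result(unsigned int result)
{
	printf("status byte : 0x%02x\n", result & 0xff);
	printf("msg byte    : 0x%02x\n", (result >> 8) & 0xff);
	printf("host byte   : 0x%02x\n", (result >> 16) & 0xff);
	printf("driver byte : 0x%02x\n", (result >> 24) & 0xff);
}

int main(void)
{
	unpack_result(0x00070002);	/* e.g. host byte 0x07, status 0x02 */
	return 0;
}

The driver's status_byte()/msg_byte() helpers extract the corresponding fields (status_byte() with some extra masking), which is what populates masked_status and msg_status in the hunk above.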
@@ -1634,37 +1617,79 @@ exit_sg(void)
1634 idr_destroy(&sg_index_idr); 1617 idr_destroy(&sg_index_idr);
1635} 1618}
1636 1619
1637static int 1620static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1638sg_start_req(Sg_request * srp)
1639{ 1621{
1640 int res; 1622 int res;
1623 struct request *rq;
1641 Sg_fd *sfp = srp->parentfp; 1624 Sg_fd *sfp = srp->parentfp;
1642 sg_io_hdr_t *hp = &srp->header; 1625 sg_io_hdr_t *hp = &srp->header;
1643 int dxfer_len = (int) hp->dxfer_len; 1626 int dxfer_len = (int) hp->dxfer_len;
1644 int dxfer_dir = hp->dxfer_direction; 1627 int dxfer_dir = hp->dxfer_direction;
1628 unsigned int iov_count = hp->iovec_count;
1645 Sg_scatter_hold *req_schp = &srp->data; 1629 Sg_scatter_hold *req_schp = &srp->data;
1646 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1630 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1631 struct request_queue *q = sfp->parentdp->device->request_queue;
1632 struct rq_map_data *md, map_data;
1633 int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1634
1635 SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1636 dxfer_len));
1637
1638 rq = blk_get_request(q, rw, GFP_ATOMIC);
1639 if (!rq)
1640 return -ENOMEM;
1641
1642 memcpy(rq->cmd, cmd, hp->cmd_len);
1643
1644 rq->cmd_len = hp->cmd_len;
1645 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1646
1647 srp->rq = rq;
1648 rq->end_io_data = srp;
1649 rq->sense = srp->sense_b;
1650 rq->retries = SG_DEFAULT_RETRIES;
1647 1651
1648 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1649 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1652 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1650 return 0; 1653 return 0;
1651 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1654
1652 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1655 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1653 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1656 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1654 res = sg_build_direct(srp, sfp, dxfer_len); 1657 !sfp->parentdp->device->host->unchecked_isa_dma &&
1655 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1658 blk_rq_aligned(q, hp->dxferp, dxfer_len))
1656 return res; 1659 md = NULL;
1657 } 1660 else
1658 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1661 md = &map_data;
1659 sg_link_reserve(sfp, srp, dxfer_len); 1662
1660 else { 1663 if (md) {
1661 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1664 if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1662 if (res) { 1665 sg_link_reserve(sfp, srp, dxfer_len);
1663 sg_remove_scat(req_schp); 1666 else {
1664 return res; 1667 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1668 if (res)
1669 return res;
1665 } 1670 }
1671
1672 md->pages = req_schp->pages;
1673 md->page_order = req_schp->page_order;
1674 md->nr_entries = req_schp->k_use_sg;
1666 } 1675 }
1667 return 0; 1676
1677 if (iov_count)
1678 res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
1679 hp->dxfer_len, GFP_ATOMIC);
1680 else
1681 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1682 hp->dxfer_len, GFP_ATOMIC);
1683
1684 if (!res) {
1685 srp->bio = rq->bio;
1686
1687 if (!md) {
1688 req_schp->dio_in_use = 1;
1689 hp->info |= SG_INFO_DIRECT_IO;
1690 }
1691 }
1692 return res;
1668} 1693}
1669 1694
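
Note: the rewritten sg_start_req() above is the heart of the conversion. Instead of building its own scatter list and handing it to scsi_execute_async(), sg now allocates a REQ_TYPE_BLOCK_PC request with blk_get_request() and lets blk_rq_map_user()/blk_rq_map_user_iov() map the user buffer, passing a struct rq_map_data only when the driver's reserve or indirect pages must back the transfer. Zero-copy direct IO is attempted only when blk_rq_aligned() accepts the user address and length against the queue's DMA alignment. Below is a userspace sketch of that kind of alignment gate, under the assumption that the test reduces to masking both address and length; buffer_dma_aligned() is illustrative, not the block layer's definition.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the sort of check blk_rq_aligned() stands for --
 * both the user address and the length must respect the queue's DMA
 * alignment mask before zero-copy direct IO is attempted. */
static bool buffer_dma_aligned(const void *addr, size_t len, unsigned long align_mask)
{
	return (((uintptr_t)addr | len) & align_mask) == 0;
}

int main(void)
{
	printf("aligned: %d\n", buffer_dma_aligned((void *)0x10000, 4096, 511));	/* 1 */
	printf("aligned: %d\n", buffer_dma_aligned((void *)0x10003, 4096, 511));	/* 0 */
	return 0;
}

When the gate fails, md points at map_data and the request is backed by the pages built in sg_build_indirect() or linked from the per-fd reserve buffer, exactly as the branch in the hunk above shows.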
1670static void 1695static void
@@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
1678 sg_unlink_reserve(sfp, srp); 1703 sg_unlink_reserve(sfp, srp);
1679 else 1704 else
1680 sg_remove_scat(req_schp); 1705 sg_remove_scat(req_schp);
1706
1707 if (srp->rq) {
1708 if (srp->bio)
1709 blk_rq_unmap_user(srp->bio);
1710
1711 blk_put_request(srp->rq);
1712 }
1713
1681 sg_remove_request(sfp, srp); 1714 sg_remove_request(sfp, srp);
1682} 1715}
1683 1716
1684static int 1717static int
1685sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1718sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1686{ 1719{
1687 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1720 int sg_bufflen = tablesize * sizeof(struct page *);
1688 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1721 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1689 1722
1690 /* 1723 schp->pages = kzalloc(sg_bufflen, gfp_flags);
1691 * TODO: test without low_dma, we should not need it since 1724 if (!schp->pages)
1692 * the block layer will bounce the buffer for us
1693 *
1694 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1695 */
1696 if (sfp->low_dma)
1697 gfp_flags |= GFP_DMA;
1698 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1699 if (!schp->buffer)
1700 return -ENOMEM; 1725 return -ENOMEM;
1701 sg_init_table(schp->buffer, tablesize);
1702 schp->sglist_len = sg_bufflen; 1726 schp->sglist_len = sg_bufflen;
1703 return tablesize; /* number of scat_gath elements allocated */ 1727 return tablesize; /* number of scat_gath elements allocated */
1704} 1728}
1705 1729
1706#ifdef SG_ALLOW_DIO_CODE
1707/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1708 /* TODO: hopefully we can use the generic block layer code */
1709
1710/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1711 - mapping of all pages not successful
1712 (i.e., either completely successful or fails)
1713*/
1714static int
1715st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1716 unsigned long uaddr, size_t count, int rw)
1717{
1718 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1719 unsigned long start = uaddr >> PAGE_SHIFT;
1720 const int nr_pages = end - start;
1721 int res, i, j;
1722 struct page **pages;
1723
1724 /* User attempted Overflow! */
1725 if ((uaddr + count) < uaddr)
1726 return -EINVAL;
1727
1728 /* Too big */
1729 if (nr_pages > max_pages)
1730 return -ENOMEM;
1731
1732 /* Hmm? */
1733 if (count == 0)
1734 return 0;
1735
1736 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1737 return -ENOMEM;
1738
1739 /* Try to fault in all of the necessary pages */
1740 down_read(&current->mm->mmap_sem);
1741 /* rw==READ means read from drive, write into memory area */
1742 res = get_user_pages(
1743 current,
1744 current->mm,
1745 uaddr,
1746 nr_pages,
1747 rw == READ,
1748 0, /* don't force */
1749 pages,
1750 NULL);
1751 up_read(&current->mm->mmap_sem);
1752
1753 /* Errors and no page mapped should return here */
1754 if (res < nr_pages)
1755 goto out_unmap;
1756
1757 for (i=0; i < nr_pages; i++) {
1758 /* FIXME: flush superflous for rw==READ,
1759 * probably wrong function for rw==WRITE
1760 */
1761 flush_dcache_page(pages[i]);
1762 /* ?? Is locking needed? I don't think so */
1763 /* if (!trylock_page(pages[i]))
1764 goto out_unlock; */
1765 }
1766
1767 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
1768 if (nr_pages > 1) {
1769 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1770 count -= sgl[0].length;
1771 for (i=1; i < nr_pages ; i++)
1772 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
1773 }
1774 else {
1775 sgl[0].length = count;
1776 }
1777
1778 kfree(pages);
1779 return nr_pages;
1780
1781 out_unmap:
1782 if (res > 0) {
1783 for (j=0; j < res; j++)
1784 page_cache_release(pages[j]);
1785 res = 0;
1786 }
1787 kfree(pages);
1788 return res;
1789}
1790
1791
1792/* And unmap them... */
1793static int
1794st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1795 int dirtied)
1796{
1797 int i;
1798
1799 for (i=0; i < nr_pages; i++) {
1800 struct page *page = sg_page(&sgl[i]);
1801
1802 if (dirtied)
1803 SetPageDirty(page);
1804 /* unlock_page(page); */
1805 /* FIXME: cache flush missing for rw==READ
1806 * FIXME: call the correct reference counting function
1807 */
1808 page_cache_release(page);
1809 }
1810
1811 return 0;
1812}
1813
1814/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1815#endif
1816
1817
1818/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1819static int
1820sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1821{
1822#ifdef SG_ALLOW_DIO_CODE
1823 sg_io_hdr_t *hp = &srp->header;
1824 Sg_scatter_hold *schp = &srp->data;
1825 int sg_tablesize = sfp->parentdp->sg_tablesize;
1826 int mx_sc_elems, res;
1827 struct scsi_device *sdev = sfp->parentdp->device;
1828
1829 if (((unsigned long)hp->dxferp &
1830 queue_dma_alignment(sdev->request_queue)) != 0)
1831 return 1;
1832
1833 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1834 if (mx_sc_elems <= 0) {
1835 return 1;
1836 }
1837 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1838 (unsigned long)hp->dxferp, dxfer_len,
1839 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1840 if (res <= 0) {
1841 sg_remove_scat(schp);
1842 return 1;
1843 }
1844 schp->k_use_sg = res;
1845 schp->dio_in_use = 1;
1846 hp->info |= SG_INFO_DIRECT_IO;
1847 return 0;
1848#else
1849 return 1;
1850#endif
1851}
1852
1853static int 1730static int
1854sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1731sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1855{ 1732{
1856 struct scatterlist *sg; 1733 int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1857 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1858 int sg_tablesize = sfp->parentdp->sg_tablesize; 1734 int sg_tablesize = sfp->parentdp->sg_tablesize;
1859 int blk_size = buff_size; 1735 int blk_size = buff_size, order;
1860 struct page *p = NULL; 1736 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1861 1737
1862 if (blk_size < 0) 1738 if (blk_size < 0)
1863 return -EFAULT; 1739 return -EFAULT;
@@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1881 } else 1757 } else
1882 scatter_elem_sz_prev = num; 1758 scatter_elem_sz_prev = num;
1883 } 1759 }
1884 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1760
1885 (rem_sz > 0) && (k < mx_sc_elems); 1761 if (sfp->low_dma)
1886 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { 1762 gfp_mask |= GFP_DMA;
1887 1763
1764 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1765 gfp_mask |= __GFP_ZERO;
1766
1767 order = get_order(num);
1768retry:
1769 ret_sz = 1 << (PAGE_SHIFT + order);
1770
1771 for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1772 k++, rem_sz -= ret_sz) {
1773
1888 num = (rem_sz > scatter_elem_sz_prev) ? 1774 num = (rem_sz > scatter_elem_sz_prev) ?
1889 scatter_elem_sz_prev : rem_sz; 1775 scatter_elem_sz_prev : rem_sz;
1890 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1776
1891 if (!p) 1777 schp->pages[k] = alloc_pages(gfp_mask, order);
1892 return -ENOMEM; 1778 if (!schp->pages[k])
1779 goto out;
1893 1780
1894 if (num == scatter_elem_sz_prev) { 1781 if (num == scatter_elem_sz_prev) {
1895 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1782 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1897 scatter_elem_sz_prev = ret_sz; 1784 scatter_elem_sz_prev = ret_sz;
1898 } 1785 }
1899 } 1786 }
1900 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1901 1787
1902 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1788 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1903 "ret_sz=%d\n", k, num, ret_sz)); 1789 "ret_sz=%d\n", k, num, ret_sz));
1904 } /* end of for loop */ 1790 } /* end of for loop */
1905 1791
1792 schp->page_order = order;
1906 schp->k_use_sg = k; 1793 schp->k_use_sg = k;
1907 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1794 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1908 "rem_sz=%d\n", k, rem_sz)); 1795 "rem_sz=%d\n", k, rem_sz));
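
Note: sg_build_indirect() now asks alloc_pages() for one block of 2^order pages per segment, with the order derived from the preferred element size via get_order(); if an allocation fails it falls back to a smaller order and retries, trading fewer large segments for more small ones. Every successful segment therefore has the same length, 1 << (PAGE_SHIFT + order), which is what the mmap/fault paths above and the read-out path below rely on. A runnable sketch of the order arithmetic and the fallback, assuming 4 KiB pages; order_for() is a stand-in for get_order(), not the kernel function.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Illustrative stand-in for get_order(): smallest order such that
 * (PAGE_SIZE << order) >= size. */
static int order_for(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long want = 32769;		/* just over 32 KiB */
	int order = order_for(want);

	printf("requested %lu bytes -> order %d (%lu bytes per segment)\n",
	       want, order, PAGE_SIZE << order);

	/* On allocation failure the driver retries with a smaller order,
	 * accepting more, smaller segments instead of failing outright. */
	while (--order >= 0)
		printf("fallback order %d -> %lu bytes per segment\n",
		       order, PAGE_SIZE << order);
	return 0;
}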
@@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1910 schp->bufflen = blk_size; 1797 schp->bufflen = blk_size;
1911 if (rem_sz > 0) /* must have failed */ 1798 if (rem_sz > 0) /* must have failed */
1912 return -ENOMEM; 1799 return -ENOMEM;
1913
1914 return 0; 1800 return 0;
1915} 1801out:
1916 1802 for (i = 0; i < k; i++)
1917static int 1803 __free_pages(schp->pages[k], order);
1918sg_write_xfer(Sg_request * srp)
1919{
1920 sg_io_hdr_t *hp = &srp->header;
1921 Sg_scatter_hold *schp = &srp->data;
1922 struct scatterlist *sg = schp->buffer;
1923 int num_xfer = 0;
1924 int j, k, onum, usglen, ksglen, res;
1925 int iovec_count = (int) hp->iovec_count;
1926 int dxfer_dir = hp->dxfer_direction;
1927 unsigned char *p;
1928 unsigned char __user *up;
1929 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1930
1931 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1932 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1933 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1934 if (schp->bufflen < num_xfer)
1935 num_xfer = schp->bufflen;
1936 }
1937 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1938 (new_interface
1939 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1940 return 0;
1941
1942 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1943 num_xfer, iovec_count, schp->k_use_sg));
1944 if (iovec_count) {
1945 onum = iovec_count;
1946 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1947 return -EFAULT;
1948 } else
1949 onum = 1;
1950
1951 ksglen = sg->length;
1952 p = page_address(sg_page(sg));
1953 for (j = 0, k = 0; j < onum; ++j) {
1954 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1955 if (res)
1956 return res;
1957
1958 for (; p; sg = sg_next(sg), ksglen = sg->length,
1959 p = page_address(sg_page(sg))) {
1960 if (usglen <= 0)
1961 break;
1962 if (ksglen > usglen) {
1963 if (usglen >= num_xfer) {
1964 if (__copy_from_user(p, up, num_xfer))
1965 return -EFAULT;
1966 return 0;
1967 }
1968 if (__copy_from_user(p, up, usglen))
1969 return -EFAULT;
1970 p += usglen;
1971 ksglen -= usglen;
1972 break;
1973 } else {
1974 if (ksglen >= num_xfer) {
1975 if (__copy_from_user(p, up, num_xfer))
1976 return -EFAULT;
1977 return 0;
1978 }
1979 if (__copy_from_user(p, up, ksglen))
1980 return -EFAULT;
1981 up += ksglen;
1982 usglen -= ksglen;
1983 }
1984 ++k;
1985 if (k >= schp->k_use_sg)
1986 return 0;
1987 }
1988 }
1989
1990 return 0;
1991}
1992 1804
1993static int 1805 if (--order >= 0)
1994sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1806 goto retry;
1995 int wr_xf, int *countp, unsigned char __user **up)
1996{
1997 int num_xfer = (int) hp->dxfer_len;
1998 unsigned char __user *p = hp->dxferp;
1999 int count;
2000 1807
2001 if (0 == sg_num) { 1808 return -ENOMEM;
2002 if (wr_xf && ('\0' == hp->interface_id))
2003 count = (int) hp->flags; /* holds "old" input_size */
2004 else
2005 count = num_xfer;
2006 } else {
2007 sg_iovec_t iovec;
2008 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2009 return -EFAULT;
2010 p = iovec.iov_base;
2011 count = (int) iovec.iov_len;
2012 }
2013 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2014 return -EFAULT;
2015 if (up)
2016 *up = p;
2017 if (countp)
2018 *countp = count;
2019 return 0;
2020} 1809}
2021 1810
2022static void 1811static void
2023sg_remove_scat(Sg_scatter_hold * schp) 1812sg_remove_scat(Sg_scatter_hold * schp)
2024{ 1813{
2025 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 1814 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2026 if (schp->buffer && (schp->sglist_len > 0)) { 1815 if (schp->pages && schp->sglist_len > 0) {
2027 struct scatterlist *sg = schp->buffer; 1816 if (!schp->dio_in_use) {
2028
2029 if (schp->dio_in_use) {
2030#ifdef SG_ALLOW_DIO_CODE
2031 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2032#endif
2033 } else {
2034 int k; 1817 int k;
2035 1818
2036 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); 1819 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2037 ++k, sg = sg_next(sg)) {
2038 SCSI_LOG_TIMEOUT(5, printk( 1820 SCSI_LOG_TIMEOUT(5, printk(
2039 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1821 "sg_remove_scat: k=%d, pg=0x%p\n",
2040 k, sg_page(sg), sg->length)); 1822 k, schp->pages[k]));
2041 sg_page_free(sg_page(sg), sg->length); 1823 __free_pages(schp->pages[k], schp->page_order);
2042 } 1824 }
2043 }
2044 kfree(schp->buffer);
2045 }
2046 memset(schp, 0, sizeof (*schp));
2047}
2048 1825
2049static int 1826 kfree(schp->pages);
2050sg_read_xfer(Sg_request * srp)
2051{
2052 sg_io_hdr_t *hp = &srp->header;
2053 Sg_scatter_hold *schp = &srp->data;
2054 struct scatterlist *sg = schp->buffer;
2055 int num_xfer = 0;
2056 int j, k, onum, usglen, ksglen, res;
2057 int iovec_count = (int) hp->iovec_count;
2058 int dxfer_dir = hp->dxfer_direction;
2059 unsigned char *p;
2060 unsigned char __user *up;
2061 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2062
2063 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2064 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2065 num_xfer = hp->dxfer_len;
2066 if (schp->bufflen < num_xfer)
2067 num_xfer = schp->bufflen;
2068 }
2069 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2070 (new_interface
2071 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2072 return 0;
2073
2074 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2075 num_xfer, iovec_count, schp->k_use_sg));
2076 if (iovec_count) {
2077 onum = iovec_count;
2078 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2079 return -EFAULT;
2080 } else
2081 onum = 1;
2082
2083 p = page_address(sg_page(sg));
2084 ksglen = sg->length;
2085 for (j = 0, k = 0; j < onum; ++j) {
2086 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2087 if (res)
2088 return res;
2089
2090 for (; p; sg = sg_next(sg), ksglen = sg->length,
2091 p = page_address(sg_page(sg))) {
2092 if (usglen <= 0)
2093 break;
2094 if (ksglen > usglen) {
2095 if (usglen >= num_xfer) {
2096 if (__copy_to_user(up, p, num_xfer))
2097 return -EFAULT;
2098 return 0;
2099 }
2100 if (__copy_to_user(up, p, usglen))
2101 return -EFAULT;
2102 p += usglen;
2103 ksglen -= usglen;
2104 break;
2105 } else {
2106 if (ksglen >= num_xfer) {
2107 if (__copy_to_user(up, p, num_xfer))
2108 return -EFAULT;
2109 return 0;
2110 }
2111 if (__copy_to_user(up, p, ksglen))
2112 return -EFAULT;
2113 up += ksglen;
2114 usglen -= ksglen;
2115 }
2116 ++k;
2117 if (k >= schp->k_use_sg)
2118 return 0;
2119 } 1827 }
2120 } 1828 }
2121 1829 memset(schp, 0, sizeof (*schp));
2122 return 0;
2123} 1830}
2124 1831
2125static int 1832static int
2126sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 1833sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2127{ 1834{
2128 Sg_scatter_hold *schp = &srp->data; 1835 Sg_scatter_hold *schp = &srp->data;
2129 struct scatterlist *sg = schp->buffer;
2130 int k, num; 1836 int k, num;
2131 1837
2132 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 1838 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2134 if ((!outp) || (num_read_xfer <= 0)) 1840 if ((!outp) || (num_read_xfer <= 0))
2135 return 0; 1841 return 0;
2136 1842
2137 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { 1843 num = 1 << (PAGE_SHIFT + schp->page_order);
2138 num = sg->length; 1844 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2139 if (num > num_read_xfer) { 1845 if (num > num_read_xfer) {
2140 if (__copy_to_user(outp, page_address(sg_page(sg)), 1846 if (__copy_to_user(outp, page_address(schp->pages[k]),
2141 num_read_xfer)) 1847 num_read_xfer))
2142 return -EFAULT; 1848 return -EFAULT;
2143 break; 1849 break;
2144 } else { 1850 } else {
2145 if (__copy_to_user(outp, page_address(sg_page(sg)), 1851 if (__copy_to_user(outp, page_address(schp->pages[k]),
2146 num)) 1852 num))
2147 return -EFAULT; 1853 return -EFAULT;
2148 num_read_xfer -= num; 1854 num_read_xfer -= num;
@@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2177{ 1883{
2178 Sg_scatter_hold *req_schp = &srp->data; 1884 Sg_scatter_hold *req_schp = &srp->data;
2179 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1885 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2180 struct scatterlist *sg = rsv_schp->buffer;
2181 int k, num, rem; 1886 int k, num, rem;
2182 1887
2183 srp->res_used = 1; 1888 srp->res_used = 1;
2184 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 1889 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2185 rem = size; 1890 rem = size;
2186 1891
2187 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { 1892 num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2188 num = sg->length; 1893 for (k = 0; k < rsv_schp->k_use_sg; k++) {
2189 if (rem <= num) { 1894 if (rem <= num) {
2190 sfp->save_scat_len = num;
2191 sg->length = rem;
2192 req_schp->k_use_sg = k + 1; 1895 req_schp->k_use_sg = k + 1;
2193 req_schp->sglist_len = rsv_schp->sglist_len; 1896 req_schp->sglist_len = rsv_schp->sglist_len;
2194 req_schp->buffer = rsv_schp->buffer; 1897 req_schp->pages = rsv_schp->pages;
2195 1898
2196 req_schp->bufflen = size; 1899 req_schp->bufflen = size;
2197 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 1900 req_schp->page_order = rsv_schp->page_order;
2198 break; 1901 break;
2199 } else 1902 } else
2200 rem -= num; 1903 rem -= num;
@@ -2208,22 +1911,13 @@ static void
2208sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 1911sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2209{ 1912{
2210 Sg_scatter_hold *req_schp = &srp->data; 1913 Sg_scatter_hold *req_schp = &srp->data;
2211 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2212 1914
2213 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 1915 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2214 (int) req_schp->k_use_sg)); 1916 (int) req_schp->k_use_sg));
2215 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2216 struct scatterlist *sg = rsv_schp->buffer;
2217
2218 if (sfp->save_scat_len > 0)
2219 (sg + (req_schp->k_use_sg - 1))->length =
2220 (unsigned) sfp->save_scat_len;
2221 else
2222 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2223 }
2224 req_schp->k_use_sg = 0; 1917 req_schp->k_use_sg = 0;
2225 req_schp->bufflen = 0; 1918 req_schp->bufflen = 0;
2226 req_schp->buffer = NULL; 1919 req_schp->pages = NULL;
1920 req_schp->page_order = 0;
2227 req_schp->sglist_len = 0; 1921 req_schp->sglist_len = 0;
2228 sfp->save_scat_len = 0; 1922 sfp->save_scat_len = 0;
2229 srp->res_used = 0; 1923 srp->res_used = 0;
@@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
2481 return srp ? 1 : 0; 2175 return srp ? 1 : 0;
2482} 2176}
2483 2177
2484/* The size fetched (value output via retSzp) set when non-NULL return */
2485static struct page *
2486sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2487{
2488 struct page *resp = NULL;
2489 gfp_t page_mask;
2490 int order, a_size;
2491 int resSz;
2492
2493 if ((rqSz <= 0) || (NULL == retSzp))
2494 return resp;
2495
2496 if (lowDma)
2497 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2498 else
2499 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2500
2501 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2502 order++, a_size <<= 1) ;
2503 resSz = a_size; /* rounded up if necessary */
2504 resp = alloc_pages(page_mask, order);
2505 while ((!resp) && order) {
2506 --order;
2507 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2508 resp = alloc_pages(page_mask, order); /* try half */
2509 resSz = a_size;
2510 }
2511 if (resp) {
2512 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2513 memset(page_address(resp), 0, resSz);
2514 *retSzp = resSz;
2515 }
2516 return resp;
2517}
2518
2519static void
2520sg_page_free(struct page *page, int size)
2521{
2522 int order, a_size;
2523
2524 if (!page)
2525 return;
2526 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2527 order++, a_size <<= 1) ;
2528 __free_pages(page, order);
2529}
2530
2531#ifdef CONFIG_SCSI_PROC_FS 2178#ifdef CONFIG_SCSI_PROC_FS
2532static int 2179static int
2533sg_idr_max_id(int id, void *p, void *data) 2180sg_idr_max_id(int id, void *p, void *data)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def3..0f17009c99d2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
331 331
332static int sr_prep_fn(struct request_queue *q, struct request *rq) 332static int sr_prep_fn(struct request_queue *q, struct request *rq)
333{ 333{
334 int block=0, this_count, s_size, timeout = SR_TIMEOUT; 334 int block = 0, this_count, s_size;
335 struct scsi_cd *cd; 335 struct scsi_cd *cd;
336 struct scsi_cmnd *SCpnt; 336 struct scsi_cmnd *SCpnt;
337 struct scsi_device *sdp = q->queuedata; 337 struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
461 SCpnt->transfersize = cd->device->sector_size; 461 SCpnt->transfersize = cd->device->sector_size;
462 SCpnt->underflow = this_count << 9; 462 SCpnt->underflow = this_count << 9;
463 SCpnt->allowed = MAX_RETRIES; 463 SCpnt->allowed = MAX_RETRIES;
464 SCpnt->timeout_per_command = timeout;
465 464
466 /* 465 /*
467 * This indicates that the command is ready from our end to be 466 * This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
620 disk->fops = &sr_bdops; 619 disk->fops = &sr_bdops;
621 disk->flags = GENHD_FL_CD; 620 disk->flags = GENHD_FL_CD;
622 621
622 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
623
623 cd->device = sdev; 624 cd->device = sdev;
624 cd->disk = disk; 625 cd->disk = disk;
625 cd->driver = &sr_template; 626 cd->driver = &sr_template;
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
878 struct gendisk *disk = cd->disk; 879 struct gendisk *disk = cd->disk;
879 880
880 spin_lock(&sr_index_lock); 881 spin_lock(&sr_index_lock);
881 clear_bit(disk->first_minor, sr_index_bits); 882 clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
882 spin_unlock(&sr_index_lock); 883 spin_unlock(&sr_index_lock);
883 884
884 unregister_cdrom(&cd->cdi); 885 unregister_cdrom(&cd->cdi);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669b..f4e6cde1fd0d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
519 * Shorten our settle_time if needed for 519 * Shorten our settle_time if needed for
520 * this command not to time out. 520 * this command not to time out.
521 */ 521 */
522 if (np->s.settle_time_valid && cmd->timeout_per_command) { 522 if (np->s.settle_time_valid && cmd->request->timeout) {
523 unsigned long tlimit = jiffies + cmd->timeout_per_command; 523 unsigned long tlimit = jiffies + cmd->request->timeout;
524 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 524 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
525 if (time_after(np->s.settle_time, tlimit)) { 525 if (time_after(np->s.settle_time, tlimit)) {
526 np->s.settle_time = tlimit; 526 np->s.settle_time = tlimit;