39 files changed, 399 insertions, 339 deletions
diff --git a/block/Makefile b/block/Makefile
index 0da976ce67dd..bfe73049f939 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -4,8 +4,8 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ | 5 | obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ |
6 | blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ | 6 | blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ |
7 | blk-exec.o blk-merge.o blk-softirq.o ioctl.o genhd.o \ | 7 | blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ |
8 | scsi_ioctl.o cmd-filter.o | 8 | ioctl.o genhd.o scsi_ioctl.o cmd-filter.o |
9 | 9 | ||
10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o | 10 | obj-$(CONFIG_BLK_DEV_BSG) += bsg.o |
11 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o | 11 | obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o |
diff --git a/block/blk-core.c b/block/blk-core.c
index f25eb9786d94..d768a8ddc173 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -110,6 +110,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) | |||
110 | memset(rq, 0, sizeof(*rq)); | 110 | memset(rq, 0, sizeof(*rq)); |
111 | 111 | ||
112 | INIT_LIST_HEAD(&rq->queuelist); | 112 | INIT_LIST_HEAD(&rq->queuelist); |
113 | INIT_LIST_HEAD(&rq->timeout_list); | ||
113 | rq->cpu = -1; | 114 | rq->cpu = -1; |
114 | rq->q = q; | 115 | rq->q = q; |
115 | rq->sector = rq->hard_sector = (sector_t) -1; | 116 | rq->sector = rq->hard_sector = (sector_t) -1; |
@@ -490,6 +491,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
490 | } | 491 | } |
491 | 492 | ||
492 | init_timer(&q->unplug_timer); | 493 | init_timer(&q->unplug_timer); |
494 | setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); | ||
495 | INIT_LIST_HEAD(&q->timeout_list); | ||
493 | 496 | ||
494 | kobject_init(&q->kobj, &blk_queue_ktype); | 497 | kobject_init(&q->kobj, &blk_queue_ktype); |
495 | 498 | ||
@@ -897,6 +900,8 @@ EXPORT_SYMBOL(blk_start_queueing); | |||
897 | */ | 900 | */ |
898 | void blk_requeue_request(struct request_queue *q, struct request *rq) | 901 | void blk_requeue_request(struct request_queue *q, struct request *rq) |
899 | { | 902 | { |
903 | blk_delete_timer(rq); | ||
904 | blk_clear_rq_complete(rq); | ||
900 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | 905 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); |
901 | 906 | ||
902 | if (blk_rq_tagged(rq)) | 907 | if (blk_rq_tagged(rq)) |
@@ -1650,6 +1655,8 @@ static void end_that_request_last(struct request *req, int error) | |||
1650 | { | 1655 | { |
1651 | struct gendisk *disk = req->rq_disk; | 1656 | struct gendisk *disk = req->rq_disk; |
1652 | 1657 | ||
1658 | blk_delete_timer(req); | ||
1659 | |||
1653 | if (blk_rq_tagged(req)) | 1660 | if (blk_rq_tagged(req)) |
1654 | blk_queue_end_tag(req->q, req); | 1661 | blk_queue_end_tag(req->q, req); |
1655 | 1662 | ||
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d70692badcdb..1d0330d0b40a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -77,6 +77,18 @@ void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) | |||
77 | } | 77 | } |
78 | EXPORT_SYMBOL(blk_queue_softirq_done); | 78 | EXPORT_SYMBOL(blk_queue_softirq_done); |
79 | 79 | ||
80 | void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) | ||
81 | { | ||
82 | q->rq_timeout = timeout; | ||
83 | } | ||
84 | EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); | ||
85 | |||
86 | void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) | ||
87 | { | ||
88 | q->rq_timed_out_fn = fn; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); | ||
91 | |||
80 | /** | 92 | /** |
81 | * blk_queue_make_request - define an alternate make_request function for a device | 93 | * blk_queue_make_request - define an alternate make_request function for a device |
82 | * @q: the request queue for the device to be affected | 94 | * @q: the request queue for the device to be affected |
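A minimal usage sketch for the two new setters above (illustrative only, not part of this patch; the driver name and the 30-second value are hypothetical). A block driver opts into the generic timeout handling by registering a default per-request timeout and a handler at queue setup time; the handler receives the expired request and returns one of the BLK_EH_* values:

	/* hypothetical driver code, sketching the new API */
	static enum blk_eh_timer_return mydrv_rq_timed_out(struct request *rq)
	{
		/* decide whether to complete, retry or escalate the request */
		return BLK_EH_RESET_TIMER;	/* e.g. give the command more time */
	}

	/* during queue initialization */
	blk_queue_rq_timeout(q, 30 * HZ);		/* default per-request timeout */
	blk_queue_rq_timed_out(q, mydrv_rq_timed_out);	/* called when a request expires */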
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 3a1af551191e..7ab344afb16f 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -101,18 +101,7 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = { | |||
101 | .notifier_call = blk_cpu_notify, | 101 | .notifier_call = blk_cpu_notify, |
102 | }; | 102 | }; |
103 | 103 | ||
104 | /** | 104 | void __blk_complete_request(struct request *req) |
105 | * blk_complete_request - end I/O on a request | ||
106 | * @req: the request being processed | ||
107 | * | ||
108 | * Description: | ||
109 | * Ends all I/O on a request. It does not handle partial completions, | ||
110 | * unless the driver actually implements this in its completion callback | ||
111 | * through requeueing. The actual completion happens out-of-order, | ||
112 | * through a softirq handler. The user must have registered a completion | ||
113 | * callback through blk_queue_softirq_done(). | ||
114 | **/ | ||
115 | void blk_complete_request(struct request *req) | ||
116 | { | 105 | { |
117 | struct request_queue *q = req->q; | 106 | struct request_queue *q = req->q; |
118 | unsigned long flags; | 107 | unsigned long flags; |
@@ -151,6 +140,23 @@ do_local: | |||
151 | 140 | ||
152 | local_irq_restore(flags); | 141 | local_irq_restore(flags); |
153 | } | 142 | } |
143 | |||
144 | /** | ||
145 | * blk_complete_request - end I/O on a request | ||
146 | * @req: the request being processed | ||
147 | * | ||
148 | * Description: | ||
149 | * Ends all I/O on a request. It does not handle partial completions, | ||
150 | * unless the driver actually implements this in its completion callback | ||
151 | * through requeueing. The actual completion happens out-of-order, | ||
152 | * through a softirq handler. The user must have registered a completion | ||
153 | * callback through blk_queue_softirq_done(). | ||
154 | **/ | ||
155 | void blk_complete_request(struct request *req) | ||
156 | { | ||
157 | if (!blk_mark_rq_complete(req)) | ||
158 | __blk_complete_request(req); | ||
159 | } | ||
154 | EXPORT_SYMBOL(blk_complete_request); | 160 | EXPORT_SYMBOL(blk_complete_request); |
155 | 161 | ||
156 | __init int blk_softirq_init(void) | 162 | __init int blk_softirq_init(void) |
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
new file mode 100644
index 000000000000..b36d07bf0afb
--- /dev/null
+++ b/block/blk-timeout.c
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * Functions related to generic timeout handling of requests. | ||
3 | */ | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/blkdev.h> | ||
7 | |||
8 | #include "blk.h" | ||
9 | |||
10 | /* | ||
11 | * blk_delete_timer - Delete/cancel timer for a given request. | ||
12 | * @req: request that we are canceling timer for | ||
13 | * | ||
14 | */ | ||
15 | void blk_delete_timer(struct request *req) | ||
16 | { | ||
17 | struct request_queue *q = req->q; | ||
18 | |||
19 | /* | ||
20 | * Nothing to detach | ||
21 | */ | ||
22 | if (!q->rq_timed_out_fn || !req->deadline) | ||
23 | return; | ||
24 | |||
25 | list_del_init(&req->timeout_list); | ||
26 | |||
27 | if (list_empty(&q->timeout_list)) | ||
28 | del_timer(&q->timeout); | ||
29 | } | ||
30 | |||
31 | static void blk_rq_timed_out(struct request *req) | ||
32 | { | ||
33 | struct request_queue *q = req->q; | ||
34 | enum blk_eh_timer_return ret; | ||
35 | |||
36 | ret = q->rq_timed_out_fn(req); | ||
37 | switch (ret) { | ||
38 | case BLK_EH_HANDLED: | ||
39 | __blk_complete_request(req); | ||
40 | break; | ||
41 | case BLK_EH_RESET_TIMER: | ||
42 | blk_clear_rq_complete(req); | ||
43 | blk_add_timer(req); | ||
44 | break; | ||
45 | case BLK_EH_NOT_HANDLED: | ||
46 | /* | ||
47 | * LLD handles this for now but in the future | ||
48 | * we can send a request msg to abort the command | ||
49 | * and we can move more of the generic scsi eh code to | ||
50 | * the blk layer. | ||
51 | */ | ||
52 | break; | ||
53 | default: | ||
54 | printk(KERN_ERR "block: bad eh return: %d\n", ret); | ||
55 | break; | ||
56 | } | ||
57 | } | ||
58 | |||
59 | void blk_rq_timed_out_timer(unsigned long data) | ||
60 | { | ||
61 | struct request_queue *q = (struct request_queue *) data; | ||
62 | unsigned long flags, uninitialized_var(next), next_set = 0; | ||
63 | struct request *rq, *tmp; | ||
64 | |||
65 | spin_lock_irqsave(q->queue_lock, flags); | ||
66 | |||
67 | list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) { | ||
68 | if (time_after_eq(jiffies, rq->deadline)) { | ||
69 | list_del_init(&rq->timeout_list); | ||
70 | |||
71 | /* | ||
72 | * Check if we raced with end io completion | ||
73 | */ | ||
74 | if (blk_mark_rq_complete(rq)) | ||
75 | continue; | ||
76 | blk_rq_timed_out(rq); | ||
77 | } | ||
78 | if (!next_set) { | ||
79 | next = rq->deadline; | ||
80 | next_set = 1; | ||
81 | } else if (time_after(next, rq->deadline)) | ||
82 | next = rq->deadline; | ||
83 | } | ||
84 | |||
85 | if (next_set && !list_empty(&q->timeout_list)) | ||
86 | mod_timer(&q->timeout, round_jiffies(next)); | ||
87 | |||
88 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * blk_abort_request -- Request request recovery for the specified command | ||
93 | * @req: pointer to the request of interest | ||
94 | * | ||
95 | * This function requests that the block layer start recovery for the | ||
96 | * request by deleting the timer and calling the q's timeout function. | ||
97 | * LLDDs who implement their own error recovery MAY ignore the timeout | ||
98 | * event if they generated blk_abort_request. Must hold queue lock. | ||
99 | */ | ||
100 | void blk_abort_request(struct request *req) | ||
101 | { | ||
102 | blk_delete_timer(req); | ||
103 | blk_rq_timed_out(req); | ||
104 | } | ||
105 | EXPORT_SYMBOL_GPL(blk_abort_request); | ||
106 | |||
107 | /** | ||
108 | * blk_add_timer - Start timeout timer for a single request | ||
109 | * @req: request that is about to start running. | ||
110 | * | ||
111 | * Notes: | ||
112 | * Each request has its own timer, and as it is added to the queue, we | ||
113 | * set up the timer. When the request completes, we cancel the timer. | ||
114 | */ | ||
115 | void blk_add_timer(struct request *req) | ||
116 | { | ||
117 | struct request_queue *q = req->q; | ||
118 | unsigned long expiry; | ||
119 | |||
120 | if (!q->rq_timed_out_fn) | ||
121 | return; | ||
122 | |||
123 | BUG_ON(!list_empty(&req->timeout_list)); | ||
124 | BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); | ||
125 | |||
126 | if (req->timeout) | ||
127 | req->deadline = jiffies + req->timeout; | ||
128 | else { | ||
129 | req->deadline = jiffies + q->rq_timeout; | ||
130 | /* | ||
131 | * Some LLDs, like scsi, peek at the timeout to prevent | ||
132 | * a command from being retried forever. | ||
133 | */ | ||
134 | req->timeout = q->rq_timeout; | ||
135 | } | ||
136 | list_add_tail(&req->timeout_list, &q->timeout_list); | ||
137 | |||
138 | /* | ||
139 | * If the timer isn't already pending or this timeout is earlier | ||
140 | * than an existing one, modify the timer. Round to next nearest | ||
141 | * second. | ||
142 | */ | ||
143 | expiry = round_jiffies(req->deadline); | ||
144 | |||
145 | /* | ||
146 | * We use ->deadline == 0 to detect whether a timer was added or | ||
147 | * not, so just increase to next jiffy for that specific case | ||
148 | */ | ||
149 | if (unlikely(!req->deadline)) | ||
150 | req->deadline = 1; | ||
151 | |||
152 | if (!timer_pending(&q->timeout) || | ||
153 | time_before(expiry, q->timeout.expires)) | ||
154 | mod_timer(&q->timeout, expiry); | ||
155 | } | ||
diff --git a/block/blk.h b/block/blk.h
index de74254cb916..a4f4a50aefaa 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,30 @@ void __blk_queue_free_tags(struct request_queue *q); | |||
17 | 17 | ||
18 | void blk_unplug_work(struct work_struct *work); | 18 | void blk_unplug_work(struct work_struct *work); |
19 | void blk_unplug_timeout(unsigned long data); | 19 | void blk_unplug_timeout(unsigned long data); |
20 | void blk_rq_timed_out_timer(unsigned long data); | ||
21 | void blk_delete_timer(struct request *); | ||
22 | void blk_add_timer(struct request *); | ||
23 | |||
24 | /* | ||
25 | * Internal atomic flags for request handling | ||
26 | */ | ||
27 | enum rq_atomic_flags { | ||
28 | REQ_ATOM_COMPLETE = 0, | ||
29 | }; | ||
30 | |||
31 | /* | ||
32 | * EH timer and IO completion will both attempt to 'grab' the request, make | ||
33 | * sure that only one of them succeeds | ||
34 | */ | ||
35 | static inline int blk_mark_rq_complete(struct request *rq) | ||
36 | { | ||
37 | return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); | ||
38 | } | ||
39 | |||
40 | static inline void blk_clear_rq_complete(struct request *rq) | ||
41 | { | ||
42 | clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); | ||
43 | } | ||
20 | 44 | ||
21 | struct io_context *current_io_context(gfp_t gfp_flags, int node); | 45 | struct io_context *current_io_context(gfp_t gfp_flags, int node); |
22 | 46 | ||
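The REQ_ATOM_COMPLETE bit defined above is what arbitrates the race between normal I/O completion and the timeout timer: both paths call blk_mark_rq_complete(), and only the caller that actually sets the bit gets to act on the request. Schematically, mirroring the blk-softirq.c and blk-timeout.c hunks above:

	/* completion side (blk_complete_request) */
	if (!blk_mark_rq_complete(req))
		__blk_complete_request(req);	/* we won the race, finish the request */

	/* timeout side, inside the expired-request walk in blk_rq_timed_out_timer() */
	if (blk_mark_rq_complete(rq))
		continue;			/* completion got there first, skip it */
	blk_rq_timed_out(rq);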
diff --git a/block/elevator.c b/block/elevator.c
index 8e3fc3afc77b..a91fc59edd01 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -36,6 +36,8 @@ | |||
36 | #include <linux/hash.h> | 36 | #include <linux/hash.h> |
37 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
38 | 38 | ||
39 | #include "blk.h" | ||
40 | |||
39 | static DEFINE_SPINLOCK(elv_list_lock); | 41 | static DEFINE_SPINLOCK(elv_list_lock); |
40 | static LIST_HEAD(elv_list); | 42 | static LIST_HEAD(elv_list); |
41 | 43 | ||
@@ -771,6 +773,12 @@ struct request *elv_next_request(struct request_queue *q) | |||
771 | */ | 773 | */ |
772 | rq->cmd_flags |= REQ_STARTED; | 774 | rq->cmd_flags |= REQ_STARTED; |
773 | blk_add_trace_rq(q, rq, BLK_TA_ISSUE); | 775 | blk_add_trace_rq(q, rq, BLK_TA_ISSUE); |
776 | |||
777 | /* | ||
778 | * We are now handing the request to the hardware, | ||
779 | * add the timeout handler | ||
780 | */ | ||
781 | blk_add_timer(rq); | ||
774 | } | 782 | } |
775 | 783 | ||
776 | if (!q->boundary_rq || q->boundary_rq == rq) { | 784 | if (!q->boundary_rq || q->boundary_rq == rq) { |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f234d2e..bd0b2bc76f10 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@ | |||
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/blkdev.h> | ||
36 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
37 | #include <scsi/scsi.h> | 38 | #include <scsi/scsi.h> |
38 | #include <scsi/scsi_host.h> | 39 | #include <scsi/scsi_host.h> |
@@ -457,29 +458,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, | |||
457 | * RETURNS: | 458 | * RETURNS: |
458 | * EH_HANDLED or EH_NOT_HANDLED | 459 | * EH_HANDLED or EH_NOT_HANDLED |
459 | */ | 460 | */ |
460 | enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) | 461 | enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) |
461 | { | 462 | { |
462 | struct Scsi_Host *host = cmd->device->host; | 463 | struct Scsi_Host *host = cmd->device->host; |
463 | struct ata_port *ap = ata_shost_to_port(host); | 464 | struct ata_port *ap = ata_shost_to_port(host); |
464 | unsigned long flags; | 465 | unsigned long flags; |
465 | struct ata_queued_cmd *qc; | 466 | struct ata_queued_cmd *qc; |
466 | enum scsi_eh_timer_return ret; | 467 | enum blk_eh_timer_return ret; |
467 | 468 | ||
468 | DPRINTK("ENTER\n"); | 469 | DPRINTK("ENTER\n"); |
469 | 470 | ||
470 | if (ap->ops->error_handler) { | 471 | if (ap->ops->error_handler) { |
471 | ret = EH_NOT_HANDLED; | 472 | ret = BLK_EH_NOT_HANDLED; |
472 | goto out; | 473 | goto out; |
473 | } | 474 | } |
474 | 475 | ||
475 | ret = EH_HANDLED; | 476 | ret = BLK_EH_HANDLED; |
476 | spin_lock_irqsave(ap->lock, flags); | 477 | spin_lock_irqsave(ap->lock, flags); |
477 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | 478 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
478 | if (qc) { | 479 | if (qc) { |
479 | WARN_ON(qc->scsicmd != cmd); | 480 | WARN_ON(qc->scsicmd != cmd); |
480 | qc->flags |= ATA_QCFLAG_EH_SCHEDULED; | 481 | qc->flags |= ATA_QCFLAG_EH_SCHEDULED; |
481 | qc->err_mask |= AC_ERR_TIMEOUT; | 482 | qc->err_mask |= AC_ERR_TIMEOUT; |
482 | ret = EH_NOT_HANDLED; | 483 | ret = BLK_EH_NOT_HANDLED; |
483 | } | 484 | } |
484 | spin_unlock_irqrestore(ap->lock, flags); | 485 | spin_unlock_irqrestore(ap->lock, flags); |
485 | 486 | ||
@@ -831,7 +832,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) | |||
831 | * Note that ATA_QCFLAG_FAILED is unconditionally set after | 832 | * Note that ATA_QCFLAG_FAILED is unconditionally set after |
832 | * this function completes. | 833 | * this function completes. |
833 | */ | 834 | */ |
834 | scsi_req_abort_cmd(qc->scsicmd); | 835 | blk_abort_request(qc->scsicmd->request); |
835 | } | 836 | } |
836 | 837 | ||
837 | /** | 838 | /** |
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ade5c75b6144..24f5005478b0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -152,7 +152,7 @@ extern int ata_bus_probe(struct ata_port *ap); | |||
152 | /* libata-eh.c */ | 152 | /* libata-eh.c */ |
153 | extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); | 153 | extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); |
154 | extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); | 154 | extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); |
155 | extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); | 155 | extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); |
156 | extern void ata_scsi_error(struct Scsi_Host *host); | 156 | extern void ata_scsi_error(struct Scsi_Host *host); |
157 | extern void ata_port_wait_eh(struct ata_port *ap); | 157 | extern void ata_port_wait_eh(struct ata_port *ap); |
158 | extern void ata_eh_fastdrain_timerfn(unsigned long arg); | 158 | extern void ata_eh_fastdrain_timerfn(unsigned long arg); |
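The remaining hunks repeat the same mechanical conversion in each SCSI LLD: eh_timed_out handlers keep their scsi_cmnd argument but now return the block-layer enum, and reads of cmd->timeout_per_command become cmd->request->timeout. Roughly, a converted handler looks like the sketch below (hypothetical driver and helper, not from this patch):

	static enum blk_eh_timer_return mydrv_eh_timed_out(struct scsi_cmnd *cmd)
	{
		if (mydrv_cmd_still_in_flight(cmd))	/* hypothetical helper */
			return BLK_EH_RESET_TIMER;	/* hardware is making progress, wait longer */
		return BLK_EH_NOT_HANDLED;		/* hand the command to the SCSI error handler */
	}

	/* hooked up through the host template, as gdth does below: */
	/* .eh_timed_out = mydrv_eh_timed_out, */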
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd | |||
1139 | srbcmd->id = cpu_to_le32(scmd_id(cmd)); | 1139 | srbcmd->id = cpu_to_le32(scmd_id(cmd)); |
1140 | srbcmd->lun = cpu_to_le32(cmd->device->lun); | 1140 | srbcmd->lun = cpu_to_le32(cmd->device->lun); |
1141 | srbcmd->flags = cpu_to_le32(flag); | 1141 | srbcmd->flags = cpu_to_le32(flag); |
1142 | timeout = cmd->timeout_per_command/HZ; | 1142 | timeout = cmd->request->timeout/HZ; |
1143 | if (timeout == 0) | 1143 | if (timeout == 0) |
1144 | timeout = 1; | 1144 | timeout = 1; |
1145 | srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds | 1145 | srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
464 | 464 | ||
465 | /* use request field to save the ptr. to completion struct. */ | 465 | /* use request field to save the ptr. to completion struct. */ |
466 | scp->request = (struct request *)&wait; | 466 | scp->request = (struct request *)&wait; |
467 | scp->timeout_per_command = timeout*HZ; | ||
468 | scp->cmd_len = 12; | 467 | scp->cmd_len = 12; |
469 | scp->cmnd = cmnd; | 468 | scp->cmnd = cmnd; |
470 | cmndinfo.priority = IOCTL_PRI; | 469 | cmndinfo.priority = IOCTL_PRI; |
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority) | |||
1995 | register Scsi_Cmnd *pscp; | 1994 | register Scsi_Cmnd *pscp; |
1996 | register Scsi_Cmnd *nscp; | 1995 | register Scsi_Cmnd *nscp; |
1997 | ulong flags; | 1996 | ulong flags; |
1998 | unchar b, t; | ||
1999 | 1997 | ||
2000 | TRACE(("gdth_putq() priority %d\n",priority)); | 1998 | TRACE(("gdth_putq() priority %d\n",priority)); |
2001 | spin_lock_irqsave(&ha->smp_lock, flags); | 1999 | spin_lock_irqsave(&ha->smp_lock, flags); |
2002 | 2000 | ||
2003 | if (!cmndinfo->internal_command) { | 2001 | if (!cmndinfo->internal_command) |
2004 | cmndinfo->priority = priority; | 2002 | cmndinfo->priority = priority; |
2005 | b = scp->device->channel; | ||
2006 | t = scp->device->id; | ||
2007 | if (priority >= DEFAULT_PRI) { | ||
2008 | if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) || | ||
2009 | (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) { | ||
2010 | TRACE2(("gdth_putq(): locked IO ->update_timeout()\n")); | ||
2011 | cmndinfo->timeout = gdth_update_timeout(scp, 0); | ||
2012 | } | ||
2013 | } | ||
2014 | } | ||
2015 | 2003 | ||
2016 | if (ha->req_first==NULL) { | 2004 | if (ha->req_first==NULL) { |
2017 | ha->req_first = scp; /* queue was empty */ | 2005 | ha->req_first = scp; /* queue was empty */ |
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp) | |||
3899 | return ((const char *)ha->binfo.type_string); | 3887 | return ((const char *)ha->binfo.type_string); |
3900 | } | 3888 | } |
3901 | 3889 | ||
3890 | static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp) | ||
3891 | { | ||
3892 | gdth_ha_str *ha = shost_priv(scp->device->host); | ||
3893 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | ||
3894 | unchar b, t; | ||
3895 | ulong flags; | ||
3896 | enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED; | ||
3897 | |||
3898 | TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0])); | ||
3899 | b = scp->device->channel; | ||
3900 | t = scp->device->id; | ||
3901 | |||
3902 | /* | ||
3903 | * We don't really honor the command timeout, but we try to | ||
3904 | * honor 6 times the actual command timeout! So reset the | ||
3905 | * timer if this is less than the 6th timeout on this command! | ||
3906 | */ | ||
3907 | if (++cmndinfo->timeout_count < 6) | ||
3908 | retval = BLK_EH_RESET_TIMER; | ||
3909 | |||
3910 | /* Reset the timeout if it is locked IO */ | ||
3911 | spin_lock_irqsave(&ha->smp_lock, flags); | ||
3912 | if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) || | ||
3913 | (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) { | ||
3914 | TRACE2(("%s(): locked IO, reset timeout\n", __func__)); | ||
3915 | retval = BLK_EH_RESET_TIMER; | ||
3916 | } | ||
3917 | spin_unlock_irqrestore(&ha->smp_lock, flags); | ||
3918 | |||
3919 | return retval; | ||
3920 | } | ||
3921 | |||
3922 | |||
3902 | static int gdth_eh_bus_reset(Scsi_Cmnd *scp) | 3923 | static int gdth_eh_bus_reset(Scsi_Cmnd *scp) |
3903 | { | 3924 | { |
3904 | gdth_ha_str *ha = shost_priv(scp->device->host); | 3925 | gdth_ha_str *ha = shost_priv(scp->device->host); |
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp, | |||
3992 | BUG_ON(!cmndinfo); | 4013 | BUG_ON(!cmndinfo); |
3993 | 4014 | ||
3994 | scp->scsi_done = done; | 4015 | scp->scsi_done = done; |
3995 | gdth_update_timeout(scp, scp->timeout_per_command * 6); | 4016 | cmndinfo->timeout_count = 0; |
3996 | cmndinfo->priority = DEFAULT_PRI; | 4017 | cmndinfo->priority = DEFAULT_PRI; |
3997 | 4018 | ||
3998 | return __gdth_queuecommand(ha, scp, cmndinfo); | 4019 | return __gdth_queuecommand(ha, scp, cmndinfo); |
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg) | |||
4096 | ha->hdr[j].lock = 1; | 4117 | ha->hdr[j].lock = 1; |
4097 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4118 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4098 | gdth_wait_completion(ha, ha->bus_cnt, j); | 4119 | gdth_wait_completion(ha, ha->bus_cnt, j); |
4099 | gdth_stop_timeout(ha, ha->bus_cnt, j); | ||
4100 | } else { | 4120 | } else { |
4101 | spin_lock_irqsave(&ha->smp_lock, flags); | 4121 | spin_lock_irqsave(&ha->smp_lock, flags); |
4102 | ha->hdr[j].lock = 0; | 4122 | ha->hdr[j].lock = 0; |
4103 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4123 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4104 | gdth_start_timeout(ha, ha->bus_cnt, j); | ||
4105 | gdth_next(ha); | 4124 | gdth_next(ha); |
4106 | } | 4125 | } |
4107 | } | 4126 | } |
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
4539 | spin_lock_irqsave(&ha->smp_lock, flags); | 4558 | spin_lock_irqsave(&ha->smp_lock, flags); |
4540 | ha->raw[i].lock = 1; | 4559 | ha->raw[i].lock = 1; |
4541 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4560 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4542 | for (j = 0; j < ha->tid_cnt; ++j) { | 4561 | for (j = 0; j < ha->tid_cnt; ++j) |
4543 | gdth_wait_completion(ha, i, j); | 4562 | gdth_wait_completion(ha, i, j); |
4544 | gdth_stop_timeout(ha, i, j); | ||
4545 | } | ||
4546 | } else { | 4563 | } else { |
4547 | spin_lock_irqsave(&ha->smp_lock, flags); | 4564 | spin_lock_irqsave(&ha->smp_lock, flags); |
4548 | ha->raw[i].lock = 0; | 4565 | ha->raw[i].lock = 0; |
4549 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 4566 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
4550 | for (j = 0; j < ha->tid_cnt; ++j) { | 4567 | for (j = 0; j < ha->tid_cnt; ++j) |
4551 | gdth_start_timeout(ha, i, j); | ||
4552 | gdth_next(ha); | 4568 | gdth_next(ha); |
4553 | } | ||
4554 | } | 4569 | } |
4555 | } | 4570 | } |
4556 | break; | 4571 | break; |
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = { | |||
4644 | .slave_configure = gdth_slave_configure, | 4659 | .slave_configure = gdth_slave_configure, |
4645 | .bios_param = gdth_bios_param, | 4660 | .bios_param = gdth_bios_param, |
4646 | .proc_info = gdth_proc_info, | 4661 | .proc_info = gdth_proc_info, |
4662 | .eh_timed_out = gdth_timed_out, | ||
4647 | .proc_name = "gdth", | 4663 | .proc_name = "gdth", |
4648 | .can_queue = GDTH_MAXCMDS, | 4664 | .can_queue = GDTH_MAXCMDS, |
4649 | .this_id = -1, | 4665 | .this_id = -1, |
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct { | |||
916 | gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ | 916 | gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ |
917 | dma_addr_t sense_paddr; /* sense dma-addr */ | 917 | dma_addr_t sense_paddr; /* sense dma-addr */ |
918 | unchar priority; | 918 | unchar priority; |
919 | int timeout; | 919 | int timeout_count; /* # of timeout calls */ |
920 | volatile int wait_for_completion; | 920 | volatile int wait_for_completion; |
921 | ushort status; | 921 | ushort status; |
922 | ulong32 info; | 922 | ulong32 info; |
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) | |||
748 | } | 748 | } |
749 | spin_unlock_irqrestore(&ha->smp_lock, flags); | 749 | spin_unlock_irqrestore(&ha->smp_lock, flags); |
750 | } | 750 | } |
751 | |||
752 | static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id) | ||
753 | { | ||
754 | ulong flags; | ||
755 | Scsi_Cmnd *scp; | ||
756 | unchar b, t; | ||
757 | |||
758 | spin_lock_irqsave(&ha->smp_lock, flags); | ||
759 | |||
760 | for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) { | ||
761 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | ||
762 | if (!cmndinfo->internal_command) { | ||
763 | b = scp->device->channel; | ||
764 | t = scp->device->id; | ||
765 | if (t == (unchar)id && b == (unchar)busnum) { | ||
766 | TRACE2(("gdth_stop_timeout(): update_timeout()\n")); | ||
767 | cmndinfo->timeout = gdth_update_timeout(scp, 0); | ||
768 | } | ||
769 | } | ||
770 | } | ||
771 | spin_unlock_irqrestore(&ha->smp_lock, flags); | ||
772 | } | ||
773 | |||
774 | static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id) | ||
775 | { | ||
776 | ulong flags; | ||
777 | Scsi_Cmnd *scp; | ||
778 | unchar b, t; | ||
779 | |||
780 | spin_lock_irqsave(&ha->smp_lock, flags); | ||
781 | |||
782 | for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) { | ||
783 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | ||
784 | if (!cmndinfo->internal_command) { | ||
785 | b = scp->device->channel; | ||
786 | t = scp->device->id; | ||
787 | if (t == (unchar)id && b == (unchar)busnum) { | ||
788 | TRACE2(("gdth_start_timeout(): update_timeout()\n")); | ||
789 | gdth_update_timeout(scp, cmndinfo->timeout); | ||
790 | } | ||
791 | } | ||
792 | } | ||
793 | spin_unlock_irqrestore(&ha->smp_lock, flags); | ||
794 | } | ||
795 | |||
796 | static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout) | ||
797 | { | ||
798 | int oldto; | ||
799 | |||
800 | oldto = scp->timeout_per_command; | ||
801 | scp->timeout_per_command = timeout; | ||
802 | |||
803 | if (timeout == 0) { | ||
804 | del_timer(&scp->eh_timeout); | ||
805 | scp->eh_timeout.data = (unsigned long) NULL; | ||
806 | scp->eh_timeout.expires = 0; | ||
807 | } else { | ||
808 | if (scp->eh_timeout.data != (unsigned long) NULL) | ||
809 | del_timer(&scp->eh_timeout); | ||
810 | scp->eh_timeout.data = (unsigned long) scp; | ||
811 | scp->eh_timeout.expires = jiffies + timeout; | ||
812 | add_timer(&scp->eh_timeout); | ||
813 | } | ||
814 | |||
815 | return oldto; | ||
816 | } | ||
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, | |||
20 | ulong64 *paddr); | 20 | ulong64 *paddr); |
21 | static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); | 21 | static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); |
22 | static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); | 22 | static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); |
23 | static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id); | ||
24 | static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id); | ||
25 | static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout); | ||
26 | 23 | ||
27 | #endif | 24 | #endif |
28 | 25 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6e..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, | |||
756 | init_event_struct(evt_struct, | 756 | init_event_struct(evt_struct, |
757 | handle_cmd_rsp, | 757 | handle_cmd_rsp, |
758 | VIOSRP_SRP_FORMAT, | 758 | VIOSRP_SRP_FORMAT, |
759 | cmnd->timeout_per_command/HZ); | 759 | cmnd->request->timeout/HZ); |
760 | 760 | ||
761 | evt_struct->cmnd = cmnd; | 761 | evt_struct->cmnd = cmnd; |
762 | evt_struct->cmnd_done = done; | 762 | evt_struct->cmnd_done = done; |
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc45..81c16cba5417 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd, | |||
612 | pc->req_xfer = pc->buf_size = scsi_bufflen(cmd); | 612 | pc->req_xfer = pc->buf_size = scsi_bufflen(cmd); |
613 | pc->scsi_cmd = cmd; | 613 | pc->scsi_cmd = cmd; |
614 | pc->done = done; | 614 | pc->done = done; |
615 | pc->timeout = jiffies + cmd->timeout_per_command; | 615 | pc->timeout = jiffies + cmd->request->timeout; |
616 | 616 | ||
617 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { | 617 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { |
618 | printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); | 618 | printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..d30eb7ba018e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev) | |||
3670 | sdev->no_uld_attach = 1; | 3670 | sdev->no_uld_attach = 1; |
3671 | } | 3671 | } |
3672 | if (ipr_is_vset_device(res)) { | 3672 | if (ipr_is_vset_device(res)) { |
3673 | sdev->timeout = IPR_VSET_RW_TIMEOUT; | 3673 | blk_queue_rq_timeout(sdev->request_queue, |
3674 | IPR_VSET_RW_TIMEOUT); | ||
3674 | blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); | 3675 | blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); |
3675 | } | 3676 | } |
3676 | if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) | 3677 | if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) |
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41df..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) | |||
3818 | scb->cmd.dcdb.segment_4G = 0; | 3818 | scb->cmd.dcdb.segment_4G = 0; |
3819 | scb->cmd.dcdb.enhanced_sg = 0; | 3819 | scb->cmd.dcdb.enhanced_sg = 0; |
3820 | 3820 | ||
3821 | TimeOut = scb->scsi_cmd->timeout_per_command; | 3821 | TimeOut = scb->scsi_cmd->request->timeout; |
3822 | 3822 | ||
3823 | if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ | 3823 | if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ |
3824 | if (!scb->sg_len) { | 3824 | if (!scb->sg_len) { |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..1eca82420aab 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn) | |||
1476 | scsi_queue_work(conn->session->host, &conn->xmitwork); | 1476 | scsi_queue_work(conn->session->host, &conn->xmitwork); |
1477 | } | 1477 | } |
1478 | 1478 | ||
1479 | static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | 1479 | static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) |
1480 | { | 1480 | { |
1481 | struct iscsi_cls_session *cls_session; | 1481 | struct iscsi_cls_session *cls_session; |
1482 | struct iscsi_session *session; | 1482 | struct iscsi_session *session; |
1483 | struct iscsi_conn *conn; | 1483 | struct iscsi_conn *conn; |
1484 | enum scsi_eh_timer_return rc = EH_NOT_HANDLED; | 1484 | enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; |
1485 | 1485 | ||
1486 | cls_session = starget_to_session(scsi_target(scmd->device)); | 1486 | cls_session = starget_to_session(scsi_target(scmd->device)); |
1487 | session = cls_session->dd_data; | 1487 | session = cls_session->dd_data; |
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | |||
1494 | * We are probably in the middle of iscsi recovery so let | 1494 | * We are probably in the middle of iscsi recovery so let |
1495 | * that complete and handle the error. | 1495 | * that complete and handle the error. |
1496 | */ | 1496 | */ |
1497 | rc = EH_RESET_TIMER; | 1497 | rc = BLK_EH_RESET_TIMER; |
1498 | goto done; | 1498 | goto done; |
1499 | } | 1499 | } |
1500 | 1500 | ||
1501 | conn = session->leadconn; | 1501 | conn = session->leadconn; |
1502 | if (!conn) { | 1502 | if (!conn) { |
1503 | /* In the middle of shutting down */ | 1503 | /* In the middle of shutting down */ |
1504 | rc = EH_RESET_TIMER; | 1504 | rc = BLK_EH_RESET_TIMER; |
1505 | goto done; | 1505 | goto done; |
1506 | } | 1506 | } |
1507 | 1507 | ||
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) | |||
1513 | */ | 1513 | */ |
1514 | if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + | 1514 | if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + |
1515 | (conn->ping_timeout * HZ), jiffies)) | 1515 | (conn->ping_timeout * HZ), jiffies)) |
1516 | rc = EH_RESET_TIMER; | 1516 | rc = BLK_EH_RESET_TIMER; |
1517 | /* | 1517 | /* |
1518 | * if we are about to check the transport then give the command | 1518 | * if we are about to check the transport then give the command |
1519 | * more time | 1519 | * more time |
1520 | */ | 1520 | */ |
1521 | if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), | 1521 | if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), |
1522 | jiffies)) | 1522 | jiffies)) |
1523 | rc = EH_RESET_TIMER; | 1523 | rc = BLK_EH_RESET_TIMER; |
1524 | /* if in the middle of checking the transport then give us more time */ | 1524 | /* if in the middle of checking the transport then give us more time */ |
1525 | if (conn->ping_task) | 1525 | if (conn->ping_task) |
1526 | rc = EH_RESET_TIMER; | 1526 | rc = BLK_EH_RESET_TIMER; |
1527 | done: | 1527 | done: |
1528 | spin_unlock(&session->lock); | 1528 | spin_unlock(&session->lock); |
1529 | debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh"); | 1529 | debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ? |
1530 | "timer reset" : "nh"); | ||
1530 | return rc; | 1531 | return rc; |
1531 | } | 1532 | } |
1532 | 1533 | ||
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bdd..837b095ba90d 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task) | |||
398 | 398 | ||
399 | /* Bounce SCSI-initiated commands to the SCSI EH */ | 399 | /* Bounce SCSI-initiated commands to the SCSI EH */ |
400 | if (qc->scsicmd) { | 400 | if (qc->scsicmd) { |
401 | scsi_req_abort_cmd(qc->scsicmd); | 401 | blk_abort_request(qc->scsicmd->request); |
402 | scsi_schedule_eh(qc->scsicmd->device->host); | 402 | scsi_schedule_eh(qc->scsicmd->device->host); |
403 | return; | 403 | return; |
404 | } | 404 | } |
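blk_abort_request() is the drop-in replacement for scsi_req_abort_cmd() at these call sites. Per the blk-timeout.c hunk above, it simply cancels the request's timer and runs the queue's timeout handler immediately, so an LLD-initiated abort funnels into the same path as a real timeout (and, per its documentation, the queue lock must be held):

	/* roughly what blk_abort_request(rq) does */
	blk_delete_timer(rq);		/* drop the pending timer, if any */
	blk_rq_timed_out(rq);		/* invoke q->rq_timed_out_fn() right away */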
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha); | |||
55 | int sas_register_ports(struct sas_ha_struct *sas_ha); | 55 | int sas_register_ports(struct sas_ha_struct *sas_ha); |
56 | void sas_unregister_ports(struct sas_ha_struct *sas_ha); | 56 | void sas_unregister_ports(struct sas_ha_struct *sas_ha); |
57 | 57 | ||
58 | enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); | 58 | enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); |
59 | 59 | ||
60 | int sas_init_queue(struct sas_ha_struct *sas_ha); | 60 | int sas_init_queue(struct sas_ha_struct *sas_ha); |
61 | int sas_init_events(struct sas_ha_struct *sas_ha); | 61 | int sas_init_events(struct sas_ha_struct *sas_ha); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out: | |||
673 | return; | 673 | return; |
674 | } | 674 | } |
675 | 675 | ||
676 | enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) | 676 | enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) |
677 | { | 677 | { |
678 | struct sas_task *task = TO_SAS_TASK(cmd); | 678 | struct sas_task *task = TO_SAS_TASK(cmd); |
679 | unsigned long flags; | 679 | unsigned long flags; |
680 | 680 | ||
681 | if (!task) { | 681 | if (!task) { |
682 | cmd->timeout_per_command /= 2; | 682 | cmd->request->timeout /= 2; |
683 | SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", | 683 | SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", |
684 | cmd, task, (cmd->timeout_per_command ? | 684 | cmd, task, (cmd->request->timeout ? |
685 | "EH_RESET_TIMER" : "EH_NOT_HANDLED")); | 685 | "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED")); |
686 | if (!cmd->timeout_per_command) | 686 | if (!cmd->request->timeout) |
687 | return EH_NOT_HANDLED; | 687 | return BLK_EH_NOT_HANDLED; |
688 | return EH_RESET_TIMER; | 688 | return BLK_EH_RESET_TIMER; |
689 | } | 689 | } |
690 | 690 | ||
691 | spin_lock_irqsave(&task->task_state_lock, flags); | 691 | spin_lock_irqsave(&task->task_state_lock, flags); |
692 | BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); | 692 | BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); |
693 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 693 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
694 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 694 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
695 | SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", | 695 | SAS_DPRINTK("command 0x%p, task 0x%p, timed out: " |
696 | cmd, task); | 696 | "BLK_EH_HANDLED\n", cmd, task); |
697 | return EH_HANDLED; | 697 | return BLK_EH_HANDLED; |
698 | } | 698 | } |
699 | if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { | 699 | if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { |
700 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 700 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
701 | SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " | 701 | SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " |
702 | "EH_RESET_TIMER\n", | 702 | "BLK_EH_RESET_TIMER\n", |
703 | cmd, task); | 703 | cmd, task); |
704 | return EH_RESET_TIMER; | 704 | return BLK_EH_RESET_TIMER; |
705 | } | 705 | } |
706 | task->task_state_flags |= SAS_TASK_STATE_ABORTED; | 706 | task->task_state_flags |= SAS_TASK_STATE_ABORTED; |
707 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 707 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
708 | 708 | ||
709 | SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", | 709 | SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n", |
710 | cmd, task); | 710 | cmd, task); |
711 | 711 | ||
712 | return EH_NOT_HANDLED; | 712 | return BLK_EH_NOT_HANDLED; |
713 | } | 713 | } |
714 | 714 | ||
715 | int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) | 715 | int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) |
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task) | |||
1039 | return; | 1039 | return; |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | scsi_req_abort_cmd(sc); | 1042 | blk_abort_request(sc->request); |
1043 | scsi_schedule_eh(sc->device->host); | 1043 | scsi_schedule_eh(sc->device->host); |
1044 | } | 1044 | } |
1045 | 1045 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7d..afe1de998763 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) | |||
1167 | * cmd has not been completed within the timeout period. | 1167 | * cmd has not been completed within the timeout period. |
1168 | */ | 1168 | */ |
1169 | static enum | 1169 | static enum |
1170 | scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) | 1170 | blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) |
1171 | { | 1171 | { |
1172 | struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; | 1172 | struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; |
1173 | struct megasas_instance *instance; | 1173 | struct megasas_instance *instance; |
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) | |||
1175 | 1175 | ||
1176 | if (time_after(jiffies, scmd->jiffies_at_alloc + | 1176 | if (time_after(jiffies, scmd->jiffies_at_alloc + |
1177 | (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { | 1177 | (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { |
1178 | return EH_NOT_HANDLED; | 1178 | return BLK_EH_NOT_HANDLED; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | instance = cmd->instance; | 1181 | instance = cmd->instance; |
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) | |||
1189 | 1189 | ||
1190 | spin_unlock_irqrestore(instance->host->host_lock, flags); | 1190 | spin_unlock_irqrestore(instance->host->host_lock, flags); |
1191 | } | 1191 | } |
1192 | return EH_RESET_TIMER; | 1192 | return BLK_EH_RESET_TIMER; |
1193 | } | 1193 | } |
1194 | 1194 | ||
1195 | /** | 1195 | /** |
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) | |||
4170 | ** | 4170 | ** |
4171 | **---------------------------------------------------- | 4171 | **---------------------------------------------------- |
4172 | */ | 4172 | */ |
4173 | if (np->settle_time && cmd->timeout_per_command >= HZ) { | 4173 | if (np->settle_time && cmd->request->timeout >= HZ) { |
4174 | u_long tlimit = jiffies + cmd->timeout_per_command - HZ; | 4174 | u_long tlimit = jiffies + cmd->request->timeout - HZ; |
4175 | if (time_after(np->settle_time, tlimit)) | 4175 | if (time_after(np->settle_time, tlimit)) |
4176 | np->settle_time = tlimit; | 4176 | np->settle_time = tlimit; |
4177 | } | 4177 | } |
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
2845 | memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); | 2845 | memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); |
2846 | 2846 | ||
2847 | /* Set ISP command timeout. */ | 2847 | /* Set ISP command timeout. */ |
2848 | pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); | 2848 | pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); |
2849 | 2849 | ||
2850 | /* Set device target ID and LUN */ | 2850 | /* Set device target ID and LUN */ |
2851 | pkt->lun = SCSI_LUN_32(cmd); | 2851 | pkt->lun = SCSI_LUN_32(cmd); |
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) | |||
3114 | memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); | 3114 | memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); |
3115 | 3115 | ||
3116 | /* Set ISP command timeout. */ | 3116 | /* Set ISP command timeout. */ |
3117 | pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); | 3117 | pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); |
3118 | 3118 | ||
3119 | /* Set device target ID and LUN */ | 3119 | /* Set device target ID and LUN */ |
3120 | pkt->lun = SCSI_LUN_32(cmd); | 3120 | pkt->lun = SCSI_LUN_32(cmd); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc52..de8279ad7d89 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
1542 | DEBUG2(printk(KERN_INFO | 1542 | DEBUG2(printk(KERN_INFO |
1543 | "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," | 1543 | "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," |
1544 | "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, | 1544 | "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, |
1545 | cmd, jiffies, cmd->timeout_per_command / HZ, | 1545 | cmd, jiffies, cmd->request->timeout / HZ, |
1546 | ha->dpc_flags, cmd->result, cmd->allowed)); | 1546 | ha->dpc_flags, cmd->result, cmd->allowed)); |
1547 | 1547 | ||
1548 | /* FIXME: wait for hba to go online */ | 1548 | /* FIXME: wait for hba to go online */ |
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | |||
1598 | DEBUG2(printk(KERN_INFO | 1598 | DEBUG2(printk(KERN_INFO |
1599 | "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " | 1599 | "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " |
1600 | "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", | 1600 | "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", |
1601 | ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, | 1601 | ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, |
1602 | ha->dpc_flags, cmd->result, cmd->allowed)); | 1602 | ha->dpc_flags, cmd->result, cmd->allowed)); |
1603 | 1603 | ||
1604 | stat = qla4xxx_reset_target(ha, ddb_entry); | 1604 | stat = qla4xxx_reset_target(ha, ddb_entry); |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..dbeb86cafc0d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) | |||
291 | unsigned long flags; | 291 | unsigned long flags; |
292 | 292 | ||
293 | cmd->device = dev; | 293 | cmd->device = dev; |
294 | init_timer(&cmd->eh_timeout); | ||
295 | INIT_LIST_HEAD(&cmd->list); | 294 | INIT_LIST_HEAD(&cmd->list); |
296 | spin_lock_irqsave(&dev->list_lock, flags); | 295 | spin_lock_irqsave(&dev->list_lock, flags); |
297 | list_add_tail(&cmd->list, &dev->cmd_list); | 296 | list_add_tail(&cmd->list, &dev->cmd_list); |
@@ -652,14 +651,19 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
652 | unsigned long timeout; | 651 | unsigned long timeout; |
653 | int rtn = 0; | 652 | int rtn = 0; |
654 | 653 | ||
654 | /* | ||
655 | * We will use a queued command if possible, otherwise we will | ||
656 | * emulate the queuing and calling of completion function ourselves. | ||
657 | */ | ||
658 | atomic_inc(&cmd->device->iorequest_cnt); | ||
659 | |||
655 | /* check if the device is still usable */ | 660 | /* check if the device is still usable */ |
656 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { | 661 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { |
657 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT | 662 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT |
658 | * returns an immediate error upwards, and signals | 663 | * returns an immediate error upwards, and signals |
659 | * that the device is no longer present */ | 664 | * that the device is no longer present */ |
660 | cmd->result = DID_NO_CONNECT << 16; | 665 | cmd->result = DID_NO_CONNECT << 16; |
661 | atomic_inc(&cmd->device->iorequest_cnt); | 666 | scsi_done(cmd); |
662 | __scsi_done(cmd); | ||
663 | /* return 0 (because the command has been processed) */ | 667 | /* return 0 (because the command has been processed) */ |
664 | goto out; | 668 | goto out; |
665 | } | 669 | } |
@@ -672,6 +676,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
672 | * future requests should not occur until the device | 676 | * future requests should not occur until the device |
673 | * transitions out of the suspend state. | 677 | * transitions out of the suspend state. |
674 | */ | 678 | */ |
679 | |||
675 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); | 680 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); |
676 | 681 | ||
677 | SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); | 682 | SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); |
@@ -714,21 +719,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
714 | host->resetting = 0; | 719 | host->resetting = 0; |
715 | } | 720 | } |
716 | 721 | ||
717 | /* | ||
718 | * AK: unlikely race here: for some reason the timer could | ||
719 | * expire before the serial number is set up below. | ||
720 | */ | ||
721 | scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out); | ||
722 | |||
723 | scsi_log_send(cmd); | 722 | scsi_log_send(cmd); |
724 | 723 | ||
725 | /* | 724 | /* |
726 | * We will use a queued command if possible, otherwise we will | ||
727 | * emulate the queuing and calling of completion function ourselves. | ||
728 | */ | ||
729 | atomic_inc(&cmd->device->iorequest_cnt); | ||
730 | |||
731 | /* | ||
732 | * Before we queue this command, check if the command | 725 | * Before we queue this command, check if the command |
733 | * length exceeds what the host adapter can handle. | 726 | * length exceeds what the host adapter can handle. |
734 | */ | 727 | */ |
@@ -744,6 +737,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
744 | } | 737 | } |
745 | 738 | ||
746 | spin_lock_irqsave(host->host_lock, flags); | 739 | spin_lock_irqsave(host->host_lock, flags); |
740 | /* | ||
741 | * AK: unlikely race here: for some reason the timer could | ||
742 | * expire before the serial number is set up below. | ||
743 | * | ||
744 | * TODO: kill serial or move to blk layer | ||
745 | */ | ||
747 | scsi_cmd_get_serial(host, cmd); | 746 | scsi_cmd_get_serial(host, cmd); |
748 | 747 | ||
749 | if (unlikely(host->shost_state == SHOST_DEL)) { | 748 | if (unlikely(host->shost_state == SHOST_DEL)) { |
@@ -754,12 +753,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
754 | } | 753 | } |
755 | spin_unlock_irqrestore(host->host_lock, flags); | 754 | spin_unlock_irqrestore(host->host_lock, flags); |
756 | if (rtn) { | 755 | if (rtn) { |
757 | if (scsi_delete_timer(cmd)) { | 756 | scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? |
758 | atomic_inc(&cmd->device->iodone_cnt); | 757 | rtn : SCSI_MLQUEUE_HOST_BUSY); |
759 | scsi_queue_insert(cmd, | ||
760 | (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? | ||
761 | rtn : SCSI_MLQUEUE_HOST_BUSY); | ||
762 | } | ||
763 | SCSI_LOG_MLQUEUE(3, | 758 | SCSI_LOG_MLQUEUE(3, |
764 | printk("queuecommand : request rejected\n")); | 759 | printk("queuecommand : request rejected\n")); |
765 | } | 760 | } |
@@ -770,24 +765,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) | |||
770 | } | 765 | } |
771 | 766 | ||
772 | /** | 767 | /** |
773 | * scsi_req_abort_cmd -- Request command recovery for the specified command | ||
774 | * @cmd: pointer to the SCSI command of interest | ||
775 | * | ||
776 | * This function requests that SCSI Core start recovery for the | ||
777 | * command by deleting the timer and adding the command to the eh | ||
778 | * queue. It can be called by either LLDDs or SCSI Core. LLDDs who | ||
779 | * implement their own error recovery MAY ignore the timeout event if | ||
780 | * they generated scsi_req_abort_cmd. | ||
781 | */ | ||
782 | void scsi_req_abort_cmd(struct scsi_cmnd *cmd) | ||
783 | { | ||
784 | if (!scsi_delete_timer(cmd)) | ||
785 | return; | ||
786 | scsi_times_out(cmd); | ||
787 | } | ||
788 | EXPORT_SYMBOL(scsi_req_abort_cmd); | ||
789 | |||
790 | /** | ||
791 | * scsi_done - Enqueue the finished SCSI command into the done queue. | 768 | * scsi_done - Enqueue the finished SCSI command into the done queue. |
792 | * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives | 769 | * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives |
793 | * ownership back to SCSI Core -- i.e. the LLDD has finished with it. | 770 | * ownership back to SCSI Core -- i.e. the LLDD has finished with it. |
@@ -802,42 +779,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd); | |||
802 | */ | 779 | */ |
803 | static void scsi_done(struct scsi_cmnd *cmd) | 780 | static void scsi_done(struct scsi_cmnd *cmd) |
804 | { | 781 | { |
805 | /* | 782 | blk_complete_request(cmd->request); |
806 | * We don't have to worry about this one timing out anymore. | ||
807 | * If we are unable to remove the timer, then the command | ||
808 | * has already timed out. In which case, we have no choice but to | ||
809 | * let the timeout function run, as we have no idea where in fact | ||
810 | * that function could really be. It might be on another processor, | ||
811 | * etc, etc. | ||
812 | */ | ||
813 | if (!scsi_delete_timer(cmd)) | ||
814 | return; | ||
815 | __scsi_done(cmd); | ||
816 | } | ||
817 | |||
818 | /* Private entry to scsi_done() to complete a command when the timer | ||
819 | * isn't running --- used by scsi_times_out */ | ||
820 | void __scsi_done(struct scsi_cmnd *cmd) | ||
821 | { | ||
822 | struct request *rq = cmd->request; | ||
823 | |||
824 | /* | ||
825 | * Set the serial numbers back to zero | ||
826 | */ | ||
827 | cmd->serial_number = 0; | ||
828 | |||
829 | atomic_inc(&cmd->device->iodone_cnt); | ||
830 | if (cmd->result) | ||
831 | atomic_inc(&cmd->device->ioerr_cnt); | ||
832 | |||
833 | BUG_ON(!rq); | ||
834 | |||
835 | /* | ||
836 | * The uptodate/nbytes values don't matter, as we allow partial | ||
837 | * completes and thus will check this in the softirq callback | ||
838 | */ | ||
839 | rq->completion_data = cmd; | ||
840 | blk_complete_request(rq); | ||
841 | } | 783 | } |
842 | 784 | ||
843 | /* Move this to a header if it becomes more generally useful */ | 785 | /* Move this to a header if it becomes more generally useful */ |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 39ce3aba1dac..fecefa05cb62 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | /** | 114 | /** |
115 | * scsi_add_timer - Start timeout timer for a single scsi command. | ||
116 | * @scmd: scsi command that is about to start running. | ||
117 | * @timeout: amount of time to allow this command to run. | ||
118 | * @complete: timeout function to call if timer isn't canceled. | ||
119 | * | ||
120 | * Notes: | ||
121 | * This should be turned into an inline function. Each scsi command | ||
122 | * has its own timer, and as it is added to the queue, we set up the | ||
123 | * timer. When the command completes, we cancel the timer. | ||
124 | */ | ||
125 | void scsi_add_timer(struct scsi_cmnd *scmd, int timeout, | ||
126 | void (*complete)(struct scsi_cmnd *)) | ||
127 | { | ||
128 | |||
129 | /* | ||
130 | * If the clock was already running for this command, then | ||
131 | * first delete the timer. The timer handling code gets rather | ||
132 | * confused if we don't do this. | ||
133 | */ | ||
134 | if (scmd->eh_timeout.function) | ||
135 | del_timer(&scmd->eh_timeout); | ||
136 | |||
137 | scmd->eh_timeout.data = (unsigned long)scmd; | ||
138 | scmd->eh_timeout.expires = jiffies + timeout; | ||
139 | scmd->eh_timeout.function = (void (*)(unsigned long)) complete; | ||
140 | |||
141 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" | ||
142 | " %d, (%p)\n", __func__, | ||
143 | scmd, timeout, complete)); | ||
144 | |||
145 | add_timer(&scmd->eh_timeout); | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * scsi_delete_timer - Delete/cancel timer for a given function. | ||
150 | * @scmd: Cmd that we are canceling timer for | ||
151 | * | ||
152 | * Notes: | ||
153 | * This should be turned into an inline function. | ||
154 | * | ||
155 | * Return value: | ||
156 | * 1 if we were able to detach the timer. 0 if we blew it, and the | ||
157 | * timer function has already started to run. | ||
158 | */ | ||
159 | int scsi_delete_timer(struct scsi_cmnd *scmd) | ||
160 | { | ||
161 | int rtn; | ||
162 | |||
163 | rtn = del_timer(&scmd->eh_timeout); | ||
164 | |||
165 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," | ||
166 | " rtn: %d\n", __func__, | ||
167 | scmd, rtn)); | ||
168 | |||
169 | scmd->eh_timeout.data = (unsigned long)NULL; | ||
170 | scmd->eh_timeout.function = NULL; | ||
171 | |||
172 | return rtn; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * scsi_times_out - Timeout function for normal scsi commands. | 115 | * scsi_times_out - Timeout function for normal scsi commands. |
177 | * @scmd: Cmd that is timing out. | 116 | * @req: request that is timing out. |
178 | * | 117 | * |
179 | * Notes: | 118 | * Notes: |
180 | * We do not need to lock this. There is the potential for a race | 119 | * We do not need to lock this. There is the potential for a race |
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd) | |||
182 | * normal completion function determines that the timer has already | 121 | * normal completion function determines that the timer has already |
183 | * fired, then it mustn't do anything. | 122 | * fired, then it mustn't do anything. |
184 | */ | 123 | */ |
185 | void scsi_times_out(struct scsi_cmnd *scmd) | 124 | enum blk_eh_timer_return scsi_times_out(struct request *req) |
186 | { | 125 | { |
187 | enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); | 126 | struct scsi_cmnd *scmd = req->special; |
127 | enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); | ||
128 | enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; | ||
188 | 129 | ||
189 | scsi_log_completion(scmd, TIMEOUT_ERROR); | 130 | scsi_log_completion(scmd, TIMEOUT_ERROR); |
190 | 131 | ||
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd) | |||
196 | eh_timed_out = NULL; | 137 | eh_timed_out = NULL; |
197 | 138 | ||
198 | if (eh_timed_out) | 139 | if (eh_timed_out) |
199 | switch (eh_timed_out(scmd)) { | 140 | rtn = eh_timed_out(scmd); |
200 | case EH_HANDLED: | 141 | switch (rtn) { |
201 | __scsi_done(scmd); | 142 | case BLK_EH_NOT_HANDLED: |
202 | return; | ||
203 | case EH_RESET_TIMER: | ||
204 | scsi_add_timer(scmd, scmd->timeout_per_command, | ||
205 | scsi_times_out); | ||
206 | return; | ||
207 | case EH_NOT_HANDLED: | ||
208 | break; | 143 | break; |
144 | default: | ||
145 | return rtn; | ||
209 | } | 146 | } |
210 | 147 | ||
211 | if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { | 148 | if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { |
212 | scmd->result |= DID_TIME_OUT << 16; | 149 | scmd->result |= DID_TIME_OUT << 16; |
213 | __scsi_done(scmd); | 150 | return BLK_EH_HANDLED; |
214 | } | 151 | } |
152 | |||
153 | return BLK_EH_NOT_HANDLED; | ||
215 | } | 154 | } |
216 | 155 | ||
217 | /** | 156 | /** |
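The converted scsi_times_out() above is now an ordinary block-layer rq_timed_out_fn. A hedged sketch of the same contract for a hypothetical driver; struct mydrv_cmd, mydrv_hw_still_busy() and mydrv_try_abort() are assumptions, only the return values and their meaning come from the patch:

#include <linux/blkdev.h>

struct mydrv_cmd;				/* driver-private, hung off rq->special */
extern int mydrv_hw_still_busy(struct mydrv_cmd *cmd);
extern int mydrv_try_abort(struct mydrv_cmd *cmd);

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	struct mydrv_cmd *cmd = rq->special;

	if (mydrv_hw_still_busy(cmd))
		return BLK_EH_RESET_TIMER;	/* re-arm for another rq->timeout */

	if (mydrv_try_abort(cmd) == 0)
		return BLK_EH_HANDLED;		/* block layer completes the request */

	/* Recovery has been queued elsewhere (as scsi_eh_scmd_add() does
	 * above); the block layer must leave the request alone until that
	 * recovery completes it. */
	return BLK_EH_NOT_HANDLED;
}
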
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
1793 | 1732 | ||
1794 | blk_rq_init(NULL, &req); | 1733 | blk_rq_init(NULL, &req); |
1795 | scmd->request = &req; | 1734 | scmd->request = &req; |
1796 | memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout)); | ||
1797 | 1735 | ||
1798 | scmd->cmnd = req.cmd; | 1736 | scmd->cmnd = req.cmd; |
1799 | 1737 | ||
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag) | |||
1804 | 1742 | ||
1805 | scmd->sc_data_direction = DMA_BIDIRECTIONAL; | 1743 | scmd->sc_data_direction = DMA_BIDIRECTIONAL; |
1806 | 1744 | ||
1807 | init_timer(&scmd->eh_timeout); | ||
1808 | |||
1809 | spin_lock_irqsave(shost->host_lock, flags); | 1745 | spin_lock_irqsave(shost->host_lock, flags); |
1810 | shost->tmf_in_progress = 1; | 1746 | shost->tmf_in_progress = 1; |
1811 | spin_unlock_irqrestore(shost->host_lock, flags); | 1747 | spin_unlock_irqrestore(shost->host_lock, flags); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 62307bd794a9..e7686500e9dd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1181,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | |||
1181 | 1181 | ||
1182 | cmd->transfersize = req->data_len; | 1182 | cmd->transfersize = req->data_len; |
1183 | cmd->allowed = req->retries; | 1183 | cmd->allowed = req->retries; |
1184 | cmd->timeout_per_command = req->timeout; | ||
1185 | return BLKPREP_OK; | 1184 | return BLKPREP_OK; |
1186 | } | 1185 | } |
1187 | EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); | 1186 | EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); |
@@ -1416,17 +1415,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
1416 | spin_unlock(shost->host_lock); | 1415 | spin_unlock(shost->host_lock); |
1417 | spin_lock(sdev->request_queue->queue_lock); | 1416 | spin_lock(sdev->request_queue->queue_lock); |
1418 | 1417 | ||
1419 | __scsi_done(cmd); | 1418 | blk_complete_request(req); |
1420 | } | 1419 | } |
1421 | 1420 | ||
1422 | static void scsi_softirq_done(struct request *rq) | 1421 | static void scsi_softirq_done(struct request *rq) |
1423 | { | 1422 | { |
1424 | struct scsi_cmnd *cmd = rq->completion_data; | 1423 | struct scsi_cmnd *cmd = rq->special; |
1425 | unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; | 1424 | unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; |
1426 | int disposition; | 1425 | int disposition; |
1427 | 1426 | ||
1428 | INIT_LIST_HEAD(&cmd->eh_entry); | 1427 | INIT_LIST_HEAD(&cmd->eh_entry); |
1429 | 1428 | ||
1429 | /* | ||
1430 | * Set the serial numbers back to zero | ||
1431 | */ | ||
1432 | cmd->serial_number = 0; | ||
1433 | |||
1434 | atomic_inc(&cmd->device->iodone_cnt); | ||
1435 | if (cmd->result) | ||
1436 | atomic_inc(&cmd->device->ioerr_cnt); | ||
1437 | |||
1430 | disposition = scsi_decide_disposition(cmd); | 1438 | disposition = scsi_decide_disposition(cmd); |
1431 | if (disposition != SUCCESS && | 1439 | if (disposition != SUCCESS && |
1432 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { | 1440 | time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { |
@@ -1675,6 +1683,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |||
1675 | 1683 | ||
1676 | blk_queue_prep_rq(q, scsi_prep_fn); | 1684 | blk_queue_prep_rq(q, scsi_prep_fn); |
1677 | blk_queue_softirq_done(q, scsi_softirq_done); | 1685 | blk_queue_softirq_done(q, scsi_softirq_done); |
1686 | blk_queue_rq_timed_out(q, scsi_times_out); | ||
1678 | return q; | 1687 | return q; |
1679 | } | 1688 | } |
1680 | 1689 | ||
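The one-line blk_queue_rq_timed_out() registration above is all a request_fn-based driver needs to opt into block-layer timeouts. A sketch of the same wiring for a hypothetical driver, including the default per-request allowance that scsi_alloc_queue() leaves to the upper-level drivers; all mydrv_* names are assumptions:

#include <linux/blkdev.h>

static void mydrv_request_fn(struct request_queue *q);			/* defined elsewhere */
static void mydrv_softirq_done(struct request *rq);			/* defined elsewhere */
static enum blk_eh_timer_return mydrv_timed_out(struct request *rq);	/* defined elsewhere */

static struct request_queue *mydrv_alloc_queue(spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue(mydrv_request_fn, lock);
	if (!q)
		return NULL;

	blk_queue_softirq_done(q, mydrv_softirq_done);	/* completion bottom half */
	blk_queue_rq_timed_out(q, mydrv_timed_out);	/* runs when rq->deadline passes */
	blk_queue_rq_timeout(q, 30 * HZ);		/* default rq->timeout for new requests */

	return q;
}
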
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 79f0f7511204..6cddd5dd323c 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
5 | 5 | ||
6 | struct request_queue; | 6 | struct request_queue; |
7 | struct request; | ||
7 | struct scsi_cmnd; | 8 | struct scsi_cmnd; |
8 | struct scsi_device; | 9 | struct scsi_device; |
9 | struct scsi_host_template; | 10 | struct scsi_host_template; |
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void); | |||
27 | extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); | 28 | extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); |
28 | extern int scsi_setup_command_freelist(struct Scsi_Host *shost); | 29 | extern int scsi_setup_command_freelist(struct Scsi_Host *shost); |
29 | extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); | 30 | extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); |
30 | extern void __scsi_done(struct scsi_cmnd *cmd); | ||
31 | #ifdef CONFIG_SCSI_LOGGING | 31 | #ifdef CONFIG_SCSI_LOGGING |
32 | void scsi_log_send(struct scsi_cmnd *cmd); | 32 | void scsi_log_send(struct scsi_cmnd *cmd); |
33 | void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); | 33 | void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); |
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void); | |||
49 | extern void scsi_exit_devinfo(void); | 49 | extern void scsi_exit_devinfo(void); |
50 | 50 | ||
51 | /* scsi_error.c */ | 51 | /* scsi_error.c */ |
52 | extern void scsi_add_timer(struct scsi_cmnd *, int, | 52 | extern enum blk_eh_timer_return scsi_times_out(struct request *req); |
53 | void (*)(struct scsi_cmnd *)); | ||
54 | extern int scsi_delete_timer(struct scsi_cmnd *); | ||
55 | extern void scsi_times_out(struct scsi_cmnd *cmd); | ||
56 | extern int scsi_error_handler(void *host); | 53 | extern int scsi_error_handler(void *host); |
57 | extern int scsi_decide_disposition(struct scsi_cmnd *cmd); | 54 | extern int scsi_decide_disposition(struct scsi_cmnd *cmd); |
58 | extern void scsi_eh_wakeup(struct Scsi_Host *shost); | 55 | extern void scsi_eh_wakeup(struct Scsi_Host *shost); |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index ab3c71869be5..7f618ee5ecea 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -560,12 +560,15 @@ sdev_rd_attr (vendor, "%.8s\n"); | |||
560 | sdev_rd_attr (model, "%.16s\n"); | 560 | sdev_rd_attr (model, "%.16s\n"); |
561 | sdev_rd_attr (rev, "%.4s\n"); | 561 | sdev_rd_attr (rev, "%.4s\n"); |
562 | 562 | ||
563 | /* | ||
564 | * TODO: can we make these symlinks to the block layer ones? | ||
565 | */ | ||
563 | static ssize_t | 566 | static ssize_t |
564 | sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) | 567 | sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) |
565 | { | 568 | { |
566 | struct scsi_device *sdev; | 569 | struct scsi_device *sdev; |
567 | sdev = to_scsi_device(dev); | 570 | sdev = to_scsi_device(dev); |
568 | return snprintf (buf, 20, "%d\n", sdev->timeout / HZ); | 571 | return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); |
569 | } | 572 | } |
570 | 573 | ||
571 | static ssize_t | 574 | static ssize_t |
@@ -576,7 +579,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr, | |||
576 | int timeout; | 579 | int timeout; |
577 | sdev = to_scsi_device(dev); | 580 | sdev = to_scsi_device(dev); |
578 | sscanf (buf, "%d\n", &timeout); | 581 | sscanf (buf, "%d\n", &timeout); |
579 | sdev->timeout = timeout * HZ; | 582 | blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); |
580 | return count; | 583 | return count; |
581 | } | 584 | } |
582 | static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); | 585 | static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); |
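The store path above now funnels straight into blk_queue_rq_timeout(), so the sysfs value and the queue's value can no longer drift apart. A purely illustrative variant with basic input validation; mydrv_store_timeout() is a hypothetical handler, the in-tree version keeps the plain sscanf():

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static ssize_t mydrv_store_timeout(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	int timeout;

	if (sscanf(buf, "%d", &timeout) != 1 || timeout <= 0)
		return -EINVAL;

	/* Seconds from userspace, jiffies inside the block layer. */
	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
	return count;
}
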
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 56823fd1fb84..9168883d0dfe 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -1950,15 +1950,15 @@ static int fc_vport_match(struct attribute_container *cont, | |||
1950 | * Notes: | 1950 | * Notes: |
1951 | * This routine assumes no locks are held on entry. | 1951 | * This routine assumes no locks are held on entry. |
1952 | */ | 1952 | */ |
1953 | static enum scsi_eh_timer_return | 1953 | static enum blk_eh_timer_return |
1954 | fc_timed_out(struct scsi_cmnd *scmd) | 1954 | fc_timed_out(struct scsi_cmnd *scmd) |
1955 | { | 1955 | { |
1956 | struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); | 1956 | struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); |
1957 | 1957 | ||
1958 | if (rport->port_state == FC_PORTSTATE_BLOCKED) | 1958 | if (rport->port_state == FC_PORTSTATE_BLOCKED) |
1959 | return EH_RESET_TIMER; | 1959 | return BLK_EH_RESET_TIMER; |
1960 | 1960 | ||
1961 | return EH_NOT_HANDLED; | 1961 | return BLK_EH_NOT_HANDLED; |
1962 | } | 1962 | } |
1963 | 1963 | ||
1964 | /* | 1964 | /* |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cb115d1bf228..c0cf4acda7de 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -383,7 +383,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
383 | sector_t block = rq->sector; | 383 | sector_t block = rq->sector; |
384 | sector_t threshold; | 384 | sector_t threshold; |
385 | unsigned int this_count = rq->nr_sectors; | 385 | unsigned int this_count = rq->nr_sectors; |
386 | unsigned int timeout = sdp->timeout; | ||
387 | int ret; | 386 | int ret; |
388 | 387 | ||
389 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 388 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
@@ -584,7 +583,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
584 | SCpnt->transfersize = sdp->sector_size; | 583 | SCpnt->transfersize = sdp->sector_size; |
585 | SCpnt->underflow = this_count << 9; | 584 | SCpnt->underflow = this_count << 9; |
586 | SCpnt->allowed = SD_MAX_RETRIES; | 585 | SCpnt->allowed = SD_MAX_RETRIES; |
587 | SCpnt->timeout_per_command = timeout; | ||
588 | 586 | ||
589 | /* | 587 | /* |
590 | * This indicates that the command is ready from our end to be | 588 | * This indicates that the command is ready from our end to be |
@@ -1878,11 +1876,12 @@ static int sd_probe(struct device *dev) | |||
1878 | sdkp->openers = 0; | 1876 | sdkp->openers = 0; |
1879 | sdkp->previous_state = 1; | 1877 | sdkp->previous_state = 1; |
1880 | 1878 | ||
1881 | if (!sdp->timeout) { | 1879 | if (!sdp->request_queue->rq_timeout) { |
1882 | if (sdp->type != TYPE_MOD) | 1880 | if (sdp->type != TYPE_MOD) |
1883 | sdp->timeout = SD_TIMEOUT; | 1881 | blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); |
1884 | else | 1882 | else |
1885 | sdp->timeout = SD_MOD_TIMEOUT; | 1883 | blk_queue_rq_timeout(sdp->request_queue, |
1884 | SD_MOD_TIMEOUT); | ||
1886 | } | 1885 | } |
1887 | 1886 | ||
1888 | device_initialize(&sdkp->dev); | 1887 | device_initialize(&sdkp->dev); |
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 8dbe3798d5fd..0f17009c99d2 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt) | |||
331 | 331 | ||
332 | static int sr_prep_fn(struct request_queue *q, struct request *rq) | 332 | static int sr_prep_fn(struct request_queue *q, struct request *rq) |
333 | { | 333 | { |
334 | int block=0, this_count, s_size, timeout = SR_TIMEOUT; | 334 | int block = 0, this_count, s_size; |
335 | struct scsi_cd *cd; | 335 | struct scsi_cd *cd; |
336 | struct scsi_cmnd *SCpnt; | 336 | struct scsi_cmnd *SCpnt; |
337 | struct scsi_device *sdp = q->queuedata; | 337 | struct scsi_device *sdp = q->queuedata; |
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq) | |||
461 | SCpnt->transfersize = cd->device->sector_size; | 461 | SCpnt->transfersize = cd->device->sector_size; |
462 | SCpnt->underflow = this_count << 9; | 462 | SCpnt->underflow = this_count << 9; |
463 | SCpnt->allowed = MAX_RETRIES; | 463 | SCpnt->allowed = MAX_RETRIES; |
464 | SCpnt->timeout_per_command = timeout; | ||
465 | 464 | ||
466 | /* | 465 | /* |
467 | * This indicates that the command is ready from our end to be | 466 | * This indicates that the command is ready from our end to be |
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev) | |||
620 | disk->fops = &sr_bdops; | 619 | disk->fops = &sr_bdops; |
621 | disk->flags = GENHD_FL_CD; | 620 | disk->flags = GENHD_FL_CD; |
622 | 621 | ||
622 | blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); | ||
623 | |||
623 | cd->device = sdev; | 624 | cd->device = sdev; |
624 | cd->disk = disk; | 625 | cd->disk = disk; |
625 | cd->driver = &sr_template; | 626 | cd->driver = &sr_template; |
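Both the disk and CD-ROM probes now push their default timeouts into the request queue instead of the scsi_device. A condensed sketch of the pattern, assuming a hypothetical MYDRV_TIMEOUT and helper:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define MYDRV_TIMEOUT	(30 * HZ)	/* hypothetical driver default */

static void mydrv_set_default_timeout(struct scsi_device *sdp)
{
	/* Respect a value someone already configured (sysfs, transport
	 * class); otherwise install the driver default on the queue. */
	if (!sdp->request_queue->rq_timeout)
		blk_queue_rq_timeout(sdp->request_queue, MYDRV_TIMEOUT);
}
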
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index d39107b7669b..f4e6cde1fd0d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, | |||
519 | * Shorten our settle_time if needed for | 519 | * Shorten our settle_time if needed for |
520 | * this command not to time out. | 520 | * this command not to time out. |
521 | */ | 521 | */ |
522 | if (np->s.settle_time_valid && cmd->timeout_per_command) { | 522 | if (np->s.settle_time_valid && cmd->request->timeout) { |
523 | unsigned long tlimit = jiffies + cmd->timeout_per_command; | 523 | unsigned long tlimit = jiffies + cmd->request->timeout; |
524 | tlimit -= SYM_CONF_TIMER_INTERVAL*2; | 524 | tlimit -= SYM_CONF_TIMER_INTERVAL*2; |
525 | if (time_after(np->s.settle_time, tlimit)) { | 525 | if (time_after(np->s.settle_time, tlimit)) { |
526 | np->s.settle_time = tlimit; | 526 | np->s.settle_time = tlimit; |
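Drivers that used to consult cmd->timeout_per_command, like the settle-time clamp above, now read the same value from the request. A minimal sketch; mydrv_program_hw_deadline() is an assumption:

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <scsi/scsi_cmnd.h>

extern void mydrv_program_hw_deadline(struct scsi_cmnd *cmd, unsigned long tlimit);

static void mydrv_queue_cmd(struct scsi_cmnd *cmd)
{
	/* Remaining budget for this command, in jiffies. */
	unsigned long tlimit = jiffies + cmd->request->timeout;

	mydrv_program_hw_deadline(cmd, tlimit);
}
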
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9c2549260427..067f28b80072 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -147,6 +147,7 @@ struct request { | |||
147 | 147 | ||
148 | unsigned int cmd_flags; | 148 | unsigned int cmd_flags; |
149 | enum rq_cmd_type_bits cmd_type; | 149 | enum rq_cmd_type_bits cmd_type; |
150 | unsigned long atomic_flags; | ||
150 | 151 | ||
151 | /* Maintain bio traversal state for part by part I/O submission. | 152 | /* Maintain bio traversal state for part by part I/O submission. |
152 | * hard_* are block layer internals, no driver should touch them! | 153 | * hard_* are block layer internals, no driver should touch them! |
@@ -214,6 +215,8 @@ struct request { | |||
214 | void *data; | 215 | void *data; |
215 | void *sense; | 216 | void *sense; |
216 | 217 | ||
218 | unsigned long deadline; | ||
219 | struct list_head timeout_list; | ||
217 | unsigned int timeout; | 220 | unsigned int timeout; |
218 | int retries; | 221 | int retries; |
219 | 222 | ||
@@ -266,6 +269,14 @@ typedef void (prepare_flush_fn) (struct request_queue *, struct request *); | |||
266 | typedef void (softirq_done_fn)(struct request *); | 269 | typedef void (softirq_done_fn)(struct request *); |
267 | typedef int (dma_drain_needed_fn)(struct request *); | 270 | typedef int (dma_drain_needed_fn)(struct request *); |
268 | 271 | ||
272 | enum blk_eh_timer_return { | ||
273 | BLK_EH_NOT_HANDLED, | ||
274 | BLK_EH_HANDLED, | ||
275 | BLK_EH_RESET_TIMER, | ||
276 | }; | ||
277 | |||
278 | typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); | ||
279 | |||
269 | enum blk_queue_state { | 280 | enum blk_queue_state { |
270 | Queue_down, | 281 | Queue_down, |
271 | Queue_up, | 282 | Queue_up, |
@@ -311,6 +322,7 @@ struct request_queue | |||
311 | merge_bvec_fn *merge_bvec_fn; | 322 | merge_bvec_fn *merge_bvec_fn; |
312 | prepare_flush_fn *prepare_flush_fn; | 323 | prepare_flush_fn *prepare_flush_fn; |
313 | softirq_done_fn *softirq_done_fn; | 324 | softirq_done_fn *softirq_done_fn; |
325 | rq_timed_out_fn *rq_timed_out_fn; | ||
314 | dma_drain_needed_fn *dma_drain_needed; | 326 | dma_drain_needed_fn *dma_drain_needed; |
315 | 327 | ||
316 | /* | 328 | /* |
@@ -386,6 +398,10 @@ struct request_queue | |||
386 | unsigned int nr_sorted; | 398 | unsigned int nr_sorted; |
387 | unsigned int in_flight; | 399 | unsigned int in_flight; |
388 | 400 | ||
401 | unsigned int rq_timeout; | ||
402 | struct timer_list timeout; | ||
403 | struct list_head timeout_list; | ||
404 | |||
389 | /* | 405 | /* |
390 | * sg stuff | 406 | * sg stuff |
391 | */ | 407 | */ |
@@ -770,6 +786,8 @@ extern int blk_end_request_callback(struct request *rq, int error, | |||
770 | unsigned int nr_bytes, | 786 | unsigned int nr_bytes, |
771 | int (drv_callback)(struct request *)); | 787 | int (drv_callback)(struct request *)); |
772 | extern void blk_complete_request(struct request *); | 788 | extern void blk_complete_request(struct request *); |
789 | extern void __blk_complete_request(struct request *); | ||
790 | extern void blk_abort_request(struct request *); | ||
773 | 791 | ||
774 | /* | 792 | /* |
775 | * blk_end_request() takes bytes instead of sectors as a complete size. | 793 | * blk_end_request() takes bytes instead of sectors as a complete size. |
@@ -811,6 +829,8 @@ extern void blk_queue_dma_alignment(struct request_queue *, int); | |||
811 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 829 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
812 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 830 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
813 | extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); | 831 | extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); |
832 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | ||
833 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | ||
814 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 834 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
815 | extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); | 835 | extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); |
816 | extern int blk_do_ordered(struct request_queue *, struct request **); | 836 | extern int blk_do_ordered(struct request_queue *, struct request **); |
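Besides the setup helpers, blkdev.h also gains blk_abort_request(), which lets a driver or transport force the queue's rq_timed_out_fn to run for one request ahead of its deadline, for instance when it already knows the target is gone. A hedged sketch; mydrv_fast_fail() is a hypothetical wrapper:

#include <linux/blkdev.h>

static void mydrv_fast_fail(struct request *rq)
{
	/* Treat the in-flight request as timed out right now instead of
	 * waiting for rq->deadline to expire. */
	blk_abort_request(rq);
}
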
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index f9f6e793575c..855bf95963e7 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h | |||
@@ -75,7 +75,6 @@ struct scsi_cmnd { | |||
75 | 75 | ||
76 | int retries; | 76 | int retries; |
77 | int allowed; | 77 | int allowed; |
78 | int timeout_per_command; | ||
79 | 78 | ||
80 | unsigned char prot_op; | 79 | unsigned char prot_op; |
81 | unsigned char prot_type; | 80 | unsigned char prot_type; |
@@ -86,7 +85,6 @@ struct scsi_cmnd { | |||
86 | /* These elements define the operation we are about to perform */ | 85 | /* These elements define the operation we are about to perform */ |
87 | unsigned char *cmnd; | 86 | unsigned char *cmnd; |
88 | 87 | ||
89 | struct timer_list eh_timeout; /* Used to time out the command. */ | ||
90 | 88 | ||
91 | /* These elements define the operation we ultimately want to perform */ | 89 | /* These elements define the operation we ultimately want to perform */ |
92 | struct scsi_data_buffer sdb; | 90 | struct scsi_data_buffer sdb; |
@@ -139,7 +137,6 @@ extern void scsi_put_command(struct scsi_cmnd *); | |||
139 | extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *, | 137 | extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *, |
140 | struct device *); | 138 | struct device *); |
141 | extern void scsi_finish_command(struct scsi_cmnd *cmd); | 139 | extern void scsi_finish_command(struct scsi_cmnd *cmd); |
142 | extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd); | ||
143 | 140 | ||
144 | extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, | 141 | extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, |
145 | size_t *offset, size_t *len); | 142 | size_t *offset, size_t *len); |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 44a55d1bf530..d123ca84e732 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -43,13 +43,6 @@ struct blk_queue_tags; | |||
43 | #define DISABLE_CLUSTERING 0 | 43 | #define DISABLE_CLUSTERING 0 |
44 | #define ENABLE_CLUSTERING 1 | 44 | #define ENABLE_CLUSTERING 1 |
45 | 45 | ||
46 | enum scsi_eh_timer_return { | ||
47 | EH_NOT_HANDLED, | ||
48 | EH_HANDLED, | ||
49 | EH_RESET_TIMER, | ||
50 | }; | ||
51 | |||
52 | |||
53 | struct scsi_host_template { | 46 | struct scsi_host_template { |
54 | struct module *module; | 47 | struct module *module; |
55 | const char *name; | 48 | const char *name; |
@@ -347,7 +340,7 @@ struct scsi_host_template { | |||
347 | * | 340 | * |
348 | * Status: OPTIONAL | 341 | * Status: OPTIONAL |
349 | */ | 342 | */ |
350 | enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); | 343 | enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); |
351 | 344 | ||
352 | /* | 345 | /* |
353 | * Name of proc directory | 346 | * Name of proc directory |
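With enum scsi_eh_timer_return removed, host templates declare eh_timed_out against the block-layer enum directly. A sketch of the hook and its registration for a hypothetical LLD; my_lld_port_blocked() and the template fields shown are assumptions apart from the eh_timed_out prototype above:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

extern int my_lld_port_blocked(struct scsi_device *sdev);

static enum blk_eh_timer_return my_lld_eh_timed_out(struct scsi_cmnd *scmd)
{
	if (my_lld_port_blocked(scmd->device))
		return BLK_EH_RESET_TIMER;	/* device busy reconnecting: wait longer */

	return BLK_EH_NOT_HANDLED;		/* fall through to normal SCSI error handling */
}

static struct scsi_host_template my_lld_template = {
	.name		= "my_lld",
	.eh_timed_out	= my_lld_eh_timed_out,
	/* queuecommand, this_id, sg_tablesize, etc. omitted */
};
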
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index 490bd13a634c..0de32cd4e8a7 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #define SCSI_TRANSPORT_H | 21 | #define SCSI_TRANSPORT_H |
22 | 22 | ||
23 | #include <linux/transport_class.h> | 23 | #include <linux/transport_class.h> |
24 | #include <linux/blkdev.h> | ||
24 | #include <scsi/scsi_host.h> | 25 | #include <scsi/scsi_host.h> |
25 | #include <scsi/scsi_device.h> | 26 | #include <scsi/scsi_device.h> |
26 | 27 | ||
@@ -64,7 +65,7 @@ struct scsi_transport_template { | |||
64 | * begin counting again | 65 | * begin counting again |
65 | * EH_NOT_HANDLED Begin normal error recovery | 66 | * EH_NOT_HANDLED Begin normal error recovery |
66 | */ | 67 | */ |
67 | enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); | 68 | enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); |
68 | 69 | ||
69 | /* | 70 | /* |
70 | * Used as callback for the completion of i_t_nexus request | 71 | * Used as callback for the completion of i_t_nexus request |