author     Ingo Molnar <mingo@elte.hu>    2008-12-18 15:54:49 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-12-18 15:54:49 -0500
commit     d110ec3a1e1f522e2e9dfceb9c36d6590c26d2d4
tree       86b2f8f1d22b74b05239525c55bd42e3db6afc03 /block
parent     343e9099c8152daff20e10d6269edec21da44fc0
parent     55dac3a5553b13891f0ae4bbd11920619b5436d4
Merge branch 'linus' into core/rcu
Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c  |  4
-rw-r--r--  block/blk-core.c     | 30
-rw-r--r--  block/blk-map.c      |  8
-rw-r--r--  block/blk-merge.c    | 21
-rw-r--r--  block/blk-settings.c |  4
-rw-r--r--  block/blk-timeout.c  | 20
-rw-r--r--  block/bsg.c          |  2
-rw-r--r--  block/compat_ioctl.c | 31
-rw-r--r--  block/elevator.c     |  7
-rw-r--r--  block/genhd.c        |  4
-rw-r--r--  block/ioctl.c        |  7
-rw-r--r--  block/scsi_ioctl.c   |  2
12 files changed, 82 insertions(+), 58 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 5c99ff8d2db8..6e72d661ae42 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
         /*
          * Prep proxy barrier request.
          */
-        blkdev_dequeue_request(rq);
+        elv_dequeue_request(q, rq);
         q->orig_bar_rq = rq;
         rq = &q->bar_rq;
         blk_rq_init(q, rq);
@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
                          * This can happen when the queue switches to
                          * ORDERED_NONE while this request is on it.
                          */
-                        blkdev_dequeue_request(rq);
+                        elv_dequeue_request(q, rq);
                         if (__blk_end_request(rq, -EOPNOTSUPP,
                                               blk_rq_bytes(rq)))
                                 BUG();
diff --git a/block/blk-core.c b/block/blk-core.c
index c3df30cfb3fc..c36aa98fafa3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -592,7 +592,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                            1 << QUEUE_FLAG_STACKABLE);
         q->queue_lock = lock;
 
-        blk_queue_segment_boundary(q, 0xffffffff);
+        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
         blk_queue_make_request(q, __make_request);
         blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -1637,6 +1637,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+        elv_dequeue_request(req->q, req);
+
+        /*
+         * We are now handing the request to the hardware, add the
+         * timeout handler.
+         */
+        blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
  * @error: %0 for success, < %0 for error
@@ -1770,17 +1792,17 @@ static void end_that_request_last(struct request *req, int error)
 {
         struct gendisk *disk = req->rq_disk;
 
-        blk_delete_timer(req);
-
         if (blk_rq_tagged(req))
                 blk_queue_end_tag(req->q, req);
 
         if (blk_queued_rq(req))
-                blkdev_dequeue_request(req);
+                elv_dequeue_request(req->q, req);
 
         if (unlikely(laptop_mode) && blk_fs_request(req))
                 laptop_io_completion();
 
+        blk_delete_timer(req);
+
         /*
          * Account IO completion. bar_rq isn't accounted as a normal
          * IO on queueing nor completion. Accounting the containing
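
The new kernel-doc above spells out the split between blkdev_dequeue_request() (dequeue and arm the timeout) and elv_dequeue_request() (dequeue only, for block-internal use). As a minimal illustration of how a driver would use the exported helper in this kernel version, the sketch below shows a hypothetical request_fn; example_request_fn and the "submit to hardware" step are made up, only blkdev_dequeue_request(), elv_next_request() and the hand-off semantics come from the diff above.

#include <linux/blkdev.h>

/*
 * Hypothetical driver request_fn (illustration only, not part of this
 * commit).  blkdev_dequeue_request() now arms the per-request timeout
 * itself, since elv_next_request() no longer calls blk_add_timer().
 */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                /* Hand off to the hardware; this also starts the timeout. */
                blkdev_dequeue_request(rq);

                /*
                 * ... queue rq to the device here; it is completed later,
                 * e.g. from the IRQ handler via blk_end_request() ...
                 */
        }
}
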
diff --git a/block/blk-map.c b/block/blk-map.c
index 4849fa36161e..2990447f45e9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -217,8 +217,14 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                 return PTR_ERR(bio);
 
         if (bio->bi_size != len) {
+                /*
+                 * Grab an extra reference to this bio, as bio_unmap_user()
+                 * expects to be able to drop it twice as it happens on the
+                 * normal IO completion path
+                 */
+                bio_get(bio);
                 bio_endio(bio, 0);
-                bio_unmap_user(bio);
+                __blk_rq_unmap_user(bio);
                 return -EINVAL;
         }
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8681cd6f9911..b92f5b0866b0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -222,27 +222,6 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static inline int ll_new_mergeable(struct request_queue *q,
-                                   struct request *req,
-                                   struct bio *bio)
-{
-        int nr_phys_segs = bio_phys_segments(q, bio);
-
-        if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-                req->cmd_flags |= REQ_NOMERGE;
-                if (req == q->last_merge)
-                        q->last_merge = NULL;
-                return 0;
-        }
-
-        /*
-         * A hw segment is just getting larger, bump just the phys
-         * counter.
-         */
-        req->nr_phys_segments += nr_phys_segs;
-        return 1;
-}
-
 static inline int ll_new_hw_segment(struct request_queue *q,
                                     struct request *req,
                                     struct bio *bio)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 41392fbe19ff..afa55e14e278 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         q->nr_requests = BLKDEV_MAX_RQ;
         blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
         blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
         q->make_request_fn = mfn;
         q->backing_dev_info.ra_pages =
                         (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -314,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
         /* zero is "infinity" */
         t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
         t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
         t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
         t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 972a63f848fb..69185ea9fae2 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -75,14 +75,7 @@ void blk_delete_timer(struct request *req)
 {
         struct request_queue *q = req->q;
 
-        /*
-         * Nothing to detach
-         */
-        if (!q->rq_timed_out_fn || !req->deadline)
-                return;
-
         list_del_init(&req->timeout_list);
-
         if (list_empty(&q->timeout_list))
                 del_timer(&q->timeout);
 }
@@ -142,7 +135,7 @@ void blk_rq_timed_out_timer(unsigned long data)
         }
 
         if (next_set && !list_empty(&q->timeout_list))
-                mod_timer(&q->timeout, round_jiffies(next));
+                mod_timer(&q->timeout, round_jiffies_up(next));
 
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -198,17 +191,10 @@ void blk_add_timer(struct request *req)
 
         /*
          * If the timer isn't already pending or this timeout is earlier
-         * than an existing one, modify the timer. Round to next nearest
+         * than an existing one, modify the timer. Round up to next nearest
          * second.
          */
-        expiry = round_jiffies(req->deadline);
-
-        /*
-         * We use ->deadline == 0 to detect whether a timer was added or
-         * not, so just increase to next jiffy for that specific case
-         */
-        if (unlikely(!req->deadline))
-                req->deadline = 1;
+        expiry = round_jiffies_up(req->deadline);
 
         if (!timer_pending(&q->timeout) ||
             time_before(expiry, q->timeout.expires))
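
The blk-timeout.c hunks switch from round_jiffies() to round_jiffies_up(), whose point is that the rounded expiry can never land before the request's deadline. The user-space model below is only a sketch of that rounding direction; it assumes HZ == 1000, ignores the per-CPU skew the real round_jiffies*() helpers apply, and model_round_nearest()/model_round_up() are made-up names.

#include <stdio.h>

#define HZ 1000UL       /* assumed tick rate for this illustration */

/* Rounds to the nearest whole second; may return a value before j. */
static unsigned long model_round_nearest(unsigned long j)
{
        return ((j + HZ / 2) / HZ) * HZ;
}

/* Rounds up to the next whole second; never returns a value before j. */
static unsigned long model_round_up(unsigned long j)
{
        return ((j + HZ - 1) / HZ) * HZ;
}

int main(void)
{
        unsigned long deadline = 10400; /* 10.4 s worth of jiffies */

        /* prints "nearest=10000 up=11000": only the second is safe for a timeout */
        printf("nearest=%lu up=%lu\n",
               model_round_nearest(deadline), model_round_up(deadline));
        return 0;
}
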
diff --git a/block/bsg.c b/block/bsg.c
index e8bd2475682a..e73e50daf3d0 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
         rq->timeout = q->sg_timeout;
         if (!rq->timeout)
                 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+        if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+                rq->timeout = BLK_MIN_SG_TIMEOUT;
 
         return 0;
 }
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 3d3e7a46f38c..67eb93cff699 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -677,6 +677,29 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
         case DVD_WRITE_STRUCT:
         case DVD_AUTH:
                 arg = (unsigned long)compat_ptr(arg);
+        /* These intepret arg as an unsigned long, not as a pointer,
+         * so we must not do compat_ptr() conversion. */
+        case HDIO_SET_MULTCOUNT:
+        case HDIO_SET_UNMASKINTR:
+        case HDIO_SET_KEEPSETTINGS:
+        case HDIO_SET_32BIT:
+        case HDIO_SET_NOWERR:
+        case HDIO_SET_DMA:
+        case HDIO_SET_PIO_MODE:
+        case HDIO_SET_NICE:
+        case HDIO_SET_WCACHE:
+        case HDIO_SET_ACOUSTIC:
+        case HDIO_SET_BUSSTATE:
+        case HDIO_SET_ADDRESS:
+        case CDROMEJECT_SW:
+        case CDROM_SET_OPTIONS:
+        case CDROM_CLEAR_OPTIONS:
+        case CDROM_SELECT_SPEED:
+        case CDROM_SELECT_DISC:
+        case CDROM_MEDIA_CHANGED:
+        case CDROM_DRIVE_STATUS:
+        case CDROM_LOCKDOOR:
+        case CDROM_DEBUG:
                 break;
         default:
                 /* unknown ioctl number */
@@ -699,8 +722,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
         struct backing_dev_info *bdi;
         loff_t size;
 
+        /*
+         * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+         * to updated it before every ioctl.
+         */
         if (file->f_flags & O_NDELAY)
-                mode |= FMODE_NDELAY_NOW;
+                mode |= FMODE_NDELAY;
+        else
+                mode &= ~FMODE_NDELAY;
 
         switch (cmd) {
         case HDIO_GETGEO:
diff --git a/block/elevator.c b/block/elevator.c
index 59173a69ebdf..a6951f76ba0c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -773,12 +773,6 @@ struct request *elv_next_request(struct request_queue *q)
                          */
                         rq->cmd_flags |= REQ_STARTED;
                         blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
-
-                        /*
-                         * We are now handing the request to the hardware,
-                         * add the timeout handler
-                         */
-                        blk_add_timer(rq);
                 }
 
                 if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -851,7 +845,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
         if (blk_account_rq(rq))
                 q->in_flight++;
 }
-EXPORT_SYMBOL(elv_dequeue_request);
 
 int elv_queue_empty(struct request_queue *q)
 {
diff --git a/block/genhd.c b/block/genhd.c
index 4e5e7493f676..2f7feda61e35 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -768,6 +768,8 @@ static int __init genhd_device_init(void)
         bdev_map = kobj_map_init(base_probe, &block_class_lock);
         blk_dev_init();
 
+        register_blkdev(BLOCK_EXT_MAJOR, "blkext");
+
 #ifndef CONFIG_SYSFS_DEPRECATED
         /* create top-level block dir */
         block_depr = kobject_create_and_add("block", NULL);
@@ -1100,6 +1102,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
                         kfree(disk);
                         return NULL;
                 }
+                disk->node_id = node_id;
                 if (disk_expand_part_tbl(disk, 0)) {
                         free_part_stats(&disk->part0);
                         kfree(disk);
@@ -1114,7 +1117,6 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
                 device_initialize(disk_to_dev(disk));
                 INIT_WORK(&disk->async_notify,
                           media_change_notify_thread);
-                disk->node_id = node_id;
         }
         return disk;
 }
diff --git a/block/ioctl.c b/block/ioctl.c
index c832d639b6e2..d03985b04d67 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -18,7 +18,6 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
         struct disk_part_iter piter;
         long long start, length;
         int partno;
-        int err;
 
         if (!capable(CAP_SYS_ADMIN))
                 return -EACCES;
@@ -61,10 +60,10 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
                         disk_part_iter_exit(&piter);
 
                         /* all seems OK */
-                        err = add_partition(disk, partno, start, length,
-                                            ADDPART_FLAG_NONE);
+                        part = add_partition(disk, partno, start, length,
+                                             ADDPART_FLAG_NONE);
                         mutex_unlock(&bdev->bd_mutex);
-                        return err;
+                        return IS_ERR(part) ? PTR_ERR(part) : 0;
                 case BLKPG_DEL_PARTITION:
                         part = disk_get_part(disk, partno);
                         if (!part)
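
The ioctl.c hunk works because add_partition() now returns a pointer (or an ERR_PTR()-encoded errno) rather than an int, and blkpg_ioctl() folds that back into a return code with IS_ERR()/PTR_ERR(). A minimal sketch of that error-pointer convention, with made-up names (struct example, example_alloc, example_caller) standing in for the real partition code:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example {
        int id;
};

/* Return a real object, or an errno encoded in the pointer. */
static struct example *example_alloc(int id)
{
        struct example *e;

        if (id < 0)
                return ERR_PTR(-EINVAL);

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return ERR_PTR(-ENOMEM);

        e->id = id;
        return e;
}

/* Fold the pointer back into an int, as blkpg_ioctl() now does. */
static int example_caller(int id)
{
        struct example *e = example_alloc(id);

        return IS_ERR(e) ? PTR_ERR(e) : 0;
}
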
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5963cf91a3a0..d0bb92cbefb9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
         rq->timeout = q->sg_timeout;
         if (!rq->timeout)
                 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+        if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+                rq->timeout = BLK_MIN_SG_TIMEOUT;
 
         return 0;
 }