author    Rusty Russell <rusty@rustcorp.com.au>  2008-12-31 07:35:57 -0500
committer Rusty Russell <rusty@rustcorp.com.au>  2008-12-31 07:35:57 -0500
commit    2ca1a615835d9f4990f42102ab1f2ef434e7e89c (patch)
tree      726cf3d5f29a6c66c44e4bd68e7ebed2fd83d059 /block
parent    e12f0102ac81d660c9f801d0a0e10ccf4537a9de (diff)
parent    6a94cb73064c952255336cc57731904174b2c58f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
arch/x86/kernel/io_apic.c
Diffstat (limited to 'block')
 -rw-r--r--  block/Kconfig            |  23
 -rw-r--r--  block/as-iosched.c       |  10
 -rw-r--r--  block/blk-barrier.c      | 120
 -rw-r--r--  block/blk-core.c         |  63
 -rw-r--r--  block/blk-settings.c     |   6
 -rw-r--r--  block/blk-softirq.c      |   2
 -rw-r--r--  block/blk-sysfs.c        |   7
 -rw-r--r--  block/blk-tag.c          |   1
 -rw-r--r--  block/blk-timeout.c      |  21
 -rw-r--r--  block/cfq-iosched.c      |  26
 -rw-r--r--  block/compat_ioctl.c     |   2
 -rw-r--r--  block/deadline-iosched.c |   6
 -rw-r--r--  block/elevator.c         |  73
 -rw-r--r--  block/genhd.c            |  23
 -rw-r--r--  block/ioctl.c            |   2
 -rw-r--r--  block/noop-iosched.c     |   2
 -rw-r--r--  block/scsi_ioctl.c       |   4
17 files changed, 196 insertions, 195 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 290b219fad9c..ac0956f77785 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -24,21 +24,17 @@ menuconfig BLOCK | |||
24 | if BLOCK | 24 | if BLOCK |
25 | 25 | ||
26 | config LBD | 26 | config LBD |
27 | bool "Support for Large Block Devices" | 27 | bool "Support for large block devices and files" |
28 | depends on !64BIT | 28 | depends on !64BIT |
29 | help | 29 | help |
30 | Enable block devices of size 2TB and larger. | 30 | Enable block devices or files of size 2TB and larger. |
31 | 31 | ||
32 | This option is required to support the full capacity of large | 32 | This option is required to support the full capacity of large |
33 | (2TB+) block devices, including RAID, disk, Network Block Device, | 33 | (2TB+) block devices, including RAID, disk, Network Block Device, |
34 | Logical Volume Manager (LVM) and loopback. | 34 | Logical Volume Manager (LVM) and loopback. |
35 | 35 | ||
36 | For example, RAID devices are frequently bigger than the capacity | 36 | This option also enables support for single files larger than |
37 | of the largest individual hard drive. | 37 | 2TB. |
38 | |||
39 | This option is not required if you have individual disk drives | ||
40 | which total 2TB+ and you are not aggregating the capacity into | ||
41 | a large block device (e.g. using RAID or LVM). | ||
42 | 38 | ||
43 | If unsure, say N. | 39 | If unsure, say N. |
44 | 40 | ||
@@ -58,15 +54,6 @@ config BLK_DEV_IO_TRACE | |||
58 | 54 | ||
59 | If unsure, say N. | 55 | If unsure, say N. |
60 | 56 | ||
61 | config LSF | ||
62 | bool "Support for Large Single Files" | ||
63 | depends on !64BIT | ||
64 | help | ||
65 | Say Y here if you want to be able to handle very large files (2TB | ||
66 | and larger), otherwise say N. | ||
67 | |||
68 | If unsure, say Y. | ||
69 | |||
70 | config BLK_DEV_BSG | 57 | config BLK_DEV_BSG |
71 | bool "Block layer SG support v4 (EXPERIMENTAL)" | 58 | bool "Block layer SG support v4 (EXPERIMENTAL)" |
72 | depends on EXPERIMENTAL | 59 | depends on EXPERIMENTAL |
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 71f0abb219ee..631f6f44460a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1339,12 +1339,12 @@ static int as_may_queue(struct request_queue *q, int rw) | |||
1339 | return ret; | 1339 | return ret; |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | static void as_exit_queue(elevator_t *e) | 1342 | static void as_exit_queue(struct elevator_queue *e) |
1343 | { | 1343 | { |
1344 | struct as_data *ad = e->elevator_data; | 1344 | struct as_data *ad = e->elevator_data; |
1345 | 1345 | ||
1346 | del_timer_sync(&ad->antic_timer); | 1346 | del_timer_sync(&ad->antic_timer); |
1347 | kblockd_flush_work(&ad->antic_work); | 1347 | cancel_work_sync(&ad->antic_work); |
1348 | 1348 | ||
1349 | BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); | 1349 | BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); |
1350 | BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); | 1350 | BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); |
@@ -1409,7 +1409,7 @@ as_var_store(unsigned long *var, const char *page, size_t count) | |||
1409 | return count; | 1409 | return count; |
1410 | } | 1410 | } |
1411 | 1411 | ||
1412 | static ssize_t est_time_show(elevator_t *e, char *page) | 1412 | static ssize_t est_time_show(struct elevator_queue *e, char *page) |
1413 | { | 1413 | { |
1414 | struct as_data *ad = e->elevator_data; | 1414 | struct as_data *ad = e->elevator_data; |
1415 | int pos = 0; | 1415 | int pos = 0; |
@@ -1427,7 +1427,7 @@ static ssize_t est_time_show(elevator_t *e, char *page) | |||
1427 | } | 1427 | } |
1428 | 1428 | ||
1429 | #define SHOW_FUNCTION(__FUNC, __VAR) \ | 1429 | #define SHOW_FUNCTION(__FUNC, __VAR) \ |
1430 | static ssize_t __FUNC(elevator_t *e, char *page) \ | 1430 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
1431 | { \ | 1431 | { \ |
1432 | struct as_data *ad = e->elevator_data; \ | 1432 | struct as_data *ad = e->elevator_data; \ |
1433 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ | 1433 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ |
@@ -1440,7 +1440,7 @@ SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); | |||
1440 | #undef SHOW_FUNCTION | 1440 | #undef SHOW_FUNCTION |
1441 | 1441 | ||
1442 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ | 1442 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ |
1443 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ | 1443 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
1444 | { \ | 1444 | { \ |
1445 | struct as_data *ad = e->elevator_data; \ | 1445 | struct as_data *ad = e->elevator_data; \ |
1446 | int ret = as_var_store(__PTR, (page), count); \ | 1446 | int ret = as_var_store(__PTR, (page), count); \ |
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6e72d661ae42..8eba4e43bb0c 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -24,8 +24,8 @@ | |||
24 | int blk_queue_ordered(struct request_queue *q, unsigned ordered, | 24 | int blk_queue_ordered(struct request_queue *q, unsigned ordered, |
25 | prepare_flush_fn *prepare_flush_fn) | 25 | prepare_flush_fn *prepare_flush_fn) |
26 | { | 26 | { |
27 | if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && | 27 | if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH | |
28 | prepare_flush_fn == NULL) { | 28 | QUEUE_ORDERED_DO_POSTFLUSH))) { |
29 | printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__); | 29 | printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__); |
30 | return -EINVAL; | 30 | return -EINVAL; |
31 | } | 31 | } |
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq) | |||
88 | return QUEUE_ORDSEQ_DONE; | 88 | return QUEUE_ORDSEQ_DONE; |
89 | } | 89 | } |
90 | 90 | ||
91 | void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) | 91 | bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) |
92 | { | 92 | { |
93 | struct request *rq; | 93 | struct request *rq; |
94 | 94 | ||
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) | |||
99 | q->ordseq |= seq; | 99 | q->ordseq |= seq; |
100 | 100 | ||
101 | if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) | 101 | if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) |
102 | return; | 102 | return false; |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * Okay, sequence complete. | 105 | * Okay, sequence complete. |
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) | |||
109 | 109 | ||
110 | if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) | 110 | if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) |
111 | BUG(); | 111 | BUG(); |
112 | |||
113 | return true; | ||
112 | } | 114 | } |
113 | 115 | ||
114 | static void pre_flush_end_io(struct request *rq, int error) | 116 | static void pre_flush_end_io(struct request *rq, int error) |
@@ -134,7 +136,7 @@ static void queue_flush(struct request_queue *q, unsigned which) | |||
134 | struct request *rq; | 136 | struct request *rq; |
135 | rq_end_io_fn *end_io; | 137 | rq_end_io_fn *end_io; |
136 | 138 | ||
137 | if (which == QUEUE_ORDERED_PREFLUSH) { | 139 | if (which == QUEUE_ORDERED_DO_PREFLUSH) { |
138 | rq = &q->pre_flush_rq; | 140 | rq = &q->pre_flush_rq; |
139 | end_io = pre_flush_end_io; | 141 | end_io = pre_flush_end_io; |
140 | } else { | 142 | } else { |
@@ -151,80 +153,110 @@ static void queue_flush(struct request_queue *q, unsigned which) | |||
151 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | 153 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); |
152 | } | 154 | } |
153 | 155 | ||
154 | static inline struct request *start_ordered(struct request_queue *q, | 156 | static inline bool start_ordered(struct request_queue *q, struct request **rqp) |
155 | struct request *rq) | ||
156 | { | 157 | { |
158 | struct request *rq = *rqp; | ||
159 | unsigned skip = 0; | ||
160 | |||
157 | q->orderr = 0; | 161 | q->orderr = 0; |
158 | q->ordered = q->next_ordered; | 162 | q->ordered = q->next_ordered; |
159 | q->ordseq |= QUEUE_ORDSEQ_STARTED; | 163 | q->ordseq |= QUEUE_ORDSEQ_STARTED; |
160 | 164 | ||
161 | /* | 165 | /* |
162 | * Prep proxy barrier request. | 166 | * For an empty barrier, there's no actual BAR request, which |
167 | * in turn makes POSTFLUSH unnecessary. Mask them off. | ||
163 | */ | 168 | */ |
169 | if (!rq->hard_nr_sectors) { | ||
170 | q->ordered &= ~(QUEUE_ORDERED_DO_BAR | | ||
171 | QUEUE_ORDERED_DO_POSTFLUSH); | ||
172 | /* | ||
173 | * Empty barrier on a write-through device w/ ordered | ||
174 | * tag has no command to issue and without any command | ||
175 | * to issue, ordering by tag can't be used. Drain | ||
176 | * instead. | ||
177 | */ | ||
178 | if ((q->ordered & QUEUE_ORDERED_BY_TAG) && | ||
179 | !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) { | ||
180 | q->ordered &= ~QUEUE_ORDERED_BY_TAG; | ||
181 | q->ordered |= QUEUE_ORDERED_BY_DRAIN; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | /* stash away the original request */ | ||
164 | elv_dequeue_request(q, rq); | 186 | elv_dequeue_request(q, rq); |
165 | q->orig_bar_rq = rq; | 187 | q->orig_bar_rq = rq; |
166 | rq = &q->bar_rq; | 188 | rq = NULL; |
167 | blk_rq_init(q, rq); | ||
168 | if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) | ||
169 | rq->cmd_flags |= REQ_RW; | ||
170 | if (q->ordered & QUEUE_ORDERED_FUA) | ||
171 | rq->cmd_flags |= REQ_FUA; | ||
172 | init_request_from_bio(rq, q->orig_bar_rq->bio); | ||
173 | rq->end_io = bar_end_io; | ||
174 | 189 | ||
175 | /* | 190 | /* |
176 | * Queue ordered sequence. As we stack them at the head, we | 191 | * Queue ordered sequence. As we stack them at the head, we |
177 | * need to queue in reverse order. Note that we rely on that | 192 | * need to queue in reverse order. Note that we rely on that |
178 | * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs | 193 | * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs |
179 | * request gets inbetween ordered sequence. If this request is | 194 | * request gets inbetween ordered sequence. |
180 | * an empty barrier, we don't need to do a postflush ever since | ||
181 | * there will be no data written between the pre and post flush. | ||
182 | * Hence a single flush will suffice. | ||
183 | */ | 195 | */ |
184 | if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq)) | 196 | if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) { |
185 | queue_flush(q, QUEUE_ORDERED_POSTFLUSH); | 197 | queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); |
186 | else | 198 | rq = &q->post_flush_rq; |
187 | q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; | 199 | } else |
200 | skip |= QUEUE_ORDSEQ_POSTFLUSH; | ||
188 | 201 | ||
189 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | 202 | if (q->ordered & QUEUE_ORDERED_DO_BAR) { |
203 | rq = &q->bar_rq; | ||
204 | |||
205 | /* initialize proxy request and queue it */ | ||
206 | blk_rq_init(q, rq); | ||
207 | if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) | ||
208 | rq->cmd_flags |= REQ_RW; | ||
209 | if (q->ordered & QUEUE_ORDERED_DO_FUA) | ||
210 | rq->cmd_flags |= REQ_FUA; | ||
211 | init_request_from_bio(rq, q->orig_bar_rq->bio); | ||
212 | rq->end_io = bar_end_io; | ||
190 | 213 | ||
191 | if (q->ordered & QUEUE_ORDERED_PREFLUSH) { | 214 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); |
192 | queue_flush(q, QUEUE_ORDERED_PREFLUSH); | 215 | } else |
216 | skip |= QUEUE_ORDSEQ_BAR; | ||
217 | |||
218 | if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) { | ||
219 | queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH); | ||
193 | rq = &q->pre_flush_rq; | 220 | rq = &q->pre_flush_rq; |
194 | } else | 221 | } else |
195 | q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; | 222 | skip |= QUEUE_ORDSEQ_PREFLUSH; |
196 | 223 | ||
197 | if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0) | 224 | if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) |
198 | q->ordseq |= QUEUE_ORDSEQ_DRAIN; | ||
199 | else | ||
200 | rq = NULL; | 225 | rq = NULL; |
226 | else | ||
227 | skip |= QUEUE_ORDSEQ_DRAIN; | ||
228 | |||
229 | *rqp = rq; | ||
201 | 230 | ||
202 | return rq; | 231 | /* |
232 | * Complete skipped sequences. If whole sequence is complete, | ||
233 | * return false to tell elevator that this request is gone. | ||
234 | */ | ||
235 | return !blk_ordered_complete_seq(q, skip, 0); | ||
203 | } | 236 | } |
204 | 237 | ||
205 | int blk_do_ordered(struct request_queue *q, struct request **rqp) | 238 | bool blk_do_ordered(struct request_queue *q, struct request **rqp) |
206 | { | 239 | { |
207 | struct request *rq = *rqp; | 240 | struct request *rq = *rqp; |
208 | const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); | 241 | const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); |
209 | 242 | ||
210 | if (!q->ordseq) { | 243 | if (!q->ordseq) { |
211 | if (!is_barrier) | 244 | if (!is_barrier) |
212 | return 1; | 245 | return true; |
213 | 246 | ||
214 | if (q->next_ordered != QUEUE_ORDERED_NONE) { | 247 | if (q->next_ordered != QUEUE_ORDERED_NONE) |
215 | *rqp = start_ordered(q, rq); | 248 | return start_ordered(q, rqp); |
216 | return 1; | 249 | else { |
217 | } else { | ||
218 | /* | 250 | /* |
219 | * This can happen when the queue switches to | 251 | * Queue ordering not supported. Terminate |
220 | * ORDERED_NONE while this request is on it. | 252 | * with prejudice. |
221 | */ | 253 | */ |
222 | elv_dequeue_request(q, rq); | 254 | elv_dequeue_request(q, rq); |
223 | if (__blk_end_request(rq, -EOPNOTSUPP, | 255 | if (__blk_end_request(rq, -EOPNOTSUPP, |
224 | blk_rq_bytes(rq))) | 256 | blk_rq_bytes(rq))) |
225 | BUG(); | 257 | BUG(); |
226 | *rqp = NULL; | 258 | *rqp = NULL; |
227 | return 0; | 259 | return false; |
228 | } | 260 | } |
229 | } | 261 | } |
230 | 262 | ||
@@ -235,9 +267,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) | |||
235 | /* Special requests are not subject to ordering rules. */ | 267 | /* Special requests are not subject to ordering rules. */ |
236 | if (!blk_fs_request(rq) && | 268 | if (!blk_fs_request(rq) && |
237 | rq != &q->pre_flush_rq && rq != &q->post_flush_rq) | 269 | rq != &q->pre_flush_rq && rq != &q->post_flush_rq) |
238 | return 1; | 270 | return true; |
239 | 271 | ||
240 | if (q->ordered & QUEUE_ORDERED_TAG) { | 272 | if (q->ordered & QUEUE_ORDERED_BY_TAG) { |
241 | /* Ordered by tag. Blocking the next barrier is enough. */ | 273 | /* Ordered by tag. Blocking the next barrier is enough. */ |
242 | if (is_barrier && rq != &q->bar_rq) | 274 | if (is_barrier && rq != &q->bar_rq) |
243 | *rqp = NULL; | 275 | *rqp = NULL; |
@@ -248,7 +280,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) | |||
248 | *rqp = NULL; | 280 | *rqp = NULL; |
249 | } | 281 | } |
250 | 282 | ||
251 | return 1; | 283 | return true; |
252 | } | 284 | } |
253 | 285 | ||
254 | static void bio_end_empty_barrier(struct bio *bio, int err) | 286 | static void bio_end_empty_barrier(struct bio *bio, int err) |
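The blk-barrier.c rework above turns start_ordered() into "mark the skipped steps as already complete up front, then report whether the whole barrier sequence is done". A toy sketch of that bookkeeping follows; the constants and the "all bits set means done" rule are illustrative, not the kernel's QUEUE_ORDSEQ_* encoding.

```c
#include <stdbool.h>
#include <stdio.h>

/* Each sequence step is a bit; skipped steps are completed immediately. */
enum {
	SEQ_STARTED   = 1 << 0,
	SEQ_PREFLUSH  = 1 << 1,
	SEQ_BAR       = 1 << 2,
	SEQ_POSTFLUSH = 1 << 3,
	SEQ_DRAIN     = 1 << 4,
	SEQ_DONE      = SEQ_STARTED | SEQ_PREFLUSH | SEQ_BAR |
			SEQ_POSTFLUSH | SEQ_DRAIN,
};

static unsigned ordseq;

/* Roughly mirrors blk_ordered_complete_seq(): record finished steps
 * and tell the caller whether the whole sequence is now complete. */
static bool complete_seq(unsigned seq)
{
	ordseq |= seq;
	return ordseq == SEQ_DONE;
}

int main(void)
{
	ordseq = SEQ_STARTED;

	/* Empty barrier on a flush-capable device: no BAR/POSTFLUSH to
	 * issue and nothing to drain, so those steps are skipped. */
	bool done = complete_seq(SEQ_BAR | SEQ_POSTFLUSH | SEQ_DRAIN);
	printf("after skips, done=%d\n", done);    /* still waiting on PREFLUSH */

	done = complete_seq(SEQ_PREFLUSH);         /* preflush finished */
	printf("after preflush, done=%d\n", done);
	return 0;
}
```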
diff --git a/block/blk-core.c b/block/blk-core.c
index 561e8a1b43a4..a824e49c0d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -153,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio, | |||
153 | nbytes = bio->bi_size; | 153 | nbytes = bio->bi_size; |
154 | } | 154 | } |
155 | 155 | ||
156 | if (unlikely(rq->cmd_flags & REQ_QUIET)) | ||
157 | set_bit(BIO_QUIET, &bio->bi_flags); | ||
158 | |||
156 | bio->bi_size -= nbytes; | 159 | bio->bi_size -= nbytes; |
157 | bio->bi_sector += (nbytes >> 9); | 160 | bio->bi_sector += (nbytes >> 9); |
158 | 161 | ||
@@ -265,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q) | |||
265 | { | 268 | { |
266 | if (unlikely(blk_queue_stopped(q))) | 269 | if (unlikely(blk_queue_stopped(q))) |
267 | return; | 270 | return; |
268 | 271 | if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) | |
269 | if (!blk_remove_plug(q)) | ||
270 | return; | 272 | return; |
271 | 273 | ||
272 | q->request_fn(q); | 274 | q->request_fn(q); |
@@ -404,7 +406,8 @@ EXPORT_SYMBOL(blk_stop_queue); | |||
404 | void blk_sync_queue(struct request_queue *q) | 406 | void blk_sync_queue(struct request_queue *q) |
405 | { | 407 | { |
406 | del_timer_sync(&q->unplug_timer); | 408 | del_timer_sync(&q->unplug_timer); |
407 | kblockd_flush_work(&q->unplug_work); | 409 | del_timer_sync(&q->timeout); |
410 | cancel_work_sync(&q->unplug_work); | ||
408 | } | 411 | } |
409 | EXPORT_SYMBOL(blk_sync_queue); | 412 | EXPORT_SYMBOL(blk_sync_queue); |
410 | 413 | ||
@@ -1135,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) | |||
1135 | static int __make_request(struct request_queue *q, struct bio *bio) | 1138 | static int __make_request(struct request_queue *q, struct bio *bio) |
1136 | { | 1139 | { |
1137 | struct request *req; | 1140 | struct request *req; |
1138 | int el_ret, nr_sectors, barrier, discard, err; | 1141 | int el_ret, nr_sectors; |
1139 | const unsigned short prio = bio_prio(bio); | 1142 | const unsigned short prio = bio_prio(bio); |
1140 | const int sync = bio_sync(bio); | 1143 | const int sync = bio_sync(bio); |
1141 | int rw_flags; | 1144 | int rw_flags; |
@@ -1149,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1149 | */ | 1152 | */ |
1150 | blk_queue_bounce(q, &bio); | 1153 | blk_queue_bounce(q, &bio); |
1151 | 1154 | ||
1152 | barrier = bio_barrier(bio); | ||
1153 | if (unlikely(barrier) && bio_has_data(bio) && | ||
1154 | (q->next_ordered == QUEUE_ORDERED_NONE)) { | ||
1155 | err = -EOPNOTSUPP; | ||
1156 | goto end_io; | ||
1157 | } | ||
1158 | |||
1159 | discard = bio_discard(bio); | ||
1160 | if (unlikely(discard) && !q->prepare_discard_fn) { | ||
1161 | err = -EOPNOTSUPP; | ||
1162 | goto end_io; | ||
1163 | } | ||
1164 | |||
1165 | spin_lock_irq(q->queue_lock); | 1155 | spin_lock_irq(q->queue_lock); |
1166 | 1156 | ||
1167 | if (unlikely(barrier) || elv_queue_empty(q)) | 1157 | if (unlikely(bio_barrier(bio)) || elv_queue_empty(q)) |
1168 | goto get_rq; | 1158 | goto get_rq; |
1169 | 1159 | ||
1170 | el_ret = elv_merge(q, &req, bio); | 1160 | el_ret = elv_merge(q, &req, bio); |
@@ -1250,18 +1240,14 @@ get_rq: | |||
1250 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || | 1240 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || |
1251 | bio_flagged(bio, BIO_CPU_AFFINE)) | 1241 | bio_flagged(bio, BIO_CPU_AFFINE)) |
1252 | req->cpu = blk_cpu_to_group(smp_processor_id()); | 1242 | req->cpu = blk_cpu_to_group(smp_processor_id()); |
1253 | if (elv_queue_empty(q)) | 1243 | if (!blk_queue_nonrot(q) && elv_queue_empty(q)) |
1254 | blk_plug_device(q); | 1244 | blk_plug_device(q); |
1255 | add_request(q, req); | 1245 | add_request(q, req); |
1256 | out: | 1246 | out: |
1257 | if (sync) | 1247 | if (sync || blk_queue_nonrot(q)) |
1258 | __generic_unplug_device(q); | 1248 | __generic_unplug_device(q); |
1259 | spin_unlock_irq(q->queue_lock); | 1249 | spin_unlock_irq(q->queue_lock); |
1260 | return 0; | 1250 | return 0; |
1261 | |||
1262 | end_io: | ||
1263 | bio_endio(bio, err); | ||
1264 | return 0; | ||
1265 | } | 1251 | } |
1266 | 1252 | ||
1267 | /* | 1253 | /* |
@@ -1414,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio) | |||
1414 | char b[BDEVNAME_SIZE]; | 1400 | char b[BDEVNAME_SIZE]; |
1415 | 1401 | ||
1416 | q = bdev_get_queue(bio->bi_bdev); | 1402 | q = bdev_get_queue(bio->bi_bdev); |
1417 | if (!q) { | 1403 | if (unlikely(!q)) { |
1418 | printk(KERN_ERR | 1404 | printk(KERN_ERR |
1419 | "generic_make_request: Trying to access " | 1405 | "generic_make_request: Trying to access " |
1420 | "nonexistent block-device %s (%Lu)\n", | 1406 | "nonexistent block-device %s (%Lu)\n", |
1421 | bdevname(bio->bi_bdev, b), | 1407 | bdevname(bio->bi_bdev, b), |
1422 | (long long) bio->bi_sector); | 1408 | (long long) bio->bi_sector); |
1423 | end_io: | 1409 | goto end_io; |
1424 | bio_endio(bio, err); | ||
1425 | break; | ||
1426 | } | 1410 | } |
1427 | 1411 | ||
1428 | if (unlikely(nr_sectors > q->max_hw_sectors)) { | 1412 | if (unlikely(nr_sectors > q->max_hw_sectors)) { |
@@ -1459,14 +1443,19 @@ end_io: | |||
1459 | 1443 | ||
1460 | if (bio_check_eod(bio, nr_sectors)) | 1444 | if (bio_check_eod(bio, nr_sectors)) |
1461 | goto end_io; | 1445 | goto end_io; |
1462 | if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) || | 1446 | |
1463 | (bio_discard(bio) && !q->prepare_discard_fn)) { | 1447 | if (bio_discard(bio) && !q->prepare_discard_fn) { |
1464 | err = -EOPNOTSUPP; | 1448 | err = -EOPNOTSUPP; |
1465 | goto end_io; | 1449 | goto end_io; |
1466 | } | 1450 | } |
1467 | 1451 | ||
1468 | ret = q->make_request_fn(q, bio); | 1452 | ret = q->make_request_fn(q, bio); |
1469 | } while (ret); | 1453 | } while (ret); |
1454 | |||
1455 | return; | ||
1456 | |||
1457 | end_io: | ||
1458 | bio_endio(bio, err); | ||
1470 | } | 1459 | } |
1471 | 1460 | ||
1472 | /* | 1461 | /* |
@@ -1716,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error, | |||
1716 | while ((bio = req->bio) != NULL) { | 1705 | while ((bio = req->bio) != NULL) { |
1717 | int nbytes; | 1706 | int nbytes; |
1718 | 1707 | ||
1719 | /* | ||
1720 | * For an empty barrier request, the low level driver must | ||
1721 | * store a potential error location in ->sector. We pass | ||
1722 | * that back up in ->bi_sector. | ||
1723 | */ | ||
1724 | if (blk_empty_barrier(req)) | ||
1725 | bio->bi_sector = req->sector; | ||
1726 | |||
1727 | if (nr_bytes >= bio->bi_size) { | 1708 | if (nr_bytes >= bio->bi_size) { |
1728 | req->bio = bio->bi_next; | 1709 | req->bio = bio->bi_next; |
1729 | nbytes = bio->bi_size; | 1710 | nbytes = bio->bi_size; |
@@ -2143,12 +2124,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
2143 | } | 2124 | } |
2144 | EXPORT_SYMBOL(kblockd_schedule_work); | 2125 | EXPORT_SYMBOL(kblockd_schedule_work); |
2145 | 2126 | ||
2146 | void kblockd_flush_work(struct work_struct *work) | ||
2147 | { | ||
2148 | cancel_work_sync(work); | ||
2149 | } | ||
2150 | EXPORT_SYMBOL(kblockd_flush_work); | ||
2151 | |||
2152 | int __init blk_dev_init(void) | 2127 | int __init blk_dev_init(void) |
2153 | { | 2128 | { |
2154 | kblockd_workqueue = create_workqueue("kblockd"); | 2129 | kblockd_workqueue = create_workqueue("kblockd"); |
diff --git a/block/blk-settings.c b/block/blk-settings.c
index afa55e14e278..59fd05d9f1d5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -319,9 +319,9 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) | |||
319 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); | 319 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); |
320 | t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); | 320 | t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); |
321 | 321 | ||
322 | t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments); | 322 | t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); |
323 | t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments); | 323 | t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); |
324 | t->max_segment_size = min(t->max_segment_size, b->max_segment_size); | 324 | t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); |
325 | t->hardsect_size = max(t->hardsect_size, b->hardsect_size); | 325 | t->hardsect_size = max(t->hardsect_size, b->hardsect_size); |
326 | if (!t->queue_lock) | 326 | if (!t->queue_lock) |
327 | WARN_ON_ONCE(1); | 327 | WARN_ON_ONCE(1); |
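The blk-settings.c hunk above switches the stacked-limit merging from min() to min_not_zero(), so an unset (zero) limit in either queue no longer clobbers a real limit from the other. A minimal sketch of the idea, using a hypothetical stand-alone helper rather than the kernel's macro:

```c
#include <stdio.h>

/* Hypothetical helper mirroring the kernel's min_not_zero():
 * treat 0 as "no limit configured" and keep the other value. */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned int top = 0;      /* stacking driver left this unset */
	unsigned int bottom = 128; /* underlying device's real limit  */

	/* plain min() would pick 0 and wipe out the limit;
	 * min_not_zero() keeps the meaningful value. */
	printf("min=%u min_not_zero=%u\n",
	       top < bottom ? top : bottom, min_not_zero(top, bottom));
	return 0;
}
```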
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index e660d26ca656..ce0efc6b26dc 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -161,7 +161,7 @@ void blk_complete_request(struct request *req) | |||
161 | } | 161 | } |
162 | EXPORT_SYMBOL(blk_complete_request); | 162 | EXPORT_SYMBOL(blk_complete_request); |
163 | 163 | ||
164 | __init int blk_softirq_init(void) | 164 | static __init int blk_softirq_init(void) |
165 | { | 165 | { |
166 | int i; | 166 | int i; |
167 | 167 | ||
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 21e275d7eed9..a29cb788e408 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -88,9 +88,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) | |||
88 | unsigned long ra_kb; | 88 | unsigned long ra_kb; |
89 | ssize_t ret = queue_var_store(&ra_kb, page, count); | 89 | ssize_t ret = queue_var_store(&ra_kb, page, count); |
90 | 90 | ||
91 | spin_lock_irq(q->queue_lock); | ||
92 | q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); | 91 | q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); |
93 | spin_unlock_irq(q->queue_lock); | ||
94 | 92 | ||
95 | return ret; | 93 | return ret; |
96 | } | 94 | } |
@@ -117,10 +115,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) | |||
117 | 115 | ||
118 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) | 116 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) |
119 | return -EINVAL; | 117 | return -EINVAL; |
120 | /* | 118 | |
121 | * Take the queue lock to update the readahead and max_sectors | ||
122 | * values synchronously: | ||
123 | */ | ||
124 | spin_lock_irq(q->queue_lock); | 119 | spin_lock_irq(q->queue_lock); |
125 | q->max_sectors = max_sectors_kb << 1; | 120 | q->max_sectors = max_sectors_kb << 1; |
126 | spin_unlock_irq(q->queue_lock); | 121 | spin_unlock_irq(q->queue_lock); |
diff --git a/block/blk-tag.c b/block/blk-tag.c
index c0d419e84ce7..3c518e3303ae 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -158,7 +158,6 @@ fail: | |||
158 | /** | 158 | /** |
159 | * blk_init_tags - initialize the tag info for an external tag map | 159 | * blk_init_tags - initialize the tag info for an external tag map |
160 | * @depth: the maximum queue depth supported | 160 | * @depth: the maximum queue depth supported |
161 | * @tags: the tag to use | ||
162 | **/ | 161 | **/ |
163 | struct blk_queue_tag *blk_init_tags(int depth) | 162 | struct blk_queue_tag *blk_init_tags(int depth) |
164 | { | 163 | { |
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 69185ea9fae2..a09535377a94 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -73,11 +73,7 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr, | |||
73 | */ | 73 | */ |
74 | void blk_delete_timer(struct request *req) | 74 | void blk_delete_timer(struct request *req) |
75 | { | 75 | { |
76 | struct request_queue *q = req->q; | ||
77 | |||
78 | list_del_init(&req->timeout_list); | 76 | list_del_init(&req->timeout_list); |
79 | if (list_empty(&q->timeout_list)) | ||
80 | del_timer(&q->timeout); | ||
81 | } | 77 | } |
82 | 78 | ||
83 | static void blk_rq_timed_out(struct request *req) | 79 | static void blk_rq_timed_out(struct request *req) |
@@ -111,7 +107,7 @@ static void blk_rq_timed_out(struct request *req) | |||
111 | void blk_rq_timed_out_timer(unsigned long data) | 107 | void blk_rq_timed_out_timer(unsigned long data) |
112 | { | 108 | { |
113 | struct request_queue *q = (struct request_queue *) data; | 109 | struct request_queue *q = (struct request_queue *) data; |
114 | unsigned long flags, uninitialized_var(next), next_set = 0; | 110 | unsigned long flags, next = 0; |
115 | struct request *rq, *tmp; | 111 | struct request *rq, *tmp; |
116 | 112 | ||
117 | spin_lock_irqsave(q->queue_lock, flags); | 113 | spin_lock_irqsave(q->queue_lock, flags); |
@@ -126,15 +122,18 @@ void blk_rq_timed_out_timer(unsigned long data) | |||
126 | if (blk_mark_rq_complete(rq)) | 122 | if (blk_mark_rq_complete(rq)) |
127 | continue; | 123 | continue; |
128 | blk_rq_timed_out(rq); | 124 | blk_rq_timed_out(rq); |
125 | } else { | ||
126 | if (!next || time_after(next, rq->deadline)) | ||
127 | next = rq->deadline; | ||
129 | } | 128 | } |
130 | if (!next_set) { | ||
131 | next = rq->deadline; | ||
132 | next_set = 1; | ||
133 | } else if (time_after(next, rq->deadline)) | ||
134 | next = rq->deadline; | ||
135 | } | 129 | } |
136 | 130 | ||
137 | if (next_set && !list_empty(&q->timeout_list)) | 131 | /* |
132 | * next can never be 0 here with the list non-empty, since we always | ||
133 | * bump ->deadline to 1 so we can detect if the timer was ever added | ||
134 | * or not. See comment in blk_add_timer() | ||
135 | */ | ||
136 | if (next) | ||
138 | mod_timer(&q->timeout, round_jiffies_up(next)); | 137 | mod_timer(&q->timeout, round_jiffies_up(next)); |
139 | 138 | ||
140 | spin_unlock_irqrestore(q->queue_lock, flags); | 139 | spin_unlock_irqrestore(q->queue_lock, flags); |
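The blk-timeout.c hunk above replaces the uninitialized_var()/next_set pair with a single "earliest non-zero deadline" scan, relying on deadlines never being 0 (blk_add_timer() bumps them). A toy model of that rescan, with simplified types and plain `<` standing in for the kernel's wraparound-safe time_after():

```c
#include <stdbool.h>
#include <stdio.h>

/* Pick the earliest still-pending deadline, using 0 as "none found".
 * Deadlines are assumed to be pre-bumped so they are never 0. */
static unsigned long earliest_deadline(const unsigned long *deadline,
				       const bool *expired, int n)
{
	unsigned long next = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (expired[i])
			continue;                 /* handled by the timeout path */
		if (!next || deadline[i] < next)  /* kernel: time_after() */
			next = deadline[i];
	}
	return next;    /* 0 means: nothing pending, don't re-arm the timer */
}

int main(void)
{
	unsigned long d[] = { 300, 150, 220 };
	bool e[] = { false, true, false };

	printf("re-arm timer at %lu\n", earliest_deadline(d, e, 3));
	return 0;
}
```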
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a062eebbd15..e8525fa72823 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1136,12 +1136,8 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) | |||
1136 | if (cfq_class_idle(cfqq)) | 1136 | if (cfq_class_idle(cfqq)) |
1137 | max_dispatch = 1; | 1137 | max_dispatch = 1; |
1138 | 1138 | ||
1139 | if (cfqq->dispatched >= max_dispatch) { | 1139 | if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1) |
1140 | if (cfqd->busy_queues > 1) | 1140 | break; |
1141 | break; | ||
1142 | if (cfqq->dispatched >= 4 * max_dispatch) | ||
1143 | break; | ||
1144 | } | ||
1145 | 1141 | ||
1146 | if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) | 1142 | if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) |
1147 | break; | 1143 | break; |
@@ -1318,7 +1314,15 @@ static void cfq_exit_single_io_context(struct io_context *ioc, | |||
1318 | unsigned long flags; | 1314 | unsigned long flags; |
1319 | 1315 | ||
1320 | spin_lock_irqsave(q->queue_lock, flags); | 1316 | spin_lock_irqsave(q->queue_lock, flags); |
1321 | __cfq_exit_single_io_context(cfqd, cic); | 1317 | |
1318 | /* | ||
1319 | * Ensure we get a fresh copy of the ->key to prevent | ||
1320 | * race between exiting task and queue | ||
1321 | */ | ||
1322 | smp_read_barrier_depends(); | ||
1323 | if (cic->key) | ||
1324 | __cfq_exit_single_io_context(cfqd, cic); | ||
1325 | |||
1322 | spin_unlock_irqrestore(q->queue_lock, flags); | 1326 | spin_unlock_irqrestore(q->queue_lock, flags); |
1323 | } | 1327 | } |
1324 | } | 1328 | } |
@@ -2160,7 +2164,7 @@ out_cont: | |||
2160 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | 2164 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) |
2161 | { | 2165 | { |
2162 | del_timer_sync(&cfqd->idle_slice_timer); | 2166 | del_timer_sync(&cfqd->idle_slice_timer); |
2163 | kblockd_flush_work(&cfqd->unplug_work); | 2167 | cancel_work_sync(&cfqd->unplug_work); |
2164 | } | 2168 | } |
2165 | 2169 | ||
2166 | static void cfq_put_async_queues(struct cfq_data *cfqd) | 2170 | static void cfq_put_async_queues(struct cfq_data *cfqd) |
@@ -2178,7 +2182,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd) | |||
2178 | cfq_put_queue(cfqd->async_idle_cfqq); | 2182 | cfq_put_queue(cfqd->async_idle_cfqq); |
2179 | } | 2183 | } |
2180 | 2184 | ||
2181 | static void cfq_exit_queue(elevator_t *e) | 2185 | static void cfq_exit_queue(struct elevator_queue *e) |
2182 | { | 2186 | { |
2183 | struct cfq_data *cfqd = e->elevator_data; | 2187 | struct cfq_data *cfqd = e->elevator_data; |
2184 | struct request_queue *q = cfqd->queue; | 2188 | struct request_queue *q = cfqd->queue; |
@@ -2288,7 +2292,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) | |||
2288 | } | 2292 | } |
2289 | 2293 | ||
2290 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 2294 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
2291 | static ssize_t __FUNC(elevator_t *e, char *page) \ | 2295 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
2292 | { \ | 2296 | { \ |
2293 | struct cfq_data *cfqd = e->elevator_data; \ | 2297 | struct cfq_data *cfqd = e->elevator_data; \ |
2294 | unsigned int __data = __VAR; \ | 2298 | unsigned int __data = __VAR; \ |
@@ -2308,7 +2312,7 @@ SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | |||
2308 | #undef SHOW_FUNCTION | 2312 | #undef SHOW_FUNCTION |
2309 | 2313 | ||
2310 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 2314 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
2311 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ | 2315 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
2312 | { \ | 2316 | { \ |
2313 | struct cfq_data *cfqd = e->elevator_data; \ | 2317 | struct cfq_data *cfqd = e->elevator_data; \ |
2314 | unsigned int __data; \ | 2318 | unsigned int __data; \ |
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 67eb93cff699..f87615dea46b 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -774,9 +774,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
774 | bdi = blk_get_backing_dev_info(bdev); | 774 | bdi = blk_get_backing_dev_info(bdev); |
775 | if (bdi == NULL) | 775 | if (bdi == NULL) |
776 | return -ENOTTY; | 776 | return -ENOTTY; |
777 | lock_kernel(); | ||
778 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; | 777 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; |
779 | unlock_kernel(); | ||
780 | return 0; | 778 | return 0; |
781 | case BLKGETSIZE: | 779 | case BLKGETSIZE: |
782 | size = bdev->bd_inode->i_size; | 780 | size = bdev->bd_inode->i_size; |
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index fd311179f44c..c4d991d4adef 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -334,7 +334,7 @@ static int deadline_queue_empty(struct request_queue *q) | |||
334 | && list_empty(&dd->fifo_list[READ]); | 334 | && list_empty(&dd->fifo_list[READ]); |
335 | } | 335 | } |
336 | 336 | ||
337 | static void deadline_exit_queue(elevator_t *e) | 337 | static void deadline_exit_queue(struct elevator_queue *e) |
338 | { | 338 | { |
339 | struct deadline_data *dd = e->elevator_data; | 339 | struct deadline_data *dd = e->elevator_data; |
340 | 340 | ||
@@ -387,7 +387,7 @@ deadline_var_store(int *var, const char *page, size_t count) | |||
387 | } | 387 | } |
388 | 388 | ||
389 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ | 389 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
390 | static ssize_t __FUNC(elevator_t *e, char *page) \ | 390 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
391 | { \ | 391 | { \ |
392 | struct deadline_data *dd = e->elevator_data; \ | 392 | struct deadline_data *dd = e->elevator_data; \ |
393 | int __data = __VAR; \ | 393 | int __data = __VAR; \ |
@@ -403,7 +403,7 @@ SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); | |||
403 | #undef SHOW_FUNCTION | 403 | #undef SHOW_FUNCTION |
404 | 404 | ||
405 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 405 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
406 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ | 406 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
407 | { \ | 407 | { \ |
408 | struct deadline_data *dd = e->elevator_data; \ | 408 | struct deadline_data *dd = e->elevator_data; \ |
409 | int __data; \ | 409 | int __data; \ |
diff --git a/block/elevator.c b/block/elevator.c
index 86836dd179c0..98259eda0ef6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -65,7 +65,7 @@ DEFINE_TRACE(block_rq_issue); | |||
65 | static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) | 65 | static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) |
66 | { | 66 | { |
67 | struct request_queue *q = rq->q; | 67 | struct request_queue *q = rq->q; |
68 | elevator_t *e = q->elevator; | 68 | struct elevator_queue *e = q->elevator; |
69 | 69 | ||
70 | if (e->ops->elevator_allow_merge_fn) | 70 | if (e->ops->elevator_allow_merge_fn) |
71 | return e->ops->elevator_allow_merge_fn(q, rq, bio); | 71 | return e->ops->elevator_allow_merge_fn(q, rq, bio); |
@@ -208,13 +208,13 @@ __setup("elevator=", elevator_setup); | |||
208 | 208 | ||
209 | static struct kobj_type elv_ktype; | 209 | static struct kobj_type elv_ktype; |
210 | 210 | ||
211 | static elevator_t *elevator_alloc(struct request_queue *q, | 211 | static struct elevator_queue *elevator_alloc(struct request_queue *q, |
212 | struct elevator_type *e) | 212 | struct elevator_type *e) |
213 | { | 213 | { |
214 | elevator_t *eq; | 214 | struct elevator_queue *eq; |
215 | int i; | 215 | int i; |
216 | 216 | ||
217 | eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node); | 217 | eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); |
218 | if (unlikely(!eq)) | 218 | if (unlikely(!eq)) |
219 | goto err; | 219 | goto err; |
220 | 220 | ||
@@ -240,8 +240,9 @@ err: | |||
240 | 240 | ||
241 | static void elevator_release(struct kobject *kobj) | 241 | static void elevator_release(struct kobject *kobj) |
242 | { | 242 | { |
243 | elevator_t *e = container_of(kobj, elevator_t, kobj); | 243 | struct elevator_queue *e; |
244 | 244 | ||
245 | e = container_of(kobj, struct elevator_queue, kobj); | ||
245 | elevator_put(e->elevator_type); | 246 | elevator_put(e->elevator_type); |
246 | kfree(e->hash); | 247 | kfree(e->hash); |
247 | kfree(e); | 248 | kfree(e); |
@@ -297,7 +298,7 @@ int elevator_init(struct request_queue *q, char *name) | |||
297 | } | 298 | } |
298 | EXPORT_SYMBOL(elevator_init); | 299 | EXPORT_SYMBOL(elevator_init); |
299 | 300 | ||
300 | void elevator_exit(elevator_t *e) | 301 | void elevator_exit(struct elevator_queue *e) |
301 | { | 302 | { |
302 | mutex_lock(&e->sysfs_lock); | 303 | mutex_lock(&e->sysfs_lock); |
303 | if (e->ops->elevator_exit_fn) | 304 | if (e->ops->elevator_exit_fn) |
@@ -311,7 +312,7 @@ EXPORT_SYMBOL(elevator_exit); | |||
311 | 312 | ||
312 | static void elv_activate_rq(struct request_queue *q, struct request *rq) | 313 | static void elv_activate_rq(struct request_queue *q, struct request *rq) |
313 | { | 314 | { |
314 | elevator_t *e = q->elevator; | 315 | struct elevator_queue *e = q->elevator; |
315 | 316 | ||
316 | if (e->ops->elevator_activate_req_fn) | 317 | if (e->ops->elevator_activate_req_fn) |
317 | e->ops->elevator_activate_req_fn(q, rq); | 318 | e->ops->elevator_activate_req_fn(q, rq); |
@@ -319,7 +320,7 @@ static void elv_activate_rq(struct request_queue *q, struct request *rq) | |||
319 | 320 | ||
320 | static void elv_deactivate_rq(struct request_queue *q, struct request *rq) | 321 | static void elv_deactivate_rq(struct request_queue *q, struct request *rq) |
321 | { | 322 | { |
322 | elevator_t *e = q->elevator; | 323 | struct elevator_queue *e = q->elevator; |
323 | 324 | ||
324 | if (e->ops->elevator_deactivate_req_fn) | 325 | if (e->ops->elevator_deactivate_req_fn) |
325 | e->ops->elevator_deactivate_req_fn(q, rq); | 326 | e->ops->elevator_deactivate_req_fn(q, rq); |
@@ -338,7 +339,7 @@ static void elv_rqhash_del(struct request_queue *q, struct request *rq) | |||
338 | 339 | ||
339 | static void elv_rqhash_add(struct request_queue *q, struct request *rq) | 340 | static void elv_rqhash_add(struct request_queue *q, struct request *rq) |
340 | { | 341 | { |
341 | elevator_t *e = q->elevator; | 342 | struct elevator_queue *e = q->elevator; |
342 | 343 | ||
343 | BUG_ON(ELV_ON_HASH(rq)); | 344 | BUG_ON(ELV_ON_HASH(rq)); |
344 | hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); | 345 | hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); |
@@ -352,7 +353,7 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) | |||
352 | 353 | ||
353 | static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) | 354 | static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) |
354 | { | 355 | { |
355 | elevator_t *e = q->elevator; | 356 | struct elevator_queue *e = q->elevator; |
356 | struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; | 357 | struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; |
357 | struct hlist_node *entry, *next; | 358 | struct hlist_node *entry, *next; |
358 | struct request *rq; | 359 | struct request *rq; |
@@ -494,7 +495,7 @@ EXPORT_SYMBOL(elv_dispatch_add_tail); | |||
494 | 495 | ||
495 | int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | 496 | int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) |
496 | { | 497 | { |
497 | elevator_t *e = q->elevator; | 498 | struct elevator_queue *e = q->elevator; |
498 | struct request *__rq; | 499 | struct request *__rq; |
499 | int ret; | 500 | int ret; |
500 | 501 | ||
@@ -529,7 +530,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
529 | 530 | ||
530 | void elv_merged_request(struct request_queue *q, struct request *rq, int type) | 531 | void elv_merged_request(struct request_queue *q, struct request *rq, int type) |
531 | { | 532 | { |
532 | elevator_t *e = q->elevator; | 533 | struct elevator_queue *e = q->elevator; |
533 | 534 | ||
534 | if (e->ops->elevator_merged_fn) | 535 | if (e->ops->elevator_merged_fn) |
535 | e->ops->elevator_merged_fn(q, rq, type); | 536 | e->ops->elevator_merged_fn(q, rq, type); |
@@ -543,7 +544,7 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type) | |||
543 | void elv_merge_requests(struct request_queue *q, struct request *rq, | 544 | void elv_merge_requests(struct request_queue *q, struct request *rq, |
544 | struct request *next) | 545 | struct request *next) |
545 | { | 546 | { |
546 | elevator_t *e = q->elevator; | 547 | struct elevator_queue *e = q->elevator; |
547 | 548 | ||
548 | if (e->ops->elevator_merge_req_fn) | 549 | if (e->ops->elevator_merge_req_fn) |
549 | e->ops->elevator_merge_req_fn(q, rq, next); | 550 | e->ops->elevator_merge_req_fn(q, rq, next); |
@@ -755,14 +756,6 @@ struct request *elv_next_request(struct request_queue *q) | |||
755 | int ret; | 756 | int ret; |
756 | 757 | ||
757 | while ((rq = __elv_next_request(q)) != NULL) { | 758 | while ((rq = __elv_next_request(q)) != NULL) { |
758 | /* | ||
759 | * Kill the empty barrier place holder, the driver must | ||
760 | * not ever see it. | ||
761 | */ | ||
762 | if (blk_empty_barrier(rq)) { | ||
763 | __blk_end_request(rq, 0, blk_rq_bytes(rq)); | ||
764 | continue; | ||
765 | } | ||
766 | if (!(rq->cmd_flags & REQ_STARTED)) { | 759 | if (!(rq->cmd_flags & REQ_STARTED)) { |
767 | /* | 760 | /* |
768 | * This is the first time the device driver | 761 | * This is the first time the device driver |
@@ -854,7 +847,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) | |||
854 | 847 | ||
855 | int elv_queue_empty(struct request_queue *q) | 848 | int elv_queue_empty(struct request_queue *q) |
856 | { | 849 | { |
857 | elevator_t *e = q->elevator; | 850 | struct elevator_queue *e = q->elevator; |
858 | 851 | ||
859 | if (!list_empty(&q->queue_head)) | 852 | if (!list_empty(&q->queue_head)) |
860 | return 0; | 853 | return 0; |
@@ -868,7 +861,7 @@ EXPORT_SYMBOL(elv_queue_empty); | |||
868 | 861 | ||
869 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) | 862 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
870 | { | 863 | { |
871 | elevator_t *e = q->elevator; | 864 | struct elevator_queue *e = q->elevator; |
872 | 865 | ||
873 | if (e->ops->elevator_latter_req_fn) | 866 | if (e->ops->elevator_latter_req_fn) |
874 | return e->ops->elevator_latter_req_fn(q, rq); | 867 | return e->ops->elevator_latter_req_fn(q, rq); |
@@ -877,7 +870,7 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq) | |||
877 | 870 | ||
878 | struct request *elv_former_request(struct request_queue *q, struct request *rq) | 871 | struct request *elv_former_request(struct request_queue *q, struct request *rq) |
879 | { | 872 | { |
880 | elevator_t *e = q->elevator; | 873 | struct elevator_queue *e = q->elevator; |
881 | 874 | ||
882 | if (e->ops->elevator_former_req_fn) | 875 | if (e->ops->elevator_former_req_fn) |
883 | return e->ops->elevator_former_req_fn(q, rq); | 876 | return e->ops->elevator_former_req_fn(q, rq); |
@@ -886,7 +879,7 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq) | |||
886 | 879 | ||
887 | int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | 880 | int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) |
888 | { | 881 | { |
889 | elevator_t *e = q->elevator; | 882 | struct elevator_queue *e = q->elevator; |
890 | 883 | ||
891 | if (e->ops->elevator_set_req_fn) | 884 | if (e->ops->elevator_set_req_fn) |
892 | return e->ops->elevator_set_req_fn(q, rq, gfp_mask); | 885 | return e->ops->elevator_set_req_fn(q, rq, gfp_mask); |
@@ -897,7 +890,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | |||
897 | 890 | ||
898 | void elv_put_request(struct request_queue *q, struct request *rq) | 891 | void elv_put_request(struct request_queue *q, struct request *rq) |
899 | { | 892 | { |
900 | elevator_t *e = q->elevator; | 893 | struct elevator_queue *e = q->elevator; |
901 | 894 | ||
902 | if (e->ops->elevator_put_req_fn) | 895 | if (e->ops->elevator_put_req_fn) |
903 | e->ops->elevator_put_req_fn(rq); | 896 | e->ops->elevator_put_req_fn(rq); |
@@ -905,7 +898,7 @@ void elv_put_request(struct request_queue *q, struct request *rq) | |||
905 | 898 | ||
906 | int elv_may_queue(struct request_queue *q, int rw) | 899 | int elv_may_queue(struct request_queue *q, int rw) |
907 | { | 900 | { |
908 | elevator_t *e = q->elevator; | 901 | struct elevator_queue *e = q->elevator; |
909 | 902 | ||
910 | if (e->ops->elevator_may_queue_fn) | 903 | if (e->ops->elevator_may_queue_fn) |
911 | return e->ops->elevator_may_queue_fn(q, rw); | 904 | return e->ops->elevator_may_queue_fn(q, rw); |
@@ -928,7 +921,7 @@ EXPORT_SYMBOL(elv_abort_queue); | |||
928 | 921 | ||
929 | void elv_completed_request(struct request_queue *q, struct request *rq) | 922 | void elv_completed_request(struct request_queue *q, struct request *rq) |
930 | { | 923 | { |
931 | elevator_t *e = q->elevator; | 924 | struct elevator_queue *e = q->elevator; |
932 | 925 | ||
933 | /* | 926 | /* |
934 | * request is released from the driver, io must be done | 927 | * request is released from the driver, io must be done |
@@ -944,10 +937,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq) | |||
944 | * drained for flush sequence. | 937 | * drained for flush sequence. |
945 | */ | 938 | */ |
946 | if (unlikely(q->ordseq)) { | 939 | if (unlikely(q->ordseq)) { |
947 | struct request *first_rq = list_entry_rq(q->queue_head.next); | 940 | struct request *next = NULL; |
948 | if (q->in_flight == 0 && | 941 | |
942 | if (!list_empty(&q->queue_head)) | ||
943 | next = list_entry_rq(q->queue_head.next); | ||
944 | |||
945 | if (!q->in_flight && | ||
949 | blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && | 946 | blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && |
950 | blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) { | 947 | (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { |
951 | blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); | 948 | blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); |
952 | blk_start_queueing(q); | 949 | blk_start_queueing(q); |
953 | } | 950 | } |
@@ -959,13 +956,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq) | |||
959 | static ssize_t | 956 | static ssize_t |
960 | elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 957 | elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
961 | { | 958 | { |
962 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
963 | struct elv_fs_entry *entry = to_elv(attr); | 959 | struct elv_fs_entry *entry = to_elv(attr); |
960 | struct elevator_queue *e; | ||
964 | ssize_t error; | 961 | ssize_t error; |
965 | 962 | ||
966 | if (!entry->show) | 963 | if (!entry->show) |
967 | return -EIO; | 964 | return -EIO; |
968 | 965 | ||
966 | e = container_of(kobj, struct elevator_queue, kobj); | ||
969 | mutex_lock(&e->sysfs_lock); | 967 | mutex_lock(&e->sysfs_lock); |
970 | error = e->ops ? entry->show(e, page) : -ENOENT; | 968 | error = e->ops ? entry->show(e, page) : -ENOENT; |
971 | mutex_unlock(&e->sysfs_lock); | 969 | mutex_unlock(&e->sysfs_lock); |
@@ -976,13 +974,14 @@ static ssize_t | |||
976 | elv_attr_store(struct kobject *kobj, struct attribute *attr, | 974 | elv_attr_store(struct kobject *kobj, struct attribute *attr, |
977 | const char *page, size_t length) | 975 | const char *page, size_t length) |
978 | { | 976 | { |
979 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
980 | struct elv_fs_entry *entry = to_elv(attr); | 977 | struct elv_fs_entry *entry = to_elv(attr); |
978 | struct elevator_queue *e; | ||
981 | ssize_t error; | 979 | ssize_t error; |
982 | 980 | ||
983 | if (!entry->store) | 981 | if (!entry->store) |
984 | return -EIO; | 982 | return -EIO; |
985 | 983 | ||
984 | e = container_of(kobj, struct elevator_queue, kobj); | ||
986 | mutex_lock(&e->sysfs_lock); | 985 | mutex_lock(&e->sysfs_lock); |
987 | error = e->ops ? entry->store(e, page, length) : -ENOENT; | 986 | error = e->ops ? entry->store(e, page, length) : -ENOENT; |
988 | mutex_unlock(&e->sysfs_lock); | 987 | mutex_unlock(&e->sysfs_lock); |
@@ -1001,7 +1000,7 @@ static struct kobj_type elv_ktype = { | |||
1001 | 1000 | ||
1002 | int elv_register_queue(struct request_queue *q) | 1001 | int elv_register_queue(struct request_queue *q) |
1003 | { | 1002 | { |
1004 | elevator_t *e = q->elevator; | 1003 | struct elevator_queue *e = q->elevator; |
1005 | int error; | 1004 | int error; |
1006 | 1005 | ||
1007 | error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); | 1006 | error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); |
@@ -1019,7 +1018,7 @@ int elv_register_queue(struct request_queue *q) | |||
1019 | return error; | 1018 | return error; |
1020 | } | 1019 | } |
1021 | 1020 | ||
1022 | static void __elv_unregister_queue(elevator_t *e) | 1021 | static void __elv_unregister_queue(struct elevator_queue *e) |
1023 | { | 1022 | { |
1024 | kobject_uevent(&e->kobj, KOBJ_REMOVE); | 1023 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
1025 | kobject_del(&e->kobj); | 1024 | kobject_del(&e->kobj); |
@@ -1082,7 +1081,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); | |||
1082 | */ | 1081 | */ |
1083 | static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | 1082 | static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) |
1084 | { | 1083 | { |
1085 | elevator_t *old_elevator, *e; | 1084 | struct elevator_queue *old_elevator, *e; |
1086 | void *data; | 1085 | void *data; |
1087 | 1086 | ||
1088 | /* | 1087 | /* |
@@ -1188,7 +1187,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, | |||
1188 | 1187 | ||
1189 | ssize_t elv_iosched_show(struct request_queue *q, char *name) | 1188 | ssize_t elv_iosched_show(struct request_queue *q, char *name) |
1190 | { | 1189 | { |
1191 | elevator_t *e = q->elevator; | 1190 | struct elevator_queue *e = q->elevator; |
1192 | struct elevator_type *elv = e->elevator_type; | 1191 | struct elevator_type *elv = e->elevator_type; |
1193 | struct elevator_type *__e; | 1192 | struct elevator_type *__e; |
1194 | int len = 0; | 1193 | int len = 0; |
diff --git a/block/genhd.c b/block/genhd.c
index 2f7feda61e35..d84a7df1e2a0 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -181,6 +181,12 @@ void disk_part_iter_exit(struct disk_part_iter *piter) | |||
181 | } | 181 | } |
182 | EXPORT_SYMBOL_GPL(disk_part_iter_exit); | 182 | EXPORT_SYMBOL_GPL(disk_part_iter_exit); |
183 | 183 | ||
184 | static inline int sector_in_part(struct hd_struct *part, sector_t sector) | ||
185 | { | ||
186 | return part->start_sect <= sector && | ||
187 | sector < part->start_sect + part->nr_sects; | ||
188 | } | ||
189 | |||
184 | /** | 190 | /** |
185 | * disk_map_sector_rcu - map sector to partition | 191 | * disk_map_sector_rcu - map sector to partition |
186 | * @disk: gendisk of interest | 192 | * @disk: gendisk of interest |
@@ -199,16 +205,22 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit); | |||
199 | struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) | 205 | struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) |
200 | { | 206 | { |
201 | struct disk_part_tbl *ptbl; | 207 | struct disk_part_tbl *ptbl; |
208 | struct hd_struct *part; | ||
202 | int i; | 209 | int i; |
203 | 210 | ||
204 | ptbl = rcu_dereference(disk->part_tbl); | 211 | ptbl = rcu_dereference(disk->part_tbl); |
205 | 212 | ||
213 | part = rcu_dereference(ptbl->last_lookup); | ||
214 | if (part && sector_in_part(part, sector)) | ||
215 | return part; | ||
216 | |||
206 | for (i = 1; i < ptbl->len; i++) { | 217 | for (i = 1; i < ptbl->len; i++) { |
207 | struct hd_struct *part = rcu_dereference(ptbl->part[i]); | 218 | part = rcu_dereference(ptbl->part[i]); |
208 | 219 | ||
209 | if (part && part->start_sect <= sector && | 220 | if (part && sector_in_part(part, sector)) { |
210 | sector < part->start_sect + part->nr_sects) | 221 | rcu_assign_pointer(ptbl->last_lookup, part); |
211 | return part; | 222 | return part; |
223 | } | ||
212 | } | 224 | } |
213 | return &disk->part0; | 225 | return &disk->part0; |
214 | } | 226 | } |
@@ -888,8 +900,11 @@ static void disk_replace_part_tbl(struct gendisk *disk, | |||
888 | struct disk_part_tbl *old_ptbl = disk->part_tbl; | 900 | struct disk_part_tbl *old_ptbl = disk->part_tbl; |
889 | 901 | ||
890 | rcu_assign_pointer(disk->part_tbl, new_ptbl); | 902 | rcu_assign_pointer(disk->part_tbl, new_ptbl); |
891 | if (old_ptbl) | 903 | |
904 | if (old_ptbl) { | ||
905 | rcu_assign_pointer(old_ptbl->last_lookup, NULL); | ||
892 | call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb); | 906 | call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb); |
907 | } | ||
893 | } | 908 | } |
894 | 909 | ||
895 | /** | 910 | /** |
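The genhd.c hunks above put a one-entry last_lookup cache in front of the partition scan in disk_map_sector_rcu(), exploiting the locality of sector-to-partition lookups, and clear it when the partition table is replaced. A simplified sketch of the caching pattern; names and types are illustrative and the RCU publish/read primitives are only noted in comments:

```c
#include <stddef.h>

struct part {
	unsigned long long start, len;
};

struct part_table {
	struct part *last_lookup;   /* one-entry cache of the last hit */
	int len;
	struct part *part[16];
};

static int sector_in_part(const struct part *p, unsigned long long sector)
{
	return p->start <= sector && sector < p->start + p->len;
}

static struct part *map_sector(struct part_table *tbl, unsigned long long sector)
{
	struct part *p = tbl->last_lookup;      /* kernel: rcu_dereference() */
	int i;

	if (p && sector_in_part(p, sector))     /* fast path: repeat hit */
		return p;

	for (i = 1; i < tbl->len; i++) {        /* slow path: linear scan */
		p = tbl->part[i];
		if (p && sector_in_part(p, sector)) {
			tbl->last_lookup = p;   /* kernel: rcu_assign_pointer() */
			return p;
		}
	}
	return NULL;    /* the kernel falls back to part0 here */
}
```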
diff --git a/block/ioctl.c b/block/ioctl.c
index d03985b04d67..0f22e629b13c 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -323,9 +323,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
323 | bdi = blk_get_backing_dev_info(bdev); | 323 | bdi = blk_get_backing_dev_info(bdev); |
324 | if (bdi == NULL) | 324 | if (bdi == NULL) |
325 | return -ENOTTY; | 325 | return -ENOTTY; |
326 | lock_kernel(); | ||
327 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; | 326 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; |
328 | unlock_kernel(); | ||
329 | return 0; | 327 | return 0; |
330 | case BLKBSZSET: | 328 | case BLKBSZSET: |
331 | /* set the logical block size */ | 329 | /* set the logical block size */ |
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index c23e02969650..3a0d369d08c7 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -76,7 +76,7 @@ static void *noop_init_queue(struct request_queue *q) | |||
76 | return nd; | 76 | return nd; |
77 | } | 77 | } |
78 | 78 | ||
79 | static void noop_exit_queue(elevator_t *e) | 79 | static void noop_exit_queue(struct elevator_queue *e) |
80 | { | 80 | { |
81 | struct noop_data *nd = e->elevator_data; | 81 | struct noop_data *nd = e->elevator_data; |
82 | 82 | ||
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d0bb92cbefb9..ee9c67d7e1be 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -60,7 +60,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p) | |||
60 | 60 | ||
61 | static int sg_get_timeout(struct request_queue *q) | 61 | static int sg_get_timeout(struct request_queue *q) |
62 | { | 62 | { |
63 | return q->sg_timeout / (HZ / USER_HZ); | 63 | return jiffies_to_clock_t(q->sg_timeout); |
64 | } | 64 | } |
65 | 65 | ||
66 | static int sg_set_timeout(struct request_queue *q, int __user *p) | 66 | static int sg_set_timeout(struct request_queue *q, int __user *p) |
@@ -68,7 +68,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p) | |||
68 | int timeout, err = get_user(timeout, p); | 68 | int timeout, err = get_user(timeout, p); |
69 | 69 | ||
70 | if (!err) | 70 | if (!err) |
71 | q->sg_timeout = timeout * (HZ / USER_HZ); | 71 | q->sg_timeout = clock_t_to_jiffies(timeout); |
72 | 72 | ||
73 | return err; | 73 | return err; |
74 | } | 74 | } |
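The scsi_ioctl.c hunks above replace the open-coded HZ/USER_HZ scaling with jiffies_to_clock_t()/clock_t_to_jiffies(), which stay correct when HZ is not an integer multiple of USER_HZ. A hedged worked example; the _demo helpers below are simplified stand-ins for the kernel functions, and the HZ/USER_HZ values are just one plausible configuration.

```c
#include <stdio.h>

/* With HZ=250 and USER_HZ=100, the integer quotient HZ/USER_HZ
 * truncates to 2, so a 60-second SG_SET_TIMEOUT (6000 user ticks)
 * used to become 12000 jiffies = 48 s instead of 15000 jiffies.
 * Scaling by HZ and USER_HZ separately avoids the truncation. */
#define HZ       250
#define USER_HZ  100

static long clock_t_to_jiffies_demo(long x) { return x * HZ / USER_HZ; }
static long jiffies_to_clock_t_demo(long j) { return j * USER_HZ / HZ; }

int main(void)
{
	long user_ticks = 60 * USER_HZ;    /* 60 s requested via the ioctl */

	printf("open-coded: %ld jiffies\n", user_ticks * (HZ / USER_HZ));
	printf("helper:     %ld jiffies\n", clock_t_to_jiffies_demo(user_ticks));
	printf("round trip: %ld user ticks\n",
	       jiffies_to_clock_t_demo(clock_t_to_jiffies_demo(user_ticks)));
	return 0;
}
```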