Diffstat (limited to 'block')
-rw-r--r--  block/blk-barrier.c   14
-rw-r--r--  block/blk-core.c      80
-rw-r--r--  block/blk-map.c       21
-rw-r--r--  block/blk-merge.c      6
-rw-r--r--  block/blk-settings.c  23
-rw-r--r--  block/blk-sysfs.c     26
-rw-r--r--  block/blk-tag.c       16
-rw-r--r--  block/blk.h            1
-rw-r--r--  block/bsg.c            2
-rw-r--r--  block/elevator.c      23
-rw-r--r--  block/genhd.c          8
-rw-r--r--  block/scsi_ioctl.c     3
12 files changed, 133 insertions, 90 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 55c5f1fc4f1f..a09ead19f9c5 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n",
-		       __FUNCTION__);
+		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
 
@@ -53,7 +52,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
 	if (!q->ordseq)
 		return 0;
@@ -143,10 +142,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	blk_rq_init(q, rq);
 	rq->cmd_flags = REQ_HARDBARRIER;
-	rq_init(q, rq);
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
@@ -167,14 +164,11 @@ static inline struct request *start_ordered(struct request_queue *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
-	rq->cmd_flags = 0;
-	rq_init(q, rq);
+	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
 	if (q->ordered & QUEUE_ORDERED_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
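In the two hunks above, the barrier path no longer clears individual request fields by hand; it reinitializes the preallocated request with blk_rq_init() and then sets only the flags it needs. A minimal sketch of that pattern, using the field names from the hunks above (the wrapper function itself is hypothetical):

#include <linux/blkdev.h>

/* Hypothetical helper mirroring queue_flush() above: recycle a
 * preallocated request for a cache-flush command. */
static void prep_flush_rq(struct request_queue *q, struct request *rq,
			  rq_end_io_fn *end_io)
{
	blk_rq_init(q, rq);		/* zeroes and reinitializes every field */
	rq->cmd_flags = REQ_HARDBARRIER;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);
}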
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a438a93f723..b754a4a2f9bd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
+	rq->cmd = rq->__cmd;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
@@ -156,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 	if (unlikely(nbytes > bio->bi_size)) {
 		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-		       __FUNCTION__, nbytes, bio->bi_size);
+		       __func__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
 
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO " cdb: ");
-		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -395,11 +378,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -407,15 +387,28 @@ void blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
 
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 	if (!rq)
 		return NULL;
 
+	blk_rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +784,6 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
@@ -1573,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
 			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-			       __FUNCTION__, bio->bi_idx,
-			       bio->bi_vcnt);
+			       __func__, bio->bi_idx, bio->bi_vcnt);
 			break;
 		}
 
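Throughout blk-core.c (and the other files in this series) the open-coded set_bit()/clear_bit()/test_and_set_bit() operations on q->queue_flags are replaced by queue_flag_set(), queue_flag_clear() and queue_flag_set_unlocked(). The helpers themselves are not part of this diff (they are declared in include/linux/blkdev.h); a rough sketch of what they are assumed to look like, given that every locked variant here is called under q->queue_lock:

/* Sketch only -- the real helpers live in include/linux/blkdev.h and may
 * additionally warn when the queue lock is not held. */
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	/* callers in this patch hold q->queue_lock */
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	/* callers in this patch hold q->queue_lock */
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	/* variant used where the caller does not hold q->queue_lock,
	 * e.g. blk_cleanup_queue() above */
	__set_bit(flag, &q->queue_flags);
}

The net effect in this file is that queue flag updates made under the queue lock no longer need atomic test-and-set/clear bitops.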
diff --git a/block/blk-map.c b/block/blk-map.c
index 3c942bd6422a..0b1af5a3537c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -255,10 +255,18 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @kbuf:	the kernel buffer
  * @len:	length of user data
  * @gfp_mask:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
+	unsigned long kaddr;
+	unsigned int alignment;
+	int reading = rq_data_dir(rq) == READ;
+	int do_copy = 0;
 	struct bio *bio;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	kaddr = (unsigned long)kbuf;
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	do_copy = ((kaddr & alignment) || (len & alignment));
+
+	if (do_copy)
+		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	else
+		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	if (do_copy)
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	blk_rq_bio_prep(q, rq, bio);
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
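blk_rq_map_kern() now falls back to a copying bio (bio_copy_kern() plus REQ_COPY_USER) only when the buffer address or the length violates the queue's DMA alignment/padding mask; otherwise it keeps mapping the buffer directly. A small illustration of that decision, with an assumed 511-byte mask (real queues report it via queue_dma_alignment(q) | q->dma_pad_mask):

/* Mirrors the do_copy test in blk_rq_map_kern() above. */
static int kern_buf_needs_copy(unsigned long kaddr, unsigned int len,
			       unsigned int alignment)
{
	return (kaddr & alignment) || (len & alignment);
}

/*
 * kern_buf_needs_copy(0x1000, 1024, 511) == 0 -> bio_map_kern() is used
 * kern_buf_needs_copy(0x1003, 1024, 511) != 0 -> bio_copy_kern() + REQ_COPY_USER
 */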
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b5c5c4a9e3f0..73b23562af20 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
 	if (!rq->bio)
 		return;
 
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
 	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5713f7e5cbd2..bb93d4c32775 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -169,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -197,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
@@ -221,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
@@ -242,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_size);
 	}
 
 	q->max_segment_size = max_size;
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -358,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-		       mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
 	}
 
 	q->seg_boundary_mask = mask;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22b..e85c4013e8a2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
 
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
 
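The new "nomerges" attribute becomes a regular per-queue sysfs file. Assuming the usual /sys/block/<dev>/queue/ location for queue attributes (the path itself is not spelled out in this diff), a userspace toggle could look like:

#include <stdio.h>

/* Hypothetical example: disable request merging on sda. */
int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/nomerges", "w");

	if (!f) {
		perror("nomerges");
		return 1;
	}
	fputs("1\n", f);	/* any non-zero value sets QUEUE_FLAG_NOMERGES */
	fclose(f);
	return 0;
}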
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46ce234..de64e0429977 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
 **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
-		       __FUNCTION__, depth);
+		       __func__, depth);
 	}
 
 	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		rc = blk_queue_resize_tags(q, depth);
 		if (rc)
 			return rc;
-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
 		return 0;
 	} else
 		atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	queue_flag_set(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
 		printk(KERN_ERR "%s: tag %d is missing\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 
 	bqt->tag_index[tag] = NULL;
 
 	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 		return;
 	}
 	/*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
-		       __FUNCTION__, rq,
+		       __func__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
 		BUG();
 	}
diff --git a/block/blk.h b/block/blk.h
index ec9120fb789a..59776ab4742a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -10,7 +10,6 @@
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 
-void rq_init(struct request_queue *q, struct request *rq);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio);
diff --git a/block/bsg.c b/block/bsg.c
index d8b889d2e411..f0b7cd343216 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
diff --git a/block/elevator.c b/block/elevator.c
index 88318c383608..980f8ae147b4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -69,7 +69,7 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
 	if (!rq_mergeable(rq))
 		return 0;
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
@@ -647,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
-		       __FUNCTION__, where);
+		       __func__, where);
 		BUG();
 	}
 
@@ -805,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
 			rq->cmd_flags |= REQ_QUIET;
 			end_queued_request(rq, 0);
 		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-			       ret);
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
 		}
 	}
@@ -1070,7 +1072,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 */
 	spin_lock_irq(q->queue_lock);
 
-	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
 	elv_drain_elevator(q);
 
@@ -1104,7 +1106,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 1;
 
 fail_register:
@@ -1115,7 +1120,11 @@ fail_register:
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 0;
 }
 
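With the hunk in elv_merge() above, a bio is refused a merge as soon as the queue carries the nomerges flag, before the hash and rbtree lookups run. blk_queue_nomerges() itself is not part of this diff; since the sysfs store in blk-sysfs.c sets QUEUE_FLAG_NOMERGES, it is presumably just a flag test along these lines:

/* Sketch -- the real definition lives in include/linux/blkdev.h. */
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)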
diff --git a/block/genhd.c b/block/genhd.c
index 00da5219ee37..fda9c7a63c29 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -182,11 +182,17 @@ static int exact_lock(dev_t devt, void *data)
  */
 void add_disk(struct gendisk *disk)
 {
+	struct backing_dev_info *bdi;
+
 	disk->flags |= GENHD_FL_UP;
 	blk_register_region(MKDEV(disk->major, disk->first_minor),
 			    disk->minors, NULL, exact_match, exact_lock, disk);
 	register_disk(disk);
 	blk_register_queue(disk);
+
+	bdi = &disk->queue->backing_dev_info;
+	bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
+	sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi");
 }
 
 EXPORT_SYMBOL(add_disk);
@@ -194,6 +200,8 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
 
 void unlink_gendisk(struct gendisk *disk)
 {
+	sysfs_remove_link(&disk->dev.kobj, "bdi");
+	bdi_unregister(&disk->queue->backing_dev_info);
 	blk_unregister_queue(disk);
 	blk_unregister_region(MKDEV(disk->major, disk->first_minor),
 			      disk->minors);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index aaf07e413ffd..78199c08ec92 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -216,8 +216,6 @@ EXPORT_SYMBOL_GPL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, int has_write_perm)
 {
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-
 	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
 	if (blk_verify_command(rq->cmd, has_write_perm))
@@ -530,7 +528,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq->data_len = 0;
 	rq->extra_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd[0] = cmd;
 	rq->cmd[4] = data;
 	rq->cmd_len = 6;
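The per-call memset() of rq->cmd can be dropped here because every request now passes through blk_rq_init() (see the blk-core.c hunk above), which clears the whole request, including the embedded CDB buffer, before pointing rq->cmd at it:

/* Recap of the relevant lines from blk_rq_init() in blk-core.c above;
 * the full initialization is shown in that hunk. */
memset(rq, 0, sizeof(*rq));	/* also zeroes the embedded __cmd[] buffer */
rq->cmd = rq->__cmd;		/* so the CDB bytes start out clear */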