author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:18:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:18:03 -0400
commit	bd5d435a96837c3495e62eef37cbe4cb728b79ae (patch)
tree	82aacaf5a1d220910c4b0a1088d7d2482c0d9ee0
parent	fee4b19fb3f28d17c0b9f9ea0668db5275697178 (diff)
parent	ac9fafa1243640349aa481adf473db283a695766 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Skip I/O merges when disabled
  block: add large command support
  block: replace sizeof(rq->cmd) with BLK_MAX_CDB
  ide: use blk_rq_init() to initialize the request
  block: use blk_rq_init() to initialize the request
  block: rename and export rq_init()
  block: no need to initialize rq->cmd with blk_get_request
  block: no need to initialize rq->cmd in prepare_flush_fn hook
  block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline
  block/elevator.c:elv_rq_merge_ok() mustn't be inline
  block: make queue flags non-atomic
  block: add dma alignment and padding support to blk_rq_map_kern
  unexport blk_max_pfn
  ps3disk: Remove superfluous cast
  block: make rq_init() do a full memset()
  relay: fix splice problem
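One headline item above, "block: add large command support", changes struct request so that rq->cmd is a pointer which blk_rq_init() points at the inline __cmd[BLK_MAX_CDB] array (see the include/linux/blkdev.h hunk below). A driver needing a longer CDB can in principle repoint rq->cmd at its own buffer; a hedged sketch, where setup_large_cdb(), big_cdb and the 32-byte length are illustrative assumptions rather than an API introduced by this merge:

/*
 * Sketch only: repointing rq->cmd at a driver-owned buffer for a CDB
 * larger than BLK_MAX_CDB. Names and sizes here are hypothetical.
 */
#include <linux/blkdev.h>

static void setup_large_cdb(struct request *rq, unsigned char *big_cdb,
			    unsigned short cdb_len)
{
	/* blk_rq_init() left rq->cmd pointing at rq->__cmd; repoint it */
	rq->cmd = big_cdb;	/* caller must keep big_cdb alive */
	rq->cmd_len = cdb_len;	/* e.g. 32, beyond the 16-byte inline array */
}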
-rw-r--r--	block/blk-barrier.c	11
-rw-r--r--	block/blk-core.c	75
-rw-r--r--	block/blk-map.c	21
-rw-r--r--	block/blk-merge.c	6
-rw-r--r--	block/blk-settings.c	3
-rw-r--r--	block/blk-sysfs.c	26
-rw-r--r--	block/blk-tag.c	8
-rw-r--r--	block/blk.h	1
-rw-r--r--	block/elevator.c	18
-rw-r--r--	block/scsi_ioctl.c	3
-rw-r--r--	drivers/block/loop.c	2
-rw-r--r--	drivers/block/nbd.c	1
-rw-r--r--	drivers/block/paride/pd.c	4
-rw-r--r--	drivers/block/pktcdvd.c	2
-rw-r--r--	drivers/block/ps3disk.c	4
-rw-r--r--	drivers/block/ub.c	2
-rw-r--r--	drivers/cdrom/cdrom.c	1
-rw-r--r--	drivers/ide/ide-cd.c	4
-rw-r--r--	drivers/ide/ide-cd_verbose.c	2
-rw-r--r--	drivers/ide/ide-io.c	3
-rw-r--r--	drivers/ide/ide-tape.c	2
-rw-r--r--	drivers/ide/ide-taskfile.c	3
-rw-r--r--	drivers/ide/ide.c	4
-rw-r--r--	drivers/md/dm-emc.c	2
-rw-r--r--	drivers/md/dm-mpath-hp-sw.c	1
-rw-r--r--	drivers/md/dm-mpath-rdac.c	1
-rw-r--r--	drivers/md/dm-table.c	7
-rw-r--r--	drivers/md/md.c	3
-rw-r--r--	drivers/scsi/scsi_debug.c	2
-rw-r--r--	drivers/scsi/scsi_error.c	1
-rw-r--r--	drivers/scsi/scsi_lib.c	31
-rw-r--r--	drivers/scsi/scsi_transport_sas.c	3
-rw-r--r--	drivers/scsi/sd.c	1
-rw-r--r--	fs/bio.c	90
-rw-r--r--	fs/splice.c	2
-rw-r--r--	include/linux/bio.h	2
-rw-r--r--	include/linux/blkdev.h	41
-rw-r--r--	kernel/relay.c	2
38 files changed, 275 insertions, 120 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 55c5f1fc4f1..66e55288178 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
 	if (!q->ordseq)
 		return 0;
@@ -143,10 +143,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	blk_rq_init(q, rq);
 	rq->cmd_flags = REQ_HARDBARRIER;
-	rq_init(q, rq);
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
@@ -167,14 +165,11 @@ static inline struct request *start_ordered(struct request_queue *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
-	rq->cmd_flags = 0;
-	rq_init(q, rq);
+	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
 	if (q->ordered & QUEUE_ORDERED_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
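The conversions above set the pattern for the rest of this merge: rq_init() becomes the exported blk_rq_init(), which now does a full memset(), so callers stop clearing fields one by one. A hedged sketch of the resulting idiom for an on-stack request, modeled on the drivers/block/paride/pd.c conversion below; the function name and completion plumbing are illustrative:

/*
 * Sketch: initializing an on-stack request with blk_rq_init() instead
 * of memset() plus hand-zeroed fields. Mirrors the drivers/block
 * conversions in this merge; issue_special_request() is hypothetical.
 */
#include <linux/blkdev.h>

static void issue_special_request(struct gendisk *disk, struct completion *wait)
{
	struct request rq;

	blk_rq_init(NULL, &rq);		/* full memset + sane defaults */
	rq.rq_disk = disk;
	rq.end_io_data = wait;
	rq.end_io = blk_end_sync_rq;	/* completes the waiting caller */
	blk_insert_request(disk->queue, &rq, 0, NULL);
}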
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a438a93f72..5d09f8c5602 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
+	rq->cmd = rq->__cmd;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO " cdb: ");
-		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -395,11 +378,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -407,15 +387,28 @@ void blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
 
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 	if (!rq)
 		return NULL;
 
+	blk_rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +784,6 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
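blk_run_queue() is split here along the usual locked/unlocked kernel convention: __blk_run_queue() expects q->queue_lock to be held, while blk_run_queue() acquires it. A hedged sketch of a caller that already holds the lock and can now avoid dropping and retaking it; kick_my_queue() is an illustrative name, not code from this merge:

/*
 * Sketch: from a context that already holds q->queue_lock, call the
 * new __blk_run_queue(); calling blk_run_queue() here would deadlock
 * on the second spin_lock_irqsave().
 */
#include <linux/blkdev.h>

static void kick_my_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* ... requeue or flag work that needs the lock ... */
	__blk_run_queue(q);	/* lock already held */
	spin_unlock_irqrestore(q->queue_lock, flags);
}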
diff --git a/block/blk-map.c b/block/blk-map.c
index 3c942bd6422..0b1af5a3537 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -255,10 +255,18 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @kbuf: the kernel buffer
  * @len: length of user data
  * @gfp_mask: memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
+	unsigned long kaddr;
+	unsigned int alignment;
+	int reading = rq_data_dir(rq) == READ;
+	int do_copy = 0;
 	struct bio *bio;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	kaddr = (unsigned long)kbuf;
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	do_copy = ((kaddr & alignment) || (len & alignment));
+
+	if (do_copy)
+		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	else
+		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	if (do_copy)
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	blk_rq_bio_prep(q, rq, bio);
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
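After this change blk_rq_map_kern() checks the buffer address and length against queue_dma_alignment(q) | q->dma_pad_mask and, when either is misaligned, bounces through bio_copy_kern() and tags the request REQ_COPY_USER instead of mapping the buffer in place. A hedged sketch of a caller that can now hand in an arbitrarily aligned kernel buffer; the helper name and the synchronous-issue choice are illustrative:

/*
 * Sketch: issuing a kernel buffer via blk_rq_map_kern(). Misaligned
 * buffers are copied transparently after this merge. Error handling
 * is abbreviated; send_buffer() is hypothetical.
 */
#include <linux/blkdev.h>

static int send_buffer(struct request_queue *q, void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* may pick bio_copy_kern() and set REQ_COPY_USER internally */
	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, NULL, rq, 0);	/* wait for completion */

	blk_put_request(rq);
	return err;
}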
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b5c5c4a9e3f..73b23562af2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
 	if (!rq->bio)
 		return;
 
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
 	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5713f7e5cbd..6089384ab06 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83be22..e85c4013e8a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+		set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+		clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
 
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46ce23..e176ddbe599 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		rc = blk_queue_resize_tags(q, depth);
 		if (rc)
 			return rc;
-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
 		return 0;
 	} else
 		atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	queue_flag_set(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
diff --git a/block/blk.h b/block/blk.h
index ec9120fb789..59776ab4742 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -10,7 +10,6 @@
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 
-void rq_init(struct request_queue *q, struct request *rq);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		struct bio *bio);
diff --git a/block/elevator.c b/block/elevator.c
index 88318c38360..ac5310ef827 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -69,7 +69,7 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
 	if (!rq_mergeable(rq))
 		return 0;
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
@@ -1070,7 +1073,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 */
 	spin_lock_irq(q->queue_lock);
 
-	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
 	elv_drain_elevator(q);
 
@@ -1104,7 +1107,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 1;
 
 fail_register:
@@ -1115,7 +1121,11 @@ fail_register:
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 0;
 }
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a2c3a936ebf..ffa3720e6ca 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -217,8 +217,6 @@ EXPORT_SYMBOL_GPL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, int has_write_perm)
 {
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-
 	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
 	if (blk_verify_command(rq->cmd, has_write_perm))
@@ -531,7 +529,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq->data_len = 0;
 	rq->extra_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd[0] = cmd;
 	rq->cmd[4] = data;
 	rq->cmd_len = 6;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f7f163557aa..d3a25b027ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -546,7 +546,7 @@ static void loop_unplug(struct request_queue *q)
 {
 	struct loop_device *lo = q->queuedata;
 
-	clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
 	blk_run_address_space(lo->lo_backing_file->f_mapping);
 }
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index bdba282f15e..ad98dda6037 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -577,6 +577,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 	switch (cmd) {
 	case NBD_DISCONNECT:
 		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
 		/*
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index df819f8a95a..570f3b70dce 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -716,10 +716,8 @@ static int pd_special_command(struct pd_unit *disk,
 	struct request rq;
 	int err = 0;
 
-	memset(&rq, 0, sizeof(rq));
-	rq.errors = 0;
+	blk_rq_init(NULL, &rq);
 	rq.rq_disk = disk->gd;
-	rq.ref_count = 1;
 	rq.end_io_data = &wait;
 	rq.end_io = blk_end_sync_rq;
 	blk_insert_request(disk->gd->queue, &rq, 0, func);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index fd0472996df..3ba1df93e9e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -776,8 +776,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
 	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
 	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
-	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
-		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 
 	rq->timeout = 60*HZ;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 7483f947f0e..d797e209951 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,8 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
 			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio),
-			(unsigned long)iter.bio->bi_sector);
+			bio_sectors(iter.bio), iter.bio->bi_sector);
 
 		size = bvec->bv_len;
 		buf = bvec_kmap_irq(bvec, &flags);
@@ -406,7 +405,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
 
 	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
-	memset(req->cmd, 0, sizeof(req->cmd));
 	req->cmd_type = REQ_TYPE_FLUSH;
 }
 
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 27bfe72aab5..e322cce8c12 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2399,7 +2399,7 @@ static void ub_disconnect(struct usb_interface *intf)
 	del_gendisk(lun->disk);
 	/*
 	 * I wish I could do:
-	 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
 	 * As it is, we rely on our internal poisoning and let
 	 * the upper levels to spin furiously failing all the I/O.
 	 */
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ac3829030ac..69f26eb6415 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2194,7 +2194,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		if (ret)
 			break;
 
-		memset(rq->cmd, 0, sizeof(rq->cmd));
 		rq->cmd[0] = GPCMD_READ_CD;
 		rq->cmd[1] = 1 << 2;
 		rq->cmd[2] = (lba >> 24) & 0xff;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fe9df38f62c..68e7f19dc03 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -782,7 +782,7 @@ static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
 
 	sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
 
-	memset(rq->cmd, 0, sizeof(rq->cmd));
+	memset(rq->cmd, 0, BLK_MAX_CDB);
 	rq->cmd[0] = GPCMD_SEEK;
 	put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
 
@@ -1694,7 +1694,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 	long block = (long)rq->hard_sector / (hard_sect >> 9);
 	unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
 
-	memset(rq->cmd, 0, sizeof(rq->cmd));
+	memset(rq->cmd, 0, BLK_MAX_CDB);
 
 	if (rq_data_dir(rq) == READ)
 		rq->cmd[0] = GPCMD_READ_10;
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 6ed7ca07133..6490a2dea96 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -326,7 +326,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
 
 		printk(KERN_ERR " The failed \"%s\" packet command "
 				"was: \n \"", s);
-		for (i = 0; i < sizeof(failed_command->cmd); i++)
+		for (i = 0; i < BLK_MAX_CDB; i++)
 			printk(KERN_CONT "%02x ", failed_command->cmd[i]);
 		printk(KERN_CONT "\"\n");
 	}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 788783da902..696525342e9 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1550,8 +1550,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 
 void ide_init_drive_cmd (struct request *rq)
 {
-	memset(rq, 0, sizeof(*rq));
-	rq->ref_count = 1;
+	blk_rq_init(NULL, rq);
 }
 
 EXPORT_SYMBOL(ide_init_drive_cmd);
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 54a43b04460..1e1f26331a2 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -662,7 +662,7 @@ static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
 
 static void idetape_init_rq(struct request *rq, u8 cmd)
 {
-	memset(rq, 0, sizeof(*rq));
+	blk_rq_init(NULL, rq);
 	rq->cmd_type = REQ_TYPE_SPECIAL;
 	rq->cmd[0] = cmd;
 }
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9a846a0cd5a..0c908ca3ff7 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -494,8 +494,7 @@ int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
 {
 	struct request rq;
 
-	memset(&rq, 0, sizeof(rq));
-	rq.ref_count = 1;
+	blk_rq_init(NULL, &rq);
 	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
 	rq.buffer = buf;
 
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 999584c03d9..c758dcb13b1 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -564,7 +564,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	if (!(drive->dn % 2))
 		ide_acpi_get_timing(hwif);
 
-	memset(&rq, 0, sizeof(rq));
+	blk_rq_init(NULL, &rq);
 	memset(&rqpm, 0, sizeof(rqpm));
 	memset(&args, 0, sizeof(args));
 	rq.cmd_type = REQ_TYPE_PM_SUSPEND;
@@ -602,7 +602,7 @@ static int generic_ide_resume(struct device *dev)
 
 	ide_acpi_exec_tfs(drive);
 
-	memset(&rq, 0, sizeof(rq));
+	blk_rq_init(NULL, &rq);
 	memset(&rqpm, 0, sizeof(rqpm));
 	memset(&args, 0, sizeof(args));
 	rq.cmd_type = REQ_TYPE_PM_RESUME;
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 6b91b9ab1d4..3ea5ad4b780 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -110,8 +110,6 @@ static struct request *get_failover_req(struct emc_handler *h,
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = 0;
 
-	memset(&rq->cmd, 0, BLK_MAX_CDB);
-
 	rq->timeout = EMC_FAILOVER_TIMEOUT;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
index 204bf42c944..b63a0ab37c5 100644
--- a/drivers/md/dm-mpath-hp-sw.c
+++ b/drivers/md/dm-mpath-hp-sw.c
@@ -137,7 +137,6 @@ static struct request *hp_sw_get_request(struct dm_path *path)
 	req->sense = h->sense;
 	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
 
-	memset(&req->cmd, 0, BLK_MAX_CDB);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;
 	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
index e04eb5c697f..95e77734880 100644
--- a/drivers/md/dm-mpath-rdac.c
+++ b/drivers/md/dm-mpath-rdac.c
@@ -284,7 +284,6 @@ static struct request *get_rdac_req(struct rdac_handler *h,
 		return NULL;
 	}
 
-	memset(&rq->cmd, 0, BLK_MAX_CDB);
 	rq->sense = h->sense;
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 51be5334421..73326e7c54b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -873,10 +873,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
+	/* XXX: the below will probably go bug. must ensure there can be no
+	 * concurrency on queue_flags, and use the unlocked versions...
+	 */
 	if (t->limits.no_cluster)
-		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, q);
 	else
-		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_set(QUEUE_FLAG_CLUSTER, q);
 
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6fe4a769c85..bb3e4b1cb77 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -282,7 +282,8 @@ static mddev_t * mddev_find(dev_t unit)
 		kfree(new);
 		return NULL;
 	}
-	set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
+	/* Can be unlocked because the queue is new: no concurrency */
+	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
 
 	blk_queue_make_request(new->queue, md_fail_request);
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 07103c399fe..f6600bfb5bd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1773,7 +1773,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 221f31e36d2..1eaba6cd80f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1771,6 +1771,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	unsigned long flags;
 	int rtn;
 
+	blk_rq_init(NULL, &req);
 	scmd->request = &req;
 	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 67f412bb497..d545ad1cf47 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -536,6 +536,9 @@ static void scsi_run_queue(struct request_queue *q)
 		    !shost->host_blocked && !shost->host_self_blocked &&
 		    !((shost->can_queue > 0) &&
 		      (shost->host_busy >= shost->can_queue))) {
+
+			int flagset;
+
 			/*
 			 * As long as shost is accepting commands and we have
 			 * starved queues, call blk_run_queue. scsi_request_fn
@@ -549,19 +552,20 @@ static void scsi_run_queue(struct request_queue *q)
 			sdev = list_entry(shost->starved_list.next,
 					  struct scsi_device, starved_entry);
 			list_del_init(&sdev->starved_entry);
-			spin_unlock_irqrestore(shost->host_lock, flags);
+			spin_unlock(shost->host_lock);
 
+			spin_lock(sdev->request_queue->queue_lock);
+			flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
+				  !test_bit(QUEUE_FLAG_REENTER,
+					    &sdev->request_queue->queue_flags);
+			if (flagset)
+				queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
+			__blk_run_queue(sdev->request_queue);
+			if (flagset)
+				queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
+			spin_unlock(sdev->request_queue->queue_lock);
 
-			if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-			    !test_and_set_bit(QUEUE_FLAG_REENTER,
-					      &sdev->request_queue->queue_flags)) {
-				blk_run_queue(sdev->request_queue);
-				clear_bit(QUEUE_FLAG_REENTER,
-					  &sdev->request_queue->queue_flags);
-			} else
-				blk_run_queue(sdev->request_queue);
-
-			spin_lock_irqsave(shost->host_lock, flags);
+			spin_lock(shost->host_lock);
 			if (unlikely(!list_empty(&sdev->starved_entry)))
 				/*
 				 * sdev lost a race, and was put back on the
@@ -1585,8 +1589,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
+	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 7899e3dda9b..f4461d35ffb 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -248,8 +248,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	else
 		q->queuedata = shost;
 
-	set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
-
+	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	return 0;
 }
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3cea17dd5db..01cefbb2d53 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -860,7 +860,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 
 static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
-	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->timeout = SD_TIMEOUT;
 	rq->cmd[0] = SYNCHRONIZE_CACHE;
diff --git a/fs/bio.c b/fs/bio.c
index 6e0b6f66df0..799f86deff2 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -937,6 +937,95 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 		return ERR_PTR(-EINVAL);
 }
 
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+	struct bio_vec *bvec;
+	const int read = bio_data_dir(bio) == READ;
+	char *p = bio->bi_private;
+	int i;
+
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		char *addr = page_address(bvec->bv_page);
+
+		if (read && !err)
+			memcpy(p, addr, bvec->bv_len);
+
+		__free_page(bvec->bv_page);
+		p += bvec->bv_len;
+	}
+
+	bio_put(bio);
+}
+
+/**
+ * bio_copy_kern - copy kernel address into bio
+ * @q: the struct request_queue for the bio
+ * @data: pointer to buffer to copy
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio and page allocation
+ *
+ * copy the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+			  gfp_t gfp_mask, int reading)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	struct bio *bio;
+	struct bio_vec *bvec;
+	int i, ret;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	while (len) {
+		struct page *page;
+		unsigned int bytes = PAGE_SIZE;
+
+		if (bytes > len)
+			bytes = len;
+
+		page = alloc_page(q->bounce_gfp | gfp_mask);
+		if (!page) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
+		len -= bytes;
+	}
+
+	if (!reading) {
+		void *p = data;
+
+		bio_for_each_segment(bvec, bio, i) {
+			char *addr = page_address(bvec->bv_page);
+
+			memcpy(addr, p, bvec->bv_len);
+			p += bvec->bv_len;
+		}
+	}
+
+	bio->bi_private = data;
+	bio->bi_end_io = bio_copy_kern_endio;
+	return bio;
+cleanup:
+	bio_for_each_segment(bvec, bio, i)
+		__free_page(bvec->bv_page);
+
+	bio_put(bio);
+
+	return ERR_PTR(ret);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
@@ -1273,6 +1362,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
 EXPORT_SYMBOL(bio_map_kern);
+EXPORT_SYMBOL(bio_copy_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
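bio_copy_kern() is the copying counterpart of bio_map_kern(): data is staged in freshly allocated pages, copied out of the source buffer up front for writes, and copied back by bio_copy_kern_endio() at completion for reads. A hedged sketch of the write-side behaviour; direct callers outside blk_rq_map_kern() are an illustrative assumption:

/*
 * Sketch: staging a write through bio_copy_kern(). Because reading=0
 * copies buf into the bio's pages immediately, the caller may reuse
 * buf once this returns; stage_write_bio() is hypothetical.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *stage_write_bio(struct request_queue *q,
				   void *buf, unsigned int len)
{
	struct bio *bio = bio_copy_kern(q, buf, len, GFP_KERNEL, 0);

	if (IS_ERR(bio))
		return bio;

	bio->bi_rw |= (1 << BIO_RW);	/* mark as write, as blk_rq_map_kern() does */
	return bio;
}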
diff --git a/fs/splice.c b/fs/splice.c
index eeb1a86a701..633f58ebfb7 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1075,7 +1075,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
 
 	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
 	if (ret > 0)
-		*ppos += ret;
+		*ppos = sd.pos;
 
 	return ret;
 }
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d259690863f..61c15eaf3fb 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -324,6 +324,8 @@ extern struct bio *bio_map_user_iov(struct request_queue *,
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 				gfp_t);
+extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
+				 gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5065e3d2ca..c09696a90d6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -215,8 +215,9 @@ struct request {
 	/*
 	 * when request is used as a packet command carrier
 	 */
-	unsigned int cmd_len;
-	unsigned char cmd[BLK_MAX_CDB];
+	unsigned short cmd_len;
+	unsigned char __cmd[BLK_MAX_CDB];
+	unsigned char *cmd;
 
 	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
@@ -407,6 +408,31 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
+
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+					     struct request_queue *q)
+{
+	__clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__clear_bit(flag, &q->queue_flags);
+}
 
 enum {
 	/*
@@ -451,6 +477,7 @@ enum {
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
@@ -496,17 +523,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw)
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
 
 
@@ -583,6 +610,7 @@ extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
+extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
@@ -626,6 +654,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
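The queue_flag_*() helpers added above encode the locking contract for the now non-atomic queue_flags word: the plain variants WARN_ON_ONCE() if q->queue_lock is not held, and the *_unlocked variants are reserved for queues with no possible concurrency, such as one still being set up (the md and SCSI hunks above use exactly that reasoning). A hedged sketch of both patterns; the two driver functions are illustrative:

/*
 * Sketch: the two usage patterns for the non-atomic queue flags.
 * my_setup_queue() and my_pause_queue() are hypothetical drivers.
 */
#include <linux/blkdev.h>

static void my_setup_queue(struct request_queue *q)
{
	/* brand-new queue, not visible to anyone else: unlocked variant */
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
}

static void my_pause_queue(struct request_queue *q)
{
	unsigned long flags;

	/* shared queue: hold queue_lock or queue_flag_set() will warn */
	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}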
diff --git a/kernel/relay.c b/kernel/relay.c
index bc24dcdc570..7de644cdec4 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1191,7 +1191,7 @@ static ssize_t relay_file_splice_read(struct file *in,
 	ret = 0;
 	spliced = 0;
 
-	while (len) {
+	while (len && !spliced) {
 		ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
 		if (ret < 0)
 			break;