path: root/block/blk-core.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 11:18:03 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 11:18:03 -0400
commit    bd5d435a96837c3495e62eef37cbe4cb728b79ae (patch)
tree      82aacaf5a1d220910c4b0a1088d7d2482c0d9ee0 /block/blk-core.c
parent    fee4b19fb3f28d17c0b9f9ea0668db5275697178 (diff)
parent    ac9fafa1243640349aa481adf473db283a695766 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Skip I/O merges when disabled
  block: add large command support
  block: replace sizeof(rq->cmd) with BLK_MAX_CDB
  ide: use blk_rq_init() to initialize the request
  block: use blk_rq_init() to initialize the request
  block: rename and export rq_init()
  block: no need to initialize rq->cmd with blk_get_request
  block: no need to initialize rq->cmd in prepare_flush_fn hook
  block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline
  block/elevator.c:elv_rq_merge_ok() mustn't be inline
  block: make queue flags non-atomic
  block: add dma alignment and padding support to blk_rq_map_kern
  unexport blk_max_pfn
  ps3disk: Remove superfluous cast
  block: make rq_init() do a full memset()
  relay: fix splice problem
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 75
1 files changed, 34 insertions(+), 41 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a438a93f723..5d09f8c56024 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
+	rq->cmd = rq->__cmd;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
 		unsigned int nbytes, int error)
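The hunk above combines "block: rename and export rq_init()" and "block: make rq_init() do a full memset()" from the shortlog: field-by-field clearing is replaced by one memset(), after which only the fields needing non-zero defaults are re-initialized. Since the symbol is now exported, a driver can initialize a request it owns itself; a minimal sketch follows (hypothetical driver code, not part of this diff):

#include <linux/blkdev.h>

/* Hypothetical driver helper: prepare a caller-owned request for a
 * special command. blk_rq_init() zeroes the whole structure, resets
 * the list heads, and points rq->cmd back at the embedded __cmd[]
 * buffer, so no field-by-field cleanup is needed here. */
static void my_driver_prep_request(struct request_queue *q, struct request *rq)
{
	blk_rq_init(q, rq);			/* full memset() + baseline fields */
	rq->cmd_type = REQ_TYPE_SPECIAL;	/* assumed command type for the example */
}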
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO " cdb: ");
-		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
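This hunk is "block: replace sizeof(rq->cmd) with BLK_MAX_CDB". With large command support, rq->cmd is a pointer whose default target is the embedded rq->__cmd[BLK_MAX_CDB] array, so sizeof(rq->cmd) would now measure the pointer rather than the buffer. A hedged sketch of how an oversized CDB could be attached (illustrative only, assuming a caller-provided cdb[] of cdb_len bytes; buffer lifetime is the caller's responsibility):

/* CDBs up to BLK_MAX_CDB fit the embedded buffer that blk_rq_init()
 * already pointed rq->cmd at; longer ones need their own allocation. */
if (cdb_len <= BLK_MAX_CDB) {
	memcpy(rq->cmd, cdb, cdb_len);
} else {
	u8 *buf = kzalloc(cdb_len, GFP_KERNEL);	/* hypothetical allocation */

	if (!buf)
		return -ENOMEM;
	memcpy(buf, cdb, cdb_len);
	rq->cmd = buf;				/* freed later by the caller */
}
rq->cmd_len = cdb_len;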
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
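The two plug hunks above, and most of the flag changes below, belong to "block: make queue flags non-atomic": every writer of q->queue_flags already holds q->queue_lock, so the atomic test_and_set_bit()/test_and_clear_bit() calls can become plain bit ops. The queue_flag_set()/queue_flag_clear()/queue_flag_set_unlocked() helpers used below live in blkdev.h, roughly along these lines (a sketch of the idea, not the verbatim header, which may also assert the lock is held):

/* Sketch: non-atomic queue-flag helpers. */
static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);	/* caller holds queue_lock */
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);	/* caller holds queue_lock */
}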
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -395,11 +378,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -407,15 +387,28 @@ void blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
 
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
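Here blk_run_queue() is split so that locking and work are separable: __blk_run_queue() assumes q->queue_lock is already held, while blk_run_queue() keeps the old take-lock-and-run behaviour. A caller already inside the lock can now kick the queue without a spurious unlock/relock round-trip; a sketch of a hypothetical call site (the list manipulation is illustrative, not from this diff):

/* Caller already holds the queue lock for its own bookkeeping and
 * then runs the queue without dropping it. */
spin_lock_irqsave(q->queue_lock, flags);
list_add_tail(&rq->queuelist, &q->queue_head);	/* illustrative */
__blk_run_queue(q);				/* lock stays held throughout */
spin_unlock_irqrestore(q->queue_lock, flags);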
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 	if (!rq)
 		return NULL;
 
+	blk_rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +784,6 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
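The last two hunks relocate the (re)initialization: blk_alloc_request() now calls blk_rq_init() immediately after the mempool allocation, so get_request() can drop its rq_init() call, and "no need to initialize rq->cmd with blk_get_request" follows for free. The resulting flow, roughly (a sketch abbreviated from the surrounding code, not the full function):

/* Post-merge allocation path: the request comes out of the mempool
 * fully reset before any request-specific bits are layered on top. */
static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);			/* full memset() + defaults */
	rq->cmd_flags = rw | REQ_ALLOCED;	/* then the allocation flags */
	/* ... elevator private-data setup elided ... */
	return rq;
}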