path: root/block
Diffstat (limited to 'block')
-rw-r--r--	block/elevator.c	114
-rw-r--r--	block/ll_rw_blk.c	 53
-rw-r--r--	block/scsi_ioctl.c	  3
3 files changed, 82 insertions, 88 deletions
diff --git a/block/elevator.c b/block/elevator.c
index c9f424d5399c..24b702d649a9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 
 static char chosen_elevator[16];
 
-static void elevator_setup_default(void)
+static int __init elevator_setup(char *str)
 {
-	struct elevator_type *e;
-
-	/*
-	 * If default has not been set, use the compiled-in selection.
-	 */
-	if (!chosen_elevator[0])
-		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
 	/*
 	 * Be backwards-compatible with previous kernels, so users
 	 * won't get the wrong elevator.
 	 */
-	if (!strcmp(chosen_elevator, "as"))
+	if (!strcmp(str, "as"))
 		strcpy(chosen_elevator, "anticipatory");
-
-	/*
-	 * If the given scheduler is not available, fall back to the default
-	 */
-	if ((e = elevator_find(chosen_elevator)))
-		elevator_put(e);
 	else
-		strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-}
-
-static int __init elevator_setup(char *str)
-{
-	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 	return 0;
 }
 
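The rewritten elevator_setup() above no longer resolves a default at parse time: it only records the string passed via the "elevator=" boot parameter, translating the legacy "as" name to "anticipatory", and leaves validation and fallback to elevator_init(). A minimal userspace sketch of just this parsing step (buffer size and names taken from the patch; main() is added purely for illustration):

#include <stdio.h>
#include <string.h>

static char chosen_elevator[16];

/* remember the requested scheduler name, mapping the legacy "as" alias;
 * no validation or fallback happens here */
static int elevator_setup(const char *str)
{
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 0;
}

int main(void)
{
	elevator_setup("as");
	printf("%s\n", chosen_elevator);	/* prints "anticipatory" */
	elevator_setup("deadline");
	printf("%s\n", chosen_elevator);	/* prints "deadline" */
	return 0;
}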
@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
 	q->end_sector = 0;
 	q->boundary_rq = NULL;
 
-	elevator_setup_default();
+	if (name && !(e = elevator_get(name)))
+		return -EINVAL;
 
-	if (!name)
-		name = chosen_elevator;
+	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
+		printk("I/O scheduler %s not found\n", chosen_elevator);
 
-	e = elevator_get(name);
-	if (!e)
-		return -EINVAL;
+	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
+		printk("Default I/O scheduler not found, using no-op\n");
+		e = elevator_get("noop");
+	}
 
 	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
 	if (!eq) {
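With elevator_setup_default() gone, elevator_init() itself now walks the fallback chain: a caller-supplied name must resolve or the call fails, otherwise the boot-time chosen_elevator is tried, then CONFIG_DEFAULT_IOSCHED, and finally "noop". A hedged userspace model of that order; elevator_get() is replaced by a lookup in an invented table of registered names, and the CONFIG_DEFAULT_IOSCHED value shown is only an example:

#include <stdio.h>
#include <string.h>

#define CONFIG_DEFAULT_IOSCHED "anticipatory"	/* example build-time default */

static const char *registered[] = { "noop", "deadline", "cfq", NULL };

/* stand-in for elevator_get(): succeed only for "registered" schedulers */
static const char *elevator_get(const char *name)
{
	int i;

	for (i = 0; registered[i]; i++)
		if (!strcmp(registered[i], name))
			return registered[i];
	return NULL;
}

/* mirrors the new fallback order in elevator_init() */
static const char *pick_elevator(const char *name, const char *chosen)
{
	const char *e = NULL;

	if (name && !(e = elevator_get(name)))
		return NULL;			/* explicit request missing: -EINVAL */
	if (!e && *chosen && !(e = elevator_get(chosen)))
		printf("I/O scheduler %s not found\n", chosen);
	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
		printf("Default I/O scheduler not found, using no-op\n");
		e = elevator_get("noop");
	}
	return e;
}

int main(void)
{
	/* boot requested a scheduler that is not compiled in */
	printf("-> %s\n", pick_elevator(NULL, "anticipatory"));
	return 0;
}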
@@ -310,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -327,40 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-		       int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
 
-	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
-
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update end_sector
-		 */
-		if (blk_fs_request(rq)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
 	rq->q = q;
 
 	switch (where) {
@@ -441,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	}
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		if (blk_barrier_rq(rq))
+			q->ordcolor ^= 1;
+
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update
+		 * end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
 		     int plug)
 {
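Taken together, the two hunks above split the old __elv_add_request() in two: elv_insert() now performs only the raw insertion, while __elv_add_request() keeps the barrier policy (ordered-colour toggling, forced back insertion, scheduling-boundary update) and optional plugging, then delegates to elv_insert(). The colour toggle is also narrowed to genuine barriers via blk_barrier_rq(), and paths that re-inject already-prepared requests (elv_requeue_request() above, the flush paths in ll_rw_blk.c below) call elv_insert() directly, so the barrier bookkeeping is not applied a second time. A toy sketch of the same wrapper/primitive structure; all types and fields here are invented, only the two function names mirror the patch:

#include <stdio.h>

enum where { INSERT_SORT, INSERT_BACK, INSERT_FRONT, INSERT_REQUEUE };

struct request { int barrier; };
struct queue   { int ordcolor; int plugged; };

/* primitive: just insert, no policy */
static void elv_insert(struct queue *q, struct request *rq, enum where w)
{
	(void)rq;
	printf("insert at %d, ordcolor=%d\n", w, q->ordcolor);
}

/* wrapper: apply barrier/plug policy, then delegate */
static void __elv_add_request(struct queue *q, struct request *rq,
			      enum where w, int plug)
{
	if (rq->barrier) {
		q->ordcolor ^= 1;		/* flip only for real barriers */
		if (w == INSERT_SORT)
			w = INSERT_BACK;	/* barriers imply back insertion */
	}
	if (plug)
		q->plugged = 1;
	elv_insert(q, rq, w);
}

int main(void)
{
	struct queue q = { 0, 0 };
	struct request barrier_rq = { 1 };

	__elv_add_request(&q, &barrier_rq, INSERT_SORT, 1);	/* colour flips once */
	elv_insert(&q, &barrier_rq, INSERT_REQUEUE);		/* requeue: colour untouched */
	return 0;
}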
@@ -669,8 +659,10 @@ int elv_register(struct elevator_type *e)
 	spin_unlock_irq(&elv_list_lock);
 
 	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
-	if (!strcmp(e->elevator_name, chosen_elevator))
-		printk(" (default)");
+	if (!strcmp(e->elevator_name, chosen_elevator) ||
+			(!*chosen_elevator &&
+			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+				printk(" (default)");
 	printk("\n");
 	return 0;
 }
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8e27d0ab0d7c..03d9c82b0fe7 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 		return -EINVAL;
 	}
 
+	q->ordered = ordered;
 	q->next_ordered = ordered;
 	q->prepare_flush_fn = prepare_flush_fn;
 
@@ -452,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -488,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
 	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
@@ -506,7 +508,7 @@
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -530,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag.  Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
-	}
-
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
+	} else {
+		/* Ordered by draining.  Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
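Once an ordered sequence is in progress, the rewritten blk_do_ordered() above first lets unrelated special requests through, then applies one of two strategies: with QUEUE_ORDERED_TAG the drive orders by tag and only the next barrier has to be held back, otherwise the queue orders by draining and a request may proceed only once the sequence has reached its stage. A simplified model of the drain-ordering "wait for turn" test; in the real code blk_ordered_req_seq() and blk_ordered_cur_seq() return QUEUE_ORDSEQ_* values whose numeric order matches the issue order, modelled here as a plain enum:

#include <stdio.h>

/* stages of an ordered sequence, in issue order (simplified stand-ins
 * for the QUEUE_ORDSEQ_* values) */
enum ordseq { ORDSEQ_DRAIN, ORDSEQ_PREFLUSH, ORDSEQ_BAR, ORDSEQ_POSTFLUSH, ORDSEQ_DONE };

/* drain ordering: dispatch only when the queue has reached the request's
 * stage, otherwise hold it back (*rqp = NULL in the patch) */
static int may_dispatch(enum ordseq cur_seq, enum ordseq req_seq)
{
	return req_seq <= cur_seq;
}

int main(void)
{
	enum ordseq cur = ORDSEQ_PREFLUSH;	/* pre-flush still in flight */

	printf("pre-flush rq: %s\n", may_dispatch(cur, ORDSEQ_PREFLUSH) ? "dispatch" : "hold");
	printf("barrier rq:   %s\n", may_dispatch(cur, ORDSEQ_BAR)      ? "dispatch" : "hold");
	return 0;
}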
@@ -662,7 +658,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -2577,6 +2573,8 @@ void disk_round_stats(struct gendisk *disk)
 	disk->stamp = now;
 }
 
+EXPORT_SYMBOL_GPL(disk_round_stats);
+
 /*
  * queue lock must be held
  */
@@ -2632,6 +2630,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -3153,7 +3152,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	if (blk_fs_request(req) && req->rq_disk) {
 		const int rw = rq_data_dir(req);
 
-		__disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+		disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
 	}
 
 	total_bytes = bio_nbytes = 0;
@@ -3448,7 +3447,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
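The blk_dev_init() change above swaps a loop over all NR_CPUS slots for for_each_cpu(), which in this kernel iterates only the CPUs present in the possible-CPU map. A rough userspace analogue of the difference, with an invented 8-slot mask standing in for cpu_possible_map:

#include <stdio.h>

#define NR_CPUS 8
/* toy possible-CPU mask: only CPUs 0 and 2 can ever exist */
static const unsigned long cpu_possible_mask = (1UL << 0) | (1UL << 2);

/* rough analogue of for_each_cpu(): visit only possible CPUs */
#define for_each_possible(i) \
	for ((i) = 0; (i) < NR_CPUS; (i)++) \
		if (cpu_possible_mask & (1UL << (i)))

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)		/* old loop: all 8 slots */
		printf("old: cpu %d\n", i);

	for_each_possible(i)			/* new loop: only cpu 0 and 2 */
		printf("new: cpu %d\n", i);
	return 0;
}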
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index cc72210687eb..24f7af9d0abc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -310,6 +310,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_TIMEOUT;
 
+	rq->retries = 0;
+
 	start_time = jiffies;
 
 	/* ignore return value. All information is passed back to caller
@@ -427,6 +429,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	rq->data = buffer;
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
+	rq->retries = 0;
 
 	blk_execute_rq(q, bd_disk, rq, 0);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */