author     Tejun Heo <htejun@gmail.com>                2005-11-10 02:52:05 -0500
committer  Jens Axboe <axboe@nelson.home.kernel.dk>    2005-11-12 04:56:06 -0500
commit     15853af9f07673680439b224519c692f1352b959
tree       e6a8fc1cd34dec568883cd62102e1e626d9241d9
parent     1b5ed5e1f1315e37380e55102f58bcae3344d2a7
[BLOCK] Implement elv_drain_elevator for improved switch error detection
This patch adds request_queue->nr_sorted, which keeps the number of requests in the iosched, and implements elv_drain_elevator, which performs forced dispatching. elv_drain_elevator checks whether the iosched actually dispatches all the requests it holds and prints an error message if it doesn't. As buggy forced dispatching can result in wrong barrier operations, I think this extra check is worthwhile.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
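For readers who want to see the bookkeeping in isolation, here is a minimal user-space sketch of the pattern the patch introduces: a counter of requests held by the scheduler, a forced drain loop, and a rate-limited complaint if the scheduler fails to hand everything back. The toy_queue, toy_dispatch and toy_drain names are hypothetical stand-ins for request_queue, the elevator_dispatch_fn hook and elv_drain_elevator; they are not kernel APIs.

/* Stand-alone sketch of the drain-and-verify pattern; toy_* names are made up. */
#include <stdio.h>

struct toy_queue {
	unsigned int nr_sorted;		/* requests still inside the scheduler */
	int (*dispatch_fn)(struct toy_queue *q, int force);
};

/* A well-behaved scheduler: hands back one request per forced call. */
static int toy_dispatch(struct toy_queue *q, int force)
{
	(void)force;
	if (q->nr_sorted == 0)
		return 0;
	q->nr_sorted--;			/* mirrors q->nr_sorted-- on dispatch */
	return 1;
}

static void toy_drain(struct toy_queue *q)
{
	static int printed;

	while (q->dispatch_fn(q, 1))	/* force dispatch until nothing comes back */
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10)		/* complain at most ten times */
		fprintf(stderr, "forced dispatching is broken (nr_sorted=%u)\n",
			q->nr_sorted);
}

int main(void)
{
	struct toy_queue q = { .nr_sorted = 3, .dispatch_fn = toy_dispatch };

	toy_drain(&q);			/* drains cleanly, prints no warning */
	printf("after drain: nr_sorted=%u\n", q.nr_sorted);
	return 0;
}

In the patch itself the same check lives in elv_drain_elevator(): the warning goes through printk(KERN_ERR ...) and the static 'printed' counter caps it at ten reports, as shown in the diff below.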
-rw-r--r--  block/elevator.c         25
-rw-r--r--  include/linux/blkdev.h    2
2 files changed, 22 insertions, 5 deletions
diff --git a/block/elevator.c b/block/elevator.c
index a475b1a19f67..73aa46b6db49 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -226,6 +226,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
+	q->nr_sorted--;
 
 	boundary = q->end_sector;
 
@@ -284,6 +285,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 
 	if (e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
+	q->nr_sorted--;
 
 	q->last_merge = rq;
 }
@@ -315,6 +317,20 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
+static void elv_drain_elevator(request_queue_t *q)
+{
+	static int printed;
+	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+		;
+	if (q->nr_sorted == 0)
+		return;
+	if (printed++ < 10) {
+		printk(KERN_ERR "%s: forced dispatching is broken "
+		       "(nr_sorted=%u), please report this\n",
+		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
+	}
+}
+
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
@@ -349,9 +365,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 	case ELEVATOR_INSERT_BACK:
 		rq->flags |= REQ_SOFTBARRIER;
-
-		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-			;
+		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
 		 * We kick the queue here for the following reasons.
@@ -370,6 +384,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
 		rq->flags |= REQ_SORTED;
+		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
 		/*
@@ -692,8 +707,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 
 	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 
-	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-		;
+	elv_drain_elevator(q);
 
 	while (q->rq.elvpriv) {
 		blk_remove_plug(q);
@@ -701,6 +715,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
+		elv_drain_elevator(q);
 	}
 
 	spin_unlock_irq(q->queue_lock);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 025a7f084dbd..a33a31e71bbc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -406,6 +406,7 @@ struct request_queue
 
 	atomic_t refcnt;
 
+	unsigned int nr_sorted;
 	unsigned int in_flight;
 
 	/*
@@ -631,6 +632,7 @@ static inline void elv_dispatch_add_tail(struct request_queue *q,
 {
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
+	q->nr_sorted--;
 
 	q->end_sector = rq_end_sector(rq);
 	q->boundary_rq = rq;