aboutsummaryrefslogtreecommitdiffstats
path: root/block/elevator.c
diff options
context:
space:
mode:
authorJeff Moyer <jmoyer@redhat.com>2011-06-02 15:19:05 -0400
committerJens Axboe <jaxboe@fusionio.com>2011-06-02 15:19:05 -0400
commit796d5116c407690b14fd5bda136aa67a39e7061a (patch)
treebb0522aacb0a923e6636c82aff143df95ea1d730 /block/elevator.c
parent1fa7b6a29c61358cc2ca6f64cef4aa0e1a7ca74c (diff)
iosched: prevent aliased requests from starving other I/O
Hi, Jens, If you recall, I posted an RFC patch for this back in July of last year: http://lkml.org/lkml/2010/7/13/279 The basic problem is that a process can issue a never-ending stream of async direct I/Os to the same sector on a device, thus starving out other I/O in the system (due to the way the alias handling works in both cfq and deadline). The solution I proposed back then was to start dispatching from the fifo after a certain number of aliases had been dispatched. Vivek asked why we had to treat aliases differently at all, and I never had a good answer. So, I put together a simple patch which allows aliases to be added to the rb tree (it adds them to the right, though that doesn't matter as the order isn't guaranteed anyway). I think this is the preferred solution, as it doesn't break up time slices in CFQ or batches in deadline. I've tested it, and it does solve the starvation issue. Let me know what you think. Cheers, Jeff Signed-off-by: Jeff Moyer <jmoyer@redhat.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index b0b38ce0dcb6..a3b64bc71d88 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -353,7 +353,7 @@ static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
  * RB-tree support functions for inserting/lookup/removal of requests
  * in a sorted RB tree.
  */
-struct request *elv_rb_add(struct rb_root *root, struct request *rq)
+void elv_rb_add(struct rb_root *root, struct request *rq)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
@@ -365,15 +365,12 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
 
 		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 			p = &(*p)->rb_left;
-		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
+		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
 			p = &(*p)->rb_right;
-		else
-			return __rq;
 	}
 
 	rb_link_node(&rq->rb_node, parent, p);
 	rb_insert_color(&rq->rb_node, root);
-	return NULL;
 }
 EXPORT_SYMBOL(elv_rb_add);
 