author	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-28 11:53:49 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-28 11:53:49 -0400
commit	28d721e24c88496ff8e9c4a0959bdc1415c0658e (patch)
tree	0652161bbbcbfddf47c7ddb25d2db8ecd4cbec89 /drivers/block/elevator.c
parent	0ee40c6628434f0535da31deeacc28b61e80d810 (diff)
parent	cb19833dccb32f97cacbfff834b53523915f13f6 (diff)
Merge branch 'generic-dispatch' of git://brick.kernel.dk/data/git/linux-2.6-block
Diffstat (limited to 'drivers/block/elevator.c')
-rw-r--r--	drivers/block/elevator.c	266
1 file changed, 170 insertions(+), 96 deletions(-)
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 4f69d222b183..3b8099ccedff 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -83,15 +83,6 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_try_merge);
 
-inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
-{
-	if (q->last_merge)
-		return elv_try_merge(q->last_merge, bio);
-
-	return ELEVATOR_NO_MERGE;
-}
-EXPORT_SYMBOL(elv_try_last_merge);
-
 static struct elevator_type *elevator_find(const char *name)
 {
 	struct elevator_type *e = NULL;
@@ -143,6 +134,8 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->elevator = eq;
+	q->end_sector = 0;
+	q->boundary_rq = NULL;
 
 	if (eq->ops->elevator_init_fn)
 		ret = eq->ops->elevator_init_fn(q, eq);
@@ -225,9 +218,52 @@ void elevator_exit(elevator_t *e)
 	kfree(e);
 }
 
+/*
+ * Insert rq into dispatch queue of q.  Queue lock must be held on
+ * entry.  If sort != 0, rq is sort-inserted; otherwise, rq will be
+ * appended to the dispatch queue.  To be used by specific elevators.
+ */
+void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+{
+	sector_t boundary;
+	struct list_head *entry;
+
+	if (q->last_merge == rq)
+		q->last_merge = NULL;
+
+	boundary = q->end_sector;
+
+	list_for_each_prev(entry, &q->queue_head) {
+		struct request *pos = list_entry_rq(entry);
+
+		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+			break;
+		if (rq->sector >= boundary) {
+			if (pos->sector < boundary)
+				continue;
+		} else {
+			if (pos->sector >= boundary)
+				break;
+		}
+		if (rq->sector >= pos->sector)
+			break;
+	}
+
+	list_add(&rq->queuelist, entry);
+}
+
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	elevator_t *e = q->elevator;
+	int ret;
+
+	if (q->last_merge) {
+		ret = elv_try_merge(q->last_merge, bio);
+		if (ret != ELEVATOR_NO_MERGE) {
+			*req = q->last_merge;
+			return ret;
+		}
+	}
 
 	if (e->ops->elevator_merge_fn)
 		return e->ops->elevator_merge_fn(q, req, bio);
@@ -241,6 +277,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq)
 
 	if (e->ops->elevator_merged_fn)
 		e->ops->elevator_merged_fn(q, rq);
+
+	q->last_merge = rq;
 }
 
 void elv_merge_requests(request_queue_t *q, struct request *rq,
@@ -248,20 +286,13 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 {
 	elevator_t *e = q->elevator;
 
-	if (q->last_merge == next)
-		q->last_merge = NULL;
-
 	if (e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
+
+	q->last_merge = rq;
 }
 
-/*
- * For careful internal use by the block layer. Essentially the same as
- * a requeue in that it tells the io scheduler that this request is not
- * active in the driver or hardware anymore, but we don't want the request
- * added back to the scheduler. Function is not exported.
- */
-void elv_deactivate_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(request_queue_t *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -269,19 +300,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq)
 	 * it already went through dequeue, we need to decrement the
 	 * in_flight count again
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight--;
+		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+			e->ops->elevator_deactivate_req_fn(q, rq);
+	}
 
 	rq->flags &= ~REQ_STARTED;
 
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
-	elv_deactivate_request(q, rq);
-
 	/*
 	 * if this is the flush, requeue the original instead and drop the flush
 	 */
@@ -290,55 +316,91 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 		rq = rq->end_io_data;
 	}
 
-	/*
-	 * the request is prepped and may have some resources allocated.
-	 * allowing unprepped requests to pass this one may cause resource
-	 * deadlock. turn on softbarrier.
-	 */
-	rq->flags |= REQ_SOFTBARRIER;
-
-	/*
-	 * if iosched has an explicit requeue hook, then use that. otherwise
-	 * just put the request at the front of the queue
-	 */
-	if (q->elevator->ops->elevator_requeue_req_fn)
-		q->elevator->ops->elevator_requeue_req_fn(q, rq);
-	else
-		__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
-	/*
-	 * barriers implicitly indicate back insertion
-	 */
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
-	    where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	}
 
 	if (plug)
 		blk_plug_device(q);
 
 	rq->q = q;
 
-	if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
-		q->elevator->ops->elevator_add_req_fn(q, rq, where);
-
-		if (blk_queue_plugged(q)) {
-			int nrq = q->rq.count[READ] + q->rq.count[WRITE]
-				  - q->in_flight;
-
-			if (nrq >= q->unplug_thresh)
-				__generic_unplug_device(q);
-		}
-	} else
+	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
 		/*
 		 * if drain is set, store the request "locally". when the drain
 		 * is finished, the requests will be handed ordered to the io
 		 * scheduler
 		 */
 		list_add_tail(&rq->queuelist, &q->drain_list);
+		return;
+	}
+
+	switch (where) {
+	case ELEVATOR_INSERT_FRONT:
+		rq->flags |= REQ_SOFTBARRIER;
+
+		list_add(&rq->queuelist, &q->queue_head);
+		break;
+
+	case ELEVATOR_INSERT_BACK:
+		rq->flags |= REQ_SOFTBARRIER;
+
+		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+			;
+		list_add_tail(&rq->queuelist, &q->queue_head);
+		/*
+		 * We kick the queue here for the following reasons.
+		 * - The elevator might have returned NULL previously
+		 *   to delay requests and returned them now.  As the
+		 *   queue wasn't empty before this request, ll_rw_blk
+		 *   won't run the queue on return, resulting in hang.
+		 * - Usually, back inserted requests won't be merged
+		 *   with anything.  There's no point in delaying queue
+		 *   processing.
+		 */
+		blk_remove_plug(q);
+		q->request_fn(q);
+		break;
+
+	case ELEVATOR_INSERT_SORT:
+		BUG_ON(!blk_fs_request(rq));
+		rq->flags |= REQ_SORTED;
+		q->elevator->ops->elevator_add_req_fn(q, rq);
+		if (q->last_merge == NULL && rq_mergeable(rq))
+			q->last_merge = rq;
+		break;
+
+	default:
+		printk(KERN_ERR "%s: bad insertion point %d\n",
+		       __FUNCTION__, where);
+		BUG();
+	}
+
+	if (blk_queue_plugged(q)) {
+		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+			- q->in_flight;
+
+		if (nrq >= q->unplug_thresh)
+			__generic_unplug_device(q);
+	}
 }
 
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
@@ -353,13 +415,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 static inline struct request *__elv_next_request(request_queue_t *q)
 {
-	struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
+	struct request *rq;
+
+	if (unlikely(list_empty(&q->queue_head) &&
+		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
+		return NULL;
+
+	rq = list_entry_rq(q->queue_head.next);
 
 	/*
 	 * if this is a barrier write and the device has to issue a
 	 * flush sequence to support it, check how far we are
 	 */
-	if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
+	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
 		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
 
 		if (q->ordered == QUEUE_ORDERED_FLUSH &&
@@ -376,15 +444,30 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		/*
-		 * just mark as started even if we don't start it, a request
-		 * that has been delayed should not be passed by new incoming
-		 * requests
-		 */
-		rq->flags |= REQ_STARTED;
+		if (!(rq->flags & REQ_STARTED)) {
+			elevator_t *e = q->elevator;
+
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing).  Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq) &&
+			    e->ops->elevator_activate_req_fn)
+				e->ops->elevator_activate_req_fn(q, rq);
 
-		if (rq == q->last_merge)
-			q->last_merge = NULL;
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->flags |= REQ_STARTED;
+		}
+
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = NULL;
+		}
 
 		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
@@ -396,9 +479,9 @@ struct request *elv_next_request(request_queue_t *q)
 			/*
 			 * the request may have been (partially) prepped.
 			 * we need to keep this request in the front to
-			 * avoid resource deadlock. turn on softbarrier.
+			 * avoid resource deadlock.  REQ_STARTED will
+			 * prevent other fs requests from passing this one.
 			 */
-			rq->flags |= REQ_SOFTBARRIER;
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
@@ -421,42 +504,32 @@ struct request *elv_next_request(request_queue_t *q)
 	return rq;
 }
 
-void elv_remove_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	BUG_ON(list_empty(&rq->queuelist));
+
+	list_del_init(&rq->queuelist);
 
 	/*
 	 * the time frame between a request being removed from the lists
 	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side. note that we only account requests that the
-	 * driver has seen (REQ_STARTED set), to avoid false accounting
-	 * for request-request merges
+	 * the driver side.
 	 */
 	if (blk_account_rq(rq))
 		q->in_flight++;
-
-	/*
-	 * the main clearing point for q->last_merge is on retrieval of
-	 * request by driver (it calls elv_next_request()), but it _can_
-	 * also happen here if a request is added to the queue but later
-	 * deleted without ever being given to driver (merged with another
-	 * request).
-	 */
-	if (rq == q->last_merge)
-		q->last_merge = NULL;
-
-	if (e->ops->elevator_remove_req_fn)
-		e->ops->elevator_remove_req_fn(q, rq);
 }
 
 int elv_queue_empty(request_queue_t *q)
 {
 	elevator_t *e = q->elevator;
 
+	if (!list_empty(&q->queue_head))
+		return 0;
+
 	if (e->ops->elevator_queue_empty_fn)
 		return e->ops->elevator_queue_empty_fn(q);
 
-	return list_empty(&q->queue_head);
+	return 1;
 }
 
 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -528,11 +601,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	/*
 	 * request is released from the driver, io must be done
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight--;
-
-	if (e->ops->elevator_completed_req_fn)
-		e->ops->elevator_completed_req_fn(q, rq);
+		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+			e->ops->elevator_completed_req_fn(q, rq);
+	}
 }
 
 int elv_register_queue(struct request_queue *q)
@@ -705,11 +778,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	return len;
 }
 
+EXPORT_SYMBOL(elv_dispatch_sort);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
 EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
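
Usage note (not part of the commit): the newly exported elv_dispatch_sort() is the helper an I/O scheduler calls from its elevator_dispatch_fn hook to move a request onto the generic dispatch queue (q->queue_head), where elv_next_request() now picks it up. Below is a minimal sketch of such a scheduler written only against the hooks visible in this diff, elevator_add_req_fn(q, rq) and elevator_dispatch_fn(q, force); the "example" names and the private FIFO list are hypothetical, and registration via struct elevator_type plus init/exit and error handling are omitted.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/list.h>

/* hypothetical per-queue data for this sketch */
struct example_data {
	struct list_head fifo;		/* requests held by the elevator */
};

/* elevator_add_req_fn: the elevator owns rq until it dispatches it */
static void example_add_request(request_queue_t *q, struct request *rq)
{
	struct example_data *ed = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &ed->fifo);
}

/*
 * elevator_dispatch_fn: move one request to q->queue_head and return
 * non-zero, or return 0 if nothing is pending.  The force argument
 * (non-zero when called from ELEVATOR_INSERT_BACK) is ignored here
 * because this sketch never holds requests back.
 */
static int example_dispatch(request_queue_t *q, int force)
{
	struct example_data *ed = q->elevator->elevator_data;
	struct request *rq;

	if (list_empty(&ed->fifo))
		return 0;

	rq = list_entry(ed->fifo.next, struct request, queuelist);
	list_del_init(&rq->queuelist);
	elv_dispatch_sort(q, rq);	/* sector-sorted insert around q->end_sector */
	return 1;
}

With this split, elevator.c keeps all last_merge, boundary_rq and in_flight bookkeeping in one place, and the per-scheduler remove/requeue/next_req hooks removed by this commit are no longer needed.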