-rw-r--r--  drivers/block/elevator.c  | 242
-rw-r--r--  drivers/block/ll_rw_blk.c |  23
-rw-r--r--  include/linux/blkdev.h    |  17
-rw-r--r--  include/linux/elevator.h  |  16
4 files changed, 201 insertions, 97 deletions
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 4144f30d82a9..a27555908d35 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -40,6 +40,11 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+static inline sector_t rq_last_sector(struct request *rq)
+{
+	return rq->sector + rq->nr_sectors;
+}
+
 /*
  * can we safely merge with this request?
  */
@@ -143,6 +148,9 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->elevator = eq;
+	q->last_sector = 0;
+	q->boundary_rq = NULL;
+	q->max_back_kb = 0;
 
 	if (eq->ops->elevator_init_fn)
 		ret = eq->ops->elevator_init_fn(q, eq);
@@ -225,6 +233,48 @@ void elevator_exit(elevator_t *e)
 	kfree(e);
 }
 
+/*
+ * Insert rq into dispatch queue of q.  Queue lock must be held on
+ * entry.  If sort != 0, rq is sort-inserted; otherwise, rq will be
+ * appended to the dispatch queue.  To be used by specific elevators.
+ */
+void elv_dispatch_insert(request_queue_t *q, struct request *rq, int sort)
+{
+	sector_t boundary;
+	unsigned max_back;
+	struct list_head *entry;
+
+	if (!sort) {
+		/* Specific elevator is performing sort.  Step away. */
+		q->last_sector = rq_last_sector(rq);
+		q->boundary_rq = rq;
+		list_add_tail(&rq->queuelist, &q->queue_head);
+		return;
+	}
+
+	boundary = q->last_sector;
+	max_back = q->max_back_kb * 2;
+	boundary = boundary > max_back ? boundary - max_back : 0;
+
+	list_for_each_prev(entry, &q->queue_head) {
+		struct request *pos = list_entry_rq(entry);
+
+		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+			break;
+		if (rq->sector >= boundary) {
+			if (pos->sector < boundary)
+				continue;
+		} else {
+			if (pos->sector >= boundary)
+				break;
+		}
+		if (rq->sector >= pos->sector)
+			break;
+	}
+
+	list_add(&rq->queuelist, entry);
+}
+
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	elevator_t *e = q->elevator;
@@ -255,13 +305,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 	e->ops->elevator_merge_req_fn(q, rq, next);
 }
 
-/*
- * For careful internal use by the block layer. Essentially the same as
- * a requeue in that it tells the io scheduler that this request is not
- * active in the driver or hardware anymore, but we don't want the request
- * added back to the scheduler. Function is not exported.
- */
-void elv_deactivate_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(request_queue_t *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
 
@@ -269,19 +313,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq)
 	 * it already went through dequeue, we need to decrement the
 	 * in_flight count again
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight--;
+		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+			e->ops->elevator_deactivate_req_fn(q, rq);
+	}
 
 	rq->flags &= ~REQ_STARTED;
 
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
-	elv_deactivate_request(q, rq);
-
 	/*
 	 * if this is the flush, requeue the original instead and drop the flush
 	 */
@@ -290,55 +329,89 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 		rq = rq->end_io_data;
 	}
 
-	/*
-	 * the request is prepped and may have some resources allocated.
-	 * allowing unprepped requests to pass this one may cause resource
-	 * deadlock. turn on softbarrier.
-	 */
-	rq->flags |= REQ_SOFTBARRIER;
-
-	/*
-	 * if iosched has an explicit requeue hook, then use that. otherwise
-	 * just put the request at the front of the queue
-	 */
-	if (q->elevator->ops->elevator_requeue_req_fn)
-		q->elevator->ops->elevator_requeue_req_fn(q, rq);
-	else
-		__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
-	/*
-	 * barriers implicitly indicate back insertion
-	 */
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
-	    where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update last_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->last_sector = rq_last_sector(rq);
+			q->boundary_rq = rq;
+		}
+	}
 
 	if (plug)
 		blk_plug_device(q);
 
 	rq->q = q;
 
-	if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
-		q->elevator->ops->elevator_add_req_fn(q, rq, where);
-
-		if (blk_queue_plugged(q)) {
-			int nrq = q->rq.count[READ] + q->rq.count[WRITE]
-				- q->in_flight;
-
-			if (nrq >= q->unplug_thresh)
-				__generic_unplug_device(q);
-		}
-	} else
+	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
 		/*
 		 * if drain is set, store the request "locally". when the drain
 		 * is finished, the requests will be handed ordered to the io
 		 * scheduler
 		 */
 		list_add_tail(&rq->queuelist, &q->drain_list);
+		return;
+	}
+
+	switch (where) {
+	case ELEVATOR_INSERT_FRONT:
+		rq->flags |= REQ_SOFTBARRIER;
+
+		list_add(&rq->queuelist, &q->queue_head);
+		break;
+
+	case ELEVATOR_INSERT_BACK:
+		rq->flags |= REQ_SOFTBARRIER;
+
+		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+			;
+		list_add_tail(&rq->queuelist, &q->queue_head);
+		/*
+		 * We kick the queue here for the following reasons.
+		 * - The elevator might have returned NULL previously
+		 *   to delay requests and returned them now.  As the
+		 *   queue wasn't empty before this request, ll_rw_blk
+		 *   won't run the queue on return, resulting in hang.
+		 * - Usually, back inserted requests won't be merged
+		 *   with anything.  There's no point in delaying queue
+		 *   processing.
+		 */
+		blk_remove_plug(q);
+		q->request_fn(q);
+		break;
+
+	case ELEVATOR_INSERT_SORT:
+		BUG_ON(!blk_fs_request(rq));
+		rq->flags |= REQ_SORTED;
+		q->elevator->ops->elevator_add_req_fn(q, rq);
+		break;
+
+	default:
+		printk(KERN_ERR "%s: bad insertion point %d\n",
+		       __FUNCTION__, where);
+		BUG();
+	}
+
+	if (blk_queue_plugged(q)) {
+		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+			- q->in_flight;
+
+		if (nrq >= q->unplug_thresh)
+			__generic_unplug_device(q);
+	}
 }
 
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
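Continuing the illustrative FIFO sketch from above (again, not part of the patch): with the generic dispatch queue handling insertion points, the scheduler's add_req hook loses its third argument; ELEVATOR_INSERT_SORT requests reach it through the switch in __elv_add_request() above, with the queue lock already held.

/* Illustrative only: two-argument add_req hook pairing with my_dispatch(). */
static void my_add_request(request_queue_t *q, struct request *rq)
{
	struct my_data *md = q->elevator->elevator_data;

	/* queue lock is held by __elv_add_request(); just queue the request */
	list_add_tail(&rq->queuelist, &md->fifo);
}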
@@ -353,13 +426,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
 
 static inline struct request *__elv_next_request(request_queue_t *q)
 {
-	struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
+	struct request *rq;
+
+	if (unlikely(list_empty(&q->queue_head) &&
+		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
+		return NULL;
+
+	rq = list_entry_rq(q->queue_head.next);
 
 	/*
 	 * if this is a barrier write and the device has to issue a
 	 * flush sequence to support it, check how far we are
 	 */
-	if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
+	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
 		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
 
 		if (q->ordered == QUEUE_ORDERED_FLUSH &&
@@ -376,16 +455,34 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		/*
-		 * just mark as started even if we don't start it, a request
-		 * that has been delayed should not be passed by new incoming
-		 * requests
-		 */
-		rq->flags |= REQ_STARTED;
+		if (!(rq->flags & REQ_STARTED)) {
+			elevator_t *e = q->elevator;
+
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing).  Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq) &&
+			    e->ops->elevator_activate_req_fn)
+				e->ops->elevator_activate_req_fn(q, rq);
+
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->flags |= REQ_STARTED;
+		}
 
 		if (rq == q->last_merge)
 			q->last_merge = NULL;
 
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->last_sector = rq_last_sector(rq);
+			q->boundary_rq = NULL;
+		}
+
 		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
 
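The REQ_STARTED branch above is where the new activate hook fires; elv_requeue_request() and elv_completed_request() drive the deactivate and completed hooks. A hedged sketch of what a scheduler might do with the activate/deactivate pair, reusing the illustrative my_data from the earlier sketch (not part of this patch):

/* Illustrative only: track how many sorted requests the driver holds. */
static void my_activate_req(request_queue_t *q, struct request *rq)
{
	struct my_data *md = q->elevator->elevator_data;

	md->in_driver++;	/* driver sees rq for the first time */
}

static void my_deactivate_req(request_queue_t *q, struct request *rq)
{
	struct my_data *md = q->elevator->elevator_data;

	md->in_driver--;	/* rq was requeued back to the block layer */
}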
@@ -396,9 +493,9 @@ struct request *elv_next_request(request_queue_t *q)
 			/*
 			 * the request may have been (partially) prepped.
 			 * we need to keep this request in the front to
-			 * avoid resource deadlock. turn on softbarrier.
+			 * avoid resource deadlock.  REQ_STARTED will
+			 * prevent other fs requests from passing this one.
 			 */
-			rq->flags |= REQ_SOFTBARRIER;
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
@@ -421,16 +518,16 @@ struct request *elv_next_request(request_queue_t *q)
 	return rq;
 }
 
-void elv_remove_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	BUG_ON(list_empty(&rq->queuelist));
+
+	list_del_init(&rq->queuelist);
 
 	/*
 	 * the time frame between a request being removed from the lists
 	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side. note that we only account requests that the
-	 * driver has seen (REQ_STARTED set), to avoid false accounting
-	 * for request-request merges
+	 * the driver side.
 	 */
 	if (blk_account_rq(rq))
 		q->in_flight++;
@@ -444,19 +541,19 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
 	 */
 	if (rq == q->last_merge)
 		q->last_merge = NULL;
-
-	if (e->ops->elevator_remove_req_fn)
-		e->ops->elevator_remove_req_fn(q, rq);
 }
 
 int elv_queue_empty(request_queue_t *q)
 {
 	elevator_t *e = q->elevator;
 
+	if (!list_empty(&q->queue_head))
+		return 0;
+
 	if (e->ops->elevator_queue_empty_fn)
 		return e->ops->elevator_queue_empty_fn(q);
 
-	return list_empty(&q->queue_head);
+	return 1;
 }
 
 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -528,11 +625,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	/*
 	 * request is released from the driver, io must be done
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight--;
-
-	if (e->ops->elevator_completed_req_fn)
-		e->ops->elevator_completed_req_fn(q, rq);
+		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+			e->ops->elevator_completed_req_fn(q, rq);
+	}
 }
 
 int elv_register_queue(struct request_queue *q)
@@ -705,11 +802,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	return len;
 }
 
+EXPORT_SYMBOL(elv_dispatch_insert);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
 EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 4e2b1b06b411..d2a66fd309c3 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -353,6 +353,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_PREFLUSH;
 
 	if (!flush_rq->errors)
@@ -369,6 +371,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_POSTFLUSH;
 
 	q->end_flush_fn(q, flush_rq);
@@ -408,8 +412,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
 	if (!list_empty(&rq->queuelist))
 		blkdev_dequeue_request(rq);
 
-	elv_deactivate_request(q, rq);
-
 	flush_rq->end_io_data = rq;
 	flush_rq->end_io = blk_pre_flush_end_io;
 
@@ -1040,6 +1042,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 static char *rq_flags[] = {
 	"REQ_RW",
 	"REQ_FAILFAST",
+	"REQ_SORTED",
 	"REQ_SOFTBARRIER",
 	"REQ_HARDBARRIER",
 	"REQ_CMD",
@@ -2456,6 +2459,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (unlikely(--req->ref_count))
 		return;
 
+	elv_completed_request(q, req);
+
 	req->rq_status = RQ_INACTIVE;
 	req->rl = NULL;
 
@@ -2466,8 +2471,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (rl) {
 		int rw = rq_data_dir(req);
 
-		elv_completed_request(q, req);
-
 		BUG_ON(!list_empty(&req->queuelist));
 
 		blk_free_request(q, req);
@@ -2477,14 +2480,14 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 
 void blk_put_request(struct request *req)
 {
+	unsigned long flags;
+	request_queue_t *q = req->q;
+
 	/*
-	 * if req->rl isn't set, this request didnt originate from the
-	 * block layer, so it's safe to just disregard it
+	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+	 * following if (q) test.
 	 */
-	if (req->rl) {
-		unsigned long flags;
-		request_queue_t *q = req->q;
-
+	if (q) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		__blk_put_request(q, req);
 		spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index efdc9b5bc05c..2c7b9154927a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -203,6 +203,7 @@ struct request {
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_CMD,		/* is a regular fs rw request */
@@ -235,6 +236,7 @@ enum rq_flag_bits {
 
 #define REQ_RW		(1 << __REQ_RW)
 #define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
 #define REQ_CMD		(1 << __REQ_CMD)
@@ -333,6 +335,13 @@ struct request_queue
 	end_flush_fn		*end_flush_fn;
 
 	/*
+	 * Dispatch queue sorting
+	 */
+	sector_t		last_sector;
+	struct request		*boundary_rq;
+	unsigned int		max_back_kb;
+
+	/*
 	 * Auto-unplugging state
 	 */
 	struct timer_list	unplug_timer;
@@ -454,6 +463,7 @@ enum {
 #define blk_pm_request(rq)	\
 	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
 
+#define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
 #define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
 #define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
@@ -611,12 +621,7 @@ extern void end_request(struct request *req, int uptodate);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
-	BUG_ON(list_empty(&req->queuelist));
-
-	list_del_init(&req->queuelist);
-
-	if (req->rl)
-		elv_remove_request(req->q, req);
+	elv_dequeue_request(req->q, req);
 }
 
 /*
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ea6bbc2d7407..76f4f6920744 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -8,18 +8,17 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc
 
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
 
-typedef struct request *(elevator_next_req_fn) (request_queue_t *);
+typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
 
 typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
 typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
 typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
@@ -31,10 +30,9 @@ struct elevator_ops
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
 
-	elevator_next_req_fn *elevator_next_req_fn;
+	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;
-	elevator_remove_req_fn *elevator_remove_req_fn;
-	elevator_requeue_req_fn *elevator_requeue_req_fn;
+	elevator_activate_req_fn *elevator_activate_req_fn;
 	elevator_deactivate_req_fn *elevator_deactivate_req_fn;
 
 	elevator_queue_empty_fn *elevator_queue_empty_fn;
@@ -81,15 +79,15 @@ struct elevator_queue
 /*
  * block elevator interface
  */
+extern void elv_dispatch_insert(request_queue_t *, struct request *, int);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);
 extern void elv_merged_request(request_queue_t *, struct request *);
-extern void elv_remove_request(request_queue_t *, struct request *);
+extern void elv_dequeue_request(request_queue_t *, struct request *);
 extern void elv_requeue_request(request_queue_t *, struct request *);
-extern void elv_deactivate_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(request_queue_t *, struct request *);
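Taken together, the header changes mean an io scheduler now registers a dispatch/activate pair instead of next_req/remove_req/requeue_req hooks. A hedged sketch of the resulting ops wiring for the hypothetical scheduler used in the earlier sketches (not part of this patch; "my_" names are illustrative, and hooks unchanged by the patch are omitted):

static struct elevator_type elevator_my = {
	.ops = {
		.elevator_dispatch_fn		= my_dispatch,
		.elevator_add_req_fn		= my_add_request,
		.elevator_activate_req_fn	= my_activate_req,
		.elevator_deactivate_req_fn	= my_deactivate_req,
		/* queue_empty/completed/init/exit hooks as before */
	},
	.elevator_name	= "my-iosched",
	.elevator_owner	= THIS_MODULE,
};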