author     Jens Axboe <axboe@suse.de>                  2006-08-10 02:44:47 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>    2006-09-30 14:23:37 -0400
commit     4aff5e2333c9a1609662f2091f55c3f6fffdad36 (patch)
tree       b73d8c2b7c1bdc03d3313c108da7dfc95ee95525 /block/elevator.c
parent     77ed74da26f50fa28471571ee7a2251b77526d84 (diff)
[PATCH] Split struct request ->flags into two parts
Right now ->flags is a bit of a mess: some bits encode the request type,
while others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This allows the introduction of generic
Linux block message types, useful for sending generic Linux commands
to block devices.
Signed-off-by: Jens Axboe <axboe@suse.de>
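
[Editor's note: for a rough picture of what the split buys, here is a minimal,
self-contained sketch. It is userspace C, not the kernel's actual definitions:
the struct layout, enum values, and bit positions below are illustrative
assumptions; only the names cmd_type, cmd_flags, REQ_SOFTBARRIER, and
REQ_STARTED come from the patch. Type checks become equality tests on
->cmd_type, while modifier checks keep masking ->cmd_flags.]

/* Sketch only -- models the split, not the kernel's definitions. */
#include <stdio.h>

/* what a request is: exactly one type per request (values assumed) */
enum rq_cmd_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC };

/* how to treat it: OR-able modifier bits (bit positions assumed) */
#define REQ_SOFTBARRIER (1u << 0)
#define REQ_STARTED     (1u << 1)

struct request {
        enum rq_cmd_type cmd_type;      /* the request type */
        unsigned int cmd_flags;         /* the modifier bits */
};

int main(void)
{
        struct request rq = { .cmd_type = REQ_TYPE_FS, .cmd_flags = 0 };

        rq.cmd_flags |= REQ_SOFTBARRIER;  /* set a modifier, as elv_insert() does */

        /* type is compared, modifiers are masked -- no longer one shared field */
        if (rq.cmd_type == REQ_TYPE_FS && (rq.cmd_flags & REQ_SOFTBARRIER))
                printf("fs request carrying a soft barrier\n");
        return 0;
}

In the hunks below only the modifier side is touched: every mask or assignment
on rq->flags becomes the same operation on rq->cmd_flags.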
Diffstat (limited to 'block/elevator.c')
 block/elevator.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 9b72dc7c8a5c..4ac97b642042 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,7 +242,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -313,7 +313,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 		e->ops->elevator_deactivate_req_fn(q, rq);
 	}
 
-	rq->flags &= ~REQ_STARTED;
+	rq->cmd_flags &= ~REQ_STARTED;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
@@ -344,13 +344,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	switch (where) {
 	case ELEVATOR_INSERT_FRONT:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -369,7 +369,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
-		rq->flags |= REQ_SORTED;
+		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
@@ -387,7 +387,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 * insertion; otherwise, requests should be requeued
 		 * in ordseq order.
 		 */
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
@@ -429,9 +429,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
 	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
+		rq->cmd_flags |= REQ_ORDERED_COLOR;
 
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
 		/*
 		 * toggle ordered color
 		 */
@@ -452,7 +452,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
 	if (plug)
@@ -493,7 +493,7 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->flags & REQ_STARTED)) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
 			elevator_t *e = q->elevator;
 
 			/*
@@ -510,7 +510,7 @@ struct request *elv_next_request(request_queue_t *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->flags |= REQ_STARTED;
+			rq->cmd_flags |= REQ_STARTED;
 			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 		}
 
@@ -519,7 +519,7 @@ struct request *elv_next_request(request_queue_t *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -541,7 +541,7 @@ struct request *elv_next_request(request_queue_t *q)
 				nr_bytes = rq->data_len;
 
 			blkdev_dequeue_request(rq);
-			rq->flags |= REQ_QUIET;
+			rq->cmd_flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
 			end_that_request_last(rq, 0);
 		} else {