Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 118
 1 file changed, 85 insertions(+), 33 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a33a31e71bbc..02a585faa62c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
 	int count[2];
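The completion callback type gains an error argument. A minimal sketch of a waiter-style callback under the new signature, modeled on blk_end_sync_rq() as updated by this patch; my_rq_done() is hypothetical and assumes the submitter parked a struct completion in rq->waiting before queueing:

static void my_rq_done(struct request *rq, int error)
{
	struct completion *waiting = rq->waiting;

	if (error)
		printk(KERN_DEBUG "my_rq_done: request failed with %d\n", error);

	rq->waiting = NULL;
	__blk_put_request(rq->q, rq);	/* end_io runs with the queue lock held */
	complete(waiting);		/* complete last; rq may live on the waiter's stack */
}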
@@ -118,9 +118,9 @@ struct request_list {
  * try to put the fields that are referenced together in the same cacheline
  */
 struct request {
-	struct list_head queuelist; /* looking for ->queue? you must _not_
-				     * access it directly, use
-				     * blkdev_dequeue_request! */
+	struct list_head queuelist;
+	struct list_head donelist;
+
 	unsigned long flags;		/* see REQ_ bits below */
 
 	/* Maintain bio traversal state for part by part I/O submission.
@@ -141,6 +141,7 @@ struct request {
 	struct bio *biotail;
 
 	void *elevator_private;
+	void *completion_data;
 
 	unsigned short ioprio;
 
@@ -184,6 +185,7 @@ struct request {
 	void *sense;
 
 	unsigned int timeout;
+	int retries;
 
 	/*
 	 * For Power Management requests
@@ -206,6 +208,7 @@ enum rq_flag_bits {
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
 	__REQ_CMD,		/* is a regular fs rw request */
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_STARTED,		/* drive already may have started this one */
@@ -229,9 +232,7 @@ enum rq_flag_bits {
 	__REQ_PM_SUSPEND,	/* suspend request */
 	__REQ_PM_RESUME,	/* resume request */
 	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_BAR_PREFLUSH,	/* barrier pre-flush done */
-	__REQ_BAR_POSTFLUSH,	/* barrier post-flush */
-	__REQ_BAR_FLUSH,	/* rq is the flush request */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -240,6 +241,7 @@ enum rq_flag_bits {
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
 #define REQ_CMD		(1 << __REQ_CMD)
 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
 #define REQ_STARTED	(1 << __REQ_STARTED)
@@ -259,9 +261,7 @@ enum rq_flag_bits {
 #define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
 #define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
 #define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH	(1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH	(1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH	(1 << __REQ_BAR_FLUSH)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
 
 /*
  * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@@ -291,8 +291,8 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
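prepare_flush_fn now returns void: its only job is to turn a block-layer-allocated request into the device's cache-flush command, while softirq_done_fn (replacing end_flush_fn) is the deferred-completion half described further down. A hedged sketch of a prepare_flush_fn, loosely following what a SCSI disk driver does for SYNCHRONIZE CACHE; the opcode and timeout values are illustrative:

static void my_prepare_flush(request_queue_t *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->flags |= REQ_BLOCK_PC;	/* carries a raw command, not fs data */
	rq->timeout = 60 * HZ;		/* illustrative timeout */
	rq->cmd[0] = 0x35;		/* SYNCHRONIZE CACHE (10) opcode */
	rq->cmd_len = 10;
}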
@@ -334,7 +334,7 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
-	end_flush_fn		*end_flush_fn;
+	softirq_done_fn		*softirq_done_fn;
 
 	/*
	 * Dispatch queue sorting
@@ -419,14 +419,11 @@ struct request_queue
 	/*
 	 * reserved for flush operations
 	 */
-	struct request	*flush_rq;
-	unsigned char	ordered;
-};
-
-enum {
-	QUEUE_ORDERED_NONE,
-	QUEUE_ORDERED_TAG,
-	QUEUE_ORDERED_FLUSH,
+	unsigned int		ordered, next_ordered, ordseq;
+	int			orderr, ordcolor;
+	struct request		pre_flush_rq, bar_rq, post_flush_rq;
+	struct request		*orig_bar_rq;
+	unsigned int		bi_size;
 };
 
 #define RQ_INACTIVE		(-1)
@@ -444,12 +441,51 @@ enum {
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
+
+enum {
+	/*
+	 * Hardbarrier is supported with one of the following methods.
+	 *
+	 * NONE		: hardbarrier unsupported
+	 * DRAIN	: ordering by draining is enough
+	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
+	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
+	 * TAG		: ordering by tag is enough
+	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
+	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
+	 */
+	QUEUE_ORDERED_NONE	= 0x00,
+	QUEUE_ORDERED_DRAIN	= 0x01,
+	QUEUE_ORDERED_TAG	= 0x02,
+
+	QUEUE_ORDERED_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_POSTFLUSH	= 0x20,
+	QUEUE_ORDERED_FUA	= 0x40,
+
+	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+	/*
+	 * Ordered operation sequence
+	 */
+	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
+	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
+	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
+	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
+	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
+	QUEUE_ORDSEQ_DONE	= 0x20,
+};
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q)	test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
 #define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
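A driver advertises whichever ordered mode matches its hardware when it sets the queue up. A hedged sketch for a disk with a write-back cache that can flush but does not support FUA, reusing the hypothetical my_prepare_flush() from above; a non-zero return from blk_queue_ordered() (its prototype changes to int further down) indicates a rejected configuration:

	if (blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush))
		printk(KERN_ERR "mydev: cannot enable ordered writes\n");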
@@ -465,8 +501,7 @@ enum {
 
 #define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
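With a *_FUA mode the barrier write is issued as an ordinary request carrying REQ_FUA, so a driver tests blk_fua_rq() while building the device command. An illustrative prep_rq_fn sketch in the style of a SCSI WRITE(10) CDB; note this is a simplification (real SCSI drivers set the bit in the scsi_cmnd they build, not in rq->cmd):

static int my_prep_rq(request_queue_t *q, struct request *rq)
{
	if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE && blk_fua_rq(rq))
		rq->cmd[1] |= 1 << 3;	/* FUA bit, byte 1 of WRITE(10) */
	return BLKPREP_OK;
}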
@@ -558,8 +593,8 @@ extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void blk_end_sync_rq(struct request *rq);
-extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern void __blk_put_request(request_queue_t *, struct request *);
+extern void blk_end_sync_rq(struct request *rq, int error);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
@@ -579,6 +614,9 @@ extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
+extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+				  struct request *, int, rq_end_io_fn *);
+
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
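blk_execute_rq_nowait() is the asynchronous sibling of blk_execute_rq(): it queues the request and returns immediately, invoking the supplied rq_end_io_fn when the request finishes. A hedged usage sketch with a hypothetical teardown callback; setting the request up via blk_get_request()/blk_rq_map_kern() is elided, and at_head = 1 puts it ahead of queued I/O:

static void my_async_done(struct request *rq, int error)
{
	if (error)
		printk(KERN_ERR "mydev: async command failed with %d\n", error);
	__blk_put_request(rq->q, rq);	/* end_io is invoked with the queue lock held */
}

	blk_execute_rq_nowait(q, disk, rq, 1, my_async_done);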
@@ -608,8 +646,19 @@ static inline void blk_run_address_space(struct address_space *mapping)
  */
 extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
+extern void blk_complete_request(struct request *);
+
+static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
+{
+	if (blk_fs_request(rq))
+		return (nr_bytes >= (rq->hard_nr_sectors << 9));
+	else if (blk_pc_request(rq))
+		return nr_bytes >= rq->data_len;
+
+	return 0;
+}
 
 /*
  * end_that_request_first/chunk() takes an uptodate argument. we account
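blk_complete_request() puts the finished request on a per-CPU done list (the new donelist member) and raises the block softirq, which then calls the queue's softirq_done_fn; rq_all_done() lets the hardirq side check whether the byte count it is about to complete covers the whole request. A hedged sketch of the split for a hypothetical driver, with the handler registered once at init via blk_queue_softirq_done(q, my_softirq_done) (declared below); end_that_request_last() still requires the queue lock:

/* hardirq half: account the transferred bytes, defer the heavy lifting */
static void my_irq_completion(struct request *rq, unsigned int bytes, int uptodate)
{
	if (!end_that_request_chunk(rq, uptodate, bytes))
		blk_complete_request(rq);	/* raises the block softirq */
}

/* softirq half: final teardown, out of hardirq context */
static void my_softirq_done(struct request *rq)
{
	request_queue_t *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(q->queue_lock, flags);
}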
@@ -658,12 +707,14 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
+extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -696,7 +747,8 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
-#define MAX_SECTORS 255
+#define SAFE_MAX_SECTORS 255
+#define BLK_DEF_MAX_SECTORS 1024
 
 #define MAX_SEGMENT_SIZE	65536
 
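The single MAX_SECTORS cap splits in two: SAFE_MAX_SECTORS keeps the conservative 255-sector limit for drivers that never declare their own, while BLK_DEF_MAX_SECTORS (1024 sectors, i.e. 512 KiB) becomes the default soft limit for hardware that can take larger requests. A driver wanting the old behaviour would pin the limit explicitly, e.g.:

	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);	/* old MAX_SECTORS value */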
