Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 114 ++++++----------------------
 1 file changed, 26 insertions(+), 88 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e542c8fd9215..71e7a847dffc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,83 +34,10 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic); /* destructor */
-	void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued; /* queued reads & sync writes */
-	atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
-struct cfq_queue;
-struct cfq_io_context {
-	struct rb_node rb_node;
-	void *key;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	unsigned long last_end_request;
-	sector_t last_request_pos;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-
-	unsigned int seek_samples;
-	u64 seek_total;
-	sector_t seek_mean;
-
-	struct list_head queue_list;
-
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
-};
-
-/*
- * This is the per-process I/O subsystem state. It is refcounted and
- * kmalloc'ed. Currently all fields are modified in process io context
- * (apart from the atomic refcount), so require no locking.
- */
-struct io_context {
-	atomic_t refcount;
-	struct task_struct *task;
-
-	unsigned int ioprio_changed;
-
-	/*
-	 * For request batching
-	 */
-	unsigned long last_waited; /* Time last woken after wait for request */
-	int nr_batch_requests;     /* Number of requests left in the batch */
-
-	struct as_io_context *aic;
-	struct rb_root cic_root;
-	void *ioc_data;
-};
-
-void put_io_context(struct io_context *ioc);
+int put_io_context(struct io_context *ioc);
 void exit_io_context(void);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
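The hunk above deletes the as/cfq/generic io_context definitions from blkdev.h (in the full series they presumably move to their own header; only the function prototypes remain here) and changes put_io_context() to return int. Below is a minimal sketch, not from the patch, of how the surviving API is typically used; the calling context, the GFP_KERNEL/node arguments, and the reading of the return value are assumptions, the last inferred from the !CONFIG_BLOCK stub later in this patch.

static int example_ioc_ref(void)
{
	/* Take a reference on the current task's io_context,
	 * allocating one on first use (assumption: process context,
	 * GFP_KERNEL, any NUMA node via -1). */
	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);

	if (!ioc)
		return -ENOMEM;

	/* ... read or update per-process I/O state here ... */

	/* Drop the reference. Nonzero apparently signals that the
	 * last reference is gone; the !CONFIG_BLOCK stub below
	 * unconditionally returns 1. */
	put_io_context(ioc);
	return 0;
}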
@@ -539,6 +466,8 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
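blk_queued_rq(), added above, gives a canonical test for whether a request is still linked on its queuelist. A hypothetical use, relying on the stated invariant that a dequeued request's queuelist is list_empty(); the helper name is invented and the caller is assumed to hold the queue lock.

static void example_dequeue_if_queued(struct request *rq)
{
	/* blkdev_dequeue_request() (a static inline later in this
	 * header) must only run on a request that is actually queued. */
	if (blk_queued_rq(rq))
		blkdev_dequeue_request(rq);
}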
@@ -718,29 +647,32 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }
 
 /*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
+ * blk_end_request() and friends.
+ * __blk_end_request() and end_request() must be called with
+ * the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
  */
-extern int end_that_request_first(struct request *, int, int);
-extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *, int);
+extern int blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
+				int bidi_bytes);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
 extern void end_dequeued_request(struct request *, int);
+extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
 
 /*
- * end_that_request_first/chunk() takes an uptodate argument. we account
- * any value <= as an io error. 0 means -EIO for compatability reasons,
- * any other < 0 value is the direct error type. An uptodate value of
- * 1 indicates successful io completion
+ * blk_end_request() takes bytes instead of sectors as a complete size.
+ * blk_rq_bytes() returns bytes left to complete in the entire request.
+ * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
  */
-#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))
+extern unsigned int blk_rq_bytes(struct request *rq);
+extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
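The hunk above replaces the sector-based end_that_request_first/chunk/last() trio with byte-based completion. A sketch of a driver completion path under the new API; the function is invented for illustration and assumes blk_end_request()'s return convention of nonzero meaning bytes are still pending.

static void example_finish_request(struct request *rq, int error)
{
	/* blk_end_request() takes the queue lock itself; callers
	 * already holding it would use __blk_end_request().
	 * blk_rq_bytes() is everything left in the request, so this
	 * completes rq in a single call. */
	if (blk_end_request(rq, error, blk_rq_bytes(rq)))
		BUG();	/* should be impossible: every byte was completed */
}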
@@ -893,6 +825,12 @@ static inline void exit_io_context(void)
 {
 }
 
+static inline int put_io_context(struct io_context *ioc)
+{
+	return 1;
+}
+
+
 #endif /* CONFIG_BLOCK */
 
 #endif
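For partial completion, the same API can retire a request one segment at a time, the role end_that_request_chunk() used to play. An invented fragment, assuming the queue lock is held (hence the __-prefixed variant) and the same nonzero-means-pending return convention.

static void example_finish_by_segment(struct request *rq, int error)
{
	/* Complete blk_rq_cur_bytes() at a time until the request is
	 * done. Once __blk_end_request() returns 0, rq has been fully
	 * completed and freed, and must not be touched again. */
	while (__blk_end_request(rq, error, blk_rq_cur_bytes(rq)))
		;	/* advance to the next segment */
}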