path: root/include/linux/blkdev.h
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 136
1 file changed, 38 insertions(+), 98 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bbf906a0b419..90392a9d7a9c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,85 +34,11 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic); /* destructor */
-	void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued; /* queued reads & sync writes */
-	atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
-struct cfq_queue;
-struct cfq_io_context {
-	struct rb_node rb_node;
-	void *key;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	unsigned long last_end_request;
-	sector_t last_request_pos;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-
-	unsigned int seek_samples;
-	u64 seek_total;
-	sector_t seek_mean;
-
-	struct list_head queue_list;
-
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
-};
-
-/*
- * This is the per-process I/O subsystem state. It is refcounted and
- * kmalloc'ed. Currently all fields are modified in process io context
- * (apart from the atomic refcount), so require no locking.
- */
-struct io_context {
-	atomic_t refcount;
-	struct task_struct *task;
-
-	unsigned int ioprio_changed;
-
-	/*
-	 * For request batching
-	 */
-	unsigned long last_waited; /* Time last woken after wait for request */
-	int nr_batch_requests;     /* Number of requests left in the batch */
-
-	struct as_io_context *aic;
-	struct rb_root cic_root;
-	void *ioc_data;
-};
-
-void put_io_context(struct io_context *ioc);
+int put_io_context(struct io_context *ioc);
 void exit_io_context(void);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
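The per-process io_context structures removed above are no longer defined in blkdev.h (this diff alone does not show their new home), and put_io_context() now returns int. A minimal sketch of using the prototypes that remain, assuming the example function name is illustrative and not part of this patch:

	/*
	 * Hedged sketch: take and drop an io_context reference with the
	 * prototypes above. put_io_context() is assumed to return non-zero
	 * once the context is gone, matching the !CONFIG_BLOCK stub near
	 * the end of this diff, which returns 1.
	 */
	static void example_ioc_ref(void)
	{
		struct io_context *ioc;

		ioc = get_io_context(GFP_KERNEL, -1);	/* -1: no NUMA node preference */
		if (!ioc)
			return;

		/* ... look up or attach per-queue state to ioc here ... */

		if (put_io_context(ioc))
			pr_debug("last reference to ioc dropped\n");
	}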
@@ -143,8 +69,6 @@ enum rq_cmd_type_bits {
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
-	REQ_TYPE_ATA_CMD,
-	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
 };
@@ -341,7 +265,6 @@ enum blk_queue_state {
 struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
-	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
@@ -432,9 +355,12 @@ struct request_queue
	unsigned int		max_segment_size;
 
	unsigned long		seg_boundary_mask;
+	void			*dma_drain_buffer;
+	unsigned int		dma_drain_size;
	unsigned int		dma_alignment;
 
	struct blk_queue_tag	*queue_tags;
+	struct list_head	tag_busy_list;
 
	unsigned int		nr_sorted;
	unsigned int		in_flight;
@@ -539,6 +465,8 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
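The new blk_queued_rq() macro makes the dequeue invariant noted in its comment testable. A hedged sketch (helper name illustrative) of using it as a sanity check before completing a request that should already be off the queue:

	/* Illustrative only: assert the request was dequeued before completion. */
	static void example_end_dequeued(struct request *rq, int uptodate)
	{
		WARN_ON(blk_queued_rq(rq));	/* queuelist must be empty here */
		end_dequeued_request(rq, uptodate);
	}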
@@ -697,6 +625,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -717,29 +646,35 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }
 
 /*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
+ * blk_end_request() and friends.
+ * __blk_end_request() and end_request() must be called with
+ * the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
  */
-extern int end_that_request_first(struct request *, int, int);
-extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *, int);
+extern int blk_end_request(struct request *rq, int error,
+				unsigned int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error,
+				unsigned int nr_bytes);
+extern int blk_end_bidi_request(struct request *rq, int error,
+				unsigned int nr_bytes, unsigned int bidi_bytes);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
 extern void end_dequeued_request(struct request *, int);
+extern int blk_end_request_callback(struct request *rq, int error,
+				unsigned int nr_bytes,
+				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
 
 /*
- * end_that_request_first/chunk() takes an uptodate argument. we account
- * any value <= as an io error. 0 means -EIO for compatability reasons,
- * any other < 0 value is the direct error type. An uptodate value of
- * 1 indicates successful io completion
+ * blk_end_request() takes bytes instead of sectors as a complete size.
+ * blk_rq_bytes() returns bytes left to complete in the entire request.
+ * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
  */
-#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))
+extern unsigned int blk_rq_bytes(struct request *rq);
+extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
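The rewritten comment block above describes the byte-based completion API that replaces end_that_request_first()/end_that_request_last(). A minimal conversion sketch, assuming a driver that previously completed whole requests in its completion path (the function name and the exact old call sequence are illustrative, not taken from this patch):

	/*
	 * Old pattern (roughly):
	 *	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
	 *		blkdev_dequeue_request(rq);
	 *		end_that_request_last(rq, uptodate);
	 *	}
	 * New pattern: a single byte-based call. error is 0 on success or a
	 * negative errno; a non-zero return means bytes are still pending.
	 */
	static void example_complete(struct request *rq, int error, unsigned int bytes)
	{
		if (blk_end_request(rq, error, bytes))
			return;	/* partial completion, more bytes pending */

		/* fully completed; the block layer has finished the request */
	}

Per the updated comment, only __blk_end_request() and end_request() still require the caller to hold the queue spinlock.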
@@ -761,10 +696,13 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+				unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
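blk_queue_dma_drain() pairs with the dma_drain_buffer/dma_drain_size queue fields added earlier in this diff. A hedged sketch of driver-side setup at queue-init time (the buffer size, function name, and the assumption that 0 means success are illustrative, not from this patch):

	#define EXAMPLE_DRAIN_SIZE	(4 * 1024)	/* illustrative size */

	static int example_setup_drain(struct request_queue *q)
	{
		void *buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* register the drain buffer; assumed to return 0 on success */
		return blk_queue_dma_drain(q, buf, EXAMPLE_DRAIN_SIZE);
	}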
@@ -836,12 +774,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 
 static inline int queue_dma_alignment(struct request_queue *q)
 {
-	int retval = 511;
-
-	if (q && q->dma_alignment)
-		retval = q->dma_alignment;
-
-	return retval;
+	return q ? q->dma_alignment : 511;
 }
 
 /* assumes size > 256 */
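queue_dma_alignment() now returns the raw dma_alignment field and falls back to 511 only for a NULL queue; note that, unlike the old version, a dma_alignment of 0 is returned as-is. A hedged sketch of the kind of alignment check a passthrough path might perform with it (function name illustrative):

	/* Illustrative: reject user buffers that violate the queue's DMA alignment. */
	static bool example_dma_aligned(struct request_queue *q,
					unsigned long uaddr, unsigned int len)
	{
		int mask = queue_dma_alignment(q);

		return ((uaddr | len) & mask) == 0;
	}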
@@ -894,6 +827,13 @@ static inline void exit_io_context(void)
 {
 }
 
+struct io_context;
+static inline int put_io_context(struct io_context *ioc)
+{
+	return 1;
+}
+
+
 #endif /* CONFIG_BLOCK */
 
 #endif