Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	| 142
1 file changed, 27 insertions(+), 115 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 09a840264d6f..89c855c5655c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -60,7 +60,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_PM_RESUME,	/* resume request */
 	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
 	REQ_TYPE_SPECIAL,	/* driver defined type */
-	REQ_TYPE_LINUX_BLOCK,	/* generic block layer message */
 	/*
 	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
 	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
@@ -70,84 +69,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-/*
- * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
- * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
- * SCSI cdb.
- *
- * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
- * typically to differentiate REQ_TYPE_SPECIAL requests.
- *
- */
-enum {
-	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
-	REQ_LB_OP_FLUSH	= 0x41,		/* flush request */
-};
-
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
-	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-	/* above flags must match BIO_RW_* */
-	__REQ_DISCARD,		/* request to discard sectors */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_FUA,		/* forced unit access */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
-	__REQ_ALLOCED,		/* request came from our alloc pool */
-	__REQ_RW_META,		/* metadata io request */
-	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
-	__REQ_IO_STAT,		/* account I/O stat */
-	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW			(1 << __REQ_RW)
-#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD		(1 << __REQ_DISCARD)
-#define REQ_SORTED		(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
-#define REQ_FUA			(1 << __REQ_FUA)
-#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
-#define REQ_STARTED		(1 << __REQ_STARTED)
-#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
-#define REQ_QUEUED		(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
-#define REQ_FAILED		(1 << __REQ_FAILED)
-#define REQ_QUIET		(1 << __REQ_QUIET)
-#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
-#define REQ_RW_META		(1 << __REQ_RW_META)
-#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
-#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-				 REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB	16
 
 /*
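
The deleted REQ_LB_OP_* opcodes had no remaining users, and the rq_flag_bits enum is not gone from the kernel: in this series the request flags move out of this header (into blk_types.h, shared with the bio flags), and call sites test cmd_flags directly instead of going through one wrapper macro per flag. A minimal sketch of a typical call-site conversion (the surrounding check is invented for illustration, not taken from the patch):

	-	if (blk_discard_rq(rq))
	+	if (rq->cmd_flags & REQ_DISCARD)
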
@@ -264,6 +185,7 @@ struct request_pm_state
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
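
unprep_rq_fn is the inverse of prep_rq_fn: it gives the driver a hook to release whatever prep allocated, once the block layer is finished with the request. A hedged sketch of the pairing, loosely modeled on how a SCSI-disk-style driver can use it together with blk_add_request_payload() (declared further down in this patch); my_prep, my_unprep, and the 512-byte payload length are illustrative:

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/gfp.h>

	/* Attach a one-page payload while preparing a discard request. */
	static int my_prep(struct request_queue *q, struct request *rq)
	{
		if (rq->cmd_flags & REQ_DISCARD) {
			struct page *page = alloc_page(GFP_ATOMIC);

			if (!page)
				return BLKPREP_DEFER;	/* retry later */
			blk_add_request_payload(rq, page, 512);
		}
		return BLKPREP_OK;
	}

	/* Undo my_prep(): free the payload page once the request is done. */
	static void my_unprep(struct request_queue *q, struct request *rq)
	{
		if (rq->cmd_flags & REQ_DISCARD)
			__free_page(bio_page(rq->bio));
	}
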
@@ -275,7 +197,6 @@ struct bvec_merge_data {
 };
 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
 			     struct bio_vec *);
-typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -346,9 +267,9 @@ struct request_queue
 	request_fn_proc		*request_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
+	unprep_rq_fn		*unprep_rq_fn;
 	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
-	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
@@ -467,11 +388,13 @@ struct request_queue
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
-				 (1 << QUEUE_FLAG_SAME_COMP))
+				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
+				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
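
QUEUE_FLAG_ADD_RANDOM makes the entropy contribution of disk I/O a per-queue property: it is on by default (it joins QUEUE_FLAG_DEFAULT above), and a driver whose completion timing carries little entropy can clear it. A minimal sketch, assuming the existing queue_flag_clear_unlocked() helper from this header and a hypothetical driver probe path:

	/* e.g. for an SSD: completion timing is too regular to be
	 * worth feeding into the random pool */
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
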
@@ -596,38 +519,26 @@ enum {
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
-#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
-#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
-#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
-#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
-
-#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
-#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
-#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
-#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
-				 blk_failfast_transport(rq) ||	\
-				 blk_failfast_driver(rq))
-#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
-#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
-#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)
-
-#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
-
-#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
+#define blk_noretry_request(rq) \
+	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+			     REQ_FAILFAST_DRIVER))
+
+#define blk_account_rq(rq) \
+	(((rq)->cmd_flags & REQ_STARTED) && \
+	 ((rq)->cmd_type == REQ_TYPE_FS || \
+	  ((rq)->cmd_flags & REQ_DISCARD)))
+
 #define blk_pm_request(rq)	\
-	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
 
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
-#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
-#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
-#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
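
All of the removed one-liner wrappers (blk_fs_request(), blk_pc_request(), the blk_failfast_*() trio, blk_rq_started(), blk_sorted_rq(), blk_discard_rq(), ...) were thin macros over cmd_type and cmd_flags, so every caller converts mechanically to the open-coded test. An illustrative call-site conversion (the error-path check itself is invented for the example):

	-	if (blk_fs_request(rq) && !blk_rq_quiet(rq))
	+	if (rq->cmd_type == REQ_TYPE_FS && !(rq->cmd_flags & REQ_QUIET))
	 		printk(KERN_ERR "I/O error on %s\n", rq->rq_disk->disk_name);
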
@@ -641,7 +552,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
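
Only the names change here: REQ_RW and REQ_RW_SYNC become REQ_WRITE and REQ_SYNC as part of the bio/request flag unification. The semantics are as before: reads are always considered sync, writes only when explicitly flagged. Assuming the usual READ/WRITE definitions from linux/fs.h, which this series remaps onto the shared flags:

	rw_is_sync(READ)		-> true  (not a write)
	rw_is_sync(WRITE)		-> false (plain async write)
	rw_is_sync(WRITE | REQ_SYNC)	-> true  (sync write)
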
@@ -649,9 +560,6 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-#define rq_is_meta(rq)	((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq)	((rq)->cmd_flags & REQ_NOIDLE)
-
 static inline int blk_queue_full(struct request_queue *q, int sync)
 {
 	if (sync)
@@ -684,7 +592,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-	 (blk_discard_rq(rq) || blk_fs_request((rq))))
+	 (((rq)->cmd_flags & REQ_DISCARD) || \
+	  (rq)->cmd_type == REQ_TYPE_FS))
 
 /*
  * q->prep_rq_fn return values
@@ -709,7 +618,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_HIGH		-1ULL
 #endif
 #define BLK_BOUNCE_ANY		(-1ULL)
-#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
+#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
 
 /*
  * default timeout for SG_IO if none specified
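
DMA_BIT_MASK(24) expands to (1ULL << 24) - 1 = 0x00ffffff, the classic 16 MiB ISA DMA limit, so BLK_BOUNCE_ISA keeps its value while dropping the dependency on the architecture-specific ISA_DMA_THRESHOLD definition.
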
@@ -781,6 +690,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
 				gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -915,6 +826,7 @@ extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
+extern void blk_unprep_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
@@ -959,6 +871,7 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
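
blk_queue_unprep_rq() is the registration counterpart of blk_queue_prep_rq(). Wiring up both hooks from the earlier sketch at queue-setup time would look like this (names still illustrative):

	blk_queue_prep_rq(q, my_prep);
	blk_queue_unprep_rq(q, my_unprep);
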
@@ -966,7 +879,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
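
With prepare_flush_fn gone, blk_queue_ordered() takes only the ordered mode; the block layer initializes the flush request itself, and the driver sees it in its request_fn like any other request. An illustrative conversion of a setup call (my_prepare_flush stands in for a driver's old callback; QUEUE_ORDERED_DRAIN_FLUSH is one of the existing modes in this header):

	-	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
	+	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
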
@@ -1020,7 +933,7 @@ static inline int sb_issue_discard(struct super_block *sb,
 {
 	block <<= (sb->s_blocksize_bits - 9);
 	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
 				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 }
 
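The switch to GFP_NOFS closes a potential deadlock: sb_issue_discard() runs in filesystem context, and a GFP_KERNEL allocation there could re-enter the filesystem through memory reclaim while waiting on the very I/O being issued.
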
@@ -1333,7 +1246,6 @@ static inline int blk_integrity_rq(struct request *rq)
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	int (*release) (struct gendisk *, fmode_t);
-	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
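
locked_ioctl was the variant called under the big kernel lock; as part of the BKL removal, block drivers implement only ->ioctl and handle their own locking. A hedged sketch of a converted ops table (the my_* handlers are placeholders):

	static const struct block_device_operations my_fops = {
		.owner		= THIS_MODULE,
		.open		= my_open,
		.release	= my_release,
		.ioctl		= my_ioctl,	/* no BKL held by the caller */
	};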