Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--   include/linux/blkdev.h | 228
 1 file changed, 101 insertions, 127 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6690e8bae7bb..2c54906f678f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -60,7 +60,6 @@ enum rq_cmd_type_bits {
         REQ_TYPE_PM_RESUME,             /* resume request */
         REQ_TYPE_PM_SHUTDOWN,           /* shutdown request */
         REQ_TYPE_SPECIAL,               /* driver defined type */
-        REQ_TYPE_LINUX_BLOCK,           /* generic block layer message */
         /*
          * for ATA/ATAPI devices. this really doesn't belong here, ide should
          * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
@@ -70,84 +69,6 @@ enum rq_cmd_type_bits {
         REQ_TYPE_ATA_PC,
 };
 
-/*
- * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
- * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
- * SCSI cdb.
- *
- * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
- * typically to differentiate REQ_TYPE_SPECIAL requests.
- *
- */
-enum {
-        REQ_LB_OP_EJECT = 0x40,         /* eject request */
-        REQ_LB_OP_FLUSH = 0x41,         /* flush request */
-};
-
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-        __REQ_RW,                /* not set, read. set, write */
-        __REQ_FAILFAST_DEV,      /* no driver retries of device errors */
-        __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-        __REQ_FAILFAST_DRIVER,   /* no driver retries of driver errors */
-        /* above flags must match BIO_RW_* */
-        __REQ_DISCARD,           /* request to discard sectors */
-        __REQ_SORTED,            /* elevator knows about this request */
-        __REQ_SOFTBARRIER,       /* may not be passed by ioscheduler */
-        __REQ_HARDBARRIER,       /* may not be passed by drive either */
-        __REQ_FUA,               /* forced unit access */
-        __REQ_NOMERGE,           /* don't touch this for merging */
-        __REQ_STARTED,           /* drive already may have started this one */
-        __REQ_DONTPREP,          /* don't call prep for this one */
-        __REQ_QUEUED,            /* uses queueing */
-        __REQ_ELVPRIV,           /* elevator private data attached */
-        __REQ_FAILED,            /* set if the request failed */
-        __REQ_QUIET,             /* don't worry about errors */
-        __REQ_PREEMPT,           /* set for "ide_preempt" requests */
-        __REQ_ORDERED_COLOR,     /* is before or after barrier */
-        __REQ_RW_SYNC,           /* request is sync (sync write or read) */
-        __REQ_ALLOCED,           /* request came from our alloc pool */
-        __REQ_RW_META,           /* metadata io request */
-        __REQ_COPY_USER,         /* contains copies of user pages */
-        __REQ_INTEGRITY,         /* integrity metadata has been remapped */
-        __REQ_NOIDLE,            /* Don't anticipate more IO after this one */
-        __REQ_IO_STAT,           /* account I/O stat */
-        __REQ_MIXED_MERGE,       /* merge of different types, fail separately */
-        __REQ_NR_BITS,           /* stops here */
-};
-
-#define REQ_RW                  (1 << __REQ_RW)
-#define REQ_FAILFAST_DEV        (1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT  (1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER     (1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD             (1 << __REQ_DISCARD)
-#define REQ_SORTED              (1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER         (1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER         (1 << __REQ_HARDBARRIER)
-#define REQ_FUA                 (1 << __REQ_FUA)
-#define REQ_NOMERGE             (1 << __REQ_NOMERGE)
-#define REQ_STARTED             (1 << __REQ_STARTED)
-#define REQ_DONTPREP            (1 << __REQ_DONTPREP)
-#define REQ_QUEUED              (1 << __REQ_QUEUED)
-#define REQ_ELVPRIV             (1 << __REQ_ELVPRIV)
-#define REQ_FAILED              (1 << __REQ_FAILED)
-#define REQ_QUIET               (1 << __REQ_QUIET)
-#define REQ_PREEMPT             (1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR       (1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC             (1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED             (1 << __REQ_ALLOCED)
-#define REQ_RW_META             (1 << __REQ_RW_META)
-#define REQ_COPY_USER           (1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY           (1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE              (1 << __REQ_NOIDLE)
-#define REQ_IO_STAT             (1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE         (1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK       (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-                                 REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB     16
 
 /*
@@ -186,15 +107,19 @@ struct request {
         };
 
         /*
-         * two pointers are available for the IO schedulers, if they need
+         * Three pointers are available for the IO schedulers, if they need
          * more they have to dynamically allocate it.
          */
         void *elevator_private;
         void *elevator_private2;
+        void *elevator_private3;
 
         struct gendisk *rq_disk;
         unsigned long start_time;
-
+#ifdef CONFIG_BLK_CGROUP
+        unsigned long long start_time_ns;
+        unsigned long long io_start_time_ns;    /* when passed to hardware */
+#endif
         /* Number of scatter-gather DMA addr+len pairs after
          * physical address coalescing is performed.
          */
@@ -260,6 +185,7 @@ struct request_pm_state
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
@@ -271,7 +197,6 @@ struct bvec_merge_data {
 };
 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
                              struct bio_vec *);
-typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -342,9 +267,9 @@ struct request_queue
         request_fn_proc         *request_fn;
         make_request_fn         *make_request_fn;
         prep_rq_fn              *prep_rq_fn;
+        unprep_rq_fn            *unprep_rq_fn;
         unplug_fn               *unplug_fn;
         merge_bvec_fn           *merge_bvec_fn;
-        prepare_flush_fn        *prepare_flush_fn;
         softirq_done_fn         *softirq_done_fn;
         rq_timed_out_fn         *rq_timed_out_fn;
         dma_drain_needed_fn     *dma_drain_needed;
@@ -463,11 +388,14 @@ struct request_queue
 #define QUEUE_FLAG_IO_STAT     15       /* do IO stats */
 #define QUEUE_FLAG_DISCARD     16       /* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17       /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  18       /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD  19       /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |            \
                                  (1 << QUEUE_FLAG_CLUSTER) |            \
                                  (1 << QUEUE_FLAG_STACKABLE)    |       \
-                                 (1 << QUEUE_FLAG_SAME_COMP))
+                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
+                                 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
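The two new queue flags are driver-visible: QUEUE_FLAG_ADD_RANDOM is now part of QUEUE_FLAG_DEFAULT, so a queue contributes to the entropy pool unless its driver opts out, and QUEUE_FLAG_SECDISCARD advertises secure-discard support. A minimal sketch of opting out for a non-rotational device, using the queue_flag_*_unlocked() helpers declared elsewhere in this header; the function name is hypothetical and this snippet is illustrative only, not part of the patch:

        #include <linux/blkdev.h>

        static void mydrv_tune_queue(struct request_queue *q)   /* hypothetical */
        {
                /* SSD-style device: no seek penalty, and completion timing is
                 * too predictable to be a useful entropy source */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
        }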
@@ -592,38 +520,28 @@ enum {
         test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_flushing(q)   ((q)->ordseq)
 #define blk_queue_stackable(q)  \
         test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
+        test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+
+#define blk_noretry_request(rq) \
+        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+                             REQ_FAILFAST_DRIVER))
+
+#define blk_account_rq(rq) \
+        (((rq)->cmd_flags & REQ_STARTED) && \
+         ((rq)->cmd_type == REQ_TYPE_FS || \
+          ((rq)->cmd_flags & REQ_DISCARD)))
 
-#define blk_fs_request(rq)      ((rq)->cmd_type == REQ_TYPE_FS)
-#define blk_pc_request(rq)      ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
-#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
-#define blk_sense_request(rq)   ((rq)->cmd_type == REQ_TYPE_SENSE)
-
-#define blk_failfast_dev(rq)    ((rq)->cmd_flags & REQ_FAILFAST_DEV)
-#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
-#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
-#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
-                                 blk_failfast_transport(rq) || \
-                                 blk_failfast_driver(rq))
-#define blk_rq_started(rq)      ((rq)->cmd_flags & REQ_STARTED)
-#define blk_rq_io_stat(rq)      ((rq)->cmd_flags & REQ_IO_STAT)
-#define blk_rq_quiet(rq)        ((rq)->cmd_flags & REQ_QUIET)
-
-#define blk_account_rq(rq)      (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
-
-#define blk_pm_suspend_request(rq)      ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
-#define blk_pm_resume_request(rq)       ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)      \
-        (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+        ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+         (rq)->cmd_type == REQ_TYPE_PM_RESUME)
 
 #define blk_rq_cpu_valid(rq)    ((rq)->cpu != -1)
-#define blk_sorted_rq(rq)       ((rq)->cmd_flags & REQ_SORTED)
-#define blk_barrier_rq(rq)      ((rq)->cmd_flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)          ((rq)->cmd_flags & REQ_FUA)
-#define blk_discard_rq(rq)      ((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)         ((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)       (!list_empty(&(rq)->queuelist))
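This hunk drops most of the old request-type wrapper macros (blk_fs_request(), blk_discard_rq(), blk_sorted_rq() and friends), so callers test cmd_type and cmd_flags directly. A hedged sketch of what the change looks like at a call site outside this header; the function name is hypothetical, not part of the patch:

        #include <linux/blkdev.h>

        /* was: blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)) */
        static bool mydrv_should_account(struct request *rq)    /* hypothetical */
        {
                return (rq->cmd_flags & REQ_STARTED) &&
                       (rq->cmd_type == REQ_TYPE_FS ||
                        (rq->cmd_flags & REQ_DISCARD));
        }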
@@ -637,7 +555,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-        return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+        return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
@@ -645,9 +563,6 @@ static inline bool rq_is_sync(struct request *rq)
         return rw_is_sync(rq->cmd_flags);
 }
 
-#define rq_is_meta(rq)          ((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq)           ((rq)->cmd_flags & REQ_NOIDLE)
-
 static inline int blk_queue_full(struct request_queue *q, int sync)
 {
         if (sync)
@@ -680,7 +595,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
         (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)        \
         (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-         (blk_discard_rq(rq) || blk_fs_request((rq))))
+         (((rq)->cmd_flags & REQ_DISCARD) || \
+          (rq)->cmd_type == REQ_TYPE_FS))
 
 /*
  * q->prep_rq_fn return values
@@ -705,7 +621,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_HIGH         -1ULL
 #endif
 #define BLK_BOUNCE_ANY          (-1ULL)
-#define BLK_BOUNCE_ISA          (ISA_DMA_THRESHOLD)
+#define BLK_BOUNCE_ISA          (DMA_BIT_MASK(24))
 
 /*
  * default timeout for SG_IO if none specified
@@ -777,6 +693,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
                                         gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_add_request_payload(struct request *rq, struct page *page,
+                unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
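blk_add_request_payload() is new: it attaches a single page as the data payload of a request the driver is preparing, which is how discard requests pick up their range-descriptor buffer. A rough, hedged sketch of the call pattern inside a driver's prep handler; the function name, descriptor layout and use of PAGE_SIZE are assumptions for illustration, not taken from this patch:

        #include <linux/blkdev.h>
        #include <linux/gfp.h>

        static int mydrv_prep_discard(struct request_queue *q, struct request *rq)
        {
                struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

                if (!page)
                        return BLKPREP_DEFER;
                /* fill the page with the device-specific discard descriptor,
                 * then make it the request's payload */
                blk_add_request_payload(rq, page, PAGE_SIZE);
                return BLKPREP_OK;
        }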
@@ -911,13 +829,19 @@ extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
+extern void blk_unprep_request(struct request *);
 
 /*
  * Access functions for manipulating queue properties
 */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                         spinlock_t *lock, int node_id);
+extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
+                                                           request_fn_proc *,
+                                                           spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
+                                                      request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
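blk_init_allocated_queue() and its _node variant let a driver that already obtained a queue from blk_alloc_queue() turn it into a request-based queue later, instead of having to use blk_init_queue() up front. A hedged sketch, assuming a hypothetical driver structure, request function and lock:

        #include <linux/blkdev.h>

        static int mydrv_setup_queue(struct mydrv_device *dev)  /* hypothetical */
        {
                dev->queue = blk_alloc_queue(GFP_KERNEL);
                if (!dev->queue)
                        return -ENOMEM;
                /* returns the queue on success, NULL on failure */
                if (!blk_init_allocated_queue(dev->queue, mydrv_request_fn,
                                              &dev->queue_lock)) {
                        blk_cleanup_queue(dev->queue);
                        return -ENOMEM;
                }
                return 0;
        }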
@@ -950,6 +874,7 @@ extern int blk_queue_dma_drain(struct request_queue *q,
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
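The new unprep hook is intended as the counterpart of prep_rq_fn: resources allocated in prep can be released once the request is finished (for requests marked REQ_DONTPREP, the block core invokes the unprep path at completion via blk_unprep_request()). A minimal sketch of registering the pair; everything named mydrv_* is hypothetical and not part of the patch:

        #include <linux/blkdev.h>

        static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
        {
                rq->special = mydrv_alloc_cmd(q, rq);   /* hypothetical allocator */
                if (!rq->special)
                        return BLKPREP_DEFER;
                rq->cmd_flags |= REQ_DONTPREP;          /* prep exactly once */
                return BLKPREP_OK;
        }

        static void mydrv_unprep_rq(struct request_queue *q, struct request *rq)
        {
                mydrv_free_cmd(rq->special);            /* hypothetical */
                rq->special = NULL;
        }

        /* at queue-setup time: */
        blk_queue_prep_rq(q, mydrv_prep_rq);
        blk_queue_unprep_rq(q, mydrv_unprep_rq);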
@@ -957,7 +882,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern int blk_queue_ordered(struct request_queue *, unsigned);
 extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
@@ -994,20 +919,27 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                 return NULL;
         return bqt->tag_index[tag];
 }
-
-extern int blkdev_issue_flush(struct block_device *, sector_t *);
-#define DISCARD_FL_WAIT         0x01    /* wait for completion */
-#define DISCARD_FL_BARRIER      0x02    /* issue DISCARD_BARRIER request */
-extern int blkdev_issue_discard(struct block_device *, sector_t sector,
-                sector_t nr_sects, gfp_t, int flags);
-
+enum{
+        BLKDEV_WAIT,    /* wait for completion */
+        BLKDEV_BARRIER, /* issue request with barrier */
+        BLKDEV_SECURE,  /* secure discard */
+};
+#define BLKDEV_IFL_WAIT         (1 << BLKDEV_WAIT)
+#define BLKDEV_IFL_BARRIER      (1 << BLKDEV_BARRIER)
+#define BLKDEV_IFL_SECURE       (1 << BLKDEV_SECURE)
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
+                        unsigned long);
+extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+                        sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 static inline int sb_issue_discard(struct super_block *sb,
                                    sector_t block, sector_t nr_blocks)
 {
         block <<= (sb->s_blocksize_bits - 9);
         nr_blocks <<= (sb->s_blocksize_bits - 9);
-        return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
-                                    DISCARD_FL_BARRIER);
+        return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
+                                   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
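The DISCARD_FL_* interface is replaced by the shared BLKDEV_IFL_* flags, which now also parameterise blkdev_issue_flush() and the new blkdev_issue_zeroout(). A sketch of a filesystem-side caller using exactly the signatures declared above; the wrapper function is hypothetical, and the fallback to zeroing is only one possible policy:

        #include <linux/blkdev.h>

        static int mydrv_discard_range(struct block_device *bdev, sector_t sector,
                                       sector_t nr_sects)       /* hypothetical */
        {
                int ret;

                ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
                                           BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
                if (ret == -EOPNOTSUPP)
                        /* device has no discard support; clear the blocks
                         * explicitly if the caller needs them zeroed */
                        ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
                                                   GFP_NOFS, BLKDEV_IFL_WAIT);
                return ret;
        }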
@@ -1196,6 +1128,48 @@ static inline void put_dev_sector(Sector p)
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 
+#ifdef CONFIG_BLK_CGROUP
+/*
+ * This should not be using sched_clock(). A real patch is in progress
+ * to fix this up, until that is in place we need to disable preemption
+ * around sched_clock() in this function and set_io_start_time_ns().
+ */
+static inline void set_start_time_ns(struct request *req)
+{
+        preempt_disable();
+        req->start_time_ns = sched_clock();
+        preempt_enable();
+}
+
+static inline void set_io_start_time_ns(struct request *req)
+{
+        preempt_disable();
+        req->io_start_time_ns = sched_clock();
+        preempt_enable();
+}
+
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+        return req->start_time_ns;
+}
+
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+        return req->io_start_time_ns;
+}
+#else
+static inline void set_start_time_ns(struct request *req) {}
+static inline void set_io_start_time_ns(struct request *req) {}
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+        return 0;
+}
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+        return 0;
+}
+#endif
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
         MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
@@ -1277,16 +1251,16 @@ static inline int blk_integrity_rq(struct request *rq)
 struct block_device_operations {
         int (*open) (struct block_device *, fmode_t);
         int (*release) (struct gendisk *, fmode_t);
-        int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
         int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
         int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
         int (*direct_access) (struct block_device *, sector_t,
                                                 void **, unsigned long *);
         int (*media_changed) (struct gendisk *);
-        unsigned long long (*set_capacity) (struct gendisk *,
-                                                unsigned long long);
+        void (*unlock_native_capacity) (struct gendisk *);
         int (*revalidate_disk) (struct gendisk *);
         int (*getgeo)(struct block_device *, struct hd_geometry *);
+        /* this callback is with swap_lock and sometimes page table lock held */
+        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
         struct module *owner;
 };
 
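For drivers maintained outside this tree, the layout change means locked_ioctl and set_capacity initialisers have to go; unlock_native_capacity() takes over the "the disk is larger than its current capacity clamp suggests" role, and swap_slot_free_notify() is a new optional hook. A hypothetical initialiser under the new layout, illustrative only (all mydrv_* callbacks are assumptions, not from this patch):

        #include <linux/blkdev.h>

        static const struct block_device_operations mydrv_fops = {
                .owner                   = THIS_MODULE,
                .open                    = mydrv_open,           /* hypothetical */
                .release                 = mydrv_release,        /* hypothetical */
                .ioctl                   = mydrv_ioctl,          /* hypothetical */
                .getgeo                  = mydrv_getgeo,         /* hypothetical */
                /* replaces .set_capacity: asked to undo an HPA-style
                 * capacity clamp when partitions extend past it */
                .unlock_native_capacity  = mydrv_unlock_native_capacity,
        };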