Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--   include/linux/blkdev.h | 151
1 file changed, 94 insertions(+), 57 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 53ea933cf60b..a92d9e4ea96e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,7 +16,9 @@
 #include <linux/bio.h>
 #include <linux/module.h>
 #include <linux/stringify.h>
+#include <linux/gfp.h>
 #include <linux/bsg.h>
+#include <linux/smp.h>
 
 #include <asm/scatterlist.h>
 
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_PM_SUSPEND,	/* suspend request */
 	REQ_TYPE_PM_RESUME,	/* resume request */
 	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
-	REQ_TYPE_FLUSH,		/* flush request */
 	REQ_TYPE_SPECIAL,	/* driver defined type */
 	REQ_TYPE_LINUX_BLOCK,	/* generic block layer message */
 	/*
@@ -76,19 +77,18 @@ enum rq_cmd_type_bits {
  *
  */
 enum {
-	/*
-	 * just examples for now
-	 */
 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
-	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
+	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
+	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
 };
 
 /*
- * request type modified bits. first three bits match BIO_RW* bits, important
+ * request type modified bits. first two bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
@@ -111,6 +111,7 @@ enum rq_flag_bits {
 };
 
 #define REQ_RW		(1 << __REQ_RW)
+#define REQ_DISCARD	(1 << __REQ_DISCARD)
 #define REQ_FAILFAST	(1 << __REQ_FAILFAST)
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
@@ -140,12 +141,14 @@ enum rq_flag_bits {
  */
 struct request {
 	struct list_head queuelist;
-	struct list_head donelist;
+	struct call_single_data csd;
+	int cpu;
 
 	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
+	unsigned long atomic_flags;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
@@ -190,13 +193,6 @@ struct request {
 	 */
 	unsigned short nr_phys_segments;
 
-	/* Number of scatter-gather addr+len pairs after
-	 * physical and DMA remapping hardware coalescing is performed.
-	 * This is the number of scatter-gather entries the driver
-	 * will actually have to deal with after DMA mapping is done.
-	 */
-	unsigned short nr_hw_segments;
-
 	unsigned short ioprio;
 
 	void *special;
@@ -220,6 +216,8 @@ struct request {
 	void *data;
 	void *sense;
 
+	unsigned long deadline;
+	struct list_head timeout_list;
 	unsigned int timeout;
 	int retries;
 
@@ -233,6 +231,11 @@ struct request {
 	struct request *next_rq;
 };
 
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+	return req->ioprio;
+}
+
 /*
  * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
+typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
+typedef int (lld_busy_fn) (struct request_queue *q);
+
+enum blk_eh_timer_return {
+	BLK_EH_NOT_HANDLED,
+	BLK_EH_HANDLED,
+	BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
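The blk_eh_timer_return values and the rq_timed_out_fn hook give low-level drivers a uniform way to react when a started request exceeds its deadline. A minimal sketch of such a handler follows; the my_dev_* names are hypothetical and not part of this patch.

static enum blk_eh_timer_return my_dev_timed_out(struct request *rq)
{
	struct my_dev *dev = rq->q->queuedata;		/* hypothetical driver data */

	if (my_dev_command_still_running(dev, rq))	/* hypothetical hardware check */
		return BLK_EH_RESET_TIMER;	/* re-arm the timer and keep waiting */

	if (my_dev_complete_command(dev, rq))		/* hypothetical completion path */
		return BLK_EH_HANDLED;		/* driver finished the request itself */

	return BLK_EH_NOT_HANDLED;	/* driver's own error handling takes over */
}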
@@ -307,10 +320,13 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
+	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
+	lld_busy_fn		*lld_busy_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -385,6 +401,10 @@ struct request_queue
 	unsigned int		nr_sorted;
 	unsigned int		in_flight;
 
+	unsigned int		rq_timeout;
+	struct timer_list	timeout;
+	struct list_head	timeout_list;
+
 	/*
 	 * sg stuff
 	 */
@@ -421,6 +441,10 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
+#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
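QUEUE_FLAG_NONROT lets a driver tell the I/O schedulers that seek avoidance is pointless. As a rough illustration (not from this patch), a driver for a solid-state device could raise the flag while setting up its queue using the existing queue_flag_set_unlocked() helper; dev->queue is a hypothetical name for the driver's request queue.

	/* illustrative: mark the queue as backed by a non-rotational device */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev->queue);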
@@ -526,7 +550,10 @@ enum {
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
+#define blk_queue_stackable(q)	\
+	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -536,16 +563,18 @@ enum {
 #define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
-#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
 #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
 #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)	\
 	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
 
+#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
 #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
+#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
@@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+	 (blk_discard_rq(rq) || blk_fs_request((rq))))
 
 /*
  * q->prep_rq_fn return values
@@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_MMU */
 
+struct rq_map_data {
+	struct page **pages;
+	int page_order;
+	int nr_entries;
+};
+
 struct req_iterator {
 	int i;
 	struct bio *bio;
@@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_insert_cloned_request(struct request_queue *q,
+				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
 extern void blk_plug_device_unlocked(struct request_queue *);
 extern int blk_remove_plug(struct request_queue *);
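blk_rq_check_limits() and blk_insert_cloned_request() are the entry points for request-stacking drivers. A hedged sketch of how such a driver might hand an already-built clone to a lower device's queue; the function name is made up and the clone construction and error handling are omitted.

static int my_dispatch_clone(struct request_queue *lower_q, struct request *clone)
{
	/* refuse clones that exceed the lower queue's limits */
	if (blk_rq_check_limits(lower_q, clone))
		return -EIO;

	/* queue the cloned request directly on the underlying device */
	return blk_insert_cloned_request(lower_q, clone);
}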
@@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
-extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+			   struct rq_map_data *, void __user *, unsigned long,
+			   gfp_t);
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-			       struct sg_iovec *, int, unsigned int);
+			       struct rq_map_data *, struct sg_iovec *, int,
+			       unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
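Both mapping helpers grow a struct rq_map_data * and a gfp_t argument; a non-NULL rq_map_data lets the caller supply its own pages instead of having the block layer allocate them. As a sketch, a caller that just wants the old behaviour would pass NULL and GFP_KERNEL (q, rq, ubuf and len are assumed to exist in the caller):

	/* illustrative: map a user buffer with no caller-supplied pages */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);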
@@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error,
 extern int blk_end_bidi_request(struct request *rq, int error,
 				unsigned int nr_bytes, unsigned int bidi_bytes);
 extern void end_request(struct request *, int);
-extern void end_queued_request(struct request *, int);
-extern void end_dequeued_request(struct request *, int);
 extern int blk_end_request_callback(struct request *rq, int error,
 				unsigned int nr_bytes,
 				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
+extern void blk_abort_queue(struct request_queue *);
+extern void blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
 
 /*
  * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);
+extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
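The new setup helpers are meant to be called once when a driver initialises its queue. A hypothetical init path might look like the following; the my_dev_* handlers are illustrative and would match the rq_timed_out_fn and prepare_discard_fn typedefs above.

	/* illustrative queue setup for a driver using the new hooks */
	blk_queue_rq_timeout(q, 30 * HZ);			/* 30 second per-request deadline */
	blk_queue_rq_timed_out(q, my_dev_timed_out);		/* timeout handler, see sketch above */
	blk_queue_set_discard(q, my_dev_prepare_discard);	/* hypothetical discard preparation */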
@@ -837,6 +887,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
+extern int blkdev_issue_discard(struct block_device *,
+				sector_t sector, sector_t nr_sects, gfp_t);
+
+static inline int sb_issue_discard(struct super_block *sb,
+				   sector_t block, sector_t nr_blocks)
+{
+	block <<= (sb->s_blocksize_bits - 9);
+	nr_blocks <<= (sb->s_blocksize_bits - 9);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+}
 
 /*
  * command filter functions
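sb_issue_discard() simply converts filesystem blocks into 512-byte sectors before calling blkdev_issue_discard(). As a hedged usage sketch, a filesystem that has just freed an extent could hint the device from its deallocation path; start_block and count are assumed to be in filesystem-block units and to exist in the caller.

	/* illustrative: tell the device the freed extent is now unused */
	err = sb_issue_discard(sb, start_block, count);
	if (err == -EOPNOTSUPP)
		err = 0;	/* device has no discard support; not fatal */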
@@ -874,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
+static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+				 unsigned int len)
+{
+	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	return !((unsigned long)addr & alignment) && !(len & alignment);
+}
+
 /* assumes size > 256 */
 static inline unsigned int blksize_bits(unsigned int size)
 {
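blk_rq_aligned() folds the queue's DMA alignment and padding mask into a single test. A sketch of the intended use when mapping a kernel buffer; the bounce-copy fallback is a hypothetical helper, not part of this patch.

	if (blk_rq_aligned(q, buf, len))
		ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	else
		ret = my_map_via_bounce_buffer(q, rq, buf, len);	/* hypothetical fallback */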
@@ -900,7 +967,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -945,49 +1012,19 @@ struct blk_integrity {
 
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request *);
 
-static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
-{
-	if (bi)
-		return bi->tuple_size;
-
-	return 0;
-}
-
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
 	return bdev->bd_disk->integrity;
 }
 
-static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-	struct blk_integrity *bi = bdev_get_integrity(bdev);
-
-	if (bi)
-		return bi->tag_size;
-
-	return 0;
-}
-
-static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
-{
-	struct blk_integrity *bi = bdev_get_integrity(bdev);
-
-	if (bi == NULL)
-		return 0;
-
-	if (rw == READ && bi->verify_fn != NULL &&
-	    (bi->flags & INTEGRITY_FLAG_READ))
-		return 1;
-
-	if (rw == WRITE && bi->generate_fn != NULL &&
-	    (bi->flags & INTEGRITY_FLAG_WRITE))
-		return 1;
-
-	return 0;
+	return disk->integrity;
 }
 
 static inline int blk_integrity_rq(struct request *rq)
@@ -1004,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq)
 #define blk_rq_count_integrity_sg(a)	(0)
 #define blk_rq_map_integrity_sg(a, b)	(0)
 #define bdev_get_integrity(a)		(0)
-#define bdev_get_tag_size(a)		(0)
+#define blk_get_integrity(a)		(0)
 #define blk_integrity_compare(a, b)	(0)
 #define blk_integrity_register(a, b)	(0)
 #define blk_integrity_unregister(a)	do { } while (0);