Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--   include/linux/blkdev.h | 194
1 file changed, 128 insertions(+), 66 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 53ea933cf60b..a135256b272c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,7 +16,9 @@
 #include <linux/bio.h>
 #include <linux/module.h>
 #include <linux/stringify.h>
+#include <linux/gfp.h>
 #include <linux/bsg.h>
+#include <linux/smp.h>
 
 #include <asm/scatterlist.h>
 
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_PM_SUSPEND,	/* suspend request */
 	REQ_TYPE_PM_RESUME,	/* resume request */
 	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
-	REQ_TYPE_FLUSH,		/* flush request */
 	REQ_TYPE_SPECIAL,	/* driver defined type */
 	REQ_TYPE_LINUX_BLOCK,	/* generic block layer message */
 	/*
@@ -76,19 +77,20 @@ enum rq_cmd_type_bits {
  *
  */
 enum {
-	/*
-	 * just examples for now
-	 */
 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
-	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
+	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
+	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
 };
 
 /*
- * request type modified bits. first three bits match BIO_RW* bits, important
+ * request type modified bits. first two bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
@@ -111,7 +113,10 @@ enum rq_flag_bits {
 };
 
 #define REQ_RW			(1 << __REQ_RW)
-#define REQ_FAILFAST		(1 << __REQ_FAILFAST)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
+#define REQ_DISCARD		(1 << __REQ_DISCARD)
 #define REQ_SORTED		(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
@@ -140,12 +145,14 @@ enum rq_flag_bits {
  */
 struct request {
 	struct list_head queuelist;
-	struct list_head donelist;
+	struct call_single_data csd;
+	int cpu;
 
 	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
+	unsigned long atomic_flags;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
@@ -190,13 +197,6 @@ struct request {
 	 */
 	unsigned short nr_phys_segments;
 
-	/* Number of scatter-gather addr+len pairs after
-	 * physical and DMA remapping hardware coalescing is performed.
-	 * This is the number of scatter-gather entries the driver
-	 * will actually have to deal with after DMA mapping is done.
-	 */
-	unsigned short nr_hw_segments;
-
 	unsigned short ioprio;
 
 	void *special;
@@ -220,6 +220,8 @@ struct request {
 	void *data;
 	void *sense;
 
+	unsigned long deadline;
+	struct list_head timeout_list;
 	unsigned int timeout;
 	int retries;
 
@@ -233,6 +235,11 @@ struct request {
 	struct request *next_rq;
 };
 
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+	return req->ioprio;
+}
+
 /*
  * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
@@ -252,6 +259,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
+typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -265,6 +273,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
+typedef int (lld_busy_fn) (struct request_queue *q);
+
+enum blk_eh_timer_return {
+	BLK_EH_NOT_HANDLED,
+	BLK_EH_HANDLED,
+	BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
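The rq_timed_out_fn hook declared here, installed with blk_queue_rq_timed_out() and armed with blk_queue_rq_timeout() further down in this diff, is called when a started request outlives its deadline; the return value tells the block layer whether the driver completed the request itself, wants the timer restarted, or wants normal error handling. A minimal, hypothetical driver sketch (all mydrv_* names and helpers are made up):

	/* decide what to do with a request that has exceeded its deadline */
	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
	{
		struct mydrv_device *dev = rq->q->queuedata;

		if (mydrv_hw_still_busy(dev))
			return BLK_EH_RESET_TIMER;	/* give the hardware more time */
		if (mydrv_abort_command(dev, rq))
			return BLK_EH_HANDLED;		/* driver completed rq itself */
		return BLK_EH_NOT_HANDLED;		/* fall back to normal error handling */
	}

	/* at initialization time */
	blk_queue_rq_timed_out(q, mydrv_timed_out);
	blk_queue_rq_timeout(q, 30 * HZ);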
@@ -307,10 +324,13 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
+	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
+	lld_busy_fn		*lld_busy_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -385,6 +405,10 @@ struct request_queue
 	unsigned int		nr_sorted;
 	unsigned int		in_flight;
 
+	unsigned int		rq_timeout;
+	struct timer_list	timeout;
+	struct list_head	timeout_list;
+
 	/*
 	 * sg stuff
 	 */
@@ -421,6 +445,10 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -526,26 +554,36 @@ enum {
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
+#define blk_queue_stackable(q)	\
+	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
 #define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
 #define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
 
-#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
+#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
+#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
+#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
+				 blk_failfast_transport(rq) ||	\
+				 blk_failfast_driver(rq))
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
-#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
 #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
 #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)	\
 	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
 
+#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
 #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
+#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
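The single REQ_FAILFAST flag is split into device, transport and driver classes, and blk_noretry_request() now means "any fail-fast class is set". A hypothetical sketch of a driver error-completion path that consults it (mydrv_* is made up; retry accounting and queue locking are simplified):

	static void mydrv_handle_error(struct request *rq, int error, unsigned int bytes)
	{
		if (blk_noretry_request(rq) || rq->retries-- <= 0)
			blk_end_request(rq, error, bytes);	/* report the failure upward */
		else
			blk_requeue_request(rq->q, rq);		/* worth another attempt */
	}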
@@ -592,7 +630,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+	 (blk_discard_rq(rq) || blk_fs_request((rq))))
 
 /*
  * q->prep_rq_fn return values
@@ -637,6 +676,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_MMU */
 
+struct rq_map_data {
+	struct page **pages;
+	int page_order;
+	int nr_entries;
+};
+
 struct req_iterator {
 	int i;
 	struct bio *bio;
@@ -664,14 +709,18 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_insert_cloned_request(struct request_queue *q,
+				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
 extern void blk_plug_device_unlocked(struct request_queue *);
 extern int blk_remove_plug(struct request_queue *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
-			  struct gendisk *, unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct file *, struct request_queue *,
-			 struct gendisk *, struct scsi_ioctl_command __user *);
+extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+			  unsigned int, void __user *);
+extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+			 struct scsi_ioctl_command __user *);
 
 /*
  * Temporary export, until SCSI gets fixed up.
@@ -705,11 +754,14 @@ extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
-extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+			   struct rq_map_data *, void __user *, unsigned long,
+			   gfp_t);
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-			       struct sg_iovec *, int, unsigned int);
+			       struct rq_map_data *, struct sg_iovec *, int,
+			       unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			   struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
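blk_rq_map_user() and blk_rq_map_user_iov() gain a struct rq_map_data argument (optional pre-allocated pages to map into) and an explicit gfp_t. A hypothetical sketch of the common case that passes NULL map data (mydrv_* is made up; error handling is abbreviated):

	static int mydrv_submit_user_io(struct request_queue *q, struct gendisk *disk,
					void __user *ubuf, unsigned long len, int rw)
	{
		struct request *rq;
		struct bio *bio;
		int ret;

		rq = blk_get_request(q, rw, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
		if (!ret) {
			bio = rq->bio;			/* remember for unmapping */
			ret = blk_execute_rq(q, disk, rq, 0);
			blk_rq_unmap_user(bio);
		}
		blk_put_request(rq);
		return ret;
	}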
@@ -750,12 +802,15 @@ extern int __blk_end_request(struct request *rq, int error,
 extern int blk_end_bidi_request(struct request *rq, int error,
 				unsigned int nr_bytes, unsigned int bidi_bytes);
 extern void end_request(struct request *, int);
-extern void end_queued_request(struct request *, int);
-extern void end_dequeued_request(struct request *, int);
 extern int blk_end_request_callback(struct request *rq, int error,
 				    unsigned int nr_bytes,
 				    int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
+extern void blk_abort_queue(struct request_queue *);
+extern void blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
 
 /*
  * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -790,12 +845,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);
+extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
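blk_queue_set_discard() lets a driver install the prepare_discard_fn declared earlier in this diff, which the block layer consults when a discard request has to be turned into something the device understands, or rejected outright. A rough, hypothetical sketch; the mydrv_* names are made up and the split of work between this hook and the driver's request handling is the driver's choice:

	static int mydrv_prepare_discard(struct request_queue *q, struct request *rq)
	{
		struct mydrv_device *dev = q->queuedata;

		if (!dev->supports_trim)
			return -EOPNOTSUPP;	/* no discard support on this device */
		/* nothing else to do here; the request handler checks blk_discard_rq() */
		return 0;
	}

	/* at initialization time */
	blk_queue_set_discard(q, mydrv_prepare_discard);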
@@ -806,7 +865,6 @@ extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(struct request_queue *);
-extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
@@ -837,12 +895,23 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
+extern int blkdev_issue_discard(struct block_device *,
+				sector_t sector, sector_t nr_sects, gfp_t);
+
+static inline int sb_issue_discard(struct super_block *sb,
+				   sector_t block, sector_t nr_blocks)
+{
+	block <<= (sb->s_blocksize_bits - 9);
+	nr_blocks <<= (sb->s_blocksize_bits - 9);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+}
 
 /*
  * command filter functions
  */
 extern int blk_verify_command(struct blk_cmd_filter *filter,
-			      unsigned char *cmd, int has_write_perm);
+			      unsigned char *cmd, fmode_t has_write_perm);
+extern void blk_unregister_filter(struct gendisk *disk);
 extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define MAX_PHYS_SEGMENTS 128
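blkdev_issue_discard() and the sb_issue_discard() wrapper give filesystems a way to tell the device that a range of blocks no longer contains useful data; the wrapper converts filesystem blocks into 512-byte sectors using s_blocksize_bits before calling down. A hypothetical caller sketch (myfs_* is made up):

	static void myfs_free_extent(struct super_block *sb, sector_t start_block,
				     sector_t nr_blocks)
	{
		int err = sb_issue_discard(sb, start_block, nr_blocks);

		if (err && err != -EOPNOTSUPP)	/* -EOPNOTSUPP: no discard support, ignore */
			printk(KERN_WARNING "myfs: discard failed: %d\n", err);
	}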
@@ -874,6 +943,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
+static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+				 unsigned int len)
+{
+	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	return !((unsigned long)addr & alignment) && !(len & alignment);
+}
+
 /* assumes size > 256 */
 static inline unsigned int blksize_bits(unsigned int size)
 {
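blk_rq_aligned() reports whether a buffer satisfies the queue's DMA alignment and padding constraints, which is the natural test for choosing between mapping a buffer directly and bouncing it through a copy. A hypothetical sketch (the mydrv_* names, including the bounce helper, are made up):

	static int mydrv_map_buffer(struct request_queue *q, struct request *rq,
				    void *buf, unsigned int len)
	{
		if (blk_rq_aligned(q, buf, len))
			return blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);

		return mydrv_copy_via_bounce_buffer(q, rq, buf, len);
	}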
@@ -900,7 +976,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -945,49 +1021,19 @@ struct blk_integrity {
 
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request *);
 
-static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
-{
-	if (bi)
-		return bi->tuple_size;
-
-	return 0;
-}
-
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
 	return bdev->bd_disk->integrity;
 }
 
-static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
-{
-	struct blk_integrity *bi = bdev_get_integrity(bdev);
-
-	if (bi)
-		return bi->tag_size;
-
-	return 0;
-}
-
-static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-	struct blk_integrity *bi = bdev_get_integrity(bdev);
-
-	if (bi == NULL)
-		return 0;
-
-	if (rw == READ && bi->verify_fn != NULL &&
-	    (bi->flags & INTEGRITY_FLAG_READ))
-		return 1;
-
-	if (rw == WRITE && bi->generate_fn != NULL &&
-	    (bi->flags & INTEGRITY_FLAG_WRITE))
-		return 1;
-
-	return 0;
+	return disk->integrity;
 }
 
 static inline int blk_integrity_rq(struct request *rq)
@@ -1004,13 +1050,29 @@ static inline int blk_integrity_rq(struct request *rq)
 #define blk_rq_count_integrity_sg(a)	(0)
 #define blk_rq_map_integrity_sg(a, b)	(0)
 #define bdev_get_integrity(a)		(0)
-#define bdev_get_tag_size(a)		(0)
+#define blk_get_integrity(a)		(0)
 #define blk_integrity_compare(a, b)	(0)
 #define blk_integrity_register(a, b)	(0)
 #define blk_integrity_unregister(a)	do { } while (0);
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+struct block_device_operations {
+	int (*open) (struct block_device *, fmode_t);
+	int (*release) (struct gendisk *, fmode_t);
+	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+	int (*direct_access) (struct block_device *, sector_t,
+				void **, unsigned long *);
+	int (*media_changed) (struct gendisk *);
+	int (*revalidate_disk) (struct gendisk *);
+	int (*getgeo)(struct block_device *, struct hd_geometry *);
+	struct module *owner;
+};
+
+extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+				 unsigned long);
 #else /* CONFIG_BLOCK */
 /*
  * stubs for when the block layer is configured out
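With block_device_operations now declared in blkdev.h, the open/release/ioctl methods take an fmode_t instead of the old inode/file arguments. A hypothetical sketch of a driver's method table against the new prototypes (the mydrv_* functions are made up):

	static int mydrv_open(struct block_device *bdev, fmode_t mode)
	{
		if ((mode & FMODE_WRITE) && mydrv_is_read_only(bdev->bd_disk))
			return -EROFS;
		return 0;
	}

	static int mydrv_release(struct gendisk *disk, fmode_t mode)
	{
		return 0;
	}

	static struct block_device_operations mydrv_fops = {
		.owner		= THIS_MODULE,
		.open		= mydrv_open,
		.release	= mydrv_release,
		.getgeo		= mydrv_getgeo,		/* hypothetical geometry helper */
	};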