Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 211
 1 file changed, 140 insertions(+), 71 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 88d68081a0f1..a135256b272c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,7 +16,9 @@
 #include <linux/bio.h>
 #include <linux/module.h>
 #include <linux/stringify.h>
+#include <linux/gfp.h>
 #include <linux/bsg.h>
+#include <linux/smp.h>
 
 #include <asm/scatterlist.h>
 
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_PM_SUSPEND,	/* suspend request */
 	REQ_TYPE_PM_RESUME,	/* resume request */
 	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
-	REQ_TYPE_FLUSH,		/* flush request */
 	REQ_TYPE_SPECIAL,	/* driver defined type */
 	REQ_TYPE_LINUX_BLOCK,	/* generic block layer message */
 	/*
@@ -76,19 +77,20 @@ enum rq_cmd_type_bits {
  *
  */
 enum {
-	/*
-	 * just examples for now
-	 */
 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
-	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
+	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
+	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
 };
 
 /*
- * request type modified bits. first three bits match BIO_RW* bits, important
+ * request type modified bits. first two bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
@@ -111,7 +113,10 @@ enum rq_flag_bits {
 };
 
 #define REQ_RW		(1 << __REQ_RW)
-#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
+#define REQ_DISCARD	(1 << __REQ_DISCARD)
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
@@ -140,12 +145,14 @@ enum rq_flag_bits {
  */
 struct request {
 	struct list_head queuelist;
-	struct list_head donelist;
+	struct call_single_data csd;
+	int cpu;
 
 	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
+	unsigned long atomic_flags;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
@@ -190,13 +197,6 @@ struct request {
 	 */
 	unsigned short nr_phys_segments;
 
-	/* Number of scatter-gather addr+len pairs after
-	 * physical and DMA remapping hardware coalescing is performed.
-	 * This is the number of scatter-gather entries the driver
-	 * will actually have to deal with after DMA mapping is done.
-	 */
-	unsigned short nr_hw_segments;
-
 	unsigned short ioprio;
 
 	void *special;
@@ -220,6 +220,8 @@ struct request {
 	void *data;
 	void *sense;
 
+	unsigned long deadline;
+	struct list_head timeout_list;
 	unsigned int timeout;
 	int retries;
 
@@ -233,6 +235,11 @@ struct request {
 	struct request *next_rq;
 };
 
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+	return req->ioprio;
+}
+
 /*
  * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
@@ -252,6 +259,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
+typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -265,6 +273,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
+typedef int (lld_busy_fn) (struct request_queue *q);
+
+enum blk_eh_timer_return {
+	BLK_EH_NOT_HANDLED,
+	BLK_EH_HANDLED,
+	BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -280,6 +297,15 @@ struct blk_queue_tag {
 	atomic_t refcnt;		/* map can be shared */
 };
 
+#define BLK_SCSI_MAX_CMDS	(256)
+#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+
+struct blk_cmd_filter {
+	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+	struct kobject kobj;
+};
+
 struct request_queue
 {
 	/*
@@ -298,10 +324,13 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
+	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
+	lld_busy_fn		*lld_busy_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -376,6 +405,10 @@ struct request_queue
 	unsigned int		nr_sorted;
 	unsigned int		in_flight;
 
+	unsigned int		rq_timeout;
+	struct timer_list	timeout;
+	struct list_head	timeout_list;
+
 	/*
 	 * sg stuff
 	 */
@@ -398,6 +431,7 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
+	struct blk_cmd_filter cmd_filter;
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -411,6 +445,10 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -516,26 +554,36 @@ enum {
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
+#define blk_queue_stackable(q)	\
+	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
 #define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
 #define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
 
-#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
+#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
+#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
+#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
+				 blk_failfast_transport(rq) ||	\
+				 blk_failfast_driver(rq))
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
-#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
 #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
 #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)	\
 	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
 
+#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
 #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
+#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
@@ -582,7 +630,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+	 (blk_discard_rq(rq) || blk_fs_request((rq))))
 
 /*
  * q->prep_rq_fn return values
@@ -627,6 +676,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_MMU */
 
+struct rq_map_data {
+	struct page **pages;
+	int page_order;
+	int nr_entries;
+};
+
 struct req_iterator {
 	int i;
 	struct bio *bio;
@@ -654,13 +709,18 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
+extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_insert_cloned_request(struct request_queue *q,
+				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
+extern void blk_plug_device_unlocked(struct request_queue *);
 extern int blk_remove_plug(struct request_queue *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
-			  struct gendisk *, unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct file *, struct request_queue *,
-			 struct gendisk *, struct scsi_ioctl_command __user *);
+extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+			  unsigned int, void __user *);
+extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+			 struct scsi_ioctl_command __user *);
 
 /*
  * Temporary export, until SCSI gets fixed up.
@@ -694,11 +754,14 @@ extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
-extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+			   struct rq_map_data *, void __user *, unsigned long,
+			   gfp_t);
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-			       struct sg_iovec *, int, unsigned int);
+			       struct rq_map_data *, struct sg_iovec *, int,
+			       unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -739,12 +802,15 @@ extern int __blk_end_request(struct request *rq, int error,
 extern int blk_end_bidi_request(struct request *rq, int error,
 				unsigned int nr_bytes, unsigned int bidi_bytes);
 extern void end_request(struct request *, int);
-extern void end_queued_request(struct request *, int);
-extern void end_dequeued_request(struct request *, int);
 extern int blk_end_request_callback(struct request *rq, int error,
 				unsigned int nr_bytes,
 				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
+extern void blk_abort_queue(struct request_queue *);
+extern void blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
 
 /*
  * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -779,12 +845,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);
+extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
@@ -795,7 +865,6 @@ extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(struct request_queue *);
-extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
@@ -806,8 +875,6 @@ extern void blk_put_queue(struct request_queue *);
 /*
  * tag stuff
  */
-#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
-#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
 #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
@@ -828,15 +895,24 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
+extern int blkdev_issue_discard(struct block_device *,
+				sector_t sector, sector_t nr_sects, gfp_t);
+
+static inline int sb_issue_discard(struct super_block *sb,
+				   sector_t block, sector_t nr_blocks)
+{
+	block <<= (sb->s_blocksize_bits - 9);
+	nr_blocks <<= (sb->s_blocksize_bits - 9);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+}
 
 /*
  * command filter functions
  */
-extern int blk_verify_command(struct file *file, unsigned char *cmd);
-extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
-					 unsigned char *cmd, mode_t *f_mode);
-extern int blk_register_filter(struct gendisk *disk);
+extern int blk_verify_command(struct blk_cmd_filter *filter,
+			      unsigned char *cmd, fmode_t has_write_perm);
 extern void blk_unregister_filter(struct gendisk *disk);
+extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
@@ -867,6 +943,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
+static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+				 unsigned int len)
+{
+	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	return !((unsigned long)addr & alignment) && !(len & alignment);
+}
+
 /* assumes size > 256 */
 static inline unsigned int blksize_bits(unsigned int size)
 {
@@ -893,7 +976,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -938,49 +1021,19 @@ struct blk_integrity {
 
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request *);
 
-static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
-{
-	if (bi)
-		return bi->tuple_size;
-
-	return 0;
-}
-
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
 	return bdev->bd_disk->integrity;
 }
 
-static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
-{
-	struct blk_integrity *bi = bdev_get_integrity(bdev);
-
-	if (bi)
-		return bi->tag_size;
-
-	return 0;
-}
-
-static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-	struct blk_integrity *bi = bdev_get_integrity(bdev);
-
-	if (bi == NULL)
-		return 0;
-
-	if (rw == READ && bi->verify_fn != NULL &&
-	    (bi->flags & INTEGRITY_FLAG_READ))
-		return 1;
-
-	if (rw == WRITE && bi->generate_fn != NULL &&
-	    (bi->flags & INTEGRITY_FLAG_WRITE))
-		return 1;
-
-	return 0;
+	return disk->integrity;
 }
 
 static inline int blk_integrity_rq(struct request *rq)
@@ -997,13 +1050,29 @@ static inline int blk_integrity_rq(struct request *rq)
 #define blk_rq_count_integrity_sg(a)	(0)
 #define blk_rq_map_integrity_sg(a, b)	(0)
 #define bdev_get_integrity(a)		(0)
-#define bdev_get_tag_size(a)		(0)
+#define blk_get_integrity(a)		(0)
 #define blk_integrity_compare(a, b)	(0)
 #define blk_integrity_register(a, b)	(0)
 #define blk_integrity_unregister(a)	do { } while (0);
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+struct block_device_operations {
+	int (*open) (struct block_device *, fmode_t);
+	int (*release) (struct gendisk *, fmode_t);
+	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+	int (*direct_access) (struct block_device *, sector_t,
+			      void **, unsigned long *);
+	int (*media_changed) (struct gendisk *);
+	int (*revalidate_disk) (struct gendisk *);
+	int (*getgeo)(struct block_device *, struct hd_geometry *);
+	struct module *owner;
+};
+
+extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+				 unsigned long);
 #else /* CONFIG_BLOCK */
 /*
  * stubs for when the block layer is configured out