aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h36
1 file changed, 17 insertions, 19 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8da66379f7ea..8089ca17db9a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -267,6 +267,7 @@ struct blk_queue_ctx;
267 267
268typedef void (request_fn_proc) (struct request_queue *q); 268typedef void (request_fn_proc) (struct request_queue *q);
269typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); 269typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
270typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
270typedef int (prep_rq_fn) (struct request_queue *, struct request *); 271typedef int (prep_rq_fn) (struct request_queue *, struct request *);
271typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 272typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
272 273
@@ -409,6 +410,7 @@ struct request_queue {
409 410
410 request_fn_proc *request_fn; 411 request_fn_proc *request_fn;
411 make_request_fn *make_request_fn; 412 make_request_fn *make_request_fn;
413 poll_q_fn *poll_fn;
412 prep_rq_fn *prep_rq_fn; 414 prep_rq_fn *prep_rq_fn;
413 unprep_rq_fn *unprep_rq_fn; 415 unprep_rq_fn *unprep_rq_fn;
414 softirq_done_fn *softirq_done_fn; 416 softirq_done_fn *softirq_done_fn;
@@ -610,7 +612,6 @@ struct request_queue {
610#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ 612#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
611#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ 613#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
612#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ 614#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
613#define QUEUE_FLAG_STACKABLE 8 /* supports request stacking */
614#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ 615#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
615#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 616#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
616#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */ 617#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
@@ -632,14 +633,13 @@ struct request_queue {
632#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ 633#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
633#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ 634#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
634#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ 635#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
636#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
635 637
636#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 638#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
637 (1 << QUEUE_FLAG_STACKABLE) | \
638 (1 << QUEUE_FLAG_SAME_COMP) | \ 639 (1 << QUEUE_FLAG_SAME_COMP) | \
639 (1 << QUEUE_FLAG_ADD_RANDOM)) 640 (1 << QUEUE_FLAG_ADD_RANDOM))
640 641
641#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 642#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
642 (1 << QUEUE_FLAG_STACKABLE) | \
643 (1 << QUEUE_FLAG_SAME_COMP) | \ 643 (1 << QUEUE_FLAG_SAME_COMP) | \
644 (1 << QUEUE_FLAG_POLL)) 644 (1 << QUEUE_FLAG_POLL))
645 645
@@ -723,8 +723,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
723#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 723#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
724#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 724#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
725#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 725#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
726#define blk_queue_stackable(q) \
727 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
728#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 726#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
729#define blk_queue_secure_erase(q) \ 727#define blk_queue_secure_erase(q) \
730 (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) 728 (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
@@ -736,6 +734,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
736 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 734 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
737 REQ_FAILFAST_DRIVER)) 735 REQ_FAILFAST_DRIVER))
738#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) 736#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
737#define blk_queue_preempt_only(q) \
738 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
739
740extern int blk_set_preempt_only(struct request_queue *q);
741extern void blk_clear_preempt_only(struct request_queue *q);
739 742
740static inline bool blk_account_rq(struct request *rq) 743static inline bool blk_account_rq(struct request *rq)
741{ 744{
@@ -923,24 +926,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
923} 926}
924#endif 927#endif
925 928
926#ifdef CONFIG_PRINTK
927#define vfs_msg(sb, level, fmt, ...) \
928 __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
929#else
930#define vfs_msg(sb, level, fmt, ...) \
931do { \
932 no_printk(fmt, ##__VA_ARGS__); \
933 __vfs_msg(sb, "", " "); \
934} while (0)
935#endif
936
937extern int blk_register_queue(struct gendisk *disk); 929extern int blk_register_queue(struct gendisk *disk);
938extern void blk_unregister_queue(struct gendisk *disk); 930extern void blk_unregister_queue(struct gendisk *disk);
939extern blk_qc_t generic_make_request(struct bio *bio); 931extern blk_qc_t generic_make_request(struct bio *bio);
932extern blk_qc_t direct_make_request(struct bio *bio);
940extern void blk_rq_init(struct request_queue *q, struct request *rq); 933extern void blk_rq_init(struct request_queue *q, struct request *rq);
941extern void blk_init_request_from_bio(struct request *req, struct bio *bio); 934extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
942extern void blk_put_request(struct request *); 935extern void blk_put_request(struct request *);
943extern void __blk_put_request(struct request_queue *, struct request *); 936extern void __blk_put_request(struct request_queue *, struct request *);
937extern struct request *blk_get_request_flags(struct request_queue *,
938 unsigned int op,
939 blk_mq_req_flags_t flags);
944extern struct request *blk_get_request(struct request_queue *, unsigned int op, 940extern struct request *blk_get_request(struct request_queue *, unsigned int op,
945 gfp_t gfp_mask); 941 gfp_t gfp_mask);
946extern void blk_requeue_request(struct request_queue *, struct request *); 942extern void blk_requeue_request(struct request_queue *, struct request *);
@@ -964,7 +960,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
964extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 960extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
965 struct scsi_ioctl_command __user *); 961 struct scsi_ioctl_command __user *);
966 962
967extern int blk_queue_enter(struct request_queue *q, bool nowait); 963extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
968extern void blk_queue_exit(struct request_queue *q); 964extern void blk_queue_exit(struct request_queue *q);
969extern void blk_start_queue(struct request_queue *q); 965extern void blk_start_queue(struct request_queue *q);
970extern void blk_start_queue_async(struct request_queue *q); 966extern void blk_start_queue_async(struct request_queue *q);
@@ -991,7 +987,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
991int blk_status_to_errno(blk_status_t status); 987int blk_status_to_errno(blk_status_t status);
992blk_status_t errno_to_blk_status(int errno); 988blk_status_t errno_to_blk_status(int errno);
993 989
994bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie); 990bool blk_poll(struct request_queue *q, blk_qc_t cookie);
995 991
996static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 992static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
997{ 993{
@@ -1110,6 +1106,8 @@ extern struct request *blk_peek_request(struct request_queue *q);
1110extern void blk_start_request(struct request *rq); 1106extern void blk_start_request(struct request *rq);
1111extern struct request *blk_fetch_request(struct request_queue *q); 1107extern struct request *blk_fetch_request(struct request_queue *q);
1112 1108
1109void blk_steal_bios(struct bio_list *list, struct request *rq);
1110
1113/* 1111/*
1114 * Request completion related functions. 1112 * Request completion related functions.
1115 * 1113 *
@@ -1372,7 +1370,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1372 gfp_mask, 0); 1370 gfp_mask, 0);
1373} 1371}
1374 1372
1375extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 1373extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
1376 1374
1377enum blk_default_limits { 1375enum blk_default_limits {
1378 BLK_MAX_SEGMENTS = 128, 1376 BLK_MAX_SEGMENTS = 128,