Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 171
 1 files changed, 84 insertions, 87 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..aae86fd10c4f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,9 @@ struct request {
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif
 
 	unsigned short ioprio;
 
@@ -243,6 +246,7 @@ struct queue_limits {
 
 	unsigned short		logical_block_size;
 	unsigned short		max_segments;
+	unsigned short		max_integrity_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
@@ -355,18 +359,25 @@ struct request_queue
 	struct blk_trace	*blk_trace;
 #endif
 	/*
-	 * reserved for flush operations
+	 * for flush operations
 	 */
-	unsigned int		ordered, next_ordered, ordseq;
-	int			orderr, ordcolor;
-	struct request		pre_flush_rq, bar_rq, post_flush_rq;
-	struct request		*orig_bar_rq;
+	unsigned int		flush_flags;
+	unsigned int		flush_seq;
+	int			flush_err;
+	struct request		flush_rq;
+	struct request		*orig_flush_rq;
+	struct list_head	pending_flushes;
 
 	struct mutex		sysfs_lock;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+	/* Throttle data */
+	struct throtl_data *td;
+#endif
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -462,56 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-enum {
-	/*
-	 * Hardbarrier is supported with one of the following methods.
-	 *
-	 * NONE		: hardbarrier unsupported
-	 * DRAIN	: ordering by draining is enough
-	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
-	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
-	 * TAG		: ordering by tag is enough
-	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
-	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
-	 */
-	QUEUE_ORDERED_BY_DRAIN		= 0x01,
-	QUEUE_ORDERED_BY_TAG		= 0x02,
-	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_DO_BAR		= 0x20,
-	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
-	QUEUE_ORDERED_DO_FUA		= 0x80,
-
-	QUEUE_ORDERED_NONE		= 0x00,
-
-	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
-					  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_FUA,
-
-	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
-					  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_FUA,
-
-	/*
-	 * Ordered operation sequence
-	 */
-	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
-	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
-	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
-	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
-	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
-	QUEUE_ORDSEQ_DONE	= 0x20,
-};
-
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
@@ -521,7 +482,6 @@ enum {
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -592,7 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
@@ -851,7 +811,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -881,12 +841,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
-extern bool blk_do_ordered(struct request_queue *, struct request **);
-extern unsigned blk_ordered_cur_seq(struct request_queue *);
-extern unsigned blk_ordered_req_seq(struct request *);
-extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
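A minimal conversion sketch for the hunk above, assuming a hypothetical driver (the my_* name is illustrative, not part of this patch): where a driver previously selected an ordered mode with blk_queue_ordered(), it now advertises its cache-flush capability with blk_queue_flush().

    /* Hypothetical queue setup: the device has a volatile write cache and supports FUA writes. */
    static void my_init_queue(struct request_queue *q)
    {
        /* old interface: blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); */
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
    }

A driver without FUA support would pass only REQ_FLUSH; a device with no volatile cache would simply not call blk_queue_flush().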
@@ -919,27 +875,28 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 		return NULL;
 	return bqt->tag_index[tag];
 }
-enum{
-	BLKDEV_WAIT,	/* wait for completion */
-	BLKDEV_BARRIER,	/* issue request with barrier */
-	BLKDEV_SECURE,	/* secure discard */
-};
-#define BLKDEV_IFL_WAIT		(1 << BLKDEV_WAIT)
-#define BLKDEV_IFL_BARRIER	(1 << BLKDEV_BARRIER)
-#define BLKDEV_IFL_SECURE	(1 << BLKDEV_SECURE)
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
-			unsigned long);
+
+#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */
+
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-static inline int sb_issue_discard(struct super_block *sb,
-				   sector_t block, sector_t nr_blocks)
+		sector_t nr_sects, gfp_t gfp_mask);
+static inline int sb_issue_discard(struct super_block *sb, sector_t block,
+		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
 {
-	block <<= (sb->s_blocksize_bits - 9);
-	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
-				   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
+				    nr_blocks << (sb->s_blocksize_bits - 9),
+				    gfp_mask, flags);
+}
+static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
+		sector_t nr_blocks, gfp_t gfp_mask)
+{
+	return blkdev_issue_zeroout(sb->s_bdev,
+				    block << (sb->s_blocksize_bits - 9),
+				    nr_blocks << (sb->s_blocksize_bits - 9),
+				    gfp_mask);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
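With the BLKDEV_IFL_* flags gone, these helpers appear to be plain synchronous calls, and the only remaining flag is BLKDEV_DISCARD_SECURE for blkdev_issue_discard(). A hedged usage sketch; the function name, device, range, gfp mask, and the assumption that a NULL error-sector pointer is accepted are all illustrative:

    /* Hypothetical caller: flush the write cache, then securely discard a range. */
    static int my_trim_range(struct block_device *bdev)
    {
        int err;

        err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);   /* NULL error-sector pointer assumed OK */
        if (err)
            return err;

        /* discard 8 sectors starting at LBA 2048, requesting a secure discard */
        return blkdev_issue_discard(bdev, 2048, 8, GFP_KERNEL, BLKDEV_DISCARD_SECURE);
    }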
@@ -1004,7 +961,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
@@ -1093,11 +1050,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	return !((unsigned long)addr & alignment) && !(len & alignment);
+	return !(addr & alignment) && !(len & alignment);
 }
 
 /* assumes size > 256 */
@@ -1127,6 +1084,7 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1170,6 +1128,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 }
 #endif
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
+extern void throtl_shutdown_timer_wq(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+{
+	return 0;
+}
+
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
+static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
+static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
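The !CONFIG_BLK_DEV_THROTTLING stubs above let submission code call these hooks unconditionally; blk_throtl_bio() then compiles down to "return 0". A hedged sketch of a caller; the call-site shape and the meaning of a non-zero return are assumptions here, not taken from this header:

    /* Hypothetical submission path checking the throttle hook first. */
    static void my_submit_bio(struct request_queue *q, struct bio *bio)
    {
        if (blk_throtl_bio(q, &bio))
            return;  /* assumed: the bio was queued by the throttling layer */

        /* otherwise dispatch the bio as usual */
    }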
@@ -1213,8 +1189,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+				   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+				  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1216,32 @@ static inline int blk_integrity_rq(struct request *rq)
 	return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+	q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+	return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a)		(0)
-#define blk_rq_map_integrity_sg(a, b)		(0)
+#define blk_rq_count_integrity_sg(a, b)		(0)
+#define blk_rq_map_integrity_sg(a, b, c)	(0)
 #define bdev_get_integrity(a)			(0)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
 #define blk_integrity_unregister(a)		do { } while (0);
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define queue_max_integrity_segments(a)	(0)
+#define blk_integrity_merge_rq(a, b, c)	(0)
+#define blk_integrity_merge_bio(a, b, c)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
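blk_queue_max_integrity_segments() mirrors the existing data-segment limit: a controller that carries protection information through its own, smaller scatter-gather table can cap integrity segments separately from data segments. A hypothetical limits setup; the my_* name and the numbers are purely illustrative:

    /* Hypothetical driver limits: many data SG entries, a single integrity SG entry. */
    static void my_set_limits(struct request_queue *q)
    {
        blk_queue_max_segments(q, 128);          /* data scatter-gather entries */
        blk_queue_max_integrity_segments(q, 1);  /* e.g. one protection-info buffer per request */
    }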