| author | Ingo Molnar <mingo@elte.hu> | 2010-10-30 04:43:08 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-10-30 04:43:08 -0400 |
| commit | 169ed55bd30305b933f52bfab32a58671d44ab68 (patch) | |
| tree | 32e280957474f458901abfce16fa2a1687ef7497 /include/linux/blkdev.h | |
| parent | 3d7851b3cdd43a734e5cc4c643fd886ab28ad4d5 (diff) | |
| parent | 45f81b1c96d9793e47ce925d257ea693ce0b193e (diff) | |
Merge branch 'tip/perf/jump-label-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/urgent
Diffstat (limited to 'include/linux/blkdev.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/blkdev.h | 164 |

1 file changed, 77 insertions, 87 deletions
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..646b462d04df 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,9 @@ struct request {
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif
 
 	unsigned short ioprio;
 
@@ -243,6 +246,7 @@ struct queue_limits {
 
 	unsigned short		logical_block_size;
 	unsigned short		max_segments;
+	unsigned short		max_integrity_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
```
```diff
@@ -355,18 +359,25 @@ struct request_queue
 	struct blk_trace	*blk_trace;
 #endif
 	/*
-	 * reserved for flush operations
+	 * for flush operations
 	 */
-	unsigned int		ordered, next_ordered, ordseq;
-	int			orderr, ordcolor;
-	struct request		pre_flush_rq, bar_rq, post_flush_rq;
-	struct request		*orig_bar_rq;
+	unsigned int		flush_flags;
+	unsigned int		flush_seq;
+	int			flush_err;
+	struct request		flush_rq;
+	struct request		*orig_flush_rq;
+	struct list_head	pending_flushes;
 
 	struct mutex		sysfs_lock;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+	/* Throttle data */
+	struct throtl_data *td;
+#endif
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
```
```diff
@@ -462,56 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-enum {
-	/*
-	 * Hardbarrier is supported with one of the following methods.
-	 *
-	 * NONE		: hardbarrier unsupported
-	 * DRAIN	: ordering by draining is enough
-	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
-	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
-	 * TAG		: ordering by tag is enough
-	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
-	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
-	 */
-	QUEUE_ORDERED_BY_DRAIN		= 0x01,
-	QUEUE_ORDERED_BY_TAG		= 0x02,
-	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_DO_BAR		= 0x20,
-	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
-	QUEUE_ORDERED_DO_FUA		= 0x80,
-
-	QUEUE_ORDERED_NONE		= 0x00,
-
-	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
-					  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_FUA,
-
-	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
-					  QUEUE_ORDERED_DO_BAR,
-	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
-					  QUEUE_ORDERED_DO_PREFLUSH |
-					  QUEUE_ORDERED_DO_FUA,
-
-	/*
-	 * Ordered operation sequence
-	 */
-	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
-	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
-	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
-	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
-	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
-	QUEUE_ORDSEQ_DONE	= 0x20,
-};
-
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
```
```diff
@@ -521,7 +482,6 @@ enum {
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -592,7 +552,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
+	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
+	 REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
```
```diff
@@ -851,7 +812,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -881,12 +842,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
-extern bool blk_do_ordered(struct request_queue *, struct request **);
-extern unsigned blk_ordered_cur_seq(struct request_queue *);
-extern unsigned blk_ordered_req_seq(struct request *);
-extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
```
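The hunk above removes the blk_queue_ordered()/blk_do_ordered() barrier machinery and adds blk_queue_flush(), so a driver now just declares which cache-control operations its device supports. A minimal sketch of the driver-side change, assuming a hypothetical setup helper (the function name and capability choice are illustrative, not part of this diff):

```c
#include <linux/blkdev.h>

/* Sketch: where a driver used to call
 *   blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 * it now declares that the device has a volatile write cache (REQ_FLUSH)
 * and, if the hardware supports it, honours forced-unit-access writes (REQ_FUA).
 */
static void example_setup_queue(struct request_queue *q, bool has_fua)
{
        unsigned int flush = REQ_FLUSH; /* device needs explicit cache flushes */

        if (has_fua)
                flush |= REQ_FUA;       /* device can commit individual writes to media */

        blk_queue_flush(q, flush);
}
```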
```diff
@@ -919,27 +876,20 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 		return NULL;
 	return bqt->tag_index[tag];
 }
-enum{
-	BLKDEV_WAIT,	/* wait for completion */
-	BLKDEV_BARRIER,	/* issue request with barrier */
-	BLKDEV_SECURE,	/* secure discard */
-};
-#define BLKDEV_IFL_WAIT		(1 << BLKDEV_WAIT)
-#define BLKDEV_IFL_BARRIER	(1 << BLKDEV_BARRIER)
-#define BLKDEV_IFL_SECURE	(1 << BLKDEV_SECURE)
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
-			unsigned long);
+
+#define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
+
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-static inline int sb_issue_discard(struct super_block *sb,
-				   sector_t block, sector_t nr_blocks)
+		sector_t nr_sects, gfp_t gfp_mask);
+static inline int sb_issue_discard(struct super_block *sb, sector_t block,
+		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
 {
-	block <<= (sb->s_blocksize_bits - 9);
-	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
-			   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
+				    nr_blocks << (sb->s_blocksize_bits - 9),
+				    gfp_mask, flags);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
```
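With this hunk, blkdev_issue_flush() loses its flags argument, the BLKDEV_IFL_* flags collapse into a single BLKDEV_DISCARD_SECURE, and sb_issue_discard() takes its gfp mask and flags from the caller instead of hard-coding GFP_NOFS plus wait/barrier behaviour. A hedged sketch of caller-side usage under the new prototypes (the function and variable names are illustrative):

```c
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Sketch: a filesystem discarding a block range and then flushing the cache. */
static int example_trim_and_flush(struct super_block *sb, sector_t block,
                                  sector_t nr_blocks)
{
        int err;

        /* gfp mask and flags are now chosen by the caller; pass
         * BLKDEV_DISCARD_SECURE instead of 0 to request a secure discard. */
        err = sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
        if (err)
                return err;

        /* Flush the device's write cache; no BLKDEV_IFL_* flags any more. */
        return blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
}
```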
```diff
@@ -1004,7 +954,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
@@ -1093,11 +1043,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	return !((unsigned long)addr & alignment) && !(len & alignment);
+	return !(addr & alignment) && !(len & alignment);
 }
 
 /* assumes size > 256 */
```
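blk_rq_aligned() now takes the buffer address as an unsigned long rather than a void *, so the cast moves to the caller. A small illustrative sketch, assuming a hypothetical helper name:

```c
#include <linux/blkdev.h>

/* Sketch: decide whether a buffer can be mapped into a request directly,
 * or whether a bounce copy is needed because of DMA alignment and padding. */
static bool example_buffer_is_dma_aligned(struct request_queue *q,
                                          void __user *ubuf, unsigned int len)
{
        return blk_rq_aligned(q, (unsigned long)ubuf, len);
}
```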
```diff
@@ -1127,6 +1077,7 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
```
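kblockd gains a delayed-work variant next to kblockd_schedule_work(). A hedged sketch of deferring queue work by a few milliseconds (the work item, handler, and delay are illustrative):

```c
#include <linux/blkdev.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_deferred_fn(struct work_struct *work)
{
        /* ... run deferred queue processing ... */
}

static DECLARE_DELAYED_WORK(example_dwork, example_deferred_fn);

static void example_kick_queue_later(struct request_queue *q)
{
        /* Same semantics as kblockd_schedule_work(), but runs after a delay. */
        kblockd_schedule_delayed_work(q, &example_dwork, msecs_to_jiffies(5));
}
```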
```diff
@@ -1170,6 +1121,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 }
 #endif
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
+extern void throtl_shutdown_timer_wq(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+{
+	return 0;
+}
+
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
+static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
+static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
```
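The new CONFIG_BLK_DEV_THROTTLING interface follows the usual stub pattern: real declarations when the option is enabled, no-op static inlines otherwise, so callers need no #ifdefs. A hedged sketch of how a caller can use the hooks unconditionally (the surrounding functions are illustrative, not the actual block-core call sites):

```c
#include <linux/blkdev.h>

/* Sketch: these calls compile away when CONFIG_BLK_DEV_THROTTLING=n. */
static int example_queue_init(struct request_queue *q)
{
        return blk_throtl_init(q);      /* stub simply returns 0 */
}

static void example_queue_teardown(struct request_queue *q)
{
        throtl_shutdown_timer_wq(q);
        blk_throtl_exit(q);
}

static void example_submission_hook(struct request_queue *q, struct bio **bio)
{
        /* With throttling enabled this may hold back the bio; the stub
         * version returns 0 and leaves *bio untouched. */
        blk_throtl_bio(q, bio);
}
```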
```diff
@@ -1213,8 +1182,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+				   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+				  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1209,32 @@ static inline int blk_integrity_rq(struct request *rq)
 	return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+	q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+	return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a)		(0)
-#define blk_rq_map_integrity_sg(a, b)		(0)
+#define blk_rq_count_integrity_sg(a, b)		(0)
+#define blk_rq_map_integrity_sg(a, b, c)	(0)
 #define bdev_get_integrity(a)			(0)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
 #define blk_integrity_unregister(a)		do { } while (0);
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define queue_max_integrity_segments(a)		(0)
+#define blk_integrity_merge_rq(a, b, c)		(0)
+#define blk_integrity_merge_bio(a, b, c)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
```
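These last hunks move the integrity helpers from a (request) to a (queue, bio) calling convention, track nr_integrity_segments per request, and add a max_integrity_segments queue limit with blk_queue_max_integrity_segments()/queue_max_integrity_segments() accessors (stubbed when CONFIG_BLK_DEV_INTEGRITY is off). A hedged sketch of how an HBA driver might use the new calls; the function names, the segment count, and the scatterlist handling are illustrative, not taken from this diff:

```c
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Sketch: advertise how many integrity (protection-information) segments
 * the controller can accept per request. */
static void example_init_integrity_limits(struct request_queue *q)
{
        blk_queue_max_integrity_segments(q, 16);        /* 16 is an arbitrary example */
}

/* Sketch: map a request's integrity metadata using the new
 * (queue, bio, scatterlist) convention. */
static int example_map_integrity(struct request_queue *q, struct request *rq,
                                 struct scatterlist *prot_sg)
{
        int count;

        if (!blk_integrity_rq(rq))
                return 0;                               /* no protection data attached */

        count = blk_rq_count_integrity_sg(q, rq->bio);
        if (count > queue_max_integrity_segments(q))
                return -EINVAL;

        return blk_rq_map_integrity_sg(q, rq->bio, prot_sg);
}
```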
