Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	167
1 file changed, 147 insertions, 20 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d2a1b71e93c3..88d68081a0f1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -23,7 +23,6 @@
 struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
@@ -34,12 +33,6 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
-int put_io_context(struct io_context *ioc);
-void exit_io_context(void);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc);
-
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
@@ -113,6 +106,7 @@ enum rq_flag_bits {
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
+	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -135,6 +129,7 @@ enum rq_flag_bits {
 #define REQ_ALLOCED	(1 << __REQ_ALLOCED)
 #define REQ_RW_META	(1 << __REQ_RW_META)
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
 
 #define BLK_MAX_CDB	16
 
@@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+struct bvec_merge_data {
+	struct block_device *bi_bdev;
+	sector_t bi_sector;
+	unsigned bi_size;
+	unsigned long bi_rw;
+};
+typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
+			     struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
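With this change a merge_bvec_fn callback no longer receives the bio itself, only the proposed starting sector, size and direction via struct bvec_merge_data. Not part of the diff, but as a hedged sketch of the new calling convention (the function name example_merge_bvec and the 64 KiB limit are illustrative only), a stacking driver's callback might look like:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative only: refuse to let a single bio grow beyond 64 KiB. */
static int example_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec)
{
	if (bvm->bi_size + biovec->bv_len > 64 * 1024)
		return 0;		/* accept nothing from this segment */

	return biovec->bv_len;		/* accept the whole segment */
}

A driver would advertise such a callback on its queue with blk_queue_merge_bvec(q, example_merge_bvec).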
@@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 	__set_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_flag_test_and_clear(unsigned int flag,
+					    struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	if (test_bit(flag, &q->queue_flags)) {
+		__clear_bit(flag, &q->queue_flags);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+					  struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	if (!test_bit(flag, &q->queue_flags)) {
+		__set_bit(flag, &q->queue_flags);
+		return 0;
+	}
+
+	return 1;
+}
+
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
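The new helpers combine a test with a flag update while q->queue_lock is held (they WARN if it is not). Not part of the diff: a hedged sketch of how lock-holding callers might use them, modelled on queue plugging; the example_* wrappers are illustrative, while QUEUE_FLAG_PLUGGED and q->unplug_timer are existing block-layer members.

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

/* Illustrative wrappers; callers must hold q->queue_lock. */
static void example_plug_queue(struct request_queue *q)
{
	/* Arm the unplug timer only for the caller that actually set the flag. */
	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q))
		mod_timer(&q->unplug_timer, jiffies + 3);
}

static int example_remove_plug(struct request_queue *q)
{
	/* Only the caller that actually cleared the flag does the teardown. */
	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}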
@@ -623,7 +651,6 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
-extern void blk_end_sync_rq(struct request *rq, int error);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
@@ -676,7 +703,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
-extern int blk_verify_command(unsigned char *, int);
 extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -749,6 +775,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
+extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size);
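Not part of the diff: a hedged sketch of a driver using the new blk_queue_update_dma_pad() during device configuration (example_configure is illustrative; the intent of the _update_ variant, unlike blk_queue_dma_pad(), is to only ever raise an already-set pad mask).

#include <linux/blkdev.h>

/* Illustrative: pad DMA transfers out to a multiple of 4 bytes. */
static void example_configure(struct request_queue *q)
{
	blk_queue_update_dma_pad(q, 3);
}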
@@ -802,6 +829,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
+/*
+ * command filter functions
+ */
+extern int blk_verify_command(struct file *file, unsigned char *cmd);
+extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
+					 unsigned char *cmd, mode_t *f_mode);
+extern int blk_register_filter(struct gendisk *disk);
+extern void blk_unregister_filter(struct gendisk *disk);
+
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
 #define SAFE_MAX_SECTORS 255
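blk_verify_command() now takes the struct file that issued the ioctl, so filtering decisions can depend on the opener rather than on a bare capability check. Not part of the diff: a hedged sketch of a caller in an ioctl path (example_check_command and the surrounding flow are illustrative only).

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Illustrative: gate a user-supplied CDB before building a request. */
static int example_check_command(struct file *filp, unsigned char *cdb)
{
	int err;

	err = blk_verify_command(filp, cdb);
	if (err)
		return err;	/* e.g. -EPERM for a filtered opcode */

	/* ... continue building and queueing the SCSI request ... */
	return 0;
}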
@@ -865,28 +901,119 @@ void kblockd_flush_work(struct work_struct *work);
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#else /* CONFIG_BLOCK */
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
+#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
+#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
 
-static inline long nr_blockdev_pages(void)
+struct blk_integrity_exchg {
+	void			*prot_buf;
+	void			*data_buf;
+	sector_t		sector;
+	unsigned int		data_size;
+	unsigned short		sector_size;
+	const char		*disk_name;
+};
+
+typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
+typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
+typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
+typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+
+struct blk_integrity {
+	integrity_gen_fn	*generate_fn;
+	integrity_vrfy_fn	*verify_fn;
+	integrity_set_tag_fn	*set_tag_fn;
+	integrity_get_tag_fn	*get_tag_fn;
+
+	unsigned short		flags;
+	unsigned short		tuple_size;
+	unsigned short		sector_size;
+	unsigned short		tag_size;
+
+	const char		*name;
+
+	struct kobject		kobj;
+};
+
+extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_unregister(struct gendisk *);
+extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request *);
+
+static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
 {
+	if (bi)
+		return bi->tuple_size;
+
 	return 0;
 }
 
-static inline void exit_io_context(void)
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
+	return bdev->bd_disk->integrity;
 }
 
-struct io_context;
-static inline int put_io_context(struct io_context *ioc)
+static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
 {
-	return 1;
+	struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+	if (bi)
+		return bi->tag_size;
+
+	return 0;
+}
+
+static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
+{
+	struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+	if (bi == NULL)
+		return 0;
+
+	if (rw == READ && bi->verify_fn != NULL &&
+	    (bi->flags & INTEGRITY_FLAG_READ))
+		return 1;
+
+	if (rw == WRITE && bi->generate_fn != NULL &&
+	    (bi->flags & INTEGRITY_FLAG_WRITE))
+		return 1;
+
+	return 0;
 }
 
+static inline int blk_integrity_rq(struct request *rq)
+{
+	if (rq->bio == NULL)
+		return 0;
+
+	return bio_integrity(rq->bio);
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define blk_integrity_rq(rq)			(0)
+#define blk_rq_count_integrity_sg(a)		(0)
+#define blk_rq_map_integrity_sg(a, b)		(0)
+#define bdev_get_integrity(a)			(0)
+#define bdev_get_tag_size(a)			(0)
+#define blk_integrity_compare(a, b)		(0)
+#define blk_integrity_register(a, b)		(0)
+#define blk_integrity_unregister(a)		do { } while (0);
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#else /* CONFIG_BLOCK */
+/*
+ * stubs for when the block layer is configured out
+ */
+#define buffer_heads_over_limit 0
+
+static inline long nr_blockdev_pages(void)
+{
+	return 0;
+}
 
 #endif /* CONFIG_BLOCK */
 
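To take advantage of the new hooks, a driver describes its protection format in a struct blk_integrity and registers it against the gendisk. Not part of the diff: a hedged sketch built only from the declarations added above; the "example-dif" profile, the callbacks and the tuple_size value are illustrative, not an in-tree template.

#include <linux/blkdev.h>
#include <linux/genhd.h>

static void example_generate_fn(struct blk_integrity_exchg *bix)
{
	/* Compute protection data for bix->data_buf into bix->prot_buf. */
}

static int example_verify_fn(struct blk_integrity_exchg *bix)
{
	/* Check bix->prot_buf against bix->data_buf; return 0 on success. */
	return 0;
}

static struct blk_integrity example_integrity = {
	.name		= "example-dif",
	.generate_fn	= example_generate_fn,
	.verify_fn	= example_verify_fn,
	.flags		= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE,
	.tuple_size	= 8,		/* bytes of protection data per sector */
	.sector_size	= 512,
	.tag_size	= 0,
};

static int example_attach_integrity(struct gendisk *disk)
{
	/* Returns 0 on success, per the prototype declared in the header. */
	return blk_integrity_register(disk, &example_integrity);
}

Once registered, paths such as bdev_integrity_enabled() and blk_integrity_rq() above decide whether integrity metadata is generated or verified for a given request.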
