Diffstat (limited to 'include/linux/blkdev.h')

 include/linux/blkdev.h | 163
 1 file changed, 144 insertions(+), 19 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d2a1b71e93c3..1ffd8bfdc4c9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -23,7 +23,6 @@
 struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
@@ -34,12 +33,6 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ 4
 #define BLKDEV_MAX_RQ 128 /* Default maximum */
 
-int put_io_context(struct io_context *ioc);
-void exit_io_context(void);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc);
-
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
@@ -113,6 +106,7 @@ enum rq_flag_bits {
         __REQ_ALLOCED, /* request came from our alloc pool */
         __REQ_RW_META, /* metadata io request */
         __REQ_COPY_USER, /* contains copies of user pages */
+        __REQ_INTEGRITY, /* integrity metadata has been remapped */
         __REQ_NR_BITS, /* stops here */
 };
 
@@ -135,6 +129,7 @@ enum rq_flag_bits {
 #define REQ_ALLOCED (1 << __REQ_ALLOCED)
 #define REQ_RW_META (1 << __REQ_RW_META)
 #define REQ_COPY_USER (1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
 
 #define BLK_MAX_CDB 16
 
@@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+struct bvec_merge_data {
+        struct block_device *bi_bdev;
+        sector_t bi_sector;
+        unsigned bi_size;
+        unsigned long bi_rw;
+};
+typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
+                             struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
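For illustration, a minimal sketch (not from this patch) of how a driver's merge_bvec_fn callback might consume the new bvec_merge_data descriptor instead of a struct bio: the callback returns how many bytes of the candidate bio_vec it can accept at the proposed starting sector. The function name and the 64-sector boundary below are made-up example values.

/* Hypothetical callback: accept a segment only while the request stays
 * inside an invented 64-sector boundary. */
static int example_mergeable_bvec(struct request_queue *q,
                                  struct bvec_merge_data *bvm,
                                  struct bio_vec *biovec)
{
        unsigned int boundary = 64 << 9;        /* example limit, in bytes */
        unsigned int offset = (bvm->bi_sector << 9) & (boundary - 1);
        unsigned int room;

        if (bvm->bi_size >= boundary - offset)
                return 0;                       /* already at the boundary */

        room = boundary - offset - bvm->bi_size;
        if (biovec->bv_len > room)
                return room;                    /* accept only part of the segment */

        return biovec->bv_len;                  /* the whole segment fits */
}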
@@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
         __set_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_flag_test_and_clear(unsigned int flag,
+                                            struct request_queue *q)
+{
+        WARN_ON_ONCE(!queue_is_locked(q));
+
+        if (test_bit(flag, &q->queue_flags)) {
+                __clear_bit(flag, &q->queue_flags);
+                return 1;
+        }
+
+        return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+                                          struct request_queue *q)
+{
+        WARN_ON_ONCE(!queue_is_locked(q));
+
+        if (!test_bit(flag, &q->queue_flags)) {
+                __set_bit(flag, &q->queue_flags);
+                return 0;
+        }
+
+        return 1;
+}
+
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
         WARN_ON_ONCE(!queue_is_locked(q));
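A usage sketch, not part of the patch: both helpers expect q->queue_lock to be held (hence the WARN_ON_ONCE) and report the previous state of the flag, so a caller can act only on an actual transition. QUEUE_FLAG_PLUGGED stands in here for whichever queue flag a caller manipulates.

/* Sketch of a caller: set the flag atomically with respect to other
 * lock holders, and do the "first to plug" work only on a transition. */
static void example_plug_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                /* flag was previously clear: this caller plugged the queue */
        }
        spin_unlock_irq(q->queue_lock);
}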
@@ -676,7 +704,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                           struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                   struct request *, int, rq_end_io_fn *);
-extern int blk_verify_command(unsigned char *, int);
 extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -749,6 +776,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
+extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern int blk_queue_dma_drain(struct request_queue *q,
                                dma_drain_needed_fn *dma_drain_needed,
                                void *buf, unsigned int size);
@@ -802,6 +830,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
+/*
+ * command filter functions
+ */
+extern int blk_verify_command(struct file *file, unsigned char *cmd);
+extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
+                                         unsigned char *cmd, mode_t *f_mode);
+extern int blk_register_filter(struct gendisk *disk);
+extern void blk_unregister_filter(struct gendisk *disk);
+
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
 #define SAFE_MAX_SECTORS 255
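A hedged sketch of the intended call pattern (the surrounding handler is invented for illustration): with the filter behind blk_verify_command(), an ioctl path hands the opener's struct file and the user-supplied CDB to the block layer and refuses the command if the filter rejects it.

/* Illustrative fragment: reject a filtered SCSI command early. */
static int example_issue_cdb(struct file *file, unsigned char *cdb)
{
        int err = blk_verify_command(file, cdb);

        if (err)
                return err;     /* command not permitted for this opener */

        /* ... build and submit the request here ... */
        return 0;
}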
@@ -865,28 +902,116 @@ void kblockd_flush_work(struct work_struct *work);
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
         MODULE_ALIAS("block-major-" __stringify(major) "-*")
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#else /* CONFIG_BLOCK */
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
+#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
+#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
 
-static inline long nr_blockdev_pages(void)
+struct blk_integrity_exchg {
+        void *prot_buf;
+        void *data_buf;
+        sector_t sector;
+        unsigned int data_size;
+        unsigned short sector_size;
+        const char *disk_name;
+};
+
+typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
+typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
+typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
+typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+
+struct blk_integrity {
+        integrity_gen_fn *generate_fn;
+        integrity_vrfy_fn *verify_fn;
+        integrity_set_tag_fn *set_tag_fn;
+        integrity_get_tag_fn *get_tag_fn;
+
+        unsigned short flags;
+        unsigned short tuple_size;
+        unsigned short sector_size;
+        unsigned short tag_size;
+
+        const char *name;
+
+        struct kobject kobj;
+};
+
+extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_unregister(struct gendisk *);
+extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request *);
+
+static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
 {
+        if (bi)
+                return bi->tuple_size;
+
         return 0;
 }
 
-static inline void exit_io_context(void)
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
+        return bdev->bd_disk->integrity;
 }
 
-struct io_context;
-static inline int put_io_context(struct io_context *ioc)
+static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
 {
-        return 1;
+        struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+        if (bi)
+                return bi->tag_size;
+
+        return 0;
+}
+
+static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
+{
+        struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+        if (bi == NULL)
+                return 0;
+
+        if (rw == READ && bi->verify_fn != NULL &&
+            (bi->flags & INTEGRITY_FLAG_READ))
+                return 1;
+
+        if (rw == WRITE && bi->generate_fn != NULL &&
+            (bi->flags & INTEGRITY_FLAG_WRITE))
+                return 1;
+
+        return 0;
 }
 
+static inline int blk_integrity_rq(struct request *rq)
+{
+        return bio_integrity(rq->bio);
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define blk_integrity_rq(rq) (0)
+#define blk_rq_count_integrity_sg(a) (0)
+#define blk_rq_map_integrity_sg(a, b) (0)
+#define bdev_get_integrity(a) (0)
+#define bdev_get_tag_size(a) (0)
+#define blk_integrity_compare(a, b) (0)
+#define blk_integrity_register(a, b) (0)
+#define blk_integrity_unregister(a) do { } while (0);
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#else /* CONFIG_BLOCK */
+/*
+ * stubs for when the block layer is configured out
+ */
+#define buffer_heads_over_limit 0
+
+static inline long nr_blockdev_pages(void)
+{
+        return 0;
+}
 
 #endif /* CONFIG_BLOCK */
 
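Finally, a minimal sketch of how a protection format might plug into the integrity framework declared above: fill in a struct blk_integrity with generate/verify callbacks and register it against the gendisk. The callbacks, the "example-prot" name, and the 8-byte tuple are placeholders rather than a real format, and registration would typically happen from a driver's probe path.

static void example_generate(struct blk_integrity_exchg *bix)
{
        /* produce protection data for bix->data_buf into bix->prot_buf */
}

static int example_verify(struct blk_integrity_exchg *bix)
{
        /* recompute and compare; return 0 if the data checks out */
        return 0;
}

static struct blk_integrity example_integrity = {
        .name           = "example-prot",       /* placeholder format name */
        .generate_fn    = example_generate,
        .verify_fn      = example_verify,
        .flags          = INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE,
        .tuple_size     = 8,                    /* protection bytes per sector */
        .sector_size    = 512,
};

/* e.g., from a driver's probe path: blk_integrity_register(disk, &example_integrity); */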