about summary refs log tree commit diff stats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
author Ingo Molnar <mingo@elte.hu> 2008-07-15 18:29:07 -0400
committer Ingo Molnar <mingo@elte.hu> 2008-07-15 18:29:07 -0400
commit 82638844d9a8581bbf33201cc209a14876eca167 (patch)
tree 961d7f9360194421a71aa644a9d0c176a960ce49 /include/linux/blkdev.h
parent 9982fbface82893e77d211fbabfbd229da6bdde6 (diff)
parent 63cf13b77ab785e87c867defa8545e6d4a989774 (diff)
Merge branch 'linus' into cpus4096
Conflicts:
	arch/x86/xen/smp.c
	kernel/sched_rt.c
	net/iucv/iucv.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- include/linux/blkdev.h | 164
 1 file changed, 144 insertions(+), 20 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d2a1b71e93c3..32a441b05fd5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -23,7 +23,6 @@
23struct scsi_ioctl_command; 23struct scsi_ioctl_command;
24 24
25struct request_queue; 25struct request_queue;
26typedef struct request_queue request_queue_t __deprecated;
27struct elevator_queue; 26struct elevator_queue;
28typedef struct elevator_queue elevator_t; 27typedef struct elevator_queue elevator_t;
29struct request_pm_state; 28struct request_pm_state;
@@ -34,12 +33,6 @@ struct sg_io_hdr;
34#define BLKDEV_MIN_RQ 4 33#define BLKDEV_MIN_RQ 4
35#define BLKDEV_MAX_RQ 128 /* Default maximum */ 34#define BLKDEV_MAX_RQ 128 /* Default maximum */
36 35
37int put_io_context(struct io_context *ioc);
38void exit_io_context(void);
39struct io_context *get_io_context(gfp_t gfp_flags, int node);
40struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
41void copy_io_context(struct io_context **pdst, struct io_context **psrc);
42
43struct request; 36struct request;
44typedef void (rq_end_io_fn)(struct request *, int); 37typedef void (rq_end_io_fn)(struct request *, int);
45 38
@@ -113,6 +106,7 @@ enum rq_flag_bits {
113 __REQ_ALLOCED, /* request came from our alloc pool */ 106 __REQ_ALLOCED, /* request came from our alloc pool */
114 __REQ_RW_META, /* metadata io request */ 107 __REQ_RW_META, /* metadata io request */
115 __REQ_COPY_USER, /* contains copies of user pages */ 108 __REQ_COPY_USER, /* contains copies of user pages */
109 __REQ_INTEGRITY, /* integrity metadata has been remapped */
116 __REQ_NR_BITS, /* stops here */ 110 __REQ_NR_BITS, /* stops here */
117}; 111};
118 112
@@ -135,6 +129,7 @@ enum rq_flag_bits {
135#define REQ_ALLOCED (1 << __REQ_ALLOCED) 129#define REQ_ALLOCED (1 << __REQ_ALLOCED)
136#define REQ_RW_META (1 << __REQ_RW_META) 130#define REQ_RW_META (1 << __REQ_RW_META)
137#define REQ_COPY_USER (1 << __REQ_COPY_USER) 131#define REQ_COPY_USER (1 << __REQ_COPY_USER)
132#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
138 133
139#define BLK_MAX_CDB 16 134#define BLK_MAX_CDB 16
140 135
@@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
259typedef void (unplug_fn) (struct request_queue *); 254typedef void (unplug_fn) (struct request_queue *);
260 255
261struct bio_vec; 256struct bio_vec;
262typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *); 257struct bvec_merge_data {
258 struct block_device *bi_bdev;
259 sector_t bi_sector;
260 unsigned bi_size;
261 unsigned long bi_rw;
262};
263typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
264 struct bio_vec *);
263typedef void (prepare_flush_fn) (struct request_queue *, struct request *); 265typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
264typedef void (softirq_done_fn)(struct request *); 266typedef void (softirq_done_fn)(struct request *);
265typedef int (dma_drain_needed_fn)(struct request *); 267typedef int (dma_drain_needed_fn)(struct request *);
@@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
426 __set_bit(flag, &q->queue_flags); 428 __set_bit(flag, &q->queue_flags);
427} 429}
428 430
431static inline int queue_flag_test_and_clear(unsigned int flag,
432 struct request_queue *q)
433{
434 WARN_ON_ONCE(!queue_is_locked(q));
435
436 if (test_bit(flag, &q->queue_flags)) {
437 __clear_bit(flag, &q->queue_flags);
438 return 1;
439 }
440
441 return 0;
442}
443
444static inline int queue_flag_test_and_set(unsigned int flag,
445 struct request_queue *q)
446{
447 WARN_ON_ONCE(!queue_is_locked(q));
448
449 if (!test_bit(flag, &q->queue_flags)) {
450 __set_bit(flag, &q->queue_flags);
451 return 0;
452 }
453
454 return 1;
455}
456
429static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 457static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
430{ 458{
431 WARN_ON_ONCE(!queue_is_locked(q)); 459 WARN_ON_ONCE(!queue_is_locked(q));
@@ -623,7 +651,6 @@ extern void generic_make_request(struct bio *bio);
623extern void blk_rq_init(struct request_queue *q, struct request *rq); 651extern void blk_rq_init(struct request_queue *q, struct request *rq);
624extern void blk_put_request(struct request *); 652extern void blk_put_request(struct request *);
625extern void __blk_put_request(struct request_queue *, struct request *); 653extern void __blk_put_request(struct request_queue *, struct request *);
626extern void blk_end_sync_rq(struct request *rq, int error);
627extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 654extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
628extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 655extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
629extern void blk_requeue_request(struct request_queue *, struct request *); 656extern void blk_requeue_request(struct request_queue *, struct request *);
@@ -676,7 +703,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
676 struct request *, int); 703 struct request *, int);
677extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 704extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
678 struct request *, int, rq_end_io_fn *); 705 struct request *, int, rq_end_io_fn *);
679extern int blk_verify_command(unsigned char *, int);
680extern void blk_unplug(struct request_queue *q); 706extern void blk_unplug(struct request_queue *q);
681 707
682static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 708static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -749,6 +775,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
749extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 775extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
750extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 776extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
751extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 777extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
778extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
752extern int blk_queue_dma_drain(struct request_queue *q, 779extern int blk_queue_dma_drain(struct request_queue *q,
753 dma_drain_needed_fn *dma_drain_needed, 780 dma_drain_needed_fn *dma_drain_needed,
754 void *buf, unsigned int size); 781 void *buf, unsigned int size);
@@ -802,6 +829,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
802 829
803extern int blkdev_issue_flush(struct block_device *, sector_t *); 830extern int blkdev_issue_flush(struct block_device *, sector_t *);
804 831
832/*
833* command filter functions
834*/
835extern int blk_verify_command(struct file *file, unsigned char *cmd);
836extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
837 unsigned char *cmd, mode_t *f_mode);
838extern int blk_register_filter(struct gendisk *disk);
839extern void blk_unregister_filter(struct gendisk *disk);
840
805#define MAX_PHYS_SEGMENTS 128 841#define MAX_PHYS_SEGMENTS 128
806#define MAX_HW_SEGMENTS 128 842#define MAX_HW_SEGMENTS 128
807#define SAFE_MAX_SECTORS 255 843#define SAFE_MAX_SECTORS 255
@@ -865,28 +901,116 @@ void kblockd_flush_work(struct work_struct *work);
865#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ 901#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
866 MODULE_ALIAS("block-major-" __stringify(major) "-*") 902 MODULE_ALIAS("block-major-" __stringify(major) "-*")
867 903
904#if defined(CONFIG_BLK_DEV_INTEGRITY)
868 905
869#else /* CONFIG_BLOCK */ 906#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
870/* 907#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
871 * stubs for when the block layer is configured out
872 */
873#define buffer_heads_over_limit 0
874 908
875static inline long nr_blockdev_pages(void) 909struct blk_integrity_exchg {
910 void *prot_buf;
911 void *data_buf;
912 sector_t sector;
913 unsigned int data_size;
914 unsigned short sector_size;
915 const char *disk_name;
916};
917
918typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
919typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
920typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
921typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
922
923struct blk_integrity {
924 integrity_gen_fn *generate_fn;
925 integrity_vrfy_fn *verify_fn;
926 integrity_set_tag_fn *set_tag_fn;
927 integrity_get_tag_fn *get_tag_fn;
928
929 unsigned short flags;
930 unsigned short tuple_size;
931 unsigned short sector_size;
932 unsigned short tag_size;
933
934 const char *name;
935
936 struct kobject kobj;
937};
938
939extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
940extern void blk_integrity_unregister(struct gendisk *);
941extern int blk_integrity_compare(struct block_device *, struct block_device *);
942extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
943extern int blk_rq_count_integrity_sg(struct request *);
944
945static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
876{ 946{
947 if (bi)
948 return bi->tuple_size;
949
877 return 0; 950 return 0;
878} 951}
879 952
880static inline void exit_io_context(void) 953static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
881{ 954{
955 return bdev->bd_disk->integrity;
882} 956}
883 957
884struct io_context; 958static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
885static inline int put_io_context(struct io_context *ioc)
886{ 959{
887 return 1; 960 struct blk_integrity *bi = bdev_get_integrity(bdev);
961
962 if (bi)
963 return bi->tag_size;
964
965 return 0;
966}
967
968static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
969{
970 struct blk_integrity *bi = bdev_get_integrity(bdev);
971
972 if (bi == NULL)
973 return 0;
974
975 if (rw == READ && bi->verify_fn != NULL &&
976 (bi->flags & INTEGRITY_FLAG_READ))
977 return 1;
978
979 if (rw == WRITE && bi->generate_fn != NULL &&
980 (bi->flags & INTEGRITY_FLAG_WRITE))
981 return 1;
982
983 return 0;
888} 984}
889 985
986static inline int blk_integrity_rq(struct request *rq)
987{
988 return bio_integrity(rq->bio);
989}
990
991#else /* CONFIG_BLK_DEV_INTEGRITY */
992
993#define blk_integrity_rq(rq) (0)
994#define blk_rq_count_integrity_sg(a) (0)
995#define blk_rq_map_integrity_sg(a, b) (0)
996#define bdev_get_integrity(a) (0)
997#define bdev_get_tag_size(a) (0)
998#define blk_integrity_compare(a, b) (0)
999#define blk_integrity_register(a, b) (0)
1000#define blk_integrity_unregister(a) do { } while (0);
1001
1002#endif /* CONFIG_BLK_DEV_INTEGRITY */
1003
1004#else /* CONFIG_BLOCK */
1005/*
1006 * stubs for when the block layer is configured out
1007 */
1008#define buffer_heads_over_limit 0
1009
1010static inline long nr_blockdev_pages(void)
1011{
1012 return 0;
1013}
890 1014
891#endif /* CONFIG_BLOCK */ 1015#endif /* CONFIG_BLOCK */
892 1016