Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 118
1 file changed, 76 insertions(+), 42 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8963d9149b5f..221cecd86bd3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };

-enum {
-	BLK_RW_ASYNC	= 0,
-	BLK_RW_SYNC	= 1,
-};
-
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -87,17 +82,17 @@ enum {
 enum {
 	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
 	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
-	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
 };

 /*
- * request type modified bits. first two bits match BIO_RW* bits, important
+ * request type modified bits. first four bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+	/* above flags must match BIO_RW_* */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
@@ -119,6 +114,7 @@ enum rq_flag_bits {
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_NR_BITS,		/* stops here */
 };

@@ -147,6 +143,10 @@ enum rq_flag_bits {
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
 #define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 #define REQ_IO_STAT	(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE	(1 << __REQ_MIXED_MERGE)
+
+#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
+				 REQ_FAILFAST_DRIVER)

 #define BLK_MAX_CDB	16

@@ -260,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
-typedef int (prepare_discard_fn) (struct request_queue *, struct request *);

 struct bio_vec;
 struct bvec_merge_data {
@@ -301,12 +300,6 @@ struct blk_queue_tag {
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

-struct blk_cmd_filter {
-	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-	struct kobject kobj;
-};
-
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
@@ -318,6 +311,7 @@ struct queue_limits {
 	unsigned int		alignment_offset;
 	unsigned int		io_min;
 	unsigned int		io_opt;
+	unsigned int		max_discard_sectors;

 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
@@ -345,7 +339,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
-	prepare_discard_fn	*prepare_discard_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
@@ -445,7 +438,6 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
-	struct blk_cmd_filter cmd_filter;
 };

 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -465,10 +457,13 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
+#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
+#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */

 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
-				 (1 << QUEUE_FLAG_STACKABLE))
+				 (1 << QUEUE_FLAG_STACKABLE) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))

 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -587,6 +582,7 @@ enum {

 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
+#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
@@ -594,6 +590,7 @@ enum {
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
+#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)

 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -730,6 +727,7 @@ struct rq_map_data {
 	int nr_entries;
 	unsigned long offset;
 	int null_mapped;
+	int from_user;
 };

 struct req_iterator {
@@ -786,18 +784,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
 {
-	clear_bdi_congested(&q->backing_dev_info, rw);
+	clear_bdi_congested(&q->backing_dev_info, sync);
 }

 /*
  * A queue has just entered congestion. Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
 {
-	set_bdi_congested(&q->backing_dev_info, rw);
+	set_bdi_congested(&q->backing_dev_info, sync);
 }

 extern void blk_start_queue(struct request_queue *q);
@@ -839,11 +837,13 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }

 /*
  * blk_rq_pos()			: the current sector
  * blk_rq_bytes()		: bytes left in the entire request
  * blk_rq_cur_bytes()		: bytes left in the current segment
- * blk_rq_sectors()		: sectors left in the entire request
- * blk_rq_cur_sectors()		: sectors left in the current segment
+ * blk_rq_err_bytes()		: bytes left till the next error boundary
+ * blk_rq_sectors()		: sectors left in the entire request
+ * blk_rq_cur_sectors()		: sectors left in the current segment
+ * blk_rq_err_sectors()		: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -860,6 +860,8 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 }

+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
 	return blk_rq_bytes(rq) >> 9;
@@ -870,6 +872,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }

+static inline unsigned int blk_rq_err_sectors(const struct request *rq)
+{
+	return blk_rq_err_bytes(rq) >> 9;
+}
+
 /*
  * Request issue related functions.
  */
@@ -896,10 +903,12 @@ extern bool blk_end_request(struct request *rq, int error,
 			    unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, int error);
 extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool blk_end_request_err(struct request *rq, int error);
 extern bool __blk_end_request(struct request *rq, int error,
 			      unsigned int nr_bytes);
 extern void __blk_end_request_all(struct request *rq, int error);
 extern bool __blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request_err(struct request *rq, int error);

 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
@@ -920,11 +929,15 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -944,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -987,24 +999,21 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }

 extern int blkdev_issue_flush(struct block_device *, sector_t *);
-extern int blkdev_issue_discard(struct block_device *,
-				sector_t sector, sector_t nr_sects, gfp_t);
+#define DISCARD_FL_WAIT		0x01	/* wait for completion */
+#define DISCARD_FL_BARRIER	0x02	/* issue DISCARD_BARRIER request */
+extern int blkdev_issue_discard(struct block_device *, sector_t sector,
+		sector_t nr_sects, gfp_t, int flags);

 static inline int sb_issue_discard(struct super_block *sb,
 				   sector_t block, sector_t nr_blocks)
 {
 	block <<= (sb->s_blocksize_bits - 9);
 	nr_blocks <<= (sb->s_blocksize_bits - 9);
-	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
+				    DISCARD_FL_BARRIER);
 }

-/*
- * command filter functions
- */
-extern int blk_verify_command(struct blk_cmd_filter *filter,
-			      unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
@@ -1072,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }

+static inline int bdev_physical_block_size(struct block_device *bdev)
+{
+	return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_min(struct request_queue *q)
 {
 	return q->limits.io_min;
 }

+static inline int bdev_io_min(struct block_device *bdev)
+{
+	return queue_io_min(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_opt(struct request_queue *q)
 {
 	return q->limits.io_opt;
 }

+static inline int bdev_io_opt(struct block_device *bdev)
+{
+	return queue_io_opt(bdev_get_queue(bdev));
+}
+
 static inline int queue_alignment_offset(struct request_queue *q)
 {
-	if (q && q->limits.misaligned)
+	if (q->limits.misaligned)
 		return -1;

-	if (q && q->limits.alignment_offset)
-		return q->limits.alignment_offset;
-
-	return 0;
+	return q->limits.alignment_offset;
 }

 static inline int queue_sector_alignment_offset(struct request_queue *q,
@@ -1100,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q,
 		& (q->limits.io_min - 1);
 }

+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q->limits.misaligned)
+		return -1;
+
+	if (bdev != bdev->bd_contains)
+		return bdev->bd_part->alignment_offset;
+
+	return q->limits.alignment_offset;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
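
The hunks above wire discard support into the queue limits: max_discard_sectors in struct queue_limits, the QUEUE_FLAG_DISCARD bit, the blk_queue_discard() test, and the blk_queue_max_discard_sectors() setter. A minimal, hypothetical sketch of how a driver could use them follows; my_driver_init_queue(), my_driver_can_discard() and the 8192-sector limit are invented for illustration, and queue_flag_set_unlocked() is assumed to be available from this header rather than added by this commit.

/* Illustrative only -- not part of this commit. */
#include <linux/blkdev.h>

static void my_driver_init_queue(struct request_queue *q)
{
	/* advertise the largest discard the hardware accepts (made-up value) */
	blk_queue_max_discard_sectors(q, 8192);

	/* mark the queue discard-capable so upper layers may issue discards */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}

static int my_driver_can_discard(struct request_queue *q)
{
	/* upper layers test support with the new helper before issuing */
	return blk_queue_discard(q);
}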

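blkdev_issue_discard() also gains a flags argument built from the DISCARD_FL_* values introduced above, which sb_issue_discard() now passes as DISCARD_FL_BARRIER. A hedged sketch of a caller; my_trim_range() and its error handling are invented for illustration and not part of this commit.

/* Illustrative only -- not part of this commit. */
#include <linux/blkdev.h>
#include <linux/errno.h>

static int my_trim_range(struct block_device *bdev, sector_t start,
			 sector_t nr_sects)
{
	/* skip devices that did not set QUEUE_FLAG_DISCARD */
	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return -EOPNOTSUPP;

	/* wait for completion and order the discard like sb_issue_discard() */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				    DISCARD_FL_WAIT | DISCARD_FL_BARRIER);
}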