author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 14:53:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 14:53:51 -0400
commit     d3dc366bbaf07c125561e90d6da4bb147741101a (patch)
tree       6eb7e79a8ec9df1fa705393c6d15ccea3d104661 /include/linux
parent     511c41d9e6665a07aca94eb00983cf6d77dd87ff (diff)
parent     e19a8a0ad2d255316830ead05b59c5a704434cbb (diff)
Merge branch 'for-3.18/core' of git://git.kernel.dk/linux-block
Pull core block layer changes from Jens Axboe:
"This is the core block IO pull request for 3.18. Apart from the new
and improved flush machinery for blk-mq, this is all mostly bug fixes
and cleanups.
- blk-mq timeout updates and fixes from Christoph.
- Removal of REQ_END, also from Christoph. We pass it through the
->queue_rq() hook for blk-mq instead, freeing up one of the request
bits. The space was overly tight on 32-bit, so Martin also killed
REQ_KERNEL since it's no longer used.
- blk integrity updates and fixes from Martin and Gu Zheng.
- Update to the flush machinery for blk-mq from Ming Lei. Now we
have a per hardware context flush request, which both cleans up the
code and should scale better for flush intensive workloads on blk-mq.
- Improve the error printing, from Rob Elliott.
- Backing device improvements and cleanups from Tejun.
- Fixup of a misplaced rq_complete() tracepoint from Hannes.
- Make blk_get_request() return error pointers, fixing up issues
where we NULL deref when a device goes bad or missing. From Joe
Lawrence. (A sketch of the new calling convention follows this
message.)
- Prep work for drastically reducing the memory consumption of dm
devices from Junichi Nomura. This allows creating clone bio sets
without preallocating a lot of memory.
- Fix a blk-mq hang on certain combinations of queue depths and
hardware queues from me.
- Limit memory consumption for blk-mq devices for crash dump
scenarios and drivers that use crazy high depths (certain SCSI
shared tag setups). We now just use a single queue and limited
depth for that"
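As a quick illustration of the blk_get_request() change called out above: callers that used to test for a NULL return now check for an ERR_PTR()-encoded error. The sketch below is an editor's example of the new calling convention, not code from this series; submit_probe_request() is a made-up caller.

```c
#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical caller (not from this series) showing the new convention:
 * blk_get_request() now returns ERR_PTR() on failure instead of NULL. */
static int submit_probe_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -ENODEV if the device went away */

	/* ... fill in and issue the request here ... */

	blk_put_request(rq);
	return 0;
}
```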
* 'for-3.18/core' of git://git.kernel.dk/linux-block: (58 commits)
block: Remove REQ_KERNEL
blk-mq: allocate cpumask on the home node
bio-integrity: remove the needless fail handle of bip_slab creating
block: include func name in __get_request prints
block: make blk_update_request print prefix match ratelimited prefix
blk-merge: don't compute bi_phys_segments from bi_vcnt for cloned bio
block: fix alignment_offset math that assumes io_min is a power-of-2
blk-mq: Make bt_clear_tag() easier to read
blk-mq: fix potential hang if rolling wakeup depth is too high
block: add bioset_create_nobvec()
block: use bio_clone_fast() in blk_rq_prep_clone()
block: misplaced rq_complete tracepoint
sd: Honor block layer integrity handling flags
block: Replace strnicmp with strncasecmp
block: Add T10 Protection Information functions
block: Don't merge requests if integrity flags differ
block: Integrity checksum flag
block: Relocate bio integrity flags
block: Add a disk flag to block integrity profile
block: Add prefix to block integrity profile flags
...
Diffstat (limited to 'include/linux')
 include/linux/backing-dev.h |  4
 include/linux/bio.h         | 65
 include/linux/blk-mq.h      | 22
 include/linux/blk_types.h   | 18
 include/linux/blkdev.h      | 71
 include/linux/crc-t10dif.h  |  5
 include/linux/fs.h          |  2
 include/linux/nfs_fs.h      |  4
 include/linux/t10-pi.h      | 22
 9 files changed, 133 insertions(+), 80 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e488e9459a93..5da6012b7a14 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -28,12 +28,10 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-	BDI_wb_alloc,		/* Default embedded wb allocated */
 	BDI_async_congested,	/* The async (write) queue is getting full */
 	BDI_sync_congested,	/* The sync queue is getting full */
 	BDI_registered,		/* bdi_register() was done */
 	BDI_writeback_running,	/* Writeback is in progress */
-	BDI_unused,		/* Available bits start here */
 };
 
 typedef int (congested_fn)(void *, int);
@@ -50,7 +48,6 @@ enum bdi_stat_item {
 
 struct bdi_writeback {
 	struct backing_dev_info *bdi;	/* our parent bdi */
-	unsigned int nr;
 
 	unsigned long last_old_flush;	/* last old data flush */
 
@@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
 void bdi_writeback_workfn(struct work_struct *work);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b39e5000ff58..7347f486ceca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -292,7 +292,24 @@ static inline unsigned bio_segments(struct bio *bio)
  */
 #define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
 
+enum bip_flags {
+	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
+	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
+	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
+	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
+	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
+};
+
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+	if (bio->bi_rw & REQ_INTEGRITY)
+		return bio->bi_integrity;
+
+	return NULL;
+}
+
 /*
  * bio integrity payload
  */
@@ -301,21 +318,40 @@ struct bio_integrity_payload {
 
 	struct bvec_iter	bip_iter;
 
-	/* kill - should just use bip_vec */
-	void			*bip_buf;	/* generated integrity data */
-
 	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */
 
 	unsigned short		bip_slab;	/* slab the bip came from */
 	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
 	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
-	unsigned		bip_owns_buf:1;	/* should free bip_buf */
+	unsigned short		bip_flags;	/* control flags */
 
 	struct work_struct	bip_work;	/* I/O completion */
 
 	struct bio_vec		*bip_vec;
 	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
 };
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	if (bip)
+		return bip->bip_flags & flag;
+
+	return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+	return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+				sector_t seed)
+{
+	bip->bip_iter.bi_sector = seed;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 extern void bio_trim(struct bio *bio, int offset, int size);
@@ -342,6 +378,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
 }
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
+extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 extern mempool_t *biovec_create_pool(int pool_entries);
 
@@ -353,7 +390,6 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
-unsigned int bio_integrity_tag_size(struct bio *bio);
 
 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
@@ -661,14 +697,10 @@ struct biovec_slab {
 	for_each_bio(_bio)						\
 		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
 
-#define bio_integrity(bio) (bio->bi_integrity != NULL)
-
 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
 extern void bio_integrity_free(struct bio *);
 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern int bio_integrity_enabled(struct bio *bio);
-extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
-extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
+extern bool bio_integrity_enabled(struct bio *bio);
 extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
@@ -680,14 +712,14 @@ extern void bio_integrity_init(void);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-static inline int bio_integrity(struct bio *bio)
+static inline void *bio_integrity(struct bio *bio)
 {
-	return 0;
+	return NULL;
 }
 
-static inline int bio_integrity_enabled(struct bio *bio)
+static inline bool bio_integrity_enabled(struct bio *bio)
 {
-	return 0;
+	return false;
 }
 
 static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
@@ -733,6 +765,11 @@ static inline void bio_integrity_init(void)
 	return;
 }
 
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 #endif /* CONFIG_BLOCK */
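To show how the new bio.h helpers above fit together: bio_integrity() now returns the payload only when REQ_INTEGRITY is set on the bio, and bio_integrity_flagged() folds the NULL check in. The sketch below is an editor's illustration; hba_should_verify() is a hypothetical driver helper, not part of this series.

```c
#include <linux/bio.h>

/* Hypothetical driver helper built on the new bip_flags interface. */
static bool hba_should_verify(struct bio *bio)
{
	/* Returns NULL unless the bio carries REQ_INTEGRITY */
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (!bip)
		return false;	/* no protection information attached */

	/* BIP_CTRL_NOCHECK asks the HBA to skip its own verification */
	return !bio_integrity_flagged(bio, BIP_CTRL_NOCHECK);
}
```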
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c13a0c09faea..c9be1589415a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,6 +4,7 @@
 #include <linux/blkdev.h>
 
 struct blk_mq_tags;
+struct blk_flush_queue;
 
 struct blk_mq_cpu_notifier {
 	struct list_head list;
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {
 
 	struct request_queue	*queue;
 	unsigned int		queue_num;
+	struct blk_flush_queue	*fq;
 
 	void			*driver_data;
 
@@ -77,8 +79,9 @@ struct blk_mq_tag_set {
 	struct list_head	tag_list;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -86,6 +89,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
 typedef void (exit_request_fn)(void *, struct request *, unsigned int,
 		unsigned int);
 
+typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+		bool);
+
 struct blk_mq_ops {
 	/*
 	 * Queue request
@@ -100,7 +106,7 @@ struct blk_mq_ops {
 	/*
 	 * Called on request timeout
 	 */
-	rq_timed_out_fn		*timeout;
+	timeout_fn		*timeout;
 
 	softirq_done_fn		*complete;
 
@@ -115,6 +121,10 @@ struct blk_mq_ops {
 	/*
 	 * Called for every command allocated by the block layer to allow
 	 * the driver to set up driver specific data.
+	 *
+	 * Tag greater than or equal to queue_depth is for setting up
+	 * flush request.
+	 *
 	 * Ditto for exit/teardown.
 	 */
 	init_request_fn		*init_request;
@@ -160,8 +170,9 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
-void blk_mq_end_io(struct request *rq, int error);
-void __blk_mq_end_io(struct request *rq, int error);
+void blk_mq_start_request(struct request *rq);
+void blk_mq_end_request(struct request *rq, int error);
+void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
@@ -174,7 +185,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+		void *priv);
 
 /*
  * Driver command data is immediately after the request. So subtract request
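With REQ_END gone, the end-of-chain hint reaches blk-mq drivers as the new bool argument to ->queue_rq(), and blk_mq_start_request() is now exported for drivers to call. The sketch below is an editor's illustration of the updated prototype, not a driver from this tree; the doorbell comment marks where a real driver would kick its hardware.

```c
#include <linux/blk-mq.h>

/* Hypothetical ->queue_rq() implementation against the 3.18 prototype. */
static int sketch_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			   bool last)
{
	blk_mq_start_request(rq);

	/* ... translate rq into a hardware command and post it ... */

	if (last) {
		/* end of the submission chain (what REQ_END used to signal):
		 * a real driver would ring its doorbell / notify the device here */
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
```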
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 66c2167f04a9..445d59231bc4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -78,9 +78,11 @@ struct bio {
 	struct io_context	*bi_ioc;
 	struct cgroup_subsys_state *bi_css;
 #endif
+	union {
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	struct bio_integrity_payload *bi_integrity; /* data integrity */
 #endif
+	};
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 
@@ -118,10 +120,8 @@ struct bio {
 #define BIO_USER_MAPPED 6	/* contains user pages */
 #define BIO_EOPNOTSUPP	7	/* not supported */
 #define BIO_NULL_MAPPED 8	/* contains invalid user pages */
-#define BIO_FS_INTEGRITY 9	/* fs owns integrity data, not block layer */
-#define BIO_QUIET	10	/* Make BIO Quiet */
-#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
-#define BIO_SNAP_STABLE	12	/* bio data must be snapshotted during write */
+#define BIO_QUIET	9	/* Make BIO Quiet */
+#define BIO_SNAP_STABLE	10	/* bio data must be snapshotted during write */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
@@ -162,6 +162,7 @@ enum rq_flag_bits {
 	__REQ_WRITE_SAME,	/* write same block many times */
 
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
+	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
 	__REQ_FUA,		/* forced unit access */
 	__REQ_FLUSH,		/* request for cache flush */
 
@@ -186,9 +187,7 @@ enum rq_flag_bits {
 	__REQ_FLUSH_SEQ,	/* request for flush sequence */
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_KERNEL,		/* direct IO to kernel pages */
 	__REQ_PM,		/* runtime pm request */
-	__REQ_END,		/* last of chain of requests */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
 	__REQ_NR_BITS,		/* stops here */
@@ -204,13 +203,14 @@ enum rq_flag_bits {
 #define REQ_DISCARD		(1ULL << __REQ_DISCARD)
 #define REQ_WRITE_SAME		(1ULL << __REQ_WRITE_SAME)
 #define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
+#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
 	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-	 REQ_SECURE)
+	 REQ_SECURE | REQ_INTEGRITY)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
@@ -240,9 +240,7 @@ enum rq_flag_bits {
 #define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
 #define REQ_SECURE		(1ULL << __REQ_SECURE)
-#define REQ_KERNEL		(1ULL << __REQ_KERNEL)
 #define REQ_PM			(1ULL << __REQ_PM)
-#define REQ_END			(1ULL << __REQ_END)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 87be398166d3..0207a78a8d82 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -36,6 +36,7 @@ struct request;
 struct sg_io_hdr;
 struct bsg_job;
 struct blkcg_gq;
+struct blk_flush_queue;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -455,14 +456,7 @@ struct request_queue {
 	 */
 	unsigned int		flush_flags;
 	unsigned int		flush_not_queueable:1;
-	unsigned int		flush_queue_delayed:1;
-	unsigned int		flush_pending_idx:1;
-	unsigned int		flush_running_idx:1;
-	unsigned long		flush_pending_since;
-	struct list_head	flush_queue[2];
-	struct list_head	flush_data_in_flight;
-	struct request		*flush_rq;
-	spinlock_t		mq_flush_lock;
+	struct blk_flush_queue	*fq;
 
 	struct list_head	requeue_list;
 	spinlock_t		requeue_lock;
@@ -865,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
-	return bdev->bd_disk->queue;
+	return bdev->bd_disk->queue;	/* this is never NULL */
 }
 
 /*
@@ -1285,10 +1279,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
-	unsigned int alignment = (sector << 9) & (granularity - 1);
+	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
 
-	return (granularity + lim->alignment_offset - alignment)
-		& (granularity - 1);
+	return (granularity + lim->alignment_offset - alignment) % granularity;
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1464,32 +1457,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
-#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
+enum blk_integrity_flags {
+	BLK_INTEGRITY_VERIFY		= 1 << 0,
+	BLK_INTEGRITY_GENERATE		= 1 << 1,
+	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
+	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
+};
 
-struct blk_integrity_exchg {
+struct blk_integrity_iter {
 	void			*prot_buf;
 	void			*data_buf;
-	sector_t		sector;
+	sector_t		seed;
 	unsigned int		data_size;
-	unsigned short		sector_size;
+	unsigned short		interval;
 	const char		*disk_name;
 };
 
-typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
-typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
-typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
-typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
 struct blk_integrity {
-	integrity_gen_fn	*generate_fn;
-	integrity_vrfy_fn	*verify_fn;
-	integrity_set_tag_fn	*set_tag_fn;
-	integrity_get_tag_fn	*get_tag_fn;
+	integrity_processing_fn	*generate_fn;
+	integrity_processing_fn	*verify_fn;
 
 	unsigned short		flags;
 	unsigned short		tuple_size;
-	unsigned short		sector_size;
+	unsigned short		interval;
 	unsigned short		tag_size;
 
 	const char		*name;
@@ -1504,10 +1496,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
 				   struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 				  struct request *);
-extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
 				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1520,12 +1512,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 	return disk->integrity;
 }
 
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(struct request *rq)
 {
-	if (rq->bio == NULL)
-		return 0;
-
-	return bio_integrity(rq->bio);
+	return rq->cmd_flags & REQ_INTEGRITY;
 }
 
 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
@@ -1590,15 +1579,15 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue *
 {
 	return 0;
 }
-static inline int blk_integrity_merge_rq(struct request_queue *rq,
+static inline bool blk_integrity_merge_rq(struct request_queue *rq,
 					  struct request *r1,
 					  struct request *r2)
 {
 	return 0;
 }
-static inline int blk_integrity_merge_bio(struct request_queue *rq,
+static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 					  struct request *r,
 					  struct bio *b)
 {
 	return 0;
 }
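A small worked example of the alignment fix above: when io_min is not a power of two (say 384 KiB, a three-chunk RAID stripe), granularity - 1 is not a usable bit mask, so the old shift-and-mask math can return a different answer than the division-based version. The userspace sketch below re-implements both formulas with made-up numbers; it is an editor's illustration, not kernel code.

```c
#include <stdio.h>

/* Old math: assumes granularity is a power of two. */
static unsigned int old_offset(unsigned int granularity, unsigned int aoff,
			       unsigned long long sector)
{
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + aoff - alignment) & (granularity - 1);
}

/* New math: divide instead of mask (sector_div() in the kernel). */
static unsigned int new_offset(unsigned int granularity, unsigned int aoff,
			       unsigned long long sector)
{
	unsigned int alignment = (sector % (granularity >> 9)) << 9;

	return (granularity + aoff - alignment) % granularity;
}

int main(void)
{
	unsigned int granularity = 384 * 1024;	/* 384 KiB io_min: not a power of two */
	unsigned long long sector = 896;	/* 448 KiB into the device */

	printf("old: %u\n", old_offset(granularity, 0, sector));	/* prints 65536 */
	printf("new: %u\n", new_offset(granularity, 0, sector));	/* prints 327680 */
	return 0;
}
```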
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..cf53d0773ce3 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,7 +6,8 @@
 #define CRC_T10DIF_DIGEST_SIZE 2
 #define CRC_T10DIF_BLOCK_SIZE 1
 
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
-__u16 crc_t10dif(unsigned char const *, size_t);
+extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
+				size_t len);
+extern __u16 crc_t10dif(unsigned char const *, size_t);
 
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ab4f1a10da20..a957d4366c24 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -192,8 +192,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define READ			0
 #define WRITE			RW_MASK
 #define READA			RWA_MASK
-#define KERNEL_READ		(READ|REQ_KERNEL)
-#define KERNEL_WRITE		(WRITE|REQ_KERNEL)
 
 #define READ_SYNC		(READ | REQ_SYNC)
 #define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 28d649054d5f..c72d1ad41ad4 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -448,10 +448,10 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
 extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
 			struct iov_iter *iter,
-			loff_t pos, bool uio);
+			loff_t pos);
 extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
 			struct iov_iter *iter,
-			loff_t pos, bool uio);
+			loff_t pos);
 
 /*
  * linux/fs/nfs/dir.c
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
new file mode 100644
index 000000000000..6a8b9942632d
--- /dev/null
+++ b/include/linux/t10-pi.h
@@ -0,0 +1,22 @@
+#ifndef _LINUX_T10_PI_H
+#define _LINUX_T10_PI_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+
+/*
+ * T10 Protection Information tuple.
+ */
+struct t10_pi_tuple {
+	__be16 guard_tag;	/* Checksum */
+	__be16 app_tag;		/* Opaque storage */
+	__be32 ref_tag;		/* Target LBA or indirect LBA */
+};
+
+
+extern struct blk_integrity t10_pi_type1_crc;
+extern struct blk_integrity t10_pi_type1_ip;
+extern struct blk_integrity t10_pi_type3_crc;
+extern struct blk_integrity t10_pi_type3_ip;
+
+#endif
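The new header exports ready-made integrity profiles that a disk driver can hand to the block layer (sd is the in-tree user per the shortlog above). Below is a minimal, hedged registration sketch assuming a fully set-up gendisk; sketch_register_pi() is hypothetical and not code from this series.

```c
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/t10-pi.h>

/* Hypothetical driver hook: attach the shared Type 1 CRC profile so the
 * block layer generates and verifies T10 PI tuples for this disk. */
static void sketch_register_pi(struct gendisk *disk)
{
	blk_integrity_register(disk, &t10_pi_type1_crc);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
```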
