author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 18:03:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 18:03:07 -0400
commit     d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635
tree       dc0039fe490a41a70de10d58fe8e6136db46463a  /fs/buffer.c
parent     75a442efb1ca613f8d1cc71a32c2c9b0aefae4a5
parent     17007f3994cdb4643355c73f54f0adad006cf59e
Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
- the big change is the cleanup from Mike Christie, cleaning up our
  uses of command types and modifier flags; this is what will throw
  some merge conflicts (see the before/after sketch following the
  commit list below)
- regression fix for the above for btrfs, from Vincent
- following up on the above, better packing of struct request from
  Christoph
- a 2038 fix for blktrace from Arnd
- a few trivial/spelling fixes from Bart Van Assche
- a front merge check fix from Damien, which could cause issues on
SMR drives
- Atari partition fix from Gabriel
- convert cfq to highres timers, since jiffies isn't granular enough
for some devices these days. From Jan and Jeff
- CFQ priority boost fix for idle classes, from me
- cleanup series from Ming, improving our bio/bvec iteration
- a direct issue fix for blk-mq from Omar
- fix for plug merging not involving the IO scheduler, like we do for
other types of merges. From Tahsin
- expose DAX type internally and through sysfs. From Toshi and Yigal
* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
block: Fix front merge check
block: do not merge requests without consulting with io scheduler
block: Fix spelling in a source code comment
block: expose QUEUE_FLAG_DAX in sysfs
block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
Btrfs: fix comparison in __btrfs_map_block()
block: atari: Return early for unsupported sector size
Doc: block: Fix a typo in queue-sysfs.txt
cfq-iosched: Charge at least 1 jiffie instead of 1 ns
cfq-iosched: Fix regression in bonnie++ rewrite performance
cfq-iosched: Convert slice_resid from u64 to s64
block: Convert fifo_time from ulong to u64
blktrace: avoid using timespec
block/blk-cgroup.c: Declare local symbols static
block/bio-integrity.c: Add #include "blk.h"
block/partition-generic.c: Remove a set-but-not-used variable
block: bio: kill BIO_MAX_SIZE
cfq-iosched: temporarily boost queue priority for idle classes
block: drbd: avoid to use BIO_MAX_SIZE
block: bio: remove BIO_MAX_SECTORS
...
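To make the core interface change concrete: before this series, a single
'rw' argument carried both the data direction and any modifier flags;
afterwards the operation (a REQ_OP_* value) and the flags travel as
separate arguments. A minimal before/after sketch, distilled from the
fs/buffer.c hunks below (illustrative fragments, not code from the
commit itself):

	/* before: one 'rw' argument mixes direction and modifier flags */
	submit_bh(READ, bh);
	ll_rw_block(WRITE, 1, &bh);

	/* after: the operation and its flags are passed separately */
	submit_bh(REQ_OP_READ, 0, bh);
	ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);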
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  69
1 file changed, 36 insertions(+), 33 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6c15012a75d9..e156a36463a1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -45,7 +45,7 @@
 #include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 			 unsigned long bio_flags,
 			 struct writeback_control *wbc);
 
@@ -588,7 +588,7 @@ void write_boundary_block(struct block_device *bdev,
 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 	if (bh) {
 		if (buffer_dirty(bh))
-			ll_rw_block(WRITE, 1, &bh);
+			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
 		put_bh(bh);
 	}
 }
@@ -1225,7 +1225,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
 	} else {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ, bh);
+		submit_bh(REQ_OP_READ, 0, bh);
 		wait_on_buffer(bh);
 		if (buffer_uptodate(bh))
 			return bh;
@@ -1395,7 +1395,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 	if (likely(bh)) {
-		ll_rw_block(READA, 1, &bh);
+		ll_rw_block(REQ_OP_READ, READA, 1, &bh);
 		brelse(bh);
 	}
 }
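The __breadahead() hunk shows a side effect of the split: READA stops
being a third direction alongside READ and WRITE and becomes a plain
hint flag on a read operation. A hedged sketch of the same call site,
before and after:

	/* before: READA was its own 'rw' value */
	ll_rw_block(READA, 1, &bh);

	/* after: readahead is REQ_OP_READ plus the READA op_flag */
	ll_rw_block(REQ_OP_READ, READA, 1, &bh);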
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	unsigned int blocksize, bbits;
 	int nr_underway = 0;
-	int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+	int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
 
 	head = create_page_buffers(page, inode,
 				   (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -1786,7 +1786,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh_wbc(write_op, bh, 0, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -1840,7 +1840,7 @@ recover:
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh_wbc(write_op, bh, 0, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -1956,7 +1956,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++=bh;
 		}
 	}
@@ -2249,7 +2249,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 		if (buffer_uptodate(bh))
 			end_buffer_async_read(bh, 1);
 		else
-			submit_bh(READ, bh);
+			submit_bh(REQ_OP_READ, 0, bh);
 	}
 	return 0;
 }
@@ -2583,7 +2583,7 @@ int nobh_write_begin(struct address_space *mapping,
 		if (block_start < from || block_end > to) {
 			lock_buffer(bh);
 			bh->b_end_io = end_buffer_read_nobh;
-			submit_bh(READ, bh);
+			submit_bh(REQ_OP_READ, 0, bh);
 			nr_reads++;
 		}
 	}
@@ -2853,7 +2853,7 @@ int block_truncate_page(struct address_space *mapping,
 
 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
 		err = -EIO;
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
@@ -2950,7 +2950,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-void guard_bio_eod(int rw, struct bio *bio)
+void guard_bio_eod(int op, struct bio *bio)
 {
 	sector_t maxsector;
 	struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
@@ -2980,13 +2980,13 @@ void guard_bio_eod(int rw, struct bio *bio)
 	bvec->bv_len -= truncated_bytes;
 
 	/* ..and clear the end of the buffer for reads */
-	if ((rw & RW_MASK) == READ) {
+	if (op == REQ_OP_READ) {
 		zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
 				truncated_bytes);
 	}
 }
 
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 			 unsigned long bio_flags, struct writeback_control *wbc)
 {
 	struct bio *bio;
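The guard_bio_eod() change illustrates why the split helps: the old code
had to mask the combined value with RW_MASK just to learn the direction,
while the new code compares the op directly. Since REQ_OP_* is a plain
enumeration, operations beyond read and write (discard, flush, and the
like, which this series also models as ops) need no extra mask bits. A
condensed sketch of the two styles:

	/* before: direction buried in a bitmask, extracted by masking */
	if ((rw & RW_MASK) == READ)
		zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
				truncated_bytes);

	/* after: the op is an enumerated value, compared directly */
	if (op == REQ_OP_READ)
		zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
				truncated_bytes);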
@@ -3000,7 +3000,7 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh,
 	/*
 	 * Only clear out a write error when rewriting
 	 */
-	if (test_set_buffer_req(bh) && (rw & WRITE))
+	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
 		clear_buffer_write_io_error(bh);
 
 	/*
@@ -3025,32 +3025,35 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh,
 	bio->bi_flags |= bio_flags;
 
 	/* Take care of bh's that straddle the end of the device */
-	guard_bio_eod(rw, bio);
+	guard_bio_eod(op, bio);
 
 	if (buffer_meta(bh))
-		rw |= REQ_META;
+		op_flags |= REQ_META;
 	if (buffer_prio(bh))
-		rw |= REQ_PRIO;
+		op_flags |= REQ_PRIO;
+	bio_set_op_attrs(bio, op, op_flags);
 
-	submit_bio(rw, bio);
+	submit_bio(bio);
 	return 0;
 }
 
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+	       unsigned long bio_flags)
 {
-	return submit_bh_wbc(rw, bh, bio_flags, NULL);
+	return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL);
 }
 EXPORT_SYMBOL_GPL(_submit_bh);
 
-int submit_bh(int rw, struct buffer_head *bh)
+int submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
-	return submit_bh_wbc(rw, bh, 0, NULL);
+	return submit_bh_wbc(op, op_flags, bh, 0, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @op: whether to %READ or %WRITE
+ * @op_flags: rq_flag_bits or %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
 *
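The rewritten tail of submit_bh_wbc() above is the heart of the
conversion: per-buffer flags now accumulate in op_flags, bio_set_op_attrs()
packs the op and flags into the bio, and submit_bio() itself no longer
takes an rw argument. Condensed from the hunk, with comments added:

	if (buffer_meta(bh))
		op_flags |= REQ_META;		/* flags accumulate on their own */
	if (buffer_prio(bh))
		op_flags |= REQ_PRIO;
	bio_set_op_attrs(bio, op, op_flags);	/* record op + flags in the bio */
	submit_bio(bio);			/* the bio now carries everything */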
@@ -3073,7 +3076,7 @@ EXPORT_SYMBOL(submit_bh);
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
  */
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
 {
 	int i;
 
@@ -3082,18 +3085,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 
 		if (!trylock_buffer(bh))
 			continue;
-		if (rw == WRITE) {
+		if (op == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(WRITE, bh);
+				submit_bh(op, op_flags, bh);
 				continue;
 			}
 		} else {
 			if (!buffer_uptodate(bh)) {
 				bh->b_end_io = end_buffer_read_sync;
 				get_bh(bh);
-				submit_bh(rw, bh);
+				submit_bh(op, op_flags, bh);
 				continue;
 			}
 		}
@@ -3102,7 +3105,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 }
 EXPORT_SYMBOL(ll_rw_block);
 
-void write_dirty_buffer(struct buffer_head *bh, int rw)
+void write_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
 	lock_buffer(bh);
 	if (!test_clear_buffer_dirty(bh)) {
@@ -3111,7 +3114,7 @@ void write_dirty_buffer(struct buffer_head *bh, int rw)
 	}
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
-	submit_bh(rw, bh);
+	submit_bh(REQ_OP_WRITE, op_flags, bh);
 }
 EXPORT_SYMBOL(write_dirty_buffer);
 
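Note that write_dirty_buffer() (and __sync_dirty_buffer() below) still
take a single int, but it is now interpreted as op_flags only; the
operation is fixed to REQ_OP_WRITE inside the helper. A hedged
caller-side sketch (these call sites are illustrative, not part of this
patch; WRITE_SYNC is used as a pure flag value, as in the
__block_write_full_page() hunk above):

	write_dirty_buffer(bh, 0);		/* plain asynchronous write-back */
	write_dirty_buffer(bh, WRITE_SYNC);	/* write marked synchronous */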
@@ -3120,7 +3123,7 @@ EXPORT_SYMBOL(write_dirty_buffer);
  * and then start new I/O and then wait upon it. The caller must have a ref on
  * the buffer_head.
  */
-int __sync_dirty_buffer(struct buffer_head *bh, int rw)
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
 	int ret = 0;
 
@@ -3129,7 +3132,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(rw, bh);
+		ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
 		wait_on_buffer(bh);
 		if (!ret && !buffer_uptodate(bh))
 			ret = -EIO;
@@ -3392,7 +3395,7 @@ int bh_submit_read(struct buffer_head *bh)
 
 	get_bh(bh);
 	bh->b_end_io = end_buffer_read_sync;
-	submit_bh(READ, bh);
+	submit_bh(REQ_OP_READ, 0, bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return 0;
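Putting the pieces together, a synchronous single-buffer read under the
new convention follows the bh_submit_read() body above. A minimal hedged
sketch (locking and the caller's surrounding error handling trimmed):

	get_bh(bh);				/* hold a reference across the I/O */
	bh->b_end_io = end_buffer_read_sync;	/* completion handler unlocks the bh */
	submit_bh(REQ_OP_READ, 0, bh);		/* op, (empty) op_flags, buffer */
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;			/* read failed */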