-rw-r--r--	fs/buffer.c	52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3586fb05c8ce..c4e11390a44c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2893,6 +2893,55 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
 	bio_put(bio);
 }
 
+/*
+ * This allows us to do IO even on the odd last sectors
+ * of a device, even if the bh block size is some multiple
+ * of the physical sector size.
+ *
+ * We'll just truncate the bio to the size of the device,
+ * and clear the end of the buffer head manually.
+ *
+ * Truly out-of-range accesses will turn into actual IO
+ * errors, this only handles the "we need to be able to
+ * do IO at the final sector" case.
+ */
+static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
+{
+	sector_t maxsector;
+	unsigned bytes;
+
+	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+	if (!maxsector)
+		return;
+
+	/*
+	 * If the *whole* IO is past the end of the device,
+	 * let it through, and the IO layer will turn it into
+	 * an EIO.
+	 */
+	if (unlikely(bio->bi_sector >= maxsector))
+		return;
+
+	maxsector -= bio->bi_sector;
+	bytes = bio->bi_size;
+	if (likely((bytes >> 9) <= maxsector))
+		return;
+
+	/* Uhhuh. We've got a bh that straddles the device size! */
+	bytes = maxsector << 9;
+
+	/* Truncate the bio.. */
+	bio->bi_size = bytes;
+	bio->bi_io_vec[0].bv_len = bytes;
+
+	/* ..and clear the end of the buffer for reads */
+	if (rw & READ) {
+		void *kaddr = kmap_atomic(bh->b_page);
+		memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
+		kunmap_atomic(kaddr);
+	}
+}
+
 int submit_bh(int rw, struct buffer_head * bh)
 {
 	struct bio *bio;
@@ -2929,6 +2978,9 @@ int submit_bh(int rw, struct buffer_head * bh)
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
 
+	/* Take care of bh's that straddle the end of the device */
+	guard_bh_eod(rw, bio, bh);
+
 	bio_get(bio);
 	submit_bio(rw, bio);
 
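
For readers following the sector arithmetic in the patch: i_size_read() yields the block device's size in bytes, so shifting right by 9 converts bytes to 512-byte sectors, and shifting left by 9 converts back. For example, a 4096-byte bh read starting at sector 6 of a 7-sector device has only one sector (512 bytes) actually on the device, so the bio is cut to 512 bytes and the remaining 3584 bytes of the buffer are zeroed. The standalone C sketch below reproduces that truncate-and-zero logic outside the kernel; it is an illustrative approximation, not the kernel code, and the names guard_eod, dev_sectors, io_start and io_bytes are invented for this example rather than taken from the patch.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/*
	 * Illustrative userspace sketch of the end-of-device guard,
	 * assuming 512-byte sectors. Returns the number of bytes of
	 * the IO that actually fit on the device; for reads it zeroes
	 * the tail of the buffer, mirroring what guard_bh_eod() does
	 * to the bh's page.
	 */
	static unsigned guard_eod(uint64_t dev_sectors, uint64_t io_start,
				  unsigned io_bytes, int is_read, void *buf)
	{
		uint64_t remaining;
		unsigned kept;

		/* Unknown device size, or IO wholly past the end: leave
		 * the IO alone and let the lower layer fail it. */
		if (!dev_sectors || io_start >= dev_sectors)
			return io_bytes;

		remaining = dev_sectors - io_start;
		if ((io_bytes >> 9) <= remaining)
			return io_bytes;	/* fits entirely on the device */

		/* The IO straddles the device size: keep only what fits... */
		kept = (unsigned)(remaining << 9);

		/* ...and clear the unreadable tail so reads see zeroes. */
		if (is_read)
			memset((char *)buf + kept, 0, io_bytes - kept);

		return kept;
	}

	int main(void)
	{
		char buf[4096];
		/* 4 KiB read at sector 6 of a 7-sector device. */
		unsigned kept = guard_eod(7, 6, sizeof(buf), 1, buf);

		printf("kept %u of %zu bytes\n", kept, sizeof(buf));
		return 0;
	}

Running the sketch prints "kept 512 of 4096 bytes": the same outcome guard_bh_eod() arranges for the final partial block of an oddly sized device, while IO that starts wholly past the end is passed through untouched so the block layer can report EIO.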
