Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	70
1 file changed, 59 insertions(+), 11 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3586fb05c8ce..c017a2dfb909 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -46,8 +46,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
 #define BH_ENTRY(list)	list_entry((list), struct buffer_head, b_assoc_buffers)
 
-inline void
-init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
+void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 {
 	bh->b_end_io = handler;
 	bh->b_private = private;
@@ -555,7 +554,7 @@ void emergency_thaw_all(void)
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
-	struct address_space *buffer_mapping = mapping->assoc_mapping;
+	struct address_space *buffer_mapping = mapping->private_data;
 
 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
 		return 0;
@@ -588,10 +587,10 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 	struct address_space *buffer_mapping = bh->b_page->mapping;
 
 	mark_buffer_dirty(bh);
-	if (!mapping->assoc_mapping) {
-		mapping->assoc_mapping = buffer_mapping;
+	if (!mapping->private_data) {
+		mapping->private_data = buffer_mapping;
 	} else {
-		BUG_ON(mapping->assoc_mapping != buffer_mapping);
+		BUG_ON(mapping->private_data != buffer_mapping);
 	}
 	if (!bh->b_assoc_map) {
 		spin_lock(&buffer_mapping->private_lock);
@@ -788,7 +787,7 @@ void invalidate_inode_buffers(struct inode *inode)
 	if (inode_has_buffers(inode)) {
 		struct address_space *mapping = &inode->i_data;
 		struct list_head *list = &mapping->private_list;
-		struct address_space *buffer_mapping = mapping->assoc_mapping;
+		struct address_space *buffer_mapping = mapping->private_data;
 
 		spin_lock(&buffer_mapping->private_lock);
 		while (!list_empty(list))
@@ -811,7 +810,7 @@ int remove_inode_buffers(struct inode *inode)
 	if (inode_has_buffers(inode)) {
 		struct address_space *mapping = &inode->i_data;
 		struct list_head *list = &mapping->private_list;
-		struct address_space *buffer_mapping = mapping->assoc_mapping;
+		struct address_space *buffer_mapping = mapping->private_data;
 
 		spin_lock(&buffer_mapping->private_lock);
 		while (!list_empty(list)) {
@@ -850,13 +849,10 @@ try_again:
 		if (!bh)
 			goto no_grow;
 
-		bh->b_bdev = NULL;
 		bh->b_this_page = head;
 		bh->b_blocknr = -1;
 		head = bh;
 
-		bh->b_state = 0;
-		atomic_set(&bh->b_count, 0);
 		bh->b_size = size;
 
 		/* Link the buffer to its page */
@@ -2893,6 +2889,55 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
 	bio_put(bio);
 }
 
+/*
+ * This allows us to do IO even on the odd last sectors
+ * of a device, even if the bh block size is some multiple
+ * of the physical sector size.
+ *
+ * We'll just truncate the bio to the size of the device,
+ * and clear the end of the buffer head manually.
+ *
+ * Truly out-of-range accesses will turn into actual IO
+ * errors, this only handles the "we need to be able to
+ * do IO at the final sector" case.
+ */
+static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
+{
+	sector_t maxsector;
+	unsigned bytes;
+
+	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+	if (!maxsector)
+		return;
+
+	/*
+	 * If the *whole* IO is past the end of the device,
+	 * let it through, and the IO layer will turn it into
+	 * an EIO.
+	 */
+	if (unlikely(bio->bi_sector >= maxsector))
+		return;
+
+	maxsector -= bio->bi_sector;
+	bytes = bio->bi_size;
+	if (likely((bytes >> 9) <= maxsector))
+		return;
+
+	/* Uhhuh. We've got a bh that straddles the device size! */
+	bytes = maxsector << 9;
+
+	/* Truncate the bio.. */
+	bio->bi_size = bytes;
+	bio->bi_io_vec[0].bv_len = bytes;
+
+	/* ..and clear the end of the buffer for reads */
+	if ((rw & RW_MASK) == READ) {
+		void *kaddr = kmap_atomic(bh->b_page);
+		memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
+		kunmap_atomic(kaddr);
+	}
+}
+
 int submit_bh(int rw, struct buffer_head * bh)
 {
 	struct bio *bio;
@@ -2929,6 +2974,9 @@ int submit_bh(int rw, struct buffer_head * bh)
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
 
+	/* Take care of bh's that straddle the end of the device */
+	guard_bh_eod(rw, bio, bh);
+
 	bio_get(bio);
 	submit_bio(rw, bio);
 
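
For reference, the truncation arithmetic that guard_bh_eod() applies above can be exercised as a small standalone userspace program. This is only an illustrative sketch: guard_eod_bytes() and its parameters are hypothetical names, not kernel API, and 512-byte sectors are assumed (the same assumption behind the << 9 / >> 9 shifts in the diff). The tail-clearing memset for reads is omitted; only the size computation is modeled.

/*
 * Sketch of the guard_bh_eod() size computation, userspace only.
 * Returns how many bytes of an IO of io_bytes starting at
 * start_sector may actually be issued against a device of
 * disk_bytes total size.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

static unsigned int guard_eod_bytes(sector_t start_sector,
				    unsigned int io_bytes,
				    uint64_t disk_bytes)
{
	sector_t maxsector = disk_bytes >> 9;	/* device size in sectors */

	/* Size unknown, or the *whole* IO is past the end of the
	 * device: leave it alone and let the block layer turn it
	 * into an EIO. */
	if (!maxsector || start_sector >= maxsector)
		return io_bytes;

	maxsector -= start_sector;		/* sectors left before EOD */
	if ((io_bytes >> 9) <= maxsector)
		return io_bytes;		/* fits entirely: untouched */

	/* The IO straddles the device size: truncate to the end. */
	return maxsector << 9;
}

int main(void)
{
	/* A 4096-byte IO at sector 0 of a 7-sector (3584-byte) device
	 * hangs 512 bytes past EOD, so it is cut back to 3584. */
	printf("%u\n", guard_eod_bytes(0, 4096, 7 * 512));	/* 3584 */

	/* The same IO starting past the end is passed through
	 * unchanged; the block layer will fail it with -EIO. */
	printf("%u\n", guard_eod_bytes(8, 4096, 7 * 512));	/* 4096 */
	return 0;
}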