Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 39 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 34 insertions(+), 5 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 16cfbcd254..35527dca1d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -452,6 +452,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                                bdevname(bh->b_bdev, b));
                 }
                 set_bit(AS_EIO, &page->mapping->flags);
+                set_buffer_write_io_error(bh);
                 clear_buffer_uptodate(bh);
                 SetPageError(page);
         }
@@ -571,6 +572,10 @@ EXPORT_SYMBOL(mark_buffer_async_write);
 static inline void __remove_assoc_queue(struct buffer_head *bh)
 {
         list_del_init(&bh->b_assoc_buffers);
+        WARN_ON(!bh->b_assoc_map);
+        if (buffer_write_io_error(bh))
+                set_bit(AS_EIO, &bh->b_assoc_map->flags);
+        bh->b_assoc_map = NULL;
 }
 
 int inode_has_buffers(struct inode *inode)
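Taken together, the two hunks above implement write-error propagation: end_buffer_async_write() marks the buffer_head with set_buffer_write_io_error(), and __remove_assoc_queue() folds that per-buffer error into the AS_EIO flag of the associated mapping when the buffer is detached from the inode's list, so a later fsync() on the inode can report -EIO instead of silently succeeding. A minimal userspace sketch of that hand-off (model types and names only, not the kernel API):

/*
 * Userspace model of the error hand-off in this patch (not kernel code):
 * a per-buffer write-error flag is folded into a per-mapping AS_EIO-style
 * bit when the buffer is detached, and a later fsync-like call reports
 * and clears it exactly once.
 */
#include <stdbool.h>
#include <stdio.h>

struct mapping { unsigned long flags; };        /* stands in for address_space */
struct buffer  { bool write_io_error; struct mapping *assoc_map; };

#define AS_EIO_BIT 0x1UL

/* Mirrors __remove_assoc_queue(): move the buffer's error to its mapping. */
static void detach_buffer(struct buffer *bh)
{
        if (bh->write_io_error && bh->assoc_map)
                bh->assoc_map->flags |= AS_EIO_BIT;
        bh->assoc_map = NULL;
}

/* Mirrors the test-and-clear of AS_EIO on the fsync/wait path. */
static int report_and_clear_error(struct mapping *m)
{
        if (m->flags & AS_EIO_BIT) {
                m->flags &= ~AS_EIO_BIT;
                return -5;                      /* -EIO */
        }
        return 0;
}

int main(void)
{
        struct mapping m = { 0 };
        struct buffer bh = { .write_io_error = true, .assoc_map = &m };

        detach_buffer(&bh);                     /* error moves to the mapping */
        printf("fsync -> %d\n", report_and_clear_error(&m));   /* -5 (EIO) */
        printf("fsync -> %d\n", report_and_clear_error(&m));   /* 0        */
        return 0;
}

The test-and-clear step means the error is reported once and then forgotten, matching how AS_EIO is typically consumed on the wait-for-writeback path.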
@@ -669,6 +674,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
                 spin_lock(&buffer_mapping->private_lock);
                 list_move_tail(&bh->b_assoc_buffers,
                                 &mapping->private_list);
+                bh->b_assoc_map = mapping;
                 spin_unlock(&buffer_mapping->private_lock);
         }
 }
@@ -701,7 +707,10 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-        struct address_space * const mapping = page->mapping;
+        struct address_space * const mapping = page_mapping(page);
+
+        if (unlikely(!mapping))
+                return !TestSetPageDirty(page);
 
         spin_lock(&mapping->private_lock);
         if (page_has_buffers(page)) {
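__set_page_dirty_buffers() now obtains the mapping via page_mapping() and copes with a NULL result (for example when the page has been truncated out of the page cache), falling back to simply setting the page's dirty bit instead of dereferencing a stale mapping. A small userspace sketch of the same guard (model structures and hypothetical names, not the kernel's):

/* Userspace model of the NULL-mapping guard. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mapping { int unused; };
struct page {
        struct mapping *mapping;        /* NULL once truncated / not in the page cache */
        bool dirty;
};

/* Stand-in for page_mapping(): may legitimately return NULL. */
static struct mapping *get_mapping(struct page *p)
{
        return p->mapping;
}

/* Returns true if the page was newly dirtied, like !TestSetPageDirty(). */
static bool set_page_dirty_buffers(struct page *p)
{
        struct mapping *m = get_mapping(p);
        bool was_dirty = p->dirty;

        p->dirty = true;
        if (!m)                         /* no mapping: nothing else to walk or lock */
                return !was_dirty;

        /*
         * With a mapping, the real function also walks the attached buffers
         * and tags the page dirty in the mapping under its locks.
         */
        return !was_dirty;
}

int main(void)
{
        struct page p = { .mapping = NULL, .dirty = false };

        printf("%d\n", set_page_dirty_buffers(&p));     /* 1: newly dirtied */
        printf("%d\n", set_page_dirty_buffers(&p));     /* 0: already dirty */
        return 0;
}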
@@ -762,7 +771,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
         spin_lock(lock);
         while (!list_empty(list)) {
                 bh = BH_ENTRY(list->next);
-                list_del_init(&bh->b_assoc_buffers);
+                __remove_assoc_queue(bh);
                 if (buffer_dirty(bh) || buffer_locked(bh)) {
                         list_add(&bh->b_assoc_buffers, &tmp);
                         if (buffer_dirty(bh)) {
@@ -783,7 +792,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
         while (!list_empty(&tmp)) {
                 bh = BH_ENTRY(tmp.prev);
-                __remove_assoc_queue(bh);
+                list_del_init(&bh->b_assoc_buffers);
                 get_bh(bh);
                 spin_unlock(lock);
                 wait_on_buffer(bh);
@@ -1039,8 +1048,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
         } while ((size << sizebits) < PAGE_SIZE);
 
         index = block >> sizebits;
-        block = index << sizebits;
 
+        /*
+         * Check for a block which wants to lie outside our maximum possible
+         * pagecache index. (this comparison is done using sector_t types).
+         */
+        if (unlikely(index != block >> sizebits)) {
+                char b[BDEVNAME_SIZE];
+
+                printk(KERN_ERR "%s: requested out-of-range block %llu for "
+                        "device %s\n",
+                        __FUNCTION__, (unsigned long long)block,
+                        bdevname(bdev, b));
+                return -EIO;
+        }
+        block = index << sizebits;
         /* Create a page with the proper size buffers.. */
         page = grow_dev_page(bdev, block, index, size);
         if (!page)
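The new check in grow_buffers() catches a block number whose page-cache index does not fit in a pgoff_t: index = block >> sizebits may silently truncate when sector_t is wider than pgoff_t, so widening index back and comparing it against block >> sizebits detects the overflow before the block is clamped into range. A standalone illustration of that narrow-and-compare idiom (illustrative fixed-width typedefs, not the kernel's):

/* Standalone illustration of the narrow-and-compare overflow check. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t_demo;         /* block numbers: possibly 64-bit       */
typedef uint32_t pgoff_t_demo;          /* page-cache index: may be only 32-bit */

/* Returns 0 on success, -1 if the block lies beyond the largest index. */
static int check_block_index(sector_t_demo block, int sizebits, pgoff_t_demo *out)
{
        pgoff_t_demo index = block >> sizebits;         /* may silently truncate    */

        if (index != block >> sizebits)                 /* compared at 64-bit width */
                return -1;
        *out = index;
        return 0;
}

int main(void)
{
        pgoff_t_demo index;

        /* sizebits = 3: eight 512-byte blocks per 4K page. */
        printf("%d\n", check_block_index((sector_t_demo)1 << 20, 3, &index)); /* 0  */
        printf("%d\n", check_block_index((sector_t_demo)1 << 40, 3, &index)); /* -1 */
        return 0;
}

With the check in place, grow_buffers() can fail such requests with -EIO rather than quietly mapping them onto a wrapped index.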
@@ -1067,12 +1089,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
         for (;;) {
                 struct buffer_head * bh;
+                int ret;
 
                 bh = __find_get_block(bdev, block, size);
                 if (bh)
                         return bh;
 
-                if (!grow_buffers(bdev, block, size))
+                ret = grow_buffers(bdev, block, size);
+                if (ret < 0)
+                        return NULL;
+                if (ret == 0)
                         free_more_memory();
         }
 }
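With grow_buffers() now returning a negative errno in addition to 0/1, __getblk_slow() distinguishes three outcomes: a negative value is a permanent failure (such as the out-of-range block above) and the lookup gives up, zero means no page could be created so memory is reclaimed and the loop retries, and a positive value means buffers were grown and the next __find_get_block() should find them. A compact userspace sketch of that retry loop around a tri-state helper (hypothetical names):

/* Userspace sketch of a retry loop around a tri-state grow function. */
#include <stdio.h>

/* Hypothetical stand-in: <0 hard error, 0 transient failure, >0 success. */
static int try_grow(int attempt)
{
        return attempt == 0 ? 0 : 1;    /* pretend the first attempt finds no memory */
}

static const char *getblk_slow_demo(void)
{
        for (int attempt = 0; ; attempt++) {
                /* the real code retries __find_get_block() at the top of the loop */
                int ret = try_grow(attempt);

                if (ret < 0)
                        return NULL;    /* permanent error: give up              */
                if (ret == 0)
                        continue;       /* transient: reclaim memory, then retry */
                return "buffer";        /* grown: the next lookup would succeed  */
        }
}

int main(void)
{
        printf("%s\n", getblk_slow_demo() ? "got buffer" : "failed");
        return 0;
}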
@@ -1147,6 +1173,7 @@ void __bforget(struct buffer_head *bh)
 
                 spin_lock(&buffer_mapping->private_lock);
                 list_del_init(&bh->b_assoc_buffers);
+                bh->b_assoc_map = NULL;
                 spin_unlock(&buffer_mapping->private_lock);
         }
         __brelse(bh);
@@ -1834,6 +1861,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                         clear_buffer_new(bh);
                         kaddr = kmap_atomic(page, KM_USER0);
                         memset(kaddr+block_start, 0, bh->b_size);
+                        flush_dcache_page(page);
                         kunmap_atomic(kaddr, KM_USER0);
                         set_buffer_uptodate(bh);
                         mark_buffer_dirty(bh);
@@ -2340,6 +2368,7 @@ failed:
          */
         kaddr = kmap_atomic(page, KM_USER0);
         memset(kaddr, 0, PAGE_CACHE_SIZE);
+        flush_dcache_page(page);
         kunmap_atomic(kaddr, KM_USER0);
         SetPageUptodate(page);
         set_page_dirty(page);