Diffstat (limited to 'fs/buffer.c')

 fs/buffer.c | 63
 1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index aa68206bd517..0f9006714230 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -356,7 +356,7 @@ static void free_more_memory(void)
 	for_each_online_pgdat(pgdat) {
 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
 		if (*zones)
-			try_to_free_pages(zones, GFP_NOFS);
+			try_to_free_pages(zones, 0, GFP_NOFS);
 	}
 }
 
@@ -676,6 +676,39 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
 /*
+ * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static int __set_page_dirty(struct page *page,
+		struct address_space *mapping, int warn)
+{
+	if (unlikely(!mapping))
+		return !TestSetPageDirty(page);
+
+	if (TestSetPageDirty(page))
+		return 0;
+
+	write_lock_irq(&mapping->tree_lock);
+	if (page->mapping) {	/* Race with truncate? */
+		WARN_ON_ONCE(warn && !PageUptodate(page));
+
+		if (mapping_cap_account_dirty(mapping)) {
+			__inc_zone_page_state(page, NR_FILE_DIRTY);
+			task_io_account_write(PAGE_CACHE_SIZE);
+		}
+		radix_tree_tag_set(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
+	}
+	write_unlock_irq(&mapping->tree_lock);
+	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+	return 1;
+}
+
+/*
  * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
@@ -702,7 +735,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode)
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-	struct address_space * const mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -719,21 +752,7 @@ int __set_page_dirty_buffers(struct page *page)
 	}
 	spin_unlock(&mapping->private_lock);
 
-	if (TestSetPageDirty(page))
-		return 0;
-
-	write_lock_irq(&mapping->tree_lock);
-	if (page->mapping) {	/* Race with truncate? */
-		if (mapping_cap_account_dirty(mapping)) {
-			__inc_zone_page_state(page, NR_FILE_DIRTY);
-			task_io_account_write(PAGE_CACHE_SIZE);
-		}
-		radix_tree_tag_set(&mapping->page_tree,
-				page_index(page), PAGECACHE_TAG_DIRTY);
-	}
-	write_unlock_irq(&mapping->tree_lock);
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return 1;
+	return __set_page_dirty(page, mapping, 1);
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -982,7 +1001,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	struct buffer_head *bh;
 
 	page = find_or_create_page(inode->i_mapping, index,
-		mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
 	if (!page)
 		return NULL;
 
@@ -1026,11 +1045,6 @@ failed:
 /*
  * Create buffers for the specified block device block's page. If
  * that page was dirty, the buffers are set dirty also.
- *
- * Except that's a bug. Attaching dirty buffers to a dirty
- * blockdev's page can result in filesystem corruption, because
- * some of those buffers may be aliases of filesystem data.
- * grow_dev_page() will go BUG() if this happens.
  */
 static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
@@ -1137,8 +1151,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  */
 void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
+	WARN_ON_ONCE(!buffer_uptodate(bh));
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-		__set_page_dirty_nobuffers(bh->b_page);
+		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
 /*