Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 168
 1 file changed, 11 insertions(+), 157 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 9f697419ed8e..a2fd743d97cb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping. Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-        int ret = 0;
-
-        if (bdev)
-                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-        return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-        struct super_block *sb = get_super(bdev);
-        if (sb) {
-                int res = fsync_super(sb);
-                drop_super(sb);
-                return res;
-        }
-        return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev -- lock a filesystem and force it into a consistent state
- * @bdev:        blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-        struct super_block *sb;
-        int error = 0;
-
-        mutex_lock(&bdev->bd_fsfreeze_mutex);
-        if (bdev->bd_fsfreeze_count > 0) {
-                bdev->bd_fsfreeze_count++;
-                sb = get_super(bdev);
-                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                return sb;
-        }
-        bdev->bd_fsfreeze_count++;
-
-        down(&bdev->bd_mount_sem);
-        sb = get_super(bdev);
-        if (sb && !(sb->s_flags & MS_RDONLY)) {
-                sb->s_frozen = SB_FREEZE_WRITE;
-                smp_wmb();
-
-                __fsync_super(sb);
-
-                sb->s_frozen = SB_FREEZE_TRANS;
-                smp_wmb();
-
-                sync_blockdev(sb->s_bdev);
-
-                if (sb->s_op->freeze_fs) {
-                        error = sb->s_op->freeze_fs(sb);
-                        if (error) {
-                                printk(KERN_ERR
-                                        "VFS:Filesystem freeze failed\n");
-                                sb->s_frozen = SB_UNFROZEN;
-                                drop_super(sb);
-                                up(&bdev->bd_mount_sem);
-                                bdev->bd_fsfreeze_count--;
-                                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                                return ERR_PTR(error);
-                        }
-                }
-        }
-
-        sync_blockdev(bdev);
-        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
-        return sb;        /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev -- unlock filesystem
- * @bdev:        blockdevice to unlock
- * @sb:          associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-        int error = 0;
-
-        mutex_lock(&bdev->bd_fsfreeze_mutex);
-        if (!bdev->bd_fsfreeze_count) {
-                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                return -EINVAL;
-        }
-
-        bdev->bd_fsfreeze_count--;
-        if (bdev->bd_fsfreeze_count > 0) {
-                if (sb)
-                        drop_super(sb);
-                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                return 0;
-        }
-
-        if (sb) {
-                BUG_ON(sb->s_bdev != bdev);
-                if (!(sb->s_flags & MS_RDONLY)) {
-                        if (sb->s_op->unfreeze_fs) {
-                                error = sb->s_op->unfreeze_fs(sb);
-                                if (error) {
-                                        printk(KERN_ERR
-                                                "VFS:Filesystem thaw failed\n");
-                                        sb->s_frozen = SB_FREEZE_TRANS;
-                                        bdev->bd_fsfreeze_count++;
-                                        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-                                        return error;
-                                }
-                        }
-                        sb->s_frozen = SB_UNFROZEN;
-                        smp_wmb();
-                        wake_up(&sb->s_wait_unfrozen);
-                }
-                drop_super(sb);
-        }
-
-        up(&bdev->bd_mount_sem);
-        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-        return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers. To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
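
The removed kerneldoc above defines the freeze protocol: freeze_bdev() takes bd_mount_sem, syncs everything out, calls ->freeze_fs, and returns the superblock (or an ERR_PTR on failure), while bd_fsfreeze_count makes freezes nest so that only the last thaw_bdev() actually unfreezes. A minimal caller sketch under those assumptions follows; the function name and the snapshot step are illustrative, not part of this diff.

#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical caller; only the freeze/thaw pairing comes from the
 * kerneldoc removed above. */
static int snapshot_frozen(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev);         /* NULL if no sb is mounted on bdev */
        if (IS_ERR(sb))
                return PTR_ERR(sb);     /* ->freeze_fs failed, nothing held */

        /* ... take the block-level snapshot here ... */

        return thaw_bdev(bdev, sb);     /* last nested freezer unfreezes */
}
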
@@ -760,15 +615,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  */
-static int __set_page_dirty(struct page *page,
+static void __set_page_dirty(struct page *page,
                         struct address_space *mapping, int warn)
 {
-        if (unlikely(!mapping))
-                return !TestSetPageDirty(page);
-
-        if (TestSetPageDirty(page))
-                return 0;
-
         spin_lock_irq(&mapping->tree_lock);
         if (page->mapping) {    /* Race with truncate? */
                 WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -785,8 +634,6 @@ static int __set_page_dirty(struct page *page,
         }
         spin_unlock_irq(&mapping->tree_lock);
         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-
-        return 1;
 }
 
 /*
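
Taken together, the two hunks above change the contract of __set_page_dirty(): it returns void, and the !mapping and TestSetPageDirty() tests are hoisted into the callers, so the function is entered only for a page whose dirty bit the caller has just set. Reassembled from the context lines, it now reads roughly as below; the dirty accounting and radix-tree tagging in the middle are not shown by this diff and are only summarized in a comment.

static void __set_page_dirty(struct page *page,
                        struct address_space *mapping, int warn)
{
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
                /* ... per-zone/bdi dirty accounting and the
                 * PAGECACHE_TAG_DIRTY radix-tree tag, unchanged
                 * and elided by the hunk above ... */
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
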
@@ -816,6 +663,7 @@ static int __set_page_dirty(struct page *page,
  */
 int __set_page_dirty_buffers(struct page *page)
 {
+        int newly_dirty;
         struct address_space *mapping = page_mapping(page);
 
         if (unlikely(!mapping))
@@ -831,9 +679,12 @@ int __set_page_dirty_buffers(struct page *page)
                         bh = bh->b_this_page;
                 } while (bh != head);
         }
+        newly_dirty = !TestSetPageDirty(page);
         spin_unlock(&mapping->private_lock);
 
-        return __set_page_dirty(page, mapping, 1);
+        if (newly_dirty)
+                __set_page_dirty(page, mapping, 1);
+        return newly_dirty;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
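
The point of this hunk is ordering: TestSetPageDirty() now runs before mapping->private_lock is dropped, so the page dirty bit and the buffers' dirty bits are set under one lock, and __set_page_dirty() is called only for a newly dirtied page. A sketch of the whole function after the patch; the buffer walk in the middle is reconstructed from the surrounding kernel source of that era rather than from the hunk itself, so details there may differ.

int __set_page_dirty_buffers(struct page *page)
{
        int newly_dirty;
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);  /* still under private_lock */
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);
        return newly_dirty;
}
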
@@ -1262,8 +1113,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
                 return;
         }
 
-        if (!test_set_buffer_dirty(bh))
-                __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
+        if (!test_set_buffer_dirty(bh)) {
+                struct page *page = bh->b_page;
+                if (!TestSetPageDirty(page))
+                        __set_page_dirty(page, page_mapping(page), 0);
+        }
 }
 
 /*
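
The final hunk applies the same caller-side test in mark_buffer_dirty(): once the buffer's dirty bit transitions from clear to set, the page bit is tried with TestSetPageDirty(), and an already-dirty page skips the tree_lock work in __set_page_dirty() entirely. A sketch of the tail of the function after the patch; the unchanged fast-path checks earlier in mark_buffer_dirty() are elided.

void mark_buffer_dirty(struct buffer_head *bh)
{
        /* ... unchanged fast-path and warn checks elided ... */

        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;

                if (!TestSetPageDirty(page))
                        __set_page_dirty(page, page_mapping(page), 0);
        }
}
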