about summary refs log tree commit diff stats
path: root/fs/btrfs/file.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2007-12-21 16:27:21 -0500
committerChris Mason <chris.mason@oracle.com>2008-09-25 11:03:58 -0400
commit1832a6d5ee3b1af61001cadba9e10da9e91af4a4 (patch)
tree3a6dcb2a186c3623b0355b7e45d1b94ecae5e54d /fs/btrfs/file.c
parent01f466580502c57001bf80fff709479fdb9e87a5 (diff)
Btrfs: Implement basic support for -ENOSPC
This is intended to prevent accidentally filling the drive. A determined user can still make things oops. It includes some accounting of the current bytes under delayed allocation, but this will change as things get optimized. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--fs/btrfs/file.c28
1 file changed, 24 insertions, 4 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 461b09663fed..71dc2d33b6c6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -307,6 +307,7 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
307 inline_size > 32768 || 307 inline_size > 32768 ||
308 inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { 308 inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
309 u64 last_end; 309 u64 last_end;
310 u64 existing_delalloc = 0;
310 311
311 for (i = 0; i < num_pages; i++) { 312 for (i = 0; i < num_pages; i++) {
312 struct page *p = pages[i]; 313 struct page *p = pages[i];
@@ -316,8 +317,19 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
316 last_end = (u64)(pages[num_pages -1]->index) << 317 last_end = (u64)(pages[num_pages -1]->index) <<
317 PAGE_CACHE_SHIFT; 318 PAGE_CACHE_SHIFT;
318 last_end += PAGE_CACHE_SIZE - 1; 319 last_end += PAGE_CACHE_SIZE - 1;
320 if (start_pos < isize) {
321 u64 delalloc_start = start_pos;
322 existing_delalloc = count_range_bits(em_tree,
323 &delalloc_start,
324 end_of_last_block, (u64)-1,
325 EXTENT_DELALLOC);
326 }
319 set_extent_delalloc(em_tree, start_pos, end_of_last_block, 327 set_extent_delalloc(em_tree, start_pos, end_of_last_block,
320 GFP_NOFS); 328 GFP_NOFS);
329 spin_lock(&root->fs_info->delalloc_lock);
330 root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
331 start_pos) - existing_delalloc;
332 spin_unlock(&root->fs_info->delalloc_lock);
321 } else { 333 } else {
322 u64 aligned_end; 334 u64 aligned_end;
323 /* step one, delete the existing extents in this range */ 335 /* step one, delete the existing extents in this range */
@@ -708,12 +720,12 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
708 current->backing_dev_info = inode->i_mapping->backing_dev_info; 720 current->backing_dev_info = inode->i_mapping->backing_dev_info;
709 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 721 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
710 if (err) 722 if (err)
711 goto out; 723 goto out_nolock;
712 if (count == 0) 724 if (count == 0)
713 goto out; 725 goto out_nolock;
714 err = remove_suid(fdentry(file)); 726 err = remove_suid(fdentry(file));
715 if (err) 727 if (err)
716 goto out; 728 goto out_nolock;
717 file_update_time(file); 729 file_update_time(file);
718 730
719 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 731 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
@@ -758,6 +770,13 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
758 770
759 WARN_ON(num_pages > nrptrs); 771 WARN_ON(num_pages > nrptrs);
760 memset(pages, 0, sizeof(pages)); 772 memset(pages, 0, sizeof(pages));
773
774 mutex_lock(&root->fs_info->fs_mutex);
775 ret = btrfs_check_free_space(root, write_bytes, 0);
776 mutex_unlock(&root->fs_info->fs_mutex);
777 if (ret)
778 goto out;
779
761 ret = prepare_pages(root, file, pages, num_pages, 780 ret = prepare_pages(root, file, pages, num_pages,
762 pos, first_index, last_index, 781 pos, first_index, last_index,
763 write_bytes); 782 write_bytes);
@@ -787,8 +806,9 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
787 btrfs_btree_balance_dirty(root, 1); 806 btrfs_btree_balance_dirty(root, 1);
788 cond_resched(); 807 cond_resched();
789 } 808 }
790 mutex_unlock(&inode->i_mutex);
791out: 809out:
810 mutex_unlock(&inode->i_mutex);
811out_nolock:
792 kfree(pages); 812 kfree(pages);
793 if (pinned[0]) 813 if (pinned[0])
794 page_cache_release(pinned[0]); 814 page_cache_release(pinned[0]);