aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4/extents.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/ext4/extents.c')
-rw-r--r--fs/ext4/extents.c38
1 file changed, 12 insertions, 26 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 9b119308daea..ad39627c1fbc 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3296,28 +3296,9 @@ static int ext4_find_delalloc_range(struct inode *inode,
3296 3296
3297 while ((i >= lblk_start) && (i <= lblk_end)) { 3297 while ((i >= lblk_start) && (i <= lblk_end)) {
3298 page = find_get_page(mapping, index); 3298 page = find_get_page(mapping, index);
3299 if (!page || !PageDirty(page)) 3299 if (!page)
3300 goto nextpage; 3300 goto nextpage;
3301 3301
3302 if (PageWriteback(page)) {
3303 /*
3304 * This might be a race with allocation and writeout. In
3305 * this case we just assume that the rest of the range
3306 * will eventually be written and there wont be any
3307 * delalloc blocks left.
3308 * TODO: the above assumption is troublesome, but might
3309 * work better in practice. other option could be note
3310 * somewhere that the cluster is getting written out and
3311 * detect that here.
3312 */
3313 page_cache_release(page);
3314 trace_ext4_find_delalloc_range(inode,
3315 lblk_start, lblk_end,
3316 search_hint_reverse,
3317 0, i);
3318 return 0;
3319 }
3320
3321 if (!page_has_buffers(page)) 3302 if (!page_has_buffers(page))
3322 goto nextpage; 3303 goto nextpage;
3323 3304
@@ -3340,7 +3321,11 @@ static int ext4_find_delalloc_range(struct inode *inode,
3340 continue; 3321 continue;
3341 } 3322 }
3342 3323
3343 if (buffer_delay(bh)) { 3324 /* Check if the buffer is delayed allocated and that it
3325 * is not yet mapped. (when da-buffers are mapped during
3326 * their writeout, their da_mapped bit is set.)
3327 */
3328 if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3344 page_cache_release(page); 3329 page_cache_release(page);
3345 trace_ext4_find_delalloc_range(inode, 3330 trace_ext4_find_delalloc_range(inode,
3346 lblk_start, lblk_end, 3331 lblk_start, lblk_end,
@@ -4106,6 +4091,7 @@ got_allocated_blocks:
4106 ext4_da_update_reserve_space(inode, allocated_clusters, 4091 ext4_da_update_reserve_space(inode, allocated_clusters,
4107 1); 4092 1);
4108 if (reserved_clusters < allocated_clusters) { 4093 if (reserved_clusters < allocated_clusters) {
4094 struct ext4_inode_info *ei = EXT4_I(inode);
4109 int reservation = allocated_clusters - 4095 int reservation = allocated_clusters -
4110 reserved_clusters; 4096 reserved_clusters;
4111 /* 4097 /*
@@ -4148,11 +4134,11 @@ got_allocated_blocks:
4148 * remaining blocks finally gets written, we 4134 * remaining blocks finally gets written, we
4149 * could claim them. 4135 * could claim them.
4150 */ 4136 */
4151 while (reservation) { 4137 dquot_reserve_block(inode,
4152 ext4_da_reserve_space(inode, 4138 EXT4_C2B(sbi, reservation));
4153 map->m_lblk); 4139 spin_lock(&ei->i_block_reservation_lock);
4154 reservation--; 4140 ei->i_reserved_data_blocks += reservation;
4155 } 4141 spin_unlock(&ei->i_block_reservation_lock);
4156 } 4142 }
4157 } 4143 }
4158 } 4144 }