Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/ext4.h          |  11
-rw-r--r--   fs/ext4/ext4_extents.h  |   3
-rw-r--r--   fs/ext4/extents.c       |  70
-rw-r--r--   fs/ext4/inode.c         | 150
-rw-r--r--   fs/ext4/super.c         |   1
5 files changed, 153 insertions(+), 82 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 56f9271ee8cc..874d169a193e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -361,14 +361,11 @@ struct ext4_new_group_data {
    so set the magic i_delalloc_reserve_flag after taking the
    inode allocation semaphore for */
 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE        0x0004
-        /* Call ext4_da_update_reserve_space() after successfully
-           allocating the blocks */
-#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE    0x0008
         /* caller is from the direct IO path, request to creation of an
         unitialized extents if not allocated, split the uninitialized
         extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_DIO                     0x0010
-#define EXT4_GET_BLOCKS_CONVERT                 0x0020
+#define EXT4_GET_BLOCKS_DIO                     0x0008
+#define EXT4_GET_BLOCKS_CONVERT                 0x0010
 #define EXT4_GET_BLOCKS_DIO_CREATE_EXT          (EXT4_GET_BLOCKS_DIO|\
                                          EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
         /* Convert extent to initialized after direct IO complete */
@@ -699,6 +696,8 @@ struct ext4_inode_info {
         unsigned int i_reserved_meta_blocks;
         unsigned int i_allocated_meta_blocks;
         unsigned short i_delalloc_reserved_flag;
+        sector_t i_da_metadata_calc_last_lblock;
+        int i_da_metadata_calc_len;
 
         /* on-disk additional length */
         __u16 i_extra_isize;
@@ -1441,6 +1440,8 @@ extern int ext4_block_truncate_page(handle_t *handle,
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern int flush_aio_dio_completed_IO(struct inode *inode);
+extern void ext4_da_update_reserve_space(struct inode *inode,
+                                        int used, int quota_claim);
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
 extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 2ca686454e87..bdb6ce7e2eb4 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -225,7 +225,8 @@ static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
         ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
 }
 
-extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
+extern int ext4_ext_calc_metadata_amount(struct inode *inode,
+                                         sector_t lblocks);
 extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
 extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
 extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 91ae46098ea4..765a4826b118 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -296,29 +296,44 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
  * to allocate @blocks
  * Worse case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
+int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
 {
-        int lcap, icap, rcap, leafs, idxs, num;
-        int newextents = blocks;
-
-        rcap = ext4_ext_space_root_idx(inode, 0);
-        lcap = ext4_ext_space_block(inode, 0);
-        icap = ext4_ext_space_block_idx(inode, 0);
+        struct ext4_inode_info *ei = EXT4_I(inode);
+        int idxs, num = 0;
 
-        /* number of new leaf blocks needed */
-        num = leafs = (newextents + lcap - 1) / lcap;
+        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+                / sizeof(struct ext4_extent_idx));
 
         /*
-         * Worse case, we need separate index block(s)
-         * to link all new leaf blocks
+         * If the new delayed allocation block is contiguous with the
+         * previous da block, it can share index blocks with the
+         * previous block, so we only need to allocate a new index
+         * block every idxs leaf blocks.  At ldxs**2 blocks, we need
+         * an additional index block, and at ldxs**3 blocks, yet
+         * another index blocks.
          */
-        idxs = (leafs + icap - 1) / icap;
-        do {
-                num += idxs;
-                idxs = (idxs + icap - 1) / icap;
-        } while (idxs > rcap);
+        if (ei->i_da_metadata_calc_len &&
+            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
+                if ((ei->i_da_metadata_calc_len % idxs) == 0)
+                        num++;
+                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
+                        num++;
+                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
+                        num++;
+                        ei->i_da_metadata_calc_len = 0;
+                } else
+                        ei->i_da_metadata_calc_len++;
+                ei->i_da_metadata_calc_last_lblock++;
+                return num;
+        }
 
-        return num;
+        /*
+         * In the worst case we need a new set of index blocks at
+         * every level of the inode's extent tree.
+         */
+        ei->i_da_metadata_calc_len = 1;
+        ei->i_da_metadata_calc_last_lblock = lblock;
+        return ext_depth(inode) + 1;
 }
 
 static int
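
The hunk above charges extent metadata incrementally, per logical block: while each delayed-allocation block is contiguous with the previous one, a new index block is charged only once every idxs leaf entries, with the second and third tree levels charged at the idxs**2 and idxs**3 boundaries, and a discontiguous block falls back to the worst case of one new block per level of the extent tree. The standalone sketch below is illustrative only and not part of the patch; da_calc_state and calc_metadata are stand-ins for the new ext4_inode_info fields and for ext4_ext_calc_metadata_amount():

/*
 * Illustrative only -- not part of the patch.  Models the contiguous-case
 * accounting of ext4_ext_calc_metadata_amount() with a local struct
 * (da_calc_state) standing in for the fields added to ext4_inode_info.
 */
#include <stdio.h>

struct da_calc_state {
        unsigned long last_lblock;      /* last logical block accounted for */
        int len;                        /* length of the contiguous run so far */
};

static int calc_metadata(struct da_calc_state *st, unsigned long lblock,
                         int idxs, int tree_depth)
{
        int num = 0;

        if (st->len && st->last_lblock + 1 == lblock) {
                /* contiguous: charge an index block only at level boundaries */
                if ((st->len % idxs) == 0)
                        num++;
                if ((st->len % (idxs * idxs)) == 0)
                        num++;
                if ((st->len % (idxs * idxs * idxs)) == 0) {
                        num++;
                        st->len = 0;
                } else
                        st->len++;
                st->last_lblock++;
                return num;
        }
        /* discontiguous: worst case, one new index block per tree level */
        st->len = 1;
        st->last_lblock = lblock;
        return tree_depth + 1;
}

int main(void)
{
        struct da_calc_state st = { 0, 0 };
        int idxs = 340;         /* roughly (4096 - header) / 12 for 4 KB blocks */
        int total = 0;
        unsigned long b;

        for (b = 0; b < 1000; b++)      /* 1000 contiguous delayed blocks */
                total += calc_metadata(&st, b, idxs, 1);
        printf("metadata blocks reserved: %d\n", total);
        return 0;
}

With roughly 340 index entries per 4 KB block, the run above ends up charged only about four metadata blocks in total.
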
@@ -3117,7 +3132,19 @@ out:
                         unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
                                                 newblock + max_blocks,
                                                 allocated - max_blocks);
+                        allocated = max_blocks;
                 }
+
+        /*
+         * If we have done fallocate with the offset that is already
+         * delayed allocated, we would have block reservation
+         * and quota reservation done in the delayed write path.
+         * But fallocate would have already updated quota and block
+         * count for this offset. So cancel these reservation
+         */
+        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+                ext4_da_update_reserve_space(inode, allocated, 0);
+
 map_out:
         set_buffer_mapped(bh_result);
 out1:
@@ -3353,9 +3380,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
         /* previous routine could use block we allocated */
         newblock = ext_pblock(&newex);
         allocated = ext4_ext_get_actual_len(&newex);
+        if (allocated > max_blocks)
+                allocated = max_blocks;
         set_buffer_new(bh_result);
 
         /*
+         * Update reserved blocks/metadata blocks after successful
+         * block allocation which had been deferred till now.
+         */
+        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+                ext4_da_update_reserve_space(inode, allocated, 1);
+
+        /*
          * Cache the extent and update transaction to commit on fdatasync only
          * when it is _not_ an uninitialized extent.
          */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 84eeb8f515a3..e11952404e02 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1009,49 +1009,56 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
         return &EXT4_I(inode)->i_reserved_quota;
 }
 #endif
+
 /*
  * Calculate the number of metadata blocks need to reserve
- * to allocate @blocks for non extent file based file
+ * to allocate a new block at @lblocks for non extent file based file
  */
-static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_indirect_calc_metadata_amount(struct inode *inode,
+                                              sector_t lblock)
 {
-        int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-        int ind_blks, dind_blks, tind_blks;
-
-        /* number of new indirect blocks needed */
-        ind_blks = (blocks + icap - 1) / icap;
+        struct ext4_inode_info *ei = EXT4_I(inode);
+        int dind_mask = EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1;
+        int blk_bits;
 
-        dind_blks = (ind_blks + icap - 1) / icap;
+        if (lblock < EXT4_NDIR_BLOCKS)
+                return 0;
 
-        tind_blks = 1;
+        lblock -= EXT4_NDIR_BLOCKS;
 
-        return ind_blks + dind_blks + tind_blks;
+        if (ei->i_da_metadata_calc_len &&
+            (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
+                ei->i_da_metadata_calc_len++;
+                return 0;
+        }
+        ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
+        ei->i_da_metadata_calc_len = 1;
+        blk_bits = roundup_pow_of_two(lblock + 1);
+        return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
 }
 
 /*
  * Calculate the number of metadata blocks need to reserve
- * to allocate given number of blocks
+ * to allocate a block located at @lblock
  */
-static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
 {
-        if (!blocks)
-                return 0;
-
         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
-                return ext4_ext_calc_metadata_amount(inode, blocks);
+                return ext4_ext_calc_metadata_amount(inode, lblock);
 
-        return ext4_indirect_calc_metadata_amount(inode, blocks);
+        return ext4_indirect_calc_metadata_amount(inode, lblock);
 }
 
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
  */
-static void ext4_da_update_reserve_space(struct inode *inode, int used)
+void ext4_da_update_reserve_space(struct inode *inode,
+                                        int used, int quota_claim)
 {
         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
         struct ext4_inode_info *ei = EXT4_I(inode);
-        int mdb_free = 0;
+        int mdb_free = 0, allocated_meta_blocks = 0;
 
         spin_lock(&ei->i_block_reservation_lock);
         if (unlikely(used > ei->i_reserved_data_blocks)) {
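
For indirect-mapped files the cost of mapping one new block is bounded by the depth of the indirect chain that its logical block number falls in, and the cached i_da_metadata_calc_* fields again avoid re-charging blocks that land under an already-counted mapping block. As a rough orientation, not part of the patch and assuming a 4 KB block size (1024 block pointers per block), the ranges covered by each level are printed by this sketch:

/*
 * Illustrative only -- not part of the patch.  For a 4 KB block size
 * (1024 block pointers per block), print how many logical blocks each
 * level of the ext2/3/4 indirect-mapping tree covers and how many new
 * mapping blocks a single block in that range can need at worst.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long apb = 1024;  /* addresses per 4 KB block */
        unsigned long long ndir = 12;   /* direct blocks (EXT4_NDIR_BLOCKS) */

        printf("direct          : blocks 0..%llu, 0 mapping blocks\n", ndir - 1);
        printf("indirect        : next %llu blocks, at most 1 new mapping block\n", apb);
        printf("double indirect : next %llu blocks, at most 2 new mapping blocks\n", apb * apb);
        printf("triple indirect : next %llu blocks, at most 3 new mapping blocks\n", apb * apb * apb);
        return 0;
}

So a single new block past the direct range needs at most one, two, or three new mapping blocks, depending on whether it sits in the indirect, double-indirect, or triple-indirect range.
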
@@ -1067,6 +1074,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
         ei->i_reserved_data_blocks -= used;
         used += ei->i_allocated_meta_blocks;
         ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+        allocated_meta_blocks = ei->i_allocated_meta_blocks;
         ei->i_allocated_meta_blocks = 0;
         percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
 
@@ -1076,16 +1084,31 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
                 * only when we have written all of the delayed
                 * allocation blocks.
                 */
-                mdb_free = ei->i_allocated_meta_blocks;
+                mdb_free = ei->i_reserved_meta_blocks;
+                ei->i_reserved_meta_blocks = 0;
+                ei->i_da_metadata_calc_len = 0;
                 percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
-                ei->i_allocated_meta_blocks = 0;
         }
         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
         /* Update quota subsystem */
-        vfs_dq_claim_block(inode, used);
-        if (mdb_free)
-                vfs_dq_release_reservation_block(inode, mdb_free);
+        if (quota_claim) {
+                vfs_dq_claim_block(inode, used);
+                if (mdb_free)
+                        vfs_dq_release_reservation_block(inode, mdb_free);
+        } else {
+                /*
+                 * We did fallocate with an offset that is already delayed
+                 * allocated. So on delayed allocated writeback we should
+                 * not update the quota for allocated blocks. But then
+                 * converting an fallocate region to initialized region would
+                 * have caused a metadata allocation. So claim quota for
+                 * that
+                 */
+                if (allocated_meta_blocks)
+                        vfs_dq_claim_block(inode, allocated_meta_blocks);
+                vfs_dq_release_reservation_block(inode, mdb_free + used);
+        }
 
         /*
          * If we have done all the pending block allocations and if
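
A worked example for the quota_claim branch above, not part of the patch and assuming the writeout covers all of the inode's delayed data blocks (so the leftover metadata reservation is released): with 10 data blocks written, 1 metadata block actually allocated, and 2 metadata blocks reserved up front, used becomes 11 and mdb_free becomes 1.

/*
 * Illustrative only -- not part of the patch.  Worked numbers for the
 * quota_claim branch of ext4_da_update_reserve_space(), assuming this
 * writeout covers all of the inode's delayed data blocks.
 */
#include <stdio.h>

int main(void)
{
        int data_written = 10;          /* delayed data blocks written out */
        int allocated_meta = 1;         /* metadata blocks actually allocated */
        int reserved_meta = 2;          /* metadata blocks reserved up front */

        int used = data_written + allocated_meta;       /* claimed: 11 */
        int mdb_free = reserved_meta - allocated_meta;  /* released: 1 */

        printf("vfs_dq_claim_block(inode, %d)\n", used);
        printf("vfs_dq_release_reservation_block(inode, %d)\n", mdb_free);
        return 0;
}

On the quota_claim == 0 path the data blocks were already charged when fallocate ran, so the reservation is handed back instead, and only the metadata allocated while converting the uninitialized extent is claimed.
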
@@ -1285,18 +1308,20 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
                         */
                        EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
                }
-        }
 
+                /*
+                 * Update reserved blocks/metadata blocks after successful
+                 * block allocation which had been deferred till now. We don't
+                 * support fallocate for non extent files. So we can update
+                 * reserve space here.
+                 */
+                if ((retval > 0) &&
+                        (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
+                        ext4_da_update_reserve_space(inode, retval, 1);
+        }
         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
 
-        /*
-         * Update reserved blocks/metadata blocks after successful
-         * block allocation which had been deferred till now.
-         */
-        if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
-                ext4_da_update_reserve_space(inode, retval);
-
         up_write((&EXT4_I(inode)->i_data_sem));
         if (retval > 0 && buffer_mapped(bh)) {
                 int ret = check_block_validity(inode, "file system "
@@ -1802,12 +1827,15 @@ static int ext4_journalled_write_end(struct file *file,
         return ret ? ret : copied;
 }
 
-static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
+/*
+ * Reserve a single block located at lblock
+ */
+static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
 {
         int retries = 0;
         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
         struct ext4_inode_info *ei = EXT4_I(inode);
-        unsigned long md_needed, md_reserved, total = 0;
+        unsigned long md_needed, md_reserved;
 
         /*
          * recalculate the amount of metadata blocks to reserve
@@ -1817,8 +1845,7 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 repeat:
         spin_lock(&ei->i_block_reservation_lock);
         md_reserved = ei->i_reserved_meta_blocks;
-        md_needed = ext4_calc_metadata_amount(inode, nrblocks);
-        total = md_needed + nrblocks;
+        md_needed = ext4_calc_metadata_amount(inode, lblock);
         spin_unlock(&ei->i_block_reservation_lock);
 
         /*
@@ -1826,31 +1853,19 @@ repeat:
         * later. Real quota accounting is done at pages writeout
         * time.
         */
-        if (vfs_dq_reserve_block(inode, total)) {
-                /*
-                 * We tend to badly over-estimate the amount of
-                 * metadata blocks which are needed, so if we have
-                 * reserved any metadata blocks, try to force out the
-                 * inode and see if we have any better luck.
-                 */
-                if (md_reserved && retries++ <= 3)
-                        goto retry;
+        if (vfs_dq_reserve_block(inode, md_needed + 1))
                 return -EDQUOT;
-        }
 
-        if (ext4_claim_free_blocks(sbi, total)) {
-                vfs_dq_release_reservation_block(inode, total);
+        if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+                vfs_dq_release_reservation_block(inode, md_needed + 1);
                 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
-        retry:
-                        if (md_reserved)
-                                write_inode_now(inode, (retries == 3));
                         yield();
                         goto repeat;
                 }
                 return -ENOSPC;
         }
         spin_lock(&ei->i_block_reservation_lock);
-        ei->i_reserved_data_blocks += nrblocks;
+        ei->i_reserved_data_blocks++;
         ei->i_reserved_meta_blocks += md_needed;
         spin_unlock(&ei->i_block_reservation_lock);
 
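
Not part of the patch: a toy model of the reworked per-block flow in ext4_da_reserve_space(), where every delayed block reserves exactly one data block plus whatever incremental metadata the calculator reports (the md_needed + 1 passed to the quota and free-block checks above); calc_md and da_reserve_block are illustrative stand-ins.

/*
 * Illustrative only -- not part of the patch.  Toy model of the per-block
 * reservation in ext4_da_reserve_space(): each delayed-allocation block
 * reserves one data block plus the incremental metadata reported by the
 * calculator, i.e. the "md_needed + 1" seen in the hunk above.
 */
#include <stdio.h>

struct rsv { long data, meta; };

/* stand-in for ext4_calc_metadata_amount(): charge one metadata block for
 * the first block of a contiguous run, nothing for the rest */
static int calc_md(long lblock, long *last)
{
        int md = (*last + 1 == lblock) ? 0 : 1;

        *last = lblock;
        return md;
}

static int da_reserve_block(struct rsv *r, long lblock, long *last)
{
        int md_needed = calc_md(lblock, last);

        /* the real code checks quota and free space for md_needed + 1 here */
        r->data += 1;
        r->meta += md_needed;
        return 0;
}

int main(void)
{
        struct rsv r = { 0, 0 };
        long last = -2, b;

        for (b = 100; b < 110; b++)     /* ten contiguous delayed blocks */
                da_reserve_block(&r, b, &last);
        printf("reserved: %ld data blocks, %ld metadata blocks\n", r.data, r.meta);
        return 0;
}

For a contiguous run the calculator returns 0 almost every time, so the reservation grows by roughly one block per data block instead of by a fresh worst-case metadata estimate on every call.
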
@@ -1889,8 +1904,9 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
                 * only when we have written all of the delayed
                 * allocation blocks.
                 */
-                to_free += ei->i_allocated_meta_blocks;
-                ei->i_allocated_meta_blocks = 0;
+                to_free += ei->i_reserved_meta_blocks;
+                ei->i_reserved_meta_blocks = 0;
+                ei->i_da_metadata_calc_len = 0;
         }
 
         /* update fs dirty blocks counter */
@@ -2203,10 +2219,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
          * variables are updated after the blocks have been allocated.
          */
         new.b_state = 0;
-        get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
-                            EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+        get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
         if (mpd->b_state & (1 << BH_Delay))
-                get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
+                get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
         blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
                                &new, get_blocks_flags);
         if (blks < 0) {
@@ -2504,7 +2520,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
          * XXX: __block_prepare_write() unmaps passed block,
          * is it OK?
          */
-        ret = ext4_da_reserve_space(inode, 1);
+        ret = ext4_da_reserve_space(inode, iblock);
         if (ret)
                 /* not enough space to reserve */
                 return ret;
@@ -3022,7 +3038,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
 {
-        int ret, retries = 0;
+        int ret, retries = 0, quota_retries = 0;
         struct page *page;
         pgoff_t index;
         unsigned from, to;
@@ -3081,6 +3097,22 @@ retry:
 
         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                 goto retry;
+
+        if ((ret == -EDQUOT) &&
+            EXT4_I(inode)->i_reserved_meta_blocks &&
+            (quota_retries++ < 3)) {
+                /*
+                 * Since we often over-estimate the number of meta
+                 * data blocks required, we may sometimes get a
+                 * spurios out of quota error even though there would
+                 * be enough space once we write the data blocks and
+                 * find out how many meta data blocks were _really_
+                 * required.  So try forcing the inode write to see if
+                 * that helps.
+                 */
+                write_inode_now(inode, (quota_retries == 3));
+                goto retry;
+        }
 out:
         return ret;
 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7cccb35c0f4d..735c20d5fd56 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -702,6 +702,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
         ei->i_reserved_data_blocks = 0;
         ei->i_reserved_meta_blocks = 0;
         ei->i_allocated_meta_blocks = 0;
+        ei->i_da_metadata_calc_len = 0;
         ei->i_delalloc_reserved_flag = 0;
         spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA