author	Mark Fasheh <mark.fasheh@oracle.com>	2007-03-09 19:43:28 -0500
committer	Mark Fasheh <mark.fasheh@oracle.com>	2007-07-10 20:32:04 -0400
commit	2ae99a60374f360ba07037ebbf33d19b89ac43a6 (patch)
tree	ce83db2022a28deb8c402fca7c08cf924ee8e608	/fs/ocfs2/file.c
parent	b27b7cbcf12a1bfff1ed68a73ddd7d11edc20daf (diff)
ocfs2: Support creation of unwritten extents
This can now be trivially supported with re-use of our existing extend
code. ocfs2_allocate_unwritten_extents() takes a start offset and a byte
length and iterates over the inode, adding extents (marked as unwritten)
until len is reached. Existing extents are skipped over.

Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
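The byte-to-cluster conversion at the top of ocfs2_allocate_unwritten_extents()
rounds the end of the requested range up to a whole cluster before subtracting
the starting cluster. A minimal stand-alone sketch of that arithmetic, assuming
4096-byte clusters (the kernel reads the real cluster size from the superblock
and uses ocfs2_clusters_for_bytes() for the rounding):

/* Stand-alone illustration of the cluster-range math; 4096-byte
 * clusters are an assumption, not read from a superblock. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 10000, len = 50000;	/* byte range to allocate */
	unsigned int bits = 12;			/* assumed: 4096-byte clusters */

	/* first cluster touched by the range */
	uint32_t cpos = (uint32_t)(start >> bits);
	/* clusters needed to reach start + len, rounding the end up to a
	 * whole cluster the way ocfs2_clusters_for_bytes() does */
	uint32_t clusters =
		(uint32_t)((start + len + (1ULL << bits) - 1) >> bits) - cpos;

	printf("cpos %u, clusters %u\n", cpos, clusters);
	return 0;
}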
Diffstat (limited to 'fs/ocfs2/file.c')
-rw-r--r--	fs/ocfs2/file.c	119
1 files changed, 96 insertions, 23 deletions
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 6745086da6fd..3e21ad9a6dde 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -425,6 +425,7 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
 			       struct inode *inode,
 			       u32 *logical_offset,
 			       u32 clusters_to_add,
+			       int mark_unwritten,
 			       struct buffer_head *fe_bh,
 			       handle_t *handle,
 			       struct ocfs2_alloc_context *data_ac,
@@ -437,9 +438,13 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
 	enum ocfs2_alloc_restarted reason = RESTART_NONE;
 	u32 bit_off, num_bits;
 	u64 block;
+	u8 flags = 0;
 
 	BUG_ON(!clusters_to_add);
 
+	if (mark_unwritten)
+		flags = OCFS2_EXT_UNWRITTEN;
+
 	free_extents = ocfs2_num_free_extents(osb, inode, fe);
 	if (free_extents < 0) {
 		status = free_extents;
@@ -489,7 +494,7 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
 		     num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
 		status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
 					     *logical_offset, block, num_bits,
-					     meta_ac);
+					     flags, meta_ac);
 		if (status < 0) {
 			mlog_errno(status);
 			goto leave;
@@ -522,9 +527,11 @@ leave:
  * For a given allocation, determine which allocators will need to be
  * accessed, and lock them, reserving the appropriate number of bits.
  *
- * Called from ocfs2_extend_allocation() for file systems which don't
- * support holes, and from ocfs2_write() for file systems which
- * understand sparse inodes.
+ * Sparse file systems call this from ocfs2_write_begin_nolock()
+ * and ocfs2_allocate_unwritten_extents().
+ *
+ * File systems which don't support holes call this from
+ * ocfs2_extend_allocation().
  */
 int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
 			  u32 clusters_to_add, u32 extents_to_split,
@@ -595,14 +602,13 @@ out:
 	return ret;
 }
 
-static int ocfs2_extend_allocation(struct inode *inode,
-				   u32 clusters_to_add)
+static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
+				     u32 clusters_to_add, int mark_unwritten)
 {
 	int status = 0;
 	int restart_func = 0;
-	int drop_alloc_sem = 0;
 	int credits;
-	u32 prev_clusters, logical_start;
+	u32 prev_clusters;
 	struct buffer_head *bh = NULL;
 	struct ocfs2_dinode *fe = NULL;
 	handle_t *handle = NULL;
@@ -617,7 +623,7 @@ static int ocfs2_extend_allocation(struct inode *inode,
 	 * This function only exists for file systems which don't
 	 * support holes.
 	 */
-	BUG_ON(ocfs2_sparse_alloc(osb));
+	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
 
 	status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
 				  OCFS2_BH_CACHED, inode);
@@ -633,18 +639,9 @@ static int ocfs2_extend_allocation(struct inode *inode,
 		goto leave;
 	}
 
-	logical_start = OCFS2_I(inode)->ip_clusters;
-
 restart_all:
 	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
 
-	/* blocks peope in read/write from reading our allocation
-	 * until we're done changing it. We depend on i_mutex to block
-	 * other extend/truncate calls while we're here. Ordering wrt
-	 * start_trans is important here -- always do it before! */
-	down_write(&OCFS2_I(inode)->ip_alloc_sem);
-	drop_alloc_sem = 1;
-
 	status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
 				       &meta_ac);
 	if (status) {
@@ -678,6 +675,7 @@ restarted_transaction:
 					    inode,
 					    &logical_start,
 					    clusters_to_add,
+					    mark_unwritten,
 					    bh,
 					    handle,
 					    data_ac,
@@ -730,10 +728,6 @@ restarted_transaction:
 	     OCFS2_I(inode)->ip_clusters, i_size_read(inode));
 
 leave:
-	if (drop_alloc_sem) {
-		up_write(&OCFS2_I(inode)->ip_alloc_sem);
-		drop_alloc_sem = 0;
-	}
 	if (handle) {
 		ocfs2_commit_trans(osb, handle);
 		handle = NULL;
@@ -759,6 +753,25 @@ leave:
 	return status;
 }
 
+static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
+				   u32 clusters_to_add, int mark_unwritten)
+{
+	int ret;
+
+	/*
+	 * The alloc sem blocks peope in read/write from reading our
+	 * allocation until we're done changing it. We depend on
+	 * i_mutex to block other extend/truncate calls while we're
+	 * here.
+	 */
+	down_write(&OCFS2_I(inode)->ip_alloc_sem);
+	ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
+					mark_unwritten);
+	up_write(&OCFS2_I(inode)->ip_alloc_sem);
+
+	return ret;
+}
+
 /* Some parts of this taken from generic_cont_expand, which turned out
  * to be too fragile to do exactly what we need without us having to
  * worry about recursive locking in ->prepare_write() and
@@ -900,7 +913,9 @@ static int ocfs2_extend_file(struct inode *inode,
 	}
 
 	if (clusters_to_add) {
-		ret = ocfs2_extend_allocation(inode, clusters_to_add);
+		ret = ocfs2_extend_allocation(inode,
+					      OCFS2_I(inode)->ip_clusters,
+					      clusters_to_add, 0);
 		if (ret < 0) {
 			mlog_errno(ret);
 			goto out_unlock;
@@ -1176,6 +1191,64 @@ out:
 	return ret;
 }
 
+/*
+ * Allocate enough extents to cover the region starting at byte offset
+ * start for len bytes. Existing extents are skipped, any extents
+ * added are marked as "unwritten".
+ */
+static int ocfs2_allocate_unwritten_extents(struct inode *inode,
+					    u64 start, u64 len)
+{
+	int ret;
+	u32 cpos, phys_cpos, clusters, alloc_size;
+
+	/*
+	 * We consider both start and len to be inclusive.
+	 */
+	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
+	clusters -= cpos;
+
+	while (clusters) {
+		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
+					 &alloc_size, NULL);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		/*
+		 * Hole or existing extent len can be arbitrary, so
+		 * cap it to our own allocation request.
+		 */
+		if (alloc_size > clusters)
+			alloc_size = clusters;
+
+		if (phys_cpos) {
+			/*
+			 * We already have an allocation at this
+			 * region so we can safely skip it.
+			 */
+			goto next;
+		}
+
+		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
+		if (ret) {
+			if (ret != -ENOSPC)
+				mlog_errno(ret);
+			goto out;
+		}
+
+next:
+		cpos += alloc_size;
+		clusters -= alloc_size;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
 					 loff_t *ppos,
 					 size_t count,
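
ocfs2_allocate_unwritten_extents() gains no caller in this patch. Purely as a
sketch, a hypothetical caller inside fs/ocfs2/file.c might drive it as shown
below; the cluster-lock helpers named here are assumptions for illustration
only, the real requirement being that i_mutex and the inode's cluster locks are
held around the allocation, as on the existing extend path:

/* Hypothetical caller sketch -- not part of this patch. */
static int ocfs2_reserve_range_example(struct inode *inode, u64 start, u64 len)
{
	int ret;

	/* Serialize against other extend/truncate callers. */
	mutex_lock(&inode->i_mutex);

	/* Assumed cluster-lock helper; the real name may differ. */
	ret = ocfs2_meta_lock(inode, NULL, 1);
	if (ret)
		goto out_mutex;

	ret = ocfs2_allocate_unwritten_extents(inode, start, len);

	ocfs2_meta_unlock(inode, 1);
out_mutex:
	mutex_unlock(&inode->i_mutex);
	return ret;
}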