Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/Makefile      |   3
-rw-r--r--   fs/ext4/balloc.c      |  31
-rw-r--r--   fs/ext4/dir.c         |  11
-rw-r--r--   fs/ext4/ext4_jbd2.c   |  59
-rw-r--r--   fs/ext4/extents.c     | 110
-rw-r--r--   fs/ext4/file.c        |   2
-rw-r--r--   fs/ext4/inode.c       |  85
-rw-r--r--   fs/ext4/ioctl.c       |   2
-rw-r--r--   fs/ext4/namei.c       |  13
-rw-r--r--   fs/ext4/super.c       |  17
-rw-r--r--   fs/ext4/xattr.c       |   5
11 files changed, 251 insertions, 87 deletions
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index a6acb96ebeb9..ae6e7e502ac9 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -5,7 +5,8 @@
 obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o
 
 ext4dev-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-		   ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o
+		   ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+		   ext4_jbd2.o
 
 ext4dev-$(CONFIG_EXT4DEV_FS_XATTR)	+= xattr.o xattr_user.o xattr_trusted.o
 ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL)	+= acl.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5d45582f9517..c4dd1103ccf1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -165,7 +165,7 @@ restart:
 
 	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
 	while (n) {
-		rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node);
+		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
 		if (verbose)
 			printk("reservation window 0x%p "
 			       "start: %llu, end: %llu\n",
@@ -747,7 +747,7 @@ find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
 	here = 0;
 
 	p = ((char *)bh->b_data) + (here >> 3);
-	r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
 	next = (r - ((char *)bh->b_data)) << 3;
 
 	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
@@ -966,7 +966,7 @@ static int find_next_reservable_window(
 
 		prev = rsv;
 		next = rb_next(&rsv->rsv_node);
-		rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node);
+		rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);
 
 		/*
 		 * Reached the last reservation, we can just append to the
@@ -1165,7 +1165,7 @@ retry:
 	 * check if the first free block is within the
 	 * free space we just reserved
 	 */
-	if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
 		return 0;		/* success */
 	/*
 	 * if the first free bit we found is out of the reservable space
@@ -1210,7 +1210,7 @@ static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
 	if (!next)
 		my_rsv->rsv_end += size;
 	else {
-		next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);
+		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
 
 		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
 			my_rsv->rsv_end += size;
@@ -1288,7 +1288,7 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 	}
 	/*
 	 * grp_goal is a group relative block number (if there is a goal)
-	 * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
+	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
 	 * first block is a filesystem wide block number
 	 * first block is the block number of the first block in this group
 	 */
@@ -1324,10 +1324,14 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 			if (!goal_in_my_reservation(&my_rsv->rsv_window,
 							grp_goal, group, sb))
 				grp_goal = -1;
-		} else if (grp_goal > 0 &&
-			 (my_rsv->rsv_end-grp_goal+1) < *count)
-			try_to_extend_reservation(my_rsv, sb,
-					*count-my_rsv->rsv_end + grp_goal - 1);
+		} else if (grp_goal >= 0) {
+			int curr = my_rsv->rsv_end -
+				   (grp_goal + group_first_block) + 1;
+
+			if (curr < *count)
+				try_to_extend_reservation(my_rsv, sb,
+							  *count - curr);
+		}
 
 		if ((my_rsv->rsv_start > group_last_block) ||
 				(my_rsv->rsv_end < group_first_block)) {
@@ -1525,10 +1529,8 @@ retry_alloc:
 		if (group_no >= ngroups)
 			group_no = 0;
 		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp) {
-			*errp = -EIO;
-			goto out;
-		}
+		if (!gdp)
+			goto io_error;
 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
 		/*
 		 * skip this group if the number of
@@ -1562,6 +1564,7 @@ retry_alloc:
 		 */
 		if (my_rsv) {
 			my_rsv = NULL;
+			windowsz = 0;
 			group_no = goal_group;
 			goto retry_alloc;
 		}
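
The list_entry() to rb_entry() conversions in the hunks above are cosmetic: both macros reduce to container_of(), which maps a pointer to an embedded member back to the structure that contains it. A minimal, self-contained user-space sketch of that mechanism, using hypothetical struct names rather than the real ext4 types:

#include <stddef.h>
#include <stdio.h>

/* same shape as the kernel macro; rb_entry() and list_entry() are just
 * container_of() applied to a node embedded in a larger structure */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node {                 /* stand-in for struct rb_node */
	struct node *left, *right;
};

struct reserve_window {       /* hypothetical analogue of ext4_reserve_window_node */
	unsigned long start, end;
	struct node rsv_node;
};

int main(void)
{
	struct reserve_window w = { .start = 100, .end = 107 };
	struct node *n = &w.rsv_node;     /* what a tree walk would hand back */

	/* recover the enclosing structure from the embedded node */
	struct reserve_window *rsv = container_of(n, struct reserve_window, rsv_node);
	printf("window %lu-%lu\n", rsv->start, rsv->end);
	return 0;
}
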
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f8595787a70e..da80368b66f0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -103,7 +103,7 @@ static int ext4_readdir(struct file * filp,
 	struct ext4_dir_entry_2 *de;
 	struct super_block *sb;
 	int err;
-	struct inode *inode = filp->f_dentry->d_inode;
+	struct inode *inode = filp->f_path.dentry->d_inode;
 	int ret = 0;
 
 	sb = inode->i_sb;
@@ -122,7 +122,7 @@ static int ext4_readdir(struct file * filp,
 		 * We don't set the inode dirty flag since it's not
 		 * critical that it get flushed back to the disk.
 		 */
-		EXT4_I(filp->f_dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
+		EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
 	}
 #endif
 	stored = 0;
@@ -153,6 +153,9 @@ static int ext4_readdir(struct file * filp,
 			ext4_error (sb, "ext4_readdir",
 				"directory #%lu contains a hole at offset %lu",
 				inode->i_ino, (unsigned long)filp->f_pos);
+			/* corrupt size? Maybe no more blocks to read */
+			if (filp->f_pos > inode->i_blocks << 9)
+				break;
 			filp->f_pos += sb->s_blocksize - offset;
 			continue;
 		}
@@ -399,7 +402,7 @@ static int call_filldir(struct file * filp, void * dirent,
 {
 	struct dir_private_info *info = filp->private_data;
 	loff_t	curr_pos;
-	struct inode *inode = filp->f_dentry->d_inode;
+	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct super_block * sb;
 	int error;
 
@@ -429,7 +432,7 @@ static int ext4_dx_readdir(struct file * filp,
 			 void * dirent, filldir_t filldir)
 {
 	struct dir_private_info *info = filp->private_data;
-	struct inode *inode = filp->f_dentry->d_inode;
+	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct fname *fname;
 	int ret;
 
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
new file mode 100644
index 000000000000..d6afe4e27340
--- /dev/null
+++ b/fs/ext4/ext4_jbd2.c
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext4 and JBD
+ */
+
+#include <linux/ext4_jbd2.h>
+
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_undo_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_write_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_forget(const char *where, handle_t *handle,
+				struct buffer_head *bh)
+{
+	int err = jbd2_journal_forget(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+				ext4_fsblk_t blocknr, struct buffer_head *bh)
+{
+	int err = jbd2_journal_revoke(handle, blocknr, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_get_create_access(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = jbd2_journal_get_create_access(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
+
+int __ext4_journal_dirty_metadata(const char *where,
+				handle_t *handle, struct buffer_head *bh)
+{
+	int err = jbd2_journal_dirty_metadata(handle, bh);
+	if (err)
+		ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+	return err;
+}
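
The new ext4_jbd2.c is a set of thin wrappers: each one forwards to the corresponding jbd2_journal_* call and, on failure, aborts the handle while recording the caller's location (the `where` string) and the wrapper's own name. A generic sketch of that wrapper shape, with hypothetical names standing in for the jbd2 API:

#include <stdio.h>

/* hypothetical stand-ins for the journal API and the abort helper */
static int backend_op(int arg) { return arg < 0 ? -1 : 0; }

static void report_failure(const char *where, const char *func, int err)
{
	fprintf(stderr, "%s: %s failed with %d\n", where, func, err);
}

/* wrapper: forward the call, report (with caller location) only on error */
static int checked_op(const char *where, int arg)
{
	int err = backend_op(arg);
	if (err)
		report_failure(where, __func__, err);
	return err;
}

int main(void)
{
	checked_op(__func__, 1);    /* succeeds silently */
	checked_op(__func__, -1);   /* logs "main: checked_op failed with -1" */
	return 0;
}
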
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2608dce18f3e..dc2724fa7622 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -48,7 +48,7 @@
  * ext_pblock:
  * combine low and high parts of physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
+static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
 {
 	ext4_fsblk_t block;
 
@@ -61,7 +61,7 @@ static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
  * idx_pblock:
  * combine low and high parts of a leaf physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
+static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
 {
 	ext4_fsblk_t block;
 
@@ -75,7 +75,7 @@ static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
  * stores a large physical block number into an extent struct,
  * breaking it into parts
  */
-static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
+static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
 {
 	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
 	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -86,7 +86,7 @@ static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb
  * stores a large physical block number into an index struct,
  * breaking it into parts
  */
-static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
+static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
 {
 	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
 	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -186,7 +186,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 	depth = path->p_depth;
 
 	/* try to predict block placement */
-	if ((ex = path[depth].p_ext))
+	ex = path[depth].p_ext;
+	if (ex)
 		return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
 
 	/* it looks like index is empty;
@@ -215,7 +216,7 @@ ext4_ext_new_block(handle_t *handle, struct inode *inode,
 	return newblock;
 }
 
-static inline int ext4_ext_space_block(struct inode *inode)
+static int ext4_ext_space_block(struct inode *inode)
 {
 	int size;
 
@@ -228,7 +229,7 @@ static inline int ext4_ext_space_block(struct inode *inode)
 	return size;
 }
 
-static inline int ext4_ext_space_block_idx(struct inode *inode)
+static int ext4_ext_space_block_idx(struct inode *inode)
 {
 	int size;
 
@@ -241,7 +242,7 @@ static inline int ext4_ext_space_block_idx(struct inode *inode)
 	return size;
 }
 
-static inline int ext4_ext_space_root(struct inode *inode)
+static int ext4_ext_space_root(struct inode *inode)
 {
 	int size;
 
@@ -255,7 +256,7 @@ static inline int ext4_ext_space_root(struct inode *inode)
 	return size;
 }
 
-static inline int ext4_ext_space_root_idx(struct inode *inode)
+static int ext4_ext_space_root_idx(struct inode *inode)
 {
 	int size;
 
@@ -476,13 +477,12 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
 
 	/* account possible depth increase */
 	if (!path) {
-		path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
+		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
 				GFP_NOFS);
 		if (!path)
 			return ERR_PTR(-ENOMEM);
 		alloc = 1;
 	}
-	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
 	path[0].p_hdr = eh;
 
 	/* walk through the tree */
@@ -543,7 +543,8 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 	struct ext4_extent_idx *ix;
 	int len, err;
 
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		return err;
 
 	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
@@ -641,10 +642,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	 * We need this to handle errors and free blocks
 	 * upon them.
 	 */
-	ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
+	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
 	if (!ablocks)
 		return -ENOMEM;
-	memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
 
 	/* allocate all needed blocks */
 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
@@ -665,7 +665,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh)))
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err)
 		goto cleanup;
 
 	neh = ext_block_hdr(bh);
@@ -702,18 +703,21 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto cleanup;
 	brelse(bh);
 	bh = NULL;
 
 	/* correct old leaf */
 	if (m) {
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 		path[depth].p_hdr->eh_entries =
 		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
-		if ((err = ext4_ext_dirty(handle, inode, path + depth)))
+		err = ext4_ext_dirty(handle, inode, path + depth);
+		if (err)
 			goto cleanup;
 
 	}
@@ -736,7 +740,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh)))
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err)
 		goto cleanup;
 
 	neh = ext_block_hdr(bh);
@@ -780,7 +785,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto cleanup;
 	brelse(bh);
 	bh = NULL;
@@ -800,9 +806,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	}
 
 	/* insert new index */
-	if (err)
-		goto cleanup;
-
 	err = ext4_ext_insert_index(handle, inode, path + at,
 				    le32_to_cpu(border), newblock);
 
@@ -857,7 +860,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	}
 	lock_buffer(bh);
 
-	if ((err = ext4_journal_get_create_access(handle, bh))) {
+	err = ext4_journal_get_create_access(handle, bh);
+	if (err) {
 		unlock_buffer(bh);
 		goto out;
 	}
@@ -877,11 +881,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	if ((err = ext4_journal_dirty_metadata(handle, bh)))
+	err = ext4_journal_dirty_metadata(handle, bh);
+	if (err)
 		goto out;
 
 	/* create index in new top-level index: num,max,pointer */
-	if ((err = ext4_ext_get_access(handle, inode, curp)))
+	err = ext4_ext_get_access(handle, inode, curp);
+	if (err)
 		goto out;
 
 	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
@@ -1073,27 +1079,31 @@ int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
 	 */
 	k = depth - 1;
 	border = path[depth].p_ext->ee_block;
-	if ((err = ext4_ext_get_access(handle, inode, path + k)))
+	err = ext4_ext_get_access(handle, inode, path + k);
+	if (err)
 		return err;
 	path[k].p_idx->ei_block = border;
-	if ((err = ext4_ext_dirty(handle, inode, path + k)))
+	err = ext4_ext_dirty(handle, inode, path + k);
+	if (err)
 		return err;
 
 	while (k--) {
 		/* change all left-side indexes */
 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
 			break;
-		if ((err = ext4_ext_get_access(handle, inode, path + k)))
+		err = ext4_ext_get_access(handle, inode, path + k);
+		if (err)
 			break;
 		path[k].p_idx->ei_block = border;
-		if ((err = ext4_ext_dirty(handle, inode, path + k)))
+		err = ext4_ext_dirty(handle, inode, path + k);
+		if (err)
 			break;
 	}
 
 	return err;
 }
 
-static int inline
+static int
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
 			   struct ext4_extent *ex2)
 {
@@ -1145,7 +1155,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 				le16_to_cpu(newext->ee_len),
 				le32_to_cpu(ex->ee_block),
 				le16_to_cpu(ex->ee_len), ext_pblock(ex));
-		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+		err = ext4_ext_get_access(handle, inode, path + depth);
+		if (err)
 			return err;
 		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
 					+ le16_to_cpu(newext->ee_len));
@@ -1195,7 +1206,8 @@ repeat:
 has_space:
 	nearex = path[depth].p_ext;
 
-	if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
 		goto cleanup;
 
 	if (!nearex) {
@@ -1383,7 +1395,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
 	return err;
 }
 
-static inline void
+static void
 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
 			__u32 len, __u32 start, int type)
 {
@@ -1401,7 +1413,7 @@ ext4_ext_put_in_cache(struct inode *inode, __u32 block,
  * calculate boundaries of the gap that the requested block fits into
  * and cache this gap
  */
-static inline void
+static void
 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 				unsigned long block)
 {
@@ -1442,7 +1454,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
 }
 
-static inline int
+static int
 ext4_ext_in_cache(struct inode *inode, unsigned long block,
 		  struct ext4_extent *ex)
 {
@@ -1489,10 +1501,12 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 	path--;
 	leaf = idx_pblock(path->p_idx);
 	BUG_ON(path->p_hdr->eh_entries == 0);
-	if ((err = ext4_ext_get_access(handle, inode, path)))
+	err = ext4_ext_get_access(handle, inode, path);
+	if (err)
 		return err;
 	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
-	if ((err = ext4_ext_dirty(handle, inode, path)))
+	err = ext4_ext_dirty(handle, inode, path);
+	if (err)
 		return err;
 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
 	bh = sb_find_get_block(inode->i_sb, leaf);
@@ -1509,7 +1523,7 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
  * the caller should calculate credits under truncate_mutex and
  * pass the actual path.
  */
-int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
+int ext4_ext_calc_credits_for_insert(struct inode *inode,
 						struct ext4_ext_path *path)
 {
 	int depth, needed;
@@ -1534,16 +1548,17 @@ int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
 
 	/*
 	 * tree can be full, so it would need to grow in depth:
-	 * allocation + old root + new root
+	 * we need one credit to modify old root, credits for
+	 * new root will be added in split accounting
 	 */
-	needed += 2 + 1 + 1;
+	needed += 1;
 
 	/*
 	 * Index split can happen, we would need:
 	 * allocate intermediate indexes (bitmap + group)
 	 * + change two blocks at each level, but root (already included)
 	 */
-	needed = (depth * 2) + (depth * 2);
+	needed += (depth * 2) + (depth * 2);
 
 	/* any allocation modifies superblock */
 	needed += 1;
@@ -1718,7 +1733,7 @@ out:
  * ext4_ext_more_to_rm:
  * returns 1 if current index has to be freed (even partial)
  */
-static int inline
+static int
 ext4_ext_more_to_rm(struct ext4_ext_path *path)
 {
 	BUG_ON(path->p_idx == NULL);
@@ -1756,12 +1771,11 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
 	 * We start scanning from right side, freeing all the blocks
 	 * after i_size and walking into the tree depth-wise.
 	 */
-	path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
+	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
 	if (path == NULL) {
 		ext4_journal_stop(handle);
 		return -ENOMEM;
 	}
-	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
 	path[0].p_hdr = ext_inode_hdr(inode);
 	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
 		err = -EIO;
@@ -1932,7 +1946,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	mutex_lock(&EXT4_I(inode)->truncate_mutex);
 
 	/* check in cache */
-	if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
+	goal = ext4_ext_in_cache(inode, iblock, &newex);
+	if (goal) {
 		if (goal == EXT4_EXT_CACHE_GAP) {
 			if (!create) {
 				/* block isn't allocated yet and
@@ -1971,7 +1986,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 	 */
 	BUG_ON(path[depth].p_ext == NULL && depth != 0);
 
-	if ((ex = path[depth].p_ext)) {
+	ex = path[depth].p_ext;
+	if (ex) {
 		unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
 		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
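
Two hunks above (ext4_ext_find_extent and ext4_ext_split) replace kmalloc()+memset() with kzalloc(), which returns already-zeroed memory; in ext4_ext_find_extent this also removes a latent mismatch where kmalloc sized for depth + 2 elements but memset cleared only depth + 1. A user-space sketch of the same collapse, using calloc() as the zeroing allocator and a hypothetical element type:

#include <stdlib.h>
#include <string.h>

struct ext_path { void *hdr; int depth; };   /* hypothetical element type */

/* old style: allocate, then clear by hand */
static struct ext_path *alloc_path_old(int depth)
{
	struct ext_path *p = malloc(sizeof(*p) * (depth + 1));
	if (p)
		memset(p, 0, sizeof(*p) * (depth + 1));
	return p;
}

/* new style: one call hands back zeroed memory
 * (kzalloc in the kernel, calloc here) */
static struct ext_path *alloc_path_new(int depth)
{
	return calloc(depth + 1, sizeof(struct ext_path));
}

int main(void)
{
	struct ext_path *a = alloc_path_old(3);
	struct ext_path *b = alloc_path_new(3);
	free(a);
	free(b);
	return 0;
}
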
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0b622c0624b7..3bbc24b58785 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -52,7 +52,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
 	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_dentry->d_inode;
+	struct inode *inode = file->f_path.dentry->d_inode;
 	ssize_t ret;
 	int err;
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0a60ec5a16db..a127cc03c9fa 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1147,37 +1147,102 @@ static int do_journal_get_write_access(handle_t *handle,
 	return ext4_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea of this helper function is following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext4_prepare_failure(struct file *file, struct page *page,
+				unsigned from, unsigned to)
+{
+	struct address_space *mapping;
+	struct buffer_head *bh, *head, *next;
+	unsigned block_start, block_end;
+	unsigned blocksize;
+	int ret;
+	handle_t *handle = ext4_journal_current_handle();
+
+	mapping = page->mapping;
+	if (ext4_should_writeback_data(mapping->host)) {
+		/* optimization: no constraints about data */
+skip:
+		return ext4_journal_stop(handle);
+	}
+
+	head = page_buffers(page);
+	blocksize = head->b_size;
+	for (	bh = head, block_start = 0;
+		bh != head || !block_start;
+		block_start = block_end, bh = next)
+	{
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from)
+			continue;
+		if (block_start >= to) {
+			block_start = to;
+			break;
+		}
+		if (!buffer_mapped(bh))
+		/* prepare_write failed on this bh */
+			break;
+		if (ext4_should_journal_data(mapping->host)) {
+			ret = do_journal_get_write_access(handle, bh);
+			if (ret) {
+				ext4_journal_stop(handle);
+				return ret;
+			}
+		}
+	/*
+	 * block_start here becomes the first block where the current iteration
+	 * of prepare_write failed.
+	 */
+	}
+	if (block_start <= from)
+		goto skip;
+
+	/* commit allocated and zeroed buffers */
+	return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext4_prepare_write(struct file *file, struct page *page,
 			      unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+	int ret, ret2;
+	int needed_blocks = ext4_writepage_trans_blocks(inode);
 	handle_t *handle;
 	int retries = 0;
 
 retry:
 	handle = ext4_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
 		ret = nobh_prepare_write(page, from, to, ext4_get_block);
 	else
 		ret = block_prepare_write(page, from, to, ext4_get_block);
 	if (ret)
-		goto prepare_write_failed;
+		goto failure;
 
 	if (ext4_should_journal_data(inode)) {
 		ret = walk_page_buffers(handle, page_buffers(page),
 				from, to, NULL, do_journal_get_write_access);
+		if (ret)
+			/* fatal error, just put the handle and return */
+			ext4_journal_stop(handle);
 	}
-prepare_write_failed:
-	if (ret)
-		ext4_journal_stop(handle);
+	return ret;
+
+failure:
+	ret2 = ext4_prepare_failure(file, page, from, to);
+	if (ret2 < 0)
+		return ret2;
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
 		goto retry;
-out:
+	/* retry number exceeded, or other error like -EDQUOT */
 	return ret;
 }
 
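
The comment at the top of the new ext4_prepare_failure() describes the intent: when prepare_write maps and zeroes only a prefix of the requested buffers before failing, that prefix still has to be committed so the transaction covers the newly allocated blocks. A rough user-space model of that "commit only the mapped prefix" decision, with hypothetical data in place of real page buffers:

#include <stdio.h>

int main(void)
{
	/* hypothetical page: four 1 KiB blocks, write spans the whole page,
	 * and prepare_write managed to map only the first two blocks */
	unsigned blocksize = 1024, from = 0, to = 4096;
	int mapped[4] = { 1, 1, 0, 0 };
	unsigned block_start = 0, commit_to = from;

	for (int i = 0; i < 4; i++, block_start += blocksize) {
		unsigned block_end = block_start + blocksize;
		if (block_end <= from)
			continue;               /* block entirely before the write */
		if (block_start >= to)
			break;                  /* block entirely after the write */
		if (!mapped[i])
			break;                  /* prepare_write failed here */
		commit_to = block_end;          /* this block was allocated and zeroed */
	}

	if (commit_to > from)
		printf("commit_write(from=%u, to=%u)\n", from, commit_to);
	else
		printf("nothing to commit\n");
	return 0;
}
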
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 22a737c306c7..500567dd53b6 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -256,7 +256,7 @@ flags_err:
 #ifdef CONFIG_COMPAT
 long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct inode *inode = file->f_dentry->d_inode;
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int ret;
 
 	/* These are just misnamed, they actually get/put from/to user an int */
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8b1bd03d20f5..e5a74a5ac261 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de)) {
+		if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+						+((char *)de - bh->b_data))) {
+			/* On error, skip the f_pos to the next block. */
+			dir_file->f_pos = (dir_file->f_pos |
+					(dir->i_sb->s_blocksize - 1)) + 1;
+			brelse (bh);
+			return count;
+		}
 		ext4fs_dirhash(de->name, de->name_len, hinfo);
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
@@ -593,7 +602,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 
 	dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
 		       start_minor_hash));
-	dir = dir_file->f_dentry->d_inode;
+	dir = dir_file->f_path.dentry->d_inode;
 	if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
 		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
 		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
@@ -604,7 +613,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 	}
 	hinfo.hash = start_hash;
 	hinfo.minor_hash = 0;
-	frame = dx_probe(NULL, dir_file->f_dentry->d_inode, &hinfo, frames, &err);
+	frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err);
 	if (!frame)
 		return err;
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b4b022aa2bc2..486a641ca71b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -486,7 +486,7 @@ static void ext4_put_super (struct super_block * sb)
 	return;
 }
 
-static kmem_cache_t *ext4_inode_cachep;
+static struct kmem_cache *ext4_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -495,7 +495,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
 {
 	struct ext4_inode_info *ei;
 
-	ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS);
+	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
@@ -513,7 +513,7 @@ static void ext4_destroy_inode(struct inode *inode)
 	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
@@ -1321,6 +1321,12 @@ static void ext4_orphan_cleanup (struct super_block * sb,
 		return;
 	}
 
+	if (bdev_read_only(sb->s_bdev)) {
+		printk(KERN_ERR "EXT4-fs: write access "
+			"unavailable, skipping orphan cleanup.\n");
+		return;
+	}
+
 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
 		if (es->s_last_orphan)
 			jbd_debug(1, "Errors on filesystem, "
@@ -2460,6 +2466,7 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
 	struct ext4_super_block *es = sbi->s_es;
 	ext4_fsblk_t overhead;
 	int i;
+	u64 fsid;
 
 	if (test_opt (sb, MINIX_DF))
 		overhead = 0;
@@ -2506,6 +2513,10 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
 	buf->f_files = le32_to_cpu(es->s_inodes_count);
 	buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
 	buf->f_namelen = EXT4_NAME_LEN;
+	fsid = le64_to_cpup((void *)es->s_uuid) ^
+	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
 	return 0;
 }
 
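
The ext4_statfs() hunk above fills f_fsid by XOR-folding the 16-byte superblock UUID into a single 64-bit value and splitting it into two 32-bit words. A user-space sketch of the same folding, assuming a little-endian host (matching le64_to_cpup) and a made-up UUID:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* made-up 16-byte UUID standing in for es->s_uuid */
	uint8_t uuid[16] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4,
			     5, 6, 7, 8, 9, 10, 11, 12 };
	uint64_t lo, hi, fsid;
	uint32_t val[2];

	/* le64_to_cpup() in the patch: read two 64-bit halves of the UUID
	 * (this sketch assumes a little-endian host) */
	memcpy(&lo, uuid, sizeof(lo));
	memcpy(&hi, uuid + 8, sizeof(hi));
	fsid = lo ^ hi;

	val[0] = (uint32_t)(fsid & 0xFFFFFFFFUL);
	val[1] = (uint32_t)((fsid >> 32) & 0xFFFFFFFFUL);
	printf("f_fsid = %08x:%08x\n", (unsigned)val[0], (unsigned)val[1]);
	return 0;
}
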
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 63233cd946a7..dc969c357aa1 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -459,14 +459,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
 	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
 		return;
 
-	lock_super(sb);
 	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
-		EXT4_SB(sb)->s_es->s_feature_compat |=
-			cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR);
+		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
 		sb->s_dirt = 1;
 		ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
 	}
-	unlock_super(sb);
 }
 
 /*