diff options
author | Mingming Cao <cmm@us.ibm.com> | 2006-10-11 04:20:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-10-11 14:14:15 -0400 |
commit | 617ba13b31fbf505cc21799826639ef24ed94af0 (patch) | |
tree | 2a41e8c993f7c1eed115ad24047d546ba56cbdf5 /fs/ext4/inode.c | |
parent | ac27a0ec112a089f1a5102bc8dffc79c8c815571 (diff) |
[PATCH] ext4: rename ext4 symbols to avoid duplication of ext3 symbols
Mingming Cao originally did this work, and Shaggy reproduced it using some
scripts from her.
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r-- | fs/ext4/inode.c | 1020 |
1 files changed, 510 insertions, 510 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 03ba5bcab186..7275d60dcc59 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/fs/ext3/inode.c | 2 | * linux/fs/ext4/inode.c |
3 | * | 3 | * |
4 | * Copyright (C) 1992, 1993, 1994, 1995 | 4 | * Copyright (C) 1992, 1993, 1994, 1995 |
5 | * Remy Card (card@masi.ibp.fr) | 5 | * Remy Card (card@masi.ibp.fr) |
@@ -19,13 +19,13 @@ | |||
19 | * 64-bit file support on 64-bit platforms by Jakub Jelinek | 19 | * 64-bit file support on 64-bit platforms by Jakub Jelinek |
20 | * (jj@sunsite.ms.mff.cuni.cz) | 20 | * (jj@sunsite.ms.mff.cuni.cz) |
21 | * | 21 | * |
22 | * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 | 22 | * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/time.h> | 27 | #include <linux/time.h> |
28 | #include <linux/ext3_jbd.h> | 28 | #include <linux/ext4_jbd.h> |
29 | #include <linux/jbd.h> | 29 | #include <linux/jbd.h> |
30 | #include <linux/smp_lock.h> | 30 | #include <linux/smp_lock.h> |
31 | #include <linux/highuid.h> | 31 | #include <linux/highuid.h> |
@@ -40,21 +40,21 @@ | |||
40 | #include "xattr.h" | 40 | #include "xattr.h" |
41 | #include "acl.h" | 41 | #include "acl.h" |
42 | 42 | ||
43 | static int ext3_writepage_trans_blocks(struct inode *inode); | 43 | static int ext4_writepage_trans_blocks(struct inode *inode); |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * Test whether an inode is a fast symlink. | 46 | * Test whether an inode is a fast symlink. |
47 | */ | 47 | */ |
48 | static int ext3_inode_is_fast_symlink(struct inode *inode) | 48 | static int ext4_inode_is_fast_symlink(struct inode *inode) |
49 | { | 49 | { |
50 | int ea_blocks = EXT3_I(inode)->i_file_acl ? | 50 | int ea_blocks = EXT4_I(inode)->i_file_acl ? |
51 | (inode->i_sb->s_blocksize >> 9) : 0; | 51 | (inode->i_sb->s_blocksize >> 9) : 0; |
52 | 52 | ||
53 | return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); | 53 | return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); |
54 | } | 54 | } |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * The ext3 forget function must perform a revoke if we are freeing data | 57 | * The ext4 forget function must perform a revoke if we are freeing data |
58 | * which has been journaled. Metadata (eg. indirect blocks) must be | 58 | * which has been journaled. Metadata (eg. indirect blocks) must be |
59 | * revoked in all cases. | 59 | * revoked in all cases. |
60 | * | 60 | * |
@@ -62,8 +62,8 @@ static int ext3_inode_is_fast_symlink(struct inode *inode) | |||
62 | * but there may still be a record of it in the journal, and that record | 62 | * but there may still be a record of it in the journal, and that record |
63 | * still needs to be revoked. | 63 | * still needs to be revoked. |
64 | */ | 64 | */ |
65 | int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, | 65 | int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode, |
66 | struct buffer_head *bh, ext3_fsblk_t blocknr) | 66 | struct buffer_head *bh, ext4_fsblk_t blocknr) |
67 | { | 67 | { |
68 | int err; | 68 | int err; |
69 | 69 | ||
@@ -81,11 +81,11 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, | |||
81 | * support it. Otherwise, only skip the revoke on un-journaled | 81 | * support it. Otherwise, only skip the revoke on un-journaled |
82 | * data blocks. */ | 82 | * data blocks. */ |
83 | 83 | ||
84 | if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA || | 84 | if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA || |
85 | (!is_metadata && !ext3_should_journal_data(inode))) { | 85 | (!is_metadata && !ext4_should_journal_data(inode))) { |
86 | if (bh) { | 86 | if (bh) { |
87 | BUFFER_TRACE(bh, "call journal_forget"); | 87 | BUFFER_TRACE(bh, "call journal_forget"); |
88 | return ext3_journal_forget(handle, bh); | 88 | return ext4_journal_forget(handle, bh); |
89 | } | 89 | } |
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
@@ -93,10 +93,10 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, | |||
93 | /* | 93 | /* |
94 | * data!=journal && (is_metadata || should_journal_data(inode)) | 94 | * data!=journal && (is_metadata || should_journal_data(inode)) |
95 | */ | 95 | */ |
96 | BUFFER_TRACE(bh, "call ext3_journal_revoke"); | 96 | BUFFER_TRACE(bh, "call ext4_journal_revoke"); |
97 | err = ext3_journal_revoke(handle, blocknr, bh); | 97 | err = ext4_journal_revoke(handle, blocknr, bh); |
98 | if (err) | 98 | if (err) |
99 | ext3_abort(inode->i_sb, __FUNCTION__, | 99 | ext4_abort(inode->i_sb, __FUNCTION__, |
100 | "error %d when attempting revoke", err); | 100 | "error %d when attempting revoke", err); |
101 | BUFFER_TRACE(bh, "exit"); | 101 | BUFFER_TRACE(bh, "exit"); |
102 | return err; | 102 | return err; |
@@ -115,7 +115,7 @@ static unsigned long blocks_for_truncate(struct inode *inode) | |||
115 | /* Give ourselves just enough room to cope with inodes in which | 115 | /* Give ourselves just enough room to cope with inodes in which |
116 | * i_blocks is corrupt: we've seen disk corruptions in the past | 116 | * i_blocks is corrupt: we've seen disk corruptions in the past |
117 | * which resulted in random data in an inode which looked enough | 117 | * which resulted in random data in an inode which looked enough |
118 | * like a regular file for ext3 to try to delete it. Things | 118 | * like a regular file for ext4 to try to delete it. Things |
119 | * will go a bit crazy if that happens, but at least we should | 119 | * will go a bit crazy if that happens, but at least we should |
120 | * try not to panic the whole kernel. */ | 120 | * try not to panic the whole kernel. */ |
121 | if (needed < 2) | 121 | if (needed < 2) |
@@ -123,10 +123,10 @@ static unsigned long blocks_for_truncate(struct inode *inode) | |||
123 | 123 | ||
124 | /* But we need to bound the transaction so we don't overflow the | 124 | /* But we need to bound the transaction so we don't overflow the |
125 | * journal. */ | 125 | * journal. */ |
126 | if (needed > EXT3_MAX_TRANS_DATA) | 126 | if (needed > EXT4_MAX_TRANS_DATA) |
127 | needed = EXT3_MAX_TRANS_DATA; | 127 | needed = EXT4_MAX_TRANS_DATA; |
128 | 128 | ||
129 | return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed; | 129 | return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed; |
130 | } | 130 | } |
131 | 131 | ||
132 | /* | 132 | /* |
@@ -143,11 +143,11 @@ static handle_t *start_transaction(struct inode *inode) | |||
143 | { | 143 | { |
144 | handle_t *result; | 144 | handle_t *result; |
145 | 145 | ||
146 | result = ext3_journal_start(inode, blocks_for_truncate(inode)); | 146 | result = ext4_journal_start(inode, blocks_for_truncate(inode)); |
147 | if (!IS_ERR(result)) | 147 | if (!IS_ERR(result)) |
148 | return result; | 148 | return result; |
149 | 149 | ||
150 | ext3_std_error(inode->i_sb, PTR_ERR(result)); | 150 | ext4_std_error(inode->i_sb, PTR_ERR(result)); |
151 | return result; | 151 | return result; |
152 | } | 152 | } |
153 | 153 | ||
@@ -159,9 +159,9 @@ static handle_t *start_transaction(struct inode *inode) | |||
159 | */ | 159 | */ |
160 | static int try_to_extend_transaction(handle_t *handle, struct inode *inode) | 160 | static int try_to_extend_transaction(handle_t *handle, struct inode *inode) |
161 | { | 161 | { |
162 | if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS) | 162 | if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS) |
163 | return 0; | 163 | return 0; |
164 | if (!ext3_journal_extend(handle, blocks_for_truncate(inode))) | 164 | if (!ext4_journal_extend(handle, blocks_for_truncate(inode))) |
165 | return 0; | 165 | return 0; |
166 | return 1; | 166 | return 1; |
167 | } | 167 | } |
@@ -171,16 +171,16 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) | |||
171 | * so before we call here everything must be consistently dirtied against | 171 | * so before we call here everything must be consistently dirtied against |
172 | * this transaction. | 172 | * this transaction. |
173 | */ | 173 | */ |
174 | static int ext3_journal_test_restart(handle_t *handle, struct inode *inode) | 174 | static int ext4_journal_test_restart(handle_t *handle, struct inode *inode) |
175 | { | 175 | { |
176 | jbd_debug(2, "restarting handle %p\n", handle); | 176 | jbd_debug(2, "restarting handle %p\n", handle); |
177 | return ext3_journal_restart(handle, blocks_for_truncate(inode)); | 177 | return ext4_journal_restart(handle, blocks_for_truncate(inode)); |
178 | } | 178 | } |
179 | 179 | ||
180 | /* | 180 | /* |
181 | * Called at the last iput() if i_nlink is zero. | 181 | * Called at the last iput() if i_nlink is zero. |
182 | */ | 182 | */ |
183 | void ext3_delete_inode (struct inode * inode) | 183 | void ext4_delete_inode (struct inode * inode) |
184 | { | 184 | { |
185 | handle_t *handle; | 185 | handle_t *handle; |
186 | 186 | ||
@@ -196,7 +196,7 @@ void ext3_delete_inode (struct inode * inode) | |||
196 | * make sure that the in-core orphan linked list is properly | 196 | * make sure that the in-core orphan linked list is properly |
197 | * cleaned up. | 197 | * cleaned up. |
198 | */ | 198 | */ |
199 | ext3_orphan_del(NULL, inode); | 199 | ext4_orphan_del(NULL, inode); |
200 | goto no_delete; | 200 | goto no_delete; |
201 | } | 201 | } |
202 | 202 | ||
@@ -204,17 +204,17 @@ void ext3_delete_inode (struct inode * inode) | |||
204 | handle->h_sync = 1; | 204 | handle->h_sync = 1; |
205 | inode->i_size = 0; | 205 | inode->i_size = 0; |
206 | if (inode->i_blocks) | 206 | if (inode->i_blocks) |
207 | ext3_truncate(inode); | 207 | ext4_truncate(inode); |
208 | /* | 208 | /* |
209 | * Kill off the orphan record which ext3_truncate created. | 209 | * Kill off the orphan record which ext4_truncate created. |
210 | * AKPM: I think this can be inside the above `if'. | 210 | * AKPM: I think this can be inside the above `if'. |
211 | * Note that ext3_orphan_del() has to be able to cope with the | 211 | * Note that ext4_orphan_del() has to be able to cope with the |
212 | * deletion of a non-existent orphan - this is because we don't | 212 | * deletion of a non-existent orphan - this is because we don't |
213 | * know if ext3_truncate() actually created an orphan record. | 213 | * know if ext4_truncate() actually created an orphan record. |
214 | * (Well, we could do this if we need to, but heck - it works) | 214 | * (Well, we could do this if we need to, but heck - it works) |
215 | */ | 215 | */ |
216 | ext3_orphan_del(handle, inode); | 216 | ext4_orphan_del(handle, inode); |
217 | EXT3_I(inode)->i_dtime = get_seconds(); | 217 | EXT4_I(inode)->i_dtime = get_seconds(); |
218 | 218 | ||
219 | /* | 219 | /* |
220 | * One subtle ordering requirement: if anything has gone wrong | 220 | * One subtle ordering requirement: if anything has gone wrong |
@@ -223,12 +223,12 @@ void ext3_delete_inode (struct inode * inode) | |||
223 | * having errors), but we can't free the inode if the mark_dirty | 223 | * having errors), but we can't free the inode if the mark_dirty |
224 | * fails. | 224 | * fails. |
225 | */ | 225 | */ |
226 | if (ext3_mark_inode_dirty(handle, inode)) | 226 | if (ext4_mark_inode_dirty(handle, inode)) |
227 | /* If that failed, just do the required in-core inode clear. */ | 227 | /* If that failed, just do the required in-core inode clear. */ |
228 | clear_inode(inode); | 228 | clear_inode(inode); |
229 | else | 229 | else |
230 | ext3_free_inode(handle, inode); | 230 | ext4_free_inode(handle, inode); |
231 | ext3_journal_stop(handle); | 231 | ext4_journal_stop(handle); |
232 | return; | 232 | return; |
233 | no_delete: | 233 | no_delete: |
234 | clear_inode(inode); /* We must guarantee clearing of inode... */ | 234 | clear_inode(inode); /* We must guarantee clearing of inode... */ |
@@ -254,14 +254,14 @@ static int verify_chain(Indirect *from, Indirect *to) | |||
254 | } | 254 | } |
255 | 255 | ||
256 | /** | 256 | /** |
257 | * ext3_block_to_path - parse the block number into array of offsets | 257 | * ext4_block_to_path - parse the block number into array of offsets |
258 | * @inode: inode in question (we are only interested in its superblock) | 258 | * @inode: inode in question (we are only interested in its superblock) |
259 | * @i_block: block number to be parsed | 259 | * @i_block: block number to be parsed |
260 | * @offsets: array to store the offsets in | 260 | * @offsets: array to store the offsets in |
261 | * @boundary: set this non-zero if the referred-to block is likely to be | 261 | * @boundary: set this non-zero if the referred-to block is likely to be |
262 | * followed (on disk) by an indirect block. | 262 | * followed (on disk) by an indirect block. |
263 | * | 263 | * |
264 | * To store the locations of file's data ext3 uses a data structure common | 264 | * To store the locations of file's data ext4 uses a data structure common |
265 | * for UNIX filesystems - tree of pointers anchored in the inode, with | 265 | * for UNIX filesystems - tree of pointers anchored in the inode, with |
266 | * data blocks at leaves and indirect blocks in intermediate nodes. | 266 | * data blocks at leaves and indirect blocks in intermediate nodes. |
267 | * This function translates the block number into path in that tree - | 267 | * This function translates the block number into path in that tree - |
@@ -284,39 +284,39 @@ static int verify_chain(Indirect *from, Indirect *to) | |||
284 | * get there at all. | 284 | * get there at all. |
285 | */ | 285 | */ |
286 | 286 | ||
287 | static int ext3_block_to_path(struct inode *inode, | 287 | static int ext4_block_to_path(struct inode *inode, |
288 | long i_block, int offsets[4], int *boundary) | 288 | long i_block, int offsets[4], int *boundary) |
289 | { | 289 | { |
290 | int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb); | 290 | int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); |
291 | int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb); | 291 | int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); |
292 | const long direct_blocks = EXT3_NDIR_BLOCKS, | 292 | const long direct_blocks = EXT4_NDIR_BLOCKS, |
293 | indirect_blocks = ptrs, | 293 | indirect_blocks = ptrs, |
294 | double_blocks = (1 << (ptrs_bits * 2)); | 294 | double_blocks = (1 << (ptrs_bits * 2)); |
295 | int n = 0; | 295 | int n = 0; |
296 | int final = 0; | 296 | int final = 0; |
297 | 297 | ||
298 | if (i_block < 0) { | 298 | if (i_block < 0) { |
299 | ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0"); | 299 | ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0"); |
300 | } else if (i_block < direct_blocks) { | 300 | } else if (i_block < direct_blocks) { |
301 | offsets[n++] = i_block; | 301 | offsets[n++] = i_block; |
302 | final = direct_blocks; | 302 | final = direct_blocks; |
303 | } else if ( (i_block -= direct_blocks) < indirect_blocks) { | 303 | } else if ( (i_block -= direct_blocks) < indirect_blocks) { |
304 | offsets[n++] = EXT3_IND_BLOCK; | 304 | offsets[n++] = EXT4_IND_BLOCK; |
305 | offsets[n++] = i_block; | 305 | offsets[n++] = i_block; |
306 | final = ptrs; | 306 | final = ptrs; |
307 | } else if ((i_block -= indirect_blocks) < double_blocks) { | 307 | } else if ((i_block -= indirect_blocks) < double_blocks) { |
308 | offsets[n++] = EXT3_DIND_BLOCK; | 308 | offsets[n++] = EXT4_DIND_BLOCK; |
309 | offsets[n++] = i_block >> ptrs_bits; | 309 | offsets[n++] = i_block >> ptrs_bits; |
310 | offsets[n++] = i_block & (ptrs - 1); | 310 | offsets[n++] = i_block & (ptrs - 1); |
311 | final = ptrs; | 311 | final = ptrs; |
312 | } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { | 312 | } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { |
313 | offsets[n++] = EXT3_TIND_BLOCK; | 313 | offsets[n++] = EXT4_TIND_BLOCK; |
314 | offsets[n++] = i_block >> (ptrs_bits * 2); | 314 | offsets[n++] = i_block >> (ptrs_bits * 2); |
315 | offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); | 315 | offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); |
316 | offsets[n++] = i_block & (ptrs - 1); | 316 | offsets[n++] = i_block & (ptrs - 1); |
317 | final = ptrs; | 317 | final = ptrs; |
318 | } else { | 318 | } else { |
319 | ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big"); | 319 | ext4_warning(inode->i_sb, "ext4_block_to_path", "block > big"); |
320 | } | 320 | } |
321 | if (boundary) | 321 | if (boundary) |
322 | *boundary = final - 1 - (i_block & (ptrs - 1)); | 322 | *boundary = final - 1 - (i_block & (ptrs - 1)); |
@@ -324,7 +324,7 @@ static int ext3_block_to_path(struct inode *inode, | |||
324 | } | 324 | } |
325 | 325 | ||
326 | /** | 326 | /** |
327 | * ext3_get_branch - read the chain of indirect blocks leading to data | 327 | * ext4_get_branch - read the chain of indirect blocks leading to data |
328 | * @inode: inode in question | 328 | * @inode: inode in question |
329 | * @depth: depth of the chain (1 - direct pointer, etc.) | 329 | * @depth: depth of the chain (1 - direct pointer, etc.) |
330 | * @offsets: offsets of pointers in inode/indirect blocks | 330 | * @offsets: offsets of pointers in inode/indirect blocks |
@@ -352,7 +352,7 @@ static int ext3_block_to_path(struct inode *inode, | |||
352 | * or when it reads all @depth-1 indirect blocks successfully and finds | 352 | * or when it reads all @depth-1 indirect blocks successfully and finds |
353 | * the whole chain, all way to the data (returns %NULL, *err == 0). | 353 | * the whole chain, all way to the data (returns %NULL, *err == 0). |
354 | */ | 354 | */ |
355 | static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets, | 355 | static Indirect *ext4_get_branch(struct inode *inode, int depth, int *offsets, |
356 | Indirect chain[4], int *err) | 356 | Indirect chain[4], int *err) |
357 | { | 357 | { |
358 | struct super_block *sb = inode->i_sb; | 358 | struct super_block *sb = inode->i_sb; |
@@ -361,7 +361,7 @@ static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets, | |||
361 | 361 | ||
362 | *err = 0; | 362 | *err = 0; |
363 | /* i_data is not going away, no lock needed */ | 363 | /* i_data is not going away, no lock needed */ |
364 | add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets); | 364 | add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets); |
365 | if (!p->key) | 365 | if (!p->key) |
366 | goto no_block; | 366 | goto no_block; |
367 | while (--depth) { | 367 | while (--depth) { |
@@ -389,7 +389,7 @@ no_block: | |||
389 | } | 389 | } |
390 | 390 | ||
391 | /** | 391 | /** |
392 | * ext3_find_near - find a place for allocation with sufficient locality | 392 | * ext4_find_near - find a place for allocation with sufficient locality |
393 | * @inode: owner | 393 | * @inode: owner |
394 | * @ind: descriptor of indirect block. | 394 | * @ind: descriptor of indirect block. |
395 | * | 395 | * |
@@ -408,13 +408,13 @@ no_block: | |||
408 | * | 408 | * |
409 | * Caller must make sure that @ind is valid and will stay that way. | 409 | * Caller must make sure that @ind is valid and will stay that way. |
410 | */ | 410 | */ |
411 | static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind) | 411 | static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) |
412 | { | 412 | { |
413 | struct ext3_inode_info *ei = EXT3_I(inode); | 413 | struct ext4_inode_info *ei = EXT4_I(inode); |
414 | __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; | 414 | __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; |
415 | __le32 *p; | 415 | __le32 *p; |
416 | ext3_fsblk_t bg_start; | 416 | ext4_fsblk_t bg_start; |
417 | ext3_grpblk_t colour; | 417 | ext4_grpblk_t colour; |
418 | 418 | ||
419 | /* Try to find previous block */ | 419 | /* Try to find previous block */ |
420 | for (p = ind->p - 1; p >= start; p--) { | 420 | for (p = ind->p - 1; p >= start; p--) { |
@@ -430,14 +430,14 @@ static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind) | |||
430 | * It is going to be referred to from the inode itself? OK, just put it | 430 | * It is going to be referred to from the inode itself? OK, just put it |
431 | * into the same cylinder group then. | 431 | * into the same cylinder group then. |
432 | */ | 432 | */ |
433 | bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group); | 433 | bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group); |
434 | colour = (current->pid % 16) * | 434 | colour = (current->pid % 16) * |
435 | (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16); | 435 | (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); |
436 | return bg_start + colour; | 436 | return bg_start + colour; |
437 | } | 437 | } |
438 | 438 | ||
439 | /** | 439 | /** |
440 | * ext3_find_goal - find a prefered place for allocation. | 440 | * ext4_find_goal - find a prefered place for allocation. |
441 | * @inode: owner | 441 | * @inode: owner |
442 | * @block: block we want | 442 | * @block: block we want |
443 | * @chain: chain of indirect blocks | 443 | * @chain: chain of indirect blocks |
@@ -448,12 +448,12 @@ static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind) | |||
448 | * stores it in *@goal and returns zero. | 448 | * stores it in *@goal and returns zero. |
449 | */ | 449 | */ |
450 | 450 | ||
451 | static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, | 451 | static ext4_fsblk_t ext4_find_goal(struct inode *inode, long block, |
452 | Indirect chain[4], Indirect *partial) | 452 | Indirect chain[4], Indirect *partial) |
453 | { | 453 | { |
454 | struct ext3_block_alloc_info *block_i; | 454 | struct ext4_block_alloc_info *block_i; |
455 | 455 | ||
456 | block_i = EXT3_I(inode)->i_block_alloc_info; | 456 | block_i = EXT4_I(inode)->i_block_alloc_info; |
457 | 457 | ||
458 | /* | 458 | /* |
459 | * try the heuristic for sequential allocation, | 459 | * try the heuristic for sequential allocation, |
@@ -464,11 +464,11 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, | |||
464 | return block_i->last_alloc_physical_block + 1; | 464 | return block_i->last_alloc_physical_block + 1; |
465 | } | 465 | } |
466 | 466 | ||
467 | return ext3_find_near(inode, partial); | 467 | return ext4_find_near(inode, partial); |
468 | } | 468 | } |
469 | 469 | ||
470 | /** | 470 | /** |
471 | * ext3_blks_to_allocate: Look up the block map and count the number | 471 | * ext4_blks_to_allocate: Look up the block map and count the number |
472 | * of direct blocks need to be allocated for the given branch. | 472 | * of direct blocks need to be allocated for the given branch. |
473 | * | 473 | * |
474 | * @branch: chain of indirect blocks | 474 | * @branch: chain of indirect blocks |
@@ -479,7 +479,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, | |||
479 | * return the total number of blocks to be allocate, including the | 479 | * return the total number of blocks to be allocate, including the |
480 | * direct and indirect blocks. | 480 | * direct and indirect blocks. |
481 | */ | 481 | */ |
482 | static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, | 482 | static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks, |
483 | int blocks_to_boundary) | 483 | int blocks_to_boundary) |
484 | { | 484 | { |
485 | unsigned long count = 0; | 485 | unsigned long count = 0; |
@@ -506,7 +506,7 @@ static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, | |||
506 | } | 506 | } |
507 | 507 | ||
508 | /** | 508 | /** |
509 | * ext3_alloc_blocks: multiple allocate blocks needed for a branch | 509 | * ext4_alloc_blocks: multiple allocate blocks needed for a branch |
510 | * @indirect_blks: the number of blocks need to allocate for indirect | 510 | * @indirect_blks: the number of blocks need to allocate for indirect |
511 | * blocks | 511 | * blocks |
512 | * | 512 | * |
@@ -515,14 +515,14 @@ static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, | |||
515 | * @blks: on return it will store the total number of allocated | 515 | * @blks: on return it will store the total number of allocated |
516 | * direct blocks | 516 | * direct blocks |
517 | */ | 517 | */ |
518 | static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, | 518 | static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, |
519 | ext3_fsblk_t goal, int indirect_blks, int blks, | 519 | ext4_fsblk_t goal, int indirect_blks, int blks, |
520 | ext3_fsblk_t new_blocks[4], int *err) | 520 | ext4_fsblk_t new_blocks[4], int *err) |
521 | { | 521 | { |
522 | int target, i; | 522 | int target, i; |
523 | unsigned long count = 0; | 523 | unsigned long count = 0; |
524 | int index = 0; | 524 | int index = 0; |
525 | ext3_fsblk_t current_block = 0; | 525 | ext4_fsblk_t current_block = 0; |
526 | int ret = 0; | 526 | int ret = 0; |
527 | 527 | ||
528 | /* | 528 | /* |
@@ -538,7 +538,7 @@ static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, | |||
538 | while (1) { | 538 | while (1) { |
539 | count = target; | 539 | count = target; |
540 | /* allocating blocks for indirect blocks and direct blocks */ | 540 | /* allocating blocks for indirect blocks and direct blocks */ |
541 | current_block = ext3_new_blocks(handle,inode,goal,&count,err); | 541 | current_block = ext4_new_blocks(handle,inode,goal,&count,err); |
542 | if (*err) | 542 | if (*err) |
543 | goto failed_out; | 543 | goto failed_out; |
544 | 544 | ||
@@ -562,12 +562,12 @@ static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, | |||
562 | return ret; | 562 | return ret; |
563 | failed_out: | 563 | failed_out: |
564 | for (i = 0; i <index; i++) | 564 | for (i = 0; i <index; i++) |
565 | ext3_free_blocks(handle, inode, new_blocks[i], 1); | 565 | ext4_free_blocks(handle, inode, new_blocks[i], 1); |
566 | return ret; | 566 | return ret; |
567 | } | 567 | } |
568 | 568 | ||
569 | /** | 569 | /** |
570 | * ext3_alloc_branch - allocate and set up a chain of blocks. | 570 | * ext4_alloc_branch - allocate and set up a chain of blocks. |
571 | * @inode: owner | 571 | * @inode: owner |
572 | * @indirect_blks: number of allocated indirect blocks | 572 | * @indirect_blks: number of allocated indirect blocks |
573 | * @blks: number of allocated direct blocks | 573 | * @blks: number of allocated direct blocks |
@@ -578,21 +578,21 @@ failed_out: | |||
578 | * links them into chain and (if we are synchronous) writes them to disk. | 578 | * links them into chain and (if we are synchronous) writes them to disk. |
579 | * In other words, it prepares a branch that can be spliced onto the | 579 | * In other words, it prepares a branch that can be spliced onto the |
580 | * inode. It stores the information about that chain in the branch[], in | 580 | * inode. It stores the information about that chain in the branch[], in |
581 | * the same format as ext3_get_branch() would do. We are calling it after | 581 | * the same format as ext4_get_branch() would do. We are calling it after |
582 | * we had read the existing part of chain and partial points to the last | 582 | * we had read the existing part of chain and partial points to the last |
583 | * triple of that (one with zero ->key). Upon the exit we have the same | 583 | * triple of that (one with zero ->key). Upon the exit we have the same |
584 | * picture as after the successful ext3_get_block(), except that in one | 584 | * picture as after the successful ext4_get_block(), except that in one |
585 | * place chain is disconnected - *branch->p is still zero (we did not | 585 | * place chain is disconnected - *branch->p is still zero (we did not |
586 | * set the last link), but branch->key contains the number that should | 586 | * set the last link), but branch->key contains the number that should |
587 | * be placed into *branch->p to fill that gap. | 587 | * be placed into *branch->p to fill that gap. |
588 | * | 588 | * |
589 | * If allocation fails we free all blocks we've allocated (and forget | 589 | * If allocation fails we free all blocks we've allocated (and forget |
590 | * their buffer_heads) and return the error value the from failed | 590 | * their buffer_heads) and return the error value the from failed |
591 | * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain | 591 | * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain |
592 | * as described above and return 0. | 592 | * as described above and return 0. |
593 | */ | 593 | */ |
594 | static int ext3_alloc_branch(handle_t *handle, struct inode *inode, | 594 | static int ext4_alloc_branch(handle_t *handle, struct inode *inode, |
595 | int indirect_blks, int *blks, ext3_fsblk_t goal, | 595 | int indirect_blks, int *blks, ext4_fsblk_t goal, |
596 | int *offsets, Indirect *branch) | 596 | int *offsets, Indirect *branch) |
597 | { | 597 | { |
598 | int blocksize = inode->i_sb->s_blocksize; | 598 | int blocksize = inode->i_sb->s_blocksize; |
@@ -600,10 +600,10 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode, | |||
600 | int err = 0; | 600 | int err = 0; |
601 | struct buffer_head *bh; | 601 | struct buffer_head *bh; |
602 | int num; | 602 | int num; |
603 | ext3_fsblk_t new_blocks[4]; | 603 | ext4_fsblk_t new_blocks[4]; |
604 | ext3_fsblk_t current_block; | 604 | ext4_fsblk_t current_block; |
605 | 605 | ||
606 | num = ext3_alloc_blocks(handle, inode, goal, indirect_blks, | 606 | num = ext4_alloc_blocks(handle, inode, goal, indirect_blks, |
607 | *blks, new_blocks, &err); | 607 | *blks, new_blocks, &err); |
608 | if (err) | 608 | if (err) |
609 | return err; | 609 | return err; |
@@ -622,7 +622,7 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode, | |||
622 | branch[n].bh = bh; | 622 | branch[n].bh = bh; |
623 | lock_buffer(bh); | 623 | lock_buffer(bh); |
624 | BUFFER_TRACE(bh, "call get_create_access"); | 624 | BUFFER_TRACE(bh, "call get_create_access"); |
625 | err = ext3_journal_get_create_access(handle, bh); | 625 | err = ext4_journal_get_create_access(handle, bh); |
626 | if (err) { | 626 | if (err) { |
627 | unlock_buffer(bh); | 627 | unlock_buffer(bh); |
628 | brelse(bh); | 628 | brelse(bh); |
@@ -647,8 +647,8 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode, | |||
647 | set_buffer_uptodate(bh); | 647 | set_buffer_uptodate(bh); |
648 | unlock_buffer(bh); | 648 | unlock_buffer(bh); |
649 | 649 | ||
650 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | 650 | BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); |
651 | err = ext3_journal_dirty_metadata(handle, bh); | 651 | err = ext4_journal_dirty_metadata(handle, bh); |
652 | if (err) | 652 | if (err) |
653 | goto failed; | 653 | goto failed; |
654 | } | 654 | } |
@@ -658,22 +658,22 @@ failed: | |||
658 | /* Allocation failed, free what we already allocated */ | 658 | /* Allocation failed, free what we already allocated */ |
659 | for (i = 1; i <= n ; i++) { | 659 | for (i = 1; i <= n ; i++) { |
660 | BUFFER_TRACE(branch[i].bh, "call journal_forget"); | 660 | BUFFER_TRACE(branch[i].bh, "call journal_forget"); |
661 | ext3_journal_forget(handle, branch[i].bh); | 661 | ext4_journal_forget(handle, branch[i].bh); |
662 | } | 662 | } |
663 | for (i = 0; i <indirect_blks; i++) | 663 | for (i = 0; i <indirect_blks; i++) |
664 | ext3_free_blocks(handle, inode, new_blocks[i], 1); | 664 | ext4_free_blocks(handle, inode, new_blocks[i], 1); |
665 | 665 | ||
666 | ext3_free_blocks(handle, inode, new_blocks[i], num); | 666 | ext4_free_blocks(handle, inode, new_blocks[i], num); |
667 | 667 | ||
668 | return err; | 668 | return err; |
669 | } | 669 | } |
670 | 670 | ||
671 | /** | 671 | /** |
672 | * ext3_splice_branch - splice the allocated branch onto inode. | 672 | * ext4_splice_branch - splice the allocated branch onto inode. |
673 | * @inode: owner | 673 | * @inode: owner |
674 | * @block: (logical) number of block we are adding | 674 | * @block: (logical) number of block we are adding |
675 | * @chain: chain of indirect blocks (with a missing link - see | 675 | * @chain: chain of indirect blocks (with a missing link - see |
676 | * ext3_alloc_branch) | 676 | * ext4_alloc_branch) |
677 | * @where: location of missing link | 677 | * @where: location of missing link |
678 | * @num: number of indirect blocks we are adding | 678 | * @num: number of indirect blocks we are adding |
679 | * @blks: number of direct blocks we are adding | 679 | * @blks: number of direct blocks we are adding |
@@ -682,15 +682,15 @@ failed: | |||
682 | * inode (->i_blocks, etc.). In case of success we end up with the full | 682 | * inode (->i_blocks, etc.). In case of success we end up with the full |
683 | * chain to new block and return 0. | 683 | * chain to new block and return 0. |
684 | */ | 684 | */ |
685 | static int ext3_splice_branch(handle_t *handle, struct inode *inode, | 685 | static int ext4_splice_branch(handle_t *handle, struct inode *inode, |
686 | long block, Indirect *where, int num, int blks) | 686 | long block, Indirect *where, int num, int blks) |
687 | { | 687 | { |
688 | int i; | 688 | int i; |
689 | int err = 0; | 689 | int err = 0; |
690 | struct ext3_block_alloc_info *block_i; | 690 | struct ext4_block_alloc_info *block_i; |
691 | ext3_fsblk_t current_block; | 691 | ext4_fsblk_t current_block; |
692 | 692 | ||
693 | block_i = EXT3_I(inode)->i_block_alloc_info; | 693 | block_i = EXT4_I(inode)->i_block_alloc_info; |
694 | /* | 694 | /* |
695 | * If we're splicing into a [td]indirect block (as opposed to the | 695 | * If we're splicing into a [td]indirect block (as opposed to the |
696 | * inode) then we need to get write access to the [td]indirect block | 696 | * inode) then we need to get write access to the [td]indirect block |
@@ -698,7 +698,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |||
698 | */ | 698 | */ |
699 | if (where->bh) { | 699 | if (where->bh) { |
700 | BUFFER_TRACE(where->bh, "get_write_access"); | 700 | BUFFER_TRACE(where->bh, "get_write_access"); |
701 | err = ext3_journal_get_write_access(handle, where->bh); | 701 | err = ext4_journal_get_write_access(handle, where->bh); |
702 | if (err) | 702 | if (err) |
703 | goto err_out; | 703 | goto err_out; |
704 | } | 704 | } |
@@ -730,7 +730,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |||
730 | /* We are done with atomic stuff, now do the rest of housekeeping */ | 730 | /* We are done with atomic stuff, now do the rest of housekeeping */ |
731 | 731 | ||
732 | inode->i_ctime = CURRENT_TIME_SEC; | 732 | inode->i_ctime = CURRENT_TIME_SEC; |
733 | ext3_mark_inode_dirty(handle, inode); | 733 | ext4_mark_inode_dirty(handle, inode); |
734 | 734 | ||
735 | /* had we spliced it onto indirect block? */ | 735 | /* had we spliced it onto indirect block? */ |
736 | if (where->bh) { | 736 | if (where->bh) { |
@@ -740,11 +740,11 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |||
740 | * onto an indirect block at the very end of the file (the | 740 | * onto an indirect block at the very end of the file (the |
741 | * file is growing) then we *will* alter the inode to reflect | 741 | * file is growing) then we *will* alter the inode to reflect |
742 | * the new i_size. But that is not done here - it is done in | 742 | * the new i_size. But that is not done here - it is done in |
743 | * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode. | 743 | * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. |
744 | */ | 744 | */ |
745 | jbd_debug(5, "splicing indirect only\n"); | 745 | jbd_debug(5, "splicing indirect only\n"); |
746 | BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata"); | 746 | BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata"); |
747 | err = ext3_journal_dirty_metadata(handle, where->bh); | 747 | err = ext4_journal_dirty_metadata(handle, where->bh); |
748 | if (err) | 748 | if (err) |
749 | goto err_out; | 749 | goto err_out; |
750 | } else { | 750 | } else { |
@@ -759,10 +759,10 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |||
759 | err_out: | 759 | err_out: |
760 | for (i = 1; i <= num; i++) { | 760 | for (i = 1; i <= num; i++) { |
761 | BUFFER_TRACE(where[i].bh, "call journal_forget"); | 761 | BUFFER_TRACE(where[i].bh, "call journal_forget"); |
762 | ext3_journal_forget(handle, where[i].bh); | 762 | ext4_journal_forget(handle, where[i].bh); |
763 | ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1); | 763 | ext4_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1); |
764 | } | 764 | } |
765 | ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks); | 765 | ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks); |
766 | 766 | ||
767 | return err; | 767 | return err; |
768 | } | 768 | } |
@@ -786,7 +786,7 @@ err_out: | |||
786 | * return = 0, if plain lookup failed. | 786 | * return = 0, if plain lookup failed. |
787 | * return < 0, error case. | 787 | * return < 0, error case. |
788 | */ | 788 | */ |
789 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | 789 | int ext4_get_blocks_handle(handle_t *handle, struct inode *inode, |
790 | sector_t iblock, unsigned long maxblocks, | 790 | sector_t iblock, unsigned long maxblocks, |
791 | struct buffer_head *bh_result, | 791 | struct buffer_head *bh_result, |
792 | int create, int extend_disksize) | 792 | int create, int extend_disksize) |
@@ -795,22 +795,22 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
795 | int offsets[4]; | 795 | int offsets[4]; |
796 | Indirect chain[4]; | 796 | Indirect chain[4]; |
797 | Indirect *partial; | 797 | Indirect *partial; |
798 | ext3_fsblk_t goal; | 798 | ext4_fsblk_t goal; |
799 | int indirect_blks; | 799 | int indirect_blks; |
800 | int blocks_to_boundary = 0; | 800 | int blocks_to_boundary = 0; |
801 | int depth; | 801 | int depth; |
802 | struct ext3_inode_info *ei = EXT3_I(inode); | 802 | struct ext4_inode_info *ei = EXT4_I(inode); |
803 | int count = 0; | 803 | int count = 0; |
804 | ext3_fsblk_t first_block = 0; | 804 | ext4_fsblk_t first_block = 0; |
805 | 805 | ||
806 | 806 | ||
807 | J_ASSERT(handle != NULL || create == 0); | 807 | J_ASSERT(handle != NULL || create == 0); |
808 | depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); | 808 | depth = ext4_block_to_path(inode,iblock,offsets,&blocks_to_boundary); |
809 | 809 | ||
810 | if (depth == 0) | 810 | if (depth == 0) |
811 | goto out; | 811 | goto out; |
812 | 812 | ||
813 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); | 813 | partial = ext4_get_branch(inode, depth, offsets, chain, &err); |
814 | 814 | ||
815 | /* Simplest case - block found, no allocation needed */ | 815 | /* Simplest case - block found, no allocation needed */ |
816 | if (!partial) { | 816 | if (!partial) { |
@@ -819,7 +819,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
819 | count++; | 819 | count++; |
820 | /*map more blocks*/ | 820 | /*map more blocks*/ |
821 | while (count < maxblocks && count <= blocks_to_boundary) { | 821 | while (count < maxblocks && count <= blocks_to_boundary) { |
822 | ext3_fsblk_t blk; | 822 | ext4_fsblk_t blk; |
823 | 823 | ||
824 | if (!verify_chain(chain, partial)) { | 824 | if (!verify_chain(chain, partial)) { |
825 | /* | 825 | /* |
@@ -852,7 +852,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
852 | 852 | ||
853 | /* | 853 | /* |
854 | * If the indirect block is missing while we are reading | 854 | * If the indirect block is missing while we are reading |
855 | * the chain(ext3_get_branch() returns -EAGAIN err), or | 855 | * the chain(ext4_get_branch() returns -EAGAIN err), or |
856 | * if the chain has been changed after we grab the semaphore, | 856 | * if the chain has been changed after we grab the semaphore, |
857 | * (either because another process truncated this branch, or | 857 | * (either because another process truncated this branch, or |
858 | * another get_block allocated this branch) re-grab the chain to see if | 858 | * another get_block allocated this branch) re-grab the chain to see if |
@@ -867,7 +867,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
867 | brelse(partial->bh); | 867 | brelse(partial->bh); |
868 | partial--; | 868 | partial--; |
869 | } | 869 | } |
870 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); | 870 | partial = ext4_get_branch(inode, depth, offsets, chain, &err); |
871 | if (!partial) { | 871 | if (!partial) { |
872 | count++; | 872 | count++; |
873 | mutex_unlock(&ei->truncate_mutex); | 873 | mutex_unlock(&ei->truncate_mutex); |
@@ -883,9 +883,9 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
883 | * allocation info here if necessary | 883 | * allocation info here if necessary |
884 | */ | 884 | */ |
885 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) | 885 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) |
886 | ext3_init_block_alloc_info(inode); | 886 | ext4_init_block_alloc_info(inode); |
887 | 887 | ||
888 | goal = ext3_find_goal(inode, iblock, chain, partial); | 888 | goal = ext4_find_goal(inode, iblock, chain, partial); |
889 | 889 | ||
890 | /* the number of blocks need to allocate for [d,t]indirect blocks */ | 890 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
891 | indirect_blks = (chain + depth) - partial - 1; | 891 | indirect_blks = (chain + depth) - partial - 1; |
@@ -894,28 +894,28 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
894 | * Next look up the indirect map to count the totoal number of | 894 | * Next look up the indirect map to count the totoal number of |
895 | * direct blocks to allocate for this branch. | 895 | * direct blocks to allocate for this branch. |
896 | */ | 896 | */ |
897 | count = ext3_blks_to_allocate(partial, indirect_blks, | 897 | count = ext4_blks_to_allocate(partial, indirect_blks, |
898 | maxblocks, blocks_to_boundary); | 898 | maxblocks, blocks_to_boundary); |
899 | /* | 899 | /* |
900 | * Block out ext3_truncate while we alter the tree | 900 | * Block out ext4_truncate while we alter the tree |
901 | */ | 901 | */ |
902 | err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal, | 902 | err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal, |
903 | offsets + (partial - chain), partial); | 903 | offsets + (partial - chain), partial); |
904 | 904 | ||
905 | /* | 905 | /* |
906 | * The ext3_splice_branch call will free and forget any buffers | 906 | * The ext4_splice_branch call will free and forget any buffers |
907 | * on the new chain if there is a failure, but that risks using | 907 | * on the new chain if there is a failure, but that risks using |
908 | * up transaction credits, especially for bitmaps where the | 908 | * up transaction credits, especially for bitmaps where the |
909 | * credits cannot be returned. Can we handle this somehow? We | 909 | * credits cannot be returned. Can we handle this somehow? We |
910 | * may need to return -EAGAIN upwards in the worst case. --sct | 910 | * may need to return -EAGAIN upwards in the worst case. --sct |
911 | */ | 911 | */ |
912 | if (!err) | 912 | if (!err) |
913 | err = ext3_splice_branch(handle, inode, iblock, | 913 | err = ext4_splice_branch(handle, inode, iblock, |
914 | partial, indirect_blks, count); | 914 | partial, indirect_blks, count); |
915 | /* | 915 | /* |
916 | * i_disksize growing is protected by truncate_mutex. Don't forget to | 916 | * i_disksize growing is protected by truncate_mutex. Don't forget to |
917 | * protect it if you're about to implement concurrent | 917 | * protect it if you're about to implement concurrent |
918 | * ext3_get_block() -bzzz | 918 | * ext4_get_block() -bzzz |
919 | */ | 919 | */ |
920 | if (!err && extend_disksize && inode->i_size > ei->i_disksize) | 920 | if (!err && extend_disksize && inode->i_size > ei->i_disksize) |
921 | ei->i_disksize = inode->i_size; | 921 | ei->i_disksize = inode->i_size; |
@@ -942,9 +942,9 @@ out: | |||
942 | return err; | 942 | return err; |
943 | } | 943 | } |
944 | 944 | ||
945 | #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) | 945 | #define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32) |
946 | 946 | ||
947 | static int ext3_get_block(struct inode *inode, sector_t iblock, | 947 | static int ext4_get_block(struct inode *inode, sector_t iblock, |
948 | struct buffer_head *bh_result, int create) | 948 | struct buffer_head *bh_result, int create) |
949 | { | 949 | { |
950 | handle_t *handle = journal_current_handle(); | 950 | handle_t *handle = journal_current_handle(); |
@@ -962,29 +962,29 @@ static int ext3_get_block(struct inode *inode, sector_t iblock, | |||
962 | * Huge direct-io writes can hold off commits for long | 962 | * Huge direct-io writes can hold off commits for long |
963 | * periods of time. Let this commit run. | 963 | * periods of time. Let this commit run. |
964 | */ | 964 | */ |
965 | ext3_journal_stop(handle); | 965 | ext4_journal_stop(handle); |
966 | handle = ext3_journal_start(inode, DIO_CREDITS); | 966 | handle = ext4_journal_start(inode, DIO_CREDITS); |
967 | if (IS_ERR(handle)) | 967 | if (IS_ERR(handle)) |
968 | ret = PTR_ERR(handle); | 968 | ret = PTR_ERR(handle); |
969 | goto get_block; | 969 | goto get_block; |
970 | } | 970 | } |
971 | 971 | ||
972 | if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) { | 972 | if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) { |
973 | /* | 973 | /* |
974 | * Getting low on buffer credits... | 974 | * Getting low on buffer credits... |
975 | */ | 975 | */ |
976 | ret = ext3_journal_extend(handle, DIO_CREDITS); | 976 | ret = ext4_journal_extend(handle, DIO_CREDITS); |
977 | if (ret > 0) { | 977 | if (ret > 0) { |
978 | /* | 978 | /* |
979 | * Couldn't extend the transaction. Start a new one. | 979 | * Couldn't extend the transaction. Start a new one. |
980 | */ | 980 | */ |
981 | ret = ext3_journal_restart(handle, DIO_CREDITS); | 981 | ret = ext4_journal_restart(handle, DIO_CREDITS); |
982 | } | 982 | } |
983 | } | 983 | } |
984 | 984 | ||
985 | get_block: | 985 | get_block: |
986 | if (ret == 0) { | 986 | if (ret == 0) { |
987 | ret = ext3_get_blocks_handle(handle, inode, iblock, | 987 | ret = ext4_get_blocks_handle(handle, inode, iblock, |
988 | max_blocks, bh_result, create, 0); | 988 | max_blocks, bh_result, create, 0); |
989 | if (ret > 0) { | 989 | if (ret > 0) { |
990 | bh_result->b_size = (ret << inode->i_blkbits); | 990 | bh_result->b_size = (ret << inode->i_blkbits); |
@@ -997,7 +997,7 @@ get_block: | |||
997 | /* | 997 | /* |
998 | * `handle' can be NULL if create is zero | 998 | * `handle' can be NULL if create is zero |
999 | */ | 999 | */ |
1000 | struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, | 1000 | struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, |
1001 | long block, int create, int *errp) | 1001 | long block, int create, int *errp) |
1002 | { | 1002 | { |
1003 | struct buffer_head dummy; | 1003 | struct buffer_head dummy; |
@@ -1008,10 +1008,10 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, | |||
1008 | dummy.b_state = 0; | 1008 | dummy.b_state = 0; |
1009 | dummy.b_blocknr = -1000; | 1009 | dummy.b_blocknr = -1000; |
1010 | buffer_trace_init(&dummy.b_history); | 1010 | buffer_trace_init(&dummy.b_history); |
1011 | err = ext3_get_blocks_handle(handle, inode, block, 1, | 1011 | err = ext4_get_blocks_handle(handle, inode, block, 1, |
1012 | &dummy, create, 1); | 1012 | &dummy, create, 1); |
1013 | /* | 1013 | /* |
1014 | * ext3_get_blocks_handle() returns number of blocks | 1014 | * ext4_get_blocks_handle() returns number of blocks |
1015 | * mapped. 0 in case of a HOLE. | 1015 | * mapped. 0 in case of a HOLE. |
1016 | */ | 1016 | */ |
1017 | if (err > 0) { | 1017 | if (err > 0) { |
@@ -1035,19 +1035,19 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, | |||
1035 | * Now that we do not always journal data, we should | 1035 | * Now that we do not always journal data, we should |
1036 | * keep in mind whether this should always journal the | 1036 | * keep in mind whether this should always journal the |
1037 | * new buffer as metadata. For now, regular file | 1037 | * new buffer as metadata. For now, regular file |
1038 | * writes use ext3_get_block instead, so it's not a | 1038 | * writes use ext4_get_block instead, so it's not a |
1039 | * problem. | 1039 | * problem. |
1040 | */ | 1040 | */ |
1041 | lock_buffer(bh); | 1041 | lock_buffer(bh); |
1042 | BUFFER_TRACE(bh, "call get_create_access"); | 1042 | BUFFER_TRACE(bh, "call get_create_access"); |
1043 | fatal = ext3_journal_get_create_access(handle, bh); | 1043 | fatal = ext4_journal_get_create_access(handle, bh); |
1044 | if (!fatal && !buffer_uptodate(bh)) { | 1044 | if (!fatal && !buffer_uptodate(bh)) { |
1045 | memset(bh->b_data,0,inode->i_sb->s_blocksize); | 1045 | memset(bh->b_data,0,inode->i_sb->s_blocksize); |
1046 | set_buffer_uptodate(bh); | 1046 | set_buffer_uptodate(bh); |
1047 | } | 1047 | } |
1048 | unlock_buffer(bh); | 1048 | unlock_buffer(bh); |
1049 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | 1049 | BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); |
1050 | err = ext3_journal_dirty_metadata(handle, bh); | 1050 | err = ext4_journal_dirty_metadata(handle, bh); |
1051 | if (!fatal) | 1051 | if (!fatal) |
1052 | fatal = err; | 1052 | fatal = err; |
1053 | } else { | 1053 | } else { |
@@ -1064,12 +1064,12 @@ err: | |||
1064 | return NULL; | 1064 | return NULL; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode, | 1067 | struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, |
1068 | int block, int create, int *err) | 1068 | int block, int create, int *err) |
1069 | { | 1069 | { |
1070 | struct buffer_head * bh; | 1070 | struct buffer_head * bh; |
1071 | 1071 | ||
1072 | bh = ext3_getblk(handle, inode, block, create, err); | 1072 | bh = ext4_getblk(handle, inode, block, create, err); |
1073 | if (!bh) | 1073 | if (!bh) |
1074 | return bh; | 1074 | return bh; |
1075 | if (buffer_uptodate(bh)) | 1075 | if (buffer_uptodate(bh)) |
@@ -1118,17 +1118,17 @@ static int walk_page_buffers( handle_t *handle, | |||
1118 | /* | 1118 | /* |
1119 | * To preserve ordering, it is essential that the hole instantiation and | 1119 | * To preserve ordering, it is essential that the hole instantiation and |
1120 | * the data write be encapsulated in a single transaction. We cannot | 1120 | * the data write be encapsulated in a single transaction. We cannot |
1121 | * close off a transaction and start a new one between the ext3_get_block() | 1121 | * close off a transaction and start a new one between the ext4_get_block() |
1122 | * and the commit_write(). So doing the journal_start at the start of | 1122 | * and the commit_write(). So doing the journal_start at the start of |
1123 | * prepare_write() is the right place. | 1123 | * prepare_write() is the right place. |
1124 | * | 1124 | * |
1125 | * Also, this function can nest inside ext3_writepage() -> | 1125 | * Also, this function can nest inside ext4_writepage() -> |
1126 | * block_write_full_page(). In that case, we *know* that ext3_writepage() | 1126 | * block_write_full_page(). In that case, we *know* that ext4_writepage() |
1127 | * has generated enough buffer credits to do the whole page. So we won't | 1127 | * has generated enough buffer credits to do the whole page. So we won't |
1128 | * block on the journal in that case, which is good, because the caller may | 1128 | * block on the journal in that case, which is good, because the caller may |
1129 | * be PF_MEMALLOC. | 1129 | * be PF_MEMALLOC. |
1130 | * | 1130 | * |
1131 | * By accident, ext3 can be reentered when a transaction is open via | 1131 | * By accident, ext4 can be reentered when a transaction is open via |
1132 | * quota file writes. If we were to commit the transaction while thus | 1132 | * quota file writes. If we were to commit the transaction while thus |
1133 | * reentered, there can be a deadlock - we would be holding a quota | 1133 | * reentered, there can be a deadlock - we would be holding a quota |
1134 | * lock, and the commit would never complete if another thread had a | 1134 | * lock, and the commit would never complete if another thread had a |
@@ -1145,48 +1145,48 @@ static int do_journal_get_write_access(handle_t *handle, | |||
1145 | { | 1145 | { |
1146 | if (!buffer_mapped(bh) || buffer_freed(bh)) | 1146 | if (!buffer_mapped(bh) || buffer_freed(bh)) |
1147 | return 0; | 1147 | return 0; |
1148 | return ext3_journal_get_write_access(handle, bh); | 1148 | return ext4_journal_get_write_access(handle, bh); |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | static int ext3_prepare_write(struct file *file, struct page *page, | 1151 | static int ext4_prepare_write(struct file *file, struct page *page, |
1152 | unsigned from, unsigned to) | 1152 | unsigned from, unsigned to) |
1153 | { | 1153 | { |
1154 | struct inode *inode = page->mapping->host; | 1154 | struct inode *inode = page->mapping->host; |
1155 | int ret, needed_blocks = ext3_writepage_trans_blocks(inode); | 1155 | int ret, needed_blocks = ext4_writepage_trans_blocks(inode); |
1156 | handle_t *handle; | 1156 | handle_t *handle; |
1157 | int retries = 0; | 1157 | int retries = 0; |
1158 | 1158 | ||
1159 | retry: | 1159 | retry: |
1160 | handle = ext3_journal_start(inode, needed_blocks); | 1160 | handle = ext4_journal_start(inode, needed_blocks); |
1161 | if (IS_ERR(handle)) { | 1161 | if (IS_ERR(handle)) { |
1162 | ret = PTR_ERR(handle); | 1162 | ret = PTR_ERR(handle); |
1163 | goto out; | 1163 | goto out; |
1164 | } | 1164 | } |
1165 | if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) | 1165 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) |
1166 | ret = nobh_prepare_write(page, from, to, ext3_get_block); | 1166 | ret = nobh_prepare_write(page, from, to, ext4_get_block); |
1167 | else | 1167 | else |
1168 | ret = block_prepare_write(page, from, to, ext3_get_block); | 1168 | ret = block_prepare_write(page, from, to, ext4_get_block); |
1169 | if (ret) | 1169 | if (ret) |
1170 | goto prepare_write_failed; | 1170 | goto prepare_write_failed; |
1171 | 1171 | ||
1172 | if (ext3_should_journal_data(inode)) { | 1172 | if (ext4_should_journal_data(inode)) { |
1173 | ret = walk_page_buffers(handle, page_buffers(page), | 1173 | ret = walk_page_buffers(handle, page_buffers(page), |
1174 | from, to, NULL, do_journal_get_write_access); | 1174 | from, to, NULL, do_journal_get_write_access); |
1175 | } | 1175 | } |
1176 | prepare_write_failed: | 1176 | prepare_write_failed: |
1177 | if (ret) | 1177 | if (ret) |
1178 | ext3_journal_stop(handle); | 1178 | ext4_journal_stop(handle); |
1179 | if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) | 1179 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
1180 | goto retry; | 1180 | goto retry; |
1181 | out: | 1181 | out: |
1182 | return ret; | 1182 | return ret; |
1183 | } | 1183 | } |
1184 | 1184 | ||
1185 | int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) | 1185 | int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh) |
1186 | { | 1186 | { |
1187 | int err = journal_dirty_data(handle, bh); | 1187 | int err = journal_dirty_data(handle, bh); |
1188 | if (err) | 1188 | if (err) |
1189 | ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__, | 1189 | ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__, |
1190 | bh, handle,err); | 1190 | bh, handle,err); |
1191 | return err; | 1191 | return err; |
1192 | } | 1192 | } |
@@ -1197,25 +1197,25 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh) | |||
1197 | if (!buffer_mapped(bh) || buffer_freed(bh)) | 1197 | if (!buffer_mapped(bh) || buffer_freed(bh)) |
1198 | return 0; | 1198 | return 0; |
1199 | set_buffer_uptodate(bh); | 1199 | set_buffer_uptodate(bh); |
1200 | return ext3_journal_dirty_metadata(handle, bh); | 1200 | return ext4_journal_dirty_metadata(handle, bh); |
1201 | } | 1201 | } |
1202 | 1202 | ||
1203 | /* | 1203 | /* |
1204 | * We need to pick up the new inode size which generic_commit_write gave us | 1204 | * We need to pick up the new inode size which generic_commit_write gave us |
1205 | * `file' can be NULL - eg, when called from page_symlink(). | 1205 | * `file' can be NULL - eg, when called from page_symlink(). |
1206 | * | 1206 | * |
1207 | * ext3 never places buffers on inode->i_mapping->private_list. metadata | 1207 | * ext4 never places buffers on inode->i_mapping->private_list. metadata |
1208 | * buffers are managed internally. | 1208 | * buffers are managed internally. |
1209 | */ | 1209 | */ |
1210 | static int ext3_ordered_commit_write(struct file *file, struct page *page, | 1210 | static int ext4_ordered_commit_write(struct file *file, struct page *page, |
1211 | unsigned from, unsigned to) | 1211 | unsigned from, unsigned to) |
1212 | { | 1212 | { |
1213 | handle_t *handle = ext3_journal_current_handle(); | 1213 | handle_t *handle = ext4_journal_current_handle(); |
1214 | struct inode *inode = page->mapping->host; | 1214 | struct inode *inode = page->mapping->host; |
1215 | int ret = 0, ret2; | 1215 | int ret = 0, ret2; |
1216 | 1216 | ||
1217 | ret = walk_page_buffers(handle, page_buffers(page), | 1217 | ret = walk_page_buffers(handle, page_buffers(page), |
1218 | from, to, NULL, ext3_journal_dirty_data); | 1218 | from, to, NULL, ext4_journal_dirty_data); |
1219 | 1219 | ||
1220 | if (ret == 0) { | 1220 | if (ret == 0) { |
1221 | /* | 1221 | /* |
@@ -1226,43 +1226,43 @@ static int ext3_ordered_commit_write(struct file *file, struct page *page, | |||
1226 | loff_t new_i_size; | 1226 | loff_t new_i_size; |
1227 | 1227 | ||
1228 | new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | 1228 | new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; |
1229 | if (new_i_size > EXT3_I(inode)->i_disksize) | 1229 | if (new_i_size > EXT4_I(inode)->i_disksize) |
1230 | EXT3_I(inode)->i_disksize = new_i_size; | 1230 | EXT4_I(inode)->i_disksize = new_i_size; |
1231 | ret = generic_commit_write(file, page, from, to); | 1231 | ret = generic_commit_write(file, page, from, to); |
1232 | } | 1232 | } |
1233 | ret2 = ext3_journal_stop(handle); | 1233 | ret2 = ext4_journal_stop(handle); |
1234 | if (!ret) | 1234 | if (!ret) |
1235 | ret = ret2; | 1235 | ret = ret2; |
1236 | return ret; | 1236 | return ret; |
1237 | } | 1237 | } |
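
The ordered and writeback commit paths compute the candidate file size as the byte offset of the page plus `to`, and they only ever push i_disksize forward. A worked example of that arithmetic in plain C; the 12-bit page shift (4 KiB pages) is an assumption, not something fixed by this diff.

#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;		/* assumed 4 KiB pages */
	unsigned long page_index = 5;		/* sixth page of the file */
	unsigned to = 1300;			/* end of the written range inside the page */
	long long i_disksize = 20000;		/* on-disk size before this write */
	long long new_i_size;

	new_i_size = ((long long)page_index << page_shift) + to;
	printf("new_i_size = %lld\n", new_i_size);	/* 5 * 4096 + 1300 = 21780 */

	if (new_i_size > i_disksize)		/* i_disksize only ever grows here */
		i_disksize = new_i_size;
	printf("i_disksize = %lld\n", i_disksize);	/* 21780 */
	return 0;
}
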
1238 | 1238 | ||
1239 | static int ext3_writeback_commit_write(struct file *file, struct page *page, | 1239 | static int ext4_writeback_commit_write(struct file *file, struct page *page, |
1240 | unsigned from, unsigned to) | 1240 | unsigned from, unsigned to) |
1241 | { | 1241 | { |
1242 | handle_t *handle = ext3_journal_current_handle(); | 1242 | handle_t *handle = ext4_journal_current_handle(); |
1243 | struct inode *inode = page->mapping->host; | 1243 | struct inode *inode = page->mapping->host; |
1244 | int ret = 0, ret2; | 1244 | int ret = 0, ret2; |
1245 | loff_t new_i_size; | 1245 | loff_t new_i_size; |
1246 | 1246 | ||
1247 | new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | 1247 | new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; |
1248 | if (new_i_size > EXT3_I(inode)->i_disksize) | 1248 | if (new_i_size > EXT4_I(inode)->i_disksize) |
1249 | EXT3_I(inode)->i_disksize = new_i_size; | 1249 | EXT4_I(inode)->i_disksize = new_i_size; |
1250 | 1250 | ||
1251 | if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) | 1251 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) |
1252 | ret = nobh_commit_write(file, page, from, to); | 1252 | ret = nobh_commit_write(file, page, from, to); |
1253 | else | 1253 | else |
1254 | ret = generic_commit_write(file, page, from, to); | 1254 | ret = generic_commit_write(file, page, from, to); |
1255 | 1255 | ||
1256 | ret2 = ext3_journal_stop(handle); | 1256 | ret2 = ext4_journal_stop(handle); |
1257 | if (!ret) | 1257 | if (!ret) |
1258 | ret = ret2; | 1258 | ret = ret2; |
1259 | return ret; | 1259 | return ret; |
1260 | } | 1260 | } |
1261 | 1261 | ||
1262 | static int ext3_journalled_commit_write(struct file *file, | 1262 | static int ext4_journalled_commit_write(struct file *file, |
1263 | struct page *page, unsigned from, unsigned to) | 1263 | struct page *page, unsigned from, unsigned to) |
1264 | { | 1264 | { |
1265 | handle_t *handle = ext3_journal_current_handle(); | 1265 | handle_t *handle = ext4_journal_current_handle(); |
1266 | struct inode *inode = page->mapping->host; | 1266 | struct inode *inode = page->mapping->host; |
1267 | int ret = 0, ret2; | 1267 | int ret = 0, ret2; |
1268 | int partial = 0; | 1268 | int partial = 0; |
@@ -1279,14 +1279,14 @@ static int ext3_journalled_commit_write(struct file *file, | |||
1279 | SetPageUptodate(page); | 1279 | SetPageUptodate(page); |
1280 | if (pos > inode->i_size) | 1280 | if (pos > inode->i_size) |
1281 | i_size_write(inode, pos); | 1281 | i_size_write(inode, pos); |
1282 | EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; | 1282 | EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; |
1283 | if (inode->i_size > EXT3_I(inode)->i_disksize) { | 1283 | if (inode->i_size > EXT4_I(inode)->i_disksize) { |
1284 | EXT3_I(inode)->i_disksize = inode->i_size; | 1284 | EXT4_I(inode)->i_disksize = inode->i_size; |
1285 | ret2 = ext3_mark_inode_dirty(handle, inode); | 1285 | ret2 = ext4_mark_inode_dirty(handle, inode); |
1286 | if (!ret) | 1286 | if (!ret) |
1287 | ret = ret2; | 1287 | ret = ret2; |
1288 | } | 1288 | } |
1289 | ret2 = ext3_journal_stop(handle); | 1289 | ret2 = ext4_journal_stop(handle); |
1290 | if (!ret) | 1290 | if (!ret) |
1291 | ret = ret2; | 1291 | ret = ret2; |
1292 | return ret; | 1292 | return ret; |
@@ -1297,7 +1297,7 @@ static int ext3_journalled_commit_write(struct file *file, | |||
1297 | * the swapper to find the on-disk block of a specific piece of data. | 1297 | * the swapper to find the on-disk block of a specific piece of data. |
1298 | * | 1298 | * |
1299 | * Naturally, this is dangerous if the block concerned is still in the | 1299 | * Naturally, this is dangerous if the block concerned is still in the |
1300 | * journal. If somebody makes a swapfile on an ext3 data-journaling | 1300 | * journal. If somebody makes a swapfile on an ext4 data-journaling |
1301 | * filesystem and enables swap, then they may get a nasty shock when the | 1301 | * filesystem and enables swap, then they may get a nasty shock when the |
1302 | * data getting swapped to that swapfile suddenly gets overwritten by | 1302 | * data getting swapped to that swapfile suddenly gets overwritten by |
1303 | * the original zeros written out previously to the journal and | 1303 | * the original zeros written out previously to the journal and |
@@ -1306,13 +1306,13 @@ static int ext3_journalled_commit_write(struct file *file, | |||
1306 | * So, if we see any bmap calls here on a modified, data-journaled file, | 1306 | * So, if we see any bmap calls here on a modified, data-journaled file, |
1307 | * take extra steps to flush any blocks which might be in the cache. | 1307 | * take extra steps to flush any blocks which might be in the cache. |
1308 | */ | 1308 | */ |
1309 | static sector_t ext3_bmap(struct address_space *mapping, sector_t block) | 1309 | static sector_t ext4_bmap(struct address_space *mapping, sector_t block) |
1310 | { | 1310 | { |
1311 | struct inode *inode = mapping->host; | 1311 | struct inode *inode = mapping->host; |
1312 | journal_t *journal; | 1312 | journal_t *journal; |
1313 | int err; | 1313 | int err; |
1314 | 1314 | ||
1315 | if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) { | 1315 | if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { |
1316 | /* | 1316 | /* |
1317 | * This is a REALLY heavyweight approach, but the use of | 1317 | * This is a REALLY heavyweight approach, but the use of |
1318 | * bmap on dirty files is expected to be extremely rare: | 1318 | * bmap on dirty files is expected to be extremely rare: |
@@ -1324,15 +1324,15 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block) | |||
1324 | * in trouble if mortal users could trigger this path at | 1324 | * in trouble if mortal users could trigger this path at |
1325 | * will.) | 1325 | * will.) |
1326 | * | 1326 | * |
1327 | * NB. EXT3_STATE_JDATA is not set on files other than | 1327 | * NB. EXT4_STATE_JDATA is not set on files other than |
1328 | * regular files. If somebody wants to bmap a directory | 1328 | * regular files. If somebody wants to bmap a directory |
1329 | * or symlink and gets confused because the buffer | 1329 | * or symlink and gets confused because the buffer |
1330 | * hasn't yet been flushed to disk, they deserve | 1330 | * hasn't yet been flushed to disk, they deserve |
1331 | * everything they get. | 1331 | * everything they get. |
1332 | */ | 1332 | */ |
1333 | 1333 | ||
1334 | EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA; | 1334 | EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; |
1335 | journal = EXT3_JOURNAL(inode); | 1335 | journal = EXT4_JOURNAL(inode); |
1336 | journal_lock_updates(journal); | 1336 | journal_lock_updates(journal); |
1337 | err = journal_flush(journal); | 1337 | err = journal_flush(journal); |
1338 | journal_unlock_updates(journal); | 1338 | journal_unlock_updates(journal); |
@@ -1341,7 +1341,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block) | |||
1341 | return 0; | 1341 | return 0; |
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | return generic_block_bmap(mapping,block,ext3_get_block); | 1344 | return generic_block_bmap(mapping,block,ext4_get_block); |
1345 | } | 1345 | } |
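
The classic outside consumer of ->bmap is the FIBMAP ioctl (besides the swap code the comment above worries about): userspace asks for the physical block behind a logical block of a file. A minimal userspace sketch of such a query; FIBMAP normally needs root and error handling is trimmed, and the filename fallback is just a placeholder.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIBMAP */

int main(int argc, char **argv)
{
	int block = 0;		/* in: logical block 0, out: physical block */
	int fd = open(argc > 1 ? argv[1] : "somefile", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FIBMAP, &block) == 0)
		printf("logical block 0 -> physical block %d\n", block);
	else
		perror("FIBMAP");	/* typically requires CAP_SYS_RAWIO */
	close(fd);
	return 0;
}

On a file written in data=journal mode this is exactly the kind of call that would trip the EXT4_STATE_JDATA check above and force a journal flush first.
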
1346 | 1346 | ||
1347 | static int bget_one(handle_t *handle, struct buffer_head *bh) | 1347 | static int bget_one(handle_t *handle, struct buffer_head *bh) |
@@ -1359,14 +1359,14 @@ static int bput_one(handle_t *handle, struct buffer_head *bh) | |||
1359 | static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) | 1359 | static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) |
1360 | { | 1360 | { |
1361 | if (buffer_mapped(bh)) | 1361 | if (buffer_mapped(bh)) |
1362 | return ext3_journal_dirty_data(handle, bh); | 1362 | return ext4_journal_dirty_data(handle, bh); |
1363 | return 0; | 1363 | return 0; |
1364 | } | 1364 | } |
1365 | 1365 | ||
1366 | /* | 1366 | /* |
1367 | * Note that we always start a transaction even if we're not journalling | 1367 | * Note that we always start a transaction even if we're not journalling |
1368 | * data. This is to preserve ordering: any hole instantiation within | 1368 | * data. This is to preserve ordering: any hole instantiation within |
1369 | * __block_write_full_page -> ext3_get_block() should be journalled | 1369 | * __block_write_full_page -> ext4_get_block() should be journalled |
1370 | * along with the data so we don't crash and then get metadata which | 1370 | * along with the data so we don't crash and then get metadata which |
1371 | * refers to old data. | 1371 | * refers to old data. |
1372 | * | 1372 | * |
@@ -1374,14 +1374,14 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) | |||
1374 | * | 1374 | * |
1375 | * Problem: | 1375 | * Problem: |
1376 | * | 1376 | * |
1377 | * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> | 1377 | * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> |
1378 | * ext3_writepage() | 1378 | * ext4_writepage() |
1379 | * | 1379 | * |
1380 | * Similar for: | 1380 | * Similar for: |
1381 | * | 1381 | * |
1382 | * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... | 1382 | * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ... |
1383 | * | 1383 | * |
1384 | * Same applies to ext3_get_block(). We will deadlock on various things like | 1384 | * Same applies to ext4_get_block(). We will deadlock on various things like |
1385 | * lock_journal and i_truncate_mutex. | 1385 | * lock_journal and i_truncate_mutex. |
1386 | * | 1386 | * |
1387 | * Setting PF_MEMALLOC here doesn't work - too many internal memory | 1387 | * Setting PF_MEMALLOC here doesn't work - too many internal memory |
@@ -1415,7 +1415,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) | |||
1415 | * AKPM2: if all the page's buffers are mapped to disk and !data=journal, | 1415 | * AKPM2: if all the page's buffers are mapped to disk and !data=journal, |
1416 | * we don't need to open a transaction here. | 1416 | * we don't need to open a transaction here. |
1417 | */ | 1417 | */ |
1418 | static int ext3_ordered_writepage(struct page *page, | 1418 | static int ext4_ordered_writepage(struct page *page, |
1419 | struct writeback_control *wbc) | 1419 | struct writeback_control *wbc) |
1420 | { | 1420 | { |
1421 | struct inode *inode = page->mapping->host; | 1421 | struct inode *inode = page->mapping->host; |
@@ -1430,10 +1430,10 @@ static int ext3_ordered_writepage(struct page *page, | |||
1430 | * We give up here if we're reentered, because it might be for a | 1430 | * We give up here if we're reentered, because it might be for a |
1431 | * different filesystem. | 1431 | * different filesystem. |
1432 | */ | 1432 | */ |
1433 | if (ext3_journal_current_handle()) | 1433 | if (ext4_journal_current_handle()) |
1434 | goto out_fail; | 1434 | goto out_fail; |
1435 | 1435 | ||
1436 | handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); | 1436 | handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); |
1437 | 1437 | ||
1438 | if (IS_ERR(handle)) { | 1438 | if (IS_ERR(handle)) { |
1439 | ret = PTR_ERR(handle); | 1439 | ret = PTR_ERR(handle); |
@@ -1448,7 +1448,7 @@ static int ext3_ordered_writepage(struct page *page, | |||
1448 | walk_page_buffers(handle, page_bufs, 0, | 1448 | walk_page_buffers(handle, page_bufs, 0, |
1449 | PAGE_CACHE_SIZE, NULL, bget_one); | 1449 | PAGE_CACHE_SIZE, NULL, bget_one); |
1450 | 1450 | ||
1451 | ret = block_write_full_page(page, ext3_get_block, wbc); | 1451 | ret = block_write_full_page(page, ext4_get_block, wbc); |
1452 | 1452 | ||
1453 | /* | 1453 | /* |
1454 | * The page can become unlocked at any point now, and | 1454 | * The page can become unlocked at any point now, and |
@@ -1470,7 +1470,7 @@ static int ext3_ordered_writepage(struct page *page, | |||
1470 | } | 1470 | } |
1471 | walk_page_buffers(handle, page_bufs, 0, | 1471 | walk_page_buffers(handle, page_bufs, 0, |
1472 | PAGE_CACHE_SIZE, NULL, bput_one); | 1472 | PAGE_CACHE_SIZE, NULL, bput_one); |
1473 | err = ext3_journal_stop(handle); | 1473 | err = ext4_journal_stop(handle); |
1474 | if (!ret) | 1474 | if (!ret) |
1475 | ret = err; | 1475 | ret = err; |
1476 | return ret; | 1476 | return ret; |
@@ -1481,7 +1481,7 @@ out_fail: | |||
1481 | return ret; | 1481 | return ret; |
1482 | } | 1482 | } |
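
Both writepage variants bail out when ext4_journal_current_handle() is non-NULL, i.e. when the calling task is already inside a transaction (possibly for a different filesystem), which is the reentrancy scenario the long comment above describes. Here is a small model of that per-task guard, using a plain pointer in place of the journal's current handle; the names are illustrative, not kernel API.

#include <stdio.h>

/* Stand-in for journal_current_handle(): non-NULL while "inside" a transaction. */
static void *current_handle;

static int model_writepage(int page)
{
	if (current_handle) {
		printf("page %d: reentered under a live handle, redirty and bail\n", page);
		return 0;
	}
	current_handle = &current_handle;	/* "start" a transaction */
	printf("page %d: written under a fresh transaction\n", page);
	if (page < 2)
		model_writepage(page + 1);	/* simulate allocation recursing into writepage */
	current_handle = NULL;			/* "stop" the transaction */
	return 0;
}

int main(void)
{
	return model_writepage(1);
}
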
1483 | 1483 | ||
1484 | static int ext3_writeback_writepage(struct page *page, | 1484 | static int ext4_writeback_writepage(struct page *page, |
1485 | struct writeback_control *wbc) | 1485 | struct writeback_control *wbc) |
1486 | { | 1486 | { |
1487 | struct inode *inode = page->mapping->host; | 1487 | struct inode *inode = page->mapping->host; |
@@ -1489,21 +1489,21 @@ static int ext3_writeback_writepage(struct page *page, | |||
1489 | int ret = 0; | 1489 | int ret = 0; |
1490 | int err; | 1490 | int err; |
1491 | 1491 | ||
1492 | if (ext3_journal_current_handle()) | 1492 | if (ext4_journal_current_handle()) |
1493 | goto out_fail; | 1493 | goto out_fail; |
1494 | 1494 | ||
1495 | handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); | 1495 | handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); |
1496 | if (IS_ERR(handle)) { | 1496 | if (IS_ERR(handle)) { |
1497 | ret = PTR_ERR(handle); | 1497 | ret = PTR_ERR(handle); |
1498 | goto out_fail; | 1498 | goto out_fail; |
1499 | } | 1499 | } |
1500 | 1500 | ||
1501 | if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode)) | 1501 | if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) |
1502 | ret = nobh_writepage(page, ext3_get_block, wbc); | 1502 | ret = nobh_writepage(page, ext4_get_block, wbc); |
1503 | else | 1503 | else |
1504 | ret = block_write_full_page(page, ext3_get_block, wbc); | 1504 | ret = block_write_full_page(page, ext4_get_block, wbc); |
1505 | 1505 | ||
1506 | err = ext3_journal_stop(handle); | 1506 | err = ext4_journal_stop(handle); |
1507 | if (!ret) | 1507 | if (!ret) |
1508 | ret = err; | 1508 | ret = err; |
1509 | return ret; | 1509 | return ret; |
@@ -1514,7 +1514,7 @@ out_fail: | |||
1514 | return ret; | 1514 | return ret; |
1515 | } | 1515 | } |
1516 | 1516 | ||
1517 | static int ext3_journalled_writepage(struct page *page, | 1517 | static int ext4_journalled_writepage(struct page *page, |
1518 | struct writeback_control *wbc) | 1518 | struct writeback_control *wbc) |
1519 | { | 1519 | { |
1520 | struct inode *inode = page->mapping->host; | 1520 | struct inode *inode = page->mapping->host; |
@@ -1522,10 +1522,10 @@ static int ext3_journalled_writepage(struct page *page, | |||
1522 | int ret = 0; | 1522 | int ret = 0; |
1523 | int err; | 1523 | int err; |
1524 | 1524 | ||
1525 | if (ext3_journal_current_handle()) | 1525 | if (ext4_journal_current_handle()) |
1526 | goto no_write; | 1526 | goto no_write; |
1527 | 1527 | ||
1528 | handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode)); | 1528 | handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); |
1529 | if (IS_ERR(handle)) { | 1529 | if (IS_ERR(handle)) { |
1530 | ret = PTR_ERR(handle); | 1530 | ret = PTR_ERR(handle); |
1531 | goto no_write; | 1531 | goto no_write; |
@@ -1538,9 +1538,9 @@ static int ext3_journalled_writepage(struct page *page, | |||
1538 | */ | 1538 | */ |
1539 | ClearPageChecked(page); | 1539 | ClearPageChecked(page); |
1540 | ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, | 1540 | ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, |
1541 | ext3_get_block); | 1541 | ext4_get_block); |
1542 | if (ret != 0) { | 1542 | if (ret != 0) { |
1543 | ext3_journal_stop(handle); | 1543 | ext4_journal_stop(handle); |
1544 | goto out_unlock; | 1544 | goto out_unlock; |
1545 | } | 1545 | } |
1546 | ret = walk_page_buffers(handle, page_buffers(page), 0, | 1546 | ret = walk_page_buffers(handle, page_buffers(page), 0, |
@@ -1550,7 +1550,7 @@ static int ext3_journalled_writepage(struct page *page, | |||
1550 | PAGE_CACHE_SIZE, NULL, commit_write_fn); | 1550 | PAGE_CACHE_SIZE, NULL, commit_write_fn); |
1551 | if (ret == 0) | 1551 | if (ret == 0) |
1552 | ret = err; | 1552 | ret = err; |
1553 | EXT3_I(inode)->i_state |= EXT3_STATE_JDATA; | 1553 | EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; |
1554 | unlock_page(page); | 1554 | unlock_page(page); |
1555 | } else { | 1555 | } else { |
1556 | /* | 1556 | /* |
@@ -1558,9 +1558,9 @@ static int ext3_journalled_writepage(struct page *page, | |||
1558 | * really know unless we go poke around in the buffer_heads. | 1558 | * really know unless we go poke around in the buffer_heads. |
1559 | * But block_write_full_page will do the right thing. | 1559 | * But block_write_full_page will do the right thing. |
1560 | */ | 1560 | */ |
1561 | ret = block_write_full_page(page, ext3_get_block, wbc); | 1561 | ret = block_write_full_page(page, ext4_get_block, wbc); |
1562 | } | 1562 | } |
1563 | err = ext3_journal_stop(handle); | 1563 | err = ext4_journal_stop(handle); |
1564 | if (!ret) | 1564 | if (!ret) |
1565 | ret = err; | 1565 | ret = err; |
1566 | out: | 1566 | out: |
@@ -1573,21 +1573,21 @@ out_unlock: | |||
1573 | goto out; | 1573 | goto out; |
1574 | } | 1574 | } |
1575 | 1575 | ||
1576 | static int ext3_readpage(struct file *file, struct page *page) | 1576 | static int ext4_readpage(struct file *file, struct page *page) |
1577 | { | 1577 | { |
1578 | return mpage_readpage(page, ext3_get_block); | 1578 | return mpage_readpage(page, ext4_get_block); |
1579 | } | 1579 | } |
1580 | 1580 | ||
1581 | static int | 1581 | static int |
1582 | ext3_readpages(struct file *file, struct address_space *mapping, | 1582 | ext4_readpages(struct file *file, struct address_space *mapping, |
1583 | struct list_head *pages, unsigned nr_pages) | 1583 | struct list_head *pages, unsigned nr_pages) |
1584 | { | 1584 | { |
1585 | return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); | 1585 | return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); |
1586 | } | 1586 | } |
1587 | 1587 | ||
1588 | static void ext3_invalidatepage(struct page *page, unsigned long offset) | 1588 | static void ext4_invalidatepage(struct page *page, unsigned long offset) |
1589 | { | 1589 | { |
1590 | journal_t *journal = EXT3_JOURNAL(page->mapping->host); | 1590 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); |
1591 | 1591 | ||
1592 | /* | 1592 | /* |
1593 | * If it's a full truncate we just forget about the pending dirtying | 1593 | * If it's a full truncate we just forget about the pending dirtying |
@@ -1598,9 +1598,9 @@ static void ext3_invalidatepage(struct page *page, unsigned long offset) | |||
1598 | journal_invalidatepage(journal, page, offset); | 1598 | journal_invalidatepage(journal, page, offset); |
1599 | } | 1599 | } |
1600 | 1600 | ||
1601 | static int ext3_releasepage(struct page *page, gfp_t wait) | 1601 | static int ext4_releasepage(struct page *page, gfp_t wait) |
1602 | { | 1602 | { |
1603 | journal_t *journal = EXT3_JOURNAL(page->mapping->host); | 1603 | journal_t *journal = EXT4_JOURNAL(page->mapping->host); |
1604 | 1604 | ||
1605 | WARN_ON(PageChecked(page)); | 1605 | WARN_ON(PageChecked(page)); |
1606 | if (!page_has_buffers(page)) | 1606 | if (!page_has_buffers(page)) |
@@ -1616,13 +1616,13 @@ static int ext3_releasepage(struct page *page, gfp_t wait) | |||
1616 | * If the O_DIRECT write is instantiating holes inside i_size and the machine | 1616 | * If the O_DIRECT write is instantiating holes inside i_size and the machine |
1617 | * crashes then stale disk data _may_ be exposed inside the file. | 1617 | * crashes then stale disk data _may_ be exposed inside the file. |
1618 | */ | 1618 | */ |
1619 | static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | 1619 | static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, |
1620 | const struct iovec *iov, loff_t offset, | 1620 | const struct iovec *iov, loff_t offset, |
1621 | unsigned long nr_segs) | 1621 | unsigned long nr_segs) |
1622 | { | 1622 | { |
1623 | struct file *file = iocb->ki_filp; | 1623 | struct file *file = iocb->ki_filp; |
1624 | struct inode *inode = file->f_mapping->host; | 1624 | struct inode *inode = file->f_mapping->host; |
1625 | struct ext3_inode_info *ei = EXT3_I(inode); | 1625 | struct ext4_inode_info *ei = EXT4_I(inode); |
1626 | handle_t *handle = NULL; | 1626 | handle_t *handle = NULL; |
1627 | ssize_t ret; | 1627 | ssize_t ret; |
1628 | int orphan = 0; | 1628 | int orphan = 0; |
@@ -1631,13 +1631,13 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1631 | if (rw == WRITE) { | 1631 | if (rw == WRITE) { |
1632 | loff_t final_size = offset + count; | 1632 | loff_t final_size = offset + count; |
1633 | 1633 | ||
1634 | handle = ext3_journal_start(inode, DIO_CREDITS); | 1634 | handle = ext4_journal_start(inode, DIO_CREDITS); |
1635 | if (IS_ERR(handle)) { | 1635 | if (IS_ERR(handle)) { |
1636 | ret = PTR_ERR(handle); | 1636 | ret = PTR_ERR(handle); |
1637 | goto out; | 1637 | goto out; |
1638 | } | 1638 | } |
1639 | if (final_size > inode->i_size) { | 1639 | if (final_size > inode->i_size) { |
1640 | ret = ext3_orphan_add(handle, inode); | 1640 | ret = ext4_orphan_add(handle, inode); |
1641 | if (ret) | 1641 | if (ret) |
1642 | goto out_stop; | 1642 | goto out_stop; |
1643 | orphan = 1; | 1643 | orphan = 1; |
@@ -1647,10 +1647,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1647 | 1647 | ||
1648 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 1648 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
1649 | offset, nr_segs, | 1649 | offset, nr_segs, |
1650 | ext3_get_block, NULL); | 1650 | ext4_get_block, NULL); |
1651 | 1651 | ||
1652 | /* | 1652 | /* |
1653 | * Reacquire the handle: ext3_get_block() can restart the transaction | 1653 | * Reacquire the handle: ext4_get_block() can restart the transaction |
1654 | */ | 1654 | */ |
1655 | handle = journal_current_handle(); | 1655 | handle = journal_current_handle(); |
1656 | 1656 | ||
@@ -1659,7 +1659,7 @@ out_stop: | |||
1659 | int err; | 1659 | int err; |
1660 | 1660 | ||
1661 | if (orphan && inode->i_nlink) | 1661 | if (orphan && inode->i_nlink) |
1662 | ext3_orphan_del(handle, inode); | 1662 | ext4_orphan_del(handle, inode); |
1663 | if (orphan && ret > 0) { | 1663 | if (orphan && ret > 0) { |
1664 | loff_t end = offset + ret; | 1664 | loff_t end = offset + ret; |
1665 | if (end > inode->i_size) { | 1665 | if (end > inode->i_size) { |
@@ -1669,13 +1669,13 @@ out_stop: | |||
1669 | * We're going to return a positive `ret' | 1669 | * We're going to return a positive `ret' |
1670 | * here due to non-zero-length I/O, so there's | 1670 | * here due to non-zero-length I/O, so there's |
1671 | * no way of reporting error returns from | 1671 | * no way of reporting error returns from |
1672 | * ext3_mark_inode_dirty() to userspace. So | 1672 | * ext4_mark_inode_dirty() to userspace. So |
1673 | * ignore it. | 1673 | * ignore it. |
1674 | */ | 1674 | */ |
1675 | ext3_mark_inode_dirty(handle, inode); | 1675 | ext4_mark_inode_dirty(handle, inode); |
1676 | } | 1676 | } |
1677 | } | 1677 | } |
1678 | err = ext3_journal_stop(handle); | 1678 | err = ext4_journal_stop(handle); |
1679 | if (ret == 0) | 1679 | if (ret == 0) |
1680 | ret = err; | 1680 | ret = err; |
1681 | } | 1681 | } |
@@ -1684,7 +1684,7 @@ out: | |||
1684 | } | 1684 | } |
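
The inode goes onto the orphan list only when the O_DIRECT write can extend the file, i.e. when final_size = offset + count exceeds i_size, so a crash in the middle cannot leave newly instantiated blocks dangling past the old size. A small worked model of that decision and of the post-IO size update, in plain C rather than the kernel path:

#include <stdio.h>

int main(void)
{
	long long i_size = 100000;	/* current file size */
	long long offset = 98304;	/* where the O_DIRECT write starts */
	long long count  = 8192;	/* bytes requested */
	long long ret    = 8192;	/* bytes the direct IO actually wrote */
	long long final_size = offset + count;
	int orphan = final_size > i_size;	/* protect the extension window */

	printf("orphan protection needed: %s\n", orphan ? "yes" : "no");

	if (orphan && ret > 0) {
		long long end = offset + ret;
		if (end > i_size)
			i_size = end;	/* 98304 + 8192 = 106496 */
	}
	printf("i_size after IO: %lld\n", i_size);
	return 0;
}
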
1685 | 1685 | ||
1686 | /* | 1686 | /* |
1687 | * Pages can be marked dirty completely asynchronously from ext3's journalling | 1687 | * Pages can be marked dirty completely asynchronously from ext4's journalling |
1688 | * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do | 1688 | * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do |
1689 | * much here because ->set_page_dirty is called under VFS locks. The page is | 1689 | * much here because ->set_page_dirty is called under VFS locks. The page is |
1690 | * not necessarily locked. | 1690 | * not necessarily locked. |
@@ -1696,73 +1696,73 @@ out: | |||
1696 | * So what we do is to mark the page "pending dirty" and next time writepage | 1696 | * So what we do is to mark the page "pending dirty" and next time writepage |
1697 | * is called, propagate that into the buffers appropriately. | 1697 | * is called, propagate that into the buffers appropriately. |
1698 | */ | 1698 | */ |
1699 | static int ext3_journalled_set_page_dirty(struct page *page) | 1699 | static int ext4_journalled_set_page_dirty(struct page *page) |
1700 | { | 1700 | { |
1701 | SetPageChecked(page); | 1701 | SetPageChecked(page); |
1702 | return __set_page_dirty_nobuffers(page); | 1702 | return __set_page_dirty_nobuffers(page); |
1703 | } | 1703 | } |
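
For data=journal the dirty bit can arrive with no useful locks held, so the work is split in two: set_page_dirty only records a pending mark (PageChecked) and the next writepage call consumes it to pick between the fully journalled path and a plain block_write_full_page. A self-contained model of that two-step protocol; the field names are illustrative.

#include <stdio.h>
#include <stdbool.h>

struct page_model {
	bool dirty;
	bool checked;	/* "pending dirty", PageChecked in the kernel */
};

/* Called asynchronously, possibly under VFS locks: do the minimum. */
static void model_set_page_dirty(struct page_model *p)
{
	p->checked = true;
	p->dirty = true;
}

/* Called later from writeback: consume the pending mark. */
static void model_writepage(struct page_model *p)
{
	if (p->checked) {
		p->checked = false;
		printf("journalled write of the whole page\n");
	} else {
		printf("ordinary block_write_full_page-style write\n");
	}
	p->dirty = false;
}

int main(void)
{
	struct page_model p = { 0 };

	model_set_page_dirty(&p);
	model_writepage(&p);	/* takes the journalled path */
	p.dirty = true;		/* dirtied again through some other path */
	model_writepage(&p);	/* takes the ordinary path */
	return 0;
}
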
1704 | 1704 | ||
1705 | static const struct address_space_operations ext3_ordered_aops = { | 1705 | static const struct address_space_operations ext4_ordered_aops = { |
1706 | .readpage = ext3_readpage, | 1706 | .readpage = ext4_readpage, |
1707 | .readpages = ext3_readpages, | 1707 | .readpages = ext4_readpages, |
1708 | .writepage = ext3_ordered_writepage, | 1708 | .writepage = ext4_ordered_writepage, |
1709 | .sync_page = block_sync_page, | 1709 | .sync_page = block_sync_page, |
1710 | .prepare_write = ext3_prepare_write, | 1710 | .prepare_write = ext4_prepare_write, |
1711 | .commit_write = ext3_ordered_commit_write, | 1711 | .commit_write = ext4_ordered_commit_write, |
1712 | .bmap = ext3_bmap, | 1712 | .bmap = ext4_bmap, |
1713 | .invalidatepage = ext3_invalidatepage, | 1713 | .invalidatepage = ext4_invalidatepage, |
1714 | .releasepage = ext3_releasepage, | 1714 | .releasepage = ext4_releasepage, |
1715 | .direct_IO = ext3_direct_IO, | 1715 | .direct_IO = ext4_direct_IO, |
1716 | .migratepage = buffer_migrate_page, | 1716 | .migratepage = buffer_migrate_page, |
1717 | }; | 1717 | }; |
1718 | 1718 | ||
1719 | static const struct address_space_operations ext3_writeback_aops = { | 1719 | static const struct address_space_operations ext4_writeback_aops = { |
1720 | .readpage = ext3_readpage, | 1720 | .readpage = ext4_readpage, |
1721 | .readpages = ext3_readpages, | 1721 | .readpages = ext4_readpages, |
1722 | .writepage = ext3_writeback_writepage, | 1722 | .writepage = ext4_writeback_writepage, |
1723 | .sync_page = block_sync_page, | 1723 | .sync_page = block_sync_page, |
1724 | .prepare_write = ext3_prepare_write, | 1724 | .prepare_write = ext4_prepare_write, |
1725 | .commit_write = ext3_writeback_commit_write, | 1725 | .commit_write = ext4_writeback_commit_write, |
1726 | .bmap = ext3_bmap, | 1726 | .bmap = ext4_bmap, |
1727 | .invalidatepage = ext3_invalidatepage, | 1727 | .invalidatepage = ext4_invalidatepage, |
1728 | .releasepage = ext3_releasepage, | 1728 | .releasepage = ext4_releasepage, |
1729 | .direct_IO = ext3_direct_IO, | 1729 | .direct_IO = ext4_direct_IO, |
1730 | .migratepage = buffer_migrate_page, | 1730 | .migratepage = buffer_migrate_page, |
1731 | }; | 1731 | }; |
1732 | 1732 | ||
1733 | static const struct address_space_operations ext3_journalled_aops = { | 1733 | static const struct address_space_operations ext4_journalled_aops = { |
1734 | .readpage = ext3_readpage, | 1734 | .readpage = ext4_readpage, |
1735 | .readpages = ext3_readpages, | 1735 | .readpages = ext4_readpages, |
1736 | .writepage = ext3_journalled_writepage, | 1736 | .writepage = ext4_journalled_writepage, |
1737 | .sync_page = block_sync_page, | 1737 | .sync_page = block_sync_page, |
1738 | .prepare_write = ext3_prepare_write, | 1738 | .prepare_write = ext4_prepare_write, |
1739 | .commit_write = ext3_journalled_commit_write, | 1739 | .commit_write = ext4_journalled_commit_write, |
1740 | .set_page_dirty = ext3_journalled_set_page_dirty, | 1740 | .set_page_dirty = ext4_journalled_set_page_dirty, |
1741 | .bmap = ext3_bmap, | 1741 | .bmap = ext4_bmap, |
1742 | .invalidatepage = ext3_invalidatepage, | 1742 | .invalidatepage = ext4_invalidatepage, |
1743 | .releasepage = ext3_releasepage, | 1743 | .releasepage = ext4_releasepage, |
1744 | }; | 1744 | }; |
1745 | 1745 | ||
1746 | void ext3_set_aops(struct inode *inode) | 1746 | void ext4_set_aops(struct inode *inode) |
1747 | { | 1747 | { |
1748 | if (ext3_should_order_data(inode)) | 1748 | if (ext4_should_order_data(inode)) |
1749 | inode->i_mapping->a_ops = &ext3_ordered_aops; | 1749 | inode->i_mapping->a_ops = &ext4_ordered_aops; |
1750 | else if (ext3_should_writeback_data(inode)) | 1750 | else if (ext4_should_writeback_data(inode)) |
1751 | inode->i_mapping->a_ops = &ext3_writeback_aops; | 1751 | inode->i_mapping->a_ops = &ext4_writeback_aops; |
1752 | else | 1752 | else |
1753 | inode->i_mapping->a_ops = &ext3_journalled_aops; | 1753 | inode->i_mapping->a_ops = &ext4_journalled_aops; |
1754 | } | 1754 | } |
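
ext4_set_aops() is the one place where the three operation tables above are selected; from then on the VFS calls through inode->i_mapping->a_ops without caring which journaling mode is active. A minimal model of that table-of-function-pointers dispatch; the struct and function names below are illustrative, not the kernel's.

#include <stdio.h>

struct aops {
	void (*writepage)(const char *who);
};

static void ordered_writepage(const char *w)    { printf("%s: ordered writepage\n", w); }
static void writeback_writepage(const char *w)  { printf("%s: writeback writepage\n", w); }
static void journalled_writepage(const char *w) { printf("%s: journalled writepage\n", w); }

static const struct aops ordered_aops    = { .writepage = ordered_writepage };
static const struct aops writeback_aops  = { .writepage = writeback_writepage };
static const struct aops journalled_aops = { .writepage = journalled_writepage };

enum data_mode { ORDERED, WRITEBACK, JOURNAL };

/* Mirrors the if/else chain in ext4_set_aops(). */
static const struct aops *pick_aops(enum data_mode mode)
{
	if (mode == ORDERED)
		return &ordered_aops;
	else if (mode == WRITEBACK)
		return &writeback_aops;
	else
		return &journalled_aops;
}

int main(void)
{
	const struct aops *a = pick_aops(ORDERED);

	a->writepage("inode 42");	/* generic caller, no mode knowledge needed */
	return 0;
}
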
1755 | 1755 | ||
1756 | /* | 1756 | /* |
1757 | * ext3_block_truncate_page() zeroes out a mapping from file offset `from' | 1757 | * ext4_block_truncate_page() zeroes out a mapping from file offset `from' |
1758 | * up to the end of the block which corresponds to `from'. | 1758 | * up to the end of the block which corresponds to `from'. |
1759 | * This is required during truncate. We need to physically zero the tail end | 1759 | * This is required during truncate. We need to physically zero the tail end |
1760 | * of that block so it doesn't yield old data if the file is later grown. | 1760 | * of that block so it doesn't yield old data if the file is later grown. |
1761 | */ | 1761 | */ |
1762 | static int ext3_block_truncate_page(handle_t *handle, struct page *page, | 1762 | static int ext4_block_truncate_page(handle_t *handle, struct page *page, |
1763 | struct address_space *mapping, loff_t from) | 1763 | struct address_space *mapping, loff_t from) |
1764 | { | 1764 | { |
1765 | ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT; | 1765 | ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; |
1766 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 1766 | unsigned offset = from & (PAGE_CACHE_SIZE-1); |
1767 | unsigned blocksize, iblock, length, pos; | 1767 | unsigned blocksize, iblock, length, pos; |
1768 | struct inode *inode = mapping->host; | 1768 | struct inode *inode = mapping->host; |
@@ -1779,7 +1779,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page, | |||
1779 | * read-in the page - otherwise we create buffers to do the IO. | 1779 | * read-in the page - otherwise we create buffers to do the IO. |
1780 | */ | 1780 | */ |
1781 | if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && | 1781 | if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && |
1782 | ext3_should_writeback_data(inode) && PageUptodate(page)) { | 1782 | ext4_should_writeback_data(inode) && PageUptodate(page)) { |
1783 | kaddr = kmap_atomic(page, KM_USER0); | 1783 | kaddr = kmap_atomic(page, KM_USER0); |
1784 | memset(kaddr + offset, 0, length); | 1784 | memset(kaddr + offset, 0, length); |
1785 | flush_dcache_page(page); | 1785 | flush_dcache_page(page); |
@@ -1808,7 +1808,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page, | |||
1808 | 1808 | ||
1809 | if (!buffer_mapped(bh)) { | 1809 | if (!buffer_mapped(bh)) { |
1810 | BUFFER_TRACE(bh, "unmapped"); | 1810 | BUFFER_TRACE(bh, "unmapped"); |
1811 | ext3_get_block(inode, iblock, bh, 0); | 1811 | ext4_get_block(inode, iblock, bh, 0); |
1812 | /* unmapped? It's a hole - nothing to do */ | 1812 | /* unmapped? It's a hole - nothing to do */ |
1813 | if (!buffer_mapped(bh)) { | 1813 | if (!buffer_mapped(bh)) { |
1814 | BUFFER_TRACE(bh, "still unmapped"); | 1814 | BUFFER_TRACE(bh, "still unmapped"); |
@@ -1829,9 +1829,9 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page, | |||
1829 | goto unlock; | 1829 | goto unlock; |
1830 | } | 1830 | } |
1831 | 1831 | ||
1832 | if (ext3_should_journal_data(inode)) { | 1832 | if (ext4_should_journal_data(inode)) { |
1833 | BUFFER_TRACE(bh, "get write access"); | 1833 | BUFFER_TRACE(bh, "get write access"); |
1834 | err = ext3_journal_get_write_access(handle, bh); | 1834 | err = ext4_journal_get_write_access(handle, bh); |
1835 | if (err) | 1835 | if (err) |
1836 | goto unlock; | 1836 | goto unlock; |
1837 | } | 1837 | } |
@@ -1844,11 +1844,11 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page, | |||
1844 | BUFFER_TRACE(bh, "zeroed end of block"); | 1844 | BUFFER_TRACE(bh, "zeroed end of block"); |
1845 | 1845 | ||
1846 | err = 0; | 1846 | err = 0; |
1847 | if (ext3_should_journal_data(inode)) { | 1847 | if (ext4_should_journal_data(inode)) { |
1848 | err = ext3_journal_dirty_metadata(handle, bh); | 1848 | err = ext4_journal_dirty_metadata(handle, bh); |
1849 | } else { | 1849 | } else { |
1850 | if (ext3_should_order_data(inode)) | 1850 | if (ext4_should_order_data(inode)) |
1851 | err = ext3_journal_dirty_data(handle, bh); | 1851 | err = ext4_journal_dirty_data(handle, bh); |
1852 | mark_buffer_dirty(bh); | 1852 | mark_buffer_dirty(bh); |
1853 | } | 1853 | } |
1854 | 1854 | ||
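
The job of ext4_block_truncate_page() is narrow: zero from the new end of file to the end of the filesystem block containing it, so that growing the file later cannot expose stale bytes. A worked example of the arithmetic, assuming 4096-byte pages and a 1024-byte blocksize:

#include <stdio.h>

int main(void)
{
	const unsigned page_size = 4096, blocksize = 1024;	/* assumed geometry */
	unsigned long long from = 5000;				/* new i_size after truncate */
	unsigned long long page_index = from / page_size;	/* page 1 */
	unsigned offset = from & (page_size - 1);		/* 904 bytes into that page */
	unsigned in_block = offset & (blocksize - 1);		/* 904 bytes into its block */
	unsigned length = blocksize - in_block;			/* bytes to zero: 120 */

	printf("page %llu: zero %u bytes at offset %u (through byte %u of the page)\n",
	       page_index, length, offset, offset + length);
	return 0;
}
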
@@ -1872,14 +1872,14 @@ static inline int all_zeroes(__le32 *p, __le32 *q) | |||
1872 | } | 1872 | } |
1873 | 1873 | ||
1874 | /** | 1874 | /** |
1875 | * ext3_find_shared - find the indirect blocks for partial truncation. | 1875 | * ext4_find_shared - find the indirect blocks for partial truncation. |
1876 | * @inode: inode in question | 1876 | * @inode: inode in question |
1877 | * @depth: depth of the affected branch | 1877 | * @depth: depth of the affected branch |
1878 | * @offsets: offsets of pointers in that branch (see ext3_block_to_path) | 1878 | * @offsets: offsets of pointers in that branch (see ext4_block_to_path) |
1879 | * @chain: place to store the pointers to partial indirect blocks | 1879 | * @chain: place to store the pointers to partial indirect blocks |
1880 | * @top: place to the (detached) top of branch | 1880 | * @top: place to the (detached) top of branch |
1881 | * | 1881 | * |
1882 | * This is a helper function used by ext3_truncate(). | 1882 | * This is a helper function used by ext4_truncate(). |
1883 | * | 1883 | * |
1884 | * When we do truncate() we may have to clean the ends of several | 1884 | * When we do truncate() we may have to clean the ends of several |
1885 | * indirect blocks but leave the blocks themselves alive. Block is | 1885 | * indirect blocks but leave the blocks themselves alive. Block is |
@@ -1887,7 +1887,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q) | |||
1887 | * from it (and it is on the path to the first completely truncated | 1887 | * from it (and it is on the path to the first completely truncated |
1888 | * data block, indeed). We have to free the top of that path along | 1888 | * data block, indeed). We have to free the top of that path along |
1889 | * with everything to the right of the path. Since no allocation | 1889 | * with everything to the right of the path. Since no allocation |
1890 | * past the truncation point is possible until ext3_truncate() | 1890 | * past the truncation point is possible until ext4_truncate() |
1891 | * finishes, we may safely do the latter, but top of branch may | 1891 | * finishes, we may safely do the latter, but top of branch may |
1892 | * require special attention - pageout below the truncation point | 1892 | * require special attention - pageout below the truncation point |
1893 | * might try to populate it. | 1893 | * might try to populate it. |
@@ -1906,7 +1906,7 @@ static inline int all_zeroes(__le32 *p, __le32 *q) | |||
1906 | * c) free the subtrees growing from the inode past the @chain[0]. | 1906 | * c) free the subtrees growing from the inode past the @chain[0]. |
1907 | * (no partially truncated stuff there). */ | 1907 | * (no partially truncated stuff there). */ |
1908 | 1908 | ||
1909 | static Indirect *ext3_find_shared(struct inode *inode, int depth, | 1909 | static Indirect *ext4_find_shared(struct inode *inode, int depth, |
1910 | int offsets[4], Indirect chain[4], __le32 *top) | 1910 | int offsets[4], Indirect chain[4], __le32 *top) |
1911 | { | 1911 | { |
1912 | Indirect *partial, *p; | 1912 | Indirect *partial, *p; |
@@ -1916,7 +1916,7 @@ static Indirect *ext3_find_shared(struct inode *inode, int depth, | |||
1916 | /* Make k index the deepest non-null offset + 1 */ | 1916 | /* Make k index the deepest non-null offset + 1 */ |
1917 | for (k = depth; k > 1 && !offsets[k-1]; k--) | 1917 | for (k = depth; k > 1 && !offsets[k-1]; k--) |
1918 | ; | 1918 | ; |
1919 | partial = ext3_get_branch(inode, k, offsets, chain, &err); | 1919 | partial = ext4_get_branch(inode, k, offsets, chain, &err); |
1920 | /* Writer: pointers */ | 1920 | /* Writer: pointers */ |
1921 | if (!partial) | 1921 | if (!partial) |
1922 | partial = chain + k-1; | 1922 | partial = chain + k-1; |
@@ -1939,7 +1939,7 @@ static Indirect *ext3_find_shared(struct inode *inode, int depth, | |||
1939 | p->p--; | 1939 | p->p--; |
1940 | } else { | 1940 | } else { |
1941 | *top = *p->p; | 1941 | *top = *p->p; |
1942 | /* Nope, don't do this in ext3. Must leave the tree intact */ | 1942 | /* Nope, don't do this in ext4. Must leave the tree intact */ |
1943 | #if 0 | 1943 | #if 0 |
1944 | *p->p = 0; | 1944 | *p->p = 0; |
1945 | #endif | 1945 | #endif |
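
The @offsets array that ext4_find_shared() and ext4_truncate() operate on comes from ext4_block_to_path(): a logical block number is decomposed into at most four indices, one slot in the inode plus one index per level of indirection. A simplified, self-contained re-derivation for the classic layout; the 12 direct slots and 1024 pointers per indirect block (4 KiB blocksize) are assumptions, not read from this diff.

#include <stdio.h>

#define NDIR	12	/* direct block slots in the inode (assumed layout) */
#define PTRS	1024	/* block pointers per indirect block at 4 KiB blocksize */

/* Fill offsets[] and return the depth, mimicking what block_to_path computes. */
static int block_to_path(unsigned long long blk, unsigned offsets[4])
{
	if (blk < NDIR) {
		offsets[0] = blk;
		return 1;
	}
	blk -= NDIR;
	if (blk < PTRS) {
		offsets[0] = NDIR;	offsets[1] = blk;
		return 2;
	}
	blk -= PTRS;
	if (blk < (unsigned long long)PTRS * PTRS) {
		offsets[0] = NDIR + 1;	offsets[1] = blk / PTRS;
		offsets[2] = blk % PTRS;
		return 3;
	}
	blk -= (unsigned long long)PTRS * PTRS;
	offsets[0] = NDIR + 2;		offsets[1] = blk / PTRS / PTRS;
	offsets[2] = (blk / PTRS) % PTRS;
	offsets[3] = blk % PTRS;
	return 4;
}

int main(void)
{
	unsigned off[4];
	int i, n = block_to_path(5000, off);	/* well inside the double-indirect range */

	printf("depth %d:", n);
	for (i = 0; i < n; i++)
		printf(" %u", off[i]);
	printf("\n");				/* depth 3: 13 3 892 */
	return 0;
}
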
@@ -1962,21 +1962,21 @@ no_top: | |||
1962 | * We release `count' blocks on disk, but (last - first) may be greater | 1962 | * We release `count' blocks on disk, but (last - first) may be greater |
1963 | * than `count' because there can be holes in there. | 1963 | * than `count' because there can be holes in there. |
1964 | */ | 1964 | */ |
1965 | static void ext3_clear_blocks(handle_t *handle, struct inode *inode, | 1965 | static void ext4_clear_blocks(handle_t *handle, struct inode *inode, |
1966 | struct buffer_head *bh, ext3_fsblk_t block_to_free, | 1966 | struct buffer_head *bh, ext4_fsblk_t block_to_free, |
1967 | unsigned long count, __le32 *first, __le32 *last) | 1967 | unsigned long count, __le32 *first, __le32 *last) |
1968 | { | 1968 | { |
1969 | __le32 *p; | 1969 | __le32 *p; |
1970 | if (try_to_extend_transaction(handle, inode)) { | 1970 | if (try_to_extend_transaction(handle, inode)) { |
1971 | if (bh) { | 1971 | if (bh) { |
1972 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | 1972 | BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); |
1973 | ext3_journal_dirty_metadata(handle, bh); | 1973 | ext4_journal_dirty_metadata(handle, bh); |
1974 | } | 1974 | } |
1975 | ext3_mark_inode_dirty(handle, inode); | 1975 | ext4_mark_inode_dirty(handle, inode); |
1976 | ext3_journal_test_restart(handle, inode); | 1976 | ext4_journal_test_restart(handle, inode); |
1977 | if (bh) { | 1977 | if (bh) { |
1978 | BUFFER_TRACE(bh, "retaking write access"); | 1978 | BUFFER_TRACE(bh, "retaking write access"); |
1979 | ext3_journal_get_write_access(handle, bh); | 1979 | ext4_journal_get_write_access(handle, bh); |
1980 | } | 1980 | } |
1981 | } | 1981 | } |
1982 | 1982 | ||
@@ -1995,15 +1995,15 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode, | |||
1995 | 1995 | ||
1996 | *p = 0; | 1996 | *p = 0; |
1997 | bh = sb_find_get_block(inode->i_sb, nr); | 1997 | bh = sb_find_get_block(inode->i_sb, nr); |
1998 | ext3_forget(handle, 0, inode, bh, nr); | 1998 | ext4_forget(handle, 0, inode, bh, nr); |
1999 | } | 1999 | } |
2000 | } | 2000 | } |
2001 | 2001 | ||
2002 | ext3_free_blocks(handle, inode, block_to_free, count); | 2002 | ext4_free_blocks(handle, inode, block_to_free, count); |
2003 | } | 2003 | } |
2004 | 2004 | ||
2005 | /** | 2005 | /** |
2006 | * ext3_free_data - free a list of data blocks | 2006 | * ext4_free_data - free a list of data blocks |
2007 | * @handle: handle for this transaction | 2007 | * @handle: handle for this transaction |
2008 | * @inode: inode we are dealing with | 2008 | * @inode: inode we are dealing with |
2009 | * @this_bh: indirect buffer_head which contains *@first and *@last | 2009 | * @this_bh: indirect buffer_head which contains *@first and *@last |
@@ -2021,23 +2021,23 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode, | |||
2021 | * @this_bh will be %NULL if @first and @last point into the inode's direct | 2021 | * @this_bh will be %NULL if @first and @last point into the inode's direct |
2022 | * block pointers. | 2022 | * block pointers. |
2023 | */ | 2023 | */ |
2024 | static void ext3_free_data(handle_t *handle, struct inode *inode, | 2024 | static void ext4_free_data(handle_t *handle, struct inode *inode, |
2025 | struct buffer_head *this_bh, | 2025 | struct buffer_head *this_bh, |
2026 | __le32 *first, __le32 *last) | 2026 | __le32 *first, __le32 *last) |
2027 | { | 2027 | { |
2028 | ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */ | 2028 | ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ |
2029 | unsigned long count = 0; /* Number of blocks in the run */ | 2029 | unsigned long count = 0; /* Number of blocks in the run */ |
2030 | __le32 *block_to_free_p = NULL; /* Pointer into inode/ind | 2030 | __le32 *block_to_free_p = NULL; /* Pointer into inode/ind |
2031 | corresponding to | 2031 | corresponding to |
2032 | block_to_free */ | 2032 | block_to_free */ |
2033 | ext3_fsblk_t nr; /* Current block # */ | 2033 | ext4_fsblk_t nr; /* Current block # */ |
2034 | __le32 *p; /* Pointer into inode/ind | 2034 | __le32 *p; /* Pointer into inode/ind |
2035 | for current block */ | 2035 | for current block */ |
2036 | int err; | 2036 | int err; |
2037 | 2037 | ||
2038 | if (this_bh) { /* For indirect block */ | 2038 | if (this_bh) { /* For indirect block */ |
2039 | BUFFER_TRACE(this_bh, "get_write_access"); | 2039 | BUFFER_TRACE(this_bh, "get_write_access"); |
2040 | err = ext3_journal_get_write_access(handle, this_bh); | 2040 | err = ext4_journal_get_write_access(handle, this_bh); |
2041 | /* Important: if we can't update the indirect pointers | 2041 | /* Important: if we can't update the indirect pointers |
2042 | * to the blocks, we can't free them. */ | 2042 | * to the blocks, we can't free them. */ |
2043 | if (err) | 2043 | if (err) |
@@ -2055,7 +2055,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode, | |||
2055 | } else if (nr == block_to_free + count) { | 2055 | } else if (nr == block_to_free + count) { |
2056 | count++; | 2056 | count++; |
2057 | } else { | 2057 | } else { |
2058 | ext3_clear_blocks(handle, inode, this_bh, | 2058 | ext4_clear_blocks(handle, inode, this_bh, |
2059 | block_to_free, | 2059 | block_to_free, |
2060 | count, block_to_free_p, p); | 2060 | count, block_to_free_p, p); |
2061 | block_to_free = nr; | 2061 | block_to_free = nr; |
@@ -2066,17 +2066,17 @@ static void ext3_free_data(handle_t *handle, struct inode *inode, | |||
2066 | } | 2066 | } |
2067 | 2067 | ||
2068 | if (count > 0) | 2068 | if (count > 0) |
2069 | ext3_clear_blocks(handle, inode, this_bh, block_to_free, | 2069 | ext4_clear_blocks(handle, inode, this_bh, block_to_free, |
2070 | count, block_to_free_p, p); | 2070 | count, block_to_free_p, p); |
2071 | 2071 | ||
2072 | if (this_bh) { | 2072 | if (this_bh) { |
2073 | BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata"); | 2073 | BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata"); |
2074 | ext3_journal_dirty_metadata(handle, this_bh); | 2074 | ext4_journal_dirty_metadata(handle, this_bh); |
2075 | } | 2075 | } |
2076 | } | 2076 | } |
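
ext4_free_data() does not issue one free per pointer: it scans the array, skips holes (zero entries) and batches physically consecutive blocks into a single (start, count) free, which is why `count' can be smaller than last - first. A self-contained model of that coalescing scan:

#include <stdio.h>

static void free_run(unsigned long start, unsigned long count)
{
	printf("free %lu block(s) starting at %lu\n", count, start);
}

int main(void)
{
	/* Block pointers as they might sit in an indirect block: zeros are holes. */
	unsigned long blocks[] = { 100, 101, 102, 0, 0, 200, 201, 150, 151, 152 };
	unsigned long start = 0, count = 0;
	unsigned i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		unsigned long nr = blocks[i];

		if (!nr)
			continue;			/* hole: nothing allocated here */
		if (count == 0) {
			start = nr; count = 1;		/* begin a new run */
		} else if (nr == start + count) {
			count++;			/* extends the current run */
		} else {
			free_run(start, count);		/* flush the finished run */
			start = nr; count = 1;
		}
	}
	if (count)
		free_run(start, count);
	return 0;	/* prints runs 100 x3, 200 x2, 150 x3 */
}
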
2077 | 2077 | ||
2078 | /** | 2078 | /** |
2079 | * ext3_free_branches - free an array of branches | 2079 | * ext4_free_branches - free an array of branches |
2080 | * @handle: JBD handle for this transaction | 2080 | * @handle: JBD handle for this transaction |
2081 | * @inode: inode we are dealing with | 2081 | * @inode: inode we are dealing with |
2082 | * @parent_bh: the buffer_head which contains *@first and *@last | 2082 | * @parent_bh: the buffer_head which contains *@first and *@last |
@@ -2088,11 +2088,11 @@ static void ext3_free_data(handle_t *handle, struct inode *inode, | |||
2088 | * stored as little-endian 32-bit) and updating @inode->i_blocks | 2088 | * stored as little-endian 32-bit) and updating @inode->i_blocks |
2089 | * appropriately. | 2089 | * appropriately. |
2090 | */ | 2090 | */ |
2091 | static void ext3_free_branches(handle_t *handle, struct inode *inode, | 2091 | static void ext4_free_branches(handle_t *handle, struct inode *inode, |
2092 | struct buffer_head *parent_bh, | 2092 | struct buffer_head *parent_bh, |
2093 | __le32 *first, __le32 *last, int depth) | 2093 | __le32 *first, __le32 *last, int depth) |
2094 | { | 2094 | { |
2095 | ext3_fsblk_t nr; | 2095 | ext4_fsblk_t nr; |
2096 | __le32 *p; | 2096 | __le32 *p; |
2097 | 2097 | ||
2098 | if (is_handle_aborted(handle)) | 2098 | if (is_handle_aborted(handle)) |
@@ -2100,7 +2100,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2100 | 2100 | ||
2101 | if (depth--) { | 2101 | if (depth--) { |
2102 | struct buffer_head *bh; | 2102 | struct buffer_head *bh; |
2103 | int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); | 2103 | int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); |
2104 | p = last; | 2104 | p = last; |
2105 | while (--p >= first) { | 2105 | while (--p >= first) { |
2106 | nr = le32_to_cpu(*p); | 2106 | nr = le32_to_cpu(*p); |
@@ -2115,7 +2115,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2115 | * (should be rare). | 2115 | * (should be rare). |
2116 | */ | 2116 | */ |
2117 | if (!bh) { | 2117 | if (!bh) { |
2118 | ext3_error(inode->i_sb, "ext3_free_branches", | 2118 | ext4_error(inode->i_sb, "ext4_free_branches", |
2119 | "Read failure, inode=%lu, block="E3FSBLK, | 2119 | "Read failure, inode=%lu, block="E3FSBLK, |
2120 | inode->i_ino, nr); | 2120 | inode->i_ino, nr); |
2121 | continue; | 2121 | continue; |
@@ -2123,7 +2123,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2123 | 2123 | ||
2124 | /* This zaps the entire block. Bottom up. */ | 2124 | /* This zaps the entire block. Bottom up. */ |
2125 | BUFFER_TRACE(bh, "free child branches"); | 2125 | BUFFER_TRACE(bh, "free child branches"); |
2126 | ext3_free_branches(handle, inode, bh, | 2126 | ext4_free_branches(handle, inode, bh, |
2127 | (__le32*)bh->b_data, | 2127 | (__le32*)bh->b_data, |
2128 | (__le32*)bh->b_data + addr_per_block, | 2128 | (__le32*)bh->b_data + addr_per_block, |
2129 | depth); | 2129 | depth); |
@@ -2138,7 +2138,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2138 | * transaction. But if it's part of the committing | 2138 | * transaction. But if it's part of the committing |
2139 | * transaction then journal_forget() will simply | 2139 | * transaction then journal_forget() will simply |
2140 | * brelse() it. That means that if the underlying | 2140 | * brelse() it. That means that if the underlying |
2141 | * block is reallocated in ext3_get_block(), | 2141 | * block is reallocated in ext4_get_block(), |
2142 | * unmap_underlying_metadata() will find this block | 2142 | * unmap_underlying_metadata() will find this block |
2143 | * and will try to get rid of it. damn, damn. | 2143 | * and will try to get rid of it. damn, damn. |
2144 | * | 2144 | * |
@@ -2147,7 +2147,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2147 | * revoke records must be emitted *before* clearing | 2147 | * revoke records must be emitted *before* clearing |
2148 | * this block's bit in the bitmaps. | 2148 | * this block's bit in the bitmaps. |
2149 | */ | 2149 | */ |
2150 | ext3_forget(handle, 1, inode, bh, bh->b_blocknr); | 2150 | ext4_forget(handle, 1, inode, bh, bh->b_blocknr); |
2151 | 2151 | ||
2152 | /* | 2152 | /* |
2153 | * Everything below this pointer has been | 2153 | * Everything below this pointer has been |
@@ -2168,11 +2168,11 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2168 | if (is_handle_aborted(handle)) | 2168 | if (is_handle_aborted(handle)) |
2169 | return; | 2169 | return; |
2170 | if (try_to_extend_transaction(handle, inode)) { | 2170 | if (try_to_extend_transaction(handle, inode)) { |
2171 | ext3_mark_inode_dirty(handle, inode); | 2171 | ext4_mark_inode_dirty(handle, inode); |
2172 | ext3_journal_test_restart(handle, inode); | 2172 | ext4_journal_test_restart(handle, inode); |
2173 | } | 2173 | } |
2174 | 2174 | ||
2175 | ext3_free_blocks(handle, inode, nr, 1); | 2175 | ext4_free_blocks(handle, inode, nr, 1); |
2176 | 2176 | ||
2177 | if (parent_bh) { | 2177 | if (parent_bh) { |
2178 | /* | 2178 | /* |
@@ -2180,12 +2180,12 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2180 | * pointed to by an indirect block: journal it | 2180 | * pointed to by an indirect block: journal it |
2181 | */ | 2181 | */ |
2182 | BUFFER_TRACE(parent_bh, "get_write_access"); | 2182 | BUFFER_TRACE(parent_bh, "get_write_access"); |
2183 | if (!ext3_journal_get_write_access(handle, | 2183 | if (!ext4_journal_get_write_access(handle, |
2184 | parent_bh)){ | 2184 | parent_bh)){ |
2185 | *p = 0; | 2185 | *p = 0; |
2186 | BUFFER_TRACE(parent_bh, | 2186 | BUFFER_TRACE(parent_bh, |
2187 | "call ext3_journal_dirty_metadata"); | 2187 | "call ext4_journal_dirty_metadata"); |
2188 | ext3_journal_dirty_metadata(handle, | 2188 | ext4_journal_dirty_metadata(handle, |
2189 | parent_bh); | 2189 | parent_bh); |
2190 | } | 2190 | } |
2191 | } | 2191 | } |
@@ -2193,15 +2193,15 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2193 | } else { | 2193 | } else { |
2194 | /* We have reached the bottom of the tree. */ | 2194 | /* We have reached the bottom of the tree. */ |
2195 | BUFFER_TRACE(parent_bh, "free data blocks"); | 2195 | BUFFER_TRACE(parent_bh, "free data blocks"); |
2196 | ext3_free_data(handle, inode, parent_bh, first, last); | 2196 | ext4_free_data(handle, inode, parent_bh, first, last); |
2197 | } | 2197 | } |
2198 | } | 2198 | } |
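
ext4_free_branches() is depth-first and bottom-up: at each level it recurses into every child indirect block, then forgets and frees the indirect block itself, and only at depth zero does it hand the leaf pointers to ext4_free_data(). A compact model of that recursion over a toy tree, with heap structures standing in for indirect blocks:

#include <stdio.h>
#include <stdlib.h>

struct blk {
	unsigned long nr;	/* "disk" block number */
	int nchild;
	struct blk **child;	/* non-NULL only for indirect blocks */
};

/* Free children first, then the block itself: bottom up, like ext4_free_branches(). */
static void free_branch(struct blk *b, int depth)
{
	int i;

	if (!b)
		return;
	if (depth > 0)
		for (i = 0; i < b->nchild; i++)
			free_branch(b->child[i], depth - 1);
	printf("free block %lu (depth %d)\n", b->nr, depth);
	free(b->child);
	free(b);
}

static struct blk *leaf(unsigned long nr)
{
	struct blk *b = calloc(1, sizeof(*b));

	b->nr = nr;
	return b;
}

int main(void)
{
	/* One double-indirect block (500) -> two indirect blocks -> data blocks. */
	struct blk *ind1 = leaf(510), *ind2 = leaf(520), *dind = leaf(500);

	ind1->nchild = 2; ind1->child = calloc(2, sizeof(*ind1->child));
	ind1->child[0] = leaf(1000); ind1->child[1] = leaf(1001);
	ind2->nchild = 1; ind2->child = calloc(1, sizeof(*ind2->child));
	ind2->child[0] = leaf(1002);
	dind->nchild = 2; dind->child = calloc(2, sizeof(*dind->child));
	dind->child[0] = ind1; dind->child[1] = ind2;

	free_branch(dind, 2);	/* data blocks print before their parents */
	return 0;
}
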
2199 | 2199 | ||
2200 | /* | 2200 | /* |
2201 | * ext3_truncate() | 2201 | * ext4_truncate() |
2202 | * | 2202 | * |
2203 | * We block out ext3_get_block() block instantiations across the entire | 2203 | * We block out ext4_get_block() block instantiations across the entire |
2204 | * transaction, and VFS/VM ensures that ext3_truncate() cannot run | 2204 | * transaction, and VFS/VM ensures that ext4_truncate() cannot run |
2205 | * simultaneously on behalf of the same inode. | 2205 | * simultaneously on behalf of the same inode. |
2206 | * | 2206 | * |
2207 | * As we work through the truncate and commit bits of it to the journal there | 2207 | * As we work through the truncate and commit bits of it to the journal there |
@@ -2218,19 +2218,19 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2218 | * truncate against the orphan inode list. | 2218 | * truncate against the orphan inode list. |
2219 | * | 2219 | * |
2220 | * The committed inode has the new, desired i_size (which is the same as | 2220 | * The committed inode has the new, desired i_size (which is the same as |
2221 | * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see | 2221 | * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see |
2222 | * that this inode's truncate did not complete and it will again call | 2222 | * that this inode's truncate did not complete and it will again call |
2223 | * ext3_truncate() to have another go. So there will be instantiated blocks | 2223 | * ext4_truncate() to have another go. So there will be instantiated blocks |
2224 | * to the right of the truncation point in a crashed ext3 filesystem. But | 2224 | * to the right of the truncation point in a crashed ext4 filesystem. But |
2225 | * that's fine - as long as they are linked from the inode, the post-crash | 2225 | * that's fine - as long as they are linked from the inode, the post-crash |
2226 | * ext3_truncate() run will find them and release them. | 2226 | * ext4_truncate() run will find them and release them. |
2227 | */ | 2227 | */ |
2228 | void ext3_truncate(struct inode *inode) | 2228 | void ext4_truncate(struct inode *inode) |
2229 | { | 2229 | { |
2230 | handle_t *handle; | 2230 | handle_t *handle; |
2231 | struct ext3_inode_info *ei = EXT3_I(inode); | 2231 | struct ext4_inode_info *ei = EXT4_I(inode); |
2232 | __le32 *i_data = ei->i_data; | 2232 | __le32 *i_data = ei->i_data; |
2233 | int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb); | 2233 | int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); |
2234 | struct address_space *mapping = inode->i_mapping; | 2234 | struct address_space *mapping = inode->i_mapping; |
2235 | int offsets[4]; | 2235 | int offsets[4]; |
2236 | Indirect chain[4]; | 2236 | Indirect chain[4]; |
@@ -2244,7 +2244,7 @@ void ext3_truncate(struct inode *inode) | |||
2244 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 2244 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
2245 | S_ISLNK(inode->i_mode))) | 2245 | S_ISLNK(inode->i_mode))) |
2246 | return; | 2246 | return; |
2247 | if (ext3_inode_is_fast_symlink(inode)) | 2247 | if (ext4_inode_is_fast_symlink(inode)) |
2248 | return; | 2248 | return; |
2249 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | 2249 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) |
2250 | return; | 2250 | return; |
@@ -2275,12 +2275,12 @@ void ext3_truncate(struct inode *inode) | |||
2275 | } | 2275 | } |
2276 | 2276 | ||
2277 | last_block = (inode->i_size + blocksize-1) | 2277 | last_block = (inode->i_size + blocksize-1) |
2278 | >> EXT3_BLOCK_SIZE_BITS(inode->i_sb); | 2278 | >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); |
2279 | 2279 | ||
2280 | if (page) | 2280 | if (page) |
2281 | ext3_block_truncate_page(handle, page, mapping, inode->i_size); | 2281 | ext4_block_truncate_page(handle, page, mapping, inode->i_size); |
2282 | 2282 | ||
2283 | n = ext3_block_to_path(inode, last_block, offsets, NULL); | 2283 | n = ext4_block_to_path(inode, last_block, offsets, NULL); |
2284 | if (n == 0) | 2284 | if (n == 0) |
2285 | goto out_stop; /* error */ | 2285 | goto out_stop; /* error */ |
2286 | 2286 | ||
@@ -2293,7 +2293,7 @@ void ext3_truncate(struct inode *inode) | |||
2293 | * Implication: the file must always be in a sane, consistent | 2293 | * Implication: the file must always be in a sane, consistent |
2294 | * truncatable state while each transaction commits. | 2294 | * truncatable state while each transaction commits. |
2295 | */ | 2295 | */ |
2296 | if (ext3_orphan_add(handle, inode)) | 2296 | if (ext4_orphan_add(handle, inode)) |
2297 | goto out_stop; | 2297 | goto out_stop; |
2298 | 2298 | ||
2299 | /* | 2299 | /* |
@@ -2301,28 +2301,28 @@ void ext3_truncate(struct inode *inode) | |||
2301 | * occurs before the truncate completes, so it is now safe to propagate | 2301 | * occurs before the truncate completes, so it is now safe to propagate |
2302 | * the new, shorter inode size (held for now in i_size) into the | 2302 | * the new, shorter inode size (held for now in i_size) into the |
2303 | * on-disk inode. We do this via i_disksize, which is the value which | 2303 | * on-disk inode. We do this via i_disksize, which is the value which |
2304 | * ext3 *really* writes onto the disk inode. | 2304 | * ext4 *really* writes onto the disk inode. |
2305 | */ | 2305 | */ |
2306 | ei->i_disksize = inode->i_size; | 2306 | ei->i_disksize = inode->i_size; |
2307 | 2307 | ||
2308 | /* | 2308 | /* |
2309 | * From here we block out all ext3_get_block() callers who want to | 2309 | * From here we block out all ext4_get_block() callers who want to |
2310 | * modify the block allocation tree. | 2310 | * modify the block allocation tree. |
2311 | */ | 2311 | */ |
2312 | mutex_lock(&ei->truncate_mutex); | 2312 | mutex_lock(&ei->truncate_mutex); |
2313 | 2313 | ||
2314 | if (n == 1) { /* direct blocks */ | 2314 | if (n == 1) { /* direct blocks */ |
2315 | ext3_free_data(handle, inode, NULL, i_data+offsets[0], | 2315 | ext4_free_data(handle, inode, NULL, i_data+offsets[0], |
2316 | i_data + EXT3_NDIR_BLOCKS); | 2316 | i_data + EXT4_NDIR_BLOCKS); |
2317 | goto do_indirects; | 2317 | goto do_indirects; |
2318 | } | 2318 | } |
2319 | 2319 | ||
2320 | partial = ext3_find_shared(inode, n, offsets, chain, &nr); | 2320 | partial = ext4_find_shared(inode, n, offsets, chain, &nr); |
2321 | /* Kill the top of shared branch (not detached) */ | 2321 | /* Kill the top of shared branch (not detached) */ |
2322 | if (nr) { | 2322 | if (nr) { |
2323 | if (partial == chain) { | 2323 | if (partial == chain) { |
2324 | /* Shared branch grows from the inode */ | 2324 | /* Shared branch grows from the inode */ |
2325 | ext3_free_branches(handle, inode, NULL, | 2325 | ext4_free_branches(handle, inode, NULL, |
2326 | &nr, &nr+1, (chain+n-1) - partial); | 2326 | &nr, &nr+1, (chain+n-1) - partial); |
2327 | *partial->p = 0; | 2327 | *partial->p = 0; |
2328 | /* | 2328 | /* |
@@ -2332,14 +2332,14 @@ void ext3_truncate(struct inode *inode) | |||
2332 | } else { | 2332 | } else { |
2333 | /* Shared branch grows from an indirect block */ | 2333 | /* Shared branch grows from an indirect block */ |
2334 | BUFFER_TRACE(partial->bh, "get_write_access"); | 2334 | BUFFER_TRACE(partial->bh, "get_write_access"); |
2335 | ext3_free_branches(handle, inode, partial->bh, | 2335 | ext4_free_branches(handle, inode, partial->bh, |
2336 | partial->p, | 2336 | partial->p, |
2337 | partial->p+1, (chain+n-1) - partial); | 2337 | partial->p+1, (chain+n-1) - partial); |
2338 | } | 2338 | } |
2339 | } | 2339 | } |
2340 | /* Clear the ends of indirect blocks on the shared branch */ | 2340 | /* Clear the ends of indirect blocks on the shared branch */ |
2341 | while (partial > chain) { | 2341 | while (partial > chain) { |
2342 | ext3_free_branches(handle, inode, partial->bh, partial->p + 1, | 2342 | ext4_free_branches(handle, inode, partial->bh, partial->p + 1, |
2343 | (__le32*)partial->bh->b_data+addr_per_block, | 2343 | (__le32*)partial->bh->b_data+addr_per_block, |
2344 | (chain+n-1) - partial); | 2344 | (chain+n-1) - partial); |
2345 | BUFFER_TRACE(partial->bh, "call brelse"); | 2345 | BUFFER_TRACE(partial->bh, "call brelse"); |
@@ -2350,32 +2350,32 @@ do_indirects: | |||
2350 | /* Kill the remaining (whole) subtrees */ | 2350 | /* Kill the remaining (whole) subtrees */ |
2351 | switch (offsets[0]) { | 2351 | switch (offsets[0]) { |
2352 | default: | 2352 | default: |
2353 | nr = i_data[EXT3_IND_BLOCK]; | 2353 | nr = i_data[EXT4_IND_BLOCK]; |
2354 | if (nr) { | 2354 | if (nr) { |
2355 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1); | 2355 | ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); |
2356 | i_data[EXT3_IND_BLOCK] = 0; | 2356 | i_data[EXT4_IND_BLOCK] = 0; |
2357 | } | 2357 | } |
2358 | case EXT3_IND_BLOCK: | 2358 | case EXT4_IND_BLOCK: |
2359 | nr = i_data[EXT3_DIND_BLOCK]; | 2359 | nr = i_data[EXT4_DIND_BLOCK]; |
2360 | if (nr) { | 2360 | if (nr) { |
2361 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2); | 2361 | ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); |
2362 | i_data[EXT3_DIND_BLOCK] = 0; | 2362 | i_data[EXT4_DIND_BLOCK] = 0; |
2363 | } | 2363 | } |
2364 | case EXT3_DIND_BLOCK: | 2364 | case EXT4_DIND_BLOCK: |
2365 | nr = i_data[EXT3_TIND_BLOCK]; | 2365 | nr = i_data[EXT4_TIND_BLOCK]; |
2366 | if (nr) { | 2366 | if (nr) { |
2367 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3); | 2367 | ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); |
2368 | i_data[EXT3_TIND_BLOCK] = 0; | 2368 | i_data[EXT4_TIND_BLOCK] = 0; |
2369 | } | 2369 | } |
2370 | case EXT3_TIND_BLOCK: | 2370 | case EXT4_TIND_BLOCK: |
2371 | ; | 2371 | ; |
2372 | } | 2372 | } |
2373 | 2373 | ||
2374 | ext3_discard_reservation(inode); | 2374 | ext4_discard_reservation(inode); |
2375 | 2375 | ||
2376 | mutex_unlock(&ei->truncate_mutex); | 2376 | mutex_unlock(&ei->truncate_mutex); |
2377 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | 2377 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; |
2378 | ext3_mark_inode_dirty(handle, inode); | 2378 | ext4_mark_inode_dirty(handle, inode); |
2379 | 2379 | ||
2380 | /* | 2380 | /* |
2381 | * In a multi-transaction truncate, we only make the final transaction | 2381 | * In a multi-transaction truncate, we only make the final transaction |
@@ -2388,25 +2388,25 @@ out_stop: | |||
2388 | * If this was a simple ftruncate(), and the file will remain alive | 2388 | * If this was a simple ftruncate(), and the file will remain alive |
2389 | * then we need to clear up the orphan record which we created above. | 2389 | * then we need to clear up the orphan record which we created above. |
2390 | * However, if this was a real unlink then we were called by | 2390 | * However, if this was a real unlink then we were called by |
2391 | * ext3_delete_inode(), and we allow that function to clean up the | 2391 | * ext4_delete_inode(), and we allow that function to clean up the |
2392 | * orphan info for us. | 2392 | * orphan info for us. |
2393 | */ | 2393 | */ |
2394 | if (inode->i_nlink) | 2394 | if (inode->i_nlink) |
2395 | ext3_orphan_del(handle, inode); | 2395 | ext4_orphan_del(handle, inode); |
2396 | 2396 | ||
2397 | ext3_journal_stop(handle); | 2397 | ext4_journal_stop(handle); |
2398 | } | 2398 | } |
2399 | 2399 | ||
2400 | static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, | 2400 | static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb, |
2401 | unsigned long ino, struct ext3_iloc *iloc) | 2401 | unsigned long ino, struct ext4_iloc *iloc) |
2402 | { | 2402 | { |
2403 | unsigned long desc, group_desc, block_group; | 2403 | unsigned long desc, group_desc, block_group; |
2404 | unsigned long offset; | 2404 | unsigned long offset; |
2405 | ext3_fsblk_t block; | 2405 | ext4_fsblk_t block; |
2406 | struct buffer_head *bh; | 2406 | struct buffer_head *bh; |
2407 | struct ext3_group_desc * gdp; | 2407 | struct ext4_group_desc * gdp; |
2408 | 2408 | ||
2409 | if (!ext3_valid_inum(sb, ino)) { | 2409 | if (!ext4_valid_inum(sb, ino)) { |
2410 | /* | 2410 | /* |
2411 | * This error is already checked for in namei.c unless we are | 2411 | * This error is already checked for in namei.c unless we are |
2412 | * looking at an NFS filehandle, in which case no error | 2412 | * looking at an NFS filehandle, in which case no error |
@@ -2415,54 +2415,54 @@ static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, | |||
2415 | return 0; | 2415 | return 0; |
2416 | } | 2416 | } |
2417 | 2417 | ||
2418 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); | 2418 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); |
2419 | if (block_group >= EXT3_SB(sb)->s_groups_count) { | 2419 | if (block_group >= EXT4_SB(sb)->s_groups_count) { |
2420 | ext3_error(sb,"ext3_get_inode_block","group >= groups count"); | 2420 | ext4_error(sb,"ext4_get_inode_block","group >= groups count"); |
2421 | return 0; | 2421 | return 0; |
2422 | } | 2422 | } |
2423 | smp_rmb(); | 2423 | smp_rmb(); |
2424 | group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb); | 2424 | group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb); |
2425 | desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1); | 2425 | desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1); |
2426 | bh = EXT3_SB(sb)->s_group_desc[group_desc]; | 2426 | bh = EXT4_SB(sb)->s_group_desc[group_desc]; |
2427 | if (!bh) { | 2427 | if (!bh) { |
2428 | ext3_error (sb, "ext3_get_inode_block", | 2428 | ext4_error (sb, "ext4_get_inode_block", |
2429 | "Descriptor not loaded"); | 2429 | "Descriptor not loaded"); |
2430 | return 0; | 2430 | return 0; |
2431 | } | 2431 | } |
2432 | 2432 | ||
2433 | gdp = (struct ext3_group_desc *)bh->b_data; | 2433 | gdp = (struct ext4_group_desc *)bh->b_data; |
2434 | /* | 2434 | /* |
2435 | * Figure out the offset within the block group inode table | 2435 | * Figure out the offset within the block group inode table |
2436 | */ | 2436 | */ |
2437 | offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) * | 2437 | offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) * |
2438 | EXT3_INODE_SIZE(sb); | 2438 | EXT4_INODE_SIZE(sb); |
2439 | block = le32_to_cpu(gdp[desc].bg_inode_table) + | 2439 | block = le32_to_cpu(gdp[desc].bg_inode_table) + |
2440 | (offset >> EXT3_BLOCK_SIZE_BITS(sb)); | 2440 | (offset >> EXT4_BLOCK_SIZE_BITS(sb)); |
2441 | 2441 | ||
2442 | iloc->block_group = block_group; | 2442 | iloc->block_group = block_group; |
2443 | iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1); | 2443 | iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1); |
2444 | return block; | 2444 | return block; |
2445 | } | 2445 | } |
2446 | 2446 | ||
2447 | /* | 2447 | /* |
2448 | * ext3_get_inode_loc returns with an extra refcount against the inode's | 2448 | * ext4_get_inode_loc returns with an extra refcount against the inode's |
2449 | * underlying buffer_head on success. If 'in_mem' is true, we have all | 2449 | * underlying buffer_head on success. If 'in_mem' is true, we have all |
2450 | * data in memory that is needed to recreate the on-disk version of this | 2450 | * data in memory that is needed to recreate the on-disk version of this |
2451 | * inode. | 2451 | * inode. |
2452 | */ | 2452 | */ |
2453 | static int __ext3_get_inode_loc(struct inode *inode, | 2453 | static int __ext4_get_inode_loc(struct inode *inode, |
2454 | struct ext3_iloc *iloc, int in_mem) | 2454 | struct ext4_iloc *iloc, int in_mem) |
2455 | { | 2455 | { |
2456 | ext3_fsblk_t block; | 2456 | ext4_fsblk_t block; |
2457 | struct buffer_head *bh; | 2457 | struct buffer_head *bh; |
2458 | 2458 | ||
2459 | block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc); | 2459 | block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc); |
2460 | if (!block) | 2460 | if (!block) |
2461 | return -EIO; | 2461 | return -EIO; |
2462 | 2462 | ||
2463 | bh = sb_getblk(inode->i_sb, block); | 2463 | bh = sb_getblk(inode->i_sb, block); |
2464 | if (!bh) { | 2464 | if (!bh) { |
2465 | ext3_error (inode->i_sb, "ext3_get_inode_loc", | 2465 | ext4_error (inode->i_sb, "ext4_get_inode_loc", |
2466 | "unable to read inode block - " | 2466 | "unable to read inode block - " |
2467 | "inode=%lu, block="E3FSBLK, | 2467 | "inode=%lu, block="E3FSBLK, |
2468 | inode->i_ino, block); | 2468 | inode->i_ino, block); |
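Editor's note: ext4_get_inode_block() above locates an inode purely by arithmetic over the group descriptors. A self-contained illustration of the same calculation in plain C — all constants are example values; the real ones come from the superblock and group descriptor:

        #include <stdio.h>

        int main(void)
        {
                unsigned long ino              = 12345;  /* example inode number */
                unsigned long inodes_per_group = 16384;  /* EXT4_INODES_PER_GROUP(sb) */
                unsigned long inode_size       = 128;    /* EXT4_INODE_SIZE(sb) */
                unsigned long block_bits       = 12;     /* 4 KiB blocks */
                unsigned long inode_table      = 1028;   /* gdp->bg_inode_table, example */

                unsigned long block_group = (ino - 1) / inodes_per_group;
                unsigned long offset      = ((ino - 1) % inodes_per_group) * inode_size;
                unsigned long block       = inode_table + (offset >> block_bits);

                printf("group %lu, block %lu, offset within block %lu\n",
                       block_group, block, offset & ((1UL << block_bits) - 1));
                return 0;
        }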
@@ -2483,22 +2483,22 @@ static int __ext3_get_inode_loc(struct inode *inode, | |||
2483 | */ | 2483 | */ |
2484 | if (in_mem) { | 2484 | if (in_mem) { |
2485 | struct buffer_head *bitmap_bh; | 2485 | struct buffer_head *bitmap_bh; |
2486 | struct ext3_group_desc *desc; | 2486 | struct ext4_group_desc *desc; |
2487 | int inodes_per_buffer; | 2487 | int inodes_per_buffer; |
2488 | int inode_offset, i; | 2488 | int inode_offset, i; |
2489 | int block_group; | 2489 | int block_group; |
2490 | int start; | 2490 | int start; |
2491 | 2491 | ||
2492 | block_group = (inode->i_ino - 1) / | 2492 | block_group = (inode->i_ino - 1) / |
2493 | EXT3_INODES_PER_GROUP(inode->i_sb); | 2493 | EXT4_INODES_PER_GROUP(inode->i_sb); |
2494 | inodes_per_buffer = bh->b_size / | 2494 | inodes_per_buffer = bh->b_size / |
2495 | EXT3_INODE_SIZE(inode->i_sb); | 2495 | EXT4_INODE_SIZE(inode->i_sb); |
2496 | inode_offset = ((inode->i_ino - 1) % | 2496 | inode_offset = ((inode->i_ino - 1) % |
2497 | EXT3_INODES_PER_GROUP(inode->i_sb)); | 2497 | EXT4_INODES_PER_GROUP(inode->i_sb)); |
2498 | start = inode_offset & ~(inodes_per_buffer - 1); | 2498 | start = inode_offset & ~(inodes_per_buffer - 1); |
2499 | 2499 | ||
2500 | /* Is the inode bitmap in cache? */ | 2500 | /* Is the inode bitmap in cache? */ |
2501 | desc = ext3_get_group_desc(inode->i_sb, | 2501 | desc = ext4_get_group_desc(inode->i_sb, |
2502 | block_group, NULL); | 2502 | block_group, NULL); |
2503 | if (!desc) | 2503 | if (!desc) |
2504 | goto make_io; | 2504 | goto make_io; |
@@ -2520,7 +2520,7 @@ static int __ext3_get_inode_loc(struct inode *inode, | |||
2520 | for (i = start; i < start + inodes_per_buffer; i++) { | 2520 | for (i = start; i < start + inodes_per_buffer; i++) { |
2521 | if (i == inode_offset) | 2521 | if (i == inode_offset) |
2522 | continue; | 2522 | continue; |
2523 | if (ext3_test_bit(i, bitmap_bh->b_data)) | 2523 | if (ext4_test_bit(i, bitmap_bh->b_data)) |
2524 | break; | 2524 | break; |
2525 | } | 2525 | } |
2526 | brelse(bitmap_bh); | 2526 | brelse(bitmap_bh); |
@@ -2544,7 +2544,7 @@ make_io: | |||
2544 | submit_bh(READ_META, bh); | 2544 | submit_bh(READ_META, bh); |
2545 | wait_on_buffer(bh); | 2545 | wait_on_buffer(bh); |
2546 | if (!buffer_uptodate(bh)) { | 2546 | if (!buffer_uptodate(bh)) { |
2547 | ext3_error(inode->i_sb, "ext3_get_inode_loc", | 2547 | ext4_error(inode->i_sb, "ext4_get_inode_loc", |
2548 | "unable to read inode block - " | 2548 | "unable to read inode block - " |
2549 | "inode=%lu, block="E3FSBLK, | 2549 | "inode=%lu, block="E3FSBLK, |
2550 | inode->i_ino, block); | 2550 | inode->i_ino, block); |
@@ -2557,48 +2557,48 @@ has_buffer: | |||
2557 | return 0; | 2557 | return 0; |
2558 | } | 2558 | } |
2559 | 2559 | ||
2560 | int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc) | 2560 | int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) |
2561 | { | 2561 | { |
2562 | /* We have all inode data except xattrs in memory here. */ | 2562 | /* We have all inode data except xattrs in memory here. */ |
2563 | return __ext3_get_inode_loc(inode, iloc, | 2563 | return __ext4_get_inode_loc(inode, iloc, |
2564 | !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR)); | 2564 | !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); |
2565 | } | 2565 | } |
2566 | 2566 | ||
2567 | void ext3_set_inode_flags(struct inode *inode) | 2567 | void ext4_set_inode_flags(struct inode *inode) |
2568 | { | 2568 | { |
2569 | unsigned int flags = EXT3_I(inode)->i_flags; | 2569 | unsigned int flags = EXT4_I(inode)->i_flags; |
2570 | 2570 | ||
2571 | inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); | 2571 | inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); |
2572 | if (flags & EXT3_SYNC_FL) | 2572 | if (flags & EXT4_SYNC_FL) |
2573 | inode->i_flags |= S_SYNC; | 2573 | inode->i_flags |= S_SYNC; |
2574 | if (flags & EXT3_APPEND_FL) | 2574 | if (flags & EXT4_APPEND_FL) |
2575 | inode->i_flags |= S_APPEND; | 2575 | inode->i_flags |= S_APPEND; |
2576 | if (flags & EXT3_IMMUTABLE_FL) | 2576 | if (flags & EXT4_IMMUTABLE_FL) |
2577 | inode->i_flags |= S_IMMUTABLE; | 2577 | inode->i_flags |= S_IMMUTABLE; |
2578 | if (flags & EXT3_NOATIME_FL) | 2578 | if (flags & EXT4_NOATIME_FL) |
2579 | inode->i_flags |= S_NOATIME; | 2579 | inode->i_flags |= S_NOATIME; |
2580 | if (flags & EXT3_DIRSYNC_FL) | 2580 | if (flags & EXT4_DIRSYNC_FL) |
2581 | inode->i_flags |= S_DIRSYNC; | 2581 | inode->i_flags |= S_DIRSYNC; |
2582 | } | 2582 | } |
2583 | 2583 | ||
2584 | void ext3_read_inode(struct inode * inode) | 2584 | void ext4_read_inode(struct inode * inode) |
2585 | { | 2585 | { |
2586 | struct ext3_iloc iloc; | 2586 | struct ext4_iloc iloc; |
2587 | struct ext3_inode *raw_inode; | 2587 | struct ext4_inode *raw_inode; |
2588 | struct ext3_inode_info *ei = EXT3_I(inode); | 2588 | struct ext4_inode_info *ei = EXT4_I(inode); |
2589 | struct buffer_head *bh; | 2589 | struct buffer_head *bh; |
2590 | int block; | 2590 | int block; |
2591 | 2591 | ||
2592 | #ifdef CONFIG_EXT3_FS_POSIX_ACL | 2592 | #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL |
2593 | ei->i_acl = EXT3_ACL_NOT_CACHED; | 2593 | ei->i_acl = EXT4_ACL_NOT_CACHED; |
2594 | ei->i_default_acl = EXT3_ACL_NOT_CACHED; | 2594 | ei->i_default_acl = EXT4_ACL_NOT_CACHED; |
2595 | #endif | 2595 | #endif |
2596 | ei->i_block_alloc_info = NULL; | 2596 | ei->i_block_alloc_info = NULL; |
2597 | 2597 | ||
2598 | if (__ext3_get_inode_loc(inode, &iloc, 0)) | 2598 | if (__ext4_get_inode_loc(inode, &iloc, 0)) |
2599 | goto bad_inode; | 2599 | goto bad_inode; |
2600 | bh = iloc.bh; | 2600 | bh = iloc.bh; |
2601 | raw_inode = ext3_raw_inode(&iloc); | 2601 | raw_inode = ext4_raw_inode(&iloc); |
2602 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); | 2602 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); |
2603 | inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); | 2603 | inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); |
2604 | inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); | 2604 | inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); |
@@ -2623,7 +2623,7 @@ void ext3_read_inode(struct inode * inode) | |||
2623 | */ | 2623 | */ |
2624 | if (inode->i_nlink == 0) { | 2624 | if (inode->i_nlink == 0) { |
2625 | if (inode->i_mode == 0 || | 2625 | if (inode->i_mode == 0 || |
2626 | !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { | 2626 | !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { |
2627 | /* this inode is deleted */ | 2627 | /* this inode is deleted */ |
2628 | brelse (bh); | 2628 | brelse (bh); |
2629 | goto bad_inode; | 2629 | goto bad_inode; |
@@ -2635,7 +2635,7 @@ void ext3_read_inode(struct inode * inode) | |||
2635 | } | 2635 | } |
2636 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); | 2636 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); |
2637 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); | 2637 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); |
2638 | #ifdef EXT3_FRAGMENTS | 2638 | #ifdef EXT4_FRAGMENTS |
2639 | ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); | 2639 | ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); |
2640 | ei->i_frag_no = raw_inode->i_frag; | 2640 | ei->i_frag_no = raw_inode->i_frag; |
2641 | ei->i_frag_size = raw_inode->i_fsize; | 2641 | ei->i_frag_size = raw_inode->i_fsize; |
@@ -2654,51 +2654,51 @@ void ext3_read_inode(struct inode * inode) | |||
2654 | * NOTE! The in-memory inode i_data array is in little-endian order | 2654 | * NOTE! The in-memory inode i_data array is in little-endian order |
2655 | * even on big-endian machines: we do NOT byteswap the block numbers! | 2655 | * even on big-endian machines: we do NOT byteswap the block numbers! |
2656 | */ | 2656 | */ |
2657 | for (block = 0; block < EXT3_N_BLOCKS; block++) | 2657 | for (block = 0; block < EXT4_N_BLOCKS; block++) |
2658 | ei->i_data[block] = raw_inode->i_block[block]; | 2658 | ei->i_data[block] = raw_inode->i_block[block]; |
2659 | INIT_LIST_HEAD(&ei->i_orphan); | 2659 | INIT_LIST_HEAD(&ei->i_orphan); |
2660 | 2660 | ||
2661 | if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 && | 2661 | if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 && |
2662 | EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) { | 2662 | EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { |
2663 | /* | 2663 | /* |
2664 | * When mke2fs creates big inodes it does not zero out | 2664 | * When mke2fs creates big inodes it does not zero out |
2665 | * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE, | 2665 | * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE, |
2666 | * so ignore those first few inodes. | 2666 | * so ignore those first few inodes. |
2667 | */ | 2667 | */ |
2668 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); | 2668 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); |
2669 | if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > | 2669 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > |
2670 | EXT3_INODE_SIZE(inode->i_sb)) | 2670 | EXT4_INODE_SIZE(inode->i_sb)) |
2671 | goto bad_inode; | 2671 | goto bad_inode; |
2672 | if (ei->i_extra_isize == 0) { | 2672 | if (ei->i_extra_isize == 0) { |
2673 | /* The extra space is currently unused. Use it. */ | 2673 | /* The extra space is currently unused. Use it. */ |
2674 | ei->i_extra_isize = sizeof(struct ext3_inode) - | 2674 | ei->i_extra_isize = sizeof(struct ext4_inode) - |
2675 | EXT3_GOOD_OLD_INODE_SIZE; | 2675 | EXT4_GOOD_OLD_INODE_SIZE; |
2676 | } else { | 2676 | } else { |
2677 | __le32 *magic = (void *)raw_inode + | 2677 | __le32 *magic = (void *)raw_inode + |
2678 | EXT3_GOOD_OLD_INODE_SIZE + | 2678 | EXT4_GOOD_OLD_INODE_SIZE + |
2679 | ei->i_extra_isize; | 2679 | ei->i_extra_isize; |
2680 | if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC)) | 2680 | if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) |
2681 | ei->i_state |= EXT3_STATE_XATTR; | 2681 | ei->i_state |= EXT4_STATE_XATTR; |
2682 | } | 2682 | } |
2683 | } else | 2683 | } else |
2684 | ei->i_extra_isize = 0; | 2684 | ei->i_extra_isize = 0; |
2685 | 2685 | ||
2686 | if (S_ISREG(inode->i_mode)) { | 2686 | if (S_ISREG(inode->i_mode)) { |
2687 | inode->i_op = &ext3_file_inode_operations; | 2687 | inode->i_op = &ext4_file_inode_operations; |
2688 | inode->i_fop = &ext3_file_operations; | 2688 | inode->i_fop = &ext4_file_operations; |
2689 | ext3_set_aops(inode); | 2689 | ext4_set_aops(inode); |
2690 | } else if (S_ISDIR(inode->i_mode)) { | 2690 | } else if (S_ISDIR(inode->i_mode)) { |
2691 | inode->i_op = &ext3_dir_inode_operations; | 2691 | inode->i_op = &ext4_dir_inode_operations; |
2692 | inode->i_fop = &ext3_dir_operations; | 2692 | inode->i_fop = &ext4_dir_operations; |
2693 | } else if (S_ISLNK(inode->i_mode)) { | 2693 | } else if (S_ISLNK(inode->i_mode)) { |
2694 | if (ext3_inode_is_fast_symlink(inode)) | 2694 | if (ext4_inode_is_fast_symlink(inode)) |
2695 | inode->i_op = &ext3_fast_symlink_inode_operations; | 2695 | inode->i_op = &ext4_fast_symlink_inode_operations; |
2696 | else { | 2696 | else { |
2697 | inode->i_op = &ext3_symlink_inode_operations; | 2697 | inode->i_op = &ext4_symlink_inode_operations; |
2698 | ext3_set_aops(inode); | 2698 | ext4_set_aops(inode); |
2699 | } | 2699 | } |
2700 | } else { | 2700 | } else { |
2701 | inode->i_op = &ext3_special_inode_operations; | 2701 | inode->i_op = &ext4_special_inode_operations; |
2702 | if (raw_inode->i_block[0]) | 2702 | if (raw_inode->i_block[0]) |
2703 | init_special_inode(inode, inode->i_mode, | 2703 | init_special_inode(inode, inode->i_mode, |
2704 | old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); | 2704 | old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); |
@@ -2707,7 +2707,7 @@ void ext3_read_inode(struct inode * inode) | |||
2707 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 2707 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
2708 | } | 2708 | } |
2709 | brelse (iloc.bh); | 2709 | brelse (iloc.bh); |
2710 | ext3_set_inode_flags(inode); | 2710 | ext4_set_inode_flags(inode); |
2711 | return; | 2711 | return; |
2712 | 2712 | ||
2713 | bad_inode: | 2713 | bad_inode: |
@@ -2722,19 +2722,19 @@ bad_inode: | |||
2722 | * | 2722 | * |
2723 | * The caller must have write access to iloc->bh. | 2723 | * The caller must have write access to iloc->bh. |
2724 | */ | 2724 | */ |
2725 | static int ext3_do_update_inode(handle_t *handle, | 2725 | static int ext4_do_update_inode(handle_t *handle, |
2726 | struct inode *inode, | 2726 | struct inode *inode, |
2727 | struct ext3_iloc *iloc) | 2727 | struct ext4_iloc *iloc) |
2728 | { | 2728 | { |
2729 | struct ext3_inode *raw_inode = ext3_raw_inode(iloc); | 2729 | struct ext4_inode *raw_inode = ext4_raw_inode(iloc); |
2730 | struct ext3_inode_info *ei = EXT3_I(inode); | 2730 | struct ext4_inode_info *ei = EXT4_I(inode); |
2731 | struct buffer_head *bh = iloc->bh; | 2731 | struct buffer_head *bh = iloc->bh; |
2732 | int err = 0, rc, block; | 2732 | int err = 0, rc, block; |
2733 | 2733 | ||
2734 | /* For fields not tracked in the in-memory inode, | 2734 | /* For fields not tracked in the in-memory inode, |
2735 | * initialise them to zero for new inodes. */ | 2735 | * initialise them to zero for new inodes. */ |
2736 | if (ei->i_state & EXT3_STATE_NEW) | 2736 | if (ei->i_state & EXT4_STATE_NEW) |
2737 | memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size); | 2737 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); |
2738 | 2738 | ||
2739 | raw_inode->i_mode = cpu_to_le16(inode->i_mode); | 2739 | raw_inode->i_mode = cpu_to_le16(inode->i_mode); |
2740 | if(!(test_opt(inode->i_sb, NO_UID32))) { | 2740 | if(!(test_opt(inode->i_sb, NO_UID32))) { |
@@ -2769,7 +2769,7 @@ static int ext3_do_update_inode(handle_t *handle, | |||
2769 | raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); | 2769 | raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); |
2770 | raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); | 2770 | raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); |
2771 | raw_inode->i_flags = cpu_to_le32(ei->i_flags); | 2771 | raw_inode->i_flags = cpu_to_le32(ei->i_flags); |
2772 | #ifdef EXT3_FRAGMENTS | 2772 | #ifdef EXT4_FRAGMENTS |
2773 | raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); | 2773 | raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); |
2774 | raw_inode->i_frag = ei->i_frag_no; | 2774 | raw_inode->i_frag = ei->i_frag_no; |
2775 | raw_inode->i_fsize = ei->i_frag_size; | 2775 | raw_inode->i_fsize = ei->i_frag_size; |
@@ -2782,24 +2782,24 @@ static int ext3_do_update_inode(handle_t *handle, | |||
2782 | cpu_to_le32(ei->i_disksize >> 32); | 2782 | cpu_to_le32(ei->i_disksize >> 32); |
2783 | if (ei->i_disksize > 0x7fffffffULL) { | 2783 | if (ei->i_disksize > 0x7fffffffULL) { |
2784 | struct super_block *sb = inode->i_sb; | 2784 | struct super_block *sb = inode->i_sb; |
2785 | if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, | 2785 | if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, |
2786 | EXT3_FEATURE_RO_COMPAT_LARGE_FILE) || | 2786 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || |
2787 | EXT3_SB(sb)->s_es->s_rev_level == | 2787 | EXT4_SB(sb)->s_es->s_rev_level == |
2788 | cpu_to_le32(EXT3_GOOD_OLD_REV)) { | 2788 | cpu_to_le32(EXT4_GOOD_OLD_REV)) { |
2789 | /* If this is the first large file | 2789 | /* If this is the first large file |
2790 | * created, add a flag to the superblock. | 2790 | * created, add a flag to the superblock. |
2791 | */ | 2791 | */ |
2792 | err = ext3_journal_get_write_access(handle, | 2792 | err = ext4_journal_get_write_access(handle, |
2793 | EXT3_SB(sb)->s_sbh); | 2793 | EXT4_SB(sb)->s_sbh); |
2794 | if (err) | 2794 | if (err) |
2795 | goto out_brelse; | 2795 | goto out_brelse; |
2796 | ext3_update_dynamic_rev(sb); | 2796 | ext4_update_dynamic_rev(sb); |
2797 | EXT3_SET_RO_COMPAT_FEATURE(sb, | 2797 | EXT4_SET_RO_COMPAT_FEATURE(sb, |
2798 | EXT3_FEATURE_RO_COMPAT_LARGE_FILE); | 2798 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE); |
2799 | sb->s_dirt = 1; | 2799 | sb->s_dirt = 1; |
2800 | handle->h_sync = 1; | 2800 | handle->h_sync = 1; |
2801 | err = ext3_journal_dirty_metadata(handle, | 2801 | err = ext4_journal_dirty_metadata(handle, |
2802 | EXT3_SB(sb)->s_sbh); | 2802 | EXT4_SB(sb)->s_sbh); |
2803 | } | 2803 | } |
2804 | } | 2804 | } |
2805 | } | 2805 | } |
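Editor's note: the hunk above handles files whose on-disk size no longer fits in 32 bits — i_disksize is split into two little-endian halves, and the first time a file grows past 2 GiB the superblock gains the RO_COMPAT_LARGE_FILE feature under a synchronous transaction. A small worked example of the split in plain C (local names are illustrative, not the on-disk field names):

        unsigned long long disksize = 6ULL << 30;               /* 6 GiB, illustrative */
        unsigned int size_lo = (unsigned int)disksize;          /* low 32 bits  -> 0x80000000 */
        unsigned int size_hi = (unsigned int)(disksize >> 32);  /* high 32 bits -> 1 */

        if (disksize > 0x7fffffffULL) {
                /* first large file on the filesystem: set
                 * EXT4_FEATURE_RO_COMPAT_LARGE_FILE in the superblock,
                 * exactly as the code above does */
        }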
@@ -2815,26 +2815,26 @@ static int ext3_do_update_inode(handle_t *handle, | |||
2815 | cpu_to_le32(new_encode_dev(inode->i_rdev)); | 2815 | cpu_to_le32(new_encode_dev(inode->i_rdev)); |
2816 | raw_inode->i_block[2] = 0; | 2816 | raw_inode->i_block[2] = 0; |
2817 | } | 2817 | } |
2818 | } else for (block = 0; block < EXT3_N_BLOCKS; block++) | 2818 | } else for (block = 0; block < EXT4_N_BLOCKS; block++) |
2819 | raw_inode->i_block[block] = ei->i_data[block]; | 2819 | raw_inode->i_block[block] = ei->i_data[block]; |
2820 | 2820 | ||
2821 | if (ei->i_extra_isize) | 2821 | if (ei->i_extra_isize) |
2822 | raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); | 2822 | raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); |
2823 | 2823 | ||
2824 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | 2824 | BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); |
2825 | rc = ext3_journal_dirty_metadata(handle, bh); | 2825 | rc = ext4_journal_dirty_metadata(handle, bh); |
2826 | if (!err) | 2826 | if (!err) |
2827 | err = rc; | 2827 | err = rc; |
2828 | ei->i_state &= ~EXT3_STATE_NEW; | 2828 | ei->i_state &= ~EXT4_STATE_NEW; |
2829 | 2829 | ||
2830 | out_brelse: | 2830 | out_brelse: |
2831 | brelse (bh); | 2831 | brelse (bh); |
2832 | ext3_std_error(inode->i_sb, err); | 2832 | ext4_std_error(inode->i_sb, err); |
2833 | return err; | 2833 | return err; |
2834 | } | 2834 | } |
2835 | 2835 | ||
2836 | /* | 2836 | /* |
2837 | * ext3_write_inode() | 2837 | * ext4_write_inode() |
2838 | * | 2838 | * |
2839 | * We are called from a few places: | 2839 | * We are called from a few places: |
2840 | * | 2840 | * |
@@ -2851,7 +2851,7 @@ out_brelse: | |||
2851 | * | 2851 | * |
2852 | * In all cases it is actually safe for us to return without doing anything, | 2852 | * In all cases it is actually safe for us to return without doing anything, |
2853 | * because the inode has been copied into a raw inode buffer in | 2853 | * because the inode has been copied into a raw inode buffer in |
2854 | * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for | 2854 | * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for |
2855 | * knfsd. | 2855 | * knfsd. |
2856 | * | 2856 | * |
2857 | * Note that we are absolutely dependent upon all inode dirtiers doing the | 2857 | * Note that we are absolutely dependent upon all inode dirtiers doing the |
@@ -2868,12 +2868,12 @@ out_brelse: | |||
2868 | * `stuff()' is running, and the new i_size will be lost. Plus the inode | 2868 | * `stuff()' is running, and the new i_size will be lost. Plus the inode |
2869 | * will no longer be on the superblock's dirty inode list. | 2869 | * will no longer be on the superblock's dirty inode list. |
2870 | */ | 2870 | */ |
2871 | int ext3_write_inode(struct inode *inode, int wait) | 2871 | int ext4_write_inode(struct inode *inode, int wait) |
2872 | { | 2872 | { |
2873 | if (current->flags & PF_MEMALLOC) | 2873 | if (current->flags & PF_MEMALLOC) |
2874 | return 0; | 2874 | return 0; |
2875 | 2875 | ||
2876 | if (ext3_journal_current_handle()) { | 2876 | if (ext4_journal_current_handle()) { |
2877 | jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n"); | 2877 | jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n"); |
2878 | dump_stack(); | 2878 | dump_stack(); |
2879 | return -EIO; | 2879 | return -EIO; |
@@ -2882,11 +2882,11 @@ int ext3_write_inode(struct inode *inode, int wait) | |||
2882 | if (!wait) | 2882 | if (!wait) |
2883 | return 0; | 2883 | return 0; |
2884 | 2884 | ||
2885 | return ext3_force_commit(inode->i_sb); | 2885 | return ext4_force_commit(inode->i_sb); |
2886 | } | 2886 | } |
2887 | 2887 | ||
2888 | /* | 2888 | /* |
2889 | * ext3_setattr() | 2889 | * ext4_setattr() |
2890 | * | 2890 | * |
2891 | * Called from notify_change. | 2891 | * Called from notify_change. |
2892 | * | 2892 | * |
@@ -2902,7 +2902,7 @@ int ext3_write_inode(struct inode *inode, int wait) | |||
2902 | * | 2902 | * |
2903 | * Called with inode->sem down. | 2903 | * Called with inode->sem down. |
2904 | */ | 2904 | */ |
2905 | int ext3_setattr(struct dentry *dentry, struct iattr *attr) | 2905 | int ext4_setattr(struct dentry *dentry, struct iattr *attr) |
2906 | { | 2906 | { |
2907 | struct inode *inode = dentry->d_inode; | 2907 | struct inode *inode = dentry->d_inode; |
2908 | int error, rc = 0; | 2908 | int error, rc = 0; |
@@ -2918,15 +2918,15 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) | |||
2918 | 2918 | ||
2919 | /* (user+group)*(old+new) structure, inode write (sb, | 2919 | /* (user+group)*(old+new) structure, inode write (sb, |
2920 | * inode block, ? - but truncate inode update has it) */ | 2920 | * inode block, ? - but truncate inode update has it) */ |
2921 | handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+ | 2921 | handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+ |
2922 | EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3); | 2922 | EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3); |
2923 | if (IS_ERR(handle)) { | 2923 | if (IS_ERR(handle)) { |
2924 | error = PTR_ERR(handle); | 2924 | error = PTR_ERR(handle); |
2925 | goto err_out; | 2925 | goto err_out; |
2926 | } | 2926 | } |
2927 | error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; | 2927 | error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; |
2928 | if (error) { | 2928 | if (error) { |
2929 | ext3_journal_stop(handle); | 2929 | ext4_journal_stop(handle); |
2930 | return error; | 2930 | return error; |
2931 | } | 2931 | } |
2932 | /* Update corresponding info in inode so that everything is in | 2932 | /* Update corresponding info in inode so that everything is in |
@@ -2935,41 +2935,41 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) | |||
2935 | inode->i_uid = attr->ia_uid; | 2935 | inode->i_uid = attr->ia_uid; |
2936 | if (attr->ia_valid & ATTR_GID) | 2936 | if (attr->ia_valid & ATTR_GID) |
2937 | inode->i_gid = attr->ia_gid; | 2937 | inode->i_gid = attr->ia_gid; |
2938 | error = ext3_mark_inode_dirty(handle, inode); | 2938 | error = ext4_mark_inode_dirty(handle, inode); |
2939 | ext3_journal_stop(handle); | 2939 | ext4_journal_stop(handle); |
2940 | } | 2940 | } |
2941 | 2941 | ||
2942 | if (S_ISREG(inode->i_mode) && | 2942 | if (S_ISREG(inode->i_mode) && |
2943 | attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { | 2943 | attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { |
2944 | handle_t *handle; | 2944 | handle_t *handle; |
2945 | 2945 | ||
2946 | handle = ext3_journal_start(inode, 3); | 2946 | handle = ext4_journal_start(inode, 3); |
2947 | if (IS_ERR(handle)) { | 2947 | if (IS_ERR(handle)) { |
2948 | error = PTR_ERR(handle); | 2948 | error = PTR_ERR(handle); |
2949 | goto err_out; | 2949 | goto err_out; |
2950 | } | 2950 | } |
2951 | 2951 | ||
2952 | error = ext3_orphan_add(handle, inode); | 2952 | error = ext4_orphan_add(handle, inode); |
2953 | EXT3_I(inode)->i_disksize = attr->ia_size; | 2953 | EXT4_I(inode)->i_disksize = attr->ia_size; |
2954 | rc = ext3_mark_inode_dirty(handle, inode); | 2954 | rc = ext4_mark_inode_dirty(handle, inode); |
2955 | if (!error) | 2955 | if (!error) |
2956 | error = rc; | 2956 | error = rc; |
2957 | ext3_journal_stop(handle); | 2957 | ext4_journal_stop(handle); |
2958 | } | 2958 | } |
2959 | 2959 | ||
2960 | rc = inode_setattr(inode, attr); | 2960 | rc = inode_setattr(inode, attr); |
2961 | 2961 | ||
2962 | /* If inode_setattr's call to ext3_truncate failed to get a | 2962 | /* If inode_setattr's call to ext4_truncate failed to get a |
2963 | * transaction handle at all, we need to clean up the in-core | 2963 | * transaction handle at all, we need to clean up the in-core |
2964 | * orphan list manually. */ | 2964 | * orphan list manually. */ |
2965 | if (inode->i_nlink) | 2965 | if (inode->i_nlink) |
2966 | ext3_orphan_del(NULL, inode); | 2966 | ext4_orphan_del(NULL, inode); |
2967 | 2967 | ||
2968 | if (!rc && (ia_valid & ATTR_MODE)) | 2968 | if (!rc && (ia_valid & ATTR_MODE)) |
2969 | rc = ext3_acl_chmod(inode); | 2969 | rc = ext4_acl_chmod(inode); |
2970 | 2970 | ||
2971 | err_out: | 2971 | err_out: |
2972 | ext3_std_error(inode->i_sb, error); | 2972 | ext4_std_error(inode->i_sb, error); |
2973 | if (!error) | 2973 | if (!error) |
2974 | error = rc; | 2974 | error = rc; |
2975 | return error; | 2975 | return error; |
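Editor's note: for a shrinking truncate through setattr, the ordering in the hunk above is the whole point — the orphan record and the smaller i_disksize are committed before inode_setattr() drops the pages and invokes ext4_truncate(). A condensed view of that sequence (error paths trimmed; names as they appear in this diff):

        handle = ext4_journal_start(inode, 3);
        error = ext4_orphan_add(handle, inode);        /* crash protection first */
        EXT4_I(inode)->i_disksize = attr->ia_size;     /* shrunken size goes to disk */
        ext4_mark_inode_dirty(handle, inode);
        ext4_journal_stop(handle);

        rc = inode_setattr(inode, attr);               /* truncates pages, calls ext4_truncate() */

        if (inode->i_nlink)                            /* only needed if ext4_truncate() never
                                                        * obtained a handle of its own */
                ext4_orphan_del(NULL, inode);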
@@ -2988,9 +2988,9 @@ err_out: | |||
2988 | * N+5 group descriptor summary blocks | 2988 | * N+5 group descriptor summary blocks |
2989 | * 1 inode block | 2989 | * 1 inode block |
2990 | * 1 superblock. | 2990 | * 1 superblock. |
2991 | * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files | 2991 | * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files |
2992 | * | 2992 | * |
2993 | * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS | 2993 | * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS |
2994 | * | 2994 | * |
2995 | * With ordered or writeback data it's the same, less the N data blocks. | 2995 | * With ordered or writeback data it's the same, less the N data blocks. |
2996 | * | 2996 | * |
@@ -3003,13 +3003,13 @@ err_out: | |||
3003 | * block and work out the exact number of indirects which are touched. Pah. | 3003 | * block and work out the exact number of indirects which are touched. Pah. |
3004 | */ | 3004 | */ |
3005 | 3005 | ||
3006 | static int ext3_writepage_trans_blocks(struct inode *inode) | 3006 | static int ext4_writepage_trans_blocks(struct inode *inode) |
3007 | { | 3007 | { |
3008 | int bpp = ext3_journal_blocks_per_page(inode); | 3008 | int bpp = ext4_journal_blocks_per_page(inode); |
3009 | int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3; | 3009 | int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3; |
3010 | int ret; | 3010 | int ret; |
3011 | 3011 | ||
3012 | if (ext3_should_journal_data(inode)) | 3012 | if (ext4_should_journal_data(inode)) |
3013 | ret = 3 * (bpp + indirects) + 2; | 3013 | ret = 3 * (bpp + indirects) + 2; |
3014 | else | 3014 | else |
3015 | ret = 2 * (bpp + indirects) + 2; | 3015 | ret = 2 * (bpp + indirects) + 2; |
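Editor's note: the credit estimate above is the formula from the preceding comment block — 3 * (bpp + indirects) + 2 with journalled data, 2 * (bpp + indirects) + 2 for ordered or writeback mode, plus the quota blocks added in the next hunk. A worked example under the assumption of 4 KiB pages and 1 KiB filesystem blocks (EXT4_NDIR_BLOCKS is 12):

        int bpp       = 4;                       /* blocks per page: 4096 / 1024 */
        int indirects = (12 % bpp) ? 5 : 3;      /* 12 % 4 == 0, so 3 */

        int journalled = 3 * (bpp + indirects) + 2;   /* 3 * 7 + 2 = 23 credits */
        int ordered    = 2 * (bpp + indirects) + 2;   /* 2 * 7 + 2 = 16 credits */
        /* with CONFIG_QUOTA, add 2 * EXT4_QUOTA_TRANS_BLOCKS(sb) on top */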
@@ -3017,26 +3017,26 @@ static int ext3_writepage_trans_blocks(struct inode *inode) | |||
3017 | #ifdef CONFIG_QUOTA | 3017 | #ifdef CONFIG_QUOTA |
3018 | /* We know that the structure was already allocated during DQUOT_INIT so | 3018 | /* We know that the structure was already allocated during DQUOT_INIT so |
3019 | * we will be updating only the data blocks + inodes */ | 3019 | * we will be updating only the data blocks + inodes */ |
3020 | ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); | 3020 | ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); |
3021 | #endif | 3021 | #endif |
3022 | 3022 | ||
3023 | return ret; | 3023 | return ret; |
3024 | } | 3024 | } |
3025 | 3025 | ||
3026 | /* | 3026 | /* |
3027 | * The caller must have previously called ext3_reserve_inode_write(). | 3027 | * The caller must have previously called ext4_reserve_inode_write(). |
3028 | * Given this, we know that the caller already has write access to iloc->bh. | 3028 | * Given this, we know that the caller already has write access to iloc->bh. |
3029 | */ | 3029 | */ |
3030 | int ext3_mark_iloc_dirty(handle_t *handle, | 3030 | int ext4_mark_iloc_dirty(handle_t *handle, |
3031 | struct inode *inode, struct ext3_iloc *iloc) | 3031 | struct inode *inode, struct ext4_iloc *iloc) |
3032 | { | 3032 | { |
3033 | int err = 0; | 3033 | int err = 0; |
3034 | 3034 | ||
3035 | /* the do_update_inode consumes one bh->b_count */ | 3035 | /* the do_update_inode consumes one bh->b_count */ |
3036 | get_bh(iloc->bh); | 3036 | get_bh(iloc->bh); |
3037 | 3037 | ||
3038 | /* ext3_do_update_inode() does journal_dirty_metadata */ | 3038 | /* ext4_do_update_inode() does journal_dirty_metadata */ |
3039 | err = ext3_do_update_inode(handle, inode, iloc); | 3039 | err = ext4_do_update_inode(handle, inode, iloc); |
3040 | put_bh(iloc->bh); | 3040 | put_bh(iloc->bh); |
3041 | return err; | 3041 | return err; |
3042 | } | 3042 | } |
@@ -3047,22 +3047,22 @@ int ext3_mark_iloc_dirty(handle_t *handle, | |||
3047 | */ | 3047 | */ |
3048 | 3048 | ||
3049 | int | 3049 | int |
3050 | ext3_reserve_inode_write(handle_t *handle, struct inode *inode, | 3050 | ext4_reserve_inode_write(handle_t *handle, struct inode *inode, |
3051 | struct ext3_iloc *iloc) | 3051 | struct ext4_iloc *iloc) |
3052 | { | 3052 | { |
3053 | int err = 0; | 3053 | int err = 0; |
3054 | if (handle) { | 3054 | if (handle) { |
3055 | err = ext3_get_inode_loc(inode, iloc); | 3055 | err = ext4_get_inode_loc(inode, iloc); |
3056 | if (!err) { | 3056 | if (!err) { |
3057 | BUFFER_TRACE(iloc->bh, "get_write_access"); | 3057 | BUFFER_TRACE(iloc->bh, "get_write_access"); |
3058 | err = ext3_journal_get_write_access(handle, iloc->bh); | 3058 | err = ext4_journal_get_write_access(handle, iloc->bh); |
3059 | if (err) { | 3059 | if (err) { |
3060 | brelse(iloc->bh); | 3060 | brelse(iloc->bh); |
3061 | iloc->bh = NULL; | 3061 | iloc->bh = NULL; |
3062 | } | 3062 | } |
3063 | } | 3063 | } |
3064 | } | 3064 | } |
3065 | ext3_std_error(inode->i_sb, err); | 3065 | ext4_std_error(inode->i_sb, err); |
3066 | return err; | 3066 | return err; |
3067 | } | 3067 | } |
3068 | 3068 | ||
@@ -3087,20 +3087,20 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode, | |||
3087 | * to do a write_super() to free up some memory. It has the desired | 3087 | * to do a write_super() to free up some memory. It has the desired |
3088 | * effect. | 3088 | * effect. |
3089 | */ | 3089 | */ |
3090 | int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) | 3090 | int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) |
3091 | { | 3091 | { |
3092 | struct ext3_iloc iloc; | 3092 | struct ext4_iloc iloc; |
3093 | int err; | 3093 | int err; |
3094 | 3094 | ||
3095 | might_sleep(); | 3095 | might_sleep(); |
3096 | err = ext3_reserve_inode_write(handle, inode, &iloc); | 3096 | err = ext4_reserve_inode_write(handle, inode, &iloc); |
3097 | if (!err) | 3097 | if (!err) |
3098 | err = ext3_mark_iloc_dirty(handle, inode, &iloc); | 3098 | err = ext4_mark_iloc_dirty(handle, inode, &iloc); |
3099 | return err; | 3099 | return err; |
3100 | } | 3100 | } |
3101 | 3101 | ||
3102 | /* | 3102 | /* |
3103 | * ext3_dirty_inode() is called from __mark_inode_dirty() | 3103 | * ext4_dirty_inode() is called from __mark_inode_dirty() |
3104 | * | 3104 | * |
3105 | * We're really interested in the case where a file is being extended. | 3105 | * We're really interested in the case where a file is being extended. |
3106 | * i_size has been changed by generic_commit_write() and we thus need | 3106 | * i_size has been changed by generic_commit_write() and we thus need |
@@ -3113,12 +3113,12 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
3113 | * so would cause a commit on atime updates, which we don't bother doing. | 3113 | * so would cause a commit on atime updates, which we don't bother doing. |
3114 | * We handle synchronous inodes at the highest possible level. | 3114 | * We handle synchronous inodes at the highest possible level. |
3115 | */ | 3115 | */ |
3116 | void ext3_dirty_inode(struct inode *inode) | 3116 | void ext4_dirty_inode(struct inode *inode) |
3117 | { | 3117 | { |
3118 | handle_t *current_handle = ext3_journal_current_handle(); | 3118 | handle_t *current_handle = ext4_journal_current_handle(); |
3119 | handle_t *handle; | 3119 | handle_t *handle; |
3120 | 3120 | ||
3121 | handle = ext3_journal_start(inode, 2); | 3121 | handle = ext4_journal_start(inode, 2); |
3122 | if (IS_ERR(handle)) | 3122 | if (IS_ERR(handle)) |
3123 | goto out; | 3123 | goto out; |
3124 | if (current_handle && | 3124 | if (current_handle && |
@@ -3129,9 +3129,9 @@ void ext3_dirty_inode(struct inode *inode) | |||
3129 | } else { | 3129 | } else { |
3130 | jbd_debug(5, "marking dirty. outer handle=%p\n", | 3130 | jbd_debug(5, "marking dirty. outer handle=%p\n", |
3131 | current_handle); | 3131 | current_handle); |
3132 | ext3_mark_inode_dirty(handle, inode); | 3132 | ext4_mark_inode_dirty(handle, inode); |
3133 | } | 3133 | } |
3134 | ext3_journal_stop(handle); | 3134 | ext4_journal_stop(handle); |
3135 | out: | 3135 | out: |
3136 | return; | 3136 | return; |
3137 | } | 3137 | } |
@@ -3140,32 +3140,32 @@ out: | |||
3140 | /* | 3140 | /* |
3141 | * Bind an inode's backing buffer_head into this transaction, to prevent | 3141 | * Bind an inode's backing buffer_head into this transaction, to prevent |
3142 | * it from being flushed to disk early. Unlike | 3142 | * it from being flushed to disk early. Unlike |
3143 | * ext3_reserve_inode_write, this leaves behind no bh reference and | 3143 | * ext4_reserve_inode_write, this leaves behind no bh reference and |
3144 | * returns no iloc structure, so the caller needs to repeat the iloc | 3144 | * returns no iloc structure, so the caller needs to repeat the iloc |
3145 | * lookup to mark the inode dirty later. | 3145 | * lookup to mark the inode dirty later. |
3146 | */ | 3146 | */ |
3147 | static int ext3_pin_inode(handle_t *handle, struct inode *inode) | 3147 | static int ext4_pin_inode(handle_t *handle, struct inode *inode) |
3148 | { | 3148 | { |
3149 | struct ext3_iloc iloc; | 3149 | struct ext4_iloc iloc; |
3150 | 3150 | ||
3151 | int err = 0; | 3151 | int err = 0; |
3152 | if (handle) { | 3152 | if (handle) { |
3153 | err = ext3_get_inode_loc(inode, &iloc); | 3153 | err = ext4_get_inode_loc(inode, &iloc); |
3154 | if (!err) { | 3154 | if (!err) { |
3155 | BUFFER_TRACE(iloc.bh, "get_write_access"); | 3155 | BUFFER_TRACE(iloc.bh, "get_write_access"); |
3156 | err = journal_get_write_access(handle, iloc.bh); | 3156 | err = journal_get_write_access(handle, iloc.bh); |
3157 | if (!err) | 3157 | if (!err) |
3158 | err = ext3_journal_dirty_metadata(handle, | 3158 | err = ext4_journal_dirty_metadata(handle, |
3159 | iloc.bh); | 3159 | iloc.bh); |
3160 | brelse(iloc.bh); | 3160 | brelse(iloc.bh); |
3161 | } | 3161 | } |
3162 | } | 3162 | } |
3163 | ext3_std_error(inode->i_sb, err); | 3163 | ext4_std_error(inode->i_sb, err); |
3164 | return err; | 3164 | return err; |
3165 | } | 3165 | } |
3166 | #endif | 3166 | #endif |
3167 | 3167 | ||
3168 | int ext3_change_inode_journal_flag(struct inode *inode, int val) | 3168 | int ext4_change_inode_journal_flag(struct inode *inode, int val) |
3169 | { | 3169 | { |
3170 | journal_t *journal; | 3170 | journal_t *journal; |
3171 | handle_t *handle; | 3171 | handle_t *handle; |
@@ -3181,7 +3181,7 @@ int ext3_change_inode_journal_flag(struct inode *inode, int val) | |||
3181 | * nobody is changing anything. | 3181 | * nobody is changing anything. |
3182 | */ | 3182 | */ |
3183 | 3183 | ||
3184 | journal = EXT3_JOURNAL(inode); | 3184 | journal = EXT4_JOURNAL(inode); |
3185 | if (is_journal_aborted(journal) || IS_RDONLY(inode)) | 3185 | if (is_journal_aborted(journal) || IS_RDONLY(inode)) |
3186 | return -EROFS; | 3186 | return -EROFS; |
3187 | 3187 | ||
@@ -3197,23 +3197,23 @@ int ext3_change_inode_journal_flag(struct inode *inode, int val) | |||
3197 | */ | 3197 | */ |
3198 | 3198 | ||
3199 | if (val) | 3199 | if (val) |
3200 | EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL; | 3200 | EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; |
3201 | else | 3201 | else |
3202 | EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL; | 3202 | EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; |
3203 | ext3_set_aops(inode); | 3203 | ext4_set_aops(inode); |
3204 | 3204 | ||
3205 | journal_unlock_updates(journal); | 3205 | journal_unlock_updates(journal); |
3206 | 3206 | ||
3207 | /* Finally we can mark the inode as dirty. */ | 3207 | /* Finally we can mark the inode as dirty. */ |
3208 | 3208 | ||
3209 | handle = ext3_journal_start(inode, 1); | 3209 | handle = ext4_journal_start(inode, 1); |
3210 | if (IS_ERR(handle)) | 3210 | if (IS_ERR(handle)) |
3211 | return PTR_ERR(handle); | 3211 | return PTR_ERR(handle); |
3212 | 3212 | ||
3213 | err = ext3_mark_inode_dirty(handle, inode); | 3213 | err = ext4_mark_inode_dirty(handle, inode); |
3214 | handle->h_sync = 1; | 3214 | handle->h_sync = 1; |
3215 | ext3_journal_stop(handle); | 3215 | ext4_journal_stop(handle); |
3216 | ext3_std_error(inode->i_sb, err); | 3216 | ext4_std_error(inode->i_sb, err); |
3217 | 3217 | ||
3218 | return err; | 3218 | return err; |
3219 | } | 3219 | } |
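Editor's note: ext4_change_inode_journal_flag() above quiesces the journal, flips EXT4_JOURNAL_DATA_FL, re-selects the address-space operations and finally forces a synchronous inode update. A hedged usage sketch; the flag-changing caller shown here is assumed for illustration and is not part of this diff:

        /* e.g. from a SETFLAGS-style ioctl handler (hypothetical caller) */
        int want = (new_flags & EXT4_JOURNAL_DATA_FL) != 0;   /* new_flags: illustrative */
        int have = (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) != 0;

        if (want != have) {
                err = ext4_change_inode_journal_flag(inode, want);
                if (err)
                        return err;
        }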