-rw-r--r--   fs/ext3/balloc.c            16
-rw-r--r--   fs/ext3/bitmap.c             2
-rw-r--r--   fs/ext3/dir.c               14
-rw-r--r--   fs/ext3/file.c               2
-rw-r--r--   fs/ext3/fsync.c              6
-rw-r--r--   fs/ext3/hash.c               6
-rw-r--r--   fs/ext3/ialloc.c            48
-rw-r--r--   fs/ext3/inode.c             64
-rw-r--r--   fs/ext3/namei.c             28
-rw-r--r--   fs/ext3/super.c             24
-rw-r--r--   fs/jbd/checkpoint.c         30
-rw-r--r--   fs/jbd/journal.c            56
-rw-r--r--   fs/jbd/recovery.c           54
-rw-r--r--   fs/jbd/revoke.c             70
-rw-r--r--   fs/jbd/transaction.c       128
-rw-r--r--   include/linux/ext3_jbd.h    10
-rw-r--r--   include/linux/jbd.h         56
17 files changed, 307 insertions, 307 deletions
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 063d994bda0b..e6b983707008 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -74,7 +74,7 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
 }

 /*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
@@ -419,8 +419,8 @@ do_more:
 }
 /* @@@ This prevents newly-allocated data from being
 * freed and then reallocated within the same
 * transaction.
 *
 * Ideally we would want to allow that to happen, but to
 * do so requires making journal_forget() capable of
 * revoking the queued write of a data block, which
@@ -433,7 +433,7 @@ do_more:
 * safe not to set the allocation bit in the committed
 * bitmap, because we know that there is no outstanding
 * activity on the buffer any more and so it is safe to
 * reallocate it.
 */
 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
 J_ASSERT_BH(bitmap_bh,
@@ -518,7 +518,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
@@ -584,7 +584,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,

 if (start > 0) {
 /*
 * The goal was occupied; search forward for a free
 * block within the next XX blocks.
 *
 * end_goal is more or less random, but it has to be
@@ -1194,7 +1194,7 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
 /*
 * ext3_new_block uses a goal block to assist allocation. If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated. Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
@@ -1303,7 +1303,7 @@ retry_alloc:
 smp_rmb();

 /*
 * Now search the rest of the groups. We assume that
 * i and gdp correctly point to the last group visited.
 */
 for (bgi = 0; bgi < ngroups; bgi++) {
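The allocation policy described in the comment above (try the goal block, then a 32-block window after it, then a fully free byte in the bitmap, then any free bit) can be illustrated with a self-contained user-space sketch; only the search order comes from the comment, all names and types here are illustrative:

#include <limits.h>
#include <stddef.h>

/* Illustrative only: a set bit means "in use", a clear bit means "free". */
static int bit_is_free(const unsigned char *bitmap, size_t bit)
{
	return !(bitmap[bit / CHAR_BIT] & (1u << (bit % CHAR_BIT)));
}

/*
 * Sketch of the search order described above: goal block first, then a
 * small window after it, then an entirely free byte, then any free bit.
 * Returns the chosen bit, or -1 if the group is full.
 */
static long find_block_near_goal(const unsigned char *bitmap, size_t nbits,
				 size_t goal)
{
	size_t i, end = goal + 32 < nbits ? goal + 32 : nbits;

	if (bit_is_free(bitmap, goal))
		return (long)goal;

	for (i = goal + 1; i < end; i++)		/* window after the goal */
		if (bit_is_free(bitmap, i))
			return (long)i;

	for (i = goal / CHAR_BIT; i < nbits / CHAR_BIT; i++)	/* free byte */
		if (bitmap[i] == 0)
			return (long)(i * CHAR_BIT);

	for (i = goal; i < nbits; i++)			/* any free bit */
		if (bit_is_free(bitmap, i))
			return (long)i;

	return -1;
}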
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index ce4f82b9e528..b9176eed98d1 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -20,7 +20,7 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
 unsigned int i;
 unsigned long sum = 0;

 if (!map)
 return (0);
 for (i = 0; i < numchars; i++)
 sum += nibblemap[map->b_data[i] & 0xf] +
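The counting loop shown above sums a per-nibble lookup table over the bitmap. A standalone sketch of the same idea (the table is simply the number of zero bits in each 4-bit value, not copied from the kernel source):

#include <stddef.h>

/* Free (zero) bits in each possible 4-bit value: 0x0 has 4, 0xf has 0. */
static const int nibblemap[16] = {
	4, 3, 3, 2, 3, 2, 2, 1,
	3, 2, 2, 1, 2, 1, 1, 0
};

/* Count clear bits in a bitmap by summing the two nibbles of every byte. */
static unsigned long count_free_bits(const unsigned char *map, size_t numchars)
{
	unsigned long sum = 0;
	size_t i;

	if (!map)
		return 0;
	for (i = 0; i < numchars; i++)
		sum += nibblemap[map[i] & 0xf] + nibblemap[(map[i] >> 4) & 0xf];
	return sum;
}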
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index fbb0d4ed07d4..6f9e5a523c87 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -59,7 +59,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)

 return (ext3_filetype_table[filetype]);
 }


 int ext3_check_dir_entry (const char * function, struct inode * dir,
 struct ext3_dir_entry_2 * de,
@@ -162,7 +162,7 @@ revalidate:
 * to make sure. */
 if (filp->f_version != inode->i_version) {
 for (i = 0; i < sb->s_blocksize && i < offset; ) {
 de = (struct ext3_dir_entry_2 *)
 (bh->b_data + i);
 /* It's too expensive to do a full
 * dirent test each time round this
@@ -181,7 +181,7 @@ revalidate:
 filp->f_version = inode->i_version;
 }

 while (!error && filp->f_pos < inode->i_size
 && offset < sb->s_blocksize) {
 de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
 if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
@@ -229,7 +229,7 @@ out:
 /*
 * These functions convert from the major/minor hash to an f_pos
 * value.
 *
 * Currently we only use major hash numer. This is unfortunate, but
 * on 32-bit machines, the same VFS interface is used for lseek and
 * llseek, so if we use the 64 bit offset, then the 32-bit versions of
@@ -250,7 +250,7 @@ out:
 struct fname {
 __u32 hash;
 __u32 minor_hash;
 struct rb_node rb_hash;
 struct fname *next;
 __u32 inode;
 __u8 name_len;
@@ -410,7 +410,7 @@ static int call_filldir(struct file * filp, void * dirent,
 curr_pos = hash2pos(fname->hash, fname->minor_hash);
 while (fname) {
 error = filldir(dirent, fname->name,
 fname->name_len, curr_pos,
 fname->inode,
 get_dtype(sb, fname->file_type));
 if (error) {
@@ -465,7 +465,7 @@ static int ext3_dx_readdir(struct file * filp,
 /*
 * Fill the rbtree if we have no more entries,
 * or the inode has changed since we last read in the
 * cached entries.
 */
 if ((!info->curr_node) ||
 (filp->f_version != inode->i_version)) {
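As the comment above notes, only the major hash can be folded into f_pos on 32-bit machines. A minimal sketch of such a conversion, with the exact shift treated as illustrative rather than ext3's definitive encoding:

#include <stdint.h>

/*
 * Fold a (major, minor) htree hash into a 32-bit directory position and
 * back.  Only the major hash survives the round trip on a 32-bit f_pos;
 * the minor hash is simply dropped.  Sketch only.
 */
static uint32_t hash2pos(uint32_t major, uint32_t minor)
{
	(void)minor;			/* no room for it in a 32-bit offset */
	return major >> 1;
}

static uint32_t pos2maj_hash(uint32_t pos)
{
	return pos << 1;
}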
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 1efefb630ea9..994efd189f4e 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -100,7 +100,7 @@ ext3_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t

 force_commit:
 err = ext3_force_commit(inode->i_sb);
 if (err)
 return err;
 return ret;
 }
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 49382a208e05..dd1fd3c0fc05 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -8,14 +8,14 @@
 * Universite Pierre et Marie Curie (Paris VI)
 * from
 * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext3fs fsync primitive
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * Removed unnecessary code duplication for little endian machines
 * and excessive __inline__s.
 * Andi Kleen, 1997
 *
 * Major simplications and cleanup - we only need to do the metadata, because
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 5a2d1235ead0..7fa637cd322a 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -4,7 +4,7 @@
 * Copyright (C) 2002 by Theodore Ts'o
 *
 * This file is released under the GPL v2.
 *
 * This file may be redistributed under the terms of the GNU Public
 * License.
 */
@@ -80,11 +80,11 @@ static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
 * Returns the hash of a filename. If len is 0 and name is NULL, then
 * this function can be used to test whether or not a hash version is
 * supported.
 *
 * The seed is an 4 longword (32 bits) "secret" which can be used to
 * uniquify a hash. If the seed is all zero's, then some default seed
 * may be used.
 *
 * A particular hash version specifies whether or not the seed is
 * represented, and whether or not the returned hash is 32 bits or 64
 * bits. 32 bit hashes will return 0 for the minor hash.
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 36546ed36a14..5e288368499b 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -216,7 +216,7 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
 continue;
 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
 continue;
 if (!best_desc ||
 (le16_to_cpu(desc->bg_free_blocks_count) >
 le16_to_cpu(best_desc->bg_free_blocks_count))) {
 best_group = group;
@@ -226,30 +226,30 @@ static int find_group_dir(struct super_block *sb, struct inode *parent)
 return best_group;
 }

 /*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the rest rules look so:
 *
 * It's OK to put directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large debt (max_debt).
 * Parent's group is prefered, if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

 #define INODE_COST 64
 #define BLOCK_COST 256
@@ -454,7 +454,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
 group = find_group_dir(sb, dir);
 else
 group = find_group_orlov(sb, dir);
 } else
 group = find_group_other(sb, dir);

 err = -ENOSPC;
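The Orlov rules quoted above reduce to a per-group acceptance test. A hedged sketch, where the structures and limit names merely mirror the comment (max_dirs, min_inodes, min_blocks, max_debt) and are not kernel definitions:

/* Illustrative per-group statistics and limits; not kernel structures. */
struct group_stats {
	unsigned dirs;		/* directories already in the group */
	unsigned free_inodes;
	unsigned free_blocks;
	unsigned debt;		/* 0..255, raised per directory, lowered per inode */
};

struct orlov_limits {
	unsigned max_dirs;
	unsigned min_inodes;
	unsigned min_blocks;
	unsigned max_debt;
};

/*
 * A group may receive a new directory unless it already has too many
 * directories, too few free inodes, too few free blocks, or too much debt.
 */
static int group_acceptable(const struct group_stats *g,
			    const struct orlov_limits *lim)
{
	if (g->dirs >= lim->max_dirs)
		return 0;
	if (g->free_inodes < lim->min_inodes)
		return 0;
	if (g->free_blocks < lim->min_blocks)
		return 0;
	if (g->debt > lim->max_debt)
		return 0;
	return 1;
}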
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 84be02e93652..473d206b1d7e 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -55,7 +55,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
 /*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled. Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
@@ -105,7 +105,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
 static unsigned long blocks_for_truncate(struct inode *inode)
 {
 unsigned long needed;

@@ -122,13 +122,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)

 /* But we need to bound the transaction so we don't overflow the
 * journal. */
 if (needed > EXT3_MAX_TRANS_DATA)
 needed = EXT3_MAX_TRANS_DATA;

 return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 }

 /*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a conventient checkpoint to make
 * sure we don't overflow the journal.
@@ -136,9 +136,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit. If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
 static handle_t *start_transaction(struct inode *inode)
 {
 handle_t *result;

@@ -215,12 +215,12 @@ void ext3_delete_inode (struct inode * inode)
 ext3_orphan_del(handle, inode);
 EXT3_I(inode)->i_dtime = get_seconds();

 /*
 * One subtle ordering requirement: if anything has gone wrong
 * (transaction abort, IO errors, whatever), then we can still
 * do these next steps (the fs will already have been marked as
 * having errors), but we can't free the inode if the mark_dirty
 * fails.
 */
 if (ext3_mark_inode_dirty(handle, inode))
 /* If that failed, just do the required in-core inode clear. */
@@ -398,7 +398,7 @@ no_block:
 * + if there is a block to the left of our position - allocate near it.
 * + if pointer will live in indirect block - allocate near that block.
 * + if pointer will live in inode - allocate in the same
 * cylinder group.
 *
 * In the latter case we colour the starting block by the callers PID to
 * prevent it from clashing with concurrent allocations for a different inode
@@ -744,7 +744,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 jbd_debug(5, "splicing indirect only\n");
 BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
 err = ext3_journal_dirty_metadata(handle, where->bh);
 if (err)
 goto err_out;
 } else {
 /*
@@ -1137,7 +1137,7 @@ static int walk_page_buffers( handle_t *handle,
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
 static int do_journal_get_write_access(handle_t *handle,
 struct buffer_head *bh)
@@ -1282,7 +1282,7 @@ static int ext3_journalled_commit_write(struct file *file,
 if (inode->i_size > EXT3_I(inode)->i_disksize) {
 EXT3_I(inode)->i_disksize = inode->i_size;
 ret2 = ext3_mark_inode_dirty(handle, inode);
 if (!ret)
 ret = ret2;
 }
 ret2 = ext3_journal_stop(handle);
@@ -1291,7 +1291,7 @@ static int ext3_journalled_commit_write(struct file *file,
 return ret;
 }

 /*
 * bmap() is special. It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
@@ -1300,10 +1300,10 @@ static int ext3_journalled_commit_write(struct file *file,
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zero's written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
 static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
 {
@@ -1312,16 +1312,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
 int err;

 if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
 /*
 * This is a REALLY heavyweight approach, but the use of
 * bmap on dirty files is expected to be extremely rare:
 * only if we run lilo or swapon on a freshly made file
 * do we expect this to happen.
 *
 * (bmap requires CAP_SYS_RAWIO so this does not
 * represent an unprivileged user DOS attack --- we'd be
 * in trouble if mortal users could trigger this path at
 * will.)
 *
 * NB. EXT3_STATE_JDATA is not set on files other than
 * regular files. If somebody wants to bmap a directory
@@ -1457,7 +1457,7 @@ static int ext3_ordered_writepage(struct page *page,
 */

 /*
 * And attach them to the current transaction. But only if
 * block_write_full_page() succeeded. Otherwise they are unmapped,
 * and generally junk.
 */
@@ -1644,7 +1644,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
 }
 }

 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
 offset, nr_segs,
 ext3_get_block, NULL);

@@ -2025,7 +2025,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
 __le32 *first, __le32 *last)
 {
 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
 unsigned long count = 0; /* Number of blocks in the run */
 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
 corresponding to
 block_to_free */
@@ -2054,7 +2054,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
 } else if (nr == block_to_free + count) {
 count++;
 } else {
 ext3_clear_blocks(handle, inode, this_bh,
 block_to_free,
 count, block_to_free_p, p);
 block_to_free = nr;
@@ -2184,7 +2184,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
 *p = 0;
 BUFFER_TRACE(parent_bh,
 "call ext3_journal_dirty_metadata");
 ext3_journal_dirty_metadata(handle,
 parent_bh);
 }
 }
@@ -2704,7 +2704,7 @@ void ext3_read_inode(struct inode * inode)
 if (raw_inode->i_block[0])
 init_special_inode(inode, inode->i_mode,
 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
 else
 init_special_inode(inode, inode->i_mode,
 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
 }
@@ -2724,8 +2724,8 @@ bad_inode:
 *
 * The caller must have write access to iloc->bh.
 */
 static int ext3_do_update_inode(handle_t *handle,
 struct inode *inode,
 struct ext3_iloc *iloc)
 {
 struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2900,7 +2900,7 @@ int ext3_write_inode(struct inode *inode, int wait)
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Called with inode->sem down.
 */
@@ -3043,13 +3043,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
 return err;
 }

 /*
 * On success, We end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */

 int
 ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
 struct ext3_iloc *iloc)
 {
 int err = 0;
@@ -3139,7 +3139,7 @@ out:
 }

 #if 0
 /*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early. Unlike
 * ext3_reserve_inode_write, this leaves behind no bh reference and
@@ -3157,7 +3157,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode)
 BUFFER_TRACE(iloc.bh, "get_write_access");
 err = journal_get_write_access(handle, iloc.bh);
 if (!err)
 err = ext3_journal_dirty_metadata(handle,
 iloc.bh);
 brelse(iloc.bh);
 }
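The credit estimate sketched by blocks_for_truncate() above (count the indirect blocks the next chunk may touch, cap it so the handle fits in the journal, then add the fixed per-operation cost) looks roughly like this; the constants stand in for EXT3_MAX_TRANS_DATA and EXT3_DATA_TRANS_BLOCKS and are not their real values:

/* Placeholder limits standing in for EXT3_MAX_TRANS_DATA and
 * EXT3_DATA_TRANS_BLOCKS; the real values depend on the filesystem. */
#define MAX_TRANS_DATA		64
#define DATA_TRANS_BLOCKS	8

/*
 * Credits needed for the next chunk of a truncate: one credit per indirect
 * block we may dirty, bounded so the transaction cannot overflow the
 * journal, plus the fixed per-operation overhead.
 */
static unsigned long blocks_for_truncate_sketch(unsigned long indirect_blocks)
{
	unsigned long needed = indirect_blocks;

	/* Bound the transaction so we don't overflow the journal. */
	if (needed > MAX_TRANS_DATA)
		needed = MAX_TRANS_DATA;

	return DATA_TRANS_BLOCKS + needed;
}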
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 2aa7101b27cd..4123f5261bcd 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -76,7 +76,7 @@ static struct buffer_head *ext3_append(handle_t *handle,
 #ifdef DX_DEBUG
 #define dxtrace(command) command
 #else
 #define dxtrace(command)
 #endif

 struct fake_dirent
@@ -169,7 +169,7 @@ static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
 static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
 static int ext3_htree_next_block(struct inode *dir, __u32 hash,
 struct dx_frame *frame,
 struct dx_frame *frames,
 __u32 *start_hash);
 static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
 struct ext3_dir_entry_2 **res_dir, int *err);
@@ -250,7 +250,7 @@ static void dx_show_index (char * label, struct dx_entry *entries)
 }

 struct stats
 {
 unsigned names;
 unsigned space;
 unsigned bcount;
@@ -464,7 +464,7 @@ static void dx_release (struct dx_frame *frames)
 */
 static int ext3_htree_next_block(struct inode *dir, __u32 hash,
 struct dx_frame *frame,
 struct dx_frame *frames,
 __u32 *start_hash)
 {
 struct dx_frame *p;
@@ -632,7 +632,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 }
 count += ret;
 hashval = ~0;
 ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
 frame, frames, &hashval);
 *next_hash = hashval;
 if (ret < 0) {
@@ -649,7 +649,7 @@ int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 break;
 }
 dx_release(frames);
 dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
 count, *next_hash));
 return count;
 errout:
@@ -1050,7 +1050,7 @@ struct dentry *ext3_get_parent(struct dentry *child)
 parent = ERR_PTR(-ENOMEM);
 }
 return parent;
 }

 #define S_SHIFT 12
 static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
@@ -1198,7 +1198,7 @@ errout:
 * add_dirent_to_buf will attempt search the directory block for
 * space. It will return -ENOSPC if no space is available, and -EIO
 * and -EEXIST if directory entry already exists.
 *
 * NOTE! bh is NOT released in the case where ENOSPC is returned. In
 * all other cases bh is released.
 */
@@ -1572,7 +1572,7 @@ cleanup:
 * ext3_delete_entry deletes a directory entry by merging it with the
 * previous entry
 */
 static int ext3_delete_entry (handle_t *handle,
 struct inode * dir,
 struct ext3_dir_entry_2 * de_del,
 struct buffer_head * bh)
@@ -1643,12 +1643,12 @@ static int ext3_add_nondir(handle_t *handle,
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate().
 */
 static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
 struct nameidata *nd)
 {
 handle_t *handle;
 struct inode * inode;
 int err, retries = 0;

@@ -1813,7 +1813,7 @@ static int empty_dir (struct inode * inode)
 de1 = (struct ext3_dir_entry_2 *)
 ((char *) de + le16_to_cpu(de->rec_len));
 if (le32_to_cpu(de->inode) != inode->i_ino ||
 !le32_to_cpu(de1->inode) ||
 strcmp (".", de->name) ||
 strcmp ("..", de1->name)) {
 ext3_warning (inode->i_sb, "empty_dir",
@@ -1883,7 +1883,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
 * being truncated, or files being unlinked. */

 /* @@@ FIXME: Observation from aviro:
 * I think I can trigger J_ASSERT in ext3_orphan_add(). We block
 * here (on lock_super()), so race with ext3_link() which might bump
 * ->i_nlink. For, say it, character device. Not a regular file,
 * not a directory, not a symlink and ->i_nlink > 0.
@@ -2393,4 +2393,4 @@ struct inode_operations ext3_special_inode_operations = {
 .removexattr = generic_removexattr,
 #endif
 .permission = ext3_permission,
 };
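The empty_dir() check excerpted above verifies that the first entry is "." pointing back at the directory itself and that the second is a live "..". A simplified sketch of that test over an in-memory entry array (the toy_dirent type is purely illustrative, not the on-disk ext3_dir_entry_2):

#include <stddef.h>
#include <string.h>

/* Illustrative directory entry; inode 0 means the slot is unused. */
struct toy_dirent {
	unsigned long inode;
	const char *name;
};

/*
 * A directory is empty when, apart from "." pointing back at itself and a
 * live "..", no other entry has an inode.
 */
static int dir_is_empty(const struct toy_dirent *de, size_t nr,
			unsigned long self_ino)
{
	size_t i;

	if (nr < 2 || de[0].inode != self_ino || strcmp(de[0].name, ".") ||
	    de[1].inode == 0 || strcmp(de[1].name, ".."))
		return 0;	/* malformed: the real code warns here */
	for (i = 2; i < nr; i++)
		if (de[i].inode != 0)
			return 0;
	return 1;
}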
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3559086eee5f..4b95bfe4c8f7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -62,13 +62,13 @@ static void ext3_unlockfs(struct super_block *sb);
 static void ext3_write_super (struct super_block * sb);
 static void ext3_write_super_lockfs(struct super_block *sb);

 /*
 * Wrappers for journal_start/end.
 *
 * The only special thing we need to do here is to make sure that all
 * journal_end calls result in the superblock being marked dirty, so
 * that sync() will call the filesystem's write_super callback if
 * appropriate.
 */
 handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
 {
@@ -90,11 +90,11 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
 return journal_start(journal, nblocks);
 }

 /*
 * The only special thing we need to do here is to make sure that all
 * journal_stop calls result in the superblock being marked dirty, so
 * that sync() will call the filesystem's write_super callback if
 * appropriate.
 */
 int __ext3_journal_stop(const char *where, handle_t *handle)
 {
@@ -369,7 +369,7 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
 {
 struct list_head *l;

 printk(KERN_ERR "sb orphan head is %d\n",
 le32_to_cpu(sbi->s_es->s_last_orphan));

 printk(KERN_ERR "sb_info orphan list:\n");
@@ -378,7 +378,7 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
 printk(KERN_ERR " "
 "inode %s:%ld at %p: mode %o, nlink %d, next %d\n",
 inode->i_sb->s_id, inode->i_ino, inode,
 inode->i_mode, inode->i_nlink,
 NEXT_ORPHAN(inode));
 }
 }
@@ -475,7 +475,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
 inode_init_once(&ei->vfs_inode);
 }
 }

 static int init_inodecache(void)
 {
 ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
@@ -1483,7 +1483,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
 EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
 EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
 printk(KERN_WARNING
 "EXT3-fs warning: feature flags set on rev 0 fs, "
 "running e2fsck is recommended\n");
 /*
@@ -1509,7 +1509,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)

 if (blocksize < EXT3_MIN_BLOCK_SIZE ||
 blocksize > EXT3_MAX_BLOCK_SIZE) {
 printk(KERN_ERR
 "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n",
 blocksize, sb->s_id);
 goto failed_mount;
@@ -1533,14 +1533,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
 bh = sb_bread(sb, logic_sb_block);
 if (!bh) {
 printk(KERN_ERR
 "EXT3-fs: Can't read superblock on 2nd try.\n");
 goto failed_mount;
 }
 es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
 sbi->s_es = es;
 if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
 printk (KERN_ERR
 "EXT3-fs: Magic mismatch, very weird !\n");
 goto failed_mount;
 }
@@ -1820,7 +1820,7 @@ out_fail:
 /*
 * Setup any per-fs journal parameters now. We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
 static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
 {
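The two comments above describe the intent of the journal_start/journal_stop wrappers: every stop should leave the superblock dirty so that a later sync() reaches the filesystem's write_super callback. A toy sketch of that behaviour with stand-in types (nothing here is the kernel API):

#include <stdlib.h>

/* Toy stand-ins for the kernel structures involved. */
struct toy_handle { int err; };
struct toy_journal { int aborted; };
struct toy_super_block { struct toy_journal *journal; int s_dirt; };

static struct toy_handle *toy_journal_start(struct toy_journal *j, int nblocks)
{
	(void)j; (void)nblocks;
	return calloc(1, sizeof(struct toy_handle));
}

static int toy_journal_stop(struct toy_handle *h)
{
	int err = h ? h->err : -1;

	free(h);
	return err;
}

/* Start a handle against the superblock's journal. */
static struct toy_handle *journal_start_sb(struct toy_super_block *sb, int nblocks)
{
	return toy_journal_start(sb->journal, nblocks);
}

/*
 * Stop the handle and, whatever happened, mark the superblock dirty so a
 * later sync() will call the filesystem's write_super callback.
 */
static int journal_stop_sb(struct toy_super_block *sb, struct toy_handle *h)
{
	int err = toy_journal_stop(h);

	sb->s_dirt = 1;
	return err;
}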
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index d0685596e5a6..961ada28db5e 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -1,6 +1,6 @@
 /*
 * linux/fs/checkpoint.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1999 Red Hat Software --- All Rights Reserved
@@ -9,8 +9,8 @@
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Checkpoint routines for the generic filesystem journaling code.
 * Part of the ext2fs journaling system.
 *
 * Checkpointing is the process of ensuring that a section of the log is
 * committed fully to disk, so that that portion of the log can be
@@ -226,7 +226,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
 * Try to flush one buffer from the checkpoint list to disk.
 *
 * Return 1 if something happened which requires us to abort the current
 * scan of the checkpoint list.
 *
 * Called with j_list_lock held and drops it if 1 is returned
 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
@@ -270,7 +270,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
 * possibly block, while still holding the journal lock.
 * We cannot afford to let the transaction logic start
 * messing around with this buffer before we write it to
 * disk, as that would break recoverability.
 */
 BUFFER_TRACE(bh, "queue");
 get_bh(bh);
@@ -293,7 +293,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 */
 int log_do_checkpoint(journal_t *journal)
@@ -304,10 +304,10 @@ int log_do_checkpoint(journal_t *journal)

 jbd_debug(1, "Start checkpoint\n");

 /*
 * First thing: if there are any transactions in the log which
 * don't need checkpointing, just eliminate them from the
 * journal straight away.
 */
 result = cleanup_journal_tail(journal);
 jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
@@ -385,9 +385,9 @@ out:
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock. If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * Called with the journal lock held.
 *
 * This is the only part of the journaling code which really needs to be
@@ -404,8 +404,8 @@ int cleanup_journal_tail(journal_t *journal)
 unsigned long blocknr, freed;

 /* OK, work out the oldest transaction remaining in the log, and
 * the log block it starts at.
 *
 * If the log is now empty, we need to work out which is the
 * next transaction ID we will write, and where it will
 * start. */
@@ -558,7 +558,7 @@ out:
 return ret;
 }

 /*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being write-back flushed to disk, or being
 * committed to the log).
@@ -636,7 +636,7 @@ out:
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
 void __journal_insert_checkpoint(struct journal_head *jh,
 transaction_t *transaction)
 {
 JBUFFER_TRACE(jh, "entry");
@@ -658,7 +658,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,

 /*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
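cleanup_journal_tail(), referenced in log_do_checkpoint() above, rolls the superblock's notion of the log tail forward. On a circular log, the number of blocks reclaimed by that move can be computed as below; parameter names are illustrative:

/*
 * Blocks reclaimed when the tail of a circular log moves from old_tail to
 * new_tail.  The log occupies blocks [first, last); if the tail wrapped
 * around, the distance has to account for the wrap.
 */
static unsigned long tail_blocks_freed(unsigned long old_tail,
				       unsigned long new_tail,
				       unsigned long first,
				       unsigned long last)
{
	unsigned long freed = new_tail - old_tail;

	if (new_tail < old_tail)
		freed += last - first;	/* wrapped past the end of the log */
	return freed;
}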
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index f66724ce443a..87c5a6d00805 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -578,7 +578,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
578 * this is a no-op. If needed, we can use j_blk_offset - everything is 578 * this is a no-op. If needed, we can use j_blk_offset - everything is
579 * ready. 579 * ready.
580 */ 580 */
581int journal_bmap(journal_t *journal, unsigned long blocknr, 581int journal_bmap(journal_t *journal, unsigned long blocknr,
582 unsigned long *retp) 582 unsigned long *retp)
583{ 583{
584 int err = 0; 584 int err = 0;
@@ -699,10 +699,10 @@ fail:
699 * @len: Lenght of the journal in blocks. 699 * @len: Lenght of the journal in blocks.
700 * @blocksize: blocksize of journalling device 700 * @blocksize: blocksize of journalling device
701 * @returns: a newly created journal_t * 701 * @returns: a newly created journal_t *
702 * 702 *
703 * journal_init_dev creates a journal which maps a fixed contiguous 703 * journal_init_dev creates a journal which maps a fixed contiguous
704 * range of blocks on an arbitrary block device. 704 * range of blocks on an arbitrary block device.
705 * 705 *
706 */ 706 */
707journal_t * journal_init_dev(struct block_device *bdev, 707journal_t * journal_init_dev(struct block_device *bdev,
708 struct block_device *fs_dev, 708 struct block_device *fs_dev,
@@ -739,11 +739,11 @@ journal_t * journal_init_dev(struct block_device *bdev,
739 739
740 return journal; 740 return journal;
741} 741}
742 742
743/** 743/**
744 * journal_t * journal_init_inode () - creates a journal which maps to a inode. 744 * journal_t * journal_init_inode () - creates a journal which maps to a inode.
745 * @inode: An inode to create the journal in 745 * @inode: An inode to create the journal in
746 * 746 *
747 * journal_init_inode creates a journal which maps an on-disk inode as 747 * journal_init_inode creates a journal which maps an on-disk inode as
748 * the journal. The inode must exist already, must support bmap() and 748 * the journal. The inode must exist already, must support bmap() and
749 * must have all data blocks preallocated. 749 * must have all data blocks preallocated.
@@ -763,7 +763,7 @@ journal_t * journal_init_inode (struct inode *inode)
763 journal->j_inode = inode; 763 journal->j_inode = inode;
764 jbd_debug(1, 764 jbd_debug(1,
765 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n", 765 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
766 journal, inode->i_sb->s_id, inode->i_ino, 766 journal, inode->i_sb->s_id, inode->i_ino,
767 (long long) inode->i_size, 767 (long long) inode->i_size,
768 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); 768 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
769 769
@@ -798,10 +798,10 @@ journal_t * journal_init_inode (struct inode *inode)
798 return journal; 798 return journal;
799} 799}
800 800
801/* 801/*
802 * If the journal init or create aborts, we need to mark the journal 802 * If the journal init or create aborts, we need to mark the journal
803 * superblock as being NULL to prevent the journal destroy from writing 803 * superblock as being NULL to prevent the journal destroy from writing
804 * back a bogus superblock. 804 * back a bogus superblock.
805 */ 805 */
806static void journal_fail_superblock (journal_t *journal) 806static void journal_fail_superblock (journal_t *journal)
807{ 807{
@@ -844,13 +844,13 @@ static int journal_reset(journal_t *journal)
844 return 0; 844 return 0;
845} 845}
846 846
847/** 847/**
848 * int journal_create() - Initialise the new journal file 848 * int journal_create() - Initialise the new journal file
849 * @journal: Journal to create. This structure must have been initialised 849 * @journal: Journal to create. This structure must have been initialised
850 * 850 *
851 * Given a journal_t structure which tells us which disk blocks we can 851 * Given a journal_t structure which tells us which disk blocks we can
852 * use, create a new journal superblock and initialise all of the 852 * use, create a new journal superblock and initialise all of the
853 * journal fields from scratch. 853 * journal fields from scratch.
854 **/ 854 **/
855int journal_create(journal_t *journal) 855int journal_create(journal_t *journal)
856{ 856{
@@ -915,7 +915,7 @@ int journal_create(journal_t *journal)
915 return journal_reset(journal); 915 return journal_reset(journal);
916} 916}
917 917
918/** 918/**
919 * void journal_update_superblock() - Update journal sb on disk. 919 * void journal_update_superblock() - Update journal sb on disk.
920 * @journal: The journal to update. 920 * @journal: The journal to update.
921 * @wait: Set to '0' if you don't want to wait for IO completion. 921 * @wait: Set to '0' if you don't want to wait for IO completion.
@@ -939,7 +939,7 @@ void journal_update_superblock(journal_t *journal, int wait)
939 journal->j_transaction_sequence) { 939 journal->j_transaction_sequence) {
940 jbd_debug(1,"JBD: Skipping superblock update on recovered sb " 940 jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
941 "(start %ld, seq %d, errno %d)\n", 941 "(start %ld, seq %d, errno %d)\n",
942 journal->j_tail, journal->j_tail_sequence, 942 journal->j_tail, journal->j_tail_sequence,
943 journal->j_errno); 943 journal->j_errno);
944 goto out; 944 goto out;
945 } 945 }
@@ -1062,7 +1062,7 @@ static int load_superblock(journal_t *journal)
1062/** 1062/**
1063 * int journal_load() - Read journal from disk. 1063 * int journal_load() - Read journal from disk.
1064 * @journal: Journal to act on. 1064 * @journal: Journal to act on.
1065 * 1065 *
1066 * Given a journal_t structure which tells us which disk blocks contain 1066 * Given a journal_t structure which tells us which disk blocks contain
1067 * a journal, read the journal from disk to initialise the in-memory 1067 * a journal, read the journal from disk to initialise the in-memory
1068 * structures. 1068 * structures.
@@ -1172,9 +1172,9 @@ void journal_destroy(journal_t *journal)
1172 * @compat: bitmask of compatible features 1172 * @compat: bitmask of compatible features
1173 * @ro: bitmask of features that force read-only mount 1173 * @ro: bitmask of features that force read-only mount
1174 * @incompat: bitmask of incompatible features 1174 * @incompat: bitmask of incompatible features
1175 * 1175 *
1176 * Check whether the journal uses all of a given set of 1176 * Check whether the journal uses all of a given set of
1177 * features. Return true (non-zero) if it does. 1177 * features. Return true (non-zero) if it does.
1178 **/ 1178 **/
1179 1179
1180int journal_check_used_features (journal_t *journal, unsigned long compat, 1180int journal_check_used_features (journal_t *journal, unsigned long compat,
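
One common use of this check, sketched below, is asking whether the journal still carries unrecovered state before allowing a strictly read-only mount; it relies on the RECOVER incompat bit from jbd.h, and the helper name is invented.

#include <linux/jbd.h>

static int my_fs_journal_needs_recovery(journal_t *journal)
{
        /* Non-zero if the on-disk journal superblock has the RECOVER
         * incompat feature set, i.e. replay is still pending. */
        return journal_check_used_features(journal, 0, 0,
                                           JFS_FEATURE_INCOMPAT_RECOVER);
}
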
@@ -1203,7 +1203,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
1203 * @compat: bitmask of compatible features 1203 * @compat: bitmask of compatible features
1204 * @ro: bitmask of features that force read-only mount 1204 * @ro: bitmask of features that force read-only mount
1205 * @incompat: bitmask of incompatible features 1205 * @incompat: bitmask of incompatible features
1206 * 1206 *
1207 * Check whether the journaling code supports the use of 1207 * Check whether the journaling code supports the use of
1208 * all of a given set of features on this journal. Return true 1208 * all of a given set of features on this journal. Return true
1209 * (non-zero) if it can. */ 1209 * (non-zero) if it can. */
@@ -1241,7 +1241,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
1241 * @incompat: bitmask of incompatible features 1241 * @incompat: bitmask of incompatible features
1242 * 1242 *
1243 * Mark a given journal feature as present on the 1243 * Mark a given journal feature as present on the
1244 * superblock. Returns true if the requested features could be set. 1244 * superblock. Returns true if the requested features could be set.
1245 * 1245 *
1246 */ 1246 */
1247 1247
@@ -1327,7 +1327,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
1327/** 1327/**
1328 * int journal_flush () - Flush journal 1328 * int journal_flush () - Flush journal
1329 * @journal: Journal to act on. 1329 * @journal: Journal to act on.
1330 * 1330 *
1331 * Flush all data for a given journal to disk and empty the journal. 1331 * Flush all data for a given journal to disk and empty the journal.
1332 * Filesystems can use this when remounting readonly to ensure that 1332 * Filesystems can use this when remounting readonly to ensure that
1333 * recovery does not need to happen on remount. 1333 * recovery does not need to happen on remount.
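
The remount-read-only use mentioned above looks roughly like this in a client; a minimal sketch assuming a hypothetical my_fs_quiesce_journal() helper built from calls that appear elsewhere in this patch.

#include <linux/jbd.h>

static int my_fs_quiesce_journal(journal_t *journal)
{
        int err;

        journal_lock_updates(journal);  /* barrier: wait out running handles */
        err = journal_flush(journal);   /* checkpoint everything, empty the log */
        journal_unlock_updates(journal);
        return err;
}
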
@@ -1394,7 +1394,7 @@ int journal_flush(journal_t *journal)
1394 * int journal_wipe() - Wipe journal contents 1394 * int journal_wipe() - Wipe journal contents
1395 * @journal: Journal to act on. 1395 * @journal: Journal to act on.
1396 * @write: flag (see below) 1396 * @write: flag (see below)
1397 * 1397 *
1398 * Wipe out all of the contents of a journal, safely. This will produce 1398 * Wipe out all of the contents of a journal, safely. This will produce
1399 * a warning if the journal contains any valid recovery information. 1399 * a warning if the journal contains any valid recovery information.
1400 * Must be called between journal_init_*() and journal_load(). 1400 * Must be called between journal_init_*() and journal_load().
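
As a sketch of that call order (after journal_init_*(), before journal_load()), a hypothetical "ignore the stale journal" mount path might read:

#include <linux/jbd.h>

static int my_fs_ignore_stale_journal(journal_t *journal)
{
        /* write == 0: leave the journal on disk untouched, just forget
         * whatever recovery information it contains. */
        int err = journal_wipe(journal, 0);

        if (err)
                return err;
        return journal_load(journal);
}
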
@@ -1449,7 +1449,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
1449 1449
1450/* 1450/*
1451 * Journal abort has very specific semantics, which we describe 1451 * Journal abort has very specific semantics, which we describe
1452 * for journal abort. 1452 * for journal abort.
1453 * 1453 *
1454 * Two internal function, which provide abort to te jbd layer 1454 * Two internal function, which provide abort to te jbd layer
1455 * itself are here. 1455 * itself are here.
@@ -1504,7 +1504,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
1504 * Perform a complete, immediate shutdown of the ENTIRE 1504 * Perform a complete, immediate shutdown of the ENTIRE
1505 * journal (not of a single transaction). This operation cannot be 1505 * journal (not of a single transaction). This operation cannot be
1506 * undone without closing and reopening the journal. 1506 * undone without closing and reopening the journal.
1507 * 1507 *
1508 * The journal_abort function is intended to support higher level error 1508 * The journal_abort function is intended to support higher level error
1509 * recovery mechanisms such as the ext2/ext3 remount-readonly error 1509 * recovery mechanisms such as the ext2/ext3 remount-readonly error
1510 * mode. 1510 * mode.
@@ -1538,7 +1538,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
1538 * supply an errno; a null errno implies that absolutely no further 1538 * supply an errno; a null errno implies that absolutely no further
1539 * writes are done to the journal (unless there are any already in 1539 * writes are done to the journal (unless there are any already in
1540 * progress). 1540 * progress).
1541 * 1541 *
1542 */ 1542 */
1543 1543
1544void journal_abort(journal_t *journal, int errno) 1544void journal_abort(journal_t *journal, int errno)
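
A hedged sketch of the higher-level error handling this is meant to support: abort on a fatal error, then later surface, acknowledge and clear the recorded errno (the my_fs_* helpers are hypothetical).

#include <linux/jbd.h>

static void my_fs_handle_fatal_error(journal_t *journal, int errno)
{
        journal_abort(journal, errno);  /* record errno, stop further commits */
}

static int my_fs_collect_journal_error(journal_t *journal)
{
        int err = journal_errno(journal);       /* non-zero while an error is pending */

        if (err) {
                journal_ack_err(journal);       /* let new handles start despite it */
                journal_clear_err(journal);     /* reset the saved error state */
        }
        return err;
}
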
@@ -1546,7 +1546,7 @@ void journal_abort(journal_t *journal, int errno)
1546 __journal_abort_soft(journal, errno); 1546 __journal_abort_soft(journal, errno);
1547} 1547}
1548 1548
1549/** 1549/**
1550 * int journal_errno () - returns the journal's error state. 1550 * int journal_errno () - returns the journal's error state.
1551 * @journal: journal to examine. 1551 * @journal: journal to examine.
1552 * 1552 *
@@ -1570,7 +1570,7 @@ int journal_errno(journal_t *journal)
1570 return err; 1570 return err;
1571} 1571}
1572 1572
1573/** 1573/**
1574 * int journal_clear_err () - clears the journal's error state 1574 * int journal_clear_err () - clears the journal's error state
1575 * @journal: journal to act on. 1575 * @journal: journal to act on.
1576 * 1576 *
@@ -1590,7 +1590,7 @@ int journal_clear_err(journal_t *journal)
1590 return err; 1590 return err;
1591} 1591}
1592 1592
1593/** 1593/**
1594 * void journal_ack_err() - Ack journal err. 1594 * void journal_ack_err() - Ack journal err.
1595 * @journal: journal to act on. 1595 * @journal: journal to act on.
1596 * 1596 *
@@ -1612,7 +1612,7 @@ int journal_blocks_per_page(struct inode *inode)
1612 1612
1613/* 1613/*
1614 * Simple support for retrying memory allocations. Introduced to help to 1614 * Simple support for retrying memory allocations. Introduced to help to
1615 * debug different VM deadlock avoidance strategies. 1615 * debug different VM deadlock avoidance strategies.
1616 */ 1616 */
1617void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) 1617void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1618{ 1618{
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index de5bafb4e853..73bb64806ed3 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/recovery.c 2 * linux/fs/recovery.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
5 * 5 *
6 * Copyright 1999-2000 Red Hat Software --- All Rights Reserved 6 * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
@@ -10,7 +10,7 @@
10 * option, any later version, incorporated herein by reference. 10 * option, any later version, incorporated herein by reference.
11 * 11 *
12 * Journal recovery routines for the generic filesystem journaling code; 12 * Journal recovery routines for the generic filesystem journaling code;
13 * part of the ext2fs journaling system. 13 * part of the ext2fs journaling system.
14 */ 14 */
15 15
16#ifndef __KERNEL__ 16#ifndef __KERNEL__
@@ -25,9 +25,9 @@
25 25
26/* 26/*
27 * Maintain information about the progress of the recovery job, so that 27 * Maintain information about the progress of the recovery job, so that
28 * the different passes can carry information between them. 28 * the different passes can carry information between them.
29 */ 29 */
30struct recovery_info 30struct recovery_info
31{ 31{
32 tid_t start_transaction; 32 tid_t start_transaction;
33 tid_t end_transaction; 33 tid_t end_transaction;
@@ -116,7 +116,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
116 err = 0; 116 err = 0;
117 117
118failed: 118failed:
119 if (nbufs) 119 if (nbufs)
120 journal_brelse_array(bufs, nbufs); 120 journal_brelse_array(bufs, nbufs);
121 return err; 121 return err;
122} 122}
@@ -128,7 +128,7 @@ failed:
128 * Read a block from the journal 128 * Read a block from the journal
129 */ 129 */
130 130
131static int jread(struct buffer_head **bhp, journal_t *journal, 131static int jread(struct buffer_head **bhp, journal_t *journal,
132 unsigned int offset) 132 unsigned int offset)
133{ 133{
134 int err; 134 int err;
@@ -212,14 +212,14 @@ do { \
212/** 212/**
213 * journal_recover - recovers an on-disk journal 213 * journal_recover - recovers an on-disk journal
214 * @journal: the journal to recover 214 * @journal: the journal to recover
215 * 215 *
216 * The primary function for recovering the log contents when mounting a 216 * The primary function for recovering the log contents when mounting a
217 * journaled device. 217 * journaled device.
218 * 218 *
219 * Recovery is done in three passes. In the first pass, we look for the 219 * Recovery is done in three passes. In the first pass, we look for the
220 * end of the log. In the second, we assemble the list of revoke 220 * end of the log. In the second, we assemble the list of revoke
221 * blocks. In the third and final pass, we replay any un-revoked blocks 221 * blocks. In the third and final pass, we replay any un-revoked blocks
222 * in the log. 222 * in the log.
223 */ 223 */
224int journal_recover(journal_t *journal) 224int journal_recover(journal_t *journal)
225{ 225{
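
The three passes are sequenced inside journal_recover() itself; the following is a condensed paraphrase of that control flow, not code that can be compiled outside recovery.c (struct recovery_info, enum passtype and do_one_pass() are all private to this file).

static int recovery_passes_sketch(journal_t *journal)
{
        struct recovery_info info;
        int err;

        memset(&info, 0, sizeof(info));
        err = do_one_pass(journal, &info, PASS_SCAN);           /* 1: find the end of the log */
        if (!err)
                err = do_one_pass(journal, &info, PASS_REVOKE); /* 2: build the revoke table */
        if (!err)
                err = do_one_pass(journal, &info, PASS_REPLAY); /* 3: replay un-revoked blocks */
        return err;
}
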
@@ -231,10 +231,10 @@ int journal_recover(journal_t *journal)
231 memset(&info, 0, sizeof(info)); 231 memset(&info, 0, sizeof(info));
232 sb = journal->j_superblock; 232 sb = journal->j_superblock;
233 233
234 /* 234 /*
235 * The journal superblock's s_start field (the current log head) 235 * The journal superblock's s_start field (the current log head)
236 * is always zero if, and only if, the journal was cleanly 236 * is always zero if, and only if, the journal was cleanly
237 * unmounted. 237 * unmounted.
238 */ 238 */
239 239
240 if (!sb->s_start) { 240 if (!sb->s_start) {
@@ -253,7 +253,7 @@ int journal_recover(journal_t *journal)
253 jbd_debug(0, "JBD: recovery, exit status %d, " 253 jbd_debug(0, "JBD: recovery, exit status %d, "
254 "recovered transactions %u to %u\n", 254 "recovered transactions %u to %u\n",
255 err, info.start_transaction, info.end_transaction); 255 err, info.start_transaction, info.end_transaction);
256 jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n", 256 jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
257 info.nr_replays, info.nr_revoke_hits, info.nr_revokes); 257 info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
258 258
259 /* Restart the log at the next transaction ID, thus invalidating 259 /* Restart the log at the next transaction ID, thus invalidating
@@ -268,15 +268,15 @@ int journal_recover(journal_t *journal)
268/** 268/**
269 * journal_skip_recovery - Start journal and wipe existing records 269 * journal_skip_recovery - Start journal and wipe existing records
270 * @journal: journal to startup 270 * @journal: journal to startup
271 * 271 *
272 * Locate any valid recovery information from the journal and set up the 272 * Locate any valid recovery information from the journal and set up the
273 * journal structures in memory to ignore it (presumably because the 273 * journal structures in memory to ignore it (presumably because the
274 * caller has evidence that it is out of date). 274 * caller has evidence that it is out of date).
275 * This function doesn't appear to be exported. 275 * This function doesn't appear to be exported.
276 * 276 *
277 * We perform one pass over the journal to allow us to tell the user how 277 * We perform one pass over the journal to allow us to tell the user how
278 * much recovery information is being erased, and to let us initialise 278 * much recovery information is being erased, and to let us initialise
279 * the journal transaction sequence numbers to the next unused ID. 279 * the journal transaction sequence numbers to the next unused ID.
280 */ 280 */
281int journal_skip_recovery(journal_t *journal) 281int journal_skip_recovery(journal_t *journal)
282{ 282{
@@ -297,7 +297,7 @@ int journal_skip_recovery(journal_t *journal)
297#ifdef CONFIG_JBD_DEBUG 297#ifdef CONFIG_JBD_DEBUG
298 int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence); 298 int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
299#endif 299#endif
300 jbd_debug(0, 300 jbd_debug(0,
301 "JBD: ignoring %d transaction%s from the journal.\n", 301 "JBD: ignoring %d transaction%s from the journal.\n",
302 dropped, (dropped == 1) ? "" : "s"); 302 dropped, (dropped == 1) ? "" : "s");
303 journal->j_transaction_sequence = ++info.end_transaction; 303 journal->j_transaction_sequence = ++info.end_transaction;
@@ -324,10 +324,10 @@ static int do_one_pass(journal_t *journal,
324 MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t)) 324 MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
325 / sizeof(journal_block_tag_t)); 325 / sizeof(journal_block_tag_t));
326 326
327 /* 327 /*
328 * First thing is to establish what we expect to find in the log 328 * First thing is to establish what we expect to find in the log
329 * (in terms of transaction IDs), and where (in terms of log 329 * (in terms of transaction IDs), and where (in terms of log
330 * block offsets): query the superblock. 330 * block offsets): query the superblock.
331 */ 331 */
332 332
333 sb = journal->j_superblock; 333 sb = journal->j_superblock;
@@ -344,7 +344,7 @@ static int do_one_pass(journal_t *journal,
344 * Now we walk through the log, transaction by transaction, 344 * Now we walk through the log, transaction by transaction,
345 * making sure that each transaction has a commit block in the 345 * making sure that each transaction has a commit block in the
346 * expected place. Each complete transaction gets replayed back 346 * expected place. Each complete transaction gets replayed back
347 * into the main filesystem. 347 * into the main filesystem.
348 */ 348 */
349 349
350 while (1) { 350 while (1) {
@@ -379,8 +379,8 @@ static int do_one_pass(journal_t *journal,
379 next_log_block++; 379 next_log_block++;
380 wrap(journal, next_log_block); 380 wrap(journal, next_log_block);
381 381
382 /* What kind of buffer is it? 382 /* What kind of buffer is it?
383 * 383 *
384 * If it is a descriptor block, check that it has the 384 * If it is a descriptor block, check that it has the
385 * expected sequence number. Otherwise, we're all done 385 * expected sequence number. Otherwise, we're all done
386 * here. */ 386 * here. */
@@ -394,7 +394,7 @@ static int do_one_pass(journal_t *journal,
394 394
395 blocktype = be32_to_cpu(tmp->h_blocktype); 395 blocktype = be32_to_cpu(tmp->h_blocktype);
396 sequence = be32_to_cpu(tmp->h_sequence); 396 sequence = be32_to_cpu(tmp->h_sequence);
397 jbd_debug(3, "Found magic %d, sequence %d\n", 397 jbd_debug(3, "Found magic %d, sequence %d\n",
398 blocktype, sequence); 398 blocktype, sequence);
399 399
400 if (sequence != next_commit_ID) { 400 if (sequence != next_commit_ID) {
@@ -438,7 +438,7 @@ static int do_one_pass(journal_t *journal,
438 /* Recover what we can, but 438 /* Recover what we can, but
439 * report failure at the end. */ 439 * report failure at the end. */
440 success = err; 440 success = err;
441 printk (KERN_ERR 441 printk (KERN_ERR
442 "JBD: IO error %d recovering " 442 "JBD: IO error %d recovering "
443 "block %ld in log\n", 443 "block %ld in log\n",
444 err, io_block); 444 err, io_block);
@@ -452,7 +452,7 @@ static int do_one_pass(journal_t *journal,
452 * revoked, then we're all done 452 * revoked, then we're all done
453 * here. */ 453 * here. */
454 if (journal_test_revoke 454 if (journal_test_revoke
455 (journal, blocknr, 455 (journal, blocknr,
456 next_commit_ID)) { 456 next_commit_ID)) {
457 brelse(obh); 457 brelse(obh);
458 ++info->nr_revoke_hits; 458 ++info->nr_revoke_hits;
@@ -465,7 +465,7 @@ static int do_one_pass(journal_t *journal,
465 blocknr, 465 blocknr,
466 journal->j_blocksize); 466 journal->j_blocksize);
467 if (nbh == NULL) { 467 if (nbh == NULL) {
468 printk(KERN_ERR 468 printk(KERN_ERR
469 "JBD: Out of memory " 469 "JBD: Out of memory "
470 "during recovery.\n"); 470 "during recovery.\n");
471 err = -ENOMEM; 471 err = -ENOMEM;
@@ -537,7 +537,7 @@ static int do_one_pass(journal_t *journal,
537 } 537 }
538 538
539 done: 539 done:
540 /* 540 /*
541 * We broke out of the log scan loop: either we came to the 541 * We broke out of the log scan loop: either we came to the
542 * known end of the log or we found an unexpected block in the 542 * known end of the log or we found an unexpected block in the
543 * log. If the latter happened, then we know that the "current" 543 * log. If the latter happened, then we know that the "current"
@@ -567,7 +567,7 @@ static int do_one_pass(journal_t *journal,
567 567
568/* Scan a revoke record, marking all blocks mentioned as revoked. */ 568/* Scan a revoke record, marking all blocks mentioned as revoked. */
569 569
570static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, 570static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
571 tid_t sequence, struct recovery_info *info) 571 tid_t sequence, struct recovery_info *info)
572{ 572{
573 journal_revoke_header_t *header; 573 journal_revoke_header_t *header;
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index a56144183462..c532429d8d9b 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/revoke.c 2 * linux/fs/revoke.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
5 * 5 *
6 * Copyright 2000 Red Hat corp --- All Rights Reserved 6 * Copyright 2000 Red Hat corp --- All Rights Reserved
@@ -15,10 +15,10 @@
15 * Revoke is the mechanism used to prevent old log records for deleted 15 * Revoke is the mechanism used to prevent old log records for deleted
16 * metadata from being replayed on top of newer data using the same 16 * metadata from being replayed on top of newer data using the same
17 * blocks. The revoke mechanism is used in two separate places: 17 * blocks. The revoke mechanism is used in two separate places:
18 * 18 *
19 * + Commit: during commit we write the entire list of the current 19 * + Commit: during commit we write the entire list of the current
20 * transaction's revoked blocks to the journal 20 * transaction's revoked blocks to the journal
21 * 21 *
22 * + Recovery: during recovery we record the transaction ID of all 22 * + Recovery: during recovery we record the transaction ID of all
23 * revoked blocks. If there are multiple revoke records in the log 23 * revoked blocks. If there are multiple revoke records in the log
24 * for a single block, only the last one counts, and if there is a log 24 * for a single block, only the last one counts, and if there is a log
@@ -29,7 +29,7 @@
29 * single transaction: 29 * single transaction:
30 * 30 *
31 * Block is revoked and then journaled: 31 * Block is revoked and then journaled:
32 * The desired end result is the journaling of the new block, so we 32 * The desired end result is the journaling of the new block, so we
33 * cancel the revoke before the transaction commits. 33 * cancel the revoke before the transaction commits.
34 * 34 *
35 * Block is journaled and then revoked: 35 * Block is journaled and then revoked:
@@ -41,7 +41,7 @@
41 * transaction must have happened after the block was journaled and so 41 * transaction must have happened after the block was journaled and so
42 * the revoke must take precedence. 42 * the revoke must take precedence.
43 * 43 *
44 * Block is revoked and then written as data: 44 * Block is revoked and then written as data:
45 * The data write is allowed to succeed, but the revoke is _not_ 45 * The data write is allowed to succeed, but the revoke is _not_
46 * cancelled. We still need to prevent old log records from 46 * cancelled. We still need to prevent old log records from
47 * overwriting the new data. We don't even need to clear the revoke 47 * overwriting the new data. We don't even need to clear the revoke
@@ -54,7 +54,7 @@
54 * buffer has not been revoked, and cancel_revoke 54 * buffer has not been revoked, and cancel_revoke
55 * need do nothing. 55 * need do nothing.
56 * RevokeValid set, Revoked set: 56 * RevokeValid set, Revoked set:
57 * buffer has been revoked. 57 * buffer has been revoked.
58 */ 58 */
59 59
60#ifndef __KERNEL__ 60#ifndef __KERNEL__
@@ -77,7 +77,7 @@ static kmem_cache_t *revoke_table_cache;
77 journal replay, this involves recording the transaction ID of the 77 journal replay, this involves recording the transaction ID of the
78 last transaction to revoke this block. */ 78 last transaction to revoke this block. */
79 79
80struct jbd_revoke_record_s 80struct jbd_revoke_record_s
81{ 81{
82 struct list_head hash; 82 struct list_head hash;
83 tid_t sequence; /* Used for recovery only */ 83 tid_t sequence; /* Used for recovery only */
@@ -90,8 +90,8 @@ struct jbd_revoke_table_s
90{ 90{
91 /* It is conceivable that we might want a larger hash table 91 /* It is conceivable that we might want a larger hash table
92 * for recovery. Must be a power of two. */ 92 * for recovery. Must be a power of two. */
93 int hash_size; 93 int hash_size;
94 int hash_shift; 94 int hash_shift;
95 struct list_head *hash_table; 95 struct list_head *hash_table;
96}; 96};
97 97
@@ -301,22 +301,22 @@ void journal_destroy_revoke(journal_t *journal)
301 301
302#ifdef __KERNEL__ 302#ifdef __KERNEL__
303 303
304/* 304/*
305 * journal_revoke: revoke a given buffer_head from the journal. This 305 * journal_revoke: revoke a given buffer_head from the journal. This
306 * prevents the block from being replayed during recovery if we take a 306 * prevents the block from being replayed during recovery if we take a
307 * crash after this current transaction commits. Any subsequent 307 * crash after this current transaction commits. Any subsequent
308 * metadata writes of the buffer in this transaction cancel the 308 * metadata writes of the buffer in this transaction cancel the
309 * revoke. 309 * revoke.
310 * 310 *
311 * Note that this call may block --- it is up to the caller to make 311 * Note that this call may block --- it is up to the caller to make
312 * sure that there are no further calls to journal_write_metadata 312 * sure that there are no further calls to journal_write_metadata
313 * before the revoke is complete. In ext3, this implies calling the 313 * before the revoke is complete. In ext3, this implies calling the
314 * revoke before clearing the block bitmap when we are deleting 314 * revoke before clearing the block bitmap when we are deleting
315 * metadata. 315 * metadata.
316 * 316 *
317 * Revoke performs a journal_forget on any buffer_head passed in as a 317 * Revoke performs a journal_forget on any buffer_head passed in as a
318 * parameter, but does _not_ forget the buffer_head if the bh was only 318 * parameter, but does _not_ forget the buffer_head if the bh was only
319 * found implicitly. 319 * found implicitly.
320 * 320 *
321 * bh_in may not be a journalled buffer - it may have come off 321 * bh_in may not be a journalled buffer - it may have come off
322 * the hash tables without an attached journal_head. 322 * the hash tables without an attached journal_head.
@@ -325,7 +325,7 @@ void journal_destroy_revoke(journal_t *journal)
325 * by one. 325 * by one.
326 */ 326 */
327 327
328int journal_revoke(handle_t *handle, unsigned long blocknr, 328int journal_revoke(handle_t *handle, unsigned long blocknr,
329 struct buffer_head *bh_in) 329 struct buffer_head *bh_in)
330{ 330{
331 struct buffer_head *bh = NULL; 331 struct buffer_head *bh = NULL;
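
From the calling side, the revoke/forget split is usually decided when a block is freed; a sketch loosely modelled on what an ext3-like client does (the helper and its is_metadata flag are made up, and bh is assumed non-NULL):

#include <linux/jbd.h>

static int my_fs_free_block(handle_t *handle, struct buffer_head *bh,
                            unsigned long blocknr, int is_metadata)
{
        if (is_metadata)
                /* Keep recovery from replaying stale log copies; this
                 * also does a journal_forget() on bh if it was journaled. */
                return journal_revoke(handle, blocknr, bh);

        /* Plain data (or never-journaled) block: just drop it from the
         * running transaction. */
        return journal_forget(handle, bh);
}
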
@@ -487,7 +487,7 @@ void journal_switch_revoke_table(journal_t *journal)
487 else 487 else
488 journal->j_revoke = journal->j_revoke_table[0]; 488 journal->j_revoke = journal->j_revoke_table[0];
489 489
490 for (i = 0; i < journal->j_revoke->hash_size; i++) 490 for (i = 0; i < journal->j_revoke->hash_size; i++)
491 INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]); 491 INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
492} 492}
493 493
@@ -498,7 +498,7 @@ void journal_switch_revoke_table(journal_t *journal)
498 * Called with the journal lock held. 498 * Called with the journal lock held.
499 */ 499 */
500 500
501void journal_write_revoke_records(journal_t *journal, 501void journal_write_revoke_records(journal_t *journal,
502 transaction_t *transaction) 502 transaction_t *transaction)
503{ 503{
504 struct journal_head *descriptor; 504 struct journal_head *descriptor;
@@ -507,7 +507,7 @@ void journal_write_revoke_records(journal_t *journal,
507 struct list_head *hash_list; 507 struct list_head *hash_list;
508 int i, offset, count; 508 int i, offset, count;
509 509
510 descriptor = NULL; 510 descriptor = NULL;
511 offset = 0; 511 offset = 0;
512 count = 0; 512 count = 0;
513 513
@@ -519,10 +519,10 @@ void journal_write_revoke_records(journal_t *journal,
519 hash_list = &revoke->hash_table[i]; 519 hash_list = &revoke->hash_table[i];
520 520
521 while (!list_empty(hash_list)) { 521 while (!list_empty(hash_list)) {
522 record = (struct jbd_revoke_record_s *) 522 record = (struct jbd_revoke_record_s *)
523 hash_list->next; 523 hash_list->next;
524 write_one_revoke_record(journal, transaction, 524 write_one_revoke_record(journal, transaction,
525 &descriptor, &offset, 525 &descriptor, &offset,
526 record); 526 record);
527 count++; 527 count++;
528 list_del(&record->hash); 528 list_del(&record->hash);
@@ -534,14 +534,14 @@ void journal_write_revoke_records(journal_t *journal,
534 jbd_debug(1, "Wrote %d revoke records\n", count); 534 jbd_debug(1, "Wrote %d revoke records\n", count);
535} 535}
536 536
537/* 537/*
538 * Write out one revoke record. We need to create a new descriptor 538 * Write out one revoke record. We need to create a new descriptor
539 * block if the old one is full or if we have not already created one. 539 * block if the old one is full or if we have not already created one.
540 */ 540 */
541 541
542static void write_one_revoke_record(journal_t *journal, 542static void write_one_revoke_record(journal_t *journal,
543 transaction_t *transaction, 543 transaction_t *transaction,
544 struct journal_head **descriptorp, 544 struct journal_head **descriptorp,
545 int *offsetp, 545 int *offsetp,
546 struct jbd_revoke_record_s *record) 546 struct jbd_revoke_record_s *record)
547{ 547{
@@ -584,21 +584,21 @@ static void write_one_revoke_record(journal_t *journal,
584 *descriptorp = descriptor; 584 *descriptorp = descriptor;
585 } 585 }
586 586
587 * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) = 587 * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
588 cpu_to_be32(record->blocknr); 588 cpu_to_be32(record->blocknr);
589 offset += 4; 589 offset += 4;
590 *offsetp = offset; 590 *offsetp = offset;
591} 591}
592 592
593/* 593/*
594 * Flush a revoke descriptor out to the journal. If we are aborting, 594 * Flush a revoke descriptor out to the journal. If we are aborting,
595 * this is a noop; otherwise we are generating a buffer which needs to 595 * this is a noop; otherwise we are generating a buffer which needs to
596 * be waited for during commit, so it has to go onto the appropriate 596 * be waited for during commit, so it has to go onto the appropriate
597 * journal buffer list. 597 * journal buffer list.
598 */ 598 */
599 599
600static void flush_descriptor(journal_t *journal, 600static void flush_descriptor(journal_t *journal,
601 struct journal_head *descriptor, 601 struct journal_head *descriptor,
602 int offset) 602 int offset)
603{ 603{
604 journal_revoke_header_t *header; 604 journal_revoke_header_t *header;
@@ -618,7 +618,7 @@ static void flush_descriptor(journal_t *journal,
618} 618}
619#endif 619#endif
620 620
621/* 621/*
622 * Revoke support for recovery. 622 * Revoke support for recovery.
623 * 623 *
624 * Recovery needs to be able to: 624 * Recovery needs to be able to:
@@ -629,7 +629,7 @@ static void flush_descriptor(journal_t *journal,
629 * check whether a given block in a given transaction should be replayed 629 * check whether a given block in a given transaction should be replayed
630 * (ie. has not been revoked by a revoke record in that or a subsequent 630 * (ie. has not been revoked by a revoke record in that or a subsequent
631 * transaction) 631 * transaction)
632 * 632 *
633 * empty the revoke table after recovery. 633 * empty the revoke table after recovery.
634 */ 634 */
635 635
@@ -637,11 +637,11 @@ static void flush_descriptor(journal_t *journal,
637 * First, setting revoke records. We create a new revoke record for 637 * First, setting revoke records. We create a new revoke record for
638 * every block ever revoked in the log as we scan it for recovery, and 638 * every block ever revoked in the log as we scan it for recovery, and
639 * we update the existing records if we find multiple revokes for a 639 * we update the existing records if we find multiple revokes for a
640 * single block. 640 * single block.
641 */ 641 */
642 642
643int journal_set_revoke(journal_t *journal, 643int journal_set_revoke(journal_t *journal,
644 unsigned long blocknr, 644 unsigned long blocknr,
645 tid_t sequence) 645 tid_t sequence)
646{ 646{
647 struct jbd_revoke_record_s *record; 647 struct jbd_revoke_record_s *record;
@@ -653,18 +653,18 @@ int journal_set_revoke(journal_t *journal,
653 if (tid_gt(sequence, record->sequence)) 653 if (tid_gt(sequence, record->sequence))
654 record->sequence = sequence; 654 record->sequence = sequence;
655 return 0; 655 return 0;
656 } 656 }
657 return insert_revoke_hash(journal, blocknr, sequence); 657 return insert_revoke_hash(journal, blocknr, sequence);
658} 658}
659 659
660/* 660/*
661 * Test revoke records. For a given block referenced in the log, has 661 * Test revoke records. For a given block referenced in the log, has
662 * that block been revoked? A revoke record with a given transaction 662 * that block been revoked? A revoke record with a given transaction
663 * sequence number revokes all blocks in that transaction and earlier 663 * sequence number revokes all blocks in that transaction and earlier
664 * ones, but later transactions still need replayed. 664 * ones, but later transactions still need replayed.
665 */ 665 */
666 666
667int journal_test_revoke(journal_t *journal, 667int journal_test_revoke(journal_t *journal,
668 unsigned long blocknr, 668 unsigned long blocknr,
669 tid_t sequence) 669 tid_t sequence)
670{ 670{
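
Phrased as code, the question the recovery passes ask of this table looks roughly like the sketch below; blocknr and seq would come from the descriptor block being walked, and the helper names are invented.

#include <linux/jbd.h>

static int my_fs_note_revoke(journal_t *journal, unsigned long blocknr,
                             tid_t seq)
{
        /* Pass 2: remember the newest transaction revoking blocknr;
         * journal_set_revoke() keeps only the highest sequence. */
        return journal_set_revoke(journal, blocknr, seq);
}

static int my_fs_should_replay(journal_t *journal, unsigned long blocknr,
                               tid_t seq)
{
        /* Pass 3: a logged copy from transaction seq is replayed only if
         * no revoke record from seq or later covers the block. */
        return !journal_test_revoke(journal, blocknr, seq);
}
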
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index f5169a96260e..bf7fd7117817 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/fs/transaction.c 2 * linux/fs/transaction.c
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
5 * 5 *
6 * Copyright 1998 Red Hat corp --- All Rights Reserved 6 * Copyright 1998 Red Hat corp --- All Rights Reserved
@@ -10,7 +10,7 @@
10 * option, any later version, incorporated herein by reference. 10 * option, any later version, incorporated herein by reference.
11 * 11 *
12 * Generic filesystem transaction handling code; part of the ext2fs 12 * Generic filesystem transaction handling code; part of the ext2fs
13 * journaling system. 13 * journaling system.
14 * 14 *
15 * This file manages transactions (compound commits managed by the 15 * This file manages transactions (compound commits managed by the
16 * journaling code) and handles (individual atomic operations by the 16 * journaling code) and handles (individual atomic operations by the
@@ -74,7 +74,7 @@ get_transaction(journal_t *journal, transaction_t *transaction)
74 * start_this_handle: Given a handle, deal with any locking or stalling 74 * start_this_handle: Given a handle, deal with any locking or stalling
75 * needed to make sure that there is enough journal space for the handle 75 * needed to make sure that there is enough journal space for the handle
76 * to begin. Attach the handle to a transaction and set up the 76 * to begin. Attach the handle to a transaction and set up the
77 * transaction's buffer credits. 77 * transaction's buffer credits.
78 */ 78 */
79 79
80static int start_this_handle(journal_t *journal, handle_t *handle) 80static int start_this_handle(journal_t *journal, handle_t *handle)
@@ -117,7 +117,7 @@ repeat_locked:
117 if (is_journal_aborted(journal) || 117 if (is_journal_aborted(journal) ||
118 (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) { 118 (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
119 spin_unlock(&journal->j_state_lock); 119 spin_unlock(&journal->j_state_lock);
120 ret = -EROFS; 120 ret = -EROFS;
121 goto out; 121 goto out;
122 } 122 }
123 123
@@ -182,7 +182,7 @@ repeat_locked:
182 goto repeat; 182 goto repeat;
183 } 183 }
184 184
185 /* 185 /*
186 * The commit code assumes that it can get enough log space 186 * The commit code assumes that it can get enough log space
187 * without forcing a checkpoint. This is *critical* for 187 * without forcing a checkpoint. This is *critical* for
188 * correctness: a checkpoint of a buffer which is also 188 * correctness: a checkpoint of a buffer which is also
@@ -191,7 +191,7 @@ repeat_locked:
191 * 191 *
192 * We must therefore ensure the necessary space in the journal 192 * We must therefore ensure the necessary space in the journal
193 * *before* starting to dirty potentially checkpointed buffers 193 * *before* starting to dirty potentially checkpointed buffers
194 * in the new transaction. 194 * in the new transaction.
195 * 195 *
196 * The worst part is, any transaction currently committing can 196 * The worst part is, any transaction currently committing can
197 * reduce the free space arbitrarily. Be careful to account for 197 * reduce the free space arbitrarily. Be careful to account for
@@ -246,13 +246,13 @@ static handle_t *new_handle(int nblocks)
246} 246}
247 247
248/** 248/**
249 * handle_t *journal_start() - Obtain a new handle. 249 * handle_t *journal_start() - Obtain a new handle.
250 * @journal: Journal to start transaction on. 250 * @journal: Journal to start transaction on.
251 * @nblocks: number of block buffer we might modify 251 * @nblocks: number of block buffer we might modify
252 * 252 *
253 * We make sure that the transaction can guarantee at least nblocks of 253 * We make sure that the transaction can guarantee at least nblocks of
254 * modified buffers in the log. We block until the log can guarantee 254 * modified buffers in the log. We block until the log can guarantee
255 * that much space. 255 * that much space.
256 * 256 *
257 * This function is visible to journal users (like ext3fs), so is not 257 * This function is visible to journal users (like ext3fs), so is not
258 * called with the journal already locked. 258 * called with the journal already locked.
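
Putting the handle calls together, a typical caller brackets a metadata update as sketched below; the credit count of 2 and my_fs_update() are invented for the example.

#include <linux/err.h>
#include <linux/buffer_head.h>
#include <linux/jbd.h>

static int my_fs_update(journal_t *journal, struct buffer_head *bh)
{
        handle_t *handle;
        int err;

        handle = journal_start(journal, 2);     /* reserve 2 buffer credits */
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = journal_get_write_access(handle, bh);
        if (!err) {
                /* ... modify bh->b_data here ... */
                err = journal_dirty_metadata(handle, bh);
        }

        journal_stop(handle);                   /* hand back unused credits */
        return err;
}
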
@@ -292,11 +292,11 @@ handle_t *journal_start(journal_t *journal, int nblocks)
292 * int journal_extend() - extend buffer credits. 292 * int journal_extend() - extend buffer credits.
293 * @handle: handle to 'extend' 293 * @handle: handle to 'extend'
294 * @nblocks: nr blocks to try to extend by. 294 * @nblocks: nr blocks to try to extend by.
295 * 295 *
296 * Some transactions, such as large extends and truncates, can be done 296 * Some transactions, such as large extends and truncates, can be done
297 * atomically all at once or in several stages. The operation requests 297 * atomically all at once or in several stages. The operation requests
298 * a credit for a number of buffer modifications in advance, but can 298 * a credit for a number of buffer modifications in advance, but can
299 * extend its credit if it needs more. 299 * extend its credit if it needs more.
300 * 300 *
301 * journal_extend tries to give the running handle more buffer credits. 301 * journal_extend tries to give the running handle more buffer credits.
302 * It does not guarantee that allocation - this is a best-effort only. 302 * It does not guarantee that allocation - this is a best-effort only.
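
The extend-or-restart idiom this enables can be sketched as below, assuming the convention that a non-zero return from journal_extend() means "could not extend, fall back to a restart":

#include <linux/jbd.h>

static int my_fs_need_more_credits(handle_t *handle, int nblocks)
{
        /* Best effort: try to stretch the current handle first. */
        if (journal_extend(handle, nblocks) == 0)
                return 0;

        /* The running transaction is too full: commit what we have and
         * start again with a fresh reservation. */
        return journal_restart(handle, nblocks);
}
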
@@ -363,7 +363,7 @@ out:
363 * int journal_restart() - restart a handle . 363 * int journal_restart() - restart a handle .
364 * @handle: handle to restart 364 * @handle: handle to restart
365 * @nblocks: nr credits requested 365 * @nblocks: nr credits requested
366 * 366 *
367 * Restart a handle for a multi-transaction filesystem 367 * Restart a handle for a multi-transaction filesystem
368 * operation. 368 * operation.
369 * 369 *
@@ -462,7 +462,7 @@ void journal_lock_updates(journal_t *journal)
462/** 462/**
463 * void journal_unlock_updates (journal_t* journal) - release barrier 463 * void journal_unlock_updates (journal_t* journal) - release barrier
464 * @journal: Journal to release the barrier on. 464 * @journal: Journal to release the barrier on.
465 * 465 *
466 * Release a transaction barrier obtained with journal_lock_updates(). 466 * Release a transaction barrier obtained with journal_lock_updates().
467 * 467 *
468 * Should be called without the journal lock held. 468 * Should be called without the journal lock held.
@@ -547,8 +547,8 @@ repeat:
547 jbd_lock_bh_state(bh); 547 jbd_lock_bh_state(bh);
548 548
549 /* We now hold the buffer lock so it is safe to query the buffer 549 /* We now hold the buffer lock so it is safe to query the buffer
550 * state. Is the buffer dirty? 550 * state. Is the buffer dirty?
551 * 551 *
552 * If so, there are two possibilities. The buffer may be 552 * If so, there are two possibilities. The buffer may be
553 * non-journaled, and undergoing a quite legitimate writeback. 553 * non-journaled, and undergoing a quite legitimate writeback.
554 * Otherwise, it is journaled, and we don't expect dirty buffers 554 * Otherwise, it is journaled, and we don't expect dirty buffers
@@ -566,7 +566,7 @@ repeat:
566 */ 566 */
567 if (jh->b_transaction) { 567 if (jh->b_transaction) {
568 J_ASSERT_JH(jh, 568 J_ASSERT_JH(jh,
569 jh->b_transaction == transaction || 569 jh->b_transaction == transaction ||
570 jh->b_transaction == 570 jh->b_transaction ==
571 journal->j_committing_transaction); 571 journal->j_committing_transaction);
572 if (jh->b_next_transaction) 572 if (jh->b_next_transaction)
@@ -653,7 +653,7 @@ repeat:
653 * buffer had better remain locked during the kmalloc, 653 * buffer had better remain locked during the kmalloc,
654 * but that should be true --- we hold the journal lock 654 * but that should be true --- we hold the journal lock
655 * still and the buffer is already on the BUF_JOURNAL 655 * still and the buffer is already on the BUF_JOURNAL
656 * list so won't be flushed. 656 * list so won't be flushed.
657 * 657 *
658 * Subtle point, though: if this is a get_undo_access, 658 * Subtle point, though: if this is a get_undo_access,
659 * then we will be relying on the frozen_data to contain 659 * then we will be relying on the frozen_data to contain
@@ -765,8 +765,8 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
765 * manually rather than reading off disk), then we need to keep the 765 * manually rather than reading off disk), then we need to keep the
766 * buffer_head locked until it has been completely filled with new 766 * buffer_head locked until it has been completely filled with new
767 * data. In this case, we should be able to make the assertion that 767 * data. In this case, we should be able to make the assertion that
768 * the bh is not already part of an existing transaction. 768 * the bh is not already part of an existing transaction.
769 * 769 *
770 * The buffer should already be locked by the caller by this point. 770 * The buffer should already be locked by the caller by this point.
771 * There is no lock ranking violation: it was a newly created, 771 * There is no lock ranking violation: it was a newly created,
772 * unlocked buffer beforehand. */ 772 * unlocked buffer beforehand. */
@@ -778,7 +778,7 @@ int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
778 * 778 *
779 * Call this if you create a new bh. 779 * Call this if you create a new bh.
780 */ 780 */
781int journal_get_create_access(handle_t *handle, struct buffer_head *bh) 781int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
782{ 782{
783 transaction_t *transaction = handle->h_transaction; 783 transaction_t *transaction = handle->h_transaction;
784 journal_t *journal = transaction->t_journal; 784 journal_t *journal = transaction->t_journal;
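
A sketch of the newly-allocated case: the buffer comes from getblk() (never read from disk), stays locked across the access call, and is only then filled in. The helper name is invented and locking is reduced to the essentials.

#include <linux/buffer_head.h>
#include <linux/jbd.h>
#include <linux/string.h>

static struct buffer_head *my_fs_new_metadata_block(handle_t *handle,
                                                    struct super_block *sb,
                                                    unsigned long blocknr)
{
        struct buffer_head *bh = sb_getblk(sb, blocknr);

        if (!bh)
                return NULL;

        lock_buffer(bh);
        if (journal_get_create_access(handle, bh)) {
                unlock_buffer(bh);
                brelse(bh);
                return NULL;
        }
        memset(bh->b_data, 0, sb->s_blocksize);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        /* The caller fills the block in and calls journal_dirty_metadata(). */
        return bh;
}
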
@@ -847,13 +847,13 @@ out:
847 * do not reuse freed space until the deallocation has been committed, 847 * do not reuse freed space until the deallocation has been committed,
848 * since if we overwrote that space we would make the delete 848 * since if we overwrote that space we would make the delete
849 * un-rewindable in case of a crash. 849 * un-rewindable in case of a crash.
850 * 850 *
851 * To deal with that, journal_get_undo_access requests write access to a 851 * To deal with that, journal_get_undo_access requests write access to a
852 * buffer for parts of non-rewindable operations such as delete 852 * buffer for parts of non-rewindable operations such as delete
853 * operations on the bitmaps. The journaling code must keep a copy of 853 * operations on the bitmaps. The journaling code must keep a copy of
854 * the buffer's contents prior to the undo_access call until such time 854 * the buffer's contents prior to the undo_access call until such time
855 * as we know that the buffer has definitely been committed to disk. 855 * as we know that the buffer has definitely been committed to disk.
856 * 856 *
857 * We never need to know which transaction the committed data is part 857 * We never need to know which transaction the committed data is part
858 * of, buffers touched here are guaranteed to be dirtied later and so 858 * of, buffers touched here are guaranteed to be dirtied later and so
859 * will be committed to a new transaction in due course, at which point 859 * will be committed to a new transaction in due course, at which point
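
Concretely, the bitmap case reduces to something like the sketch below, assuming the two-argument form of the *_access helpers used elsewhere in this patch; the bit-clearing is simplified and unlocked.

#include <linux/bitops.h>
#include <linux/buffer_head.h>
#include <linux/jbd.h>

static int my_fs_free_bit_in_bitmap(handle_t *handle,
                                    struct buffer_head *bitmap_bh, int bit)
{
        int err;

        /* Snapshot the committed contents first, so commit can still
         * write the bitmap as it looked before this deallocation. */
        err = journal_get_undo_access(handle, bitmap_bh);
        if (err)
                return err;

        clear_bit(bit, (unsigned long *)bitmap_bh->b_data);

        return journal_dirty_metadata(handle, bitmap_bh);
}
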
@@ -911,13 +911,13 @@ out:
911 return err; 911 return err;
912} 912}
913 913
914/** 914/**
915 * int journal_dirty_data() - mark a buffer as containing dirty data which 915 * int journal_dirty_data() - mark a buffer as containing dirty data which
916 * needs to be flushed before we can commit the 916 * needs to be flushed before we can commit the
917 * current transaction. 917 * current transaction.
918 * @handle: transaction 918 * @handle: transaction
919 * @bh: bufferhead to mark 919 * @bh: bufferhead to mark
920 * 920 *
921 * The buffer is placed on the transaction's data list and is marked as 921 * The buffer is placed on the transaction's data list and is marked as
922 * belonging to the transaction. 922 * belonging to the transaction.
923 * 923 *
@@ -946,15 +946,15 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
946 946
947 /* 947 /*
948 * What if the buffer is already part of a running transaction? 948 * What if the buffer is already part of a running transaction?
949 * 949 *
950 * There are two cases: 950 * There are two cases:
951 * 1) It is part of the current running transaction. Refile it, 951 * 1) It is part of the current running transaction. Refile it,
952 * just in case we have allocated it as metadata, deallocated 952 * just in case we have allocated it as metadata, deallocated
953 * it, then reallocated it as data. 953 * it, then reallocated it as data.
954 * 2) It is part of the previous, still-committing transaction. 954 * 2) It is part of the previous, still-committing transaction.
955 * If all we want to do is to guarantee that the buffer will be 955 * If all we want to do is to guarantee that the buffer will be
956 * written to disk before this new transaction commits, then 956 * written to disk before this new transaction commits, then
957 * being sure that the *previous* transaction has this same 957 * being sure that the *previous* transaction has this same
958 * property is sufficient for us! Just leave it on its old 958 * property is sufficient for us! Just leave it on its old
959 * transaction. 959 * transaction.
960 * 960 *
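
For context, ordered-mode data usually reaches journal_dirty_data() from a writepage-style path that walks the page's buffers; a simplified sketch (locking and the unmapped/new-buffer details are elided):

#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/jbd.h>

static int my_fs_order_page_data(handle_t *handle, struct page *page)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int err = 0;

        do {
                if (!err && buffer_mapped(bh))
                        /* Tie the data buffer to the running transaction so
                         * commit flushes it before the referencing metadata. */
                        err = journal_dirty_data(handle, bh);
                bh = bh->b_this_page;
        } while (bh != head);

        return err;
}
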
@@ -1076,18 +1076,18 @@ no_journal:
1076 return 0; 1076 return 0;
1077} 1077}
1078 1078
1079/** 1079/**
1080 * int journal_dirty_metadata() - mark a buffer as containing dirty metadata 1080 * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
1081 * @handle: transaction to add buffer to. 1081 * @handle: transaction to add buffer to.
1082 * @bh: buffer to mark 1082 * @bh: buffer to mark
1083 * 1083 *
1084 * mark dirty metadata which needs to be journaled as part of the current 1084 * mark dirty metadata which needs to be journaled as part of the current
1085 * transaction. 1085 * transaction.
1086 * 1086 *
1087 * The buffer is placed on the transaction's metadata list and is marked 1087 * The buffer is placed on the transaction's metadata list and is marked
1088 * as belonging to the transaction. 1088 * as belonging to the transaction.
1089 * 1089 *
1090 * Returns error number or 0 on success. 1090 * Returns error number or 0 on success.
1091 * 1091 *
1092 * Special care needs to be taken if the buffer already belongs to the 1092 * Special care needs to be taken if the buffer already belongs to the
1093 * current committing transaction (in which case we should have frozen 1093 * current committing transaction (in which case we should have frozen
@@ -1135,11 +1135,11 @@ int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1135 1135
1136 set_buffer_jbddirty(bh); 1136 set_buffer_jbddirty(bh);
1137 1137
1138 /* 1138 /*
1139 * Metadata already on the current transaction list doesn't 1139 * Metadata already on the current transaction list doesn't
1140 * need to be filed. Metadata on another transaction's list must 1140 * need to be filed. Metadata on another transaction's list must
1141 * be committing, and will be refiled once the commit completes: 1141 * be committing, and will be refiled once the commit completes:
1142 * leave it alone for now. 1142 * leave it alone for now.
1143 */ 1143 */
1144 if (jh->b_transaction != transaction) { 1144 if (jh->b_transaction != transaction) {
1145 JBUFFER_TRACE(jh, "already on other transaction"); 1145 JBUFFER_TRACE(jh, "already on other transaction");
@@ -1165,7 +1165,7 @@ out:
1165 return 0; 1165 return 0;
1166} 1166}
1167 1167
1168/* 1168/*
1169 * journal_release_buffer: undo a get_write_access without any buffer 1169 * journal_release_buffer: undo a get_write_access without any buffer
1170 * updates, if the update decided in the end that it didn't need access. 1170 * updates, if the update decided in the end that it didn't need access.
1171 * 1171 *
@@ -1176,20 +1176,20 @@ journal_release_buffer(handle_t *handle, struct buffer_head *bh)
1176 BUFFER_TRACE(bh, "entry"); 1176 BUFFER_TRACE(bh, "entry");
1177} 1177}
1178 1178
1179/** 1179/**
1180 * void journal_forget() - bforget() for potentially-journaled buffers. 1180 * void journal_forget() - bforget() for potentially-journaled buffers.
1181 * @handle: transaction handle 1181 * @handle: transaction handle
1182 * @bh: bh to 'forget' 1182 * @bh: bh to 'forget'
1183 * 1183 *
1184 * We can only do the bforget if there are no commits pending against the 1184 * We can only do the bforget if there are no commits pending against the
1185 * buffer. If the buffer is dirty in the current running transaction we 1185 * buffer. If the buffer is dirty in the current running transaction we
1186 * can safely unlink it. 1186 * can safely unlink it.
1187 * 1187 *
1188 * bh may not be a journalled buffer at all - it may be a non-JBD 1188 * bh may not be a journalled buffer at all - it may be a non-JBD
1189 * buffer which came off the hashtable. Check for this. 1189 * buffer which came off the hashtable. Check for this.
1190 * 1190 *
1191 * Decrements bh->b_count by one. 1191 * Decrements bh->b_count by one.
1192 * 1192 *
1193 * Allow this call even if the handle has aborted --- it may be part of 1193 * Allow this call even if the handle has aborted --- it may be part of
1194 * the caller's cleanup after an abort. 1194 * the caller's cleanup after an abort.
1195 */ 1195 */
@@ -1237,7 +1237,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
1237 1237
1238 drop_reserve = 1; 1238 drop_reserve = 1;
1239 1239
1240 /* 1240 /*
1241 * We are no longer going to journal this buffer. 1241 * We are no longer going to journal this buffer.
1242 * However, the commit of this transaction is still 1242 * However, the commit of this transaction is still
1243 * important to the buffer: the delete that we are now 1243 * important to the buffer: the delete that we are now
@@ -1246,7 +1246,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
1246 * 1246 *
1247 * So, if we have a checkpoint on the buffer, we should 1247 * So, if we have a checkpoint on the buffer, we should
1248 * now refile the buffer on our BJ_Forget list so that 1248 * now refile the buffer on our BJ_Forget list so that
1249 * we know to remove the checkpoint after we commit. 1249 * we know to remove the checkpoint after we commit.
1250 */ 1250 */
1251 1251
1252 if (jh->b_cp_transaction) { 1252 if (jh->b_cp_transaction) {
@@ -1264,7 +1264,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
1264 } 1264 }
1265 } 1265 }
1266 } else if (jh->b_transaction) { 1266 } else if (jh->b_transaction) {
1267 J_ASSERT_JH(jh, (jh->b_transaction == 1267 J_ASSERT_JH(jh, (jh->b_transaction ==
1268 journal->j_committing_transaction)); 1268 journal->j_committing_transaction));
1269 /* However, if the buffer is still owned by a prior 1269 /* However, if the buffer is still owned by a prior
1270 * (committing) transaction, we can't drop it yet... */ 1270 * (committing) transaction, we can't drop it yet... */
@@ -1294,7 +1294,7 @@ drop:
1294/** 1294/**
1295 * int journal_stop() - complete a transaction 1295 * int journal_stop() - complete a transaction
1296 * @handle: transaction to complete. 1296 * @handle: transaction to complete.
1297 * 1297 *
1298 * All done for a particular handle. 1298 * All done for a particular handle.
1299 * 1299 *
1300 * There is not much action needed here. We just return any remaining 1300 * There is not much action needed here. We just return any remaining
@@ -1303,7 +1303,7 @@ drop:
1303 * filesystem is marked for synchronous update. 1303 * filesystem is marked for synchronous update.
1304 * 1304 *
1305 * journal_stop itself will not usually return an error, but it may 1305 * journal_stop itself will not usually return an error, but it may
1306 * do so in unusual circumstances. In particular, expect it to 1306 * do so in unusual circumstances. In particular, expect it to
1307 * return -EIO if a journal_abort has been executed since the 1307 * return -EIO if a journal_abort has been executed since the
1308 * transaction began. 1308 * transaction began.
1309 */ 1309 */
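
The synchronous case deserves a tiny illustration: setting h_sync on the handle before journal_stop() is how a caller forces a durable commit, and it is essentially what journal_force_commit() (visible in a hunk below) does.

#include <linux/err.h>
#include <linux/jbd.h>

static int my_fs_force_commit(journal_t *journal)
{
        handle_t *handle = journal_start(journal, 1);

        if (IS_ERR(handle))
                return PTR_ERR(handle);

        handle->h_sync = 1;             /* make journal_stop() wait for commit */
        return journal_stop(handle);
}
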
@@ -1388,7 +1388,7 @@ int journal_stop(handle_t *handle)
1388 1388
1389 /* 1389 /*
1390 * Special case: JFS_SYNC synchronous updates require us 1390 * Special case: JFS_SYNC synchronous updates require us
1391 * to wait for the commit to complete. 1391 * to wait for the commit to complete.
1392 */ 1392 */
1393 if (handle->h_sync && !(current->flags & PF_MEMALLOC)) 1393 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1394 err = log_wait_commit(journal, tid); 1394 err = log_wait_commit(journal, tid);
@@ -1439,7 +1439,7 @@ int journal_force_commit(journal_t *journal)
1439 * jbd_lock_bh_state(jh2bh(jh)) is held. 1439 * jbd_lock_bh_state(jh2bh(jh)) is held.
1440 */ 1440 */
1441 1441
1442static inline void 1442static inline void
1443__blist_add_buffer(struct journal_head **list, struct journal_head *jh) 1443__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1444{ 1444{
1445 if (!*list) { 1445 if (!*list) {
@@ -1454,7 +1454,7 @@ __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1454 } 1454 }
1455} 1455}
1456 1456
1457/* 1457/*
1458 * Remove a buffer from a transaction list, given the transaction's list 1458 * Remove a buffer from a transaction list, given the transaction's list
1459 * head pointer. 1459 * head pointer.
1460 * 1460 *
@@ -1475,7 +1475,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1475 jh->b_tnext->b_tprev = jh->b_tprev; 1475 jh->b_tnext->b_tprev = jh->b_tprev;
1476} 1476}
1477 1477
1478/* 1478/*
1479 * Remove a buffer from the appropriate transaction list. 1479 * Remove a buffer from the appropriate transaction list.
1480 * 1480 *
1481 * Note that this function can *change* the value of 1481 * Note that this function can *change* the value of
@@ -1595,17 +1595,17 @@ out:
1595} 1595}
1596 1596
1597 1597
1598/** 1598/**
1599 * int journal_try_to_free_buffers() - try to free page buffers. 1599 * int journal_try_to_free_buffers() - try to free page buffers.
1600 * @journal: journal for operation 1600 * @journal: journal for operation
1601 * @page: to try and free 1601 * @page: to try and free
1602 * @unused_gfp_mask: unused 1602 * @unused_gfp_mask: unused
1603 * 1603 *
1604 * 1604 *
1605 * For all the buffers on this page, 1605 * For all the buffers on this page,
1606 * if they are fully written out ordered data, move them onto BUF_CLEAN 1606 * if they are fully written out ordered data, move them onto BUF_CLEAN
1607 * so try_to_free_buffers() can reap them. 1607 * so try_to_free_buffers() can reap them.
1608 * 1608 *
1609 * This function returns non-zero if we wish try_to_free_buffers() 1609 * This function returns non-zero if we wish try_to_free_buffers()
1610 * to be called. We do this if the page is releasable by try_to_free_buffers(). 1610 * to be called. We do this if the page is releasable by try_to_free_buffers().
1611 * We also do it if the page has locked or dirty buffers and the caller wants 1611 * We also do it if the page has locked or dirty buffers and the caller wants
@@ -1629,7 +1629,7 @@ out:
1629 * cannot happen because we never reallocate freed data as metadata 1629 * cannot happen because we never reallocate freed data as metadata
1630 * while the data is part of a transaction. Yes? 1630 * while the data is part of a transaction. Yes?
1631 */ 1631 */
1632int journal_try_to_free_buffers(journal_t *journal, 1632int journal_try_to_free_buffers(journal_t *journal,
1633 struct page *page, gfp_t unused_gfp_mask) 1633 struct page *page, gfp_t unused_gfp_mask)
1634{ 1634{
1635 struct buffer_head *head; 1635 struct buffer_head *head;
@@ -1697,7 +1697,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1697} 1697}
1698 1698
1699/* 1699/*
1700 * journal_invalidatepage 1700 * journal_invalidatepage
1701 * 1701 *
1702 * This code is tricky. It has a number of cases to deal with. 1702 * This code is tricky. It has a number of cases to deal with.
1703 * 1703 *
@@ -1705,15 +1705,15 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1705 * 1705 *
1706 * i_size must be updated on disk before we start calling invalidatepage on the 1706 * i_size must be updated on disk before we start calling invalidatepage on the
1707 * data. 1707 * data.
1708 * 1708 *
1709 * This is done in ext3 by defining an ext3_setattr method which 1709 * This is done in ext3 by defining an ext3_setattr method which
1710 * updates i_size before truncate gets going. By maintaining this 1710 * updates i_size before truncate gets going. By maintaining this
1711 * invariant, we can be sure that it is safe to throw away any buffers 1711 * invariant, we can be sure that it is safe to throw away any buffers
1712 * attached to the current transaction: once the transaction commits, 1712 * attached to the current transaction: once the transaction commits,
1713 * we know that the data will not be needed. 1713 * we know that the data will not be needed.
1714 * 1714 *
1715 * Note however that we can *not* throw away data belonging to the 1715 * Note however that we can *not* throw away data belonging to the
1716 * previous, committing transaction! 1716 * previous, committing transaction!
1717 * 1717 *
1718 * Any disk blocks which *are* part of the previous, committing 1718 * Any disk blocks which *are* part of the previous, committing
1719 * transaction (and which therefore cannot be discarded immediately) are 1719 * transaction (and which therefore cannot be discarded immediately) are
@@ -1732,7 +1732,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1732 * don't make guarantees about the order in which data hits disk --- in 1732 * don't make guarantees about the order in which data hits disk --- in
1733 * particular we don't guarantee that new dirty data is flushed before 1733 * particular we don't guarantee that new dirty data is flushed before
1734 * transaction commit --- so it is always safe just to discard data 1734 * transaction commit --- so it is always safe just to discard data
1735 * immediately in that mode. --sct 1735 * immediately in that mode. --sct
1736 */ 1736 */
1737 1737
1738/* 1738/*
@@ -1876,9 +1876,9 @@ zap_buffer_unlocked:
1876 return may_free; 1876 return may_free;
1877} 1877}
1878 1878
1879/** 1879/**
1880 * void journal_invalidatepage() 1880 * void journal_invalidatepage()
1881 * @journal: journal to use for flush... 1881 * @journal: journal to use for flush...
1882 * @page: page to flush 1882 * @page: page to flush
1883 * @offset: length of page to invalidate. 1883 * @offset: length of page to invalidate.
1884 * 1884 *
@@ -1886,7 +1886,7 @@ zap_buffer_unlocked:
1886 * 1886 *
1887 */ 1887 */
1888void journal_invalidatepage(journal_t *journal, 1888void journal_invalidatepage(journal_t *journal,
1889 struct page *page, 1889 struct page *page,
1890 unsigned long offset) 1890 unsigned long offset)
1891{ 1891{
1892 struct buffer_head *head, *bh, *next; 1892 struct buffer_head *head, *bh, *next;
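For context, a client filesystem's ->invalidatepage() hook normally just forwards to this routine once i_size has been updated as described above. The sketch below is illustrative only (loosely following ext3's hook, minus its journalled-data PageChecked() handling).

#include <linux/fs.h>
#include <linux/ext3_jbd.h>

/* Illustrative ->invalidatepage hook: let the journal decide which
 * buffers in the truncated range can be discarded now and which belong
 * to the committing transaction and must be left alone. */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	journal_invalidatepage(journal, page, offset);
}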
@@ -1924,8 +1924,8 @@ void journal_invalidatepage(journal_t *journal,
1924 } 1924 }
1925} 1925}
1926 1926
1927/* 1927/*
1928 * File a buffer on the given transaction list. 1928 * File a buffer on the given transaction list.
1929 */ 1929 */
1930void __journal_file_buffer(struct journal_head *jh, 1930void __journal_file_buffer(struct journal_head *jh,
1931 transaction_t *transaction, int jlist) 1931 transaction_t *transaction, int jlist)
@@ -1948,7 +1948,7 @@ void __journal_file_buffer(struct journal_head *jh,
1948 * with __jbd_unexpected_dirty_buffer()'s handling of dirty 1948 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
1949 * state. */ 1949 * state. */
1950 1950
1951 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 1951 if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
1952 jlist == BJ_Shadow || jlist == BJ_Forget) { 1952 jlist == BJ_Shadow || jlist == BJ_Forget) {
1953 if (test_clear_buffer_dirty(bh) || 1953 if (test_clear_buffer_dirty(bh) ||
1954 test_clear_buffer_jbddirty(bh)) 1954 test_clear_buffer_jbddirty(bh))
@@ -2008,7 +2008,7 @@ void journal_file_buffer(struct journal_head *jh,
2008 jbd_unlock_bh_state(jh2bh(jh)); 2008 jbd_unlock_bh_state(jh2bh(jh));
2009} 2009}
2010 2010
2011/* 2011/*
2012 * Remove a buffer from its current buffer list in preparation for 2012 * Remove a buffer from its current buffer list in preparation for
2013 * dropping it from its current transaction entirely. If the buffer has 2013 * dropping it from its current transaction entirely. If the buffer has
2014 * already started to be used by a subsequent transaction, refile the 2014 * already started to be used by a subsequent transaction, refile the
@@ -2060,7 +2060,7 @@ void __journal_refile_buffer(struct journal_head *jh)
2060 * to the caller to remove the journal_head if necessary. For the 2060 * to the caller to remove the journal_head if necessary. For the
2061 * unlocked journal_refile_buffer call, the caller isn't going to be 2061 * unlocked journal_refile_buffer call, the caller isn't going to be
2062 * doing anything else to the buffer so we need to do the cleanup 2062 * doing anything else to the buffer so we need to do the cleanup
2063 * ourselves to avoid a jh leak. 2063 * ourselves to avoid a jh leak.
2064 * 2064 *
2065 * *** The journal_head may be freed by this call! *** 2065 * *** The journal_head may be freed by this call! ***
2066 */ 2066 */
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index c8307c02dd07..ce0e6109aff0 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -23,7 +23,7 @@
23 23
24/* Define the number of blocks we need to account to a transaction to 24/* Define the number of blocks we need to account to a transaction to
25 * modify one block of data. 25 * modify one block of data.
26 * 26 *
27 * We may have to touch one inode, one bitmap buffer, up to three 27 * We may have to touch one inode, one bitmap buffer, up to three
28 * indirection blocks, the group and superblock summaries, and the data 28 * indirection blocks, the group and superblock summaries, and the data
29 * block to complete the transaction. */ 29 * block to complete the transaction. */
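Spelling the comment out as arithmetic: 1 (inode block) + 1 (block bitmap) + 3 (indirection blocks, worst case) + 1 (group descriptor) + 1 (superblock) + 1 (data block) = 8 buffer credits per modified data block, which is consistent with EXT3_SINGLEDATA_TRANS_BLOCKS being defined as 8 just below this comment (the definition itself is not shown in this hunk).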
@@ -88,16 +88,16 @@
88#endif 88#endif
89 89
90int 90int
91ext3_mark_iloc_dirty(handle_t *handle, 91ext3_mark_iloc_dirty(handle_t *handle,
92 struct inode *inode, 92 struct inode *inode,
93 struct ext3_iloc *iloc); 93 struct ext3_iloc *iloc);
94 94
95/* 95/*
96 * On success, we end up with an outstanding reference count against 96 * On success, we end up with an outstanding reference count against
97 * iloc->bh. This _must_ be cleaned up later. 97 * iloc->bh. This _must_ be cleaned up later.
98 */ 98 */
99 99
100int ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 100int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
101 struct ext3_iloc *iloc); 101 struct ext3_iloc *iloc);
102 102
103int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode); 103int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
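A minimal sketch of the intended calling pattern for this pair, assuming the caller already holds a handle with sufficient credits. The function name and the choice of i_mtime as the field being touched are illustrative; in this tree ext3_mark_iloc_dirty() is what ends up consuming the reference that ext3_reserve_inode_write() leaves in iloc->bh (if that were ever not the case, an explicit brelse(iloc.bh) would be needed instead).

#include <linux/fs.h>
#include <linux/ext3_jbd.h>

/* Illustrative use of the reserve/dirty pair: pin and get write access to
 * the inode's on-disk block, change the in-core inode, then copy it out
 * to the buffer and drop the outstanding reference. */
static int example_touch_inode(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;
	int err;

	err = ext3_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;

	inode->i_mtime = CURRENT_TIME_SEC;	/* some in-core change */

	/* Copies the inode into iloc->bh, journals it, and releases the
	 * reference taken above. */
	return ext3_mark_iloc_dirty(handle, inode, &iloc);
}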
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index a04c154c5207..7d847931ee5a 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/include/linux/jbd.h 2 * linux/include/linux/jbd.h
3 * 3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com> 4 * Written by Stephen C. Tweedie <sct@redhat.com>
5 * 5 *
6 * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved 6 * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
@@ -97,8 +97,8 @@ extern void jbd_slab_free(void *ptr, size_t size);
97 * number of outstanding buffers possible at any time. When the 97 * number of outstanding buffers possible at any time. When the
98 * operation completes, any buffer credits not used are credited back to 98 * operation completes, any buffer credits not used are credited back to
99 * the transaction, so that at all times we know how many buffers the 99 * the transaction, so that at all times we know how many buffers the
100 * outstanding updates on a transaction might possibly touch. 100 * outstanding updates on a transaction might possibly touch.
101 * 101 *
102 * This is an opaque datatype. 102 * This is an opaque datatype.
103 **/ 103 **/
104typedef struct handle_s handle_t; /* Atomic operation type */ 104typedef struct handle_s handle_t; /* Atomic operation type */
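To make the credit accounting concrete, here is a hedged sketch of one complete atomic operation; everything except the jbd calls themselves is illustrative.

#include <linux/err.h>
#include <linux/jbd.h>

/* Illustrative handle lifecycle: reserve a worst-case number of buffer
 * credits up front; journal_stop() returns whatever was not used to the
 * running transaction. */
static int example_atomic_update(journal_t *journal)
{
	handle_t *handle = journal_start(journal, 4);	/* worst-case guess */

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/*
	 * journal_get_write_access()/journal_dirty_metadata() calls on at
	 * most 4 buffers go here; journal_extend() may (without guarantee)
	 * grow the reservation if the guess turns out to be too small.
	 */

	return journal_stop(handle);
}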
@@ -108,7 +108,7 @@ typedef struct handle_s handle_t; /* Atomic operation type */
108 * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem. 108 * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
109 * 109 *
110 * journal_t is linked to from the fs superblock structure. 110 * journal_t is linked to from the fs superblock structure.
111 * 111 *
112 * We use the journal_t to keep track of all outstanding transaction 112 * We use the journal_t to keep track of all outstanding transaction
113 * activity on the filesystem, and to manage the state of the log 113 * activity on the filesystem, and to manage the state of the log
114 * writing process. 114 * writing process.
@@ -128,7 +128,7 @@ typedef struct journal_s journal_t; /* Journal control structure */
128 * On-disk structures 128 * On-disk structures
129 */ 129 */
130 130
131/* 131/*
132 * Descriptor block types: 132 * Descriptor block types:
133 */ 133 */
134 134
@@ -149,8 +149,8 @@ typedef struct journal_header_s
149} journal_header_t; 149} journal_header_t;
150 150
151 151
152/* 152/*
153 * The block tag: used to describe a single buffer in the journal 153 * The block tag: used to describe a single buffer in the journal
154 */ 154 */
155typedef struct journal_block_tag_s 155typedef struct journal_block_tag_s
156{ 156{
@@ -158,9 +158,9 @@ typedef struct journal_block_tag_s
158 __be32 t_flags; /* See below */ 158 __be32 t_flags; /* See below */
159} journal_block_tag_t; 159} journal_block_tag_t;
160 160
161/* 161/*
162 * The revoke descriptor: used on disk to describe a series of blocks to 162 * The revoke descriptor: used on disk to describe a series of blocks to
163 * be revoked from the log 163 * be revoked from the log
164 */ 164 */
165typedef struct journal_revoke_header_s 165typedef struct journal_revoke_header_s
166{ 166{
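As a worked illustration of the revoke block layout (assuming the usual format, where the revoked block numbers simply follow this header as a packed array of __be32): the common journal_header_t is three __be32 fields (12 bytes), so the revoke header occupies 16 bytes, and a 1024-byte journal block then leaves room for (1024 - 16) / 4 = 252 revoked block numbers, with r_count recording the 16 + 4*n bytes actually in use.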
@@ -374,10 +374,10 @@ struct jbd_revoke_table_s;
374 **/ 374 **/
375 375
376/* Docbook can't yet cope with the bit fields, but we will leave the documentation 376/* Docbook can't yet cope with the bit fields, but we will leave the documentation
377 * in so it can be fixed later. 377 * in so it can be fixed later.
378 */ 378 */
379 379
380struct handle_s 380struct handle_s
381{ 381{
382 /* Which compound transaction is this update a part of? */ 382 /* Which compound transaction is this update a part of? */
383 transaction_t *h_transaction; 383 transaction_t *h_transaction;
@@ -435,7 +435,7 @@ struct handle_s
435 * 435 *
436 */ 436 */
437 437
438struct transaction_s 438struct transaction_s
439{ 439{
440 /* Pointer to the journal for this transaction. [no locking] */ 440 /* Pointer to the journal for this transaction. [no locking] */
441 journal_t *t_journal; 441 journal_t *t_journal;
@@ -455,7 +455,7 @@ struct transaction_s
455 T_RUNDOWN, 455 T_RUNDOWN,
456 T_FLUSH, 456 T_FLUSH,
457 T_COMMIT, 457 T_COMMIT,
458 T_FINISHED 458 T_FINISHED
459 } t_state; 459 } t_state;
460 460
461 /* 461 /*
@@ -569,7 +569,7 @@ struct transaction_s
569 * journal_t. 569 * journal_t.
570 * @j_flags: General journaling state flags 570 * @j_flags: General journaling state flags
571 * @j_errno: Is there an outstanding uncleared error on the journal (from a 571 * @j_errno: Is there an outstanding uncleared error on the journal (from a
572 * prior abort)? 572 * prior abort)?
573 * @j_sb_buffer: First part of superblock buffer 573 * @j_sb_buffer: First part of superblock buffer
574 * @j_superblock: Second part of superblock buffer 574 * @j_superblock: Second part of superblock buffer
575 * @j_format_version: Version of the superblock format 575 * @j_format_version: Version of the superblock format
@@ -583,7 +583,7 @@ struct transaction_s
583 * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction 583 * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
584 * to start committing, or for a barrier lock to be released 584 * to start committing, or for a barrier lock to be released
585 * @j_wait_logspace: Wait queue for waiting for checkpointing to complete 585 * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
586 * @j_wait_done_commit: Wait queue for waiting for commit to complete 586 * @j_wait_done_commit: Wait queue for waiting for commit to complete
587 * @j_wait_checkpoint: Wait queue to trigger checkpointing 587 * @j_wait_checkpoint: Wait queue to trigger checkpointing
588 * @j_wait_commit: Wait queue to trigger commit 588 * @j_wait_commit: Wait queue to trigger commit
589 * @j_wait_updates: Wait queue to wait for updates to complete 589 * @j_wait_updates: Wait queue to wait for updates to complete
@@ -592,7 +592,7 @@ struct transaction_s
592 * @j_tail: Journal tail - identifies the oldest still-used block in the 592 * @j_tail: Journal tail - identifies the oldest still-used block in the
593 * journal. 593 * journal.
594 * @j_free: Journal free - how many free blocks are there in the journal? 594 * @j_free: Journal free - how many free blocks are there in the journal?
595 * @j_first: The block number of the first usable block 595 * @j_first: The block number of the first usable block
596 * @j_last: The block number one beyond the last usable block 596 * @j_last: The block number one beyond the last usable block
597 * @j_dev: Device where we store the journal 597 * @j_dev: Device where we store the journal
598 * @j_blocksize: blocksize for the location where we store the journal. 598 * @j_blocksize: blocksize for the location where we store the journal.
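Purely illustrative numbers to tie these fields together: with j_first = 1 and j_last = 8193 the log has 8192 usable blocks; if committed-but-not-yet-checkpointed transactions currently occupy 512 of them, starting at j_tail, then j_free is on the order of 8192 - 512, and checkpointing advances j_tail to win that space back.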
@@ -604,12 +604,12 @@ struct transaction_s
604 * @j_list_lock: Protects the buffer lists and internal buffer state. 604 * @j_list_lock: Protects the buffer lists and internal buffer state.
605 * @j_inode: Optional inode where we store the journal. If present, all journal 605 * @j_inode: Optional inode where we store the journal. If present, all journal
606 * block numbers are mapped into this inode via bmap(). 606 * block numbers are mapped into this inode via bmap().
607 * @j_tail_sequence: Sequence number of the oldest transaction in the log 607 * @j_tail_sequence: Sequence number of the oldest transaction in the log
608 * @j_transaction_sequence: Sequence number of the next transaction to grant 608 * @j_transaction_sequence: Sequence number of the next transaction to grant
609 * @j_commit_sequence: Sequence number of the most recently committed 609 * @j_commit_sequence: Sequence number of the most recently committed
610 * transaction 610 * transaction
611 * @j_commit_request: Sequence number of the most recent transaction wanting 611 * @j_commit_request: Sequence number of the most recent transaction wanting
612 * commit 612 * commit
613 * @j_uuid: Uuid of client object. 613 * @j_uuid: Uuid of client object.
614 * @j_task: Pointer to the current commit thread for this journal 614 * @j_task: Pointer to the current commit thread for this journal
615 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a 615 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
@@ -823,8 +823,8 @@ struct journal_s
823 void *j_private; 823 void *j_private;
824}; 824};
825 825
826/* 826/*
827 * Journal flag definitions 827 * Journal flag definitions
828 */ 828 */
829#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */ 829#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
830#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */ 830#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
@@ -833,7 +833,7 @@ struct journal_s
833#define JFS_LOADED 0x010 /* The journal superblock has been loaded */ 833#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
834#define JFS_BARRIER 0x020 /* Use IDE barriers */ 834#define JFS_BARRIER 0x020 /* Use IDE barriers */
835 835
836/* 836/*
837 * Function declarations for the journaling transaction and buffer 837 * Function declarations for the journaling transaction and buffer
838 * management 838 * management
839 */ 839 */
@@ -862,7 +862,7 @@ int __journal_remove_checkpoint(struct journal_head *);
862void __journal_insert_checkpoint(struct journal_head *, transaction_t *); 862void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
863 863
864/* Buffer IO */ 864/* Buffer IO */
865extern int 865extern int
866journal_write_metadata_buffer(transaction_t *transaction, 866journal_write_metadata_buffer(transaction_t *transaction,
867 struct journal_head *jh_in, 867 struct journal_head *jh_in,
868 struct journal_head **jh_out, 868 struct journal_head **jh_out,
@@ -890,7 +890,7 @@ static inline handle_t *journal_current_handle(void)
890/* The journaling code user interface: 890/* The journaling code user interface:
891 * 891 *
892 * Create and destroy handles 892 * Create and destroy handles
893 * Register buffer modifications against the current transaction. 893 * Register buffer modifications against the current transaction.
894 */ 894 */
895 895
896extern handle_t *journal_start(journal_t *, int nblocks); 896extern handle_t *journal_start(journal_t *, int nblocks);
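Given a handle from journal_start(), registering a single buffer modification follows the pattern sketched below (the function name is illustrative; the two jbd calls are the real interface declared further down this header).

#include <linux/jbd.h>
#include <linux/buffer_head.h>

/* Illustrative buffer registration: declare intent to modify, change the
 * buffer, then mark it dirty with respect to the journal rather than
 * only the buffer cache. */
static int example_modify_metadata(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_get_write_access(handle, bh);

	if (err)
		return err;

	/* ... modify bh->b_data here ... */

	return journal_dirty_metadata(handle, bh);
}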
@@ -917,11 +917,11 @@ extern journal_t * journal_init_dev(struct block_device *bdev,
917 int start, int len, int bsize); 917 int start, int len, int bsize);
918extern journal_t * journal_init_inode (struct inode *); 918extern journal_t * journal_init_inode (struct inode *);
919extern int journal_update_format (journal_t *); 919extern int journal_update_format (journal_t *);
920extern int journal_check_used_features 920extern int journal_check_used_features
921 (journal_t *, unsigned long, unsigned long, unsigned long); 921 (journal_t *, unsigned long, unsigned long, unsigned long);
922extern int journal_check_available_features 922extern int journal_check_available_features
923 (journal_t *, unsigned long, unsigned long, unsigned long); 923 (journal_t *, unsigned long, unsigned long, unsigned long);
924extern int journal_set_features 924extern int journal_set_features
925 (journal_t *, unsigned long, unsigned long, unsigned long); 925 (journal_t *, unsigned long, unsigned long, unsigned long);
926extern int journal_create (journal_t *); 926extern int journal_create (journal_t *);
927extern int journal_load (journal_t *journal); 927extern int journal_load (journal_t *journal);
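A hedged sketch of how a client is expected to combine the three feature calls (the error codes and helper name are illustrative; JFS_FEATURE_INCOMPAT_REVOKE is the incompat flag defined elsewhere in this header):

#include <linux/jbd.h>

/* Illustrative feature negotiation: make sure the on-disk journal either
 * already carries, or can be upgraded to carry, revoke records. */
static int example_enable_revoke(journal_t *journal)
{
	if (journal_check_used_features(journal, 0, 0,
					JFS_FEATURE_INCOMPAT_REVOKE))
		return 0;		/* already set on disk */

	if (!journal_check_available_features(journal, 0, 0,
					      JFS_FEATURE_INCOMPAT_REVOKE))
		return -EINVAL;		/* this journal cannot support it */

	return journal_set_features(journal, 0, 0,
				    JFS_FEATURE_INCOMPAT_REVOKE) ? 0 : -EIO;
}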
@@ -1015,7 +1015,7 @@ do { \
1015 * bit, when set, indicates that we have had a fatal error somewhere, 1015 * bit, when set, indicates that we have had a fatal error somewhere,
1016 * either inside the journaling layer or indicated to us by the client 1016 * either inside the journaling layer or indicated to us by the client
1017 * (eg. ext3), and that we should not commit any further 1017 * (eg. ext3), and that we should not commit any further
1018 * transactions. 1018 * transactions.
1019 */ 1019 */
1020 1020
1021static inline int is_journal_aborted(journal_t *journal) 1021static inline int is_journal_aborted(journal_t *journal)
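The predicate is typically used as a gate in front of journal_start(), roughly as in the sketch below (ext3 wraps journal_start() this way so that no new handles are opened after an abort; the wrapper shown here is only illustrative).

#include <linux/err.h>
#include <linux/jbd.h>

/* Illustrative guard: refuse to open new handles on an aborted journal. */
static handle_t *example_journal_start(journal_t *journal, int nblocks)
{
	if (is_journal_aborted(journal))
		return ERR_PTR(-EROFS);
	return journal_start(journal, nblocks);
}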
@@ -1082,7 +1082,7 @@ static inline int jbd_space_needed(journal_t *journal)
1082#define BJ_Reserved 7 /* Buffer is reserved for access by journal */ 1082#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
1083#define BJ_Locked 8 /* Locked for I/O during commit */ 1083#define BJ_Locked 8 /* Locked for I/O during commit */
1084#define BJ_Types 9 1084#define BJ_Types 9
1085 1085
1086extern int jbd_blocks_per_page(struct inode *inode); 1086extern int jbd_blocks_per_page(struct inode *inode);
1087 1087
1088#ifdef __KERNEL__ 1088#ifdef __KERNEL__
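As a closing aside on jbd_blocks_per_page(): it just reports how many buffer_heads cover a page, i.e. the page size divided by the filesystem block size, so with 4 KB pages a 1 KB-block filesystem gets 4 and a 4 KB-block one gets 1. A hypothetical equivalent (illustration only, not the jbd body itself):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical equivalent of jbd_blocks_per_page(): buffers per page for
 * this inode's filesystem block size. */
static int example_blocks_per_page(struct inode *inode)
{
	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
}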