author		Mingming Cao <cmm@us.ibm.com>		2006-09-27 04:49:27 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 11:26:09 -0400
commit		ae6ddcc5f24d6b06ae9231dc128904750a4155e0 (patch)
tree		93c6e20b513f39b616af101dabe9b756f7300d0d /fs/ext3/inode.c
parent		e7ab8d65055e9b9dfc131d0467cfc5a8368d7ee4 (diff)
[PATCH] ext3 and jbd cleanup: remove whitespace
Remove whitespace from ext3 and jbd, before we clone ext4.
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/ext3/inode.c')
-rw-r--r--	fs/ext3/inode.c	64

1 file changed, 32 insertions, 32 deletions
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 84be02e93652..473d206b1d7e 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -55,7 +55,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
/*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
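The hunk above only shows the comment; the forget-versus-revoke decision it describes lives in the body of ext3_forget(). A condensed sketch of that decision, assuming the ext3/JBD wrappers behave as their names suggest (the helper name forget_or_revoke is hypothetical, used here for illustration only):

/* Sketch, not the verbatim kernel function: full data journalling never
 * needs a revoke; outside that mode, only un-journalled data blocks may
 * skip it -- metadata is always revoked. */
static int forget_or_revoke(handle_t *handle, int is_metadata,
			    struct inode *inode, struct buffer_head *bh,
			    ext3_fsblk_t blocknr)
{
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh)
			return ext3_journal_forget(handle, bh);
		return 0;	/* block already gone from memory, nothing to do */
	}
	return ext3_journal_revoke(handle, blocknr, bh);
}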
@@ -105,7 +105,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	unsigned long needed;

@@ -122,13 +122,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a conventient checkpoint to make
 * sure we don't overflow the journal.
@@ -136,9 +136,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

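Taken together, the two comments above describe an extend-or-restart discipline for long truncates. A minimal sketch of that pattern, built from blocks_for_truncate() and the generic ext3 journal wrappers (the helper name refill_truncate_credits is hypothetical; the real file implements this slightly differently):

/* Sketch: keep a truncate handle topped up with credits, restarting the
 * transaction at a safe checkpoint when it cannot be extended further. */
static int refill_truncate_credits(handle_t *handle, struct inode *inode)
{
	int needed = blocks_for_truncate(inode);

	if (ext3_journal_extend(handle, needed) == 0)
		return 0;		/* current transaction still has room */
	/* Could not extend: commit what we have and start afresh. */
	return ext3_journal_restart(handle, needed);
}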
@@ -215,12 +215,12 @@ void ext3_delete_inode (struct inode * inode)
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
@@ -398,7 +398,7 @@ no_block:
 *	+ if there is a block to the left of our position - allocate near it.
 *	+ if pointer will live in indirect block - allocate near that block.
 *	+ if pointer will live in inode - allocate in the same
 *	  cylinder group.
 *
 * In the latter case we colour the starting block by the callers PID to
 * prevent it from clashing with concurrent allocations for a different inode
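The PID colouring mentioned in the comment simply spreads concurrent allocators across a block group. Roughly, and assuming bg_start stands for the first data block of the inode's group (a sketch of the goal-finding step, not the exact kernel code):

/* Sketch of the PID colouring used when the allocation goal falls back
 * to the inode's own block group. */
static ext3_fsblk_t colour_goal(struct inode *inode, ext3_fsblk_t bg_start)
{
	ext3_fsblk_t colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);

	return bg_start + colour;
}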
@@ -744,7 +744,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
@@ -1137,7 +1137,7 @@ static int walk_page_buffers(	handle_t *handle,
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
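For context, this helper is applied buffer by buffer through walk_page_buffers(); in the data=journal prepare_write path that use looks roughly like the following sketch (not part of this hunk):

	/* Sketch: in data=journal mode every buffer touched by the write
	 * needs journal write access before data is copied in. */
	if (ext3_should_journal_data(inode))
		ret = walk_page_buffers(handle, page_buffers(page), from, to,
					NULL, do_journal_get_write_access);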
@@ -1282,7 +1282,7 @@ static int ext3_journalled_commit_write(struct file *file,
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}
	ret2 = ext3_journal_stop(handle);
@@ -1291,7 +1291,7 @@ static int ext3_journalled_commit_write(struct file *file,
	return ret;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
@@ -1300,10 +1300,10 @@ static int ext3_journalled_commit_write(struct file *file,
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zero's written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
@@ -1312,16 +1312,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
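The "REALLY heavyweight approach" amounts to flushing the whole journal before asking the generic code for block numbers. A sketch of that sequence, assuming the standard JBD entry points (journal_lock_updates/journal_flush) and generic_block_bmap() behave as named:

	/* Sketch: push journalled data to its final on-disk location, drop
	 * the JDATA hint, then answer the bmap query normally. */
	journal_t *journal = EXT3_JOURNAL(inode);

	EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
	journal_lock_updates(journal);
	err = journal_flush(journal);
	journal_unlock_updates(journal);
	if (err)
		return 0;
	return generic_block_bmap(mapping, block, ext3_get_block);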
@@ -1457,7 +1457,7 @@ static int ext3_ordered_writepage(struct page *page,
	 */

	/*
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
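The attach step the comment refers to also goes through walk_page_buffers(); approximately as below, with journal_dirty_data_fn standing for the per-buffer callback assumed here (a sketch, not this hunk's own code):

	/* Sketch: only after block_write_full_page() succeeded are the
	 * page's buffers filed on the running transaction as ordered data. */
	if (ret == 0)
		ret = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);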
@@ -1644,7 +1644,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);

@@ -2025,7 +2025,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
			   __le32 *first, __le32 *last)
{
	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	   /* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;	   /* Pointer into inode/ind
					      corresponding to
					      block_to_free */
@@ -2054,7 +2054,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
		} else if (nr == block_to_free + count) {
			count++;
		} else {
			ext3_clear_blocks(handle, inode, this_bh,
					  block_to_free,
					  count, block_to_free_p, p);
			block_to_free = nr;
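The branch above is a run-coalescing scheme: consecutive block numbers accumulate into (block_to_free, count) and are handed to ext3_clear_blocks() only when the run breaks. A standalone illustration of the same pattern in plain C (illustrative only, not kernel code):

/* Illustration: free runs of consecutive block numbers with one call per
 * run instead of one call per block. */
static void free_consecutive_runs(const unsigned long *blocks, int n,
				  void (*free_run)(unsigned long start,
						   unsigned long count))
{
	unsigned long start = 0, count = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (count == 0) {			/* open a new run */
			start = blocks[i];
			count = 1;
		} else if (blocks[i] == start + count) {
			count++;			/* extend the run */
		} else {
			free_run(start, count);		/* flush, then reopen */
			start = blocks[i];
			count = 1;
		}
	}
	if (count)
		free_run(start, count);			/* flush the tail run */
}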
@@ -2184,7 +2184,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
				*p = 0;
				BUFFER_TRACE(parent_bh,
					"call ext3_journal_dirty_metadata");
				ext3_journal_dirty_metadata(handle,
							    parent_bh);
			}
		}
@@ -2704,7 +2704,7 @@ void ext3_read_inode(struct inode * inode)
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
@@ -2724,8 +2724,8 @@ bad_inode:
 *
 * The caller must have write access to iloc->bh.
 */
static int ext3_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext3_iloc *iloc)
{
	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2900,7 +2900,7 @@ int ext3_write_inode(struct inode *inode, int wait)
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Called with inode->sem down.
 */
@@ -3043,13 +3043,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
	return err;
}

/*
 * On success, We end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext3_iloc *iloc)
{
	int err = 0;
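The outstanding reference the comment warns about is normally consumed by ext3_mark_iloc_dirty(). A minimal sketch of the usual pairing (approximately what ext3_mark_inode_dirty() does; treat it as a sketch rather than the exact code):

	/* Sketch: reserve write access to the inode's buffer, then let
	 * ext3_mark_iloc_dirty() update it and release the bh reference. */
	struct ext3_iloc iloc;
	int err = ext3_reserve_inode_write(handle, inode, &iloc);

	if (!err)
		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
	return err;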
@@ -3139,7 +3139,7 @@ out:
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext3_reserve_inode_write, this leaves behind no bh reference and
@@ -3157,7 +3157,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode)
		BUFFER_TRACE(iloc.bh, "get_write_access");
		err = journal_get_write_access(handle, iloc.bh);
		if (!err)
			err = ext3_journal_dirty_metadata(handle,
							  iloc.bh);
		brelse(iloc.bh);
	}