author    | Theodore Ts'o <tytso@mit.edu> | 2008-09-08 22:25:24 -0400
committer | Theodore Ts'o <tytso@mit.edu> | 2008-09-08 22:25:24 -0400
commit    | af5bc92dded4d98dfeabc8b5b9812571345b263d (patch)
tree      | 5cfaf27e673a09d3ad1341c175559be0a3ea990d /fs/ext4/inode.c
parent    | e5f8eab8851dff162e7ade46f084cb8575dc45f7 (diff)
ext4: Fix whitespace checkpatch warnings/errors
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r-- | fs/ext4/inode.c | 96
1 files changed, 48 insertions, 48 deletions
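
The warnings and errors being fixed are the ones reported by the kernel's own style checker, scripts/checkpatch.pl. As a rough sketch of how such problems are typically found (the script ships in the kernel source tree; exact options can vary between kernel versions):

    # Check a source file in place (-f/--file treats the argument as a file, not a patch)
    $ ./scripts/checkpatch.pl --file fs/ext4/inode.c

    # Or check a prepared patch before sending it
    $ git format-patch -1 HEAD && ./scripts/checkpatch.pl 0001-*.patch

Whitespace problems of the kind corrected in the diff below (a space before an opening parenthesis, missing spaces around operators, "type * name" instead of "type *name") are reported as WARNING or ERROR lines naming the offending file and line.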
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7e91913e325b..89c92c0f8297 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -190,7 +190,7 @@ static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
 /*
  * Called at the last iput() if i_nlink is zero.
  */
-void ext4_delete_inode (struct inode * inode)
+void ext4_delete_inode(struct inode *inode)
 {
 	handle_t *handle;
 	int err;
@@ -330,11 +330,11 @@ static int ext4_block_to_path(struct inode *inode,
 	int final = 0;
 
 	if (i_block < 0) {
-		ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
+		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
 	} else if (i_block < direct_blocks) {
 		offsets[n++] = i_block;
 		final = direct_blocks;
-	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
+	} else if ((i_block -= direct_blocks) < indirect_blocks) {
 		offsets[n++] = EXT4_IND_BLOCK;
 		offsets[n++] = i_block;
 		final = ptrs;
@@ -400,14 +400,14 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
 
 	*err = 0;
 	/* i_data is not going away, no lock needed */
-	add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
+	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
 	if (!p->key)
 		goto no_block;
 	while (--depth) {
 		bh = sb_bread(sb, le32_to_cpu(p->key));
 		if (!bh)
 			goto failure;
-		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
+		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
 		/* Reader: end */
 		if (!p->key)
 			goto no_block;
@@ -443,7 +443,7 @@ no_block:
 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
-	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
+	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
 	__le32 *p;
 	ext4_fsblk_t bg_start;
 	ext4_fsblk_t last_block;
@@ -630,7 +630,7 @@ allocated:
 	*err = 0;
 	return ret;
 failed_out:
-	for (i = 0; i <index; i++)
+	for (i = 0; i < index; i++)
 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
 	return ret;
 }
@@ -703,7 +703,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		branch[n].p = (__le32 *) bh->b_data + offsets[n];
 		branch[n].key = cpu_to_le32(new_blocks[n]);
 		*branch[n].p = branch[n].key;
-		if ( n == indirect_blks) {
+		if (n == indirect_blks) {
 			current_block = new_blocks[n];
 			/*
 			 * End of chain, update the last new metablock of
@@ -730,7 +730,7 @@ failed:
 		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
 		ext4_journal_forget(handle, branch[i].bh);
 	}
-	for (i = 0; i <indirect_blks; i++)
+	for (i = 0; i < indirect_blks; i++)
 		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
 
 	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
@@ -783,7 +783,7 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 	if (num == 0 && blks > 1) {
 		current_block = le32_to_cpu(where->key) + 1;
 		for (i = 1; i < blks; i++)
-			*(where->p + i ) = cpu_to_le32(current_block++);
+			*(where->p + i) = cpu_to_le32(current_block++);
 	}
 
 	/*
@@ -1241,7 +1241,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 			BUFFER_TRACE(bh, "call get_create_access");
 			fatal = ext4_journal_get_create_access(handle, bh);
 			if (!fatal && !buffer_uptodate(bh)) {
-				memset(bh->b_data,0,inode->i_sb->s_blocksize);
+				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 				set_buffer_uptodate(bh);
 			}
 			unlock_buffer(bh);
@@ -1266,7 +1266,7 @@ err:
 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 			       ext4_lblk_t block, int create, int *err)
 {
-	struct buffer_head * bh;
+	struct buffer_head *bh;
 
 	bh = ext4_getblk(handle, inode, block, create, err);
 	if (!bh)
@@ -1282,13 +1282,13 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 	return NULL;
 }
 
-static int walk_page_buffers( handle_t *handle,
+static int walk_page_buffers(handle_t *handle,
 				struct buffer_head *head,
 				unsigned from,
 				unsigned to,
 				int *partial,
-				int (*fn)( handle_t *handle,
+				int (*fn)(handle_t *handle,
 					struct buffer_head *bh))
 {
 	struct buffer_head *bh;
 	unsigned block_start, block_end;
@@ -1296,9 +1296,9 @@ static int walk_page_buffers( handle_t *handle,
 	int err, ret = 0;
 	struct buffer_head *next;
 
-	for ( bh = head, block_start = 0;
+	for (bh = head, block_start = 0;
 	     ret == 0 && (bh != head || !block_start);
 	     block_start = block_end, bh = next)
 	{
 		next = bh->b_this_page;
 		block_end = block_start + blocksize;
@@ -1351,23 +1351,23 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
 				loff_t pos, unsigned len, unsigned flags,
 				struct page **pagep, void **fsdata)
 {
 	struct inode *inode = mapping->host;
 	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
 	handle_t *handle;
 	int retries = 0;
 	struct page *page;
 	pgoff_t index;
 	unsigned from, to;
 
 	index = pos >> PAGE_CACHE_SHIFT;
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
 
 retry:
 	handle = ext4_journal_start(inode, needed_blocks);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
 		goto out;
 	}
 
 	page = __grab_cache_page(mapping, index);
@@ -1387,9 +1387,9 @@ retry:
 	}
 
 	if (ret) {
 		unlock_page(page);
 		ext4_journal_stop(handle);
 		page_cache_release(page);
 	}
 
 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2456,7 +2456,7 @@ static int ext4_da_should_update_i_disksize(struct page *page,
 	bh = page_buffers(page);
 	idx = offset >> inode->i_blkbits;
 
-	for (i=0; i < idx; i++)
+	for (i = 0; i < idx; i++)
 		bh = bh->b_this_page;
 
 	if (!buffer_mapped(bh) || (buffer_delay(bh)))
@@ -2476,7 +2476,7 @@ static int ext4_da_write_end(struct file *file,
 	unsigned long start, end;
 
 	start = pos & (PAGE_CACHE_SIZE - 1);
-	end = start + copied -1;
+	end = start + copied - 1;
 
 	/*
 	 * generic_write_end() will run mark_inode_dirty() if i_size
@@ -2591,7 +2591,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
 			return 0;
 	}
 
-	return generic_block_bmap(mapping,block,ext4_get_block);
+	return generic_block_bmap(mapping, block, ext4_get_block);
 }
 
 static int bget_one(handle_t *handle, struct buffer_head *bh)
@@ -3197,7 +3197,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
 	if (!partial->key && *partial->p)
 		/* Writer: end */
 		goto no_top;
-	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
+	for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
 		;
 	/*
 	 * OK, we've found the last block that must survive. The rest of our
@@ -3216,7 +3216,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
 	}
 	/* Writer: end */
 
-	while(partial > p) {
+	while (partial > p) {
 		brelse(partial->bh);
 		partial--;
 	}
@@ -3408,9 +3408,9 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 			/* This zaps the entire block. Bottom up. */
 			BUFFER_TRACE(bh, "free child branches");
 			ext4_free_branches(handle, inode, bh,
-					(__le32*)bh->b_data,
-					(__le32*)bh->b_data + addr_per_block,
+					(__le32 *) bh->b_data,
+					(__le32 *) bh->b_data + addr_per_block,
 					depth);
 
 			/*
 			 * We've probably journalled the indirect block several
@@ -3927,7 +3927,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
 	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
 	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
-	if(!(test_opt (inode->i_sb, NO_UID32))) {
+	if (!(test_opt(inode->i_sb, NO_UID32))) {
 		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
 		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
@@ -3945,7 +3945,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	if (inode->i_mode == 0 ||
 	    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
 		/* this inode is deleted */
-		brelse (bh);
+		brelse(bh);
 		ret = -ESTALE;
 		goto bad_inode;
 	}
@@ -3978,7 +3978,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
 		    EXT4_INODE_SIZE(inode->i_sb)) {
-			brelse (bh);
+			brelse(bh);
 			ret = -EIO;
 			goto bad_inode;
 		}
@@ -4031,7 +4031,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 		init_special_inode(inode, inode->i_mode,
 			new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
 	}
-	brelse (iloc.bh);
+	brelse(iloc.bh);
 	ext4_set_inode_flags(inode);
 	unlock_new_inode(inode);
 	return inode;
@@ -4113,14 +4113,14 @@ static int ext4_do_update_inode(handle_t *handle,
 
 	ext4_get_inode_flags(ei);
 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-	if(!(test_opt(inode->i_sb, NO_UID32))) {
+	if (!(test_opt(inode->i_sb, NO_UID32))) {
 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
 /*
  * Fix up interoperability with old kernels. Otherwise, old inodes get
  * re-used with the upper 16 bits of the uid/gid intact
  */
-		if(!ei->i_dtime) {
+		if (!ei->i_dtime) {
 			raw_inode->i_uid_high =
 				cpu_to_le16(high_16_bits(inode->i_uid));
 			raw_inode->i_gid_high =
@@ -4208,7 +4208,7 @@ static int ext4_do_update_inode(handle_t *handle,
 	ei->i_state &= ~EXT4_STATE_NEW;
 
 out_brelse:
-	brelse (bh);
+	brelse(bh);
 	ext4_std_error(inode->i_sb, err);
 	return err;
 }