author    Jeff Mahoney <jeffm@suse.com>                      2009-03-30 14:02:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>     2009-03-30 15:16:39 -0400
commit    0222e6571c332563a48d4cf5487b67feabe60b5e
tree      44829ca0a4b1343edec08d4f70696cb0d3218975 /fs/reiserfs/do_balan.c
parent    3cd6dbe6feb9b32347e6c6f25a27f0cde9d50418
reiserfs: strip trailing whitespace
This patch strips trailing whitespace from the reiserfs code.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
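The change itself is purely mechanical: every affected line loses its trailing spaces and tabs, and no token in the code changes. As a rough illustration only (this is not the tool used to produce this patch, and the program below is an assumption), a small C filter that applies the same cleanup to its standard input could look like this:

```c
#include <stdio.h>
#include <string.h>

/*
 * Copy stdin to stdout with trailing spaces and tabs removed from each
 * line -- the same mechanical cleanup this patch applies to the reiserfs
 * sources.  Assumes each input line fits in the buffer.
 */
int main(void)
{
	char line[4096];

	while (fgets(line, sizeof(line), stdin)) {
		size_t len = strlen(line);
		int had_newline = len > 0 && line[len - 1] == '\n';

		if (had_newline)
			line[--len] = '\0';

		/* Drop trailing spaces and tabs. */
		while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
			line[--len] = '\0';

		fputs(line, stdout);
		if (had_newline)
			fputc('\n', stdout);
	}

	return 0;
}
```

In practice this kind of cleanup is usually done with an editor or a one-line stream edit rather than a dedicated program; the sketch only makes the transformation explicit.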
Diffstat (limited to 'fs/reiserfs/do_balan.c')

 fs/reiserfs/do_balan.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 723a7f4011d0..4beb964a2a3e 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
  76  #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
  77  #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
  78
  79  /* summary:
  80     if deleting something ( tb->insert_size[0] < 0 )
  81        return(balance_leaf_when_delete()); (flag d handled here)
  82     else
  83        if lnum is larger than 0 we put items into the left node
  84        if rnum is larger than 0 we put items into the right node
  85        if snum1 is larger than 0 we put items into the new node s1
  86        if snum2 is larger than 0 we put items into the new node s2
  87     Note that all *num* count new items being created.
  88
  89     It would be easier to read balance_leaf() if each of these summary
  90     lines was a separate procedure rather than being inlined.  I think
  91     that there are many passages here and in balance_leaf_when_delete() in
  92     which two calls to one procedure can replace two passages, and it
  93     might save cache space and improve software maintenance costs to do so.
  94
  95     Vladimir made the perceptive comment that we should offload most of
  96     the decision making in this function into fix_nodes/check_balance, and
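The summary comment in the hunk above is effectively pseudocode for how balance_leaf() dispatches its work. Purely to make that structure concrete, here is a self-contained sketch built from made-up toy types and helper names; none of this is reiserfs code, and the real function takes different arguments:

```c
/* Toy stand-ins for the reiserfs structures -- illustrative only. */
struct toy_balance {
	int insert_size;	/* < 0 means something is being deleted */
	int lnum, rnum;		/* items to shift into the left/right neighbor */
	int snum1, snum2;	/* items to place into newly allocated nodes */
};

static int  toy_balance_when_delete(struct toy_balance *tb) { (void)tb; return 0; }
static void toy_shift_left(struct toy_balance *tb)           { (void)tb; }
static void toy_shift_right(struct toy_balance *tb)          { (void)tb; }
static void toy_fill_new_node(struct toy_balance *tb, int i) { (void)tb; (void)i; }

/*
 * Mirrors the summary: the delete case is handled separately, otherwise
 * items are pushed left, right, and into up to two new nodes, in order.
 */
static int toy_balance_leaf(struct toy_balance *tb)
{
	if (tb->insert_size < 0)
		return toy_balance_when_delete(tb);	/* "flag d" case */

	if (tb->lnum > 0)
		toy_shift_left(tb);
	if (tb->rnum > 0)
		toy_shift_right(tb);
	if (tb->snum1 > 0)
		toy_fill_new_node(tb, 0);
	if (tb->snum2 > 0)
		toy_fill_new_node(tb, 1);

	return 0;
}
```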
@@ -288,15 +288,15 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
  288      )
  289  {
  290      struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
  291      int item_pos = PATH_LAST_POSITION(tb->tb_path);  /* index into the array of item headers in S[0]
  292                                                          of the affected item */
  293      struct buffer_info bi;
  294      struct buffer_head *S_new[2];  /* new nodes allocated to hold what could not fit into S */
  295      int snum[2];    /* number of items that will be placed
  296                         into S_new (includes partially shifted
  297                         items) */
  298      int sbytes[2];  /* if an item is partially shifted into S_new then
  299                         if it is a directory item
  300                         it is the number of entries from the item that are shifted into S_new
  301                         else
  302                         it is the number of bytes from the item that are shifted into S_new
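The snum/sbytes pair in the hunk above carries the bookkeeping for what moves into each new node: a count of whole items, plus how much of one partially shifted item (directory entries for a directory item, bytes otherwise). As a hypothetical restatement of that comment (the names here are invented, not reiserfs code):

```c
/* Hypothetical restatement of the per-new-node shift bookkeeping above. */
struct toy_new_node_shift {
	int items;		/* items placed into S_new, including a partially
				 * shifted one */
	int partial_units;	/* for a directory item: entries shifted into S_new;
				 * otherwise: bytes shifted into S_new */
};
```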
@@ -1983,7 +1983,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
  1983      /* store_print_tb (tb); */
  1984
  1985      /* do not delete, just comment it out */
  1986      /* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
  1987         "check");*/
  1988      RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
  1989  #ifdef CONFIG_REISERFS_CHECK