author    Jeff Mahoney <jeffm@suse.com>    2009-03-30 14:02:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-03-30 15:16:39 -0400
commit    0222e6571c332563a48d4cf5487b67feabe60b5e (patch)
tree      44829ca0a4b1343edec08d4f70696cb0d3218975
parent    3cd6dbe6feb9b32347e6c6f25a27f0cde9d50418 (diff)
reiserfs: strip trailing whitespace
This patch strips trailing whitespace from the reiserfs code.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  fs/reiserfs/README              |   4
-rw-r--r--  fs/reiserfs/do_balan.c          |  14
-rw-r--r--  fs/reiserfs/file.c              |   8
-rw-r--r--  fs/reiserfs/fix_node.c          |  38
-rw-r--r--  fs/reiserfs/hashes.c            |   2
-rw-r--r--  fs/reiserfs/ibalance.c          |  10
-rw-r--r--  fs/reiserfs/inode.c             |  52
-rw-r--r--  fs/reiserfs/ioctl.c             |   2
-rw-r--r--  fs/reiserfs/journal.c           | 120
-rw-r--r--  fs/reiserfs/lbalance.c          |  18
-rw-r--r--  fs/reiserfs/namei.c             |  30
-rw-r--r--  fs/reiserfs/objectid.c          |   2
-rw-r--r--  fs/reiserfs/prints.c            |  26
-rw-r--r--  fs/reiserfs/procfs.c            |   2
-rw-r--r--  fs/reiserfs/resize.c            |   6
-rw-r--r--  fs/reiserfs/stree.c             |   8
-rw-r--r--  fs/reiserfs/super.c             |  10
-rw-r--r--  fs/reiserfs/tail_conversion.c   |   2
-rw-r--r--  include/linux/reiserfs_fs_sb.h  |  14
19 files changed, 184 insertions, 184 deletions
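A cleanup like this is normally generated mechanically rather than by editing each file by hand. The commit does not record the tooling that was used, so the following is only an illustrative Python sketch of how trailing whitespace could be stripped from a source tree; the fs/reiserfs path and the .c/.h/README file set come from the diffstat above, everything else is hypothetical.

#!/usr/bin/env python3
"""Strip trailing whitespace from source files under a directory tree.

Hypothetical sketch only -- the actual patch may have been produced with
different tooling (an editor macro, a sed one-liner, etc.).
"""
import os
import re
import sys

# Runs of spaces/tabs immediately before a newline, or at end of file.
TRAILING_WS = re.compile(rb"[ \t]+(?=\r?\n)|[ \t]+\Z")

def strip_file(path: str) -> bool:
    """Rewrite `path` with trailing spaces/tabs removed; return True if it changed."""
    with open(path, "rb") as f:
        original = f.read()
    cleaned = TRAILING_WS.sub(b"", original)
    if cleaned != original:
        with open(path, "wb") as f:
            f.write(cleaned)
        return True
    return False

if __name__ == "__main__":
    root = sys.argv[1] if len(sys.argv) > 1 else "fs/reiserfs"
    changed = 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if name.endswith((".c", ".h")) or name == "README":
                if strip_file(os.path.join(dirpath, name)):
                    changed += 1
    print(f"stripped trailing whitespace in {changed} file(s)")

Running something along these lines over the touched directories and committing the result would produce a whitespace-only diff of exactly this shape (184 insertions, 184 deletions).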
diff --git a/fs/reiserfs/README b/fs/reiserfs/README
index 90e1670e4e6f..14e8c9d460e5 100644
--- a/fs/reiserfs/README
+++ b/fs/reiserfs/README
@@ -1,4 +1,4 @@
[LICENSING]

ReiserFS is hereby licensed under the GNU General
Public License version 2.
@@ -31,7 +31,7 @@ the GPL as not allowing those additional licensing options, you read
it wrongly, and Richard Stallman agrees with me, when carefully read
you can see that those restrictions on additional terms do not apply
to the owner of the copyright, and my interpretation of this shall
govern for this license.

Finally, nothing in this license shall be interpreted to allow you to
fail to fairly credit me, or to remove my credits, without my
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 723a7f4011d0..4beb964a2a3e 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

/* summary:
 if deleting something ( tb->insert_size[0] < 0 )
   return(balance_leaf_when_delete()); (flag d handled here)
 else
   if lnum is larger than 0 we put items into the left node
   if rnum is larger than 0 we put items into the right node
   if snum1 is larger than 0 we put items into the new node s1
   if snum2 is larger than 0 we put items into the new node s2
Note that all *num* count new items being created.

It would be easier to read balance_leaf() if each of these summary
lines was a separate procedure rather than being inlined. I think
that there are many passages here and in balance_leaf_when_delete() in
which two calls to one procedure can replace two passages, and it
might save cache space and improve software maintenance costs to do so.

Vladimir made the perceptive comment that we should offload most of
the decision making in this function into fix_nodes/check_balance, and
@@ -288,15 +288,15 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
    )
{
        struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
        int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
                                                            of the affected item */
        struct buffer_info bi;
        struct buffer_head *S_new[2];   /* new nodes allocated to hold what could not fit into S */
        int snum[2];    /* number of items that will be placed
                           into S_new (includes partially shifted
                           items) */
        int sbytes[2];  /* if an item is partially shifted into S_new then
                           if it is a directory item
                           it is the number of entries from the item that are shifted into S_new
                           else
                           it is the number of bytes from the item that are shifted into S_new
@@ -1983,7 +1983,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
        /* store_print_tb (tb); */

        /* do not delete, just comment it out */
/*      print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
        "check");*/
        RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 47bab8978be1..f0160ee03e17 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -20,14 +20,14 @@
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed. If
** this is the last open file descriptor, and the file
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
@@ -223,7 +223,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
}

/* Write @count bytes at position @ppos in a file indicated by @file
   from the buffer @buf.

   generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
   something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index aee50c97988d..a3be7da3e2b9 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -30,8 +30,8 @@
 ** get_direct_parent
 ** get_neighbors
 ** fix_nodes
 **
 **
 **/

#include <linux/time.h>
@@ -377,9 +377,9 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
        int needed_nodes;
        int start_item,         /* position of item we start filling node from */
         end_item,              /* position of item we finish filling node by */
         start_bytes,           /* number of first bytes (entries for directory) of start_item-th item
                                   we do not include into node that is being filled */
         end_bytes;             /* number of last bytes (entries for directory) of end_item-th item
                                   we do node include into node that is being filled */
        int split_item_positions[2];    /* these are positions in virtual item of
                                           items, that are split between S[0] and
@@ -569,7 +569,7 @@ extern struct tree_balance *cur_tb;

/* Set parameters for balancing.
 * Performs write of results of analysis of balancing into structure tb,
 * where it will later be used by the functions that actually do the balancing.
 * Parameters:
 *      tb      tree_balance structure;
 *      h       current level of the node;
@@ -1204,7 +1204,7 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
 *      h       current level of the node;
 *      inum    item number in S[h];
 *      mode    i - insert, p - paste;
 * Returns:     1 - schedule occurred;
 *              0 - balancing for higher levels needed;
 *             -1 - no balancing for higher levels needed;
 *             -2 - no disk space.
@@ -1239,7 +1239,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
        /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
           where 4th parameter is s1bytes and 5th - s2bytes
         */
        short snum012[40] = { 0, };     /* s0num, s1num, s2num for 8 cases
                                           0,1 - do not shift and do not shift but bottle
                                           2 - shift only whole item to left
                                           3 - shift to left and bottle as much as possible
@@ -1288,7 +1288,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)

        create_virtual_node(tb, h);

        /*
           determine maximal number of items we can shift to the left neighbor (in tb structure)
           and the maximal number of bytes that can flow to the left neighbor
           from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
@@ -1349,13 +1349,13 @@ static int ip_check_balance(struct tree_balance *tb, int h)

        {
                int lpar, rpar, nset, lset, rset, lrset;
                /*
                 * regular overflowing of the node
                 */

                /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
                   lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
                   nset, lset, rset, lrset - shows, whether flowing items give better packing
                 */
#define FLOW 1
#define NO_FLOW 0               /* do not any splitting */
@@ -1545,7 +1545,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
 *      h       current level of the node;
 *      inum    item number in S[h];
 *      mode    i - insert, p - paste;
 * Returns:     1 - schedule occurred;
 *              0 - balancing for higher levels needed;
 *             -1 - no balancing for higher levels needed;
 *             -2 - no disk space.
@@ -1728,7 +1728,7 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
 *      h       current level of the node;
 *      inum    item number in S[h];
 *      mode    i - insert, p - paste;
 * Returns:     1 - schedule occurred;
 *              0 - balancing for higher levels needed;
 *             -1 - no balancing for higher levels needed;
 *             -2 - no disk space.
@@ -1822,7 +1822,7 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
 *      h       current level of the node;
 *      inum    item number in S[h];
 *      mode    d - delete, c - cut.
 * Returns:     1 - schedule occurred;
 *              0 - balancing for higher levels needed;
 *             -1 - no balancing for higher levels needed;
 *             -2 - no disk space.
@@ -1851,7 +1851,7 @@ static int dc_check_balance(struct tree_balance *tb, int h)
 *      h       current level of the node;
 *      inum    item number in S[h];
 *      mode    i - insert, p - paste, d - delete, c - cut.
 * Returns:     1 - schedule occurred;
 *              0 - balancing for higher levels needed;
 *             -1 - no balancing for higher levels needed;
 *             -2 - no disk space.
@@ -2296,15 +2296,15 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
 *      analyze what and where should be moved;
 *      get sufficient number of new nodes;
 * Balancing will start only after all resources will be collected at a time.
 *
 * When ported to SMP kernels, only at the last moment after all needed nodes
 * are collected in cache, will the resources be locked using the usual
 * textbook ordered lock acquisition algorithms. Note that ensuring that
 * this code neither write locks what it does not need to write lock nor locks out of order
 * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
 *
 * fix is meant in the sense of render unchanging
 *
 * Latency might be improved by first gathering a list of what buffers are needed
 * and then getting as many of them in parallel as possible? -Hans
 *
@@ -2316,7 +2316,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
 *      ins_ih & ins_sd are used when inserting
 * Returns:     1 - schedule occurred while the function worked;
 *              0 - schedule didn't occur while the function worked;
 *             -1 - if no_disk_space
 */

int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih,        // item head of item being inserted
diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c
index e664ac16fad9..6471c670743e 100644
--- a/fs/reiserfs/hashes.c
+++ b/fs/reiserfs/hashes.c
@@ -7,7 +7,7 @@
 * (see Applied Cryptography, 2nd edition, p448).
 *
 * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
 *
 * Jeremy has agreed to the contents of reiserfs/README. -Hans
 * Yura's function is added (04/07/2000)
 */
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index 063b5514fe29..2074fd95046b 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -278,7 +278,7 @@ static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n)

/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
 * last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
 * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
 */
static void internal_copy_pointers_items(struct buffer_info *dest_bi,
                                         struct buffer_head *src,
@@ -385,7 +385,7 @@ static void internal_move_pointers_items(struct buffer_info *dest_bi,
        if (last_first == FIRST_TO_LAST) {      /* shift_left occurs */
                first_pointer = 0;
                first_item = 0;
                /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
                   for key - with first_item */
                internal_delete_pointers_items(src_bi, first_pointer,
                                               first_item, cpy_num - del_par);
@@ -453,7 +453,7 @@ static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_b
        }
}

/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
 * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfl.
 * Delete pointer_amount items and node pointers from buffer src.
@@ -518,7 +518,7 @@ static void internal_shift1_left(struct tree_balance *tb,
        /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
}

/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
 * Copy n node pointers and n - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfr.
 * Delete n items and node pointers from buffer src.
@@ -749,7 +749,7 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
           this means that new pointers and items must be inserted AFTER *
           child_pos
           }
           else
           {
           it is the position of the leftmost pointer that must be deleted (together with
           its corresponding key to the left of the pointer)
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index fcd302d81447..d106edaef64f 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -52,7 +52,7 @@ void reiserfs_delete_inode(struct inode *inode)
        /* Do quota update inside a transaction for journaled quotas. We must do that
         * after delete_object so that quota updates go into the same transaction as
         * stat data deletion */
        if (!err)
                DQUOT_FREE_INODE(inode);

        if (journal_end(&th, inode->i_sb, jbegin_count))
@@ -363,7 +363,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
        }
        /* make sure we don't read more bytes than actually exist in
         ** the file. This can happen in odd cases where i_size isn't
         ** correct, and when direct item padding results in a few
         ** extra bytes at the end of the direct item
         */
        if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
@@ -438,15 +438,15 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
** -ENOENT instead of a valid buffer. block_prepare_write expects to
** be able to do i/o on the buffers returned, unless an error value
** is also returned.
**
** So, this allows block_prepare_write to be used for reading a single block
** in a page. Where it does not produce a valid page for holes, or past the
** end of the file. This turns out to be exactly what we need for reading
** tails for conversion.
**
** The point of the wrapper is forcing a certain value for create, even
** though the VFS layer is calling this function with create==1. If you
** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
** don't use this function.
*/
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
@@ -602,7 +602,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
        int done;
        int fs_gen;
        struct reiserfs_transaction_handle *th = NULL;
        /* space reserved in transaction batch:
           . 3 balancings in direct->indirect conversion
           . 1 block involved into reiserfs_update_sd()
           XXX in practically impossible worst case direct2indirect()
@@ -754,7 +754,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                reiserfs_write_unlock(inode->i_sb);

                /* the item was found, so new blocks were not added to the file
                 ** there is no need to make sure the inode is updated with this
                 ** transaction
                 */
                return retval;
@@ -986,7 +986,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,

        /* this loop could log more blocks than we had originally asked
         ** for. So, we have to allow the transaction to end if it is
         ** too big or too full. Update the inode so things are
         ** consistent if we crash before the function returns
         **
         ** release the path so that anybody waiting on the path before
@@ -997,7 +997,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                        if (retval)
                                goto failure;
                }
                /* inserting indirect pointers for a hole can take a
                 ** long time. reschedule if needed
                 */
                cond_resched();
@@ -1444,7 +1444,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
           update sd on unlink all that is required is to check for nlink
           here. This bug was first found by Sizif when debugging
           SquidNG/Butterfly, forgotten, and found again after Philippe
           Gramoulle <philippe.gramoulle@mmania.com> reproduced it.

           More logical fix would require changes in fs/inode.c:iput() to
           remove inode from hash-table _after_ fs cleaned disk stuff up and
@@ -1619,7 +1619,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync)
        if (inode->i_sb->s_flags & MS_RDONLY)
                return -EROFS;
        /* memory pressure can sometimes initiate write_inode calls with sync == 1,
         ** these cases are just when the system needs ram, not when the
         ** inode needs to reach disk for safety, and they can safely be
         ** ignored because the altered inode has already been logged.
         */
@@ -1736,7 +1736,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
/* inserts the stat data into the tree, and then calls
   reiserfs_new_directory (to insert ".", ".." item if new object is
   directory) or reiserfs_new_symlink (to insert symlink body if new
   object is symlink) or nothing (if new object is regular file)

   NOTE! uid and gid must already be set in the inode. If we return
   non-zero due to an error, we have to drop the quota previously allocated
@@ -1744,7 +1744,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
   if we return non-zero, we also end the transaction. */
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                       struct inode *dir, int mode, const char *symname,
                       /* 0 for regular, EMTRY_DIR_SIZE for dirs,
                          strlen (symname) for symlinks) */
                       loff_t i_size, struct dentry *dentry,
                       struct inode *inode,
@@ -1794,7 +1794,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                goto out_bad_inode;
        }
        if (old_format_only(sb))
                /* not a perfect generation count, as object ids can be reused, but
                 ** this is as good as reiserfs can do right now.
                 ** note that the private part of inode isn't filled in yet, we have
                 ** to use the directory.
@@ -2081,7 +2081,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)

        if (p_s_inode->i_size > 0) {
                if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
                        // -ENOENT means we truncated past the end of the file,
                        // and get_block_create_0 could not find a block to read in,
                        // which is ok.
                        if (error != -ENOENT)
@@ -2093,11 +2093,11 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
                }
        }

        /* so, if page != NULL, we have a buffer head for the offset at
         ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
         ** then we have an unformatted node. Otherwise, we have a direct item,
         ** and no zeroing is required on disk. We zero after the truncate,
         ** because the truncate might pack the item anyway
         ** (it will unmap bh if it packs).
         */
        /* it is enough to reserve space in transaction for 2 balancings:
@@ -2306,8 +2306,8 @@ static int map_block_for_writepage(struct inode *inode,
        return retval;
}

/*
 * mason@suse.com: updated in 2.5.54 to follow the same general io
 * start/recovery path as __block_write_full_page, along with special
 * code to handle reiserfs tails.
 */
@@ -2447,7 +2447,7 @@ static int reiserfs_write_full_page(struct page *page,
        unlock_page(page);

        /*
         * since any buffer might be the only dirty buffer on the page,
         * the first submit_bh can bring the page out of writeback.
         * be careful with the buffers.
         */
@@ -2466,8 +2466,8 @@ static int reiserfs_write_full_page(struct page *page,
        if (nr == 0) {
                /*
                 * if this page only had a direct item, it is very possible for
                 * no io to be required without there being an error. Or,
                 * someone else could have locked them and sent them down the
                 * pipe without locking the page
                 */
                bh = head;
@@ -2486,7 +2486,7 @@ static int reiserfs_write_full_page(struct page *page,

      fail:
        /* catches various errors, we need to make sure any valid dirty blocks
         * get to the media. The page is currently locked and not marked for
         * writeback
         */
        ClearPageUptodate(page);
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 830332021ed4..0ccc3fdda7bf 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -189,7 +189,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
        }

        /* we unpack by finding the page with the tail, and calling
         ** reiserfs_prepare_write on that page. This will force a
         ** reiserfs_get_block to unpack the tail for us.
         */
        index = inode->i_size >> PAGE_CACHE_SHIFT;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index db91754cfb83..4f787462becc 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,36 +1,36 @@
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interelated, and
** overly complex. I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age. Don't ever call
**                  this. Ever. There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction. clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty. Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk. Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      from within kupdate, it will ignore the immediate flag
*/

@@ -212,7 +212,7 @@ static void allocate_bitmap_nodes(struct super_block *p_s_sb)
 			list_add(&bn->list, &journal->j_bitmap_nodes);
 			journal->j_free_bitmap_nodes++;
 		} else {
-			break;	// this is ok, we'll try again when more are needed
+			break;	/* this is ok, we'll try again when more are needed */
 		}
 	}
 }
@@ -283,7 +283,7 @@ static int free_bitmap_nodes(struct super_block *p_s_sb)
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
@@ -315,7 +315,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
}

/*
** find an available list bitmap. If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
@@ -348,7 +348,7 @@ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
        return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
@@ -376,7 +376,7 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
}

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
@@ -403,7 +403,7 @@ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
                       struct reiserfs_journal_cnode *cn)
@@ -1192,8 +1192,8 @@ static int flush_commit_list(struct super_block *s,
}

/*
** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
                                                          reiserfs_journal_cnode
@@ -1335,8 +1335,8 @@ static int update_journal_header_block(struct super_block *p_s_sb,
        return _update_journal_header_block(p_s_sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
@@ -1382,8 +1382,8 @@ static void del_from_work_list(struct super_block *s,
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT. This can only be called while there are no journal writers,
** and the journal is locked. That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
@@ -1429,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
                goto flush_older_and_return;
        }

        /* start by putting the commit list on disk. This will also flush
         ** the commit lists of any olders transactions
         */
        flush_commit_list(s, jl, 1);
@@ -1444,8 +1444,8 @@ static int flush_journal_list(struct super_block *s,
                goto flush_older_and_return;
        }

        /* loop through each cnode, see if we need to write it,
         ** or wait on a more recent transaction, or just ignore it
         */
        if (atomic_read(&(journal->j_wcount)) != 0) {
                reiserfs_panic(s, "journal-844", "journal list is flushing, "
@@ -1473,8 +1473,8 @@ static int flush_journal_list(struct super_block *s,
                if (!pjl && cn->bh) {
                        saved_bh = cn->bh;

                        /* we do this to make sure nobody releases the buffer while
                         ** we are working with it
                         */
                        get_bh(saved_bh);

@@ -1497,8 +1497,8 @@ static int flush_journal_list(struct super_block *s,
                                goto free_cnode;
                        }

                        /* bh == NULL when the block got to disk on its own, OR,
                         ** the block got freed in a future transaction
                         */
                        if (saved_bh == NULL) {
                                goto free_cnode;
@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct super_block *s,
                         __func__);
      flush_older_and_return:

        /* before we can update the journal header block, we _must_ flush all
         ** real blocks from all older transactions to disk. This is because
         ** once the header block is updated, this transaction will not be
         ** replayed after a crash
@@ -1596,7 +1596,7 @@ static int flush_journal_list(struct super_block *s,
        }

        err = journal->j_errno;
        /* before we can remove everything from the hash tables for this
         ** transaction, we must make sure it can never be replayed
         **
         ** since we are only called from do_journal_end, we know for sure there
@@ -2016,9 +2016,9 @@ static int journal_compare_desc_commit(struct super_block *p_s_sb,
        return 0;
}

/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb,
                                        struct buffer_head *d_bh,
@@ -2380,8 +2380,8 @@ static int journal_read(struct super_block *p_s_sb)
                       bdevname(journal->j_dev_bd, b));
        start = get_seconds();

        /* step 1, read in the journal header block. Check the transaction it says
         ** is the first unflushed, and if that transaction is not valid,
         ** replay is done
         */
        journal->j_header_bh = journal_bread(p_s_sb,
@@ -2406,8 +2406,8 @@ static int journal_read(struct super_block *p_s_sb)
                               le32_to_cpu(jh->j_last_flush_trans_id));
                valid_journal_header = 1;

                /* now, we try to read the first unflushed offset. If it is not valid,
                 ** there is nothing more we can do, and it makes no sense to read
                 ** through the whole log.
                 */
                d_bh =
@@ -2919,7 +2919,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
        return 0;
}

/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
@@ -3040,7 +3040,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
        now = get_seconds();

        /* if there is no room in the journal OR
         ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
         ** we don't sleep if there aren't other writers
         */

@@ -3240,7 +3240,7 @@ int journal_begin(struct reiserfs_transaction_handle *th,
**
** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
** transaction is committed.
**
** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
@@ -3290,7 +3290,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
                               atomic_read(&(journal->j_wcount)));
                return 1;
        }
        /* this error means I've screwed up, and we've overflowed the transaction.
         ** Nothing can be done here, except make the FS readonly or panic.
         */
        if (journal->j_len >= journal->j_trans_max) {
@@ -3380,7 +3380,7 @@ int journal_end(struct reiserfs_transaction_handle *th,
        }
}

/* removes from the current transaction, relsing and descrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
@@ -3478,7 +3478,7 @@ static int can_dirty(struct reiserfs_journal_cnode *cn)
}

/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
                     struct super_block *p_s_sb, unsigned long nblocks)
@@ -3560,13 +3560,13 @@ int reiserfs_flush_old_commits(struct super_block *p_s_sb)

/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done. By the time it wakes up, the transaction it was called has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
@@ -3594,7 +3594,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
                atomic_dec(&(journal->j_wcount));
        }

        /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
         ** will be dealt with by next transaction that actually writes something, but should be taken
         ** care of in this trans
         */
@@ -3603,7 +3603,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
3603 /* if wcount > 0, and we are called to with flush or commit_now, 3603 /* if wcount > 0, and we are called to with flush or commit_now,
3604 ** we wait on j_join_wait. We will wake up when the last writer has 3604 ** we wait on j_join_wait. We will wake up when the last writer has
3605 ** finished the transaction, and started it on its way to the disk. 3605 ** finished the transaction, and started it on its way to the disk.
3606 ** Then, we flush the commit or journal list, and just return 0 3606 ** Then, we flush the commit or journal list, and just return 0
3607 ** because the rest of journal end was already done for this transaction. 3607 ** because the rest of journal end was already done for this transaction.
3608 */ 3608 */
3609 if (atomic_read(&(journal->j_wcount)) > 0) { 3609 if (atomic_read(&(journal->j_wcount)) > 0) {
@@ -3674,7 +3674,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
3674/* 3674/*
3675** Does all the work that makes deleting blocks safe. 3675** Does all the work that makes deleting blocks safe.
3676** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on. 3676** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
3677** 3677**
3678** otherwise: 3678** otherwise:
3679** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes 3679** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
3680** before this transaction has finished. 3680** before this transaction has finished.
@@ -3878,7 +3878,7 @@ extern struct tree_balance *cur_tb;
3878** be written to disk while we are altering it. So, we must: 3878** be written to disk while we are altering it. So, we must:
3879** clean it 3879** clean it
3880** wait on it. 3880** wait on it.
3881** 3881**
3882*/ 3882*/
3883int reiserfs_prepare_for_journal(struct super_block *p_s_sb, 3883int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
3884 struct buffer_head *bh, int wait) 3884 struct buffer_head *bh, int wait)
@@ -3920,7 +3920,7 @@ static void flush_old_journal_lists(struct super_block *s)
3920 } 3920 }
3921} 3921}
3922 3922
3923/* 3923/*
3924** long and ugly. If flush, will not return until all commit 3924** long and ugly. If flush, will not return until all commit
3925** blocks and all real buffers in the trans are on disk. 3925** blocks and all real buffers in the trans are on disk.
3926** If no_async, won't return until all commit blocks are on disk. 3926** If no_async, won't return until all commit blocks are on disk.
@@ -3981,7 +3981,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
3981 wait_on_commit = 1; 3981 wait_on_commit = 1;
3982 } 3982 }
3983 3983
3984 /* check_journal_end locks the journal, and unlocks if it does not return 1 3984 /* check_journal_end locks the journal, and unlocks if it does not return 1
3985 ** it tells us if we should continue with the journal_end, or just return 3985 ** it tells us if we should continue with the journal_end, or just return
3986 */ 3986 */
3987 if (!check_journal_end(th, p_s_sb, nblocks, flags)) { 3987 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
@@ -4078,7 +4078,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4078 last_cn->next = jl_cn; 4078 last_cn->next = jl_cn;
4079 } 4079 }
4080 last_cn = jl_cn; 4080 last_cn = jl_cn;
4081 /* make sure the block we are trying to log is not a block 4081 /* make sure the block we are trying to log is not a block
4082 of journal or reserved area */ 4082 of journal or reserved area */
4083 4083
4084 if (is_block_in_log_or_reserved_area 4084 if (is_block_in_log_or_reserved_area
@@ -4225,9 +4225,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
4225 } else if (!(jl->j_state & LIST_COMMIT_PENDING)) 4225 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
4226 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10); 4226 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4227 4227
4228 /* if the next transaction has any chance of wrapping, flush 4228 /* if the next transaction has any chance of wrapping, flush
4229 ** transactions that might get overwritten. If any journal lists are very 4229 ** transactions that might get overwritten. If any journal lists are very
4230 ** old flush them as well. 4230 ** old flush them as well.
4231 */ 4231 */
4232 first_jl: 4232 first_jl:
4233 list_for_each_safe(entry, safe, &journal->j_journal_list) { 4233 list_for_each_safe(entry, safe, &journal->j_journal_list) {
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index 21a171ceba1d..381750a155f6 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -119,8 +119,8 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
119 DEH_SIZE * copy_count + copy_records_len); 119 DEH_SIZE * copy_count + copy_records_len);
120} 120}
121 121
122/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or 122/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
123 part of it or nothing (see the return 0 below) from SOURCE to the end 123 part of it or nothing (see the return 0 below) from SOURCE to the end
124 (if last_first) or beginning (!last_first) of the DEST */ 124 (if last_first) or beginning (!last_first) of the DEST */
125/* returns 1 if anything was copied, else 0 */ 125/* returns 1 if anything was copied, else 0 */
126static int leaf_copy_boundary_item(struct buffer_info *dest_bi, 126static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
@@ -396,7 +396,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
396 else { 396 else {
397 struct item_head n_ih; 397 struct item_head n_ih;
398 398
399 /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST 399 /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
400 part defined by 'cpy_bytes'; create new item header; change old item_header (????); 400 part defined by 'cpy_bytes'; create new item header; change old item_header (????);
401 n_ih = new item_header; 401 n_ih = new item_header;
402 */ 402 */
@@ -426,7 +426,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
426 else { 426 else {
427 struct item_head n_ih; 427 struct item_head n_ih;
428 428
429 /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST 429 /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
430 part defined by 'cpy_bytes'; create new item header; 430 part defined by 'cpy_bytes'; create new item header;
431 n_ih = new item_header; 431 n_ih = new item_header;
432 */ 432 */
@@ -724,7 +724,7 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
724static void leaf_delete_items_entirely(struct buffer_info *bi, 724static void leaf_delete_items_entirely(struct buffer_info *bi,
725 int first, int del_num); 725 int first, int del_num);
726/* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR. 726/* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
727 If not. 727 If not.
728 If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of 728 If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
729 the first item. Part defined by del_bytes. Don't delete first item header 729 the first item. Part defined by del_bytes. Don't delete first item header
730 If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of 730 If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
@@ -783,7 +783,7 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
783 /* len = body len of item */ 783 /* len = body len of item */
784 len = ih_item_len(ih); 784 len = ih_item_len(ih);
785 785
786 /* delete the part of the last item of the bh 786 /* delete the part of the last item of the bh
787 do not delete item header 787 do not delete item header
788 */ 788 */
789 leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1, 789 leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
@@ -865,7 +865,7 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before,
865 } 865 }
866} 866}
867 867
868/* paste paste_size bytes to affected_item_num-th item. 868/* paste paste_size bytes to affected_item_num-th item.
869 When item is a directory, this only prepare space for new entries */ 869 When item is a directory, this only prepare space for new entries */
870void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num, 870void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
871 int pos_in_item, int paste_size, 871 int pos_in_item, int paste_size,
@@ -1022,7 +1022,7 @@ static int leaf_cut_entries(struct buffer_head *bh,
1022/* when cut item is part of regular file 1022/* when cut item is part of regular file
1023 pos_in_item - first byte that must be cut 1023 pos_in_item - first byte that must be cut
1024 cut_size - number of bytes to be cut beginning from pos_in_item 1024 cut_size - number of bytes to be cut beginning from pos_in_item
1025 1025
1026 when cut item is part of directory 1026 when cut item is part of directory
1027 pos_in_item - number of first deleted entry 1027 pos_in_item - number of first deleted entry
1028 cut_size - count of deleted entries 1028 cut_size - count of deleted entries
@@ -1275,7 +1275,7 @@ void leaf_paste_entries(struct buffer_info *bi,
1275 /* change item key if necessary (when we paste before 0-th entry */ 1275 /* change item key if necessary (when we paste before 0-th entry */
1276 if (!before) { 1276 if (!before) {
1277 set_le_ih_k_offset(ih, deh_offset(new_dehs)); 1277 set_le_ih_k_offset(ih, deh_offset(new_dehs));
1278/* memcpy (&ih->ih_key.k_offset, 1278/* memcpy (&ih->ih_key.k_offset,
1279 &new_dehs->deh_offset, SHORT_KEY_SIZE);*/ 1279 &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
1280 } 1280 }
1281#ifdef CONFIG_REISERFS_CHECK 1281#ifdef CONFIG_REISERFS_CHECK
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index cb1a9e977907..9d1070e741fc 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -106,7 +106,7 @@ key of the first directory entry in it.
106This function first calls search_by_key, then, if item whose first 106This function first calls search_by_key, then, if item whose first
107entry matches is not found it looks for the entry inside directory 107entry matches is not found it looks for the entry inside directory
108item found by search_by_key. Fills the path to the entry, and to the 108item found by search_by_key. Fills the path to the entry, and to the
109entry position in the item 109entry position in the item
110 110
111*/ 111*/
112 112
@@ -371,7 +371,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
371 return d_splice_alias(inode, dentry); 371 return d_splice_alias(inode, dentry);
372} 372}
373 373
374/* 374/*
375** looks up the dentry of the parent directory for child. 375** looks up the dentry of the parent directory for child.
376** taken from ext2_get_parent 376** taken from ext2_get_parent
377*/ 377*/
@@ -401,7 +401,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child)
401 return d_obtain_alias(inode); 401 return d_obtain_alias(inode);
402} 402}
403 403
404/* add entry to the directory (entry can be hidden). 404/* add entry to the directory (entry can be hidden).
405 405
406insert definition of when hidden directories are used here -Hans 406insert definition of when hidden directories are used here -Hans
407 407
@@ -559,7 +559,7 @@ static int drop_new_inode(struct inode *inode)
559 return 0; 559 return 0;
560} 560}
561 561
562/* utility function that does setup for reiserfs_new_inode. 562/* utility function that does setup for reiserfs_new_inode.
563** DQUOT_INIT needs lots of credits so it's better to have it 563** DQUOT_INIT needs lots of credits so it's better to have it
564** outside of a transaction, so we had to pull some bits of 564** outside of a transaction, so we had to pull some bits of
565** reiserfs_new_inode out into this func. 565** reiserfs_new_inode out into this func.
@@ -820,7 +820,7 @@ static inline int reiserfs_empty_dir(struct inode *inode)
820{ 820{
821 /* we can cheat because an old format dir cannot have 821 /* we can cheat because an old format dir cannot have
822 ** EMPTY_DIR_SIZE, and a new format dir cannot have 822 ** EMPTY_DIR_SIZE, and a new format dir cannot have
823 ** EMPTY_DIR_SIZE_V1. So, if the inode is either size, 823 ** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
824 ** regardless of disk format version, the directory is empty. 824 ** regardless of disk format version, the directory is empty.
825 */ 825 */
826 if (inode->i_size != EMPTY_DIR_SIZE && 826 if (inode->i_size != EMPTY_DIR_SIZE &&
@@ -1162,7 +1162,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
1162 return retval; 1162 return retval;
1163} 1163}
1164 1164
1165// de contains information pointing to an entry which 1165/* de contains information pointing to an entry which */
1166static int de_still_valid(const char *name, int len, 1166static int de_still_valid(const char *name, int len,
1167 struct reiserfs_dir_entry *de) 1167 struct reiserfs_dir_entry *de)
1168{ 1168{
@@ -1206,10 +1206,10 @@ static void set_ino_in_dir_entry(struct reiserfs_dir_entry *de,
1206 de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid; 1206 de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid;
1207} 1207}
1208 1208
1209/* 1209/*
1210 * process, that is going to call fix_nodes/do_balance must hold only 1210 * process, that is going to call fix_nodes/do_balance must hold only
1211 * one path. If it holds 2 or more, it can get into endless waiting in 1211 * one path. If it holds 2 or more, it can get into endless waiting in
1212 * get_empty_nodes or its clones 1212 * get_empty_nodes or its clones
1213 */ 1213 */
1214static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, 1214static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1215 struct inode *new_dir, struct dentry *new_dentry) 1215 struct inode *new_dir, struct dentry *new_dentry)
@@ -1263,7 +1263,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1263 1263
1264 old_inode_mode = old_inode->i_mode; 1264 old_inode_mode = old_inode->i_mode;
1265 if (S_ISDIR(old_inode_mode)) { 1265 if (S_ISDIR(old_inode_mode)) {
1266 // make sure, that directory being renamed has correct ".." 1266 // make sure, that directory being renamed has correct ".."
1267 // and that its new parent directory has not too many links 1267 // and that its new parent directory has not too many links
1268 // already 1268 // already
1269 1269
@@ -1274,8 +1274,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1274 } 1274 }
1275 } 1275 }
1276 1276
1277 /* directory is renamed, its parent directory will be changed, 1277 /* directory is renamed, its parent directory will be changed,
1278 ** so find ".." entry 1278 ** so find ".." entry
1279 */ 1279 */
1280 dot_dot_de.de_gen_number_bit_string = NULL; 1280 dot_dot_de.de_gen_number_bit_string = NULL;
1281 retval = 1281 retval =
@@ -1385,9 +1385,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1385 this stuff, yes? Then, having 1385 this stuff, yes? Then, having
1386 gathered everything into RAM we 1386 gathered everything into RAM we
1387 should lock the buffers, yes? -Hans */ 1387 should lock the buffers, yes? -Hans */
1388 /* probably. our rename needs to hold more 1388 /* probably. our rename needs to hold more
1389 ** than one path at once. The seals would 1389 ** than one path at once. The seals would
1390 ** have to be written to deal with multi-path 1390 ** have to be written to deal with multi-path
1391 ** issues -chris 1391 ** issues -chris
1392 */ 1392 */
1393 /* sanity checking before doing the rename - avoid races many 1393 /* sanity checking before doing the rename - avoid races many
@@ -1465,7 +1465,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1465 } 1465 }
1466 1466
1467 if (S_ISDIR(old_inode_mode)) { 1467 if (S_ISDIR(old_inode_mode)) {
1468 // adjust ".." of renamed directory 1468 /* adjust ".." of renamed directory */
1469 set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir)); 1469 set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
1470 journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh); 1470 journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);
1471 1471
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index d2d6b5650188..3a6de810bd61 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -180,7 +180,7 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
180 180
181 if (cur_size > new_size) { 181 if (cur_size > new_size) {
182 /* mark everyone used that was listed as free at the end of the objectid 182 /* mark everyone used that was listed as free at the end of the objectid
183 ** map 183 ** map
184 */ 184 */
185 objectid_map[new_size - 1] = objectid_map[cur_size - 1]; 185 objectid_map[new_size - 1] = objectid_map[cur_size - 1];
186 set_sb_oid_cursize(disk_sb, new_size); 186 set_sb_oid_cursize(disk_sb, new_size);
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 8e826c07cd21..536eacaeb710 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -178,11 +178,11 @@ static char *is_there_reiserfs_struct(char *fmt, int *what)
178 appropriative printk. With this reiserfs_warning you can use format 178 appropriative printk. With this reiserfs_warning you can use format
179 specification for complex structures like you used to do with 179 specification for complex structures like you used to do with
180 printfs for integers, doubles and pointers. For instance, to print 180 printfs for integers, doubles and pointers. For instance, to print
181 out key structure you have to write just: 181 out key structure you have to write just:
182 reiserfs_warning ("bad key %k", key); 182 reiserfs_warning ("bad key %k", key);
183 instead of 183 instead of
184 printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid, 184 printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
185 key->k_offset, key->k_uniqueness); 185 key->k_offset, key->k_uniqueness);
186*/ 186*/
187static DEFINE_SPINLOCK(error_lock); 187static DEFINE_SPINLOCK(error_lock);
188static void prepare_error_buf(const char *fmt, va_list args) 188static void prepare_error_buf(const char *fmt, va_list args)
@@ -244,11 +244,11 @@ static void prepare_error_buf(const char *fmt, va_list args)
244} 244}
245 245
246/* in addition to usual conversion specifiers this accepts reiserfs 246/* in addition to usual conversion specifiers this accepts reiserfs
247 specific conversion specifiers: 247 specific conversion specifiers:
248 %k to print little endian key, 248 %k to print little endian key,
249 %K to print cpu key, 249 %K to print cpu key,
250 %h to print item_head, 250 %h to print item_head,
251 %t to print directory entry 251 %t to print directory entry
252 %z to print block head (arg must be struct buffer_head * 252 %z to print block head (arg must be struct buffer_head *
253 %b to print buffer_head 253 %b to print buffer_head
254*/ 254*/
@@ -314,17 +314,17 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
314 maintainer-errorid. Don't bother with reusing errorids, there are 314 maintainer-errorid. Don't bother with reusing errorids, there are
315 lots of numbers out there. 315 lots of numbers out there.
316 316
317 Example: 317 Example:
318 318
319 reiserfs_panic( 319 reiserfs_panic(
320 p_sb, "reiser-29: reiserfs_new_blocknrs: " 320 p_sb, "reiser-29: reiserfs_new_blocknrs: "
321 "one of search_start or rn(%d) is equal to MAX_B_NUM," 321 "one of search_start or rn(%d) is equal to MAX_B_NUM,"
322 "which means that we are optimizing location based on the bogus location of a temp buffer (%p).", 322 "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
323 rn, bh 323 rn, bh
324 ); 324 );
325 325
326 Regular panic()s sometimes clear the screen before the message can 326 Regular panic()s sometimes clear the screen before the message can
327 be read, thus the need for the while loop. 327 be read, thus the need for the while loop.
328 328
329 Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it 329 Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
330 pointless complexity): 330 pointless complexity):
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index d4d7f1433ed0..d5066400638a 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -633,7 +633,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start,
633 * 633 *
634 */ 634 */
635 635
636/* 636/*
637 * Make Linus happy. 637 * Make Linus happy.
638 * Local variables: 638 * Local variables:
639 * c-indentation-style: "K&R" 639 * c-indentation-style: "K&R"
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index f71c3948edef..238e9d9b31e0 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README 2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3 */ 3 */
4 4
5/* 5/*
6 * Written by Alexander Zarochentcev. 6 * Written by Alexander Zarochentcev.
7 * 7 *
8 * The kernel part of the (on-line) reiserfs resizer. 8 * The kernel part of the (on-line) reiserfs resizer.
@@ -101,7 +101,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
101 memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size); 101 memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
102 102
103 /* just in case vfree schedules on us, copy the new 103 /* just in case vfree schedules on us, copy the new
104 ** pointer into the journal struct before freeing the 104 ** pointer into the journal struct before freeing the
105 ** old one 105 ** old one
106 */ 106 */
107 node_tmp = jb->bitmaps; 107 node_tmp = jb->bitmaps;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index b2eaa0c6b7b7..a65bfee28bb8 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -77,7 +77,7 @@ inline void copy_item_head(struct item_head *p_v_to,
77/* k1 is pointer to on-disk structure which is stored in little-endian 77/* k1 is pointer to on-disk structure which is stored in little-endian
78 form. k2 is pointer to cpu variable. For key of items of the same 78 form. k2 is pointer to cpu variable. For key of items of the same
79 object this returns 0. 79 object this returns 0.
80 Returns: -1 if key1 < key2 80 Returns: -1 if key1 < key2
81 0 if key1 == key2 81 0 if key1 == key2
82 1 if key1 > key2 */ 82 1 if key1 > key2 */
83inline int comp_short_keys(const struct reiserfs_key *le_key, 83inline int comp_short_keys(const struct reiserfs_key *le_key,
@@ -890,7 +890,7 @@ static inline int prepare_for_direct_item(struct treepath *path,
890 } 890 }
891 // new file gets truncated 891 // new file gets truncated
892 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) { 892 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
893 // 893 //
894 round_len = ROUND_UP(new_file_length); 894 round_len = ROUND_UP(new_file_length);
895 /* this was n_new_file_length < le_ih ... */ 895 /* this was n_new_file_length < le_ih ... */
896 if (round_len < le_ih_k_offset(le_ih)) { 896 if (round_len < le_ih_k_offset(le_ih)) {
@@ -1443,7 +1443,7 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
1443 if (atomic_read(&p_s_inode->i_count) > 1 || 1443 if (atomic_read(&p_s_inode->i_count) > 1 ||
1444 !tail_has_to_be_packed(p_s_inode) || 1444 !tail_has_to_be_packed(p_s_inode) ||
1445 !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) { 1445 !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
1446 // leave tail in an unformatted node 1446 /* leave tail in an unformatted node */
1447 *p_c_mode = M_SKIP_BALANCING; 1447 *p_c_mode = M_SKIP_BALANCING;
1448 cut_bytes = 1448 cut_bytes =
1449 n_block_size - (n_new_file_size & (n_block_size - 1)); 1449 n_block_size - (n_new_file_size & (n_block_size - 1));
@@ -1826,7 +1826,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *p
1826 /* While there are bytes to truncate and previous file item is presented in the tree. */ 1826 /* While there are bytes to truncate and previous file item is presented in the tree. */
1827 1827
1828 /* 1828 /*
1829 ** This loop could take a really long time, and could log 1829 ** This loop could take a really long time, and could log
1830 ** many more blocks than a transaction can hold. So, we do a polite 1830 ** many more blocks than a transaction can hold. So, we do a polite
1831 ** journal end here, and if the transaction needs ending, we make 1831 ** journal end here, and if the transaction needs ending, we make
1832 ** sure the file is consistent before ending the current trans 1832 ** sure the file is consistent before ending the current trans
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 4a1e16362ebd..d7519b951500 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -758,7 +758,7 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
758 char **opt_arg, unsigned long *bit_flags) 758 char **opt_arg, unsigned long *bit_flags)
759{ 759{
760 char *p; 760 char *p;
761 /* foo=bar, 761 /* foo=bar,
762 ^ ^ ^ 762 ^ ^ ^
763 | | +-- option_end 763 | | +-- option_end
764 | +-- arg_start 764 | +-- arg_start
@@ -1348,7 +1348,7 @@ static int read_super_block(struct super_block *s, int offset)
1348 } 1348 }
1349 // 1349 //
1350 // ok, reiserfs signature (old or new) found in at the given offset 1350 // ok, reiserfs signature (old or new) found in at the given offset
1351 // 1351 //
1352 fs_blocksize = sb_blocksize(rs); 1352 fs_blocksize = sb_blocksize(rs);
1353 brelse(bh); 1353 brelse(bh);
1354 sb_set_blocksize(s, fs_blocksize); 1354 sb_set_blocksize(s, fs_blocksize);
@@ -1534,8 +1534,8 @@ static int what_hash(struct super_block *s)
1534 code = find_hash_out(s); 1534 code = find_hash_out(s);
1535 1535
1536 if (code != UNSET_HASH && reiserfs_hash_detect(s)) { 1536 if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
1537 /* detection has found the hash, and we must check against the 1537 /* detection has found the hash, and we must check against the
1538 ** mount options 1538 ** mount options
1539 */ 1539 */
1540 if (reiserfs_rupasov_hash(s) && code != YURA_HASH) { 1540 if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
1541 reiserfs_warning(s, "reiserfs-2507", 1541 reiserfs_warning(s, "reiserfs-2507",
@@ -1567,7 +1567,7 @@ static int what_hash(struct super_block *s)
1567 } 1567 }
1568 } 1568 }
1569 1569
1570 /* if we are mounted RW, and we have a new valid hash code, update 1570 /* if we are mounted RW, and we have a new valid hash code, update
1571 ** the super 1571 ** the super
1572 */ 1572 */
1573 if (code != UNSET_HASH && 1573 if (code != UNSET_HASH &&
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index 083f74435f65..0635cfe0f0b7 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -46,7 +46,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
46 /* Set the key to search for the place for new unfm pointer */ 46 /* Set the key to search for the place for new unfm pointer */
47 make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4); 47 make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
48 48
49 // FIXME: we could avoid this 49 /* FIXME: we could avoid this */
50 if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) { 50 if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
51 reiserfs_error(sb, "PAP-14030", 51 reiserfs_error(sb, "PAP-14030",
52 "pasted or inserted byte exists in " 52 "pasted or inserted byte exists in "
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 4686b90886ed..5621d87c4479 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -14,7 +14,7 @@ typedef enum {
14} reiserfs_super_block_flags; 14} reiserfs_super_block_flags;
15 15
16/* struct reiserfs_super_block accessors/mutators 16/* struct reiserfs_super_block accessors/mutators
17 * since this is a disk structure, it will always be in 17 * since this is a disk structure, it will always be in
18 * little endian format. */ 18 * little endian format. */
19#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count)) 19#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
20#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v)) 20#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
@@ -83,16 +83,16 @@ typedef enum {
83 83
84/* LOGGING -- */ 84/* LOGGING -- */
85 85
86/* These all interelate for performance. 86/* These all interelate for performance.
87** 87**
88** If the journal block count is smaller than n transactions, you lose speed. 88** If the journal block count is smaller than n transactions, you lose speed.
89** I don't know what n is yet, I'm guessing 8-16. 89** I don't know what n is yet, I'm guessing 8-16.
90** 90**
91** typical transaction size depends on the application, how often fsync is 91** typical transaction size depends on the application, how often fsync is
92** called, and how many metadata blocks you dirty in a 30 second period. 92** called, and how many metadata blocks you dirty in a 30 second period.
93** The more small files (<16k) you use, the larger your transactions will 93** The more small files (<16k) you use, the larger your transactions will
94** be. 94** be.
95** 95**
96** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal 96** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
97** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough 97** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
98** to prevent wrapping before dirty meta blocks get to disk. 98** to prevent wrapping before dirty meta blocks get to disk.
@@ -242,7 +242,7 @@ struct reiserfs_journal {
242 242
243 struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */ 243 struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
244 struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */ 244 struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
245 struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all 245 struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
246 the transactions */ 246 the transactions */
247 struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */ 247 struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
248 int j_persistent_trans; 248 int j_persistent_trans;
@@ -426,7 +426,7 @@ enum reiserfs_mount_options {
426 partition will be dealt with in a 426 partition will be dealt with in a
427 manner of 3.5.x */ 427 manner of 3.5.x */
428 428
429/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting 429/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
430** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option 430** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
431** is not required. If the normal autodection code can't determine which 431** is not required. If the normal autodection code can't determine which
432** hash to use (because both hashes had the same value for a file) 432** hash to use (because both hashes had the same value for a file)