path: root/fs/reiserfs/do_balan.c
author    Jeff Mahoney <jeffm@suse.com>    2014-04-23 10:00:36 -0400
committer Jan Kara <jack@suse.cz>          2014-05-06 16:52:19 -0400
commit    098297b27d23ad9d0fc302e3417474d9342c6c14 (patch)
tree      58f2054cd9933225ef1ae9c7febedc9160041af6  /fs/reiserfs/do_balan.c
parent    4cf5f7addf18ecae2ea49b11944976cbd26d5281 (diff)
reiserfs: cleanup, reformat comments to normal kernel style
This patch reformats comments in the reiserfs code to fit in 80 columns and
to follow the style rules.

There is no functional change but it helps make my eyes bleed less.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Jan Kara <jack@suse.cz>
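For reference, a minimal sketch of the conversion this patch applies (illustrative only, not copied from the patch; the real hunks follow below): runs of one-line comments become a single block comment with /* and */ on their own lines and each continuation line prefixed by " * ", re-wrapped to 80 columns.

/* Before: each line carries its own open and close markers,   */
/* and long lines routinely run past the 80-column limit.      */

/*
 * After: one block comment in normal kernel style, opened and
 * closed on dedicated lines, every continuation line prefixed
 * with " * ", and the text re-wrapped to fit in 80 columns.
 */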
Diffstat (limited to 'fs/reiserfs/do_balan.c')
-rw-r--r--  fs/reiserfs/do_balan.c  276
1 file changed, 154 insertions, 122 deletions
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 80b2b1b37169..399b2009b677 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -2,18 +2,13 @@
  * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
  */
 
-/* Now we have all buffers that must be used in balancing of the tree */
-/* Further calculations can not cause schedule(), and thus the buffer */
-/* tree will be stable until the balancing will be finished */
-/* balance the tree according to the analysis made before, */
-/* and using buffers obtained after all above. */
-
-/**
- ** balance_leaf_when_delete
- ** balance_leaf
- ** do_balance
- **
- **/
+/*
+ * Now we have all buffers that must be used in balancing of the tree
+ * Further calculations can not cause schedule(), and thus the buffer
+ * tree will be stable until the balancing will be finished
+ * balance the tree according to the analysis made before,
+ * and using buffers obtained after all above.
+ */
 
 #include <asm/uaccess.h>
 #include <linux/time.h>
@@ -68,35 +63,39 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
 
-/* summary:
- if deleting something ( tb->insert_size[0] < 0 )
- return(balance_leaf_when_delete()); (flag d handled here)
- else
- if lnum is larger than 0 we put items into the left node
- if rnum is larger than 0 we put items into the right node
- if snum1 is larger than 0 we put items into the new node s1
- if snum2 is larger than 0 we put items into the new node s2
-Note that all *num* count new items being created.
-
-It would be easier to read balance_leaf() if each of these summary
-lines was a separate procedure rather than being inlined. I think
-that there are many passages here and in balance_leaf_when_delete() in
-which two calls to one procedure can replace two passages, and it
-might save cache space and improve software maintenance costs to do so.
-
-Vladimir made the perceptive comment that we should offload most of
-the decision making in this function into fix_nodes/check_balance, and
-then create some sort of structure in tb that says what actions should
-be performed by do_balance.
-
--Hans */
-
-/* Balance leaf node in case of delete or cut: insert_size[0] < 0
+/*
+ * summary:
+ * if deleting something ( tb->insert_size[0] < 0 )
+ * return(balance_leaf_when_delete()); (flag d handled here)
+ * else
+ * if lnum is larger than 0 we put items into the left node
+ * if rnum is larger than 0 we put items into the right node
+ * if snum1 is larger than 0 we put items into the new node s1
+ * if snum2 is larger than 0 we put items into the new node s2
+ * Note that all *num* count new items being created.
+ *
+ * It would be easier to read balance_leaf() if each of these summary
+ * lines was a separate procedure rather than being inlined. I think
+ * that there are many passages here and in balance_leaf_when_delete() in
+ * which two calls to one procedure can replace two passages, and it
+ * might save cache space and improve software maintenance costs to do so.
+ *
+ * Vladimir made the perceptive comment that we should offload most of
+ * the decision making in this function into fix_nodes/check_balance, and
+ * then create some sort of structure in tb that says what actions should
+ * be performed by do_balance.
+ *
+ * -Hans
+ */
+
+/*
+ * Balance leaf node in case of delete or cut: insert_size[0] < 0
  *
  * lnum, rnum can have values >= -1
  * -1 means that the neighbor must be joined with S
  * 0 means that nothing should be done with the neighbor
- * >0 means to shift entirely or partly the specified number of items to the neighbor
+ * >0 means to shift entirely or partly the specified number of items
+ *    to the neighbor
  */
 static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 {
@@ -149,8 +148,16 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 case M_CUT:{ /* cut item in S[0] */
 if (is_direntry_le_ih(ih)) {
 
-/* UFS unlink semantics are such that you can only delete one directory entry at a time. */
-/* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
+/*
+ * UFS unlink semantics are such that you
+ * can only delete one directory entry at
+ * a time.
+ */
+
+/*
+ * when we cut a directory tb->insert_size[0]
+ * means number of entries to be cut (always 1)
+ */
 tb->insert_size[0] = -1;
 leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
 -tb->insert_size[0]);
@@ -183,13 +190,22 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 "UNKNOWN"), flag);
 }
 
-/* the rule is that no shifting occurs unless by shifting a node can be freed */
+/*
+ * the rule is that no shifting occurs unless by shifting
+ * a node can be freed
+ */
 n = B_NR_ITEMS(tbS0);
-if (tb->lnum[0]) { /* L[0] takes part in balancing */
-if (tb->lnum[0] == -1) { /* L[0] must be joined with S[0] */
-if (tb->rnum[0] == -1) { /* R[0] must be also joined with S[0] */
+/* L[0] takes part in balancing */
+if (tb->lnum[0]) {
+/* L[0] must be joined with S[0] */
+if (tb->lnum[0] == -1) {
+/* R[0] must be also joined with S[0] */
+if (tb->rnum[0] == -1) {
 if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
-/* all contents of all the 3 buffers will be in L[0] */
+/*
+ * all contents of all the 3 buffers
+ * will be in L[0]
+ */
 if (PATH_H_POSITION(tb->tb_path, 1) == 0
 && 1 < B_NR_ITEMS(tb->FR[0]))
 replace_key(tb, tb->CFL[0],
@@ -208,7 +224,10 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 
 return 0;
 }
-/* all contents of all the 3 buffers will be in R[0] */
+/*
+ * all contents of all the 3 buffers will
+ * be in R[0]
+ */
 leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
 NULL);
 leaf_move_items(LEAF_FROM_L_TO_R, tb,
@@ -233,7 +252,11 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
 
 return 0;
 }
-/* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */
+
+/*
+ * a part of contents of S[0] will be in L[0] and the
+ * rest part of S[0] will be in R[0]
+ */
 
 RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
 (tb->lnum[0] + tb->rnum[0] > n + 1),
@@ -1178,9 +1201,7 @@ struct buffer_head *get_FEB(struct tree_balance *tb)
 return tb->used[i];
 }
 
-/* This is now used because reiserfs_free_block has to be able to
-** schedule.
-*/
+/* This is now used because reiserfs_free_block has to be able to schedule. */
 static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
 {
 int i;
@@ -1335,8 +1356,10 @@ static int check_before_balancing(struct tree_balance *tb)
 "mount point.");
 }
 
-/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
- prepped all of these for us). */
+/*
+ * double check that buffers that we will modify are unlocked.
+ * (fix_nodes should already have prepped all of these for us).
+ */
 if (tb->lnum[0]) {
 retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
 retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
@@ -1429,49 +1452,51 @@ static void check_internal_levels(struct tree_balance *tb)
 
 #endif
 
-/* Now we have all of the buffers that must be used in balancing of
- the tree. We rely on the assumption that schedule() will not occur
- while do_balance works. ( Only interrupt handlers are acceptable.)
- We balance the tree according to the analysis made before this,
- using buffers already obtained. For SMP support it will someday be
- necessary to add ordered locking of tb. */
-
-/* Some interesting rules of balancing:
-
- we delete a maximum of two nodes per level per balancing: we never
- delete R, when we delete two of three nodes L, S, R then we move
- them into R.
-
- we only delete L if we are deleting two nodes, if we delete only
- one node we delete S
-
- if we shift leaves then we shift as much as we can: this is a
- deliberate policy of extremism in node packing which results in
- higher average utilization after repeated random balance operations
- at the cost of more memory copies and more balancing as a result of
- small insertions to full nodes.
-
- if we shift internal nodes we try to evenly balance the node
- utilization, with consequent less balancing at the cost of lower
- utilization.
-
- one could argue that the policy for directories in leaves should be
- that of internal nodes, but we will wait until another day to
- evaluate this.... It would be nice to someday measure and prove
- these assumptions as to what is optimal....
+/*
+ * Now we have all of the buffers that must be used in balancing of
+ * the tree. We rely on the assumption that schedule() will not occur
+ * while do_balance works. ( Only interrupt handlers are acceptable.)
+ * We balance the tree according to the analysis made before this,
+ * using buffers already obtained. For SMP support it will someday be
+ * necessary to add ordered locking of tb.
+ */
 
-*/
+/*
+ * Some interesting rules of balancing:
+ * we delete a maximum of two nodes per level per balancing: we never
+ * delete R, when we delete two of three nodes L, S, R then we move
+ * them into R.
+ *
+ * we only delete L if we are deleting two nodes, if we delete only
+ * one node we delete S
+ *
+ * if we shift leaves then we shift as much as we can: this is a
+ * deliberate policy of extremism in node packing which results in
+ * higher average utilization after repeated random balance operations
+ * at the cost of more memory copies and more balancing as a result of
+ * small insertions to full nodes.
+ *
+ * if we shift internal nodes we try to evenly balance the node
+ * utilization, with consequent less balancing at the cost of lower
+ * utilization.
+ *
+ * one could argue that the policy for directories in leaves should be
+ * that of internal nodes, but we will wait until another day to
+ * evaluate this.... It would be nice to someday measure and prove
+ * these assumptions as to what is optimal....
+ */
 
 static inline void do_balance_starts(struct tree_balance *tb)
 {
-/* use print_cur_tb() to see initial state of struct
- tree_balance */
+/* use print_cur_tb() to see initial state of struct tree_balance */
 
 /* store_print_tb (tb); */
 
 /* do not delete, just comment it out */
-/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
- "check");*/
+/*
+print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
+tb->tb_path->pos_in_item, tb, "check");
+*/
 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
 #ifdef CONFIG_REISERFS_CHECK
 REISERFS_SB(tb->tb_sb)->cur_tb = tb;
@@ -1487,9 +1512,10 @@ static inline void do_balance_completed(struct tree_balance *tb)
 REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
 #endif
 
-/* reiserfs_free_block is no longer schedule safe. So, we need to
-** put the buffers we want freed on the thrown list during do_balance,
-** and then free them now
+/*
+ * reiserfs_free_block is no longer schedule safe. So, we need to
+ * put the buffers we want freed on the thrown list during do_balance,
+ * and then free them now
  */
 
 REISERFS_SB(tb->tb_sb)->s_do_balance++;
@@ -1500,36 +1526,40 @@ static inline void do_balance_completed(struct tree_balance *tb)
 free_thrown(tb);
 }
 
-void do_balance(struct tree_balance *tb, /* tree_balance structure */
- struct item_head *ih, /* item header of inserted item */
- const char *body, /* body of inserted item or bytes to paste */
- int flag)
-{ /* i - insert, d - delete
- c - cut, p - paste
-
- Cut means delete part of an item
- (includes removing an entry from a
- directory).
-
- Delete means delete whole item.
-
- Insert means add a new item into the
- tree.
-
- Paste means to append to the end of an
- existing file or to insert a directory
- entry. */
- int child_pos, /* position of a child node in its parent */
- h; /* level of the tree being processed */
- struct item_head insert_key[2]; /* in our processing of one level
- we sometimes determine what
- must be inserted into the next
- higher level. This insertion
- consists of a key or two keys
- and their corresponding
- pointers */
- struct buffer_head *insert_ptr[2]; /* inserted node-ptrs for the next
- level */
+/*
+ * do_balance - balance the tree
+ *
+ * @tb: tree_balance structure
+ * @ih: item header of inserted item
+ * @body: body of inserted item or bytes to paste
+ * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' paste
+ *
+ * Cut means delete part of an item (includes removing an entry from a
+ * directory).
+ *
+ * Delete means delete whole item.
+ *
+ * Insert means add a new item into the tree.
+ *
+ * Paste means to append to the end of an existing file or to
+ * insert a directory entry.
+ */
+void do_balance(struct tree_balance *tb, struct item_head *ih,
+ const char *body, int flag)
+{
+ int child_pos; /* position of a child node in its parent */
+ int h; /* level of the tree being processed */
+
+ /*
+ * in our processing of one level we sometimes determine what
+ * must be inserted into the next higher level. This insertion
+ * consists of a key or two keys and their corresponding
+ * pointers
+ */
+ struct item_head insert_key[2];
+
+ /* inserted node-ptrs for the next level */
+ struct buffer_head *insert_ptr[2];
 
 tb->tb_mode = flag;
 tb->need_balance_dirty = 0;
@@ -1549,9 +1579,11 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
 atomic_inc(&(fs_generation(tb->tb_sb)));
 do_balance_starts(tb);
 
-/* balance leaf returns 0 except if combining L R and S into
- one node. see balance_internal() for explanation of this
- line of code. */
+/*
+ * balance_leaf returns 0 except if combining L R and S into
+ * one node. see balance_internal() for explanation of this
+ * line of code.
+ */
 child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
 balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
 