Diffstat (limited to 'fs/reiserfs/fix_node.c')
-rw-r--r-- | fs/reiserfs/fix_node.c | 1008 |
1 file changed, 619 insertions, 389 deletions
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index dc4d41530316..6b0ddb2a9091 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -2,59 +2,32 @@ | |||
2 | * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README | 2 | * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README |
3 | */ | 3 | */ |
4 | 4 | ||
5 | /** | ||
6 | ** old_item_num | ||
7 | ** old_entry_num | ||
8 | ** set_entry_sizes | ||
9 | ** create_virtual_node | ||
10 | ** check_left | ||
11 | ** check_right | ||
12 | ** directory_part_size | ||
13 | ** get_num_ver | ||
14 | ** set_parameters | ||
15 | ** is_leaf_removable | ||
16 | ** are_leaves_removable | ||
17 | ** get_empty_nodes | ||
18 | ** get_lfree | ||
19 | ** get_rfree | ||
20 | ** is_left_neighbor_in_cache | ||
21 | ** decrement_key | ||
22 | ** get_far_parent | ||
23 | ** get_parents | ||
24 | ** can_node_be_removed | ||
25 | ** ip_check_balance | ||
26 | ** dc_check_balance_internal | ||
27 | ** dc_check_balance_leaf | ||
28 | ** dc_check_balance | ||
29 | ** check_balance | ||
30 | ** get_direct_parent | ||
31 | ** get_neighbors | ||
32 | ** fix_nodes | ||
33 | ** | ||
34 | ** | ||
35 | **/ | ||
36 | |||
37 | #include <linux/time.h> | 5 | #include <linux/time.h> |
38 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
39 | #include <linux/string.h> | 7 | #include <linux/string.h> |
40 | #include "reiserfs.h" | 8 | #include "reiserfs.h" |
41 | #include <linux/buffer_head.h> | 9 | #include <linux/buffer_head.h> |
42 | 10 | ||
43 | /* To make any changes in the tree we find a node, that contains item | 11 | /* |
44 | to be changed/deleted or position in the node we insert a new item | 12 | * To make any changes in the tree we find a node that contains item |
45 | to. We call this node S. To do balancing we need to decide what we | 13 | * to be changed/deleted or position in the node we insert a new item |
46 | will shift to left/right neighbor, or to a new node, where new item | 14 | * to. We call this node S. To do balancing we need to decide what we |
47 | will be etc. To make this analysis simpler we build virtual | 15 | * will shift to left/right neighbor, or to a new node, where new item |
48 | node. Virtual node is an array of items, that will replace items of | 16 | * will be etc. To make this analysis simpler we build virtual |
49 | node S. (For instance if we are going to delete an item, virtual | 17 | * node. Virtual node is an array of items, that will replace items of |
50 | node does not contain it). Virtual node keeps information about | 18 | * node S. (For instance if we are going to delete an item, virtual |
51 | item sizes and types, mergeability of first and last items, sizes | 19 | * node does not contain it). Virtual node keeps information about |
52 | of all entries in directory item. We use this array of items when | 20 | * item sizes and types, mergeability of first and last items, sizes |
53 | calculating what we can shift to neighbors and how many nodes we | 21 | * of all entries in directory item. We use this array of items when |
54 | have to have if we do not any shiftings, if we shift to left/right | 22 | * calculating what we can shift to neighbors and how many nodes we |
55 | neighbor or to both. */ | 23 | * have to have if we do not any shiftings, if we shift to left/right |
56 | 24 | * neighbor or to both. | |
57 | /* taking item number in virtual node, returns number of item, that it has in source buffer */ | 25 | */ |
26 | |||
27 | /* | ||
28 | * Takes item number in virtual node, returns number of item | ||
29 | * that it has in source buffer | ||
30 | */ | ||
58 | static inline int old_item_num(int new_num, int affected_item_num, int mode) | 31 | static inline int old_item_num(int new_num, int affected_item_num, int mode) |
59 | { | 32 | { |
60 | if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num) | 33 | if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num) |
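The mapping that old_item_num() performs is easy to model outside the kernel. The sketch below is a standalone illustration rather than the kernel code (only the first line of the function body is visible in this hunk): paste/cut and items before the affected position keep their index, an insert shifts later items back by one, and a delete skips the removed item. The MODE_* names are stand-ins for the M_* constants used in the diff.

/*
 * Standalone model of the virtual-index to source-index mapping
 * described in the comment above; not the kernel's old_item_num().
 */
#include <assert.h>

enum mode { MODE_INSERT, MODE_DELETE, MODE_CUT, MODE_PASTE };

static int model_old_item_num(int new_num, int affected_item_num, enum mode m)
{
	if (m == MODE_PASTE || m == MODE_CUT || new_num < affected_item_num)
		return new_num;
	if (m == MODE_INSERT)
		return new_num - 1;	/* items after the insert point shifted by one */
	return new_num + 1;		/* MODE_DELETE: the removed item is skipped */
}

int main(void)
{
	/* insert a new item at position 2: virtual item 3 was item 2 in the source */
	assert(model_old_item_num(3, 2, MODE_INSERT) == 2);
	/* delete item 2: virtual item 2 was item 3 in the source */
	assert(model_old_item_num(2, 2, MODE_DELETE) == 3);
	return 0;
}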
@@ -105,14 +78,17 @@ static void create_virtual_node(struct tree_balance *tb, int h) | |||
105 | vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item); | 78 | vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item); |
106 | 79 | ||
107 | /* first item in the node */ | 80 | /* first item in the node */ |
108 | ih = B_N_PITEM_HEAD(Sh, 0); | 81 | ih = item_head(Sh, 0); |
109 | 82 | ||
110 | /* define the mergeability for 0-th item (if it is not being deleted) */ | 83 | /* define the mergeability for 0-th item (if it is not being deleted) */ |
111 | if (op_is_left_mergeable(&(ih->ih_key), Sh->b_size) | 84 | if (op_is_left_mergeable(&ih->ih_key, Sh->b_size) |
112 | && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num)) | 85 | && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num)) |
113 | vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE; | 86 | vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE; |
114 | 87 | ||
115 | /* go through all items those remain in the virtual node (except for the new (inserted) one) */ | 88 | /* |
89 | * go through all items that remain in the virtual | ||
90 | * node (except for the new (inserted) one) | ||
91 | */ | ||
116 | for (new_num = 0; new_num < vn->vn_nr_item; new_num++) { | 92 | for (new_num = 0; new_num < vn->vn_nr_item; new_num++) { |
117 | int j; | 93 | int j; |
118 | struct virtual_item *vi = vn->vn_vi + new_num; | 94 | struct virtual_item *vi = vn->vn_vi + new_num; |
@@ -128,11 +104,13 @@ static void create_virtual_node(struct tree_balance *tb, int h) | |||
128 | 104 | ||
129 | vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE; | 105 | vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE; |
130 | vi->vi_ih = ih + j; | 106 | vi->vi_ih = ih + j; |
131 | vi->vi_item = B_I_PITEM(Sh, ih + j); | 107 | vi->vi_item = ih_item_body(Sh, ih + j); |
132 | vi->vi_uarea = vn->vn_free_ptr; | 108 | vi->vi_uarea = vn->vn_free_ptr; |
133 | 109 | ||
134 | // FIXME: there is no check, that item operation did not | 110 | /* |
135 | // consume too much memory | 111 | * FIXME: there is no check that item operation did not |
112 | * consume too much memory | ||
113 | */ | ||
136 | vn->vn_free_ptr += | 114 | vn->vn_free_ptr += |
137 | op_create_vi(vn, vi, is_affected, tb->insert_size[0]); | 115 | op_create_vi(vn, vi, is_affected, tb->insert_size[0]); |
138 | if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr) | 116 | if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr) |
@@ -145,7 +123,8 @@ static void create_virtual_node(struct tree_balance *tb, int h) | |||
145 | 123 | ||
146 | if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) { | 124 | if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) { |
147 | vn->vn_vi[new_num].vi_item_len += tb->insert_size[0]; | 125 | vn->vn_vi[new_num].vi_item_len += tb->insert_size[0]; |
148 | vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted | 126 | /* pointer to data which is going to be pasted */ |
127 | vi->vi_new_data = vn->vn_data; | ||
149 | } | 128 | } |
150 | } | 129 | } |
151 | 130 | ||
@@ -164,11 +143,14 @@ static void create_virtual_node(struct tree_balance *tb, int h) | |||
164 | tb->insert_size[0]); | 143 | tb->insert_size[0]); |
165 | } | 144 | } |
166 | 145 | ||
167 | /* set right merge flag we take right delimiting key and check whether it is a mergeable item */ | 146 | /* |
147 | * set right merge flag we take right delimiting key and | ||
148 | * check whether it is a mergeable item | ||
149 | */ | ||
168 | if (tb->CFR[0]) { | 150 | if (tb->CFR[0]) { |
169 | struct reiserfs_key *key; | 151 | struct reiserfs_key *key; |
170 | 152 | ||
171 | key = B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]); | 153 | key = internal_key(tb->CFR[0], tb->rkey[0]); |
172 | if (op_is_left_mergeable(key, Sh->b_size) | 154 | if (op_is_left_mergeable(key, Sh->b_size) |
173 | && (vn->vn_mode != M_DELETE | 155 | && (vn->vn_mode != M_DELETE |
174 | || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) | 156 | || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) |
@@ -179,12 +161,19 @@ static void create_virtual_node(struct tree_balance *tb, int h) | |||
179 | if (op_is_left_mergeable(key, Sh->b_size) && | 161 | if (op_is_left_mergeable(key, Sh->b_size) && |
180 | !(vn->vn_mode != M_DELETE | 162 | !(vn->vn_mode != M_DELETE |
181 | || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) { | 163 | || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) { |
182 | /* we delete last item and it could be merged with right neighbor's first item */ | 164 | /* |
165 | * we delete last item and it could be merged | ||
166 | * with right neighbor's first item | ||
167 | */ | ||
183 | if (! | 168 | if (! |
184 | (B_NR_ITEMS(Sh) == 1 | 169 | (B_NR_ITEMS(Sh) == 1 |
185 | && is_direntry_le_ih(B_N_PITEM_HEAD(Sh, 0)) | 170 | && is_direntry_le_ih(item_head(Sh, 0)) |
186 | && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) { | 171 | && ih_entry_count(item_head(Sh, 0)) == 1)) { |
187 | /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */ | 172 | /* |
173 | * node contains more than 1 item, or item | ||
174 | * is not directory item, or this item | ||
175 | * contains more than 1 entry | ||
176 | */ | ||
188 | print_block(Sh, 0, -1, -1); | 177 | print_block(Sh, 0, -1, -1); |
189 | reiserfs_panic(tb->tb_sb, "vs-8045", | 178 | reiserfs_panic(tb->tb_sb, "vs-8045", |
190 | "rdkey %k, affected item==%d " | 179 | "rdkey %k, affected item==%d " |
@@ -198,8 +187,10 @@ static void create_virtual_node(struct tree_balance *tb, int h) | |||
198 | } | 187 | } |
199 | } | 188 | } |
200 | 189 | ||
201 | /* using virtual node check, how many items can be shifted to left | 190 | /* |
202 | neighbor */ | 191 | * Using virtual node check, how many items can be |
192 | * shifted to left neighbor | ||
193 | */ | ||
203 | static void check_left(struct tree_balance *tb, int h, int cur_free) | 194 | static void check_left(struct tree_balance *tb, int h, int cur_free) |
204 | { | 195 | { |
205 | int i; | 196 | int i; |
@@ -259,9 +250,13 @@ static void check_left(struct tree_balance *tb, int h, int cur_free) | |||
259 | } | 250 | } |
260 | 251 | ||
261 | /* the item cannot be shifted entirely, try to split it */ | 252 | /* the item cannot be shifted entirely, try to split it */ |
262 | /* check whether L[0] can hold ih and at least one byte of the item body */ | 253 | /* |
254 | * check whether L[0] can hold ih and at least one byte | ||
255 | * of the item body | ||
256 | */ | ||
257 | |||
258 | /* cannot shift even a part of the current item */ | ||
263 | if (cur_free <= ih_size) { | 259 | if (cur_free <= ih_size) { |
264 | /* cannot shift even a part of the current item */ | ||
265 | tb->lbytes = -1; | 260 | tb->lbytes = -1; |
266 | return; | 261 | return; |
267 | } | 262 | } |
@@ -278,8 +273,10 @@ static void check_left(struct tree_balance *tb, int h, int cur_free) | |||
278 | return; | 273 | return; |
279 | } | 274 | } |
280 | 275 | ||
281 | /* using virtual node check, how many items can be shifted to right | 276 | /* |
282 | neighbor */ | 277 | * Using virtual node check, how many items can be |
278 | * shifted to right neighbor | ||
279 | */ | ||
283 | static void check_right(struct tree_balance *tb, int h, int cur_free) | 280 | static void check_right(struct tree_balance *tb, int h, int cur_free) |
284 | { | 281 | { |
285 | int i; | 282 | int i; |
@@ -338,13 +335,21 @@ static void check_right(struct tree_balance *tb, int h, int cur_free) | |||
338 | continue; | 335 | continue; |
339 | } | 336 | } |
340 | 337 | ||
341 | /* check whether R[0] can hold ih and at least one byte of the item body */ | 338 | /* |
342 | if (cur_free <= ih_size) { /* cannot shift even a part of the current item */ | 339 | * check whether R[0] can hold ih and at least one |
340 | * byte of the item body | ||
341 | */ | ||
342 | |||
343 | /* cannot shift even a part of the current item */ | ||
344 | if (cur_free <= ih_size) { | ||
343 | tb->rbytes = -1; | 345 | tb->rbytes = -1; |
344 | return; | 346 | return; |
345 | } | 347 | } |
346 | 348 | ||
347 | /* R[0] can hold the header of the item and at least one byte of its body */ | 349 | /* |
350 | * R[0] can hold the header of the item and at least | ||
351 | * one byte of its body | ||
352 | */ | ||
348 | cur_free -= ih_size; /* cur_free is still > 0 */ | 353 | cur_free -= ih_size; /* cur_free is still > 0 */ |
349 | 354 | ||
350 | tb->rbytes = op_check_right(vi, cur_free); | 355 | tb->rbytes = op_check_right(vi, cur_free); |
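Both check_left() and check_right() end with the partial-shift rule shown in this hunk: the neighbor must have room for the item header plus at least one byte of body before any part of the boundary item may move. A minimal standalone sketch of that rule, with an illustrative header size (SKETCH_IH_SIZE is a stand-in, not the kernel's IH_SIZE) and plain byte counts instead of op_check_right()'s unit handling:

#include <stdio.h>

#define SKETCH_IH_SIZE 24	/* size of an item header, illustrative */

/* returns bytes of item body that fit, or -1 if not even one byte fits */
static int partial_shift_bytes(int cur_free, int item_body_len)
{
	if (cur_free <= SKETCH_IH_SIZE)
		return -1;			/* cannot shift even a part */
	cur_free -= SKETCH_IH_SIZE;		/* cur_free is still > 0 here */
	return cur_free < item_body_len ? cur_free : item_body_len;
}

int main(void)
{
	printf("%d\n", partial_shift_bytes(100, 300));	/* 76 bytes of body fit */
	printf("%d\n", partial_shift_bytes(20, 300));	/* -1: header does not fit */
	return 0;
}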
@@ -361,45 +366,64 @@ static void check_right(struct tree_balance *tb, int h, int cur_free) | |||
361 | /* | 366 | /* |
362 | * from - number of items, which are shifted to left neighbor entirely | 367 | * from - number of items, which are shifted to left neighbor entirely |
363 | * to - number of item, which are shifted to right neighbor entirely | 368 | * to - number of item, which are shifted to right neighbor entirely |
364 | * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor | 369 | * from_bytes - number of bytes of boundary item (or directory entries) |
365 | * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */ | 370 | * which are shifted to left neighbor |
371 | * to_bytes - number of bytes of boundary item (or directory entries) | ||
372 | * which are shifted to right neighbor | ||
373 | */ | ||
366 | static int get_num_ver(int mode, struct tree_balance *tb, int h, | 374 | static int get_num_ver(int mode, struct tree_balance *tb, int h, |
367 | int from, int from_bytes, | 375 | int from, int from_bytes, |
368 | int to, int to_bytes, short *snum012, int flow) | 376 | int to, int to_bytes, short *snum012, int flow) |
369 | { | 377 | { |
370 | int i; | 378 | int i; |
371 | int cur_free; | 379 | int cur_free; |
372 | // int bytes; | ||
373 | int units; | 380 | int units; |
374 | struct virtual_node *vn = tb->tb_vn; | 381 | struct virtual_node *vn = tb->tb_vn; |
375 | // struct virtual_item * vi; | ||
376 | |||
377 | int total_node_size, max_node_size, current_item_size; | 382 | int total_node_size, max_node_size, current_item_size; |
378 | int needed_nodes; | 383 | int needed_nodes; |
379 | int start_item, /* position of item we start filling node from */ | 384 | |
380 | end_item, /* position of item we finish filling node by */ | 385 | /* position of item we start filling node from */ |
381 | start_bytes, /* number of first bytes (entries for directory) of start_item-th item | 386 | int start_item; |
382 | we do not include into node that is being filled */ | 387 | |
383 | end_bytes; /* number of last bytes (entries for directory) of end_item-th item | 388 | /* position of item we finish filling node by */ |
384 | we do node include into node that is being filled */ | 389 | int end_item; |
385 | int split_item_positions[2]; /* these are positions in virtual item of | 390 | |
386 | items, that are split between S[0] and | 391 | /* |
387 | S1new and S1new and S2new */ | 392 | * number of first bytes (entries for directory) of start_item-th item |
393 | * we do not include into node that is being filled | ||
394 | */ | ||
395 | int start_bytes; | ||
396 | |||
397 | /* | ||
398 | * number of last bytes (entries for directory) of end_item-th item | ||
399 | * we do node include into node that is being filled | ||
400 | */ | ||
401 | int end_bytes; | ||
402 | |||
403 | /* | ||
404 | * these are positions in virtual item of items, that are split | ||
405 | * between S[0] and S1new and S1new and S2new | ||
406 | */ | ||
407 | int split_item_positions[2]; | ||
388 | 408 | ||
389 | split_item_positions[0] = -1; | 409 | split_item_positions[0] = -1; |
390 | split_item_positions[1] = -1; | 410 | split_item_positions[1] = -1; |
391 | 411 | ||
392 | /* We only create additional nodes if we are in insert or paste mode | 412 | /* |
393 | or we are in replace mode at the internal level. If h is 0 and | 413 | * We only create additional nodes if we are in insert or paste mode |
394 | the mode is M_REPLACE then in fix_nodes we change the mode to | 414 | * or we are in replace mode at the internal level. If h is 0 and |
395 | paste or insert before we get here in the code. */ | 415 | * the mode is M_REPLACE then in fix_nodes we change the mode to |
416 | * paste or insert before we get here in the code. | ||
417 | */ | ||
396 | RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE), | 418 | RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE), |
397 | "vs-8100: insert_size < 0 in overflow"); | 419 | "vs-8100: insert_size < 0 in overflow"); |
398 | 420 | ||
399 | max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h)); | 421 | max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h)); |
400 | 422 | ||
401 | /* snum012 [0-2] - number of items, that lay | 423 | /* |
402 | to S[0], first new node and second new node */ | 424 | * snum012 [0-2] - number of items, that lay |
425 | * to S[0], first new node and second new node | ||
426 | */ | ||
403 | snum012[3] = -1; /* s1bytes */ | 427 | snum012[3] = -1; /* s1bytes */ |
404 | snum012[4] = -1; /* s2bytes */ | 428 | snum012[4] = -1; /* s2bytes */ |
405 | 429 | ||
@@ -416,20 +440,22 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
416 | total_node_size = 0; | 440 | total_node_size = 0; |
417 | cur_free = max_node_size; | 441 | cur_free = max_node_size; |
418 | 442 | ||
419 | // start from 'from'-th item | 443 | /* start from 'from'-th item */ |
420 | start_item = from; | 444 | start_item = from; |
421 | // skip its first 'start_bytes' units | 445 | /* skip its first 'start_bytes' units */ |
422 | start_bytes = ((from_bytes != -1) ? from_bytes : 0); | 446 | start_bytes = ((from_bytes != -1) ? from_bytes : 0); |
423 | 447 | ||
424 | // last included item is the 'end_item'-th one | 448 | /* last included item is the 'end_item'-th one */ |
425 | end_item = vn->vn_nr_item - to - 1; | 449 | end_item = vn->vn_nr_item - to - 1; |
426 | // do not count last 'end_bytes' units of 'end_item'-th item | 450 | /* do not count last 'end_bytes' units of 'end_item'-th item */ |
427 | end_bytes = (to_bytes != -1) ? to_bytes : 0; | 451 | end_bytes = (to_bytes != -1) ? to_bytes : 0; |
428 | 452 | ||
429 | /* go through all item beginning from the start_item-th item and ending by | 453 | /* |
430 | the end_item-th item. Do not count first 'start_bytes' units of | 454 | * go through all item beginning from the start_item-th item |
431 | 'start_item'-th item and last 'end_bytes' of 'end_item'-th item */ | 455 | * and ending by the end_item-th item. Do not count first |
432 | 456 | * 'start_bytes' units of 'start_item'-th item and last | |
457 | * 'end_bytes' of 'end_item'-th item | ||
458 | */ | ||
433 | for (i = start_item; i <= end_item; i++) { | 459 | for (i = start_item; i <= end_item; i++) { |
434 | struct virtual_item *vi = vn->vn_vi + i; | 460 | struct virtual_item *vi = vn->vn_vi + i; |
435 | int skip_from_end = ((i == end_item) ? end_bytes : 0); | 461 | int skip_from_end = ((i == end_item) ? end_bytes : 0); |
@@ -439,7 +465,10 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
439 | /* get size of current item */ | 465 | /* get size of current item */ |
440 | current_item_size = vi->vi_item_len; | 466 | current_item_size = vi->vi_item_len; |
441 | 467 | ||
442 | /* do not take in calculation head part (from_bytes) of from-th item */ | 468 | /* |
469 | * do not take in calculation head part (from_bytes) | ||
470 | * of from-th item | ||
471 | */ | ||
443 | current_item_size -= | 472 | current_item_size -= |
444 | op_part_size(vi, 0 /*from start */ , start_bytes); | 473 | op_part_size(vi, 0 /*from start */ , start_bytes); |
445 | 474 | ||
@@ -455,9 +484,11 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
455 | continue; | 484 | continue; |
456 | } | 485 | } |
457 | 486 | ||
487 | /* | ||
488 | * virtual item length is longer, than max size of item in | ||
489 | * a node. It is impossible for direct item | ||
490 | */ | ||
458 | if (current_item_size > max_node_size) { | 491 | if (current_item_size > max_node_size) { |
459 | /* virtual item length is longer, than max size of item in | ||
460 | a node. It is impossible for direct item */ | ||
461 | RFALSE(is_direct_le_ih(vi->vi_ih), | 492 | RFALSE(is_direct_le_ih(vi->vi_ih), |
462 | "vs-8110: " | 493 | "vs-8110: " |
463 | "direct item length is %d. It can not be longer than %d", | 494 | "direct item length is %d. It can not be longer than %d", |
@@ -466,15 +497,18 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
466 | flow = 1; | 497 | flow = 1; |
467 | } | 498 | } |
468 | 499 | ||
500 | /* as we do not split items, take new node and continue */ | ||
469 | if (!flow) { | 501 | if (!flow) { |
470 | /* as we do not split items, take new node and continue */ | ||
471 | needed_nodes++; | 502 | needed_nodes++; |
472 | i--; | 503 | i--; |
473 | total_node_size = 0; | 504 | total_node_size = 0; |
474 | continue; | 505 | continue; |
475 | } | 506 | } |
476 | // calculate number of item units which fit into node being | 507 | |
477 | // filled | 508 | /* |
509 | * calculate number of item units which fit into node being | ||
510 | * filled | ||
511 | */ | ||
478 | { | 512 | { |
479 | int free_space; | 513 | int free_space; |
480 | 514 | ||
@@ -482,17 +516,17 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
482 | units = | 516 | units = |
483 | op_check_left(vi, free_space, start_bytes, | 517 | op_check_left(vi, free_space, start_bytes, |
484 | skip_from_end); | 518 | skip_from_end); |
519 | /* | ||
520 | * nothing fits into current node, take new | ||
521 | * node and continue | ||
522 | */ | ||
485 | if (units == -1) { | 523 | if (units == -1) { |
486 | /* nothing fits into current node, take new node and continue */ | ||
487 | needed_nodes++, i--, total_node_size = 0; | 524 | needed_nodes++, i--, total_node_size = 0; |
488 | continue; | 525 | continue; |
489 | } | 526 | } |
490 | } | 527 | } |
491 | 528 | ||
492 | /* something fits into the current node */ | 529 | /* something fits into the current node */ |
493 | //if (snum012[3] != -1 || needed_nodes != 1) | ||
494 | // reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required"); | ||
495 | //snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units; | ||
496 | start_bytes += units; | 530 | start_bytes += units; |
497 | snum012[needed_nodes - 1 + 3] = units; | 531 | snum012[needed_nodes - 1 + 3] = units; |
498 | 532 | ||
@@ -508,9 +542,11 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
508 | total_node_size = 0; | 542 | total_node_size = 0; |
509 | } | 543 | } |
510 | 544 | ||
511 | // sum012[4] (if it is not -1) contains number of units of which | 545 | /* |
512 | // are to be in S1new, snum012[3] - to be in S0. They are supposed | 546 | * sum012[4] (if it is not -1) contains number of units of which |
513 | // to be S1bytes and S2bytes correspondingly, so recalculate | 547 | * are to be in S1new, snum012[3] - to be in S0. They are supposed |
548 | * to be S1bytes and S2bytes correspondingly, so recalculate | ||
549 | */ | ||
514 | if (snum012[4] > 0) { | 550 | if (snum012[4] > 0) { |
515 | int split_item_num; | 551 | int split_item_num; |
516 | int bytes_to_r, bytes_to_l; | 552 | int bytes_to_r, bytes_to_l; |
@@ -527,7 +563,7 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
527 | ((split_item_positions[0] == | 563 | ((split_item_positions[0] == |
528 | split_item_positions[1]) ? snum012[3] : 0); | 564 | split_item_positions[1]) ? snum012[3] : 0); |
529 | 565 | ||
530 | // s2bytes | 566 | /* s2bytes */ |
531 | snum012[4] = | 567 | snum012[4] = |
532 | op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] - | 568 | op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] - |
533 | bytes_to_r - bytes_to_l - bytes_to_S1new; | 569 | bytes_to_r - bytes_to_l - bytes_to_S1new; |
@@ -555,7 +591,7 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
555 | ((split_item_positions[0] == split_item_positions[1] | 591 | ((split_item_positions[0] == split_item_positions[1] |
556 | && snum012[4] != -1) ? snum012[4] : 0); | 592 | && snum012[4] != -1) ? snum012[4] : 0); |
557 | 593 | ||
558 | // s1bytes | 594 | /* s1bytes */ |
559 | snum012[3] = | 595 | snum012[3] = |
560 | op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] - | 596 | op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] - |
561 | bytes_to_r - bytes_to_l - bytes_to_S2new; | 597 | bytes_to_r - bytes_to_l - bytes_to_S2new; |
@@ -565,7 +601,8 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
565 | } | 601 | } |
566 | 602 | ||
567 | 603 | ||
568 | /* Set parameters for balancing. | 604 | /* |
605 | * Set parameters for balancing. | ||
569 | * Performs write of results of analysis of balancing into structure tb, | 606 | * Performs write of results of analysis of balancing into structure tb, |
570 | * where it will later be used by the functions that actually do the balancing. | 607 | * where it will later be used by the functions that actually do the balancing. |
571 | * Parameters: | 608 | * Parameters: |
@@ -575,11 +612,12 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, | |||
575 | * rnum number of items from S[h] that must be shifted to R[h]; | 612 | * rnum number of items from S[h] that must be shifted to R[h]; |
576 | * blk_num number of blocks that S[h] will be splitted into; | 613 | * blk_num number of blocks that S[h] will be splitted into; |
577 | * s012 number of items that fall into splitted nodes. | 614 | * s012 number of items that fall into splitted nodes. |
578 | * lbytes number of bytes which flow to the left neighbor from the item that is not | 615 | * lbytes number of bytes which flow to the left neighbor from the |
579 | * not shifted entirely | 616 | * item that is not not shifted entirely |
580 | * rbytes number of bytes which flow to the right neighbor from the item that is not | 617 | * rbytes number of bytes which flow to the right neighbor from the |
581 | * not shifted entirely | 618 | * item that is not not shifted entirely |
582 | * s1bytes number of bytes which flow to the first new node when S[0] splits (this number is contained in s012 array) | 619 | * s1bytes number of bytes which flow to the first new node when |
620 | * S[0] splits (this number is contained in s012 array) | ||
583 | */ | 621 | */ |
584 | 622 | ||
585 | static void set_parameters(struct tree_balance *tb, int h, int lnum, | 623 | static void set_parameters(struct tree_balance *tb, int h, int lnum, |
@@ -590,12 +628,14 @@ static void set_parameters(struct tree_balance *tb, int h, int lnum, | |||
590 | tb->rnum[h] = rnum; | 628 | tb->rnum[h] = rnum; |
591 | tb->blknum[h] = blk_num; | 629 | tb->blknum[h] = blk_num; |
592 | 630 | ||
593 | if (h == 0) { /* only for leaf level */ | 631 | /* only for leaf level */ |
632 | if (h == 0) { | ||
594 | if (s012 != NULL) { | 633 | if (s012 != NULL) { |
595 | tb->s0num = *s012++, | 634 | tb->s0num = *s012++; |
596 | tb->s1num = *s012++, tb->s2num = *s012++; | 635 | tb->snum[0] = *s012++; |
597 | tb->s1bytes = *s012++; | 636 | tb->snum[1] = *s012++; |
598 | tb->s2bytes = *s012; | 637 | tb->sbytes[0] = *s012++; |
638 | tb->sbytes[1] = *s012; | ||
599 | } | 639 | } |
600 | tb->lbytes = lb; | 640 | tb->lbytes = lb; |
601 | tb->rbytes = rb; | 641 | tb->rbytes = rb; |
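The reworked unpacking above replaces the old s1num/s2num/s1bytes/s2bytes fields with the snum[]/sbytes[] arrays. A standalone sketch of how a five-element s012 array produced by get_num_ver() is consumed, using a stand-in struct rather than struct tree_balance:

#include <stdio.h>

struct balance_sketch {
	int s0num;	/* items that stay in S[0] */
	int snum[2];	/* items that go to the first/second new node */
	int sbytes[2];	/* bytes of the split boundary items that follow them */
};

static void unpack_s012(struct balance_sketch *tb, const short *s012)
{
	tb->s0num = *s012++;
	tb->snum[0] = *s012++;
	tb->snum[1] = *s012++;
	tb->sbytes[0] = *s012++;
	tb->sbytes[1] = *s012;
}

int main(void)
{
	/* hypothetical result: 3 items stay, 2 and 1 move, one split of 120 bytes */
	short s012[5] = { 3, 2, 1, 120, -1 };
	struct balance_sketch tb;

	unpack_s012(&tb, s012);
	printf("S0=%d S1new=%d S2new=%d s1bytes=%d s2bytes=%d\n",
	       tb.s0num, tb.snum[0], tb.snum[1], tb.sbytes[0], tb.sbytes[1]);
	return 0;
}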
@@ -607,8 +647,10 @@ static void set_parameters(struct tree_balance *tb, int h, int lnum, | |||
607 | PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb); | 647 | PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb); |
608 | } | 648 | } |
609 | 649 | ||
610 | /* check, does node disappear if we shift tb->lnum[0] items to left | 650 | /* |
611 | neighbor and tb->rnum[0] to the right one. */ | 651 | * check if node disappears if we shift tb->lnum[0] items to left |
652 | * neighbor and tb->rnum[0] to the right one. | ||
653 | */ | ||
612 | static int is_leaf_removable(struct tree_balance *tb) | 654 | static int is_leaf_removable(struct tree_balance *tb) |
613 | { | 655 | { |
614 | struct virtual_node *vn = tb->tb_vn; | 656 | struct virtual_node *vn = tb->tb_vn; |
@@ -616,8 +658,10 @@ static int is_leaf_removable(struct tree_balance *tb) | |||
616 | int size; | 658 | int size; |
617 | int remain_items; | 659 | int remain_items; |
618 | 660 | ||
619 | /* number of items, that will be shifted to left (right) neighbor | 661 | /* |
620 | entirely */ | 662 | * number of items that will be shifted to left (right) neighbor |
663 | * entirely | ||
664 | */ | ||
621 | to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0); | 665 | to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0); |
622 | to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0); | 666 | to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0); |
623 | remain_items = vn->vn_nr_item; | 667 | remain_items = vn->vn_nr_item; |
@@ -625,21 +669,21 @@ static int is_leaf_removable(struct tree_balance *tb) | |||
625 | /* how many items remain in S[0] after shiftings to neighbors */ | 669 | /* how many items remain in S[0] after shiftings to neighbors */ |
626 | remain_items -= (to_left + to_right); | 670 | remain_items -= (to_left + to_right); |
627 | 671 | ||
672 | /* all content of node can be shifted to neighbors */ | ||
628 | if (remain_items < 1) { | 673 | if (remain_items < 1) { |
629 | /* all content of node can be shifted to neighbors */ | ||
630 | set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0, | 674 | set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0, |
631 | NULL, -1, -1); | 675 | NULL, -1, -1); |
632 | return 1; | 676 | return 1; |
633 | } | 677 | } |
634 | 678 | ||
679 | /* S[0] is not removable */ | ||
635 | if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1) | 680 | if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1) |
636 | /* S[0] is not removable */ | ||
637 | return 0; | 681 | return 0; |
638 | 682 | ||
639 | /* check, whether we can divide 1 remaining item between neighbors */ | 683 | /* check whether we can divide 1 remaining item between neighbors */ |
640 | 684 | ||
641 | /* get size of remaining item (in item units) */ | 685 | /* get size of remaining item (in item units) */ |
642 | size = op_unit_num(&(vn->vn_vi[to_left])); | 686 | size = op_unit_num(&vn->vn_vi[to_left]); |
643 | 687 | ||
644 | if (tb->lbytes + tb->rbytes >= size) { | 688 | if (tb->lbytes + tb->rbytes >= size) { |
645 | set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL, | 689 | set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL, |
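The removability test spread across the two hunks above reduces to a small amount of arithmetic: count the items that move to each neighbor entirely, and if exactly one item remains, see whether its units can be split across both neighbors. A standalone model of that decision with illustrative numbers, not the kernel's is_leaf_removable():

#include <stdbool.h>
#include <stdio.h>

static bool leaf_removable(int nr_items, int lnum, int lbytes,
			   int rnum, int rbytes, int remaining_item_units)
{
	/* items shifted entirely (a partial shift counts the item once less) */
	int to_left = lnum - (lbytes != -1 ? 1 : 0);
	int to_right = rnum - (rbytes != -1 ? 1 : 0);
	int remain = nr_items - (to_left + to_right);

	if (remain < 1)
		return true;	/* whole node fits into the neighbors */
	if (remain > 1 || lbytes == -1 || rbytes == -1)
		return false;	/* more than one item left, or no partial shifts */

	/* one item left: can its units be divided between L[0] and R[0]? */
	return lbytes + rbytes >= remaining_item_units;
}

int main(void)
{
	/* 4 items, 3 to the left (one partially), 2 to the right (one partially) */
	printf("%d\n", leaf_removable(4, 3, 40, 2, 60, 80));	/* 1: 40 + 60 >= 80 */
	return 0;
}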
@@ -675,23 +719,28 @@ static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree) | |||
675 | "vs-8125: item number must be 1: it is %d", | 719 | "vs-8125: item number must be 1: it is %d", |
676 | B_NR_ITEMS(S0)); | 720 | B_NR_ITEMS(S0)); |
677 | 721 | ||
678 | ih = B_N_PITEM_HEAD(S0, 0); | 722 | ih = item_head(S0, 0); |
679 | if (tb->CFR[0] | 723 | if (tb->CFR[0] |
680 | && !comp_short_le_keys(&(ih->ih_key), | 724 | && !comp_short_le_keys(&ih->ih_key, |
681 | B_N_PDELIM_KEY(tb->CFR[0], | 725 | internal_key(tb->CFR[0], |
682 | tb->rkey[0]))) | 726 | tb->rkey[0]))) |
727 | /* | ||
728 | * Directory must be in correct state here: that is | ||
729 | * somewhere at the left side should exist first | ||
730 | * directory item. But the item being deleted can | ||
731 | * not be that first one because its right neighbor | ||
732 | * is item of the same directory. (But first item | ||
733 | * always gets deleted in last turn). So, neighbors | ||
734 | * of deleted item can be merged, so we can save | ||
735 | * ih_size | ||
736 | */ | ||
683 | if (is_direntry_le_ih(ih)) { | 737 | if (is_direntry_le_ih(ih)) { |
684 | /* Directory must be in correct state here: that is | ||
685 | somewhere at the left side should exist first directory | ||
686 | item. But the item being deleted can not be that first | ||
687 | one because its right neighbor is item of the same | ||
688 | directory. (But first item always gets deleted in last | ||
689 | turn). So, neighbors of deleted item can be merged, so | ||
690 | we can save ih_size */ | ||
691 | ih_size = IH_SIZE; | 738 | ih_size = IH_SIZE; |
692 | 739 | ||
693 | /* we might check that left neighbor exists and is of the | 740 | /* |
694 | same directory */ | 741 | * we might check that left neighbor exists |
742 | * and is of the same directory | ||
743 | */ | ||
695 | RFALSE(le_ih_k_offset(ih) == DOT_OFFSET, | 744 | RFALSE(le_ih_k_offset(ih) == DOT_OFFSET, |
696 | "vs-8130: first directory item can not be removed until directory is not empty"); | 745 | "vs-8130: first directory item can not be removed until directory is not empty"); |
697 | } | 746 | } |
@@ -770,7 +819,8 @@ static void free_buffers_in_tb(struct tree_balance *tb) | |||
770 | } | 819 | } |
771 | } | 820 | } |
772 | 821 | ||
773 | /* Get new buffers for storing new nodes that are created while balancing. | 822 | /* |
823 | * Get new buffers for storing new nodes that are created while balancing. | ||
774 | * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; | 824 | * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; |
775 | * CARRY_ON - schedule didn't occur while the function worked; | 825 | * CARRY_ON - schedule didn't occur while the function worked; |
776 | * NO_DISK_SPACE - no disk space. | 826 | * NO_DISK_SPACE - no disk space. |
@@ -778,28 +828,33 @@ static void free_buffers_in_tb(struct tree_balance *tb) | |||
778 | /* The function is NOT SCHEDULE-SAFE! */ | 828 | /* The function is NOT SCHEDULE-SAFE! */ |
779 | static int get_empty_nodes(struct tree_balance *tb, int h) | 829 | static int get_empty_nodes(struct tree_balance *tb, int h) |
780 | { | 830 | { |
781 | struct buffer_head *new_bh, | 831 | struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h); |
782 | *Sh = PATH_H_PBUFFER(tb->tb_path, h); | ||
783 | b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; | 832 | b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, }; |
784 | int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */ | 833 | int counter, number_of_freeblk; |
785 | retval = CARRY_ON; | 834 | int amount_needed; /* number of needed empty blocks */ |
835 | int retval = CARRY_ON; | ||
786 | struct super_block *sb = tb->tb_sb; | 836 | struct super_block *sb = tb->tb_sb; |
787 | 837 | ||
788 | /* number_of_freeblk is the number of empty blocks which have been | 838 | /* |
789 | acquired for use by the balancing algorithm minus the number of | 839 | * number_of_freeblk is the number of empty blocks which have been |
790 | empty blocks used in the previous levels of the analysis, | 840 | * acquired for use by the balancing algorithm minus the number of |
791 | number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs | 841 | * empty blocks used in the previous levels of the analysis, |
792 | after empty blocks are acquired, and the balancing analysis is | 842 | * number_of_freeblk = tb->cur_blknum can be non-zero if a schedule |
793 | then restarted, amount_needed is the number needed by this level | 843 | * occurs after empty blocks are acquired, and the balancing analysis |
794 | (h) of the balancing analysis. | 844 | * is then restarted, amount_needed is the number needed by this |
795 | 845 | * level (h) of the balancing analysis. | |
796 | Note that for systems with many processes writing, it would be | 846 | * |
797 | more layout optimal to calculate the total number needed by all | 847 | * Note that for systems with many processes writing, it would be |
798 | levels and then to run reiserfs_new_blocks to get all of them at once. */ | 848 | * more layout optimal to calculate the total number needed by all |
799 | 849 | * levels and then to run reiserfs_new_blocks to get all of them at | |
800 | /* Initiate number_of_freeblk to the amount acquired prior to the restart of | 850 | * once. |
801 | the analysis or 0 if not restarted, then subtract the amount needed | 851 | */ |
802 | by all of the levels of the tree below h. */ | 852 | |
853 | /* | ||
854 | * Initiate number_of_freeblk to the amount acquired prior to the | ||
855 | * restart of the analysis or 0 if not restarted, then subtract the | ||
856 | * amount needed by all of the levels of the tree below h. | ||
857 | */ | ||
803 | /* blknum includes S[h], so we subtract 1 in this calculation */ | 858 | /* blknum includes S[h], so we subtract 1 in this calculation */ |
804 | for (counter = 0, number_of_freeblk = tb->cur_blknum; | 859 | for (counter = 0, number_of_freeblk = tb->cur_blknum; |
805 | counter < h; counter++) | 860 | counter < h; counter++) |
@@ -810,13 +865,19 @@ static int get_empty_nodes(struct tree_balance *tb, int h) | |||
810 | /* Allocate missing empty blocks. */ | 865 | /* Allocate missing empty blocks. */ |
811 | /* if Sh == 0 then we are getting a new root */ | 866 | /* if Sh == 0 then we are getting a new root */ |
812 | amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1; | 867 | amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1; |
813 | /* Amount_needed = the amount that we need more than the amount that we have. */ | 868 | /* |
869 | * Amount_needed = the amount that we need more than the | ||
870 | * amount that we have. | ||
871 | */ | ||
814 | if (amount_needed > number_of_freeblk) | 872 | if (amount_needed > number_of_freeblk) |
815 | amount_needed -= number_of_freeblk; | 873 | amount_needed -= number_of_freeblk; |
816 | else /* If we have enough already then there is nothing to do. */ | 874 | else /* If we have enough already then there is nothing to do. */ |
817 | return CARRY_ON; | 875 | return CARRY_ON; |
818 | 876 | ||
819 | /* No need to check quota - is not allocated for blocks used for formatted nodes */ | 877 | /* |
878 | * No need to check quota - is not allocated for blocks used | ||
879 | * for formatted nodes | ||
880 | */ | ||
820 | if (reiserfs_new_form_blocknrs(tb, blocknrs, | 881 | if (reiserfs_new_form_blocknrs(tb, blocknrs, |
821 | amount_needed) == NO_DISK_SPACE) | 882 | amount_needed) == NO_DISK_SPACE) |
822 | return NO_DISK_SPACE; | 883 | return NO_DISK_SPACE; |
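The block-accounting comments above describe a simple credit scheme: blocks acquired on earlier passes (tb->cur_blknum) are charged with what the levels below h already consumed, and only the shortfall is requested from the allocator. A standalone sketch of that arithmetic, leaving out the actual buffer allocation and the schedule/retry handling:

#include <stdio.h>

static int blocks_still_needed(int h, int cur_blknum, const int *blknum,
			       int have_sh /* 0 when a new root is needed */)
{
	int level, free_blocks = cur_blknum;
	int needed = have_sh ? blknum[h] - 1 : 1;

	/* blknum[] counts S[level] itself, so each lower level consumed blknum - 1 */
	for (level = 0; level < h; level++)
		free_blocks -= blknum[level] - 1;

	return needed > free_blocks ? needed - free_blocks : 0;
}

int main(void)
{
	int blknum[3] = { 3, 2, 2 };

	/* two spare blocks acquired earlier, level 1 splits into two nodes */
	printf("%d\n", blocks_still_needed(1, 2, blknum, 1));	/* 1 block short */
	return 0;
}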
@@ -849,8 +910,10 @@ static int get_empty_nodes(struct tree_balance *tb, int h) | |||
849 | return retval; | 910 | return retval; |
850 | } | 911 | } |
851 | 912 | ||
852 | /* Get free space of the left neighbor, which is stored in the parent | 913 | /* |
853 | * node of the left neighbor. */ | 914 | * Get free space of the left neighbor, which is stored in the parent |
915 | * node of the left neighbor. | ||
916 | */ | ||
854 | static int get_lfree(struct tree_balance *tb, int h) | 917 | static int get_lfree(struct tree_balance *tb, int h) |
855 | { | 918 | { |
856 | struct buffer_head *l, *f; | 919 | struct buffer_head *l, *f; |
@@ -870,7 +933,8 @@ static int get_lfree(struct tree_balance *tb, int h) | |||
870 | return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order))); | 933 | return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order))); |
871 | } | 934 | } |
872 | 935 | ||
873 | /* Get free space of the right neighbor, | 936 | /* |
937 | * Get free space of the right neighbor, | ||
874 | * which is stored in the parent node of the right neighbor. | 938 | * which is stored in the parent node of the right neighbor. |
875 | */ | 939 | */ |
876 | static int get_rfree(struct tree_balance *tb, int h) | 940 | static int get_rfree(struct tree_balance *tb, int h) |
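get_lfree() and get_rfree() never touch the neighbor itself: the parent's child pointer already caches how many bytes the child uses, so free space falls out of a subtraction. A standalone sketch with illustrative block and header sizes, not reiserfs's MAX_CHILD_SIZE()/dc_size() definitions:

#include <stdio.h>

#define SKETCH_BLOCK_SIZE 4096
#define SKETCH_BLKH_SIZE 24	/* per-block header, illustrative */

struct sketch_child {
	int dc_size;	/* bytes used in the child, as cached in the parent */
};

static int child_free_space(const struct sketch_child *dc)
{
	return (SKETCH_BLOCK_SIZE - SKETCH_BLKH_SIZE) - dc->dc_size;
}

int main(void)
{
	struct sketch_child left = { .dc_size = 3000 };

	printf("%d\n", child_free_space(&left));	/* 1072 bytes free */
	return 0;
}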
@@ -916,7 +980,10 @@ static int is_left_neighbor_in_cache(struct tree_balance *tb, int h) | |||
916 | "vs-8165: F[h] (%b) or FL[h] (%b) is invalid", | 980 | "vs-8165: F[h] (%b) or FL[h] (%b) is invalid", |
917 | father, tb->FL[h]); | 981 | father, tb->FL[h]); |
918 | 982 | ||
919 | /* Get position of the pointer to the left neighbor into the left father. */ | 983 | /* |
984 | * Get position of the pointer to the left neighbor | ||
985 | * into the left father. | ||
986 | */ | ||
920 | left_neighbor_position = (father == tb->FL[h]) ? | 987 | left_neighbor_position = (father == tb->FL[h]) ? |
921 | tb->lkey[h] : B_NR_ITEMS(tb->FL[h]); | 988 | tb->lkey[h] : B_NR_ITEMS(tb->FL[h]); |
922 | /* Get left neighbor block number. */ | 989 | /* Get left neighbor block number. */ |
@@ -940,17 +1007,20 @@ static int is_left_neighbor_in_cache(struct tree_balance *tb, int h) | |||
940 | 1007 | ||
941 | static void decrement_key(struct cpu_key *key) | 1008 | static void decrement_key(struct cpu_key *key) |
942 | { | 1009 | { |
943 | // call item specific function for this key | 1010 | /* call item specific function for this key */ |
944 | item_ops[cpu_key_k_type(key)]->decrement_key(key); | 1011 | item_ops[cpu_key_k_type(key)]->decrement_key(key); |
945 | } | 1012 | } |
946 | 1013 | ||
947 | /* Calculate far left/right parent of the left/right neighbor of the current node, that | 1014 | /* |
948 | * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h]. | 1015 | * Calculate far left/right parent of the left/right neighbor of the |
1016 | * current node, that is calculate the left/right (FL[h]/FR[h]) neighbor | ||
1017 | * of the parent F[h]. | ||
949 | * Calculate left/right common parent of the current node and L[h]/R[h]. | 1018 | * Calculate left/right common parent of the current node and L[h]/R[h]. |
950 | * Calculate left/right delimiting key position. | 1019 | * Calculate left/right delimiting key position. |
951 | * Returns: PATH_INCORRECT - path in the tree is not correct; | 1020 | * Returns: PATH_INCORRECT - path in the tree is not correct |
952 | SCHEDULE_OCCURRED - schedule occurred while the function worked; | 1021 | * SCHEDULE_OCCURRED - schedule occurred while the function worked |
953 | * CARRY_ON - schedule didn't occur while the function worked; | 1022 | * CARRY_ON - schedule didn't occur while the function |
1023 | * worked | ||
954 | */ | 1024 | */ |
955 | static int get_far_parent(struct tree_balance *tb, | 1025 | static int get_far_parent(struct tree_balance *tb, |
956 | int h, | 1026 | int h, |
@@ -966,8 +1036,10 @@ static int get_far_parent(struct tree_balance *tb, | |||
966 | first_last_position = 0, | 1036 | first_last_position = 0, |
967 | path_offset = PATH_H_PATH_OFFSET(path, h); | 1037 | path_offset = PATH_H_PATH_OFFSET(path, h); |
968 | 1038 | ||
969 | /* Starting from F[h] go upwards in the tree, and look for the common | 1039 | /* |
970 | ancestor of F[h], and its neighbor l/r, that should be obtained. */ | 1040 | * Starting from F[h] go upwards in the tree, and look for the common |
1041 | * ancestor of F[h], and its neighbor l/r, that should be obtained. | ||
1042 | */ | ||
971 | 1043 | ||
972 | counter = path_offset; | 1044 | counter = path_offset; |
973 | 1045 | ||
@@ -975,21 +1047,33 @@ static int get_far_parent(struct tree_balance *tb, | |||
975 | "PAP-8180: invalid path length"); | 1047 | "PAP-8180: invalid path length"); |
976 | 1048 | ||
977 | for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) { | 1049 | for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) { |
978 | /* Check whether parent of the current buffer in the path is really parent in the tree. */ | 1050 | /* |
1051 | * Check whether parent of the current buffer in the path | ||
1052 | * is really parent in the tree. | ||
1053 | */ | ||
979 | if (!B_IS_IN_TREE | 1054 | if (!B_IS_IN_TREE |
980 | (parent = PATH_OFFSET_PBUFFER(path, counter - 1))) | 1055 | (parent = PATH_OFFSET_PBUFFER(path, counter - 1))) |
981 | return REPEAT_SEARCH; | 1056 | return REPEAT_SEARCH; |
1057 | |||
982 | /* Check whether position in the parent is correct. */ | 1058 | /* Check whether position in the parent is correct. */ |
983 | if ((position = | 1059 | if ((position = |
984 | PATH_OFFSET_POSITION(path, | 1060 | PATH_OFFSET_POSITION(path, |
985 | counter - 1)) > | 1061 | counter - 1)) > |
986 | B_NR_ITEMS(parent)) | 1062 | B_NR_ITEMS(parent)) |
987 | return REPEAT_SEARCH; | 1063 | return REPEAT_SEARCH; |
988 | /* Check whether parent at the path really points to the child. */ | 1064 | |
1065 | /* | ||
1066 | * Check whether parent at the path really points | ||
1067 | * to the child. | ||
1068 | */ | ||
989 | if (B_N_CHILD_NUM(parent, position) != | 1069 | if (B_N_CHILD_NUM(parent, position) != |
990 | PATH_OFFSET_PBUFFER(path, counter)->b_blocknr) | 1070 | PATH_OFFSET_PBUFFER(path, counter)->b_blocknr) |
991 | return REPEAT_SEARCH; | 1071 | return REPEAT_SEARCH; |
992 | /* Return delimiting key if position in the parent is not equal to first/last one. */ | 1072 | |
1073 | /* | ||
1074 | * Return delimiting key if position in the parent is not | ||
1075 | * equal to first/last one. | ||
1076 | */ | ||
993 | if (c_lr_par == RIGHT_PARENTS) | 1077 | if (c_lr_par == RIGHT_PARENTS) |
994 | first_last_position = B_NR_ITEMS(parent); | 1078 | first_last_position = B_NR_ITEMS(parent); |
995 | if (position != first_last_position) { | 1079 | if (position != first_last_position) { |
@@ -1002,7 +1086,10 @@ static int get_far_parent(struct tree_balance *tb, | |||
1002 | 1086 | ||
1003 | /* if we are in the root of the tree, then there is no common father */ | 1087 | /* if we are in the root of the tree, then there is no common father */ |
1004 | if (counter == FIRST_PATH_ELEMENT_OFFSET) { | 1088 | if (counter == FIRST_PATH_ELEMENT_OFFSET) { |
1005 | /* Check whether first buffer in the path is the root of the tree. */ | 1089 | /* |
1090 | * Check whether first buffer in the path is the | ||
1091 | * root of the tree. | ||
1092 | */ | ||
1006 | if (PATH_OFFSET_PBUFFER | 1093 | if (PATH_OFFSET_PBUFFER |
1007 | (tb->tb_path, | 1094 | (tb->tb_path, |
1008 | FIRST_PATH_ELEMENT_OFFSET)->b_blocknr == | 1095 | FIRST_PATH_ELEMENT_OFFSET)->b_blocknr == |
@@ -1031,12 +1118,15 @@ static int get_far_parent(struct tree_balance *tb, | |||
1031 | } | 1118 | } |
1032 | } | 1119 | } |
1033 | 1120 | ||
1034 | /* So, we got common parent of the current node and its left/right neighbor. | 1121 | /* |
1035 | Now we are geting the parent of the left/right neighbor. */ | 1122 | * So, we got common parent of the current node and its |
1123 | * left/right neighbor. Now we are getting the parent of the | ||
1124 | * left/right neighbor. | ||
1125 | */ | ||
1036 | 1126 | ||
1037 | /* Form key to get parent of the left/right neighbor. */ | 1127 | /* Form key to get parent of the left/right neighbor. */ |
1038 | le_key2cpu_key(&s_lr_father_key, | 1128 | le_key2cpu_key(&s_lr_father_key, |
1039 | B_N_PDELIM_KEY(*pcom_father, | 1129 | internal_key(*pcom_father, |
1040 | (c_lr_par == | 1130 | (c_lr_par == |
1041 | LEFT_PARENTS) ? (tb->lkey[h - 1] = | 1131 | LEFT_PARENTS) ? (tb->lkey[h - 1] = |
1042 | position - | 1132 | position - |
@@ -1050,7 +1140,7 @@ static int get_far_parent(struct tree_balance *tb, | |||
1050 | if (search_by_key | 1140 | if (search_by_key |
1051 | (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, | 1141 | (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, |
1052 | h + 1) == IO_ERROR) | 1142 | h + 1) == IO_ERROR) |
1053 | // path is released | 1143 | /* path is released */ |
1054 | return IO_ERROR; | 1144 | return IO_ERROR; |
1055 | 1145 | ||
1056 | if (FILESYSTEM_CHANGED_TB(tb)) { | 1146 | if (FILESYSTEM_CHANGED_TB(tb)) { |
@@ -1071,12 +1161,15 @@ static int get_far_parent(struct tree_balance *tb, | |||
1071 | return CARRY_ON; | 1161 | return CARRY_ON; |
1072 | } | 1162 | } |
1073 | 1163 | ||
1074 | /* Get parents of neighbors of node in the path(S[path_offset]) and common parents of | 1164 | /* |
1075 | * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset], | 1165 | * Get parents of neighbors of node in the path(S[path_offset]) and |
1076 | * FR[path_offset], CFL[path_offset], CFR[path_offset]. | 1166 | * common parents of S[path_offset] and L[path_offset]/R[path_offset]: |
1077 | * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset]. | 1167 | * F[path_offset], FL[path_offset], FR[path_offset], CFL[path_offset], |
1078 | * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; | 1168 | * CFR[path_offset]. |
1079 | * CARRY_ON - schedule didn't occur while the function worked; | 1169 | * Calculate numbers of left and right delimiting keys position: |
1170 | * lkey[path_offset], rkey[path_offset]. | ||
1171 | * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked | ||
1172 | * CARRY_ON - schedule didn't occur while the function worked | ||
1080 | */ | 1173 | */ |
1081 | static int get_parents(struct tree_balance *tb, int h) | 1174 | static int get_parents(struct tree_balance *tb, int h) |
1082 | { | 1175 | { |
@@ -1088,8 +1181,11 @@ static int get_parents(struct tree_balance *tb, int h) | |||
1088 | 1181 | ||
1089 | /* Current node is the root of the tree or will be root of the tree */ | 1182 | /* Current node is the root of the tree or will be root of the tree */ |
1090 | if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) { | 1183 | if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) { |
1091 | /* The root can not have parents. | 1184 | /* |
1092 | Release nodes which previously were obtained as parents of the current node neighbors. */ | 1185 | * The root can not have parents. |
1186 | * Release nodes which previously were obtained as | ||
1187 | * parents of the current node neighbors. | ||
1188 | */ | ||
1093 | brelse(tb->FL[h]); | 1189 | brelse(tb->FL[h]); |
1094 | brelse(tb->CFL[h]); | 1190 | brelse(tb->CFL[h]); |
1095 | brelse(tb->FR[h]); | 1191 | brelse(tb->FR[h]); |
@@ -1111,10 +1207,14 @@ static int get_parents(struct tree_balance *tb, int h) | |||
1111 | get_bh(curf); | 1207 | get_bh(curf); |
1112 | tb->lkey[h] = position - 1; | 1208 | tb->lkey[h] = position - 1; |
1113 | } else { | 1209 | } else { |
1114 | /* Calculate current parent of L[path_offset], which is the left neighbor of the current node. | 1210 | /* |
1115 | Calculate current common parent of L[path_offset] and the current node. Note that | 1211 | * Calculate current parent of L[path_offset], which is the |
1116 | CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset]. | 1212 | * left neighbor of the current node. Calculate current |
1117 | Calculate lkey[path_offset]. */ | 1213 | * common parent of L[path_offset] and the current node. |
1214 | * Note that CFL[path_offset] not equal FL[path_offset] and | ||
1215 | * CFL[path_offset] not equal F[path_offset]. | ||
1216 | * Calculate lkey[path_offset]. | ||
1217 | */ | ||
1118 | if ((ret = get_far_parent(tb, h + 1, &curf, | 1218 | if ((ret = get_far_parent(tb, h + 1, &curf, |
1119 | &curcf, | 1219 | &curcf, |
1120 | LEFT_PARENTS)) != CARRY_ON) | 1220 | LEFT_PARENTS)) != CARRY_ON) |
@@ -1130,19 +1230,22 @@ static int get_parents(struct tree_balance *tb, int h) | |||
1130 | (curcf && !B_IS_IN_TREE(curcf)), | 1230 | (curcf && !B_IS_IN_TREE(curcf)), |
1131 | "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf); | 1231 | "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf); |
1132 | 1232 | ||
1133 | /* Get parent FR[h] of R[h]. */ | 1233 | /* Get parent FR[h] of R[h]. */ |
1134 | 1234 | ||
1135 | /* Current node is the last child of F[h]. FR[h] != F[h]. */ | 1235 | /* Current node is the last child of F[h]. FR[h] != F[h]. */ |
1136 | if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) { | 1236 | if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) { |
1137 | /* Calculate current parent of R[h], which is the right neighbor of F[h]. | 1237 | /* |
1138 | Calculate current common parent of R[h] and current node. Note that CFR[h] | 1238 | * Calculate current parent of R[h], which is the right |
1139 | not equal FR[path_offset] and CFR[h] not equal F[h]. */ | 1239 | * neighbor of F[h]. Calculate current common parent of |
1240 | * R[h] and current node. Note that CFR[h] not equal | ||
1241 | * FR[path_offset] and CFR[h] not equal F[h]. | ||
1242 | */ | ||
1140 | if ((ret = | 1243 | if ((ret = |
1141 | get_far_parent(tb, h + 1, &curf, &curcf, | 1244 | get_far_parent(tb, h + 1, &curf, &curcf, |
1142 | RIGHT_PARENTS)) != CARRY_ON) | 1245 | RIGHT_PARENTS)) != CARRY_ON) |
1143 | return ret; | 1246 | return ret; |
1144 | } else { | 1247 | } else { |
1145 | /* Current node is not the last child of its parent F[h]. */ | 1248 | /* Current node is not the last child of its parent F[h]. */ |
1146 | curf = PATH_OFFSET_PBUFFER(path, path_offset - 1); | 1249 | curf = PATH_OFFSET_PBUFFER(path, path_offset - 1); |
1147 | curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1); | 1250 | curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1); |
1148 | get_bh(curf); | 1251 | get_bh(curf); |
@@ -1165,8 +1268,10 @@ static int get_parents(struct tree_balance *tb, int h) | |||
1165 | return CARRY_ON; | 1268 | return CARRY_ON; |
1166 | } | 1269 | } |
1167 | 1270 | ||
1168 | /* it is possible to remove node as result of shiftings to | 1271 | /* |
1169 | neighbors even when we insert or paste item. */ | 1272 | * it is possible to remove node as result of shiftings to |
1273 | * neighbors even when we insert or paste item. | ||
1274 | */ | ||
1170 | static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree, | 1275 | static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree, |
1171 | struct tree_balance *tb, int h) | 1276 | struct tree_balance *tb, int h) |
1172 | { | 1277 | { |
@@ -1175,21 +1280,22 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree, | |||
1175 | struct item_head *ih; | 1280 | struct item_head *ih; |
1176 | struct reiserfs_key *r_key = NULL; | 1281 | struct reiserfs_key *r_key = NULL; |
1177 | 1282 | ||
1178 | ih = B_N_PITEM_HEAD(Sh, 0); | 1283 | ih = item_head(Sh, 0); |
1179 | if (tb->CFR[h]) | 1284 | if (tb->CFR[h]) |
1180 | r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]); | 1285 | r_key = internal_key(tb->CFR[h], tb->rkey[h]); |
1181 | 1286 | ||
1182 | if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes | 1287 | if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes |
1183 | /* shifting may merge items which might save space */ | 1288 | /* shifting may merge items which might save space */ |
1184 | - | 1289 | - |
1185 | ((!h | 1290 | ((!h |
1186 | && op_is_left_mergeable(&(ih->ih_key), Sh->b_size)) ? IH_SIZE : 0) | 1291 | && op_is_left_mergeable(&ih->ih_key, Sh->b_size)) ? IH_SIZE : 0) |
1187 | - | 1292 | - |
1188 | ((!h && r_key | 1293 | ((!h && r_key |
1189 | && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0) | 1294 | && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0) |
1190 | + ((h) ? KEY_SIZE : 0)) { | 1295 | + ((h) ? KEY_SIZE : 0)) { |
1191 | /* node can not be removed */ | 1296 | /* node can not be removed */ |
1192 | if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */ | 1297 | if (sfree >= levbytes) { |
1298 | /* new item fits into node S[h] without any shifting */ | ||
1193 | if (!h) | 1299 | if (!h) |
1194 | tb->s0num = | 1300 | tb->s0num = |
1195 | B_NR_ITEMS(Sh) + | 1301 | B_NR_ITEMS(Sh) + |
@@ -1202,7 +1308,8 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree, | |||
1202 | return !NO_BALANCING_NEEDED; | 1308 | return !NO_BALANCING_NEEDED; |
1203 | } | 1309 | } |
1204 | 1310 | ||
1205 | /* Check whether current node S[h] is balanced when increasing its size by | 1311 | /* |
1312 | * Check whether current node S[h] is balanced when increasing its size by | ||
1206 | * Inserting or Pasting. | 1313 | * Inserting or Pasting. |
1207 | * Calculate parameters for balancing for current level h. | 1314 | * Calculate parameters for balancing for current level h. |
1208 | * Parameters: | 1315 | * Parameters: |
@@ -1219,39 +1326,48 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree, | |||
1219 | static int ip_check_balance(struct tree_balance *tb, int h) | 1326 | static int ip_check_balance(struct tree_balance *tb, int h) |
1220 | { | 1327 | { |
1221 | struct virtual_node *vn = tb->tb_vn; | 1328 | struct virtual_node *vn = tb->tb_vn; |
1222 | int levbytes, /* Number of bytes that must be inserted into (value | 1329 | /* |
1223 | is negative if bytes are deleted) buffer which | 1330 | * Number of bytes that must be inserted into (value is negative |
1224 | contains node being balanced. The mnemonic is | 1331 | * if bytes are deleted) buffer which contains node being balanced. |
1225 | that the attempted change in node space used level | 1332 | * The mnemonic is that the attempted change in node space used |
1226 | is levbytes bytes. */ | 1333 | * level is levbytes bytes. |
1227 | ret; | 1334 | */ |
1335 | int levbytes; | ||
1336 | int ret; | ||
1228 | 1337 | ||
1229 | int lfree, sfree, rfree /* free space in L, S and R */ ; | 1338 | int lfree, sfree, rfree /* free space in L, S and R */ ; |
1230 | 1339 | ||
1231 | /* nver is short for number of vertixes, and lnver is the number if | 1340 | /* |
1232 | we shift to the left, rnver is the number if we shift to the | 1341 | * nver is short for number of vertices, and lnver is the number if |
1233 | right, and lrnver is the number if we shift in both directions. | 1342 | * we shift to the left, rnver is the number if we shift to the |
1234 | The goal is to minimize first the number of vertixes, and second, | 1343 | * right, and lrnver is the number if we shift in both directions. |
1235 | the number of vertixes whose contents are changed by shifting, | 1344 | * The goal is to minimize first the number of vertices, and second, |
1236 | and third the number of uncached vertixes whose contents are | 1345 | * the number of vertices whose contents are changed by shifting, |
1237 | changed by shifting and must be read from disk. */ | 1346 | * and third the number of uncached vertices whose contents are |
1347 | * changed by shifting and must be read from disk. | ||
1348 | */ | ||
1238 | int nver, lnver, rnver, lrnver; | 1349 | int nver, lnver, rnver, lrnver; |
1239 | 1350 | ||
1240 | /* used at leaf level only, S0 = S[0] is the node being balanced, | 1351 | /* |
1241 | sInum [ I = 0,1,2 ] is the number of items that will | 1352 | * used at leaf level only, S0 = S[0] is the node being balanced, |
1242 | remain in node SI after balancing. S1 and S2 are new | 1353 | * sInum [ I = 0,1,2 ] is the number of items that will |
1243 | nodes that might be created. */ | 1354 | * remain in node SI after balancing. S1 and S2 are new |
1355 | * nodes that might be created. | ||
1356 | */ | ||
1244 | 1357 | ||
1245 | /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters. | 1358 | /* |
1246 | where 4th parameter is s1bytes and 5th - s2bytes | 1359 | * we perform 8 calls to get_num_ver(). For each call we |
1360 | * calculate five parameters, where the 4th parameter is s1bytes | ||
1361 | * and the 5th is s2bytes | ||
1362 | * | ||
1363 | * s0num, s1num, s2num for 8 cases | ||
1364 | * 0,1 - do not shift and do not shift but bottle | ||
1365 | * 2 - shift only whole item to left | ||
1366 | * 3 - shift to left and bottle as much as possible | ||
1367 | * 4,5 - shift to right (whole items and as much as possible) | ||
1368 | * 6,7 - shift to both directions (whole items and as much as possible) | ||
1247 | */ | 1369 | */ |
1248 | short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases | 1370 | short snum012[40] = { 0, }; |
1249 | 0,1 - do not shift and do not shift but bottle | ||
1250 | 2 - shift only whole item to left | ||
1251 | 3 - shift to left and bottle as much as possible | ||
1252 | 4,5 - shift to right (whole items and as much as possible | ||
1253 | 6,7 - shift to both directions (whole items and as much as possible) | ||
1254 | */ | ||
1255 | 1371 | ||
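
Going by the comment above, snum012[40] packs the answers of those eight get_num_ver() calls as eight groups of five shorts: s0num, s1num, s2num, then s1bytes and s2bytes, addressed through the *_SHIFT_* offsets defined further down (0, 5, 10, and presumably continuing in steps of five). A small layout sketch under that assumption; the struct and helper are mine, not kernel code.

    #include <stdio.h>

    /*
     * Assumed layout of one 5-short group inside snum012[]:
     *   [off + 0..2]  s0num, s1num, s2num - items kept in S0 / new nodes S1, S2
     *   [off + 3]     s1bytes             - bytes of the split item flowing to S1
     *   [off + 4]     s2bytes             - bytes of the split item flowing to S2
     */
    struct split_plan {
        short s0num, s1num, s2num;
        short s1bytes, s2bytes;
    };

    static struct split_plan read_case(const short snum012[40], int off)
    {
        struct split_plan p = {
            .s0num   = snum012[off + 0],
            .s1num   = snum012[off + 1],
            .s2num   = snum012[off + 2],
            .s1bytes = snum012[off + 3],
            .s2bytes = snum012[off + 4],
        };
        return p;
    }

    int main(void)
    {
        short snum012[40] = { 0, };

        /* pretend the "shift left and bottle" case (offset 15 here) kept 12/5/0 items */
        snum012[15] = 12; snum012[16] = 5; snum012[17] = 0; snum012[18] = 310;

        struct split_plan p = read_case(snum012, 15);
        printf("s0num=%d s1num=%d s2num=%d s1bytes=%d\n",
               p.s0num, p.s1num, p.s2num, p.s1bytes);
        return 0;
    }
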
1256 | /* Sh is the node whose balance is currently being checked */ | 1372 | /* Sh is the node whose balance is currently being checked */ |
1257 | struct buffer_head *Sh; | 1373 | struct buffer_head *Sh; |
@@ -1265,9 +1381,10 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1265 | reiserfs_panic(tb->tb_sb, "vs-8210", | 1381 | reiserfs_panic(tb->tb_sb, "vs-8210", |
1266 | "S[0] can not be 0"); | 1382 | "S[0] can not be 0"); |
1267 | switch (ret = get_empty_nodes(tb, h)) { | 1383 | switch (ret = get_empty_nodes(tb, h)) { |
1384 | /* no balancing for higher levels needed */ | ||
1268 | case CARRY_ON: | 1385 | case CARRY_ON: |
1269 | set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); | 1386 | set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); |
1270 | return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */ | 1387 | return NO_BALANCING_NEEDED; |
1271 | 1388 | ||
1272 | case NO_DISK_SPACE: | 1389 | case NO_DISK_SPACE: |
1273 | case REPEAT_SEARCH: | 1390 | case REPEAT_SEARCH: |
@@ -1278,7 +1395,9 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1278 | } | 1395 | } |
1279 | } | 1396 | } |
1280 | 1397 | ||
1281 | if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */ | 1398 | /* get parents of S[h] neighbors. */ |
1399 | ret = get_parents(tb, h); | ||
1400 | if (ret != CARRY_ON) | ||
1282 | return ret; | 1401 | return ret; |
1283 | 1402 | ||
1284 | sfree = B_FREE_SPACE(Sh); | 1403 | sfree = B_FREE_SPACE(Sh); |
@@ -1287,38 +1406,44 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1287 | rfree = get_rfree(tb, h); | 1406 | rfree = get_rfree(tb, h); |
1288 | lfree = get_lfree(tb, h); | 1407 | lfree = get_lfree(tb, h); |
1289 | 1408 | ||
1409 | /* and new item fits into node S[h] without any shifting */ | ||
1290 | if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) == | 1410 | if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) == |
1291 | NO_BALANCING_NEEDED) | 1411 | NO_BALANCING_NEEDED) |
1292 | /* and new item fits into node S[h] without any shifting */ | ||
1293 | return NO_BALANCING_NEEDED; | 1412 | return NO_BALANCING_NEEDED; |
1294 | 1413 | ||
1295 | create_virtual_node(tb, h); | 1414 | create_virtual_node(tb, h); |
1296 | 1415 | ||
1297 | /* | 1416 | /* |
1298 | determine maximal number of items we can shift to the left neighbor (in tb structure) | 1417 | * determine maximal number of items we can shift to the left |
1299 | and the maximal number of bytes that can flow to the left neighbor | 1418 | * neighbor (in tb structure) and the maximal number of bytes |
1300 | from the left most liquid item that cannot be shifted from S[0] entirely (returned value) | 1419 | * that can flow to the left neighbor from the left most liquid |
1420 | * item that cannot be shifted from S[0] entirely (returned value) | ||
1301 | */ | 1421 | */ |
1302 | check_left(tb, h, lfree); | 1422 | check_left(tb, h, lfree); |
1303 | 1423 | ||
1304 | /* | 1424 | /* |
1305 | determine maximal number of items we can shift to the right neighbor (in tb structure) | 1425 | * determine maximal number of items we can shift to the right |
1306 | and the maximal number of bytes that can flow to the right neighbor | 1426 | * neighbor (in tb structure) and the maximal number of bytes |
1307 | from the right most liquid item that cannot be shifted from S[0] entirely (returned value) | 1427 | * that can flow to the right neighbor from the right most liquid |
1428 | * item that cannot be shifted from S[0] entirely (returned value) | ||
1308 | */ | 1429 | */ |
1309 | check_right(tb, h, rfree); | 1430 | check_right(tb, h, rfree); |
1310 | 1431 | ||
1311 | /* all contents of internal node S[h] can be moved into its | 1432 | /* |
1312 | neighbors, S[h] will be removed after balancing */ | 1433 | * all contents of internal node S[h] can be moved into its |
1434 | * neighbors, S[h] will be removed after balancing | ||
1435 | */ | ||
1313 | if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) { | 1436 | if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) { |
1314 | int to_r; | 1437 | int to_r; |
1315 | 1438 | ||
1316 | /* Since we are working on internal nodes, and our internal | 1439 | /* |
1317 | nodes have fixed size entries, then we can balance by the | 1440 | * Since we are working on internal nodes, and our internal |
1318 | number of items rather than the space they consume. In this | 1441 | * nodes have fixed size entries, then we can balance by the |
1319 | routine we set the left node equal to the right node, | 1442 | * number of items rather than the space they consume. In this |
1320 | allowing a difference of less than or equal to 1 child | 1443 | * routine we set the left node equal to the right node, |
1321 | pointer. */ | 1444 | * allowing a difference of less than or equal to 1 child |
1445 | * pointer. | ||
1446 | */ | ||
1322 | to_r = | 1447 | to_r = |
1323 | ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] + | 1448 | ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] + |
1324 | vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - | 1449 | vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - |
@@ -1328,7 +1453,10 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1328 | return CARRY_ON; | 1453 | return CARRY_ON; |
1329 | } | 1454 | } |
1330 | 1455 | ||
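
The to_r expression just above is the even split in code form. Assuming, as this branch does, that lnum[h] and rnum[h] are the entries L[h] and R[h] can still absorb and that vn_nr_item + 1 entries have to be handed out, it picks to_r so that the two neighbors end up within one child pointer of each other. A standalone sketch with made-up numbers (the helper name is mine):

    #include <stdio.h>
    #include <stdlib.h>

    /* same expression as in ip_check_balance(), only reformatted */
    static int to_right_neighbor(int max_nr_key, int lnum, int rnum, int nr_item)
    {
        return ((max_nr_key << 1) + 2 - lnum - rnum + nr_item + 1) / 2 -
               (max_nr_key + 1 - rnum);
    }

    int main(void)
    {
        /* made-up capacities; precondition: lnum + rnum >= nr_item + 1 */
        int max_nr_key = 20, lnum = 9, rnum = 4, nr_item = 11;

        int to_r = to_right_neighbor(max_nr_key, lnum, rnum, nr_item);
        int to_l = nr_item + 1 - to_r;
        int l_fill = (max_nr_key - lnum) + to_l;   /* current fill + what L receives */
        int r_fill = (max_nr_key - rnum) + to_r;

        printf("to_l=%d to_r=%d -> L=%d R=%d (diff %d)\n",
               to_l, to_r, l_fill, r_fill, abs(l_fill - r_fill));
        return 0;
    }
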
1331 | /* this checks balance condition, that any two neighboring nodes can not fit in one node */ | 1456 | /* |
1457 | * this checks balance condition, that any two neighboring nodes | ||
1458 | * can not fit in one node | ||
1459 | */ | ||
1332 | RFALSE(h && | 1460 | RFALSE(h && |
1333 | (tb->lnum[h] >= vn->vn_nr_item + 1 || | 1461 | (tb->lnum[h] >= vn->vn_nr_item + 1 || |
1334 | tb->rnum[h] >= vn->vn_nr_item + 1), | 1462 | tb->rnum[h] >= vn->vn_nr_item + 1), |
@@ -1337,16 +1465,22 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1337 | (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))), | 1465 | (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))), |
1338 | "vs-8225: tree is not balanced on leaf level"); | 1466 | "vs-8225: tree is not balanced on leaf level"); |
1339 | 1467 | ||
1340 | /* all contents of S[0] can be moved into its neighbors | 1468 | /* |
1341 | S[0] will be removed after balancing. */ | 1469 | * all contents of S[0] can be moved into its neighbors |
1470 | * S[0] will be removed after balancing. | ||
1471 | */ | ||
1342 | if (!h && is_leaf_removable(tb)) | 1472 | if (!h && is_leaf_removable(tb)) |
1343 | return CARRY_ON; | 1473 | return CARRY_ON; |
1344 | 1474 | ||
1345 | /* why do we perform this check here rather than earlier?? | 1475 | /* |
1346 | Answer: we can win 1 node in some cases above. Moreover we | 1476 | * why do we perform this check here rather than earlier?? |
1347 | checked it above, when we checked, that S[0] is not removable | 1477 | * Answer: we can win 1 node in some cases above. Moreover we |
1348 | in principle */ | 1478 | * checked it above, when we checked, that S[0] is not removable |
1349 | if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */ | 1479 | * in principle |
1480 | */ | ||
1481 | |||
1482 | /* new item fits into node S[h] without any shifting */ | ||
1483 | if (sfree >= levbytes) { | ||
1350 | if (!h) | 1484 | if (!h) |
1351 | tb->s0num = vn->vn_nr_item; | 1485 | tb->s0num = vn->vn_nr_item; |
1352 | set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); | 1486 | set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); |
@@ -1355,18 +1489,19 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1355 | 1489 | ||
1356 | { | 1490 | { |
1357 | int lpar, rpar, nset, lset, rset, lrset; | 1491 | int lpar, rpar, nset, lset, rset, lrset; |
1358 | /* | 1492 | /* regular overflowing of the node */ |
1359 | * regular overflowing of the node | ||
1360 | */ | ||
1361 | 1493 | ||
1362 | /* get_num_ver works in 2 modes (FLOW & NO_FLOW) | 1494 | /* |
1363 | lpar, rpar - number of items we can shift to left/right neighbor (including splitting item) | 1495 | * get_num_ver works in 2 modes (FLOW & NO_FLOW) |
1364 | nset, lset, rset, lrset - shows, whether flowing items give better packing | 1496 | * lpar, rpar - number of items we can shift to left/right |
1497 | * neighbor (including splitting item) | ||
1498 | * nset, lset, rset, lrset - shows, whether flowing items | ||
1499 | * give better packing | ||
1365 | */ | 1500 | */ |
1366 | #define FLOW 1 | 1501 | #define FLOW 1 |
1367 | #define NO_FLOW 0 /* do not any splitting */ | 1502 | #define NO_FLOW 0 /* do no splitting */ |
1368 | 1503 | ||
1369 | /* we choose one the following */ | 1504 | /* we choose one of the following */ |
1370 | #define NOTHING_SHIFT_NO_FLOW 0 | 1505 | #define NOTHING_SHIFT_NO_FLOW 0 |
1371 | #define NOTHING_SHIFT_FLOW 5 | 1506 | #define NOTHING_SHIFT_FLOW 5 |
1372 | #define LEFT_SHIFT_NO_FLOW 10 | 1507 | #define LEFT_SHIFT_NO_FLOW 10 |
@@ -1379,10 +1514,13 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1379 | lpar = tb->lnum[h]; | 1514 | lpar = tb->lnum[h]; |
1380 | rpar = tb->rnum[h]; | 1515 | rpar = tb->rnum[h]; |
1381 | 1516 | ||
1382 | /* calculate number of blocks S[h] must be split into when | 1517 | /* |
1383 | nothing is shifted to the neighbors, | 1518 | * calculate number of blocks S[h] must be split into when |
1384 | as well as number of items in each part of the split node (s012 numbers), | 1519 | * nothing is shifted to the neighbors, as well as number of |
1385 | and number of bytes (s1bytes) of the shared drop which flow to S1 if any */ | 1520 | * items in each part of the split node (s012 numbers), |
1521 | * and number of bytes (s1bytes) of the shared drop which | ||
1522 | * flow to S1 if any | ||
1523 | */ | ||
1386 | nset = NOTHING_SHIFT_NO_FLOW; | 1524 | nset = NOTHING_SHIFT_NO_FLOW; |
1387 | nver = get_num_ver(vn->vn_mode, tb, h, | 1525 | nver = get_num_ver(vn->vn_mode, tb, h, |
1388 | 0, -1, h ? vn->vn_nr_item : 0, -1, | 1526 | 0, -1, h ? vn->vn_nr_item : 0, -1, |
@@ -1391,7 +1529,10 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1391 | if (!h) { | 1529 | if (!h) { |
1392 | int nver1; | 1530 | int nver1; |
1393 | 1531 | ||
1394 | /* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */ | 1532 | /* |
1533 | * note, that in this case we try to bottle | ||
1534 | * between S[0] and S1 (S1 - the first new node) | ||
1535 | */ | ||
1395 | nver1 = get_num_ver(vn->vn_mode, tb, h, | 1536 | nver1 = get_num_ver(vn->vn_mode, tb, h, |
1396 | 0, -1, 0, -1, | 1537 | 0, -1, 0, -1, |
1397 | snum012 + NOTHING_SHIFT_FLOW, FLOW); | 1538 | snum012 + NOTHING_SHIFT_FLOW, FLOW); |
@@ -1399,11 +1540,13 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1399 | nset = NOTHING_SHIFT_FLOW, nver = nver1; | 1540 | nset = NOTHING_SHIFT_FLOW, nver = nver1; |
1400 | } | 1541 | } |
1401 | 1542 | ||
1402 | /* calculate number of blocks S[h] must be split into when | 1543 | /* |
1403 | l_shift_num first items and l_shift_bytes of the right most | 1544 | * calculate number of blocks S[h] must be split into when |
1404 | liquid item to be shifted are shifted to the left neighbor, | 1545 | * l_shift_num first items and l_shift_bytes of the right |
1405 | as well as number of items in each part of the splitted node (s012 numbers), | 1546 | * most liquid item to be shifted are shifted to the left |
1406 | and number of bytes (s1bytes) of the shared drop which flow to S1 if any | 1547 | * neighbor, as well as number of items in each part of the |
1548 | * split node (s012 numbers), and number of bytes | ||
1549 | * (s1bytes) of the shared drop which flow to S1 if any | ||
1407 | */ | 1550 | */ |
1408 | lset = LEFT_SHIFT_NO_FLOW; | 1551 | lset = LEFT_SHIFT_NO_FLOW; |
1409 | lnver = get_num_ver(vn->vn_mode, tb, h, | 1552 | lnver = get_num_ver(vn->vn_mode, tb, h, |
@@ -1422,11 +1565,13 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1422 | lset = LEFT_SHIFT_FLOW, lnver = lnver1; | 1565 | lset = LEFT_SHIFT_FLOW, lnver = lnver1; |
1423 | } | 1566 | } |
1424 | 1567 | ||
1425 | /* calculate number of blocks S[h] must be split into when | 1568 | /* |
1426 | r_shift_num first items and r_shift_bytes of the left most | 1569 | * calculate number of blocks S[h] must be split into when |
1427 | liquid item to be shifted are shifted to the right neighbor, | 1570 | * r_shift_num first items and r_shift_bytes of the left most |
1428 | as well as number of items in each part of the splitted node (s012 numbers), | 1571 | * liquid item to be shifted are shifted to the right neighbor, |
1429 | and number of bytes (s1bytes) of the shared drop which flow to S1 if any | 1572 | * as well as number of items in each part of the split |
1573 | * node (s012 numbers), and number of bytes (s1bytes) of the | ||
1574 | * shared drop which flow to S1 if any | ||
1430 | */ | 1575 | */ |
1431 | rset = RIGHT_SHIFT_NO_FLOW; | 1576 | rset = RIGHT_SHIFT_NO_FLOW; |
1432 | rnver = get_num_ver(vn->vn_mode, tb, h, | 1577 | rnver = get_num_ver(vn->vn_mode, tb, h, |
@@ -1451,10 +1596,12 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1451 | rset = RIGHT_SHIFT_FLOW, rnver = rnver1; | 1596 | rset = RIGHT_SHIFT_FLOW, rnver = rnver1; |
1452 | } | 1597 | } |
1453 | 1598 | ||
1454 | /* calculate number of blocks S[h] must be split into when | 1599 | /* |
1455 | items are shifted in both directions, | 1600 | * calculate number of blocks S[h] must be split into when |
1456 | as well as number of items in each part of the splitted node (s012 numbers), | 1601 | * items are shifted in both directions, as well as number |
1457 | and number of bytes (s1bytes) of the shared drop which flow to S1 if any | 1602 | * of items in each part of the split node (s012 numbers), |
1603 | * and number of bytes (s1bytes) of the shared drop which | ||
1604 | * flow to S1 if any | ||
1458 | */ | 1605 | */ |
1459 | lrset = LR_SHIFT_NO_FLOW; | 1606 | lrset = LR_SHIFT_NO_FLOW; |
1460 | lrnver = get_num_ver(vn->vn_mode, tb, h, | 1607 | lrnver = get_num_ver(vn->vn_mode, tb, h, |
@@ -1481,10 +1628,12 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1481 | lrset = LR_SHIFT_FLOW, lrnver = lrnver1; | 1628 | lrset = LR_SHIFT_FLOW, lrnver = lrnver1; |
1482 | } | 1629 | } |
1483 | 1630 | ||
1484 | /* Our general shifting strategy is: | 1631 | /* |
1485 | 1) to minimized number of new nodes; | 1632 | * Our general shifting strategy is: |
1486 | 2) to minimized number of neighbors involved in shifting; | 1633 | * 1) to minimize the number of new nodes; |
1487 | 3) to minimized number of disk reads; */ | 1634 | * 2) to minimize the number of neighbors involved in shifting; |
1635 | * 3) to minimize the number of disk reads; | ||
1636 | */ | ||
1488 | 1637 | ||
1489 | /* we can win TWO or ONE nodes by shifting in both directions */ | 1638 | /* we can win TWO or ONE nodes by shifting in both directions */ |
1490 | if (lrnver < lnver && lrnver < rnver) { | 1639 | if (lrnver < lnver && lrnver < rnver) { |
@@ -1508,42 +1657,59 @@ static int ip_check_balance(struct tree_balance *tb, int h) | |||
1508 | return CARRY_ON; | 1657 | return CARRY_ON; |
1509 | } | 1658 | } |
1510 | 1659 | ||
1511 | /* if shifting doesn't lead to better packing then don't shift */ | 1660 | /* |
1661 | * if shifting doesn't lead to better packing | ||
1662 | * then don't shift | ||
1663 | */ | ||
1512 | if (nver == lrnver) { | 1664 | if (nver == lrnver) { |
1513 | set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1, | 1665 | set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1, |
1514 | -1); | 1666 | -1); |
1515 | return CARRY_ON; | 1667 | return CARRY_ON; |
1516 | } | 1668 | } |
1517 | 1669 | ||
1518 | /* now we know that for better packing shifting in only one | 1670 | /* |
1519 | direction either to the left or to the right is required */ | 1671 | * now we know that for better packing shifting in only one |
1672 | * direction either to the left or to the right is required | ||
1673 | */ | ||
1520 | 1674 | ||
1521 | /* if shifting to the left is better than shifting to the right */ | 1675 | /* |
1676 | * if shifting to the left is better than | ||
1677 | * shifting to the right | ||
1678 | */ | ||
1522 | if (lnver < rnver) { | 1679 | if (lnver < rnver) { |
1523 | SET_PAR_SHIFT_LEFT; | 1680 | SET_PAR_SHIFT_LEFT; |
1524 | return CARRY_ON; | 1681 | return CARRY_ON; |
1525 | } | 1682 | } |
1526 | 1683 | ||
1527 | /* if shifting to the right is better than shifting to the left */ | 1684 | /* |
1685 | * if shifting to the right is better than | ||
1686 | * shifting to the left | ||
1687 | */ | ||
1528 | if (lnver > rnver) { | 1688 | if (lnver > rnver) { |
1529 | SET_PAR_SHIFT_RIGHT; | 1689 | SET_PAR_SHIFT_RIGHT; |
1530 | return CARRY_ON; | 1690 | return CARRY_ON; |
1531 | } | 1691 | } |
1532 | 1692 | ||
1533 | /* now shifting in either direction gives the same number | 1693 | /* |
1534 | of nodes and we can make use of the cached neighbors */ | 1694 | * now shifting in either direction gives the same number |
1695 | * of nodes and we can make use of the cached neighbors | ||
1696 | */ | ||
1535 | if (is_left_neighbor_in_cache(tb, h)) { | 1697 | if (is_left_neighbor_in_cache(tb, h)) { |
1536 | SET_PAR_SHIFT_LEFT; | 1698 | SET_PAR_SHIFT_LEFT; |
1537 | return CARRY_ON; | 1699 | return CARRY_ON; |
1538 | } | 1700 | } |
1539 | 1701 | ||
1540 | /* shift to the right independently on whether the right neighbor in cache or not */ | 1702 | /* |
1703 | * shift to the right regardless of whether the | ||
1704 | * right neighbor is in cache or not | ||
1705 | */ | ||
1541 | SET_PAR_SHIFT_RIGHT; | 1706 | SET_PAR_SHIFT_RIGHT; |
1542 | return CARRY_ON; | 1707 | return CARRY_ON; |
1543 | } | 1708 | } |
1544 | } | 1709 | } |
1545 | 1710 | ||
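
The tail of ip_check_balance() above is a fixed preference order over the four node counts computed earlier: shifting both ways wins if it saves a node, otherwise prefer not disturbing the neighbors at all, then the single direction that packs better, and on a tie prefer a left neighbor that is already cached. Restated as a standalone decision function (the enum and function names are mine, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    enum shift_choice { SHIFT_BOTH, SHIFT_NONE, SHIFT_LEFT, SHIFT_RIGHT };

    /*
     * nver   - nodes needed with no shifting
     * lnver  - nodes needed shifting to the left neighbor
     * rnver  - nodes needed shifting to the right neighbor
     * lrnver - nodes needed shifting in both directions
     */
    static enum shift_choice pick_shift(int nver, int lnver, int rnver, int lrnver,
                                        bool left_neighbor_cached)
    {
        /* shifting both ways wins one or two nodes outright */
        if (lrnver < lnver && lrnver < rnver)
            return SHIFT_BOTH;

        /* shifting buys nothing over not shifting: leave the neighbors alone */
        if (nver == lrnver)
            return SHIFT_NONE;

        /* one direction packs better than the other */
        if (lnver < rnver)
            return SHIFT_LEFT;
        if (lnver > rnver)
            return SHIFT_RIGHT;

        /* tie: prefer the neighbor that is already in cache */
        return left_neighbor_cached ? SHIFT_LEFT : SHIFT_RIGHT;
    }

    int main(void)
    {
        printf("%d\n", pick_shift(3, 2, 2, 2, true));  /* SHIFT_LEFT via the cache tie-break */
        return 0;
    }

The ordering matches the strategy comment above: fewer new nodes first, then fewer touched neighbors, then fewer uncached reads.
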
1546 | /* Check whether current node S[h] is balanced when Decreasing its size by | 1711 | /* |
1712 | * Check whether current node S[h] is balanced when Decreasing its size by | ||
1547 | * Deleting or Cutting for INTERNAL node of S+tree. | 1713 | * Deleting or Cutting for INTERNAL node of S+tree. |
1548 | * Calculate parameters for balancing for current level h. | 1714 | * Calculate parameters for balancing for current level h. |
1549 | * Parameters: | 1715 | * Parameters: |
@@ -1563,8 +1729,10 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1563 | { | 1729 | { |
1564 | struct virtual_node *vn = tb->tb_vn; | 1730 | struct virtual_node *vn = tb->tb_vn; |
1565 | 1731 | ||
1566 | /* Sh is the node whose balance is currently being checked, | 1732 | /* |
1567 | and Fh is its father. */ | 1733 | * Sh is the node whose balance is currently being checked, |
1734 | * and Fh is its father. | ||
1735 | */ | ||
1568 | struct buffer_head *Sh, *Fh; | 1736 | struct buffer_head *Sh, *Fh; |
1569 | int maxsize, ret; | 1737 | int maxsize, ret; |
1570 | int lfree, rfree /* free space in L and R */ ; | 1738 | int lfree, rfree /* free space in L and R */ ; |
@@ -1574,19 +1742,25 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1574 | 1742 | ||
1575 | maxsize = MAX_CHILD_SIZE(Sh); | 1743 | maxsize = MAX_CHILD_SIZE(Sh); |
1576 | 1744 | ||
1577 | /* using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */ | 1745 | /* |
1578 | /* new_nr_item = number of items node would have if operation is */ | 1746 | * using tb->insert_size[h], which is negative in this case, |
1579 | /* performed without balancing (new_nr_item); */ | 1747 | * create_virtual_node calculates: |
1748 | * new_nr_item = number of items node would have if operation is | ||
1749 | * performed without balancing (new_nr_item); | ||
1750 | */ | ||
1580 | create_virtual_node(tb, h); | 1751 | create_virtual_node(tb, h); |
1581 | 1752 | ||
1582 | if (!Fh) { /* S[h] is the root. */ | 1753 | if (!Fh) { /* S[h] is the root. */ |
1754 | /* no balancing for higher levels needed */ | ||
1583 | if (vn->vn_nr_item > 0) { | 1755 | if (vn->vn_nr_item > 0) { |
1584 | set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); | 1756 | set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); |
1585 | return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */ | 1757 | return NO_BALANCING_NEEDED; |
1586 | } | 1758 | } |
1587 | /* new_nr_item == 0. | 1759 | /* |
1760 | * new_nr_item == 0. | ||
1588 | * Current root will be deleted resulting in | 1761 | * Current root will be deleted resulting in |
1589 | * decrementing the tree height. */ | 1762 | * decrementing the tree height. |
1763 | */ | ||
1590 | set_parameters(tb, h, 0, 0, 0, NULL, -1, -1); | 1764 | set_parameters(tb, h, 0, 0, 0, NULL, -1, -1); |
1591 | return CARRY_ON; | 1765 | return CARRY_ON; |
1592 | } | 1766 | } |
@@ -1602,12 +1776,18 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1602 | check_left(tb, h, lfree); | 1776 | check_left(tb, h, lfree); |
1603 | check_right(tb, h, rfree); | 1777 | check_right(tb, h, rfree); |
1604 | 1778 | ||
1605 | if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { /* Balance condition for the internal node is valid. | 1779 | /* |
1606 | * In this case we balance only if it leads to better packing. */ | 1780 | * Balance condition for the internal node is valid. |
1607 | if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { /* Here we join S[h] with one of its neighbors, | 1781 | * In this case we balance only if it leads to better packing. |
1608 | * which is impossible with greater values of new_nr_item. */ | 1782 | */ |
1783 | if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { | ||
1784 | /* | ||
1785 | * Here we join S[h] with one of its neighbors, | ||
1786 | * which is impossible with greater values of new_nr_item. | ||
1787 | */ | ||
1788 | if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { | ||
1789 | /* All contents of S[h] can be moved to L[h]. */ | ||
1609 | if (tb->lnum[h] >= vn->vn_nr_item + 1) { | 1790 | if (tb->lnum[h] >= vn->vn_nr_item + 1) { |
1610 | /* All contents of S[h] can be moved to L[h]. */ | ||
1611 | int n; | 1791 | int n; |
1612 | int order_L; | 1792 | int order_L; |
1613 | 1793 | ||
@@ -1623,8 +1803,8 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1623 | return CARRY_ON; | 1803 | return CARRY_ON; |
1624 | } | 1804 | } |
1625 | 1805 | ||
1806 | /* All contents of S[h] can be moved to R[h]. */ | ||
1626 | if (tb->rnum[h] >= vn->vn_nr_item + 1) { | 1807 | if (tb->rnum[h] >= vn->vn_nr_item + 1) { |
1627 | /* All contents of S[h] can be moved to R[h]. */ | ||
1628 | int n; | 1808 | int n; |
1629 | int order_R; | 1809 | int order_R; |
1630 | 1810 | ||
@@ -1641,8 +1821,11 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1641 | } | 1821 | } |
1642 | } | 1822 | } |
1643 | 1823 | ||
1824 | /* | ||
1825 | * All contents of S[h] can be moved to the neighbors | ||
1826 | * (L[h] & R[h]). | ||
1827 | */ | ||
1644 | if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) { | 1828 | if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) { |
1645 | /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */ | ||
1646 | int to_r; | 1829 | int to_r; |
1647 | 1830 | ||
1648 | to_r = | 1831 | to_r = |
@@ -1659,7 +1842,10 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1659 | return NO_BALANCING_NEEDED; | 1842 | return NO_BALANCING_NEEDED; |
1660 | } | 1843 | } |
1661 | 1844 | ||
1662 | /* Current node contain insufficient number of items. Balancing is required. */ | 1845 | /* |
1846 | * Current node contains an insufficient number of items. | ||
1847 | * Balancing is required. | ||
1848 | */ | ||
1663 | /* Check whether we can merge S[h] with left neighbor. */ | 1849 | /* Check whether we can merge S[h] with left neighbor. */ |
1664 | if (tb->lnum[h] >= vn->vn_nr_item + 1) | 1850 | if (tb->lnum[h] >= vn->vn_nr_item + 1) |
1665 | if (is_left_neighbor_in_cache(tb, h) | 1851 | if (is_left_neighbor_in_cache(tb, h) |
@@ -1726,7 +1912,8 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h) | |||
1726 | return CARRY_ON; | 1912 | return CARRY_ON; |
1727 | } | 1913 | } |
1728 | 1914 | ||
1729 | /* Check whether current node S[h] is balanced when Decreasing its size by | 1915 | /* |
1916 | * Check whether current node S[h] is balanced when Decreasing its size by | ||
1730 | * Deleting or Truncating for LEAF node of S+tree. | 1917 | * Deleting or Truncating for LEAF node of S+tree. |
1731 | * Calculate parameters for balancing for current level h. | 1918 | * Calculate parameters for balancing for current level h. |
1732 | * Parameters: | 1919 | * Parameters: |
@@ -1743,15 +1930,21 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) | |||
1743 | { | 1930 | { |
1744 | struct virtual_node *vn = tb->tb_vn; | 1931 | struct virtual_node *vn = tb->tb_vn; |
1745 | 1932 | ||
1746 | /* Number of bytes that must be deleted from | 1933 | /* |
1747 | (value is negative if bytes are deleted) buffer which | 1934 | * Number of bytes that must be deleted from |
1748 | contains node being balanced. The mnemonic is that the | 1935 | * (value is negative if bytes are deleted) buffer which |
1749 | attempted change in node space used level is levbytes bytes. */ | 1936 | * contains node being balanced. The mnemonic is that the |
1937 | * attempted change in node space used level is levbytes bytes. | ||
1938 | */ | ||
1750 | int levbytes; | 1939 | int levbytes; |
1940 | |||
1751 | /* the maximal item size */ | 1941 | /* the maximal item size */ |
1752 | int maxsize, ret; | 1942 | int maxsize, ret; |
1753 | /* S0 is the node whose balance is currently being checked, | 1943 | |
1754 | and F0 is its father. */ | 1944 | /* |
1945 | * S0 is the node whose balance is currently being checked, | ||
1946 | * and F0 is its father. | ||
1947 | */ | ||
1755 | struct buffer_head *S0, *F0; | 1948 | struct buffer_head *S0, *F0; |
1756 | int lfree, rfree /* free space in L and R */ ; | 1949 | int lfree, rfree /* free space in L and R */ ; |
1757 | 1950 | ||
@@ -1784,9 +1977,11 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) | |||
1784 | if (are_leaves_removable(tb, lfree, rfree)) | 1977 | if (are_leaves_removable(tb, lfree, rfree)) |
1785 | return CARRY_ON; | 1978 | return CARRY_ON; |
1786 | 1979 | ||
1787 | /* determine maximal number of items we can shift to the left/right neighbor | 1980 | /* |
1788 | and the maximal number of bytes that can flow to the left/right neighbor | 1981 | * determine maximal number of items we can shift to the left/right |
1789 | from the left/right most liquid item that cannot be shifted from S[0] entirely | 1982 | * neighbor and the maximal number of bytes that can flow to the |
1983 | * left/right neighbor from the left/right most liquid item that | ||
1984 | * cannot be shifted from S[0] entirely | ||
1790 | */ | 1985 | */ |
1791 | check_left(tb, h, lfree); | 1986 | check_left(tb, h, lfree); |
1792 | check_right(tb, h, rfree); | 1987 | check_right(tb, h, rfree); |
@@ -1810,7 +2005,10 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) | |||
1810 | return CARRY_ON; | 2005 | return CARRY_ON; |
1811 | } | 2006 | } |
1812 | 2007 | ||
1813 | /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */ | 2008 | /* |
2009 | * All contents of S[0] can be moved to the neighbors (L[0] & R[0]). | ||
2010 | * Set parameters and return | ||
2011 | */ | ||
1814 | if (is_leaf_removable(tb)) | 2012 | if (is_leaf_removable(tb)) |
1815 | return CARRY_ON; | 2013 | return CARRY_ON; |
1816 | 2014 | ||
@@ -1820,7 +2018,8 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h) | |||
1820 | return NO_BALANCING_NEEDED; | 2018 | return NO_BALANCING_NEEDED; |
1821 | } | 2019 | } |
1822 | 2020 | ||
1823 | /* Check whether current node S[h] is balanced when Decreasing its size by | 2021 | /* |
2022 | * Check whether current node S[h] is balanced when Decreasing its size by | ||
1824 | * Deleting or Cutting. | 2023 | * Deleting or Cutting. |
1825 | * Calculate parameters for balancing for current level h. | 2024 | * Calculate parameters for balancing for current level h. |
1826 | * Parameters: | 2025 | * Parameters: |
@@ -1844,15 +2043,16 @@ static int dc_check_balance(struct tree_balance *tb, int h) | |||
1844 | return dc_check_balance_leaf(tb, h); | 2043 | return dc_check_balance_leaf(tb, h); |
1845 | } | 2044 | } |
1846 | 2045 | ||
1847 | /* Check whether current node S[h] is balanced. | 2046 | /* |
2047 | * Check whether current node S[h] is balanced. | ||
1848 | * Calculate parameters for balancing for current level h. | 2048 | * Calculate parameters for balancing for current level h. |
1849 | * Parameters: | 2049 | * Parameters: |
1850 | * | 2050 | * |
1851 | * tb tree_balance structure: | 2051 | * tb tree_balance structure: |
1852 | * | 2052 | * |
1853 | * tb is a large structure that must be read about in the header file | 2053 | * tb is a large structure that must be read about in the header |
1854 | * at the same time as this procedure if the reader is to successfully | 2054 | * file at the same time as this procedure if the reader is |
1855 | * understand this procedure | 2055 | * to successfully understand this procedure |
1856 | * | 2056 | * |
1857 | * h current level of the node; | 2057 | * h current level of the node; |
1858 | * inum item number in S[h]; | 2058 | * inum item number in S[h]; |
@@ -1882,8 +2082,8 @@ static int check_balance(int mode, | |||
1882 | RFALSE(mode == M_INSERT && !vn->vn_ins_ih, | 2082 | RFALSE(mode == M_INSERT && !vn->vn_ins_ih, |
1883 | "vs-8255: ins_ih can not be 0 in insert mode"); | 2083 | "vs-8255: ins_ih can not be 0 in insert mode"); |
1884 | 2084 | ||
2085 | /* Calculate balance parameters when size of node is increasing. */ | ||
1885 | if (tb->insert_size[h] > 0) | 2086 | if (tb->insert_size[h] > 0) |
1886 | /* Calculate balance parameters when size of node is increasing. */ | ||
1887 | return ip_check_balance(tb, h); | 2087 | return ip_check_balance(tb, h); |
1888 | 2088 | ||
1889 | /* Calculate balance parameters when size of node is decreasing. */ | 2089 | /* Calculate balance parameters when size of node is decreasing. */ |
@@ -1911,21 +2111,23 @@ static int get_direct_parent(struct tree_balance *tb, int h) | |||
1911 | PATH_OFFSET_POSITION(path, path_offset - 1) = 0; | 2111 | PATH_OFFSET_POSITION(path, path_offset - 1) = 0; |
1912 | return CARRY_ON; | 2112 | return CARRY_ON; |
1913 | } | 2113 | } |
1914 | return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */ | 2114 | /* Root is changed and we must recalculate the path. */ |
2115 | return REPEAT_SEARCH; | ||
1915 | } | 2116 | } |
1916 | 2117 | ||
2118 | /* Parent in the path is not in the tree. */ | ||
1917 | if (!B_IS_IN_TREE | 2119 | if (!B_IS_IN_TREE |
1918 | (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1))) | 2120 | (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1))) |
1919 | return REPEAT_SEARCH; /* Parent in the path is not in the tree. */ | 2121 | return REPEAT_SEARCH; |
1920 | 2122 | ||
1921 | if ((position = | 2123 | if ((position = |
1922 | PATH_OFFSET_POSITION(path, | 2124 | PATH_OFFSET_POSITION(path, |
1923 | path_offset - 1)) > B_NR_ITEMS(bh)) | 2125 | path_offset - 1)) > B_NR_ITEMS(bh)) |
1924 | return REPEAT_SEARCH; | 2126 | return REPEAT_SEARCH; |
1925 | 2127 | ||
2128 | /* Parent in the path is not parent of the current node in the tree. */ | ||
1926 | if (B_N_CHILD_NUM(bh, position) != | 2129 | if (B_N_CHILD_NUM(bh, position) != |
1927 | PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr) | 2130 | PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr) |
1928 | /* Parent in the path is not parent of the current node in the tree. */ | ||
1929 | return REPEAT_SEARCH; | 2131 | return REPEAT_SEARCH; |
1930 | 2132 | ||
1931 | if (buffer_locked(bh)) { | 2133 | if (buffer_locked(bh)) { |
@@ -1936,10 +2138,15 @@ static int get_direct_parent(struct tree_balance *tb, int h) | |||
1936 | return REPEAT_SEARCH; | 2138 | return REPEAT_SEARCH; |
1937 | } | 2139 | } |
1938 | 2140 | ||
1939 | return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */ | 2141 | /* |
2142 | * Parent in the path is unlocked and really parent | ||
2143 | * of the current node. | ||
2144 | */ | ||
2145 | return CARRY_ON; | ||
1940 | } | 2146 | } |
1941 | 2147 | ||
1942 | /* Using lnum[h] and rnum[h] we should determine what neighbors | 2148 | /* |
2149 | * Using lnum[h] and rnum[h] we should determine what neighbors | ||
1943 | * of S[h] we | 2150 | * of S[h] we |
1944 | * need in order to balance S[h], and get them if necessary. | 2151 | * need in order to balance S[h], and get them if necessary. |
1945 | * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; | 2152 | * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked; |
@@ -1997,7 +2204,7 @@ static int get_neighbors(struct tree_balance *tb, int h) | |||
1997 | } | 2204 | } |
1998 | 2205 | ||
1999 | /* We need right neighbor to balance S[path_offset]. */ | 2206 | /* We need right neighbor to balance S[path_offset]. */ |
2000 | if (tb->rnum[h]) { /* We need right neighbor to balance S[path_offset]. */ | 2207 | if (tb->rnum[h]) { |
2001 | PROC_INFO_INC(sb, need_r_neighbor[h]); | 2208 | PROC_INFO_INC(sb, need_r_neighbor[h]); |
2002 | bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); | 2209 | bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); |
2003 | 2210 | ||
@@ -2053,9 +2260,11 @@ static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh) | |||
2053 | (max_num_of_entries - 1) * sizeof(__u16)); | 2260 | (max_num_of_entries - 1) * sizeof(__u16)); |
2054 | } | 2261 | } |
2055 | 2262 | ||
2056 | /* maybe we should fail balancing we are going to perform when kmalloc | 2263 | /* |
2057 | fails several times. But now it will loop until kmalloc gets | 2264 | * maybe we should fail balancing we are going to perform when kmalloc |
2058 | required memory */ | 2265 | * fails several times. But now it will loop until kmalloc gets |
2266 | * required memory | ||
2267 | */ | ||
2059 | static int get_mem_for_virtual_node(struct tree_balance *tb) | 2268 | static int get_mem_for_virtual_node(struct tree_balance *tb) |
2060 | { | 2269 | { |
2061 | int check_fs = 0; | 2270 | int check_fs = 0; |
@@ -2064,8 +2273,8 @@ static int get_mem_for_virtual_node(struct tree_balance *tb) | |||
2064 | 2273 | ||
2065 | size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path)); | 2274 | size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path)); |
2066 | 2275 | ||
2276 | /* we have to allocate more memory for virtual node */ | ||
2067 | if (size > tb->vn_buf_size) { | 2277 | if (size > tb->vn_buf_size) { |
2068 | /* we have to allocate more memory for virtual node */ | ||
2069 | if (tb->vn_buf) { | 2278 | if (tb->vn_buf) { |
2070 | /* free memory allocated before */ | 2279 | /* free memory allocated before */ |
2071 | kfree(tb->vn_buf); | 2280 | kfree(tb->vn_buf); |
@@ -2079,10 +2288,12 @@ static int get_mem_for_virtual_node(struct tree_balance *tb) | |||
2079 | /* get memory for virtual item */ | 2288 | /* get memory for virtual item */ |
2080 | buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN); | 2289 | buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN); |
2081 | if (!buf) { | 2290 | if (!buf) { |
2082 | /* getting memory with GFP_KERNEL priority may involve | 2291 | /* |
2083 | balancing now (due to indirect_to_direct conversion on | 2292 | * getting memory with GFP_KERNEL priority may involve |
2084 | dcache shrinking). So, release path and collected | 2293 | * balancing now (due to indirect_to_direct conversion |
2085 | resources here */ | 2294 | * on dcache shrinking). So, release path and collected |
2295 | * resources here | ||
2296 | */ | ||
2086 | free_buffers_in_tb(tb); | 2297 | free_buffers_in_tb(tb); |
2087 | buf = kmalloc(size, GFP_NOFS); | 2298 | buf = kmalloc(size, GFP_NOFS); |
2088 | if (!buf) { | 2299 | if (!buf) { |
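
The fallback in get_mem_for_virtual_node() is the classic two-step allocation: try the cheap non-blocking request first, and only after dropping every resource we hold fall back to an allocation that may block and trigger reclaim. A userspace analogue of that shape (try_alloc_fast() and release_held_resources() are stand-ins, not kernel interfaces):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *try_alloc_fast(size_t size)
    {
        /* models kmalloc(size, GFP_ATOMIC | __GFP_NOWARN) failing under pressure */
        (void)size;
        return NULL;
    }

    static void release_held_resources(void)
    {
        /* models free_buffers_in_tb(): give back the path and collected buffers */
    }

    /*
     * Two-step allocation: opportunistic attempt first; on failure release
     * what we hold, then retry with the variant that is allowed to sleep.
     * had_to_drop tells the caller it must re-check that nothing changed
     * while the resources were released.
     */
    static void *alloc_vn_buf(size_t size, bool *had_to_drop)
    {
        void *buf = try_alloc_fast(size);

        *had_to_drop = false;
        if (!buf) {
            release_held_resources();
            *had_to_drop = true;
            buf = malloc(size);        /* models kmalloc(size, GFP_NOFS) */
        }
        return buf;
    }

    int main(void)
    {
        bool dropped;
        void *buf = alloc_vn_buf(4096, &dropped);

        printf("buf=%p dropped=%d\n", buf, (int)dropped);
        free(buf);
        return 0;
    }
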
@@ -2168,8 +2379,10 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) | |||
2168 | for (i = tb->tb_path->path_length; | 2379 | for (i = tb->tb_path->path_length; |
2169 | !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) { | 2380 | !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) { |
2170 | if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) { | 2381 | if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) { |
2171 | /* if I understand correctly, we can only be sure the last buffer | 2382 | /* |
2172 | ** in the path is in the tree --clm | 2383 | * if I understand correctly, we can only |
2384 | * be sure the last buffer in the path is | ||
2385 | * in the tree --clm | ||
2173 | */ | 2386 | */ |
2174 | #ifdef CONFIG_REISERFS_CHECK | 2387 | #ifdef CONFIG_REISERFS_CHECK |
2175 | if (PATH_PLAST_BUFFER(tb->tb_path) == | 2388 | if (PATH_PLAST_BUFFER(tb->tb_path) == |
@@ -2256,13 +2469,15 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) | |||
2256 | } | 2469 | } |
2257 | } | 2470 | } |
2258 | } | 2471 | } |
2259 | /* as far as I can tell, this is not required. The FEB list seems | 2472 | |
2260 | ** to be full of newly allocated nodes, which will never be locked, | 2473 | /* |
2261 | ** dirty, or anything else. | 2474 | * as far as I can tell, this is not required. The FEB list |
2262 | ** To be safe, I'm putting in the checks and waits in. For the moment, | 2475 | * seems to be full of newly allocated nodes, which will |
2263 | ** they are needed to keep the code in journal.c from complaining | 2476 | * never be locked, dirty, or anything else. |
2264 | ** about the buffer. That code is inside CONFIG_REISERFS_CHECK as well. | 2477 | * To be safe, I'm putting in the checks and waits in. |
2265 | ** --clm | 2478 | * For the moment, they are needed to keep the code in |
2479 | * journal.c from complaining about the buffer. | ||
2480 | * That code is inside CONFIG_REISERFS_CHECK as well. --clm | ||
2266 | */ | 2481 | */ |
2267 | for (i = 0; !locked && i < MAX_FEB_SIZE; i++) { | 2482 | for (i = 0; !locked && i < MAX_FEB_SIZE; i++) { |
2268 | if (tb->FEB[i]) { | 2483 | if (tb->FEB[i]) { |
@@ -2300,7 +2515,8 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) | |||
2300 | return CARRY_ON; | 2515 | return CARRY_ON; |
2301 | } | 2516 | } |
2302 | 2517 | ||
2303 | /* Prepare for balancing, that is | 2518 | /* |
2519 | * Prepare for balancing, that is | ||
2304 | * get all necessary parents, and neighbors; | 2520 | * get all necessary parents, and neighbors; |
2305 | * analyze what and where should be moved; | 2521 | * analyze what and where should be moved; |
2306 | * get sufficient number of new nodes; | 2522 | * get sufficient number of new nodes; |
@@ -2309,13 +2525,14 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) | |||
2309 | * When ported to SMP kernels, only at the last moment after all needed nodes | 2525 | * When ported to SMP kernels, only at the last moment after all needed nodes |
2310 | * are collected in cache, will the resources be locked using the usual | 2526 | * are collected in cache, will the resources be locked using the usual |
2311 | * textbook ordered lock acquisition algorithms. Note that ensuring that | 2527 | * textbook ordered lock acquisition algorithms. Note that ensuring that |
2312 | * this code neither write locks what it does not need to write lock nor locks out of order | 2528 | * this code neither write locks what it does not need to write lock nor locks |
2313 | * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans | 2529 | * out of order will be a pain in the butt that could have been avoided. |
2530 | * Grumble grumble. -Hans | ||
2314 | * | 2531 | * |
2315 | * fix is meant in the sense of render unchanging | 2532 | * fix is meant in the sense of render unchanging |
2316 | * | 2533 | * |
2317 | * Latency might be improved by first gathering a list of what buffers are needed | 2534 | * Latency might be improved by first gathering a list of what buffers |
2318 | * and then getting as many of them in parallel as possible? -Hans | 2535 | * are needed and then getting as many of them in parallel as possible? -Hans |
2319 | * | 2536 | * |
2320 | * Parameters: | 2537 | * Parameters: |
2321 | * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append) | 2538 | * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append) |
@@ -2335,8 +2552,9 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2335 | int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path); | 2552 | int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path); |
2336 | int pos_in_item; | 2553 | int pos_in_item; |
2337 | 2554 | ||
2338 | /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared | 2555 | /* |
2339 | ** during wait_tb_buffers_run | 2556 | * we set wait_tb_buffers_run when we have to restore any dirty |
2557 | * bits cleared during wait_tb_buffers_run | ||
2340 | */ | 2558 | */ |
2341 | int wait_tb_buffers_run = 0; | 2559 | int wait_tb_buffers_run = 0; |
2342 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); | 2560 | struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); |
@@ -2347,14 +2565,15 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2347 | 2565 | ||
2348 | tb->fs_gen = get_generation(tb->tb_sb); | 2566 | tb->fs_gen = get_generation(tb->tb_sb); |
2349 | 2567 | ||
2350 | /* we prepare and log the super here so it will already be in the | 2568 | /* |
2351 | ** transaction when do_balance needs to change it. | 2569 | * we prepare and log the super here so it will already be in the |
2352 | ** This way do_balance won't have to schedule when trying to prepare | 2570 | * transaction when do_balance needs to change it. |
2353 | ** the super for logging | 2571 | * This way do_balance won't have to schedule when trying to prepare |
2572 | * the super for logging | ||
2354 | */ | 2573 | */ |
2355 | reiserfs_prepare_for_journal(tb->tb_sb, | 2574 | reiserfs_prepare_for_journal(tb->tb_sb, |
2356 | SB_BUFFER_WITH_SB(tb->tb_sb), 1); | 2575 | SB_BUFFER_WITH_SB(tb->tb_sb), 1); |
2357 | journal_mark_dirty(tb->transaction_handle, tb->tb_sb, | 2576 | journal_mark_dirty(tb->transaction_handle, |
2358 | SB_BUFFER_WITH_SB(tb->tb_sb)); | 2577 | SB_BUFFER_WITH_SB(tb->tb_sb)); |
2359 | if (FILESYSTEM_CHANGED_TB(tb)) | 2578 | if (FILESYSTEM_CHANGED_TB(tb)) |
2360 | return REPEAT_SEARCH; | 2579 | return REPEAT_SEARCH; |
@@ -2408,7 +2627,7 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2408 | #endif | 2627 | #endif |
2409 | 2628 | ||
2410 | if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH) | 2629 | if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH) |
2411 | // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat | 2630 | /* FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat */ |
2412 | return REPEAT_SEARCH; | 2631 | return REPEAT_SEARCH; |
2413 | 2632 | ||
2414 | /* Starting from the leaf level; for all levels h of the tree. */ | 2633 | /* Starting from the leaf level; for all levels h of the tree. */ |
@@ -2427,7 +2646,10 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2427 | goto repeat; | 2646 | goto repeat; |
2428 | if (h != MAX_HEIGHT - 1) | 2647 | if (h != MAX_HEIGHT - 1) |
2429 | tb->insert_size[h + 1] = 0; | 2648 | tb->insert_size[h + 1] = 0; |
2430 | /* ok, analysis and resource gathering are complete */ | 2649 | /* |
2650 | * ok, analysis and resource gathering | ||
2651 | * are complete | ||
2652 | */ | ||
2431 | break; | 2653 | break; |
2432 | } | 2654 | } |
2433 | goto repeat; | 2655 | goto repeat; |
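
Stripped of the buffer and journal handling, the loop these hunks belong to has a simple shape: walk the levels up from the leaf, let check_balance() decide what each level needs, stop as soon as a level reports NO_BALANCING_NEEDED, and fall through to the repeat path whenever a helper cannot carry on. A skeleton of that control flow (stub return codes and helpers, not the real ones; the real loop also propagates insert_size between levels and restores dirty bits on the way out):

    #include <stdio.h>

    /* stand-ins for the real return codes and per-level helpers */
    enum { CARRY_ON, REPEAT_SEARCH, NO_DISK_SPACE, NO_BALANCING_NEEDED };
    #define MAX_HEIGHT 5

    static int check_balance_level(int h)
    {
        return h < 2 ? CARRY_ON : NO_BALANCING_NEEDED;  /* pretend two levels need work */
    }

    static int gather_level_resources(int h)
    {
        (void)h;
        return CARRY_ON;    /* parents, neighbors, empty nodes for this level */
    }

    static int fix_nodes_sketch(void)
    {
        int h, ret = CARRY_ON;

        /* starting from the leaf level, for all levels h of the tree */
        for (h = 0; h < MAX_HEIGHT; h++) {
            ret = check_balance_level(h);
            if (ret == NO_BALANCING_NEEDED)
                break;                      /* higher levels stay untouched */
            if (ret != CARRY_ON)
                goto repeat;                /* fs changed, no space, ... */

            ret = gather_level_resources(h);
            if (ret != CARRY_ON)
                goto repeat;
        }
        return CARRY_ON;                    /* analysis and resources complete */

    repeat:
        /* release everything gathered so far; the caller redoes the search */
        return ret;
    }

    int main(void)
    {
        printf("%d\n", fix_nodes_sketch());
        return 0;
    }
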
@@ -2437,15 +2659,19 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2437 | if (ret != CARRY_ON) | 2659 | if (ret != CARRY_ON) |
2438 | goto repeat; | 2660 | goto repeat; |
2439 | 2661 | ||
2440 | /* No disk space, or schedule occurred and analysis may be | 2662 | /* |
2441 | * invalid and needs to be redone. */ | 2663 | * No disk space, or schedule occurred and analysis may be |
2664 | * invalid and needs to be redone. | ||
2665 | */ | ||
2442 | ret = get_empty_nodes(tb, h); | 2666 | ret = get_empty_nodes(tb, h); |
2443 | if (ret != CARRY_ON) | 2667 | if (ret != CARRY_ON) |
2444 | goto repeat; | 2668 | goto repeat; |
2445 | 2669 | ||
2670 | /* | ||
2671 | * We have a positive insert size but no nodes exist on this | ||
2672 | * level, this means that we are creating a new root. | ||
2673 | */ | ||
2446 | if (!PATH_H_PBUFFER(tb->tb_path, h)) { | 2674 | if (!PATH_H_PBUFFER(tb->tb_path, h)) { |
2447 | /* We have a positive insert size but no nodes exist on this | ||
2448 | level, this means that we are creating a new root. */ | ||
2449 | 2675 | ||
2450 | RFALSE(tb->blknum[h] != 1, | 2676 | RFALSE(tb->blknum[h] != 1, |
2451 | "PAP-8350: creating new empty root"); | 2677 | "PAP-8350: creating new empty root"); |
@@ -2453,11 +2679,13 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2453 | if (h < MAX_HEIGHT - 1) | 2679 | if (h < MAX_HEIGHT - 1) |
2454 | tb->insert_size[h + 1] = 0; | 2680 | tb->insert_size[h + 1] = 0; |
2455 | } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) { | 2681 | } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) { |
2682 | /* | ||
2683 | * The tree needs to be grown, so this node S[h] | ||
2684 | * which is the root node is split into two nodes, | ||
2685 | * and a new node (S[h+1]) will be created to | ||
2686 | * become the root node. | ||
2687 | */ | ||
2456 | if (tb->blknum[h] > 1) { | 2688 | if (tb->blknum[h] > 1) { |
2457 | /* The tree needs to be grown, so this node S[h] | ||
2458 | which is the root node is split into two nodes, | ||
2459 | and a new node (S[h+1]) will be created to | ||
2460 | become the root node. */ | ||
2461 | 2689 | ||
2462 | RFALSE(h == MAX_HEIGHT - 1, | 2690 | RFALSE(h == MAX_HEIGHT - 1, |
2463 | "PAP-8355: attempt to create too high of a tree"); | 2691 | "PAP-8355: attempt to create too high of a tree"); |
@@ -2487,12 +2715,14 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2487 | goto repeat; | 2715 | goto repeat; |
2488 | } | 2716 | } |
2489 | 2717 | ||
2490 | repeat: | 2718 | repeat: |
2491 | // fix_nodes was unable to perform its calculation due to | 2719 | /* |
2492 | // filesystem got changed under us, lack of free disk space or i/o | 2720 | * fix_nodes was unable to perform its calculation due to |
2493 | // failure. If the first is the case - the search will be | 2721 | * the filesystem changing under us, lack of free disk space or i/o |
2494 | // repeated. For now - free all resources acquired so far except | 2722 | * failure. If the first is the case - the search will be |
2495 | // for the new allocated nodes | 2723 | * repeated. For now - free all resources acquired so far except |
2724 | * for the newly allocated nodes | ||
2725 | */ | ||
2496 | { | 2726 | { |
2497 | int i; | 2727 | int i; |
2498 | 2728 | ||
@@ -2548,8 +2778,6 @@ int fix_nodes(int op_mode, struct tree_balance *tb, | |||
2548 | 2778 | ||
2549 | } | 2779 | } |
2550 | 2780 | ||
2551 | /* Anatoly will probably forgive me renaming tb to tb. I just | ||
2552 | wanted to make lines shorter */ | ||
2553 | void unfix_nodes(struct tree_balance *tb) | 2781 | void unfix_nodes(struct tree_balance *tb) |
2554 | { | 2782 | { |
2555 | int i; | 2783 | int i; |
@@ -2578,8 +2806,10 @@ void unfix_nodes(struct tree_balance *tb) | |||
2578 | for (i = 0; i < MAX_FEB_SIZE; i++) { | 2806 | for (i = 0; i < MAX_FEB_SIZE; i++) { |
2579 | if (tb->FEB[i]) { | 2807 | if (tb->FEB[i]) { |
2580 | b_blocknr_t blocknr = tb->FEB[i]->b_blocknr; | 2808 | b_blocknr_t blocknr = tb->FEB[i]->b_blocknr; |
2581 | /* de-allocated block which was not used by balancing and | 2809 | /* |
2582 | bforget about buffer for it */ | 2810 | * de-allocate the block which was not used by |
2811 | * balancing and bforget the buffer for it | ||
2812 | */ | ||
2583 | brelse(tb->FEB[i]); | 2813 | brelse(tb->FEB[i]); |
2584 | reiserfs_free_block(tb->transaction_handle, NULL, | 2814 | reiserfs_free_block(tb->transaction_handle, NULL, |
2585 | blocknr, 0); | 2815 | blocknr, 0); |