author      Anton Altaparmakov <aia21@cantab.net>  2005-07-13 18:09:23 -0400
committer   Anton Altaparmakov <aia21@cantab.net>  2005-07-13 18:09:23 -0400
commit      c514720716c7b109ff980f8b3cb93f9af872c91c (patch)
tree        490a9578995705de69712893a190b67651bddc56 /fs/reiserfs/ibalance.c
parent      07929dcb963786512c760dd3ecd148d89295e7e5 (diff)
parent      1e279dd855d15b72364b4103f872d67d8592647e (diff)
Automatic merge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'fs/reiserfs/ibalance.c')
-rw-r--r--  fs/reiserfs/ibalance.c  1844
1 file changed, 938 insertions, 906 deletions
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index a362125da0d8..6c5a726fd34b 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -10,13 +10,8 @@
10#include <linux/buffer_head.h> 10#include <linux/buffer_head.h>
11 11
12/* this is one and only function that is used outside (do_balance.c) */ 12/* this is one and only function that is used outside (do_balance.c) */
13int balance_internal ( 13int balance_internal(struct tree_balance *,
14 struct tree_balance * , 14 int, int, struct item_head *, struct buffer_head **);
15 int,
16 int,
17 struct item_head * ,
18 struct buffer_head **
19 );
20 15
21/* modes of internal_shift_left, internal_shift_right and internal_insert_childs */ 16/* modes of internal_shift_left, internal_shift_right and internal_insert_childs */
22#define INTERNAL_SHIFT_FROM_S_TO_L 0 17#define INTERNAL_SHIFT_FROM_S_TO_L 0
@@ -27,464 +22,474 @@ int balance_internal (
27#define INTERNAL_INSERT_TO_L 5 22#define INTERNAL_INSERT_TO_L 5
28#define INTERNAL_INSERT_TO_R 6 23#define INTERNAL_INSERT_TO_R 6
29 24
30static void internal_define_dest_src_infos ( 25static void internal_define_dest_src_infos(int shift_mode,
31 int shift_mode, 26 struct tree_balance *tb,
32 struct tree_balance * tb, 27 int h,
33 int h, 28 struct buffer_info *dest_bi,
34 struct buffer_info * dest_bi, 29 struct buffer_info *src_bi,
35 struct buffer_info * src_bi, 30 int *d_key, struct buffer_head **cf)
36 int * d_key,
37 struct buffer_head ** cf
38 )
39{ 31{
40 memset (dest_bi, 0, sizeof (struct buffer_info)); 32 memset(dest_bi, 0, sizeof(struct buffer_info));
41 memset (src_bi, 0, sizeof (struct buffer_info)); 33 memset(src_bi, 0, sizeof(struct buffer_info));
42 /* define dest, src, dest parent, dest position */ 34 /* define dest, src, dest parent, dest position */
43 switch (shift_mode) { 35 switch (shift_mode) {
44 case INTERNAL_SHIFT_FROM_S_TO_L: /* used in internal_shift_left */ 36 case INTERNAL_SHIFT_FROM_S_TO_L: /* used in internal_shift_left */
45 src_bi->tb = tb; 37 src_bi->tb = tb;
46 src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); 38 src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
47 src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); 39 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
48 src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); 40 src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
49 dest_bi->tb = tb; 41 dest_bi->tb = tb;
50 dest_bi->bi_bh = tb->L[h]; 42 dest_bi->bi_bh = tb->L[h];
51 dest_bi->bi_parent = tb->FL[h]; 43 dest_bi->bi_parent = tb->FL[h];
52 dest_bi->bi_position = get_left_neighbor_position (tb, h); 44 dest_bi->bi_position = get_left_neighbor_position(tb, h);
53 *d_key = tb->lkey[h]; 45 *d_key = tb->lkey[h];
54 *cf = tb->CFL[h]; 46 *cf = tb->CFL[h];
55 break; 47 break;
56 case INTERNAL_SHIFT_FROM_L_TO_S: 48 case INTERNAL_SHIFT_FROM_L_TO_S:
57 src_bi->tb = tb; 49 src_bi->tb = tb;
58 src_bi->bi_bh = tb->L[h]; 50 src_bi->bi_bh = tb->L[h];
59 src_bi->bi_parent = tb->FL[h]; 51 src_bi->bi_parent = tb->FL[h];
60 src_bi->bi_position = get_left_neighbor_position (tb, h); 52 src_bi->bi_position = get_left_neighbor_position(tb, h);
61 dest_bi->tb = tb; 53 dest_bi->tb = tb;
62 dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); 54 dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
63 dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); 55 dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
64 dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); /* dest position is analog of dest->b_item_order */ 56 dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); /* dest position is analog of dest->b_item_order */
65 *d_key = tb->lkey[h]; 57 *d_key = tb->lkey[h];
66 *cf = tb->CFL[h]; 58 *cf = tb->CFL[h];
67 break; 59 break;
68 60
69 case INTERNAL_SHIFT_FROM_R_TO_S: /* used in internal_shift_left */ 61 case INTERNAL_SHIFT_FROM_R_TO_S: /* used in internal_shift_left */
70 src_bi->tb = tb; 62 src_bi->tb = tb;
71 src_bi->bi_bh = tb->R[h]; 63 src_bi->bi_bh = tb->R[h];
72 src_bi->bi_parent = tb->FR[h]; 64 src_bi->bi_parent = tb->FR[h];
73 src_bi->bi_position = get_right_neighbor_position (tb, h); 65 src_bi->bi_position = get_right_neighbor_position(tb, h);
74 dest_bi->tb = tb; 66 dest_bi->tb = tb;
75 dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); 67 dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
76 dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); 68 dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
77 dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); 69 dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
78 *d_key = tb->rkey[h]; 70 *d_key = tb->rkey[h];
79 *cf = tb->CFR[h]; 71 *cf = tb->CFR[h];
80 break; 72 break;
81 73
82 case INTERNAL_SHIFT_FROM_S_TO_R: 74 case INTERNAL_SHIFT_FROM_S_TO_R:
83 src_bi->tb = tb; 75 src_bi->tb = tb;
84 src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); 76 src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
85 src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); 77 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
86 src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); 78 src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
87 dest_bi->tb = tb; 79 dest_bi->tb = tb;
88 dest_bi->bi_bh = tb->R[h]; 80 dest_bi->bi_bh = tb->R[h];
89 dest_bi->bi_parent = tb->FR[h]; 81 dest_bi->bi_parent = tb->FR[h];
90 dest_bi->bi_position = get_right_neighbor_position (tb, h); 82 dest_bi->bi_position = get_right_neighbor_position(tb, h);
91 *d_key = tb->rkey[h]; 83 *d_key = tb->rkey[h];
92 *cf = tb->CFR[h]; 84 *cf = tb->CFR[h];
93 break; 85 break;
94 86
95 case INTERNAL_INSERT_TO_L: 87 case INTERNAL_INSERT_TO_L:
96 dest_bi->tb = tb; 88 dest_bi->tb = tb;
97 dest_bi->bi_bh = tb->L[h]; 89 dest_bi->bi_bh = tb->L[h];
98 dest_bi->bi_parent = tb->FL[h]; 90 dest_bi->bi_parent = tb->FL[h];
99 dest_bi->bi_position = get_left_neighbor_position (tb, h); 91 dest_bi->bi_position = get_left_neighbor_position(tb, h);
100 break; 92 break;
101 93
102 case INTERNAL_INSERT_TO_S: 94 case INTERNAL_INSERT_TO_S:
103 dest_bi->tb = tb; 95 dest_bi->tb = tb;
104 dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); 96 dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
105 dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); 97 dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
106 dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); 98 dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
107 break; 99 break;
108 100
109 case INTERNAL_INSERT_TO_R: 101 case INTERNAL_INSERT_TO_R:
110 dest_bi->tb = tb; 102 dest_bi->tb = tb;
111 dest_bi->bi_bh = tb->R[h]; 103 dest_bi->bi_bh = tb->R[h];
112 dest_bi->bi_parent = tb->FR[h]; 104 dest_bi->bi_parent = tb->FR[h];
113 dest_bi->bi_position = get_right_neighbor_position (tb, h); 105 dest_bi->bi_position = get_right_neighbor_position(tb, h);
114 break; 106 break;
115 107
116 default: 108 default:
117 reiserfs_panic (tb->tb_sb, "internal_define_dest_src_infos: shift type is unknown (%d)", shift_mode); 109 reiserfs_panic(tb->tb_sb,
118 } 110 "internal_define_dest_src_infos: shift type is unknown (%d)",
111 shift_mode);
112 }
119} 113}
120 114
121
122
123/* Insert count node pointers into buffer cur before position to + 1. 115/* Insert count node pointers into buffer cur before position to + 1.
124 * Insert count items into buffer cur before position to. 116 * Insert count items into buffer cur before position to.
125 * Items and node pointers are specified by inserted and bh respectively. 117 * Items and node pointers are specified by inserted and bh respectively.
126 */ 118 */
127static void internal_insert_childs (struct buffer_info * cur_bi, 119static void internal_insert_childs(struct buffer_info *cur_bi,
128 int to, int count, 120 int to, int count,
129 struct item_head * inserted, 121 struct item_head *inserted,
130 struct buffer_head ** bh 122 struct buffer_head **bh)
131 )
132{ 123{
133 struct buffer_head * cur = cur_bi->bi_bh; 124 struct buffer_head *cur = cur_bi->bi_bh;
134 struct block_head * blkh; 125 struct block_head *blkh;
135 int nr; 126 int nr;
136 struct reiserfs_key * ih; 127 struct reiserfs_key *ih;
137 struct disk_child new_dc[2]; 128 struct disk_child new_dc[2];
138 struct disk_child * dc; 129 struct disk_child *dc;
139 int i; 130 int i;
140 131
141 if (count <= 0) 132 if (count <= 0)
142 return; 133 return;
143 134
144 blkh = B_BLK_HEAD(cur); 135 blkh = B_BLK_HEAD(cur);
145 nr = blkh_nr_item(blkh); 136 nr = blkh_nr_item(blkh);
146 137
147 RFALSE( count > 2, 138 RFALSE(count > 2, "too many children (%d) are to be inserted", count);
148 "too many children (%d) are to be inserted", count); 139 RFALSE(B_FREE_SPACE(cur) < count * (KEY_SIZE + DC_SIZE),
149 RFALSE( B_FREE_SPACE (cur) < count * (KEY_SIZE + DC_SIZE), 140 "no enough free space (%d), needed %d bytes",
150 "no enough free space (%d), needed %d bytes", 141 B_FREE_SPACE(cur), count * (KEY_SIZE + DC_SIZE));
151 B_FREE_SPACE (cur), count * (KEY_SIZE + DC_SIZE)); 142
152 143 /* prepare space for count disk_child */
153 /* prepare space for count disk_child */ 144 dc = B_N_CHILD(cur, to + 1);
154 dc = B_N_CHILD(cur,to+1); 145
155 146 memmove(dc + count, dc, (nr + 1 - (to + 1)) * DC_SIZE);
156 memmove (dc + count, dc, (nr+1-(to+1)) * DC_SIZE); 147
157 148 /* copy to_be_insert disk children */
158 /* copy to_be_insert disk children */ 149 for (i = 0; i < count; i++) {
159 for (i = 0; i < count; i ++) { 150 put_dc_size(&(new_dc[i]),
160 put_dc_size( &(new_dc[i]), MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE(bh[i])); 151 MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE(bh[i]));
161 put_dc_block_number( &(new_dc[i]), bh[i]->b_blocknr ); 152 put_dc_block_number(&(new_dc[i]), bh[i]->b_blocknr);
162 } 153 }
163 memcpy (dc, new_dc, DC_SIZE * count); 154 memcpy(dc, new_dc, DC_SIZE * count);
164 155
165 156 /* prepare space for count items */
166 /* prepare space for count items */ 157 ih = B_N_PDELIM_KEY(cur, ((to == -1) ? 0 : to));
167 ih = B_N_PDELIM_KEY (cur, ((to == -1) ? 0 : to)); 158
168 159 memmove(ih + count, ih,
169 memmove (ih + count, ih, (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE); 160 (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);
170 161
171 /* copy item headers (keys) */ 162 /* copy item headers (keys) */
172 memcpy (ih, inserted, KEY_SIZE); 163 memcpy(ih, inserted, KEY_SIZE);
173 if ( count > 1 ) 164 if (count > 1)
174 memcpy (ih + 1, inserted + 1, KEY_SIZE); 165 memcpy(ih + 1, inserted + 1, KEY_SIZE);
175 166
176 /* sizes, item number */ 167 /* sizes, item number */
177 set_blkh_nr_item( blkh, blkh_nr_item(blkh) + count ); 168 set_blkh_nr_item(blkh, blkh_nr_item(blkh) + count);
178 set_blkh_free_space( blkh, 169 set_blkh_free_space(blkh,
179 blkh_free_space(blkh) - count * (DC_SIZE + KEY_SIZE ) ); 170 blkh_free_space(blkh) - count * (DC_SIZE +
180 171 KEY_SIZE));
181 do_balance_mark_internal_dirty (cur_bi->tb, cur,0); 172
182 173 do_balance_mark_internal_dirty(cur_bi->tb, cur, 0);
183 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 174
184 check_internal (cur); 175 /*&&&&&&&&&&&&&&&&&&&&&&&& */
185 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 176 check_internal(cur);
186 177 /*&&&&&&&&&&&&&&&&&&&&&&&& */
187 if (cur_bi->bi_parent) { 178
188 struct disk_child *t_dc = B_N_CHILD (cur_bi->bi_parent,cur_bi->bi_position); 179 if (cur_bi->bi_parent) {
189 put_dc_size( t_dc, dc_size(t_dc) + (count * (DC_SIZE + KEY_SIZE))); 180 struct disk_child *t_dc =
190 do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, 0); 181 B_N_CHILD(cur_bi->bi_parent, cur_bi->bi_position);
191 182 put_dc_size(t_dc,
192 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 183 dc_size(t_dc) + (count * (DC_SIZE + KEY_SIZE)));
193 check_internal (cur_bi->bi_parent); 184 do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent,
194 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 185 0);
195 } 186
187 /*&&&&&&&&&&&&&&&&&&&&&&&& */
188 check_internal(cur_bi->bi_parent);
189 /*&&&&&&&&&&&&&&&&&&&&&&&& */
190 }
196 191
197} 192}
198 193
199
200/* Delete del_num items and node pointers from buffer cur starting from * 194/* Delete del_num items and node pointers from buffer cur starting from *
201 * the first_i'th item and first_p'th pointers respectively. */ 195 * the first_i'th item and first_p'th pointers respectively. */
202static void internal_delete_pointers_items ( 196static void internal_delete_pointers_items(struct buffer_info *cur_bi,
203 struct buffer_info * cur_bi, 197 int first_p,
204 int first_p, 198 int first_i, int del_num)
205 int first_i,
206 int del_num
207 )
208{ 199{
209 struct buffer_head * cur = cur_bi->bi_bh; 200 struct buffer_head *cur = cur_bi->bi_bh;
210 int nr; 201 int nr;
211 struct block_head * blkh; 202 struct block_head *blkh;
212 struct reiserfs_key * key; 203 struct reiserfs_key *key;
213 struct disk_child * dc; 204 struct disk_child *dc;
214 205
215 RFALSE( cur == NULL, "buffer is 0"); 206 RFALSE(cur == NULL, "buffer is 0");
216 RFALSE( del_num < 0, 207 RFALSE(del_num < 0,
217 "negative number of items (%d) can not be deleted", del_num); 208 "negative number of items (%d) can not be deleted", del_num);
218 RFALSE( first_p < 0 || first_p + del_num > B_NR_ITEMS (cur) + 1 || first_i < 0, 209 RFALSE(first_p < 0 || first_p + del_num > B_NR_ITEMS(cur) + 1
219 "first pointer order (%d) < 0 or " 210 || first_i < 0,
220 "no so many pointers (%d), only (%d) or " 211 "first pointer order (%d) < 0 or "
221 "first key order %d < 0", first_p, 212 "no so many pointers (%d), only (%d) or "
222 first_p + del_num, B_NR_ITEMS (cur) + 1, first_i); 213 "first key order %d < 0", first_p, first_p + del_num,
223 if ( del_num == 0 ) 214 B_NR_ITEMS(cur) + 1, first_i);
224 return; 215 if (del_num == 0)
225 216 return;
226 blkh = B_BLK_HEAD(cur); 217
227 nr = blkh_nr_item(blkh); 218 blkh = B_BLK_HEAD(cur);
228 219 nr = blkh_nr_item(blkh);
229 if ( first_p == 0 && del_num == nr + 1 ) { 220
230 RFALSE( first_i != 0, "1st deleted key must have order 0, not %d", first_i); 221 if (first_p == 0 && del_num == nr + 1) {
231 make_empty_node (cur_bi); 222 RFALSE(first_i != 0,
232 return; 223 "1st deleted key must have order 0, not %d", first_i);
233 } 224 make_empty_node(cur_bi);
234 225 return;
235 RFALSE( first_i + del_num > B_NR_ITEMS (cur), 226 }
236 "first_i = %d del_num = %d "
237 "no so many keys (%d) in the node (%b)(%z)",
238 first_i, del_num, first_i + del_num, cur, cur);
239
240
241 /* deleting */
242 dc = B_N_CHILD (cur, first_p);
243
244 memmove (dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);
245 key = B_N_PDELIM_KEY (cur, first_i);
246 memmove (key, key + del_num, (nr - first_i - del_num) * KEY_SIZE + (nr + 1 - del_num) * DC_SIZE);
247
248
249 /* sizes, item number */
250 set_blkh_nr_item( blkh, blkh_nr_item(blkh) - del_num );
251 set_blkh_free_space( blkh,
252 blkh_free_space(blkh) + (del_num * (KEY_SIZE + DC_SIZE) ) );
253
254 do_balance_mark_internal_dirty (cur_bi->tb, cur, 0);
255 /*&&&&&&&&&&&&&&&&&&&&&&&*/
256 check_internal (cur);
257 /*&&&&&&&&&&&&&&&&&&&&&&&*/
258
259 if (cur_bi->bi_parent) {
260 struct disk_child *t_dc;
261 t_dc = B_N_CHILD (cur_bi->bi_parent, cur_bi->bi_position);
262 put_dc_size( t_dc, dc_size(t_dc) - (del_num * (KEY_SIZE + DC_SIZE) ) );
263
264 do_balance_mark_internal_dirty (cur_bi->tb, cur_bi->bi_parent,0);
265 /*&&&&&&&&&&&&&&&&&&&&&&&&*/
266 check_internal (cur_bi->bi_parent);
267 /*&&&&&&&&&&&&&&&&&&&&&&&&*/
268 }
269}
270 227
228 RFALSE(first_i + del_num > B_NR_ITEMS(cur),
229 "first_i = %d del_num = %d "
230 "no so many keys (%d) in the node (%b)(%z)",
231 first_i, del_num, first_i + del_num, cur, cur);
232
233 /* deleting */
234 dc = B_N_CHILD(cur, first_p);
235
236 memmove(dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);
237 key = B_N_PDELIM_KEY(cur, first_i);
238 memmove(key, key + del_num,
239 (nr - first_i - del_num) * KEY_SIZE + (nr + 1 -
240 del_num) * DC_SIZE);
241
242 /* sizes, item number */
243 set_blkh_nr_item(blkh, blkh_nr_item(blkh) - del_num);
244 set_blkh_free_space(blkh,
245 blkh_free_space(blkh) +
246 (del_num * (KEY_SIZE + DC_SIZE)));
247
248 do_balance_mark_internal_dirty(cur_bi->tb, cur, 0);
249 /*&&&&&&&&&&&&&&&&&&&&&&& */
250 check_internal(cur);
251 /*&&&&&&&&&&&&&&&&&&&&&&& */
252
253 if (cur_bi->bi_parent) {
254 struct disk_child *t_dc;
255 t_dc = B_N_CHILD(cur_bi->bi_parent, cur_bi->bi_position);
256 put_dc_size(t_dc,
257 dc_size(t_dc) - (del_num * (KEY_SIZE + DC_SIZE)));
258
259 do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent,
260 0);
261 /*&&&&&&&&&&&&&&&&&&&&&&&& */
262 check_internal(cur_bi->bi_parent);
263 /*&&&&&&&&&&&&&&&&&&&&&&&& */
264 }
265}
271 266
272/* delete n node pointers and items starting from given position */ 267/* delete n node pointers and items starting from given position */
273static void internal_delete_childs (struct buffer_info * cur_bi, 268static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n)
274 int from, int n)
275{ 269{
276 int i_from; 270 int i_from;
277 271
278 i_from = (from == 0) ? from : from - 1; 272 i_from = (from == 0) ? from : from - 1;
279 273
280 /* delete n pointers starting from `from' position in CUR; 274 /* delete n pointers starting from `from' position in CUR;
281 delete n keys starting from 'i_from' position in CUR; 275 delete n keys starting from 'i_from' position in CUR;
282 */ 276 */
283 internal_delete_pointers_items (cur_bi, from, i_from, n); 277 internal_delete_pointers_items(cur_bi, from, i_from, n);
284} 278}
285 279
286
287/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest 280/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
288* last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest 281* last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
289 * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest 282 * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
290 */ 283 */
291static void internal_copy_pointers_items ( 284static void internal_copy_pointers_items(struct buffer_info *dest_bi,
292 struct buffer_info * dest_bi, 285 struct buffer_head *src,
293 struct buffer_head * src, 286 int last_first, int cpy_num)
294 int last_first, int cpy_num
295 )
296{ 287{
297 /* ATTENTION! Number of node pointers in DEST is equal to number of items in DEST * 288 /* ATTENTION! Number of node pointers in DEST is equal to number of items in DEST *
298 * as delimiting key have already inserted to buffer dest.*/ 289 * as delimiting key have already inserted to buffer dest.*/
299 struct buffer_head * dest = dest_bi->bi_bh; 290 struct buffer_head *dest = dest_bi->bi_bh;
300 int nr_dest, nr_src; 291 int nr_dest, nr_src;
301 int dest_order, src_order; 292 int dest_order, src_order;
302 struct block_head * blkh; 293 struct block_head *blkh;
303 struct reiserfs_key * key; 294 struct reiserfs_key *key;
304 struct disk_child * dc; 295 struct disk_child *dc;
305 296
306 nr_src = B_NR_ITEMS (src); 297 nr_src = B_NR_ITEMS(src);
307 298
308 RFALSE( dest == NULL || src == NULL, 299 RFALSE(dest == NULL || src == NULL,
309 "src (%p) or dest (%p) buffer is 0", src, dest); 300 "src (%p) or dest (%p) buffer is 0", src, dest);
310 RFALSE( last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST, 301 RFALSE(last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST,
311 "invalid last_first parameter (%d)", last_first); 302 "invalid last_first parameter (%d)", last_first);
312 RFALSE( nr_src < cpy_num - 1, 303 RFALSE(nr_src < cpy_num - 1,
313 "no so many items (%d) in src (%d)", cpy_num, nr_src); 304 "no so many items (%d) in src (%d)", cpy_num, nr_src);
314 RFALSE( cpy_num < 0, "cpy_num less than 0 (%d)", cpy_num); 305 RFALSE(cpy_num < 0, "cpy_num less than 0 (%d)", cpy_num);
315 RFALSE( cpy_num - 1 + B_NR_ITEMS(dest) > (int)MAX_NR_KEY(dest), 306 RFALSE(cpy_num - 1 + B_NR_ITEMS(dest) > (int)MAX_NR_KEY(dest),
316 "cpy_num (%d) + item number in dest (%d) can not be > MAX_NR_KEY(%d)", 307 "cpy_num (%d) + item number in dest (%d) can not be > MAX_NR_KEY(%d)",
317 cpy_num, B_NR_ITEMS(dest), MAX_NR_KEY(dest)); 308 cpy_num, B_NR_ITEMS(dest), MAX_NR_KEY(dest));
318 309
319 if ( cpy_num == 0 ) 310 if (cpy_num == 0)
320 return; 311 return;
321 312
322 /* coping */ 313 /* coping */
323 blkh = B_BLK_HEAD(dest); 314 blkh = B_BLK_HEAD(dest);
324 nr_dest = blkh_nr_item(blkh); 315 nr_dest = blkh_nr_item(blkh);
325 316
326 /*dest_order = (last_first == LAST_TO_FIRST) ? 0 : nr_dest;*/ 317 /*dest_order = (last_first == LAST_TO_FIRST) ? 0 : nr_dest; */
327 /*src_order = (last_first == LAST_TO_FIRST) ? (nr_src - cpy_num + 1) : 0;*/ 318 /*src_order = (last_first == LAST_TO_FIRST) ? (nr_src - cpy_num + 1) : 0; */
328 (last_first == LAST_TO_FIRST) ? (dest_order = 0, src_order = nr_src - cpy_num + 1) : 319 (last_first == LAST_TO_FIRST) ? (dest_order = 0, src_order =
329 (dest_order = nr_dest, src_order = 0); 320 nr_src - cpy_num + 1) : (dest_order =
321 nr_dest,
322 src_order =
323 0);
330 324
331 /* prepare space for cpy_num pointers */ 325 /* prepare space for cpy_num pointers */
332 dc = B_N_CHILD (dest, dest_order); 326 dc = B_N_CHILD(dest, dest_order);
333 327
334 memmove (dc + cpy_num, dc, (nr_dest - dest_order) * DC_SIZE); 328 memmove(dc + cpy_num, dc, (nr_dest - dest_order) * DC_SIZE);
335 329
336 /* insert pointers */ 330 /* insert pointers */
337 memcpy (dc, B_N_CHILD (src, src_order), DC_SIZE * cpy_num); 331 memcpy(dc, B_N_CHILD(src, src_order), DC_SIZE * cpy_num);
338 332
339 333 /* prepare space for cpy_num - 1 item headers */
340 /* prepare space for cpy_num - 1 item headers */ 334 key = B_N_PDELIM_KEY(dest, dest_order);
341 key = B_N_PDELIM_KEY(dest, dest_order); 335 memmove(key + cpy_num - 1, key,
342 memmove (key + cpy_num - 1, key, 336 KEY_SIZE * (nr_dest - dest_order) + DC_SIZE * (nr_dest +
343 KEY_SIZE * (nr_dest - dest_order) + DC_SIZE * (nr_dest + cpy_num)); 337 cpy_num));
344 338
345 339 /* insert headers */
346 /* insert headers */ 340 memcpy(key, B_N_PDELIM_KEY(src, src_order), KEY_SIZE * (cpy_num - 1));
347 memcpy (key, B_N_PDELIM_KEY (src, src_order), KEY_SIZE * (cpy_num - 1)); 341
348 342 /* sizes, item number */
349 /* sizes, item number */ 343 set_blkh_nr_item(blkh, blkh_nr_item(blkh) + (cpy_num - 1));
350 set_blkh_nr_item( blkh, blkh_nr_item(blkh) + (cpy_num - 1 ) ); 344 set_blkh_free_space(blkh,
351 set_blkh_free_space( blkh, 345 blkh_free_space(blkh) - (KEY_SIZE * (cpy_num - 1) +
352 blkh_free_space(blkh) - (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num ) ); 346 DC_SIZE * cpy_num));
353 347
354 do_balance_mark_internal_dirty (dest_bi->tb, dest, 0); 348 do_balance_mark_internal_dirty(dest_bi->tb, dest, 0);
355 349
356 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 350 /*&&&&&&&&&&&&&&&&&&&&&&&& */
357 check_internal (dest); 351 check_internal(dest);
358 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 352 /*&&&&&&&&&&&&&&&&&&&&&&&& */
359 353
360 if (dest_bi->bi_parent) { 354 if (dest_bi->bi_parent) {
361 struct disk_child *t_dc; 355 struct disk_child *t_dc;
362 t_dc = B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position); 356 t_dc = B_N_CHILD(dest_bi->bi_parent, dest_bi->bi_position);
363 put_dc_size( t_dc, dc_size(t_dc) + (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num) ); 357 put_dc_size(t_dc,
364 358 dc_size(t_dc) + (KEY_SIZE * (cpy_num - 1) +
365 do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0); 359 DC_SIZE * cpy_num));
366 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 360
367 check_internal (dest_bi->bi_parent); 361 do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,
368 /*&&&&&&&&&&&&&&&&&&&&&&&&*/ 362 0);
369 } 363 /*&&&&&&&&&&&&&&&&&&&&&&&& */
364 check_internal(dest_bi->bi_parent);
365 /*&&&&&&&&&&&&&&&&&&&&&&&& */
366 }
370 367
371} 368}
372 369
373
374/* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest. 370/* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest.
375 * Delete cpy_num - del_par items and node pointers from buffer src. 371 * Delete cpy_num - del_par items and node pointers from buffer src.
376 * last_first == FIRST_TO_LAST means, that we copy/delete first items from src. 372 * last_first == FIRST_TO_LAST means, that we copy/delete first items from src.
377 * last_first == LAST_TO_FIRST means, that we copy/delete last items from src. 373 * last_first == LAST_TO_FIRST means, that we copy/delete last items from src.
378 */ 374 */
379static void internal_move_pointers_items (struct buffer_info * dest_bi, 375static void internal_move_pointers_items(struct buffer_info *dest_bi,
380 struct buffer_info * src_bi, 376 struct buffer_info *src_bi,
381 int last_first, int cpy_num, int del_par) 377 int last_first, int cpy_num,
378 int del_par)
382{ 379{
383 int first_pointer; 380 int first_pointer;
384 int first_item; 381 int first_item;
385 382
386 internal_copy_pointers_items (dest_bi, src_bi->bi_bh, last_first, cpy_num); 383 internal_copy_pointers_items(dest_bi, src_bi->bi_bh, last_first,
387 384 cpy_num);
388 if (last_first == FIRST_TO_LAST) { /* shift_left occurs */ 385
389 first_pointer = 0; 386 if (last_first == FIRST_TO_LAST) { /* shift_left occurs */
390 first_item = 0; 387 first_pointer = 0;
391 /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer, 388 first_item = 0;
392 for key - with first_item */ 389 /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
393 internal_delete_pointers_items (src_bi, first_pointer, first_item, cpy_num - del_par); 390 for key - with first_item */
394 } else { /* shift_right occurs */ 391 internal_delete_pointers_items(src_bi, first_pointer,
395 int i, j; 392 first_item, cpy_num - del_par);
396 393 } else { /* shift_right occurs */
397 i = ( cpy_num - del_par == ( j = B_NR_ITEMS(src_bi->bi_bh)) + 1 ) ? 0 : j - cpy_num + del_par; 394 int i, j;
398 395
399 internal_delete_pointers_items (src_bi, j + 1 - cpy_num + del_par, i, cpy_num - del_par); 396 i = (cpy_num - del_par ==
400 } 397 (j =
398 B_NR_ITEMS(src_bi->bi_bh)) + 1) ? 0 : j - cpy_num +
399 del_par;
400
401 internal_delete_pointers_items(src_bi,
402 j + 1 - cpy_num + del_par, i,
403 cpy_num - del_par);
404 }
401} 405}
402 406
403/* Insert n_src'th key of buffer src before n_dest'th key of buffer dest. */ 407/* Insert n_src'th key of buffer src before n_dest'th key of buffer dest. */
404static void internal_insert_key (struct buffer_info * dest_bi, 408static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_before, /* insert key before key with n_dest number */
405 int dest_position_before, /* insert key before key with n_dest number */ 409 struct buffer_head *src, int src_position)
406 struct buffer_head * src,
407 int src_position)
408{ 410{
409 struct buffer_head * dest = dest_bi->bi_bh; 411 struct buffer_head *dest = dest_bi->bi_bh;
410 int nr; 412 int nr;
411 struct block_head * blkh; 413 struct block_head *blkh;
412 struct reiserfs_key * key; 414 struct reiserfs_key *key;
413 415
414 RFALSE( dest == NULL || src == NULL, 416 RFALSE(dest == NULL || src == NULL,
415 "source(%p) or dest(%p) buffer is 0", src, dest); 417 "source(%p) or dest(%p) buffer is 0", src, dest);
416 RFALSE( dest_position_before < 0 || src_position < 0, 418 RFALSE(dest_position_before < 0 || src_position < 0,
417 "source(%d) or dest(%d) key number less than 0", 419 "source(%d) or dest(%d) key number less than 0",
418 src_position, dest_position_before); 420 src_position, dest_position_before);
419 RFALSE( dest_position_before > B_NR_ITEMS (dest) || 421 RFALSE(dest_position_before > B_NR_ITEMS(dest) ||
420 src_position >= B_NR_ITEMS(src), 422 src_position >= B_NR_ITEMS(src),
421 "invalid position in dest (%d (key number %d)) or in src (%d (key number %d))", 423 "invalid position in dest (%d (key number %d)) or in src (%d (key number %d))",
422 dest_position_before, B_NR_ITEMS (dest), 424 dest_position_before, B_NR_ITEMS(dest),
423 src_position, B_NR_ITEMS(src)); 425 src_position, B_NR_ITEMS(src));
424 RFALSE( B_FREE_SPACE (dest) < KEY_SIZE, 426 RFALSE(B_FREE_SPACE(dest) < KEY_SIZE,
425 "no enough free space (%d) in dest buffer", B_FREE_SPACE (dest)); 427 "no enough free space (%d) in dest buffer", B_FREE_SPACE(dest));
426 428
427 blkh = B_BLK_HEAD(dest); 429 blkh = B_BLK_HEAD(dest);
428 nr = blkh_nr_item(blkh); 430 nr = blkh_nr_item(blkh);
429 431
430 /* prepare space for inserting key */ 432 /* prepare space for inserting key */
431 key = B_N_PDELIM_KEY (dest, dest_position_before); 433 key = B_N_PDELIM_KEY(dest, dest_position_before);
432 memmove (key + 1, key, (nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE); 434 memmove(key + 1, key,
433 435 (nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE);
434 /* insert key */ 436
435 memcpy (key, B_N_PDELIM_KEY(src, src_position), KEY_SIZE); 437 /* insert key */
436 438 memcpy(key, B_N_PDELIM_KEY(src, src_position), KEY_SIZE);
437 /* Change dirt, free space, item number fields. */ 439
438 440 /* Change dirt, free space, item number fields. */
439 set_blkh_nr_item( blkh, blkh_nr_item(blkh) + 1 ); 441
440 set_blkh_free_space( blkh, blkh_free_space(blkh) - KEY_SIZE ); 442 set_blkh_nr_item(blkh, blkh_nr_item(blkh) + 1);
441 443 set_blkh_free_space(blkh, blkh_free_space(blkh) - KEY_SIZE);
442 do_balance_mark_internal_dirty (dest_bi->tb, dest, 0); 444
443 445 do_balance_mark_internal_dirty(dest_bi->tb, dest, 0);
444 if (dest_bi->bi_parent) { 446
445 struct disk_child *t_dc; 447 if (dest_bi->bi_parent) {
446 t_dc = B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position); 448 struct disk_child *t_dc;
447 put_dc_size( t_dc, dc_size(t_dc) + KEY_SIZE ); 449 t_dc = B_N_CHILD(dest_bi->bi_parent, dest_bi->bi_position);
448 450 put_dc_size(t_dc, dc_size(t_dc) + KEY_SIZE);
449 do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0); 451
450 } 452 do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,
453 0);
454 }
451} 455}
452 456
453
454
455/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest. 457/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
456 * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest. 458 * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
457 * Replace d_key'th key in buffer cfl. 459 * Replace d_key'th key in buffer cfl.
458 * Delete pointer_amount items and node pointers from buffer src. 460 * Delete pointer_amount items and node pointers from buffer src.
459 */ 461 */
460/* this can be invoked both to shift from S to L and from R to S */ 462/* this can be invoked both to shift from S to L and from R to S */
461static void internal_shift_left ( 463static void internal_shift_left(int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */
462 int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */ 464 struct tree_balance *tb,
463 struct tree_balance * tb, 465 int h, int pointer_amount)
464 int h,
465 int pointer_amount
466 )
467{ 466{
468 struct buffer_info dest_bi, src_bi; 467 struct buffer_info dest_bi, src_bi;
469 struct buffer_head * cf; 468 struct buffer_head *cf;
470 int d_key_position; 469 int d_key_position;
471 470
472 internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); 471 internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi,
473 472 &d_key_position, &cf);
474 /*printk("pointer_amount = %d\n",pointer_amount);*/ 473
475 474 /*printk("pointer_amount = %d\n",pointer_amount); */
476 if (pointer_amount) { 475
477 /* insert delimiting key from common father of dest and src to node dest into position B_NR_ITEM(dest) */ 476 if (pointer_amount) {
478 internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position); 477 /* insert delimiting key from common father of dest and src to node dest into position B_NR_ITEM(dest) */
479 478 internal_insert_key(&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf,
480 if (B_NR_ITEMS(src_bi.bi_bh) == pointer_amount - 1) { 479 d_key_position);
481 if (src_bi.bi_position/*src->b_item_order*/ == 0) 480
482 replace_key (tb, cf, d_key_position, src_bi.bi_parent/*src->b_parent*/, 0); 481 if (B_NR_ITEMS(src_bi.bi_bh) == pointer_amount - 1) {
483 } else 482 if (src_bi.bi_position /*src->b_item_order */ == 0)
484 replace_key (tb, cf, d_key_position, src_bi.bi_bh, pointer_amount - 1); 483 replace_key(tb, cf, d_key_position,
485 } 484 src_bi.
486 /* last parameter is del_parameter */ 485 bi_parent /*src->b_parent */ , 0);
487 internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 0); 486 } else
487 replace_key(tb, cf, d_key_position, src_bi.bi_bh,
488 pointer_amount - 1);
489 }
490 /* last parameter is del_parameter */
491 internal_move_pointers_items(&dest_bi, &src_bi, FIRST_TO_LAST,
492 pointer_amount, 0);
488 493
489} 494}
490 495
@@ -493,67 +498,66 @@ static void internal_shift_left (
493 * Delete n - 1 items and node pointers from buffer S[h]. 498 * Delete n - 1 items and node pointers from buffer S[h].
494 */ 499 */
495/* it always shifts from S[h] to L[h] */ 500/* it always shifts from S[h] to L[h] */
496static void internal_shift1_left ( 501static void internal_shift1_left(struct tree_balance *tb,
497 struct tree_balance * tb, 502 int h, int pointer_amount)
498 int h,
499 int pointer_amount
500 )
501{ 503{
502 struct buffer_info dest_bi, src_bi; 504 struct buffer_info dest_bi, src_bi;
503 struct buffer_head * cf; 505 struct buffer_head *cf;
504 int d_key_position; 506 int d_key_position;
505 507
506 internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); 508 internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
509 &dest_bi, &src_bi, &d_key_position, &cf);
507 510
508 if ( pointer_amount > 0 ) /* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */ 511 if (pointer_amount > 0) /* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */
509 internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position); 512 internal_insert_key(&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf,
510 /* internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]);*/ 513 d_key_position);
514 /* internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]); */
511 515
512 /* last parameter is del_parameter */ 516 /* last parameter is del_parameter */
513 internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 1); 517 internal_move_pointers_items(&dest_bi, &src_bi, FIRST_TO_LAST,
514 /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1);*/ 518 pointer_amount, 1);
519 /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
515} 520}
516 521
517
518/* Insert d_key'th (delimiting) key from buffer cfr to head of dest. 522/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
519 * Copy n node pointers and n - 1 items from buffer src to buffer dest. 523 * Copy n node pointers and n - 1 items from buffer src to buffer dest.
520 * Replace d_key'th key in buffer cfr. 524 * Replace d_key'th key in buffer cfr.
521 * Delete n items and node pointers from buffer src. 525 * Delete n items and node pointers from buffer src.
522 */ 526 */
523static void internal_shift_right ( 527static void internal_shift_right(int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */
524 int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */ 528 struct tree_balance *tb,
525 struct tree_balance * tb, 529 int h, int pointer_amount)
526 int h,
527 int pointer_amount
528 )
529{ 530{
530 struct buffer_info dest_bi, src_bi; 531 struct buffer_info dest_bi, src_bi;
531 struct buffer_head * cf; 532 struct buffer_head *cf;
532 int d_key_position; 533 int d_key_position;
533 int nr; 534 int nr;
534 535
535 536 internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi,
536 internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); 537 &d_key_position, &cf);
537 538
538 nr = B_NR_ITEMS (src_bi.bi_bh); 539 nr = B_NR_ITEMS(src_bi.bi_bh);
539 540
540 if (pointer_amount > 0) { 541 if (pointer_amount > 0) {
541 /* insert delimiting key from common father of dest and src to dest node into position 0 */ 542 /* insert delimiting key from common father of dest and src to dest node into position 0 */
542 internal_insert_key (&dest_bi, 0, cf, d_key_position); 543 internal_insert_key(&dest_bi, 0, cf, d_key_position);
543 if (nr == pointer_amount - 1) { 544 if (nr == pointer_amount - 1) {
544 RFALSE( src_bi.bi_bh != PATH_H_PBUFFER (tb->tb_path, h)/*tb->S[h]*/ || 545 RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ ||
545 dest_bi.bi_bh != tb->R[h], 546 dest_bi.bi_bh != tb->R[h],
546 "src (%p) must be == tb->S[h](%p) when it disappears", 547 "src (%p) must be == tb->S[h](%p) when it disappears",
547 src_bi.bi_bh, PATH_H_PBUFFER (tb->tb_path, h)); 548 src_bi.bi_bh, PATH_H_PBUFFER(tb->tb_path, h));
548 /* when S[h] disappers replace left delemiting key as well */ 549 /* when S[h] disappers replace left delemiting key as well */
549 if (tb->CFL[h]) 550 if (tb->CFL[h])
550 replace_key (tb, cf, d_key_position, tb->CFL[h], tb->lkey[h]); 551 replace_key(tb, cf, d_key_position, tb->CFL[h],
551 } else 552 tb->lkey[h]);
552 replace_key (tb, cf, d_key_position, src_bi.bi_bh, nr - pointer_amount); 553 } else
553 } 554 replace_key(tb, cf, d_key_position, src_bi.bi_bh,
554 555 nr - pointer_amount);
555 /* last parameter is del_parameter */ 556 }
556 internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 0); 557
558 /* last parameter is del_parameter */
559 internal_move_pointers_items(&dest_bi, &src_bi, LAST_TO_FIRST,
560 pointer_amount, 0);
557} 561}
558 562
559/* Insert delimiting key to R[h]. 563/* Insert delimiting key to R[h].
@@ -561,498 +565,526 @@ static void internal_shift_right (
561 * Delete n - 1 items and node pointers from buffer S[h]. 565 * Delete n - 1 items and node pointers from buffer S[h].
562 */ 566 */
563/* it always shift from S[h] to R[h] */ 567/* it always shift from S[h] to R[h] */
564static void internal_shift1_right ( 568static void internal_shift1_right(struct tree_balance *tb,
565 struct tree_balance * tb, 569 int h, int pointer_amount)
566 int h,
567 int pointer_amount
568 )
569{ 570{
570 struct buffer_info dest_bi, src_bi; 571 struct buffer_info dest_bi, src_bi;
571 struct buffer_head * cf; 572 struct buffer_head *cf;
572 int d_key_position; 573 int d_key_position;
573 574
574 internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); 575 internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
575 576 &dest_bi, &src_bi, &d_key_position, &cf);
576 if (pointer_amount > 0) /* insert rkey from CFR[h] to right neighbor R[h] */ 577
577 internal_insert_key (&dest_bi, 0, cf, d_key_position); 578 if (pointer_amount > 0) /* insert rkey from CFR[h] to right neighbor R[h] */
578 /* internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]);*/ 579 internal_insert_key(&dest_bi, 0, cf, d_key_position);
579 580 /* internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]); */
580 /* last parameter is del_parameter */
581 internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 1);
582 /* internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1);*/
583}
584 581
582 /* last parameter is del_parameter */
583 internal_move_pointers_items(&dest_bi, &src_bi, LAST_TO_FIRST,
584 pointer_amount, 1);
585 /* internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1); */
586}
585 587
586/* Delete insert_num node pointers together with their left items 588/* Delete insert_num node pointers together with their left items
587 * and balance current node.*/ 589 * and balance current node.*/
588static void balance_internal_when_delete (struct tree_balance * tb, 590static void balance_internal_when_delete(struct tree_balance *tb,
589 int h, int child_pos) 591 int h, int child_pos)
590{ 592{
591 int insert_num; 593 int insert_num;
592 int n; 594 int n;
593 struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h); 595 struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
594 struct buffer_info bi; 596 struct buffer_info bi;
595 597
596 insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE)); 598 insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE));
597 599
598 /* delete child-node-pointer(s) together with their left item(s) */ 600 /* delete child-node-pointer(s) together with their left item(s) */
599 bi.tb = tb; 601 bi.tb = tb;
600 bi.bi_bh = tbSh; 602 bi.bi_bh = tbSh;
601 bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h); 603 bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
602 bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1); 604 bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
603 605
604 internal_delete_childs (&bi, child_pos, -insert_num); 606 internal_delete_childs(&bi, child_pos, -insert_num);
605 607
606 RFALSE( tb->blknum[h] > 1, 608 RFALSE(tb->blknum[h] > 1,
607 "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]); 609 "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]);
608 610
609 n = B_NR_ITEMS(tbSh); 611 n = B_NR_ITEMS(tbSh);
610 612
611 if ( tb->lnum[h] == 0 && tb->rnum[h] == 0 ) { 613 if (tb->lnum[h] == 0 && tb->rnum[h] == 0) {
612 if ( tb->blknum[h] == 0 ) { 614 if (tb->blknum[h] == 0) {
613 /* node S[h] (root of the tree) is empty now */ 615 /* node S[h] (root of the tree) is empty now */
614 struct buffer_head *new_root; 616 struct buffer_head *new_root;
615 617
616 RFALSE( n || B_FREE_SPACE (tbSh) != MAX_CHILD_SIZE(tbSh) - DC_SIZE, 618 RFALSE(n
617 "buffer must have only 0 keys (%d)", n); 619 || B_FREE_SPACE(tbSh) !=
618 RFALSE( bi.bi_parent, "root has parent (%p)", bi.bi_parent); 620 MAX_CHILD_SIZE(tbSh) - DC_SIZE,
619 621 "buffer must have only 0 keys (%d)", n);
620 /* choose a new root */ 622 RFALSE(bi.bi_parent, "root has parent (%p)",
621 if ( ! tb->L[h-1] || ! B_NR_ITEMS(tb->L[h-1]) ) 623 bi.bi_parent);
622 new_root = tb->R[h-1]; 624
623 else 625 /* choose a new root */
624 new_root = tb->L[h-1]; 626 if (!tb->L[h - 1] || !B_NR_ITEMS(tb->L[h - 1]))
625 /* switch super block's tree root block number to the new value */ 627 new_root = tb->R[h - 1];
626 PUT_SB_ROOT_BLOCK( tb->tb_sb, new_root->b_blocknr ); 628 else
627 //REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --; 629 new_root = tb->L[h - 1];
628 PUT_SB_TREE_HEIGHT( tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) - 1 ); 630 /* switch super block's tree root block number to the new value */
629 631 PUT_SB_ROOT_BLOCK(tb->tb_sb, new_root->b_blocknr);
630 do_balance_mark_sb_dirty (tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1); 632 //REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --;
631 /*&&&&&&&&&&&&&&&&&&&&&&*/ 633 PUT_SB_TREE_HEIGHT(tb->tb_sb,
632 if (h > 1) 634 SB_TREE_HEIGHT(tb->tb_sb) - 1);
633 /* use check_internal if new root is an internal node */ 635
634 check_internal (new_root); 636 do_balance_mark_sb_dirty(tb,
635 /*&&&&&&&&&&&&&&&&&&&&&&*/ 637 REISERFS_SB(tb->tb_sb)->s_sbh,
636 638 1);
637 /* do what is needed for buffer thrown from tree */ 639 /*&&&&&&&&&&&&&&&&&&&&&& */
638 reiserfs_invalidate_buffer(tb, tbSh); 640 if (h > 1)
639 return; 641 /* use check_internal if new root is an internal node */
642 check_internal(new_root);
643 /*&&&&&&&&&&&&&&&&&&&&&& */
644
645 /* do what is needed for buffer thrown from tree */
646 reiserfs_invalidate_buffer(tb, tbSh);
647 return;
648 }
649 return;
650 }
651
652 if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) { /* join S[h] with L[h] */
653
654 RFALSE(tb->rnum[h] != 0,
655 "invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
656 h, tb->rnum[h]);
657
658 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
659 reiserfs_invalidate_buffer(tb, tbSh);
660
661 return;
662 }
663
664 if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) { /* join S[h] with R[h] */
665 RFALSE(tb->lnum[h] != 0,
666 "invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
667 h, tb->lnum[h]);
668
669 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);
670
671 reiserfs_invalidate_buffer(tb, tbSh);
672 return;
640 } 673 }
641 return;
642 }
643
644 if ( tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1 ) { /* join S[h] with L[h] */
645
646 RFALSE( tb->rnum[h] != 0,
647 "invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
648 h, tb->rnum[h]);
649
650 internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
651 reiserfs_invalidate_buffer(tb, tbSh);
652
653 return;
654 }
655
656 if ( tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1 ) { /* join S[h] with R[h] */
657 RFALSE( tb->lnum[h] != 0,
658 "invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
659 h, tb->lnum[h]);
660
661 internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);
662
663 reiserfs_invalidate_buffer(tb,tbSh);
664 return;
665 }
666
667 if ( tb->lnum[h] < 0 ) { /* borrow from left neighbor L[h] */
668 RFALSE( tb->rnum[h] != 0,
669 "wrong tb->rnum[%d]==%d when borrow from L[h]", h, tb->rnum[h]);
670 /*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]);*/
671 internal_shift_right (INTERNAL_SHIFT_FROM_L_TO_S, tb, h, -tb->lnum[h]);
672 return;
673 }
674
675 if ( tb->rnum[h] < 0 ) { /* borrow from right neighbor R[h] */
676 RFALSE( tb->lnum[h] != 0,
677 "invalid tb->lnum[%d]==%d when borrow from R[h]",
678 h, tb->lnum[h]);
679 internal_shift_left (INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]);/*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]);*/
680 return;
681 }
682
683 if ( tb->lnum[h] > 0 ) { /* split S[h] into two parts and put them into neighbors */
684 RFALSE( tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
685 "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
686 h, tb->lnum[h], h, tb->rnum[h], n);
687
688 internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);/*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]);*/
689 internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]);
690
691 reiserfs_invalidate_buffer (tb, tbSh);
692
693 return;
694 }
695 reiserfs_panic (tb->tb_sb, "balance_internal_when_delete: unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
696 h, tb->lnum[h], h, tb->rnum[h]);
697}
698 674
675 if (tb->lnum[h] < 0) { /* borrow from left neighbor L[h] */
676 RFALSE(tb->rnum[h] != 0,
677 "wrong tb->rnum[%d]==%d when borrow from L[h]", h,
678 tb->rnum[h]);
679 /*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]); */
680 internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h,
681 -tb->lnum[h]);
682 return;
683 }
684
685 if (tb->rnum[h] < 0) { /* borrow from right neighbor R[h] */
686 RFALSE(tb->lnum[h] != 0,
687 "invalid tb->lnum[%d]==%d when borrow from R[h]",
688 h, tb->lnum[h]);
689 internal_shift_left(INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]); /*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]); */
690 return;
691 }
692
693 if (tb->lnum[h] > 0) { /* split S[h] into two parts and put them into neighbors */
694 RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
695 "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
696 h, tb->lnum[h], h, tb->rnum[h], n);
697
698 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]); /*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]); */
699 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
700 tb->rnum[h]);
701
702 reiserfs_invalidate_buffer(tb, tbSh);
703
704 return;
705 }
706 reiserfs_panic(tb->tb_sb,
707 "balance_internal_when_delete: unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
708 h, tb->lnum[h], h, tb->rnum[h]);
709}
699 710
700/* Replace delimiting key of buffers L[h] and S[h] by the given key.*/ 711/* Replace delimiting key of buffers L[h] and S[h] by the given key.*/
701static void replace_lkey ( 712static void replace_lkey(struct tree_balance *tb, int h, struct item_head *key)
702 struct tree_balance * tb,
703 int h,
704 struct item_head * key
705 )
706{ 713{
707 RFALSE( tb->L[h] == NULL || tb->CFL[h] == NULL, 714 RFALSE(tb->L[h] == NULL || tb->CFL[h] == NULL,
708 "L[h](%p) and CFL[h](%p) must exist in replace_lkey", 715 "L[h](%p) and CFL[h](%p) must exist in replace_lkey",
709 tb->L[h], tb->CFL[h]); 716 tb->L[h], tb->CFL[h]);
710 717
711 if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0) 718 if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0)
712 return; 719 return;
713 720
714 memcpy (B_N_PDELIM_KEY(tb->CFL[h],tb->lkey[h]), key, KEY_SIZE); 721 memcpy(B_N_PDELIM_KEY(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE);
715 722
716 do_balance_mark_internal_dirty (tb, tb->CFL[h],0); 723 do_balance_mark_internal_dirty(tb, tb->CFL[h], 0);
717} 724}
718 725
719
720/* Replace delimiting key of buffers S[h] and R[h] by the given key.*/ 726/* Replace delimiting key of buffers S[h] and R[h] by the given key.*/
721static void replace_rkey ( 727static void replace_rkey(struct tree_balance *tb, int h, struct item_head *key)
722 struct tree_balance * tb,
723 int h,
724 struct item_head * key
725 )
726{ 728{
727 RFALSE( tb->R[h] == NULL || tb->CFR[h] == NULL, 729 RFALSE(tb->R[h] == NULL || tb->CFR[h] == NULL,
728 "R[h](%p) and CFR[h](%p) must exist in replace_rkey", 730 "R[h](%p) and CFR[h](%p) must exist in replace_rkey",
729 tb->R[h], tb->CFR[h]); 731 tb->R[h], tb->CFR[h]);
730 RFALSE( B_NR_ITEMS(tb->R[h]) == 0, 732 RFALSE(B_NR_ITEMS(tb->R[h]) == 0,
731 "R[h] can not be empty if it exists (item number=%d)", 733 "R[h] can not be empty if it exists (item number=%d)",
732 B_NR_ITEMS(tb->R[h])); 734 B_NR_ITEMS(tb->R[h]));
733 735
734 memcpy (B_N_PDELIM_KEY(tb->CFR[h],tb->rkey[h]), key, KEY_SIZE); 736 memcpy(B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE);
735 737
736 do_balance_mark_internal_dirty (tb, tb->CFR[h], 0); 738 do_balance_mark_internal_dirty(tb, tb->CFR[h], 0);
737} 739}
738 740
739 741int balance_internal(struct tree_balance *tb, /* tree_balance structure */
740int balance_internal (struct tree_balance * tb, /* tree_balance structure */ 742 int h, /* level of the tree */
741 int h, /* level of the tree */ 743 int child_pos, struct item_head *insert_key, /* key for insertion on higher level */
742 int child_pos, 744 struct buffer_head **insert_ptr /* node for insertion on higher level */
743 struct item_head * insert_key, /* key for insertion on higher level */
744 struct buffer_head ** insert_ptr /* node for insertion on higher level*/
745 ) 745 )
746 /* if inserting/pasting 746 /* if inserting/pasting
747 { 747 {
748 child_pos is the position of the node-pointer in S[h] that * 748 child_pos is the position of the node-pointer in S[h] that *
749 pointed to S[h-1] before balancing of the h-1 level; * 749 pointed to S[h-1] before balancing of the h-1 level; *
750 this means that new pointers and items must be inserted AFTER * 750 this means that new pointers and items must be inserted AFTER *
751 child_pos 751 child_pos
752 } 752 }
753 else 753 else
754 { 754 {
755 it is the position of the leftmost pointer that must be deleted (together with 755 it is the position of the leftmost pointer that must be deleted (together with
756 its corresponding key to the left of the pointer) 756 its corresponding key to the left of the pointer)
757 as a result of the previous level's balancing. 757 as a result of the previous level's balancing.
758 } 758 }
759*/ 759 */
760{ 760{
761 struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h); 761 struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
762 struct buffer_info bi; 762 struct buffer_info bi;
763 int order; /* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */ 763 int order; /* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
764 int insert_num, n, k; 764 int insert_num, n, k;
765 struct buffer_head * S_new; 765 struct buffer_head *S_new;
766 struct item_head new_insert_key; 766 struct item_head new_insert_key;
767 struct buffer_head * new_insert_ptr = NULL; 767 struct buffer_head *new_insert_ptr = NULL;
768 struct item_head * new_insert_key_addr = insert_key; 768 struct item_head *new_insert_key_addr = insert_key;
769 769
770 RFALSE( h < 1, "h (%d) can not be < 1 on internal level", h); 770 RFALSE(h < 1, "h (%d) can not be < 1 on internal level", h);
771 771
772 PROC_INFO_INC( tb -> tb_sb, balance_at[ h ] ); 772 PROC_INFO_INC(tb->tb_sb, balance_at[h]);
773 773
774 order = ( tbSh ) ? PATH_H_POSITION (tb->tb_path, h + 1)/*tb->S[h]->b_item_order*/ : 0; 774 order =
775 775 (tbSh) ? PATH_H_POSITION(tb->tb_path,
776 /* Using insert_size[h] calculate the number insert_num of items 776 h + 1) /*tb->S[h]->b_item_order */ : 0;
777 that must be inserted to or deleted from S[h]. */ 777
778 insert_num = tb->insert_size[h]/((int)(KEY_SIZE + DC_SIZE)); 778 /* Using insert_size[h] calculate the number insert_num of items
779 779 that must be inserted to or deleted from S[h]. */
780 /* Check whether insert_num is proper **/ 780 insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
781 RFALSE( insert_num < -2 || insert_num > 2, 781
782 "incorrect number of items inserted to the internal node (%d)", 782 /* Check whether insert_num is proper * */
783 insert_num); 783 RFALSE(insert_num < -2 || insert_num > 2,
784 RFALSE( h > 1 && (insert_num > 1 || insert_num < -1), 784 "incorrect number of items inserted to the internal node (%d)",
785 "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level", 785 insert_num);
786 insert_num, h); 786 RFALSE(h > 1 && (insert_num > 1 || insert_num < -1),
787 787 "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level",
788 /* Make balance in case insert_num < 0 */ 788 insert_num, h);
789 if ( insert_num < 0 ) { 789
790 balance_internal_when_delete (tb, h, child_pos); 790 /* Make balance in case insert_num < 0 */
791 return order; 791 if (insert_num < 0) {
792 } 792 balance_internal_when_delete(tb, h, child_pos);
793 793 return order;
794 k = 0;
795 if ( tb->lnum[h] > 0 ) {
796 /* shift lnum[h] items from S[h] to the left neighbor L[h].
797 check how many of new items fall into L[h] or CFL[h] after
798 shifting */
799 n = B_NR_ITEMS (tb->L[h]); /* number of items in L[h] */
800 if ( tb->lnum[h] <= child_pos ) {
801 /* new items don't fall into L[h] or CFL[h] */
802 internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);
803 /*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]);*/
804 child_pos -= tb->lnum[h];
805 } else if ( tb->lnum[h] > child_pos + insert_num ) {
806 /* all new items fall into L[h] */
807 internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h] - insert_num);
808 /* internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
809 tb->lnum[h]-insert_num);
810 */
811 /* insert insert_num keys and node-pointers into L[h] */
812 bi.tb = tb;
813 bi.bi_bh = tb->L[h];
814 bi.bi_parent = tb->FL[h];
815 bi.bi_position = get_left_neighbor_position (tb, h);
816 internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next*/ n + child_pos + 1,
817 insert_num,insert_key,insert_ptr);
818
819 insert_num = 0;
820 } else {
821 struct disk_child * dc;
822
823 /* some items fall into L[h] or CFL[h], but some don't fall */
824 internal_shift1_left(tb,h,child_pos+1);
825 /* calculate number of new items that fall into L[h] */
826 k = tb->lnum[h] - child_pos - 1;
827 bi.tb = tb;
828 bi.bi_bh = tb->L[h];
829 bi.bi_parent = tb->FL[h];
830 bi.bi_position = get_left_neighbor_position (tb, h);
831 internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next,*/ n + child_pos + 1,k,
832 insert_key,insert_ptr);
833
834 replace_lkey(tb,h,insert_key + k);
835
836 /* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
837 dc = B_N_CHILD(tbSh, 0);
838 put_dc_size( dc, MAX_CHILD_SIZE(insert_ptr[k]) - B_FREE_SPACE (insert_ptr[k]));
839 put_dc_block_number( dc, insert_ptr[k]->b_blocknr );
840
841 do_balance_mark_internal_dirty (tb, tbSh, 0);
842
843 k++;
844 insert_key += k;
845 insert_ptr += k;
846 insert_num -= k;
847 child_pos = 0;
848 } 794 }
849 } /* tb->lnum[h] > 0 */
850
851 if ( tb->rnum[h] > 0 ) {
852 /*shift rnum[h] items from S[h] to the right neighbor R[h]*/
853 /* check how many of new items fall into R or CFR after shifting */
854 n = B_NR_ITEMS (tbSh); /* number of items in S[h] */
855 if ( n - tb->rnum[h] >= child_pos )
856 /* new items fall into S[h] */
857 /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]);*/
858 internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]);
859 else
860 if ( n + insert_num - tb->rnum[h] < child_pos )
861 {
862 /* all new items fall into R[h] */
863 /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
864 tb->rnum[h] - insert_num);*/
865 internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h] - insert_num);
866
867 /* insert insert_num keys and node-pointers into R[h] */
868 bi.tb = tb;
869 bi.bi_bh = tb->R[h];
870 bi.bi_parent = tb->FR[h];
871 bi.bi_position = get_right_neighbor_position (tb, h);
872 internal_insert_childs (&bi, /*tb->R[h],tb->S[h-1]->b_next*/ child_pos - n - insert_num + tb->rnum[h] - 1,
873 insert_num,insert_key,insert_ptr);
874 insert_num = 0;
875 }
876 else
877 {
878 struct disk_child * dc;
879
880 /* one of the items falls into CFR[h] */
881 internal_shift1_right(tb,h,n - child_pos + 1);
882 /* calculate number of new items that fall into R[h] */
883 k = tb->rnum[h] - n + child_pos - 1;
884 bi.tb = tb;
885 bi.bi_bh = tb->R[h];
886 bi.bi_parent = tb->FR[h];
887 bi.bi_position = get_right_neighbor_position (tb, h);
888 internal_insert_childs (&bi, /*tb->R[h], tb->R[h]->b_child,*/ 0, k, insert_key + 1, insert_ptr + 1);
889 795
890 replace_rkey(tb,h,insert_key + insert_num - k - 1); 796 k = 0;
797 if (tb->lnum[h] > 0) {
798 /* shift lnum[h] items from S[h] to the left neighbor L[h].
799 check how many of new items fall into L[h] or CFL[h] after
800 shifting */
801 n = B_NR_ITEMS(tb->L[h]); /* number of items in L[h] */
802 if (tb->lnum[h] <= child_pos) {
803 /* new items don't fall into L[h] or CFL[h] */
804 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
805 tb->lnum[h]);
806 /*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]); */
807 child_pos -= tb->lnum[h];
808 } else if (tb->lnum[h] > child_pos + insert_num) {
809 /* all new items fall into L[h] */
810 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
811 tb->lnum[h] - insert_num);
812 /* internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
813 tb->lnum[h]-insert_num);
814 */
815 /* insert insert_num keys and node-pointers into L[h] */
816 bi.tb = tb;
817 bi.bi_bh = tb->L[h];
818 bi.bi_parent = tb->FL[h];
819 bi.bi_position = get_left_neighbor_position(tb, h);
820 internal_insert_childs(&bi,
821 /*tb->L[h], tb->S[h-1]->b_next */
822 n + child_pos + 1,
823 insert_num, insert_key,
824 insert_ptr);
825
826 insert_num = 0;
827 } else {
828 struct disk_child *dc;
829
830 /* some items fall into L[h] or CFL[h], but some don't fall */
831 internal_shift1_left(tb, h, child_pos + 1);
832 /* calculate number of new items that fall into L[h] */
833 k = tb->lnum[h] - child_pos - 1;
834 bi.tb = tb;
835 bi.bi_bh = tb->L[h];
836 bi.bi_parent = tb->FL[h];
837 bi.bi_position = get_left_neighbor_position(tb, h);
838 internal_insert_childs(&bi,
839 /*tb->L[h], tb->S[h-1]->b_next, */
840 n + child_pos + 1, k,
841 insert_key, insert_ptr);
842
843 replace_lkey(tb, h, insert_key + k);
844
845 /* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
846 dc = B_N_CHILD(tbSh, 0);
847 put_dc_size(dc,
848 MAX_CHILD_SIZE(insert_ptr[k]) -
849 B_FREE_SPACE(insert_ptr[k]));
850 put_dc_block_number(dc, insert_ptr[k]->b_blocknr);
851
852 do_balance_mark_internal_dirty(tb, tbSh, 0);
853
854 k++;
855 insert_key += k;
856 insert_ptr += k;
857 insert_num -= k;
858 child_pos = 0;
859 }
860 }
861 /* end of tb->lnum[h] > 0 */
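The lnum[h] > 0 branch that ends here distinguishes three cases by comparing lnum[h] with child_pos: none of the new children reach L[h], all of them do, or the shift splits them, in which case k children go left and the key after them becomes the new delimiting key installed by replace_lkey(). A compilable sketch of just that case analysis; the sketch_* names and plain int parameters are illustrative only:

#include <stdio.h>

/* Sketch of the three-way split performed when lnum items are shifted
 * from S[h] to the left neighbour L[h]; purely illustrative. */
struct sketch_left_shift {
	int to_left;		/* how many of the new children land in L[h] */
	int new_child_pos;	/* child position remaining in S[h], if any   */
};

static struct sketch_left_shift
sketch_shift_left(int lnum, int child_pos, int insert_num)
{
	struct sketch_left_shift r;

	if (lnum <= child_pos) {
		/* case 1: the shifted region ends before the insertion point */
		r.to_left = 0;
		r.new_child_pos = child_pos - lnum;
	} else if (lnum > child_pos + insert_num) {
		/* case 2: the whole insertion lands inside L[h] */
		r.to_left = insert_num;
		r.new_child_pos = -1;	/* nothing left to insert into S[h] */
	} else {
		/* case 3: split; k children go left and the (k+1)-th key
		 * becomes the new delimiting key between L[h] and S[h] */
		r.to_left = lnum - child_pos - 1;
		r.new_child_pos = 0;
	}
	return r;
}

int main(void)
{
	struct sketch_left_shift r = sketch_shift_left(3, 1, 2);

	printf("to_left=%d new_child_pos=%d\n", r.to_left, r.new_child_pos);
	return 0;
}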
862 if (tb->rnum[h] > 0) {
863 /*shift rnum[h] items from S[h] to the right neighbor R[h] */
864 /* check how many of new items fall into R or CFR after shifting */
865 n = B_NR_ITEMS(tbSh); /* number of items in S[h] */
866 if (n - tb->rnum[h] >= child_pos)
867 /* new items fall into S[h] */
868 /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]); */
869 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
870 tb->rnum[h]);
871 else if (n + insert_num - tb->rnum[h] < child_pos) {
872 /* all new items fall into R[h] */
873 /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
874 tb->rnum[h] - insert_num); */
875 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
876 tb->rnum[h] - insert_num);
877
878 /* insert insert_num keys and node-pointers into R[h] */
879 bi.tb = tb;
880 bi.bi_bh = tb->R[h];
881 bi.bi_parent = tb->FR[h];
882 bi.bi_position = get_right_neighbor_position(tb, h);
883 internal_insert_childs(&bi,
884 /*tb->R[h],tb->S[h-1]->b_next */
885 child_pos - n - insert_num +
886 tb->rnum[h] - 1,
887 insert_num, insert_key,
888 insert_ptr);
889 insert_num = 0;
890 } else {
891 struct disk_child *dc;
892
893 /* one of the items falls into CFR[h] */
894 internal_shift1_right(tb, h, n - child_pos + 1);
895 /* calculate number of new items that fall into R[h] */
896 k = tb->rnum[h] - n + child_pos - 1;
897 bi.tb = tb;
898 bi.bi_bh = tb->R[h];
899 bi.bi_parent = tb->FR[h];
900 bi.bi_position = get_right_neighbor_position(tb, h);
901 internal_insert_childs(&bi,
902 /*tb->R[h], tb->R[h]->b_child, */
903 0, k, insert_key + 1,
904 insert_ptr + 1);
905
906 replace_rkey(tb, h, insert_key + insert_num - k - 1);
907
908 /* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1] */
909 dc = B_N_CHILD(tb->R[h], 0);
910 put_dc_size(dc,
911 MAX_CHILD_SIZE(insert_ptr
912 [insert_num - k - 1]) -
913 B_FREE_SPACE(insert_ptr
914 [insert_num - k - 1]));
915 put_dc_block_number(dc,
916 insert_ptr[insert_num - k -
917 1]->b_blocknr);
918
919 do_balance_mark_internal_dirty(tb, tb->R[h], 0);
920
921 insert_num -= (k + 1);
922 }
923 }
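The rnum[h] > 0 branch is the mirror image: with n items currently in S[h], comparing n - rnum[h] against child_pos decides whether the new children stay in S[h], all move to R[h], or are split, one key being promoted into CFR[h] by replace_rkey(). A symmetric sketch, again with illustrative names only:

#include <stdio.h>

/* Mirror of the left-shift sketch: rnum items move from S[h] to R[h]. */
static int sketch_new_children_to_right(int n, int rnum,
					int child_pos, int insert_num)
{
	if (n - rnum >= child_pos)
		return 0;		/* new children stay in S[h] */
	if (n + insert_num - rnum < child_pos)
		return insert_num;	/* all of them move to R[h]  */
	/* split: k children follow the shifted items into R[h] */
	return rnum - n + child_pos - 1;
}

int main(void)
{
	/* e.g. 5 items in S[h], 4 shifted right, insertion at position 3 */
	printf("%d\n", sketch_new_children_to_right(5, 4, 3, 2));
	return 0;
}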
891 924
892 /* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1]*/ 925 /** Fill new node that appears instead of S[h] **/
893 dc = B_N_CHILD(tb->R[h], 0); 926 RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
894 put_dc_size( dc, MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) - 927 RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");
895 B_FREE_SPACE (insert_ptr[insert_num-k-1]));
896 put_dc_block_number( dc, insert_ptr[insert_num-k-1]->b_blocknr );
897 928
898 do_balance_mark_internal_dirty (tb, tb->R[h],0); 929 if (!tb->blknum[h]) { /* node S[h] is empty now */
930 RFALSE(!tbSh, "S[h] is equal NULL");
899 931
900 insert_num -= (k + 1); 932 /* do what is needed for buffer thrown from tree */
901 } 933 reiserfs_invalidate_buffer(tb, tbSh);
902 } 934 return order;
935 }
903 936
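From here on the value of blknum[h] drives the rest of the function: 0 means S[h] has become empty and is invalidated as above, 1 means the existing node (or a brand-new root) suffices, and 2 means an additional node S_new has to be filled further below. A tiny sketch of that dispatch, with a hypothetical enum standing in for the real control flow:

#include <assert.h>

/* sketch_blknum_action and its enum are illustrative only. */
enum sketch_action {
	SKETCH_DROP_S,		/* blknum == 0: S[h] is empty, invalidate it   */
	SKETCH_KEEP_S,		/* blknum == 1: S[h] (or a new root) is enough */
	SKETCH_SPLIT_TO_S_NEW	/* blknum == 2: fill an extra node S_new       */
};

static enum sketch_action sketch_blknum_action(int blknum)
{
	assert(blknum >= 0 && blknum <= 2);	/* mirrors the RFALSE checks */
	if (blknum == 0)
		return SKETCH_DROP_S;
	return blknum == 1 ? SKETCH_KEEP_S : SKETCH_SPLIT_TO_S_NEW;
}

int main(void)
{
	return sketch_blknum_action(2) == SKETCH_SPLIT_TO_S_NEW ? 0 : 1;
}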
904 /** Fill new node that appears instead of S[h] **/ 937 if (!tbSh) {
905 RFALSE( tb->blknum[h] > 2, "blknum can not be > 2 for internal level"); 938 /* create new root */
906 RFALSE( tb->blknum[h] < 0, "blknum can not be < 0"); 939 struct disk_child *dc;
940 struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1);
941 struct block_head *blkh;
907 942
908 if ( ! tb->blknum[h] ) 943 if (tb->blknum[h] != 1)
909 { /* node S[h] is empty now */ 944 reiserfs_panic(NULL,
910 RFALSE( ! tbSh, "S[h] is equal NULL"); 945 "balance_internal: One new node required for creating the new root");
946 /* S[h] = empty buffer from the list FEB. */
947 tbSh = get_FEB(tb);
948 blkh = B_BLK_HEAD(tbSh);
949 set_blkh_level(blkh, h + 1);
911 950
912 /* do what is needed for buffer thrown from tree */ 951 /* Put the unique node-pointer to S[h] that points to S[h-1]. */
913 reiserfs_invalidate_buffer(tb,tbSh); 952
914 return order; 953 dc = B_N_CHILD(tbSh, 0);
915 } 954 put_dc_block_number(dc, tbSh_1->b_blocknr);
916 955 put_dc_size(dc,
917 if ( ! tbSh ) { 956 (MAX_CHILD_SIZE(tbSh_1) - B_FREE_SPACE(tbSh_1)));
918 /* create new root */ 957
919 struct disk_child * dc; 958 tb->insert_size[h] -= DC_SIZE;
920 struct buffer_head * tbSh_1 = PATH_H_PBUFFER (tb->tb_path, h - 1); 959 set_blkh_free_space(blkh, blkh_free_space(blkh) - DC_SIZE);
921 struct block_head * blkh;
922
923
924 if ( tb->blknum[h] != 1 )
925 reiserfs_panic(NULL, "balance_internal: One new node required for creating the new root");
926 /* S[h] = empty buffer from the list FEB. */
927 tbSh = get_FEB (tb);
928 blkh = B_BLK_HEAD(tbSh);
929 set_blkh_level( blkh, h + 1 );
930
931 /* Put the unique node-pointer to S[h] that points to S[h-1]. */
932
933 dc = B_N_CHILD(tbSh, 0);
934 put_dc_block_number( dc, tbSh_1->b_blocknr );
935 put_dc_size( dc, (MAX_CHILD_SIZE (tbSh_1) - B_FREE_SPACE (tbSh_1)));
936
937 tb->insert_size[h] -= DC_SIZE;
938 set_blkh_free_space( blkh, blkh_free_space(blkh) - DC_SIZE );
939
940 do_balance_mark_internal_dirty (tb, tbSh, 0);
941
942 /*&&&&&&&&&&&&&&&&&&&&&&&&*/
943 check_internal (tbSh);
944 /*&&&&&&&&&&&&&&&&&&&&&&&&*/
945
946 /* put new root into path structure */
947 PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) = tbSh;
948
949 /* Change root in structure super block. */
950 PUT_SB_ROOT_BLOCK( tb->tb_sb, tbSh->b_blocknr );
951 PUT_SB_TREE_HEIGHT( tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1 );
952 do_balance_mark_sb_dirty (tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
953 }
954
955 if ( tb->blknum[h] == 2 ) {
956 int snum;
957 struct buffer_info dest_bi, src_bi;
958 960
961 do_balance_mark_internal_dirty(tb, tbSh, 0);
959 962
960 /* S_new = free buffer from list FEB */ 963 /*&&&&&&&&&&&&&&&&&&&&&&&& */
961 S_new = get_FEB(tb); 964 check_internal(tbSh);
962 965 /*&&&&&&&&&&&&&&&&&&&&&&&& */
963 set_blkh_level( B_BLK_HEAD(S_new), h + 1 ); 966
964 967 /* put new root into path structure */
965 dest_bi.tb = tb; 968 PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) =
966 dest_bi.bi_bh = S_new; 969 tbSh;
967 dest_bi.bi_parent = NULL; 970
968 dest_bi.bi_position = 0; 971 /* Change root in structure super block. */
969 src_bi.tb = tb; 972 PUT_SB_ROOT_BLOCK(tb->tb_sb, tbSh->b_blocknr);
970 src_bi.bi_bh = tbSh; 973 PUT_SB_TREE_HEIGHT(tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1);
971 src_bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h); 974 do_balance_mark_sb_dirty(tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1);
972 src_bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1);
973
974 n = B_NR_ITEMS (tbSh); /* number of items in S[h] */
975 snum = (insert_num + n + 1)/2;
976 if ( n - snum >= child_pos ) {
977 /* new items don't fall into S_new */
978 /* store the delimiting key for the next level */
979 /* new_insert_key = (n - snum)'th key in S[h] */
980 memcpy (&new_insert_key,B_N_PDELIM_KEY(tbSh,n - snum),
981 KEY_SIZE);
982 /* last parameter is del_par */
983 internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum, 0);
984 /* internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0);*/
985 } else if ( n + insert_num - snum < child_pos ) {
986 /* all new items fall into S_new */
987 /* store the delimiting key for the next level */
988 /* new_insert_key = (n + insert_num - snum)'th key in S[h] */
989 memcpy(&new_insert_key,B_N_PDELIM_KEY(tbSh,n + insert_num - snum),
990 KEY_SIZE);
991 /* last parameter is del_par */
992 internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum - insert_num, 0);
993 /* internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0);*/
994
995 /* insert insert_num keys and node-pointers into S_new */
996 internal_insert_childs (&dest_bi, /*S_new,tb->S[h-1]->b_next,*/child_pos - n - insert_num + snum - 1,
997 insert_num,insert_key,insert_ptr);
998
999 insert_num = 0;
1000 } else {
1001 struct disk_child * dc;
1002
1003 /* some items fall into S_new, but some don't fall */
1004 /* last parameter is del_par */
1005 internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, n - child_pos + 1, 1);
1006 /* internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1);*/
1007 /* calculate number of new items that fall into S_new */
1008 k = snum - n + child_pos - 1;
1009
1010 internal_insert_childs (&dest_bi, /*S_new,*/ 0, k, insert_key + 1, insert_ptr+1);
1011
1012 /* new_insert_key = insert_key[insert_num - k - 1] */
1013 memcpy(&new_insert_key,insert_key + insert_num - k - 1,
1014 KEY_SIZE);
1015 /* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */
1016
1017 dc = B_N_CHILD(S_new,0);
1018 put_dc_size( dc, (MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) -
1019 B_FREE_SPACE(insert_ptr[insert_num-k-1])) );
1020 put_dc_block_number( dc, insert_ptr[insert_num-k-1]->b_blocknr );
1021
1022 do_balance_mark_internal_dirty (tb, S_new,0);
1023
1024 insert_num -= (k + 1);
1025 } 975 }
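When tbSh is NULL, the branch that just closed grows the tree by one level: a free buffer from the FEB list becomes the new root, its level is set to h + 1, its single disk_child is pointed at the former top node, and the super block's root block and tree height are updated. The accounting detail is that this one node pointer consumes DC_SIZE both from the pending insert_size[h] and from the new node's free space. A sketch of that free-space arithmetic, with made-up field names standing in for struct block_head:

#include <stdio.h>

/* Illustrative model of the new-root accounting; sketch_blkh is a
 * stand-in for struct block_head, not the on-disk layout. */
struct sketch_blkh {
	int level;
	int free_space;
};

#define SKETCH_DC_SIZE 8	/* assumed sizeof(struct disk_child) */

static void sketch_new_root(struct sketch_blkh *blkh, int h,
			    int empty_node_space, int *insert_size)
{
	blkh->level = h + 1;			/* root sits above old S[h-1] */
	blkh->free_space = empty_node_space;	/* empty node from the FEB list */

	/* the single pointer to the old top node costs one disk_child */
	*insert_size -= SKETCH_DC_SIZE;
	blkh->free_space -= SKETCH_DC_SIZE;
}

int main(void)
{
	struct sketch_blkh blkh;
	int insert_size = 24;	/* one pending (key, pointer) pair, assumed size */

	/* 4096 - 24 models a 4 KiB block minus an assumed block-head size */
	sketch_new_root(&blkh, 1, 4096 - 24, &insert_size);
	printf("level=%d free=%d pending=%d\n",
	       blkh.level, blkh.free_space, insert_size);
	return 0;
}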
1026 /* new_insert_ptr = node_pointer to S_new */ 976
1027 new_insert_ptr = S_new; 977 if (tb->blknum[h] == 2) {
1028 978 int snum;
1029 RFALSE (!buffer_journaled(S_new) || buffer_journal_dirty(S_new) || 979 struct buffer_info dest_bi, src_bi;
1030 buffer_dirty (S_new), 980
1031 "cm-00001: bad S_new (%b)", S_new); 981 /* S_new = free buffer from list FEB */
1032 982 S_new = get_FEB(tb);
1033 // S_new is released in unfix_nodes 983
1034 } 984 set_blkh_level(B_BLK_HEAD(S_new), h + 1);
1035 985
1036 n = B_NR_ITEMS (tbSh); /*number of items in S[h] */ 986 dest_bi.tb = tb;
1037 987 dest_bi.bi_bh = S_new;
1038 if ( 0 <= child_pos && child_pos <= n && insert_num > 0 ) { 988 dest_bi.bi_parent = NULL;
1039 bi.tb = tb; 989 dest_bi.bi_position = 0;
1040 bi.bi_bh = tbSh; 990 src_bi.tb = tb;
1041 bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h); 991 src_bi.bi_bh = tbSh;
1042 bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1); 992 src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
1043 internal_insert_childs ( 993 src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
1044 &bi,/*tbSh,*/ 994
1045 /* ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next : tb->S[h]->b_child->b_next,*/ 995 n = B_NR_ITEMS(tbSh); /* number of items in S[h] */
1046 child_pos,insert_num,insert_key,insert_ptr 996 snum = (insert_num + n + 1) / 2;
1047 ); 997 if (n - snum >= child_pos) {
998 /* new items don't fall into S_new */
999 /* store the delimiting key for the next level */
1000 /* new_insert_key = (n - snum)'th key in S[h] */
1001 memcpy(&new_insert_key, B_N_PDELIM_KEY(tbSh, n - snum),
1002 KEY_SIZE);
1003 /* last parameter is del_par */
1004 internal_move_pointers_items(&dest_bi, &src_bi,
1005 LAST_TO_FIRST, snum, 0);
1006 /* internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0); */
1007 } else if (n + insert_num - snum < child_pos) {
1008 /* all new items fall into S_new */
1009 /* store the delimiting key for the next level */
1010 /* new_insert_key = (n + insert_num - snum)'th key in S[h] */
1011 memcpy(&new_insert_key,
1012 B_N_PDELIM_KEY(tbSh, n + insert_num - snum),
1013 KEY_SIZE);
1014 /* last parameter is del_par */
1015 internal_move_pointers_items(&dest_bi, &src_bi,
1016 LAST_TO_FIRST,
1017 snum - insert_num, 0);
1018 /* internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0); */
1019
1020 /* insert insert_num keys and node-pointers into S_new */
1021 internal_insert_childs(&dest_bi,
1022 /*S_new,tb->S[h-1]->b_next, */
1023 child_pos - n - insert_num +
1024 snum - 1,
1025 insert_num, insert_key,
1026 insert_ptr);
1027
1028 insert_num = 0;
1029 } else {
1030 struct disk_child *dc;
1031
1032 /* some items fall into S_new, but some don't fall */
1033 /* last parameter is del_par */
1034 internal_move_pointers_items(&dest_bi, &src_bi,
1035 LAST_TO_FIRST,
1036 n - child_pos + 1, 1);
1037 /* internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1); */
1038 /* calculate number of new items that fall into S_new */
1039 k = snum - n + child_pos - 1;
1040
1041 internal_insert_childs(&dest_bi, /*S_new, */ 0, k,
1042 insert_key + 1, insert_ptr + 1);
1043
1044 /* new_insert_key = insert_key[insert_num - k - 1] */
1045 memcpy(&new_insert_key, insert_key + insert_num - k - 1,
1046 KEY_SIZE);
1047 /* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */
1048
1049 dc = B_N_CHILD(S_new, 0);
1050 put_dc_size(dc,
1051 (MAX_CHILD_SIZE
1052 (insert_ptr[insert_num - k - 1]) -
1053 B_FREE_SPACE(insert_ptr
1054 [insert_num - k - 1])));
1055 put_dc_block_number(dc,
1056 insert_ptr[insert_num - k -
1057 1]->b_blocknr);
1058
1059 do_balance_mark_internal_dirty(tb, S_new, 0);
1060
1061 insert_num -= (k + 1);
1062 }
1063 /* new_insert_ptr = node_pointer to S_new */
1064 new_insert_ptr = S_new;
1065
1066 RFALSE(!buffer_journaled(S_new) || buffer_journal_dirty(S_new)
1067 || buffer_dirty(S_new), "cm-00001: bad S_new (%b)",
1068 S_new);
1069
1070 /* S_new is released in unfix_nodes */
1048 } 1071 }
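For blknum[h] == 2 the block above carves a second node S_new out of S[h]: snum = (insert_num + n + 1) / 2 items are moved to S_new from the end (LAST_TO_FIRST), and the familiar three-way comparison against child_pos decides where the new children and the delimiting key new_insert_key come from. A standalone sketch of the snum arithmetic and the case selection (names are illustrative):

#include <stdio.h>

/* Sketch of how S[h] is split into S[h] and S_new when blknum == 2. */
static int sketch_snum(int n, int insert_num)
{
	/* roughly half of the items, counting the ones being inserted */
	return (insert_num + n + 1) / 2;
}

static const char *sketch_split_case(int n, int snum,
				     int child_pos, int insert_num)
{
	if (n - snum >= child_pos)
		return "new children stay in S[h]";
	if (n + insert_num - snum < child_pos)
		return "all new children go to S_new";
	return "split: some new children go to S_new";
}

int main(void)
{
	int n = 5, insert_num = 1, child_pos = 4;
	int snum = sketch_snum(n, insert_num);

	printf("snum=%d: %s\n", snum,
	       sketch_split_case(n, snum, child_pos, insert_num));
	return 0;
}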
1049 1072
1073 n = B_NR_ITEMS(tbSh); /*number of items in S[h] */
1050 1074
1051 memcpy (new_insert_key_addr,&new_insert_key,KEY_SIZE); 1075 if (0 <= child_pos && child_pos <= n && insert_num > 0) {
1076 bi.tb = tb;
1077 bi.bi_bh = tbSh;
1078 bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
1079 bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
1080 internal_insert_childs(&bi, /*tbSh, */
1081 /* ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next : tb->S[h]->b_child->b_next, */
1082 child_pos, insert_num, insert_key,
1083 insert_ptr);
1084 }
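Every insertion in this function goes through the same four-field buffer_info descriptor: the tree_balance handle, the buffer being modified, its parent, and the position of that buffer in the parent, filled in once more just above for the final insert into S[h]. A sketch of such a descriptor with hypothetical stand-in types:

#include <stdio.h>

/* sketch_buffer_info mirrors the shape of the buffer_info passed to
 * internal_insert_childs(); all names here are stand-ins. */
struct sketch_buf { unsigned long blocknr; };

struct sketch_buffer_info {
	void *tb;			/* the tree_balance descriptor */
	struct sketch_buf *bi_bh;	/* node being modified         */
	struct sketch_buf *bi_parent;	/* its parent, NULL for root   */
	int bi_position;		/* slot of bi_bh in the parent */
};

int main(void)
{
	struct sketch_buf s_h = { 100 }, parent = { 50 };
	struct sketch_buffer_info bi = {
		.tb = NULL,		/* no balance context in this sketch */
		.bi_bh = &s_h,
		.bi_parent = &parent,
		.bi_position = 3,
	};

	printf("insert into block %lu (slot %d of block %lu)\n",
	       bi.bi_bh->blocknr, bi.bi_position,
	       bi.bi_parent ? bi.bi_parent->blocknr : 0UL);
	return 0;
}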
1085
1086 memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
1052 insert_ptr[0] = new_insert_ptr; 1087 insert_ptr[0] = new_insert_ptr;
1053 1088
1054 return order; 1089 return order;
1055 } 1090}
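The closing lines define the contract with the caller: the delimiting key accumulated in new_insert_key is copied out through new_insert_key_addr and the pointer to S_new, if one was created, is handed back through insert_ptr[0], so the balancing loop can repeat the insertion one level higher. A minimal sketch of that out-parameter pattern, with hypothetical types:

#include <stdio.h>
#include <string.h>

/* Illustrative only: models how a split at level h hands its new
 * delimiting key and node pointer up to level h + 1. */
struct sketch_key { unsigned char raw[16]; };	/* assumed KEY_SIZE = 16 */
struct sketch_node { unsigned long blocknr; };

static void sketch_report_split(struct sketch_key *new_key_addr,
				struct sketch_node **insert_ptr,
				const struct sketch_key *new_key,
				struct sketch_node *s_new)
{
	/* same shape as the memcpy()/insert_ptr[0] assignment above */
	memcpy(new_key_addr, new_key, sizeof(*new_key_addr));
	insert_ptr[0] = s_new;
}

int main(void)
{
	struct sketch_key up_key, split_key = { { 0 } };
	struct sketch_node s_new = { 1234 }, *ptrs[2] = { NULL, NULL };

	sketch_report_split(&up_key, ptrs, &split_key, &s_new);
	printf("block passed up: %lu\n", ptrs[0]->blocknr);
	return 0;
}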
1056
1057
1058