Diffstat (limited to 'fs/reiserfs/ibalance.c')
-rw-r--r-- | fs/reiserfs/ibalance.c | 1058
1 files changed, 1058 insertions, 0 deletions
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
new file mode 100644
index 000000000000..a362125da0d8
--- /dev/null
+++ b/fs/reiserfs/ibalance.c
@@ -0,0 +1,1058 @@
1 | /* | ||
2 | * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | #include <asm/uaccess.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/time.h> | ||
9 | #include <linux/reiserfs_fs.h> | ||
10 | #include <linux/buffer_head.h> | ||
11 | |||
12 | /* this is the one and only function that is used outside this file (in do_balance.c) */ | ||
13 | int balance_internal ( | ||
14 | struct tree_balance * , | ||
15 | int, | ||
16 | int, | ||
17 | struct item_head * , | ||
18 | struct buffer_head ** | ||
19 | ); | ||
20 | |||
21 | /* modes of internal_shift_left, internal_shift_right and internal_insert_childs */ | ||
22 | #define INTERNAL_SHIFT_FROM_S_TO_L 0 | ||
23 | #define INTERNAL_SHIFT_FROM_R_TO_S 1 | ||
24 | #define INTERNAL_SHIFT_FROM_L_TO_S 2 | ||
25 | #define INTERNAL_SHIFT_FROM_S_TO_R 3 | ||
26 | #define INTERNAL_INSERT_TO_S 4 | ||
27 | #define INTERNAL_INSERT_TO_L 5 | ||
28 | #define INTERNAL_INSERT_TO_R 6 | ||
29 | |||
30 | static void internal_define_dest_src_infos ( | ||
31 | int shift_mode, | ||
32 | struct tree_balance * tb, | ||
33 | int h, | ||
34 | struct buffer_info * dest_bi, | ||
35 | struct buffer_info * src_bi, | ||
36 | int * d_key, | ||
37 | struct buffer_head ** cf | ||
38 | ) | ||
39 | { | ||
40 | memset (dest_bi, 0, sizeof (struct buffer_info)); | ||
41 | memset (src_bi, 0, sizeof (struct buffer_info)); | ||
42 | /* define dest, src, dest parent, dest position */ | ||
43 | switch (shift_mode) { | ||
44 | case INTERNAL_SHIFT_FROM_S_TO_L: /* used in internal_shift_left */ | ||
45 | src_bi->tb = tb; | ||
46 | src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); | ||
47 | src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
48 | src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
49 | dest_bi->tb = tb; | ||
50 | dest_bi->bi_bh = tb->L[h]; | ||
51 | dest_bi->bi_parent = tb->FL[h]; | ||
52 | dest_bi->bi_position = get_left_neighbor_position (tb, h); | ||
53 | *d_key = tb->lkey[h]; | ||
54 | *cf = tb->CFL[h]; | ||
55 | break; | ||
56 | case INTERNAL_SHIFT_FROM_L_TO_S: | ||
57 | src_bi->tb = tb; | ||
58 | src_bi->bi_bh = tb->L[h]; | ||
59 | src_bi->bi_parent = tb->FL[h]; | ||
60 | src_bi->bi_position = get_left_neighbor_position (tb, h); | ||
61 | dest_bi->tb = tb; | ||
62 | dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); | ||
63 | dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
64 | dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); /* dest position is the analog of dest->b_item_order */ | ||
65 | *d_key = tb->lkey[h]; | ||
66 | *cf = tb->CFL[h]; | ||
67 | break; | ||
68 | |||
69 | case INTERNAL_SHIFT_FROM_R_TO_S: /* used in internal_shift_left */ | ||
70 | src_bi->tb = tb; | ||
71 | src_bi->bi_bh = tb->R[h]; | ||
72 | src_bi->bi_parent = tb->FR[h]; | ||
73 | src_bi->bi_position = get_right_neighbor_position (tb, h); | ||
74 | dest_bi->tb = tb; | ||
75 | dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); | ||
76 | dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
77 | dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
78 | *d_key = tb->rkey[h]; | ||
79 | *cf = tb->CFR[h]; | ||
80 | break; | ||
81 | |||
82 | case INTERNAL_SHIFT_FROM_S_TO_R: | ||
83 | src_bi->tb = tb; | ||
84 | src_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); | ||
85 | src_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
86 | src_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
87 | dest_bi->tb = tb; | ||
88 | dest_bi->bi_bh = tb->R[h]; | ||
89 | dest_bi->bi_parent = tb->FR[h]; | ||
90 | dest_bi->bi_position = get_right_neighbor_position (tb, h); | ||
91 | *d_key = tb->rkey[h]; | ||
92 | *cf = tb->CFR[h]; | ||
93 | break; | ||
94 | |||
95 | case INTERNAL_INSERT_TO_L: | ||
96 | dest_bi->tb = tb; | ||
97 | dest_bi->bi_bh = tb->L[h]; | ||
98 | dest_bi->bi_parent = tb->FL[h]; | ||
99 | dest_bi->bi_position = get_left_neighbor_position (tb, h); | ||
100 | break; | ||
101 | |||
102 | case INTERNAL_INSERT_TO_S: | ||
103 | dest_bi->tb = tb; | ||
104 | dest_bi->bi_bh = PATH_H_PBUFFER (tb->tb_path, h); | ||
105 | dest_bi->bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
106 | dest_bi->bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
107 | break; | ||
108 | |||
109 | case INTERNAL_INSERT_TO_R: | ||
110 | dest_bi->tb = tb; | ||
111 | dest_bi->bi_bh = tb->R[h]; | ||
112 | dest_bi->bi_parent = tb->FR[h]; | ||
113 | dest_bi->bi_position = get_right_neighbor_position (tb, h); | ||
114 | break; | ||
115 | |||
116 | default: | ||
117 | reiserfs_panic (tb->tb_sb, "internal_define_dest_src_infos: shift type is unknown (%d)", shift_mode); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | |||
122 | |||
123 | /* Insert count node pointers into buffer cur before position to + 1. | ||
124 | * Insert count items into buffer cur before position to. | ||
125 | * Items and node pointers are specified by inserted and bh respectively. | ||
126 | */ | ||
127 | static void internal_insert_childs (struct buffer_info * cur_bi, | ||
128 | int to, int count, | ||
129 | struct item_head * inserted, | ||
130 | struct buffer_head ** bh | ||
131 | ) | ||
132 | { | ||
133 | struct buffer_head * cur = cur_bi->bi_bh; | ||
134 | struct block_head * blkh; | ||
135 | int nr; | ||
136 | struct reiserfs_key * ih; | ||
137 | struct disk_child new_dc[2]; | ||
138 | struct disk_child * dc; | ||
139 | int i; | ||
140 | |||
141 | if (count <= 0) | ||
142 | return; | ||
143 | |||
144 | blkh = B_BLK_HEAD(cur); | ||
145 | nr = blkh_nr_item(blkh); | ||
146 | |||
147 | RFALSE( count > 2, | ||
148 | "too many children (%d) are to be inserted", count); | ||
149 | RFALSE( B_FREE_SPACE (cur) < count * (KEY_SIZE + DC_SIZE), | ||
150 | "no enough free space (%d), needed %d bytes", | ||
151 | B_FREE_SPACE (cur), count * (KEY_SIZE + DC_SIZE)); | ||
152 | |||
153 | /* prepare space for count disk_child */ | ||
154 | dc = B_N_CHILD(cur,to+1); | ||
155 | |||
156 | memmove (dc + count, dc, (nr+1-(to+1)) * DC_SIZE); | ||
157 | |||
158 | /* copy the to-be-inserted disk children */ | ||
159 | for (i = 0; i < count; i ++) { | ||
160 | put_dc_size( &(new_dc[i]), MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE(bh[i])); | ||
161 | put_dc_block_number( &(new_dc[i]), bh[i]->b_blocknr ); | ||
162 | } | ||
163 | memcpy (dc, new_dc, DC_SIZE * count); | ||
164 | |||
165 | |||
166 | /* prepare space for count items */ | ||
167 | ih = B_N_PDELIM_KEY (cur, ((to == -1) ? 0 : to)); | ||
168 | |||
169 | memmove (ih + count, ih, (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE); | ||
170 | |||
171 | /* copy item headers (keys) */ | ||
172 | memcpy (ih, inserted, KEY_SIZE); | ||
173 | if ( count > 1 ) | ||
174 | memcpy (ih + 1, inserted + 1, KEY_SIZE); | ||
175 | |||
176 | /* sizes, item number */ | ||
177 | set_blkh_nr_item( blkh, blkh_nr_item(blkh) + count ); | ||
178 | set_blkh_free_space( blkh, | ||
179 | blkh_free_space(blkh) - count * (DC_SIZE + KEY_SIZE ) ); | ||
180 | |||
181 | do_balance_mark_internal_dirty (cur_bi->tb, cur,0); | ||
182 | |||
183 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
184 | check_internal (cur); | ||
185 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
186 | |||
187 | if (cur_bi->bi_parent) { | ||
188 | struct disk_child *t_dc = B_N_CHILD (cur_bi->bi_parent,cur_bi->bi_position); | ||
189 | put_dc_size( t_dc, dc_size(t_dc) + (count * (DC_SIZE + KEY_SIZE))); | ||
190 | do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, 0); | ||
191 | |||
192 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
193 | check_internal (cur_bi->bi_parent); | ||
194 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
195 | } | ||
196 | |||
197 | } | ||
198 | |||
199 | |||
200 | /* Delete del_num items and node pointers from buffer cur starting from * | ||
201 | * the first_i'th item and first_p'th pointer respectively. */ | ||
202 | static void internal_delete_pointers_items ( | ||
203 | struct buffer_info * cur_bi, | ||
204 | int first_p, | ||
205 | int first_i, | ||
206 | int del_num | ||
207 | ) | ||
208 | { | ||
209 | struct buffer_head * cur = cur_bi->bi_bh; | ||
210 | int nr; | ||
211 | struct block_head * blkh; | ||
212 | struct reiserfs_key * key; | ||
213 | struct disk_child * dc; | ||
214 | |||
215 | RFALSE( cur == NULL, "buffer is 0"); | ||
216 | RFALSE( del_num < 0, | ||
217 | "negative number of items (%d) can not be deleted", del_num); | ||
218 | RFALSE( first_p < 0 || first_p + del_num > B_NR_ITEMS (cur) + 1 || first_i < 0, | ||
219 | "first pointer order (%d) < 0 or " | ||
220 | "no so many pointers (%d), only (%d) or " | ||
221 | "first key order %d < 0", first_p, | ||
222 | first_p + del_num, B_NR_ITEMS (cur) + 1, first_i); | ||
223 | if ( del_num == 0 ) | ||
224 | return; | ||
225 | |||
226 | blkh = B_BLK_HEAD(cur); | ||
227 | nr = blkh_nr_item(blkh); | ||
228 | |||
229 | if ( first_p == 0 && del_num == nr + 1 ) { | ||
230 | RFALSE( first_i != 0, "1st deleted key must have order 0, not %d", first_i); | ||
231 | make_empty_node (cur_bi); | ||
232 | return; | ||
233 | } | ||
234 | |||
235 | RFALSE( first_i + del_num > B_NR_ITEMS (cur), | ||
236 | "first_i = %d del_num = %d " | ||
237 | "no so many keys (%d) in the node (%b)(%z)", | ||
238 | first_i, del_num, first_i + del_num, cur, cur); | ||
239 | |||
240 | |||
241 | /* deleting */ | ||
242 | dc = B_N_CHILD (cur, first_p); | ||
243 | |||
244 | memmove (dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE); | ||
245 | key = B_N_PDELIM_KEY (cur, first_i); | ||
246 | memmove (key, key + del_num, (nr - first_i - del_num) * KEY_SIZE + (nr + 1 - del_num) * DC_SIZE); | ||
247 | |||
248 | |||
249 | /* sizes, item number */ | ||
250 | set_blkh_nr_item( blkh, blkh_nr_item(blkh) - del_num ); | ||
251 | set_blkh_free_space( blkh, | ||
252 | blkh_free_space(blkh) + (del_num * (KEY_SIZE + DC_SIZE) ) ); | ||
253 | |||
254 | do_balance_mark_internal_dirty (cur_bi->tb, cur, 0); | ||
255 | /*&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
256 | check_internal (cur); | ||
257 | /*&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
258 | |||
259 | if (cur_bi->bi_parent) { | ||
260 | struct disk_child *t_dc; | ||
261 | t_dc = B_N_CHILD (cur_bi->bi_parent, cur_bi->bi_position); | ||
262 | put_dc_size( t_dc, dc_size(t_dc) - (del_num * (KEY_SIZE + DC_SIZE) ) ); | ||
263 | |||
264 | do_balance_mark_internal_dirty (cur_bi->tb, cur_bi->bi_parent,0); | ||
265 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
266 | check_internal (cur_bi->bi_parent); | ||
267 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
268 | } | ||
269 | } | ||
270 | |||
271 | |||
272 | /* delete n node pointers and items starting from given position */ | ||
273 | static void internal_delete_childs (struct buffer_info * cur_bi, | ||
274 | int from, int n) | ||
275 | { | ||
276 | int i_from; | ||
277 | |||
278 | i_from = (from == 0) ? from : from - 1; | ||
279 | |||
280 | /* delete n pointers starting from `from' position in CUR; | ||
281 | delete n keys starting from 'i_from' position in CUR; | ||
282 | */ | ||
283 | internal_delete_pointers_items (cur_bi, from, i_from, n); | ||
284 | } | ||
285 | |||
286 | |||
287 | /* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest | ||
288 | * last_first == FIRST_TO_LAST means that we copy the first items from src to the tail of dest | ||
289 | * last_first == LAST_TO_FIRST means that we copy the last items from src to the head of dest | ||
290 | */ | ||
291 | static void internal_copy_pointers_items ( | ||
292 | struct buffer_info * dest_bi, | ||
293 | struct buffer_head * src, | ||
294 | int last_first, int cpy_num | ||
295 | ) | ||
296 | { | ||
297 | /* ATTENTION! Number of node pointers in DEST is equal to number of items in DEST * | ||
298 | * as the delimiting key has already been inserted into buffer dest.*/ | ||
299 | struct buffer_head * dest = dest_bi->bi_bh; | ||
300 | int nr_dest, nr_src; | ||
301 | int dest_order, src_order; | ||
302 | struct block_head * blkh; | ||
303 | struct reiserfs_key * key; | ||
304 | struct disk_child * dc; | ||
305 | |||
306 | nr_src = B_NR_ITEMS (src); | ||
307 | |||
308 | RFALSE( dest == NULL || src == NULL, | ||
309 | "src (%p) or dest (%p) buffer is 0", src, dest); | ||
310 | RFALSE( last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST, | ||
311 | "invalid last_first parameter (%d)", last_first); | ||
312 | RFALSE( nr_src < cpy_num - 1, | ||
313 | "no so many items (%d) in src (%d)", cpy_num, nr_src); | ||
314 | RFALSE( cpy_num < 0, "cpy_num less than 0 (%d)", cpy_num); | ||
315 | RFALSE( cpy_num - 1 + B_NR_ITEMS(dest) > (int)MAX_NR_KEY(dest), | ||
316 | "cpy_num (%d) + item number in dest (%d) can not be > MAX_NR_KEY(%d)", | ||
317 | cpy_num, B_NR_ITEMS(dest), MAX_NR_KEY(dest)); | ||
318 | |||
319 | if ( cpy_num == 0 ) | ||
320 | return; | ||
321 | |||
322 | /* copying */ | ||
323 | blkh = B_BLK_HEAD(dest); | ||
324 | nr_dest = blkh_nr_item(blkh); | ||
325 | |||
326 | /*dest_order = (last_first == LAST_TO_FIRST) ? 0 : nr_dest;*/ | ||
327 | /*src_order = (last_first == LAST_TO_FIRST) ? (nr_src - cpy_num + 1) : 0;*/ | ||
328 | (last_first == LAST_TO_FIRST) ? (dest_order = 0, src_order = nr_src - cpy_num + 1) : | ||
329 | (dest_order = nr_dest, src_order = 0); | ||
330 | |||
331 | /* prepare space for cpy_num pointers */ | ||
332 | dc = B_N_CHILD (dest, dest_order); | ||
333 | |||
334 | memmove (dc + cpy_num, dc, (nr_dest - dest_order) * DC_SIZE); | ||
335 | |||
336 | /* insert pointers */ | ||
337 | memcpy (dc, B_N_CHILD (src, src_order), DC_SIZE * cpy_num); | ||
338 | |||
339 | |||
340 | /* prepare space for cpy_num - 1 item headers */ | ||
341 | key = B_N_PDELIM_KEY(dest, dest_order); | ||
342 | memmove (key + cpy_num - 1, key, | ||
343 | KEY_SIZE * (nr_dest - dest_order) + DC_SIZE * (nr_dest + cpy_num)); | ||
344 | |||
345 | |||
346 | /* insert headers */ | ||
347 | memcpy (key, B_N_PDELIM_KEY (src, src_order), KEY_SIZE * (cpy_num - 1)); | ||
348 | |||
349 | /* sizes, item number */ | ||
350 | set_blkh_nr_item( blkh, blkh_nr_item(blkh) + (cpy_num - 1 ) ); | ||
351 | set_blkh_free_space( blkh, | ||
352 | blkh_free_space(blkh) - (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num ) ); | ||
353 | |||
354 | do_balance_mark_internal_dirty (dest_bi->tb, dest, 0); | ||
355 | |||
356 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
357 | check_internal (dest); | ||
358 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
359 | |||
360 | if (dest_bi->bi_parent) { | ||
361 | struct disk_child *t_dc; | ||
362 | t_dc = B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position); | ||
363 | put_dc_size( t_dc, dc_size(t_dc) + (KEY_SIZE * (cpy_num - 1) + DC_SIZE * cpy_num) ); | ||
364 | |||
365 | do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0); | ||
366 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
367 | check_internal (dest_bi->bi_parent); | ||
368 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
369 | } | ||
370 | |||
371 | } | ||
372 | |||
373 | |||
374 | /* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest. | ||
375 | * Delete cpy_num - del_par items and node pointers from buffer src. | ||
376 | * last_first == FIRST_TO_LAST means that we copy/delete the first items from src. | ||
377 | * last_first == LAST_TO_FIRST means that we copy/delete the last items from src. | ||
378 | */ | ||
379 | static void internal_move_pointers_items (struct buffer_info * dest_bi, | ||
380 | struct buffer_info * src_bi, | ||
381 | int last_first, int cpy_num, int del_par) | ||
382 | { | ||
383 | int first_pointer; | ||
384 | int first_item; | ||
385 | |||
386 | internal_copy_pointers_items (dest_bi, src_bi->bi_bh, last_first, cpy_num); | ||
387 | |||
388 | if (last_first == FIRST_TO_LAST) { /* shift_left occurs */ | ||
389 | first_pointer = 0; | ||
390 | first_item = 0; | ||
391 | /* delete cpy_num - del_par pointers and keys, starting at first_pointer | ||
392 | for pointers and at first_item for keys */ | ||
393 | internal_delete_pointers_items (src_bi, first_pointer, first_item, cpy_num - del_par); | ||
394 | } else { /* shift_right occurs */ | ||
395 | int i, j; | ||
396 | |||
397 | i = ( cpy_num - del_par == ( j = B_NR_ITEMS(src_bi->bi_bh)) + 1 ) ? 0 : j - cpy_num + del_par; | ||
398 | |||
399 | internal_delete_pointers_items (src_bi, j + 1 - cpy_num + del_par, i, cpy_num - del_par); | ||
400 | } | ||
401 | } | ||
402 | |||
403 | /* Insert n_src'th key of buffer src before n_dest'th key of buffer dest. */ | ||
404 | static void internal_insert_key (struct buffer_info * dest_bi, | ||
405 | int dest_position_before, /* insert key before key with n_dest number */ | ||
406 | struct buffer_head * src, | ||
407 | int src_position) | ||
408 | { | ||
409 | struct buffer_head * dest = dest_bi->bi_bh; | ||
410 | int nr; | ||
411 | struct block_head * blkh; | ||
412 | struct reiserfs_key * key; | ||
413 | |||
414 | RFALSE( dest == NULL || src == NULL, | ||
415 | "source(%p) or dest(%p) buffer is 0", src, dest); | ||
416 | RFALSE( dest_position_before < 0 || src_position < 0, | ||
417 | "source(%d) or dest(%d) key number less than 0", | ||
418 | src_position, dest_position_before); | ||
419 | RFALSE( dest_position_before > B_NR_ITEMS (dest) || | ||
420 | src_position >= B_NR_ITEMS(src), | ||
421 | "invalid position in dest (%d (key number %d)) or in src (%d (key number %d))", | ||
422 | dest_position_before, B_NR_ITEMS (dest), | ||
423 | src_position, B_NR_ITEMS(src)); | ||
424 | RFALSE( B_FREE_SPACE (dest) < KEY_SIZE, | ||
425 | "no enough free space (%d) in dest buffer", B_FREE_SPACE (dest)); | ||
426 | |||
427 | blkh = B_BLK_HEAD(dest); | ||
428 | nr = blkh_nr_item(blkh); | ||
429 | |||
430 | /* prepare space for inserting key */ | ||
431 | key = B_N_PDELIM_KEY (dest, dest_position_before); | ||
432 | memmove (key + 1, key, (nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE); | ||
433 | |||
434 | /* insert key */ | ||
435 | memcpy (key, B_N_PDELIM_KEY(src, src_position), KEY_SIZE); | ||
436 | |||
437 | /* Change dirty state, free space and item number fields. */ | ||
438 | |||
439 | set_blkh_nr_item( blkh, blkh_nr_item(blkh) + 1 ); | ||
440 | set_blkh_free_space( blkh, blkh_free_space(blkh) - KEY_SIZE ); | ||
441 | |||
442 | do_balance_mark_internal_dirty (dest_bi->tb, dest, 0); | ||
443 | |||
444 | if (dest_bi->bi_parent) { | ||
445 | struct disk_child *t_dc; | ||
446 | t_dc = B_N_CHILD(dest_bi->bi_parent,dest_bi->bi_position); | ||
447 | put_dc_size( t_dc, dc_size(t_dc) + KEY_SIZE ); | ||
448 | |||
449 | do_balance_mark_internal_dirty (dest_bi->tb, dest_bi->bi_parent,0); | ||
450 | } | ||
451 | } | ||
452 | |||
453 | |||
454 | |||
455 | /* Insert d_key'th (delimiting) key from buffer cfl to tail of dest. | ||
456 | * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest. | ||
457 | * Replace d_key'th key in buffer cfl. | ||
458 | * Delete pointer_amount items and node pointers from buffer src. | ||
459 | */ | ||
460 | /* this can be invoked both to shift from S to L and from R to S */ | ||
461 | static void internal_shift_left ( | ||
462 | int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */ | ||
463 | struct tree_balance * tb, | ||
464 | int h, | ||
465 | int pointer_amount | ||
466 | ) | ||
467 | { | ||
468 | struct buffer_info dest_bi, src_bi; | ||
469 | struct buffer_head * cf; | ||
470 | int d_key_position; | ||
471 | |||
472 | internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); | ||
473 | |||
474 | /*printk("pointer_amount = %d\n",pointer_amount);*/ | ||
475 | |||
476 | if (pointer_amount) { | ||
477 | /* insert the delimiting key from the common father of dest and src into node dest at position B_NR_ITEMS(dest) */ | ||
478 | internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position); | ||
479 | |||
480 | if (B_NR_ITEMS(src_bi.bi_bh) == pointer_amount - 1) { | ||
481 | if (src_bi.bi_position/*src->b_item_order*/ == 0) | ||
482 | replace_key (tb, cf, d_key_position, src_bi.bi_parent/*src->b_parent*/, 0); | ||
483 | } else | ||
484 | replace_key (tb, cf, d_key_position, src_bi.bi_bh, pointer_amount - 1); | ||
485 | } | ||
486 | /* last parameter is del_parameter */ | ||
487 | internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 0); | ||
488 | |||
489 | } | ||
490 | |||
491 | /* Insert delimiting key to L[h]. | ||
492 | * Copy n node pointers and n - 1 items from buffer S[h] to L[h]. | ||
493 | * Delete n - 1 items and node pointers from buffer S[h]. | ||
494 | */ | ||
495 | /* it always shifts from S[h] to L[h] */ | ||
496 | static void internal_shift1_left ( | ||
497 | struct tree_balance * tb, | ||
498 | int h, | ||
499 | int pointer_amount | ||
500 | ) | ||
501 | { | ||
502 | struct buffer_info dest_bi, src_bi; | ||
503 | struct buffer_head * cf; | ||
504 | int d_key_position; | ||
505 | |||
506 | internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); | ||
507 | |||
508 | if ( pointer_amount > 0 ) /* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */ | ||
509 | internal_insert_key (&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf, d_key_position); | ||
510 | /* internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]);*/ | ||
511 | |||
512 | /* last parameter is del_parameter */ | ||
513 | internal_move_pointers_items (&dest_bi, &src_bi, FIRST_TO_LAST, pointer_amount, 1); | ||
514 | /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1);*/ | ||
515 | } | ||
516 | |||
517 | |||
518 | /* Insert d_key'th (delimiting) key from buffer cfr to head of dest. | ||
519 | * Copy n node pointers and n - 1 items from buffer src to buffer dest. | ||
520 | * Replace d_key'th key in buffer cfr. | ||
521 | * Delete n items and node pointers from buffer src. | ||
522 | */ | ||
523 | static void internal_shift_right ( | ||
524 | int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */ | ||
525 | struct tree_balance * tb, | ||
526 | int h, | ||
527 | int pointer_amount | ||
528 | ) | ||
529 | { | ||
530 | struct buffer_info dest_bi, src_bi; | ||
531 | struct buffer_head * cf; | ||
532 | int d_key_position; | ||
533 | int nr; | ||
534 | |||
535 | |||
536 | internal_define_dest_src_infos (mode, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); | ||
537 | |||
538 | nr = B_NR_ITEMS (src_bi.bi_bh); | ||
539 | |||
540 | if (pointer_amount > 0) { | ||
541 | /* insert the delimiting key from the common father of dest and src into node dest at position 0 */ | ||
542 | internal_insert_key (&dest_bi, 0, cf, d_key_position); | ||
543 | if (nr == pointer_amount - 1) { | ||
544 | RFALSE( src_bi.bi_bh != PATH_H_PBUFFER (tb->tb_path, h)/*tb->S[h]*/ || | ||
545 | dest_bi.bi_bh != tb->R[h], | ||
546 | "src (%p) must be == tb->S[h](%p) when it disappears", | ||
547 | src_bi.bi_bh, PATH_H_PBUFFER (tb->tb_path, h)); | ||
548 | /* when S[h] disappears, replace the left delimiting key as well */ | ||
549 | if (tb->CFL[h]) | ||
550 | replace_key (tb, cf, d_key_position, tb->CFL[h], tb->lkey[h]); | ||
551 | } else | ||
552 | replace_key (tb, cf, d_key_position, src_bi.bi_bh, nr - pointer_amount); | ||
553 | } | ||
554 | |||
555 | /* last parameter is del_parameter */ | ||
556 | internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 0); | ||
557 | } | ||
558 | |||
559 | /* Insert delimiting key to R[h]. | ||
560 | * Copy n node pointers and n - 1 items from buffer S[h] to R[h]. | ||
561 | * Delete n - 1 items and node pointers from buffer S[h]. | ||
562 | */ | ||
563 | /* it always shifts from S[h] to R[h] */ | ||
564 | static void internal_shift1_right ( | ||
565 | struct tree_balance * tb, | ||
566 | int h, | ||
567 | int pointer_amount | ||
568 | ) | ||
569 | { | ||
570 | struct buffer_info dest_bi, src_bi; | ||
571 | struct buffer_head * cf; | ||
572 | int d_key_position; | ||
573 | |||
574 | internal_define_dest_src_infos (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, &dest_bi, &src_bi, &d_key_position, &cf); | ||
575 | |||
576 | if (pointer_amount > 0) /* insert rkey from CFR[h] to right neighbor R[h] */ | ||
577 | internal_insert_key (&dest_bi, 0, cf, d_key_position); | ||
578 | /* internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]);*/ | ||
579 | |||
580 | /* last parameter is del_parameter */ | ||
581 | internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, pointer_amount, 1); | ||
582 | /* internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1);*/ | ||
583 | } | ||
584 | |||
585 | |||
586 | /* Delete insert_num node pointers together with their left items | ||
587 | * and balance current node.*/ | ||
588 | static void balance_internal_when_delete (struct tree_balance * tb, | ||
589 | int h, int child_pos) | ||
590 | { | ||
591 | int insert_num; | ||
592 | int n; | ||
593 | struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h); | ||
594 | struct buffer_info bi; | ||
595 | |||
596 | insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE)); | ||
597 | |||
598 | /* delete child-node-pointer(s) together with their left item(s) */ | ||
599 | bi.tb = tb; | ||
600 | bi.bi_bh = tbSh; | ||
601 | bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
602 | bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
603 | |||
604 | internal_delete_childs (&bi, child_pos, -insert_num); | ||
605 | |||
606 | RFALSE( tb->blknum[h] > 1, | ||
607 | "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]); | ||
608 | |||
609 | n = B_NR_ITEMS(tbSh); | ||
610 | |||
611 | if ( tb->lnum[h] == 0 && tb->rnum[h] == 0 ) { | ||
612 | if ( tb->blknum[h] == 0 ) { | ||
613 | /* node S[h] (root of the tree) is empty now */ | ||
614 | struct buffer_head *new_root; | ||
615 | |||
616 | RFALSE( n || B_FREE_SPACE (tbSh) != MAX_CHILD_SIZE(tbSh) - DC_SIZE, | ||
617 | "buffer must have only 0 keys (%d)", n); | ||
618 | RFALSE( bi.bi_parent, "root has parent (%p)", bi.bi_parent); | ||
619 | |||
620 | /* choose a new root */ | ||
621 | if ( ! tb->L[h-1] || ! B_NR_ITEMS(tb->L[h-1]) ) | ||
622 | new_root = tb->R[h-1]; | ||
623 | else | ||
624 | new_root = tb->L[h-1]; | ||
625 | /* switch super block's tree root block number to the new value */ | ||
626 | PUT_SB_ROOT_BLOCK( tb->tb_sb, new_root->b_blocknr ); | ||
627 | //REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --; | ||
628 | PUT_SB_TREE_HEIGHT( tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) - 1 ); | ||
629 | |||
630 | do_balance_mark_sb_dirty (tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1); | ||
631 | /*&&&&&&&&&&&&&&&&&&&&&&*/ | ||
632 | if (h > 1) | ||
633 | /* use check_internal if new root is an internal node */ | ||
634 | check_internal (new_root); | ||
635 | /*&&&&&&&&&&&&&&&&&&&&&&*/ | ||
636 | |||
637 | /* do what is needed for buffer thrown from tree */ | ||
638 | reiserfs_invalidate_buffer(tb, tbSh); | ||
639 | return; | ||
640 | } | ||
641 | return; | ||
642 | } | ||
643 | |||
644 | if ( tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1 ) { /* join S[h] with L[h] */ | ||
645 | |||
646 | RFALSE( tb->rnum[h] != 0, | ||
647 | "invalid tb->rnum[%d]==%d when joining S[h] with L[h]", | ||
648 | h, tb->rnum[h]); | ||
649 | |||
650 | internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1); | ||
651 | reiserfs_invalidate_buffer(tb, tbSh); | ||
652 | |||
653 | return; | ||
654 | } | ||
655 | |||
656 | if ( tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1 ) { /* join S[h] with R[h] */ | ||
657 | RFALSE( tb->lnum[h] != 0, | ||
658 | "invalid tb->lnum[%d]==%d when joining S[h] with R[h]", | ||
659 | h, tb->lnum[h]); | ||
660 | |||
661 | internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1); | ||
662 | |||
663 | reiserfs_invalidate_buffer(tb,tbSh); | ||
664 | return; | ||
665 | } | ||
666 | |||
667 | if ( tb->lnum[h] < 0 ) { /* borrow from left neighbor L[h] */ | ||
668 | RFALSE( tb->rnum[h] != 0, | ||
669 | "wrong tb->rnum[%d]==%d when borrow from L[h]", h, tb->rnum[h]); | ||
670 | /*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]);*/ | ||
671 | internal_shift_right (INTERNAL_SHIFT_FROM_L_TO_S, tb, h, -tb->lnum[h]); | ||
672 | return; | ||
673 | } | ||
674 | |||
675 | if ( tb->rnum[h] < 0 ) { /* borrow from right neighbor R[h] */ | ||
676 | RFALSE( tb->lnum[h] != 0, | ||
677 | "invalid tb->lnum[%d]==%d when borrow from R[h]", | ||
678 | h, tb->lnum[h]); | ||
679 | internal_shift_left (INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]);/*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]);*/ | ||
680 | return; | ||
681 | } | ||
682 | |||
683 | if ( tb->lnum[h] > 0 ) { /* split S[h] into two parts and put them into neighbors */ | ||
684 | RFALSE( tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1, | ||
685 | "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them", | ||
686 | h, tb->lnum[h], h, tb->rnum[h], n); | ||
687 | |||
688 | internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]);/*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]);*/ | ||
689 | internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]); | ||
690 | |||
691 | reiserfs_invalidate_buffer (tb, tbSh); | ||
692 | |||
693 | return; | ||
694 | } | ||
695 | reiserfs_panic (tb->tb_sb, "balance_internal_when_delete: unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d", | ||
696 | h, tb->lnum[h], h, tb->rnum[h]); | ||
697 | } | ||
698 | |||
699 | |||
700 | /* Replace delimiting key of buffers L[h] and S[h] by the given key.*/ | ||
701 | static void replace_lkey ( | ||
702 | struct tree_balance * tb, | ||
703 | int h, | ||
704 | struct item_head * key | ||
705 | ) | ||
706 | { | ||
707 | RFALSE( tb->L[h] == NULL || tb->CFL[h] == NULL, | ||
708 | "L[h](%p) and CFL[h](%p) must exist in replace_lkey", | ||
709 | tb->L[h], tb->CFL[h]); | ||
710 | |||
711 | if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0) | ||
712 | return; | ||
713 | |||
714 | memcpy (B_N_PDELIM_KEY(tb->CFL[h],tb->lkey[h]), key, KEY_SIZE); | ||
715 | |||
716 | do_balance_mark_internal_dirty (tb, tb->CFL[h],0); | ||
717 | } | ||
718 | |||
719 | |||
720 | /* Replace delimiting key of buffers S[h] and R[h] by the given key.*/ | ||
721 | static void replace_rkey ( | ||
722 | struct tree_balance * tb, | ||
723 | int h, | ||
724 | struct item_head * key | ||
725 | ) | ||
726 | { | ||
727 | RFALSE( tb->R[h] == NULL || tb->CFR[h] == NULL, | ||
728 | "R[h](%p) and CFR[h](%p) must exist in replace_rkey", | ||
729 | tb->R[h], tb->CFR[h]); | ||
730 | RFALSE( B_NR_ITEMS(tb->R[h]) == 0, | ||
731 | "R[h] can not be empty if it exists (item number=%d)", | ||
732 | B_NR_ITEMS(tb->R[h])); | ||
733 | |||
734 | memcpy (B_N_PDELIM_KEY(tb->CFR[h],tb->rkey[h]), key, KEY_SIZE); | ||
735 | |||
736 | do_balance_mark_internal_dirty (tb, tb->CFR[h], 0); | ||
737 | } | ||
738 | |||
739 | |||
740 | int balance_internal (struct tree_balance * tb, /* tree_balance structure */ | ||
741 | int h, /* level of the tree */ | ||
742 | int child_pos, | ||
743 | struct item_head * insert_key, /* key for insertion on higher level */ | ||
744 | struct buffer_head ** insert_ptr /* node for insertion on higher level*/ | ||
745 | ) | ||
746 | /* if inserting/pasting | ||
747 | { | ||
748 | child_pos is the position of the node-pointer in S[h] that * | ||
749 | pointed to S[h-1] before balancing of the h-1 level; * | ||
750 | this means that new pointers and items must be inserted AFTER * | ||
751 | child_pos | ||
752 | } | ||
753 | else | ||
754 | { | ||
755 | it is the position of the leftmost pointer that must be deleted (together with | ||
756 | its corresponding key to the left of the pointer) | ||
757 | as a result of the previous level's balancing. | ||
758 | } | ||
759 | */ | ||
760 | { | ||
761 | struct buffer_head * tbSh = PATH_H_PBUFFER (tb->tb_path, h); | ||
762 | struct buffer_info bi; | ||
763 | int order; /* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */ | ||
764 | int insert_num, n, k; | ||
765 | struct buffer_head * S_new; | ||
766 | struct item_head new_insert_key; | ||
767 | struct buffer_head * new_insert_ptr = NULL; | ||
768 | struct item_head * new_insert_key_addr = insert_key; | ||
769 | |||
770 | RFALSE( h < 1, "h (%d) can not be < 1 on internal level", h); | ||
771 | |||
772 | PROC_INFO_INC( tb -> tb_sb, balance_at[ h ] ); | ||
773 | |||
774 | order = ( tbSh ) ? PATH_H_POSITION (tb->tb_path, h + 1)/*tb->S[h]->b_item_order*/ : 0; | ||
775 | |||
776 | /* Using insert_size[h] calculate the number insert_num of items | ||
777 | that must be inserted to or deleted from S[h]. */ | ||
778 | insert_num = tb->insert_size[h]/((int)(KEY_SIZE + DC_SIZE)); | ||
779 | |||
780 | /* Check whether insert_num is proper **/ | ||
781 | RFALSE( insert_num < -2 || insert_num > 2, | ||
782 | "incorrect number of items inserted to the internal node (%d)", | ||
783 | insert_num); | ||
784 | RFALSE( h > 1 && (insert_num > 1 || insert_num < -1), | ||
785 | "incorrect number of items (%d) inserted to the internal node on a level (h=%d) higher than last internal level", | ||
786 | insert_num, h); | ||
787 | |||
788 | /* Make balance in case insert_num < 0 */ | ||
789 | if ( insert_num < 0 ) { | ||
790 | balance_internal_when_delete (tb, h, child_pos); | ||
791 | return order; | ||
792 | } | ||
793 | |||
794 | k = 0; | ||
795 | if ( tb->lnum[h] > 0 ) { | ||
796 | /* shift lnum[h] items from S[h] to the left neighbor L[h]. | ||
797 | check how many of new items fall into L[h] or CFL[h] after | ||
798 | shifting */ | ||
799 | n = B_NR_ITEMS (tb->L[h]); /* number of items in L[h] */ | ||
800 | if ( tb->lnum[h] <= child_pos ) { | ||
801 | /* new items don't fall into L[h] or CFL[h] */ | ||
802 | internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]); | ||
803 | /*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]);*/ | ||
804 | child_pos -= tb->lnum[h]; | ||
805 | } else if ( tb->lnum[h] > child_pos + insert_num ) { | ||
806 | /* all new items fall into L[h] */ | ||
807 | internal_shift_left (INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h] - insert_num); | ||
808 | /* internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh, | ||
809 | tb->lnum[h]-insert_num); | ||
810 | */ | ||
811 | /* insert insert_num keys and node-pointers into L[h] */ | ||
812 | bi.tb = tb; | ||
813 | bi.bi_bh = tb->L[h]; | ||
814 | bi.bi_parent = tb->FL[h]; | ||
815 | bi.bi_position = get_left_neighbor_position (tb, h); | ||
816 | internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next*/ n + child_pos + 1, | ||
817 | insert_num,insert_key,insert_ptr); | ||
818 | |||
819 | insert_num = 0; | ||
820 | } else { | ||
821 | struct disk_child * dc; | ||
822 | |||
823 | /* some items fall into L[h] or CFL[h], but some don't fall */ | ||
824 | internal_shift1_left(tb,h,child_pos+1); | ||
825 | /* calculate number of new items that fall into L[h] */ | ||
826 | k = tb->lnum[h] - child_pos - 1; | ||
827 | bi.tb = tb; | ||
828 | bi.bi_bh = tb->L[h]; | ||
829 | bi.bi_parent = tb->FL[h]; | ||
830 | bi.bi_position = get_left_neighbor_position (tb, h); | ||
831 | internal_insert_childs (&bi,/*tb->L[h], tb->S[h-1]->b_next,*/ n + child_pos + 1,k, | ||
832 | insert_key,insert_ptr); | ||
833 | |||
834 | replace_lkey(tb,h,insert_key + k); | ||
835 | |||
836 | /* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */ | ||
837 | dc = B_N_CHILD(tbSh, 0); | ||
838 | put_dc_size( dc, MAX_CHILD_SIZE(insert_ptr[k]) - B_FREE_SPACE (insert_ptr[k])); | ||
839 | put_dc_block_number( dc, insert_ptr[k]->b_blocknr ); | ||
840 | |||
841 | do_balance_mark_internal_dirty (tb, tbSh, 0); | ||
842 | |||
843 | k++; | ||
844 | insert_key += k; | ||
845 | insert_ptr += k; | ||
846 | insert_num -= k; | ||
847 | child_pos = 0; | ||
848 | } | ||
849 | } /* tb->lnum[h] > 0 */ | ||
850 | |||
851 | if ( tb->rnum[h] > 0 ) { | ||
852 | /*shift rnum[h] items from S[h] to the right neighbor R[h]*/ | ||
853 | /* check how many of new items fall into R or CFR after shifting */ | ||
854 | n = B_NR_ITEMS (tbSh); /* number of items in S[h] */ | ||
855 | if ( n - tb->rnum[h] >= child_pos ) | ||
856 | /* new items fall into S[h] */ | ||
857 | /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]);*/ | ||
858 | internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h]); | ||
859 | else | ||
860 | if ( n + insert_num - tb->rnum[h] < child_pos ) | ||
861 | { | ||
862 | /* all new items fall into R[h] */ | ||
863 | /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h], | ||
864 | tb->rnum[h] - insert_num);*/ | ||
865 | internal_shift_right (INTERNAL_SHIFT_FROM_S_TO_R, tb, h, tb->rnum[h] - insert_num); | ||
866 | |||
867 | /* insert insert_num keys and node-pointers into R[h] */ | ||
868 | bi.tb = tb; | ||
869 | bi.bi_bh = tb->R[h]; | ||
870 | bi.bi_parent = tb->FR[h]; | ||
871 | bi.bi_position = get_right_neighbor_position (tb, h); | ||
872 | internal_insert_childs (&bi, /*tb->R[h],tb->S[h-1]->b_next*/ child_pos - n - insert_num + tb->rnum[h] - 1, | ||
873 | insert_num,insert_key,insert_ptr); | ||
874 | insert_num = 0; | ||
875 | } | ||
876 | else | ||
877 | { | ||
878 | struct disk_child * dc; | ||
879 | |||
880 | /* one of the items falls into CFR[h] */ | ||
881 | internal_shift1_right(tb,h,n - child_pos + 1); | ||
882 | /* calculate number of new items that fall into R[h] */ | ||
883 | k = tb->rnum[h] - n + child_pos - 1; | ||
884 | bi.tb = tb; | ||
885 | bi.bi_bh = tb->R[h]; | ||
886 | bi.bi_parent = tb->FR[h]; | ||
887 | bi.bi_position = get_right_neighbor_position (tb, h); | ||
888 | internal_insert_childs (&bi, /*tb->R[h], tb->R[h]->b_child,*/ 0, k, insert_key + 1, insert_ptr + 1); | ||
889 | |||
890 | replace_rkey(tb,h,insert_key + insert_num - k - 1); | ||
891 | |||
892 | /* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1]*/ | ||
893 | dc = B_N_CHILD(tb->R[h], 0); | ||
894 | put_dc_size( dc, MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) - | ||
895 | B_FREE_SPACE (insert_ptr[insert_num-k-1])); | ||
896 | put_dc_block_number( dc, insert_ptr[insert_num-k-1]->b_blocknr ); | ||
897 | |||
898 | do_balance_mark_internal_dirty (tb, tb->R[h],0); | ||
899 | |||
900 | insert_num -= (k + 1); | ||
901 | } | ||
902 | } | ||
903 | |||
904 | /** Fill new node that appears instead of S[h] **/ | ||
905 | RFALSE( tb->blknum[h] > 2, "blknum can not be > 2 for internal level"); | ||
906 | RFALSE( tb->blknum[h] < 0, "blknum can not be < 0"); | ||
907 | |||
908 | if ( ! tb->blknum[h] ) | ||
909 | { /* node S[h] is empty now */ | ||
910 | RFALSE( ! tbSh, "S[h] is equal NULL"); | ||
911 | |||
912 | /* do what is needed for buffer thrown from tree */ | ||
913 | reiserfs_invalidate_buffer(tb,tbSh); | ||
914 | return order; | ||
915 | } | ||
916 | |||
917 | if ( ! tbSh ) { | ||
918 | /* create new root */ | ||
919 | struct disk_child * dc; | ||
920 | struct buffer_head * tbSh_1 = PATH_H_PBUFFER (tb->tb_path, h - 1); | ||
921 | struct block_head * blkh; | ||
922 | |||
923 | |||
924 | if ( tb->blknum[h] != 1 ) | ||
925 | reiserfs_panic(NULL, "balance_internal: One new node required for creating the new root"); | ||
926 | /* S[h] = empty buffer from the list FEB. */ | ||
927 | tbSh = get_FEB (tb); | ||
928 | blkh = B_BLK_HEAD(tbSh); | ||
929 | set_blkh_level( blkh, h + 1 ); | ||
930 | |||
931 | /* Put the unique node-pointer to S[h] that points to S[h-1]. */ | ||
932 | |||
933 | dc = B_N_CHILD(tbSh, 0); | ||
934 | put_dc_block_number( dc, tbSh_1->b_blocknr ); | ||
935 | put_dc_size( dc, (MAX_CHILD_SIZE (tbSh_1) - B_FREE_SPACE (tbSh_1))); | ||
936 | |||
937 | tb->insert_size[h] -= DC_SIZE; | ||
938 | set_blkh_free_space( blkh, blkh_free_space(blkh) - DC_SIZE ); | ||
939 | |||
940 | do_balance_mark_internal_dirty (tb, tbSh, 0); | ||
941 | |||
942 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
943 | check_internal (tbSh); | ||
944 | /*&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
945 | |||
946 | /* put new root into path structure */ | ||
947 | PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) = tbSh; | ||
948 | |||
949 | /* Change root in structure super block. */ | ||
950 | PUT_SB_ROOT_BLOCK( tb->tb_sb, tbSh->b_blocknr ); | ||
951 | PUT_SB_TREE_HEIGHT( tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1 ); | ||
952 | do_balance_mark_sb_dirty (tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1); | ||
953 | } | ||
954 | |||
955 | if ( tb->blknum[h] == 2 ) { | ||
956 | int snum; | ||
957 | struct buffer_info dest_bi, src_bi; | ||
958 | |||
959 | |||
960 | /* S_new = free buffer from list FEB */ | ||
961 | S_new = get_FEB(tb); | ||
962 | |||
963 | set_blkh_level( B_BLK_HEAD(S_new), h + 1 ); | ||
964 | |||
965 | dest_bi.tb = tb; | ||
966 | dest_bi.bi_bh = S_new; | ||
967 | dest_bi.bi_parent = NULL; | ||
968 | dest_bi.bi_position = 0; | ||
969 | src_bi.tb = tb; | ||
970 | src_bi.bi_bh = tbSh; | ||
971 | src_bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
972 | src_bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
973 | |||
974 | n = B_NR_ITEMS (tbSh); /* number of items in S[h] */ | ||
975 | snum = (insert_num + n + 1)/2; | ||
976 | if ( n - snum >= child_pos ) { | ||
977 | /* new items don't fall into S_new */ | ||
978 | /* store the delimiting key for the next level */ | ||
979 | /* new_insert_key = (n - snum)'th key in S[h] */ | ||
980 | memcpy (&new_insert_key,B_N_PDELIM_KEY(tbSh,n - snum), | ||
981 | KEY_SIZE); | ||
982 | /* last parameter is del_par */ | ||
983 | internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum, 0); | ||
984 | /* internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0);*/ | ||
985 | } else if ( n + insert_num - snum < child_pos ) { | ||
986 | /* all new items fall into S_new */ | ||
987 | /* store the delimiting key for the next level */ | ||
988 | /* new_insert_key = (n + insert_num - snum)'th key in S[h] */ | ||
989 | memcpy(&new_insert_key,B_N_PDELIM_KEY(tbSh,n + insert_num - snum), | ||
990 | KEY_SIZE); | ||
991 | /* last parameter is del_par */ | ||
992 | internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, snum - insert_num, 0); | ||
993 | /* internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0);*/ | ||
994 | |||
995 | /* insert insert_num keys and node-pointers into S_new */ | ||
996 | internal_insert_childs (&dest_bi, /*S_new,tb->S[h-1]->b_next,*/child_pos - n - insert_num + snum - 1, | ||
997 | insert_num,insert_key,insert_ptr); | ||
998 | |||
999 | insert_num = 0; | ||
1000 | } else { | ||
1001 | struct disk_child * dc; | ||
1002 | |||
1003 | /* some items fall into S_new, but some don't fall */ | ||
1004 | /* last parameter is del_par */ | ||
1005 | internal_move_pointers_items (&dest_bi, &src_bi, LAST_TO_FIRST, n - child_pos + 1, 1); | ||
1006 | /* internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1);*/ | ||
1007 | /* calculate number of new items that fall into S_new */ | ||
1008 | k = snum - n + child_pos - 1; | ||
1009 | |||
1010 | internal_insert_childs (&dest_bi, /*S_new,*/ 0, k, insert_key + 1, insert_ptr+1); | ||
1011 | |||
1012 | /* new_insert_key = insert_key[insert_num - k - 1] */ | ||
1013 | memcpy(&new_insert_key,insert_key + insert_num - k - 1, | ||
1014 | KEY_SIZE); | ||
1015 | /* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */ | ||
1016 | |||
1017 | dc = B_N_CHILD(S_new,0); | ||
1018 | put_dc_size( dc, (MAX_CHILD_SIZE(insert_ptr[insert_num-k-1]) - | ||
1019 | B_FREE_SPACE(insert_ptr[insert_num-k-1])) ); | ||
1020 | put_dc_block_number( dc, insert_ptr[insert_num-k-1]->b_blocknr ); | ||
1021 | |||
1022 | do_balance_mark_internal_dirty (tb, S_new,0); | ||
1023 | |||
1024 | insert_num -= (k + 1); | ||
1025 | } | ||
1026 | /* new_insert_ptr = node_pointer to S_new */ | ||
1027 | new_insert_ptr = S_new; | ||
1028 | |||
1029 | RFALSE (!buffer_journaled(S_new) || buffer_journal_dirty(S_new) || | ||
1030 | buffer_dirty (S_new), | ||
1031 | "cm-00001: bad S_new (%b)", S_new); | ||
1032 | |||
1033 | // S_new is released in unfix_nodes | ||
1034 | } | ||
1035 | |||
1036 | n = B_NR_ITEMS (tbSh); /*number of items in S[h] */ | ||
1037 | |||
1038 | if ( 0 <= child_pos && child_pos <= n && insert_num > 0 ) { | ||
1039 | bi.tb = tb; | ||
1040 | bi.bi_bh = tbSh; | ||
1041 | bi.bi_parent = PATH_H_PPARENT (tb->tb_path, h); | ||
1042 | bi.bi_position = PATH_H_POSITION (tb->tb_path, h + 1); | ||
1043 | internal_insert_childs ( | ||
1044 | &bi,/*tbSh,*/ | ||
1045 | /* ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next : tb->S[h]->b_child->b_next,*/ | ||
1046 | child_pos,insert_num,insert_key,insert_ptr | ||
1047 | ); | ||
1048 | } | ||
1049 | |||
1050 | |||
1051 | memcpy (new_insert_key_addr,&new_insert_key,KEY_SIZE); | ||
1052 | insert_ptr[0] = new_insert_ptr; | ||
1053 | |||
1054 | return order; | ||
1055 | } | ||
1056 | |||
1057 | |||
1058 | |||
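
A minimal stand-alone sketch of the shift-and-copy idiom used throughout the file (see internal_insert_childs and internal_insert_key above): open a gap with memmove, then memcpy the new entries into it. The struct fake_child type, the sample values and the array sizes below are invented for illustration and are not part of the patch; the real code does the same arithmetic with DC_SIZE and KEY_SIZE on entries packed inside an internal node, then adjusts blkh_nr_item and blkh_free_space.

#include <stdio.h>
#include <string.h>

/* stand-in for struct disk_child; illustrative only */
struct fake_child { unsigned long block; };

int main(void)
{
	struct fake_child node[8] = { {10}, {20}, {30}, {40} };	/* children already in the node */
	struct fake_child new_dc[2] = { {21}, {22} };		/* children to be inserted */
	int nr = 4;		/* current number of children */
	int to = 1;		/* insert after child position `to`, i.e. at to + 1 */
	int count = 2;
	int i;

	/* prepare space: slide the children at positions to+1 .. nr-1 right by count slots */
	memmove(node + to + 1 + count, node + to + 1,
		(nr - (to + 1)) * sizeof(struct fake_child));

	/* copy the children to be inserted into the gap */
	memcpy(node + to + 1, new_dc, count * sizeof(struct fake_child));
	nr += count;

	for (i = 0; i < nr; i++)
		printf("%lu ", node[i].block);
	printf("\n");		/* prints: 10 20 21 22 30 40 */
	return 0;
}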