Diffstat (limited to 'arch/powerpc/lib/rheap.c')
 -rw-r--r--  arch/powerpc/lib/rheap.c | 693
 1 file changed, 693 insertions, 0 deletions
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
new file mode 100644
index 000000000000..42c5de2c898f
--- /dev/null
+++ b/arch/powerpc/lib/rheap.c
@@ -0,0 +1,693 @@
/*
 * arch/powerpc/lib/rheap.c
 *
 * A Remote Heap. Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it may be uncacheable or of a
 * different endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>

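/*
 * A minimal usage sketch (illustrative only; `dpram_base' and the sizes
 * here are placeholders for whatever device memory is being managed):
 *
 *	rh_info_t *info = rh_create(64);		/* 64-byte alignment */
 *	rh_attach_region(info, dpram_base, 4096);	/* region to manage */
 *	void *p = rh_alloc(info, 128, "my-driver");	/* check with IS_ERR() */
 *	...
 *	rh_free(info, p);
 *	rh_destroy(info);
 */
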
/*
 * Fixup a list_head, needed when copying lists. If the pointers fall
 * between s and e, apply the delta. This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
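 * For example, if the block array moved from 0x1000 to 0x2000
 * (d = 0x1000), a ->next pointer holding 0x1040 is rewritten to 0x2040,
 * while pointers outside the old range [s, e) are left untouched.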
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all the new slots to the empty list; they start right
	 * after the old ones, at index (max_blocks - new_blocks) */
	for (i = 0, blk = block + max_blocks - new_blocks; i < new_blocks;
	     i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Ensure at least the required number of empty slots. If this function
 * causes the block area to grow, any pointers kept into the block area
 * become invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round up to the next multiple of 16 (e.g. 17 + 2 -> 32) */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}

static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* No empty slots left and we cannot extend here. */
	/* XXX: you should have called assure_empty() beforehand */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = NULL;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

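/*
 * Insert a freed block into the free list, merging it with any free
 * block that is exactly adjacent to it. Four cases: no neighbour
 * touches it (plain sorted insert), only the block before touches it
 * (extend that block forward), only the block after touches it (extend
 * it backward), or both touch (collapse all three into one block,
 * releasing a slot).
 */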
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = (unsigned long)blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = (unsigned long)blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before != NULL && s != (unsigned long)before->start + before->size)
		before = NULL;

	if (after != NULL && e != (unsigned long)after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start = (int8_t *)after->start - size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}

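/* Insert into the taken list, which is kept sorted by start address;
 * the linear searches in rh_free() and rh_set_owner() rely on this. */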
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Find the first block that starts after the given one (if any)
	 * and insert in front of it */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically. Note that no memory for the blocks
 * is allocated here; it will be allocated by the first operation that
 * needs a free slot.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything as empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}

/*
 * Destroy a dynamically created remote heap. Deallocate only if the areas
 * are not static.
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block. This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything as empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all block slots to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}

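/*
 * A minimal early-boot sketch (names and sizes are illustrative; the
 * info and block arrays must live in ordinary kernel memory, e.g. BSS):
 *
 *	static rh_block_t cpm_blocks[16];
 *	static rh_info_t cpm_info;
 *
 *	rh_init(&cpm_info, 32, 16, cpm_blocks);
 *	rh_attach_region(&cpm_info, dpram_base, dpram_size);
 */
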
/* Attach a free memory region; coalesces with adjacent free regions */
int rh_attach_region(rh_info_t * info, void *start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Take final values */
	start = (void *)s;
	size = (int)(e - s);

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}

/* Detach the given address range; splits a free block if needed. */
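/*
 * Three outcomes are possible: the range covers a whole free block
 * (the block is removed and its slot released), it touches one end of
 * a block (the block shrinks), or it lies in the middle (the block is
 * split in two, consuming one new slot).
 */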
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return (void *)s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk = get_slot(info);
		newblk->start = (void *)e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return (void *)s;
}

void *rh_alloc(rh_info_t * info, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	void *start;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
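	/* e.g. with info->alignment == 16: 5 -> 16, 16 -> 16, 17 -> 32 */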

	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;
		start = blk->start;

		attach_taken_block(info, blk);

		return start;
	}

	newblk = get_slot(info);
	newblk->start = blk->start;
	newblk->size = size;
	newblk->owner = owner;

	/* blk still in free list, with updated start, size */
	blk->start = (int8_t *)blk->start + size;
	blk->size -= size;

	start = newblk->start;

	attach_taken_block(info, newblk);

	return start;
}

/* Allocate at precisely the given address */
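/*
 * The containing free block may end up split into up to three pieces:
 * a front free fragment, the taken middle, and a back free fragment;
 * hence assure_empty(info, 2) below, since up to two new slots may be
 * needed.
 */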
void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		/* not a match; reset so a failed search leaves blk NULL */
		blk = NULL;
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = (void *)e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = (void *)s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}

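/*
 * Free a block previously handed out by rh_alloc() or rh_alloc_fixed().
 * Returns the size of the freed block, or -EINVAL if start does not
 * fall within a taken block.
 */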
int rh_free(rh_info_t * info, void *start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}

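/*
 * Copy up to max_stats entries of the selected list (RHGS_FREE or
 * RHGS_TAKEN) into stats[]. Returns the total number of blocks on that
 * list, which may be larger than max_stats.
 */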
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Walk the list, copying entries while room remains in stats[] */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}

int rh_set_owner(rh_info_t * info, void *start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = sizeof(st) / sizeof(st[0]);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO " Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       " 0x%p-0x%p (%u)\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO " Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       " 0x%p-0x%p (%u) %s\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%p-0x%p (%u)\n",
	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
}