author		Kumar Gala <galak@kernel.crashing.org>	2007-05-10 00:28:17 -0400
committer	Kumar Gala <galak@kernel.crashing.org>	2007-05-10 00:28:17 -0400
commit		b99ab6a8c7f24d1dd2e435c1d04877bc034a8dd8 (patch)
tree		58a82c502d5400755dd6d5d86ca702eb9ad4489f /arch/ppc/lib/rheap.c
parent		3fae4210ba620fde5122e4c4f564a85c18901940 (diff)
[POWERPC] Use rheap from arch/powerpc/lib
Removed the rheap implementation in arch/ppc/lib and changed the build system to use the one in arch/powerpc/lib.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/ppc/lib/rheap.c')
-rw-r--r--	arch/ppc/lib/rheap.c	695
1 file changed, 0 insertions(+), 695 deletions(-)
diff --git a/arch/ppc/lib/rheap.c b/arch/ppc/lib/rheap.c
deleted file mode 100644
index 9dc2f3458ded..000000000000
--- a/arch/ppc/lib/rheap.c
+++ /dev/null
@@ -1,695 +0,0 @@
/*
 * A Remote Heap. Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/rheap.h>

/*
 * Fixup a list_head, needed when copying lists. If the pointers fall
 * between s and e, apply the delta. This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}
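
/*
 * Worked example (illustrative, with made-up addresses): suppose the old
 * block array lives at 0x1000 and grow() memcpy()s it to 0x2000, so
 * d == 0x1000. A node whose list.next still holds 0x1010 (a pointer into
 * the old array, i.e. inside [s, e)) is rewritten to 0x2010. Pointers
 * that refer to the list heads embedded in rh_info_t fall outside
 * [s, e) and are correctly left untouched.
 */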

/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the empty list; max_blocks was already
	 * updated above, so index back by new_blocks to reach the first
	 * newly allocated slot */
	for (i = 0, blk = block + info->max_blocks - new_blocks;
	     i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}

/*
 * Assure at least the required amount of empty slots. If this function
 * causes a grow in the block area then all pointers kept to the block
 * area are invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round up to the next multiple of 16 blocks */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}
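
/*
 * Worked example (illustrative): with max_blocks == 17 and slots == 2,
 * (17 + 2 + 15) & ~15 == 34 & ~15 == 32, so the block area grows to the
 * next multiple of 16 that covers the request.
 */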

static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* If there are no more free slots we cannot extend here. */
	/* XXX: You should have called assure_empty() before. */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = 0;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}

static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}

static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
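
/*
 * Coalescing sketch (hypothetical ranges): if [0x100, 0x200) and
 * [0x300, 0x400) are free and [0x200, 0x300) is attached, "before"
 * matches the first block and "after" the second, so all three merge
 * into a single free block [0x100, 0x400) and two bookkeeping slots go
 * back to the empty list.
 */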

static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Insert before the first block that starts after the given one,
	 * keeping the taken list sorted by start address */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}

/*
 * Create a remote heap dynamically. Note that no memory for the blocks
 * is allocated here; it will be allocated upon the first attach or
 * allocation.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}
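
/*
 * Usage sketch (illustrative only; the base address, size, alignment and
 * function name below are made up): create a heap over a device-local
 * window and tear it down on failure.
 */
static rh_info_t *example_create_heap(void)
{
	rh_info_t *heap;

	heap = rh_create(32);			/* power-of-two alignment */
	if (IS_ERR(heap))
		return heap;

	/* Hand the (e.g. uncached) region over to the heap */
	if (rh_attach_region(heap, 0xf0000000, 0x10000) < 0) {
		rh_destroy(heap);
		return ERR_PTR(-ENOMEM);
	}
	return heap;
}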

/*
 * Destroy a dynamically created remote heap. Deallocate only if the areas
 * are not static
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}

/*
 * Initialize in place a remote heap info block. This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all blocks to the empty list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
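
/*
 * Early-boot sketch (assumed usage, names made up): before kmalloc() is
 * available, supply statically allocated bookkeeping and initialize the
 * heap in place.  The RHIF_STATIC_* flags keep rh_destroy() from
 * kfree()ing either area.
 */
static rh_block_t early_blocks[16];
static rh_info_t early_heap;

static void early_heap_setup(void)
{
	rh_init(&early_heap, 8, ARRAY_SIZE(early_blocks), early_blocks);
}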

/* Attach a free memory region; coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (IS_ERR_VALUE(e) || (e < s))
		return -ERANGE;

	/* Take final values */
	start = s;
	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}
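
/*
 * Rounding sketch (hypothetical numbers): with alignment 16, attaching
 * start 0x1003 and size 0x1002 rounds the start up to 0x1010 and the
 * end (0x2005) down to 0x2000, so the heap actually manages 0xff0 bytes.
 */
static int example_attach(rh_info_t *heap)
{
	return rh_attach_region(heap, 0x1003, 0x1002);
}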

/* Detach a given address range; splits a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;
	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}
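
/*
 * Split sketch (hypothetical ranges): if [0x1000, 0x2000) is free,
 * detaching [0x1400, 0x1800) shrinks the existing block to
 * [0x1000, 0x1400) and inserts a new free fragment [0x1800, 0x2000);
 * the detached middle range is simply forgotten by the heap.
 */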

unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;
		start = blk->start;

		attach_taken_block(info, blk);

		return start;
	}

	newblk = get_slot(info);
	newblk->start = blk->start;
	newblk->size = size;
	newblk->owner = owner;

	/* blk still in free list, with updated start, size */
	blk->start += size;
	blk->size -= size;

	start = newblk->start;

	attach_taken_block(info, newblk);

	return start;
}
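
/*
 * Allocation sketch (illustrative; the owner string is made up): on
 * failure rh_alloc() returns a negative errno cast to unsigned long, so
 * callers should test the result with IS_ERR_VALUE() before using it as
 * an offset.
 */
static unsigned long example_alloc(rh_info_t *heap)
{
	unsigned long off;

	off = rh_alloc(heap, 256, "example-owner");
	if (IS_ERR_VALUE(off))
		printk(KERN_ERR "rh: alloc failed (%ld)\n", (long)off);
	return off;
}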

/* Allocate at precisely the given address */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		/* no match; reset so failure is detected below */
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;
	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
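
/*
 * Fixed-allocation sketch (hypothetical offset, size and owner string):
 * reserve a specific range, e.g. one a device expects at a fixed
 * location inside the managed region.
 */
static int example_alloc_fixed(rh_info_t *heap)
{
	unsigned long off;

	off = rh_alloc_fixed(heap, 0x400, 0x100, "fixed-owner");
	if (IS_ERR_VALUE(off))
		return (int) off;	/* -EINVAL or -ENOMEM */
	return 0;
}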

int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}
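
/*
 * Free sketch (illustrative): rh_free() returns the size of the freed
 * block on success, or -EINVAL if start does not fall within a taken
 * block.
 */
static void example_free(rh_info_t *heap, unsigned long off)
{
	int size = rh_free(heap, off);

	if (size < 0)
		printk(KERN_ERR "rh: free of 0x%lx failed\n", off);
}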

int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {
	case RHGS_FREE:
		h = &info->free_list;
		break;
	case RHGS_TAKEN:
		h = &info->taken_list;
		break;
	default:
		return -EINVAL;
	}

	/* Linear walk over the selected list, copying up to max_stats */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}
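
/*
 * Stats sketch (illustrative): snapshot the taken list. Note that the
 * return value is the total number of blocks, which may exceed the
 * number actually copied, so clamp before printing, as rh_dump() below
 * does.
 */
static void example_stats(rh_info_t *heap)
{
	rh_stats_t st[8];
	int i, nr, maxnr = ARRAY_SIZE(st);

	nr = rh_get_stats(heap, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO "0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size, st[i].size,
		       st[i].owner != NULL ? st[i].owner : "");
}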

int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}

void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}

void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}