author | Timur Tabi <timur@freescale.com> | 2007-05-08 15:46:36 -0400
committer | Kumar Gala <galak@kernel.crashing.org> | 2007-05-10 00:01:43 -0400
commit | 4c35630ccda56ed494f6102d2e147fefe14b78d2 (patch)
tree | 4f04754fb0ec6978923b3c1e0318997e420f6551 /arch/ppc/lib
parent | 742226c579c573c24386aaf41969a01ee058b97e (diff)
[POWERPC] Change rheap functions to use ulongs instead of pointers
The rheap allocation functions return a pointer, but the actual value depends on how
the heap was initialized and so can be anything, e.g. an offset into a buffer. An
unsigned long is a better representation of the value returned by the allocation
functions.
This patch changes all of the relevant rheap functions to use unsigned long
integers instead of pointers. In case of an error, the value returned is a negative
error code cast to an unsigned long. The caller can use the IS_ERR_VALUE() macro
to check for this.
All code which calls the rheap functions is updated accordingly. The macros
IS_MURAM_ERR() and IS_DPERR() have been deleted in favor of IS_ERR_VALUE().
Error checking has also been added to rh_attach_region().
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
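To make the new calling convention concrete, here is a minimal caller-side sketch. The helper name, owner string, and error handling are hypothetical; the real call sites are updated elsewhere in the commit (this view is limited to arch/ppc/lib).

```c
#include <linux/err.h>	/* IS_ERR_VALUE() */
#include <asm/rheap.h>	/* rh_info_t, rh_alloc() */

/*
 * Hypothetical caller, for illustration only.  The heap may have been
 * initialized with buffer offsets rather than kernel pointers, so the
 * returned value is just a number, not something to dereference.
 */
static int example_alloc(rh_info_t *info, unsigned long *offset_out)
{
	unsigned long offset;

	/* rh_alloc() now returns an unsigned long ... */
	offset = rh_alloc(info, 64, "example");

	/* ... and an error is a negative errno cast to unsigned long. */
	if (IS_ERR_VALUE(offset))
		return (int)offset;	/* e.g. -ENOMEM */

	*offset_out = offset;
	return 0;
}
```

Before this patch, the equivalent check would have been written against a void * return using IS_MURAM_ERR() or IS_DPERR().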
Diffstat (limited to 'arch/ppc/lib')
-rw-r--r-- | arch/ppc/lib/rheap.c | 95 |
1 file changed, 49 insertions, 46 deletions
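One detail worth calling out before the diff: since valid values and encoded errnos now share the unsigned long space, rh_attach_region() gains a range check (see the @@ -337,9 +337,12 @@ hunk below). The intent appears to be that a region whose rounded-down end address falls into the errno range could later produce return values that IS_ERR_VALUE() would misreport as failures, while e < s catches size overflow. A rough sketch under the usual include/linux/err.h definitions; the helper below is hypothetical and not part of the patch.

```c
#include <linux/err.h>	/* MAX_ERRNO, IS_ERR_VALUE() */

/*
 * Hypothetical helper sketching the new guard in rh_attach_region():
 * IS_ERR_VALUE(x) is true when x lands in the top MAX_ERRNO (4095)
 * values of the address space, the range reserved for encoded errnos.
 */
static int region_fits_value_space(unsigned long s, unsigned long e)
{
	/* end looks like an errno, or start + size wrapped around zero */
	if (IS_ERR_VALUE(e) || e < s)
		return 0;
	return 1;
}
```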
diff --git a/arch/ppc/lib/rheap.c b/arch/ppc/lib/rheap.c
index d40700795a9c..9dc2f3458ded 100644
--- a/arch/ppc/lib/rheap.c
+++ b/arch/ppc/lib/rheap.c
@@ -132,7 +132,7 @@ static rh_block_t *get_slot(rh_info_t * info)
132 | info->empty_slots--; | 132 | info->empty_slots--; |
133 | 133 | ||
134 | /* Initialize */ | 134 | /* Initialize */ |
135 | blk->start = NULL; | 135 | blk->start = 0; |
136 | blk->size = 0; | 136 | blk->size = 0; |
137 | blk->owner = NULL; | 137 | blk->owner = NULL; |
138 | 138 | ||
@@ -157,7 +157,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
157 | 157 | ||
158 | /* We assume that they are aligned properly */ | 158 | /* We assume that they are aligned properly */ |
159 | size = blkn->size; | 159 | size = blkn->size; |
160 | s = (unsigned long)blkn->start; | 160 | s = blkn->start; |
161 | e = s + size; | 161 | e = s + size; |
162 | 162 | ||
163 | /* Find the blocks immediately before and after the given one | 163 | /* Find the blocks immediately before and after the given one |
@@ -169,7 +169,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
169 | list_for_each(l, &info->free_list) { | 169 | list_for_each(l, &info->free_list) { |
170 | blk = list_entry(l, rh_block_t, list); | 170 | blk = list_entry(l, rh_block_t, list); |
171 | 171 | ||
172 | bs = (unsigned long)blk->start; | 172 | bs = blk->start; |
173 | be = bs + blk->size; | 173 | be = bs + blk->size; |
174 | 174 | ||
175 | if (next == NULL && s >= bs) | 175 | if (next == NULL && s >= bs) |
@@ -187,10 +187,10 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
187 | } | 187 | } |
188 | 188 | ||
189 | /* Now check if they are really adjacent */ | 189 | /* Now check if they are really adjacent */ |
190 | if (before != NULL && s != (unsigned long)before->start + before->size) | 190 | if (before && s != (before->start + before->size)) |
191 | before = NULL; | 191 | before = NULL; |
192 | 192 | ||
193 | if (after != NULL && e != (unsigned long)after->start) | 193 | if (after && e != after->start) |
194 | after = NULL; | 194 | after = NULL; |
195 | 195 | ||
196 | /* No coalescing; list insert and return */ | 196 | /* No coalescing; list insert and return */ |
@@ -215,7 +215,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
215 | 215 | ||
216 | /* Grow the after block backwards */ | 216 | /* Grow the after block backwards */ |
217 | if (before == NULL && after != NULL) { | 217 | if (before == NULL && after != NULL) { |
218 | after->start = (int8_t *)after->start - size; | 218 | after->start -= size; |
219 | after->size += size; | 219 | after->size += size; |
220 | return; | 220 | return; |
221 | } | 221 | } |
@@ -320,14 +320,14 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
320 | } | 320 | } |
321 | 321 | ||
322 | /* Attach a free memory region, coalesces regions if adjuscent */ | 322 | /* Attach a free memory region, coalesces regions if adjuscent */ |
323 | int rh_attach_region(rh_info_t * info, void *start, int size) | 323 | int rh_attach_region(rh_info_t * info, unsigned long start, int size) |
324 | { | 324 | { |
325 | rh_block_t *blk; | 325 | rh_block_t *blk; |
326 | unsigned long s, e, m; | 326 | unsigned long s, e, m; |
327 | int r; | 327 | int r; |
328 | 328 | ||
329 | /* The region must be aligned */ | 329 | /* The region must be aligned */ |
330 | s = (unsigned long)start; | 330 | s = start; |
331 | e = s + size; | 331 | e = s + size; |
332 | m = info->alignment - 1; | 332 | m = info->alignment - 1; |
333 | 333 | ||
@@ -337,9 +337,12 @@ int rh_attach_region(rh_info_t * info, void *start, int size)
337 | /* Round end down */ | 337 | /* Round end down */ |
338 | e = e & ~m; | 338 | e = e & ~m; |
339 | 339 | ||
| | 340 | if (IS_ERR_VALUE(e) || (e < s)) |
| | 341 | return -ERANGE; |
| | 342 | |
340 | /* Take final values */ | 343 | /* Take final values */ |
341 | start = (void *)s; | 344 | start = s; |
342 | size = (int)(e - s); | 345 | size = e - s; |
343 | 346 | ||
344 | /* Grow the blocks, if needed */ | 347 | /* Grow the blocks, if needed */ |
345 | r = assure_empty(info, 1); | 348 | r = assure_empty(info, 1); |
@@ -357,7 +357,7 @@ int rh_attach_region(rh_info_t * info, void *start, int size)
357 | } | 360 | } |
358 | 361 | ||
359 | /* Detatch given address range, splits free block if needed. */ | 362 | /* Detatch given address range, splits free block if needed. */ |
360 | void *rh_detach_region(rh_info_t * info, void *start, int size) | 363 | unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size) |
361 | { | 364 | { |
362 | struct list_head *l; | 365 | struct list_head *l; |
363 | rh_block_t *blk, *newblk; | 366 | rh_block_t *blk, *newblk; |
@@ -365,10 +365,10 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
365 | 368 | ||
366 | /* Validate size */ | 369 | /* Validate size */ |
367 | if (size <= 0) | 370 | if (size <= 0) |
368 | return ERR_PTR(-EINVAL); | 371 | return (unsigned long) -EINVAL; |
369 | 372 | ||
370 | /* The region must be aligned */ | 373 | /* The region must be aligned */ |
371 | s = (unsigned long)start; | 374 | s = start; |
372 | e = s + size; | 375 | e = s + size; |
373 | m = info->alignment - 1; | 376 | m = info->alignment - 1; |
374 | 377 | ||
@@ -379,34 +379,34 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
379 | e = e & ~m; | 382 | e = e & ~m; |
380 | 383 | ||
381 | if (assure_empty(info, 1) < 0) | 384 | if (assure_empty(info, 1) < 0) |
382 | return ERR_PTR(-ENOMEM); | 385 | return (unsigned long) -ENOMEM; |
383 | 386 | ||
384 | blk = NULL; | 387 | blk = NULL; |
385 | list_for_each(l, &info->free_list) { | 388 | list_for_each(l, &info->free_list) { |
386 | blk = list_entry(l, rh_block_t, list); | 389 | blk = list_entry(l, rh_block_t, list); |
387 | /* The range must lie entirely inside one free block */ | 390 | /* The range must lie entirely inside one free block */ |
388 | bs = (unsigned long)blk->start; | 391 | bs = blk->start; |
389 | be = (unsigned long)blk->start + blk->size; | 392 | be = blk->start + blk->size; |
390 | if (s >= bs && e <= be) | 393 | if (s >= bs && e <= be) |
391 | break; | 394 | break; |
392 | blk = NULL; | 395 | blk = NULL; |
393 | } | 396 | } |
394 | 397 | ||
395 | if (blk == NULL) | 398 | if (blk == NULL) |
396 | return ERR_PTR(-ENOMEM); | 399 | return (unsigned long) -ENOMEM; |
397 | 400 | ||
398 | /* Perfect fit */ | 401 | /* Perfect fit */ |
399 | if (bs == s && be == e) { | 402 | if (bs == s && be == e) { |
400 | /* Delete from free list, release slot */ | 403 | /* Delete from free list, release slot */ |
401 | list_del(&blk->list); | 404 | list_del(&blk->list); |
402 | release_slot(info, blk); | 405 | release_slot(info, blk); |
403 | return (void *)s; | 406 | return s; |
404 | } | 407 | } |
405 | 408 | ||
406 | /* blk still in free list, with updated start and/or size */ | 409 | /* blk still in free list, with updated start and/or size */ |
407 | if (bs == s || be == e) { | 410 | if (bs == s || be == e) { |
408 | if (bs == s) | 411 | if (bs == s) |
409 | blk->start = (int8_t *)blk->start + size; | 412 | blk->start += size; |
410 | blk->size -= size; | 413 | blk->size -= size; |
411 | 414 | ||
412 | } else { | 415 | } else { |
@@ -415,31 +415,31 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
415 | 418 | ||
416 | /* the back free fragment */ | 419 | /* the back free fragment */ |
417 | newblk = get_slot(info); | 420 | newblk = get_slot(info); |
418 | newblk->start = (void *)e; | 421 | newblk->start = e; |
419 | newblk->size = be - e; | 422 | newblk->size = be - e; |
420 | 423 | ||
421 | list_add(&newblk->list, &blk->list); | 424 | list_add(&newblk->list, &blk->list); |
422 | } | 425 | } |
423 | 426 | ||
424 | return (void *)s; | 427 | return s; |
425 | } | 428 | } |
426 | 429 | ||
427 | void *rh_alloc(rh_info_t * info, int size, const char *owner) | 430 | unsigned long rh_alloc(rh_info_t * info, int size, const char *owner) |
428 | { | 431 | { |
429 | struct list_head *l; | 432 | struct list_head *l; |
430 | rh_block_t *blk; | 433 | rh_block_t *blk; |
431 | rh_block_t *newblk; | 434 | rh_block_t *newblk; |
432 | void *start; | 435 | unsigned long start; |
433 | 436 | ||
434 | /* Validate size */ | 437 | /* Validate size */ |
435 | if (size <= 0) | 438 | if (size <= 0) |
436 | return ERR_PTR(-EINVAL); | 439 | return (unsigned long) -EINVAL; |
437 | 440 | ||
438 | /* Align to configured alignment */ | 441 | /* Align to configured alignment */ |
439 | size = (size + (info->alignment - 1)) & ~(info->alignment - 1); | 442 | size = (size + (info->alignment - 1)) & ~(info->alignment - 1); |
440 | 443 | ||
441 | if (assure_empty(info, 1) < 0) | 444 | if (assure_empty(info, 1) < 0) |
442 | return ERR_PTR(-ENOMEM); | 445 | return (unsigned long) -ENOMEM; |
443 | 446 | ||
444 | blk = NULL; | 447 | blk = NULL; |
445 | list_for_each(l, &info->free_list) { | 448 | list_for_each(l, &info->free_list) { |
@@ -450,7 +450,7 @@ void *rh_alloc(rh_info_t * info, int size, const char *owner)
450 | } | 453 | } |
451 | 454 | ||
452 | if (blk == NULL) | 455 | if (blk == NULL) |
453 | return ERR_PTR(-ENOMEM); | 456 | return (unsigned long) -ENOMEM; |
454 | 457 | ||
455 | /* Just fits */ | 458 | /* Just fits */ |
456 | if (blk->size == size) { | 459 | if (blk->size == size) { |
@@ -470,7 +470,7 @@ void *rh_alloc(rh_info_t * info, int size, const char *owner)
470 | newblk->owner = owner; | 473 | newblk->owner = owner; |
471 | 474 | ||
472 | /* blk still in free list, with updated start, size */ | 475 | /* blk still in free list, with updated start, size */ |
473 | blk->start = (int8_t *)blk->start + size; | 476 | blk->start += size; |
474 | blk->size -= size; | 477 | blk->size -= size; |
475 | 478 | ||
476 | start = newblk->start; | 479 | start = newblk->start; |
@@ -481,18 +481,18 @@ void *rh_alloc(rh_info_t * info, int size, const char *owner)
481 | } | 484 | } |
482 | 485 | ||
483 | /* allocate at precisely the given address */ | 486 | /* allocate at precisely the given address */ |
484 | void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner) | 487 | unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner) |
485 | { | 488 | { |
486 | struct list_head *l; | 489 | struct list_head *l; |
487 | rh_block_t *blk, *newblk1, *newblk2; | 490 | rh_block_t *blk, *newblk1, *newblk2; |
488 | unsigned long s, e, m, bs, be; | 491 | unsigned long s, e, m, bs=0, be=0; |
489 | 492 | ||
490 | /* Validate size */ | 493 | /* Validate size */ |
491 | if (size <= 0) | 494 | if (size <= 0) |
492 | return ERR_PTR(-EINVAL); | 495 | return (unsigned long) -EINVAL; |
493 | 496 | ||
494 | /* The region must be aligned */ | 497 | /* The region must be aligned */ |
495 | s = (unsigned long)start; | 498 | s = start; |
496 | e = s + size; | 499 | e = s + size; |
497 | m = info->alignment - 1; | 500 | m = info->alignment - 1; |
498 | 501 | ||
@@ -503,20 +503,20 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
503 | e = e & ~m; | 506 | e = e & ~m; |
504 | 507 | ||
505 | if (assure_empty(info, 2) < 0) | 508 | if (assure_empty(info, 2) < 0) |
506 | return ERR_PTR(-ENOMEM); | 509 | return (unsigned long) -ENOMEM; |
507 | 510 | ||
508 | blk = NULL; | 511 | blk = NULL; |
509 | list_for_each(l, &info->free_list) { | 512 | list_for_each(l, &info->free_list) { |
510 | blk = list_entry(l, rh_block_t, list); | 513 | blk = list_entry(l, rh_block_t, list); |
511 | /* The range must lie entirely inside one free block */ | 514 | /* The range must lie entirely inside one free block */ |
512 | bs = (unsigned long)blk->start; | 515 | bs = blk->start; |
513 | be = (unsigned long)blk->start + blk->size; | 516 | be = blk->start + blk->size; |
514 | if (s >= bs && e <= be) | 517 | if (s >= bs && e <= be) |
515 | break; | 518 | break; |
516 | } | 519 | } |
517 | 520 | ||
518 | if (blk == NULL) | 521 | if (blk == NULL) |
519 | return ERR_PTR(-ENOMEM); | 522 | return (unsigned long) -ENOMEM; |
520 | 523 | ||
521 | /* Perfect fit */ | 524 | /* Perfect fit */ |
522 | if (bs == s && be == e) { | 525 | if (bs == s && be == e) { |
@@ -534,7 +534,7 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
534 | /* blk still in free list, with updated start and/or size */ | 537 | /* blk still in free list, with updated start and/or size */ |
535 | if (bs == s || be == e) { | 538 | if (bs == s || be == e) { |
536 | if (bs == s) | 539 | if (bs == s) |
537 | blk->start = (int8_t *)blk->start + size; | 540 | blk->start += size; |
538 | blk->size -= size; | 541 | blk->size -= size; |
539 | 542 | ||
540 | } else { | 543 | } else { |
@@ -543,14 +543,14 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
543 | 546 | ||
544 | /* The back free fragment */ | 547 | /* The back free fragment */ |
545 | newblk2 = get_slot(info); | 548 | newblk2 = get_slot(info); |
546 | newblk2->start = (void *)e; | 549 | newblk2->start = e; |
547 | newblk2->size = be - e; | 550 | newblk2->size = be - e; |
548 | 551 | ||
549 | list_add(&newblk2->list, &blk->list); | 552 | list_add(&newblk2->list, &blk->list); |
550 | } | 553 | } |
551 | 554 | ||
552 | newblk1 = get_slot(info); | 555 | newblk1 = get_slot(info); |
553 | newblk1->start = (void *)s; | 556 | newblk1->start = s; |
554 | newblk1->size = e - s; | 557 | newblk1->size = e - s; |
555 | newblk1->owner = owner; | 558 | newblk1->owner = owner; |
556 | 559 | ||
@@ -560,7 +560,7 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
560 | return start; | 563 | return start; |
561 | } | 564 | } |
562 | 565 | ||
563 | int rh_free(rh_info_t * info, void *start) | 566 | int rh_free(rh_info_t * info, unsigned long start) |
564 | { | 567 | { |
565 | rh_block_t *blk, *blk2; | 568 | rh_block_t *blk, *blk2; |
566 | struct list_head *l; | 569 | struct list_head *l; |
@@ -625,7 +625,7 @@ int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
625 | return nr; | 628 | return nr; |
626 | } | 629 | } |
627 | 630 | ||
628 | int rh_set_owner(rh_info_t * info, void *start, const char *owner) | 631 | int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner) |
629 | { | 632 | { |
630 | rh_block_t *blk, *blk2; | 633 | rh_block_t *blk, *blk2; |
631 | struct list_head *l; | 634 | struct list_head *l; |
@@ -667,8 +667,8 @@ void rh_dump(rh_info_t * info)
667 | nr = maxnr; | 670 | nr = maxnr; |
668 | for (i = 0; i < nr; i++) | 671 | for (i = 0; i < nr; i++) |
669 | printk(KERN_INFO | 672 | printk(KERN_INFO |
670 | " 0x%p-0x%p (%u)\n", | 673 | " 0x%lx-0x%lx (%u)\n", |
671 | st[i].start, (int8_t *) st[i].start + st[i].size, | 674 | st[i].start, st[i].start + st[i].size, |
672 | st[i].size); | 675 | st[i].size); |
673 | printk(KERN_INFO "\n"); | 676 | printk(KERN_INFO "\n"); |
674 | 677 | ||
@@ -678,8 +678,8 @@ void rh_dump(rh_info_t * info)
678 | nr = maxnr; | 681 | nr = maxnr; |
679 | for (i = 0; i < nr; i++) | 682 | for (i = 0; i < nr; i++) |
680 | printk(KERN_INFO | 683 | printk(KERN_INFO |
681 | " 0x%p-0x%p (%u) %s\n", | 684 | " 0x%lx-0x%lx (%u) %s\n", |
682 | st[i].start, (int8_t *) st[i].start + st[i].size, | 685 | st[i].start, st[i].start + st[i].size, |
683 | st[i].size, st[i].owner != NULL ? st[i].owner : ""); | 686 | st[i].size, st[i].owner != NULL ? st[i].owner : ""); |
684 | printk(KERN_INFO "\n"); | 687 | printk(KERN_INFO "\n"); |
685 | } | 688 | } |
@@ -687,6 +687,6 @@ void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
687 | void rh_dump_blk(rh_info_t * info, rh_block_t * blk) | 690 | void rh_dump_blk(rh_info_t * info, rh_block_t * blk) |
688 | { | 691 | { |
689 | printk(KERN_INFO | 692 | printk(KERN_INFO |
690 | "blk @0x%p: 0x%p-0x%p (%u)\n", | 693 | "blk @0x%p: 0x%lx-0x%lx (%u)\n", |
691 | blk, blk->start, (int8_t *) blk->start + blk->size, blk->size); | 694 | blk, blk->start, blk->start + blk->size, blk->size); |
692 | } | 695 | } |