Diffstat (limited to 'arch/powerpc/boot/simple_alloc.c')
-rw-r--r--  arch/powerpc/boot/simple_alloc.c | 31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
index cfe3a7505ba0..65ec135d0157 100644
--- a/arch/powerpc/boot/simple_alloc.c
+++ b/arch/powerpc/boot/simple_alloc.c
@@ -19,24 +19,24 @@
 #define ENTRY_IN_USE	0x02
 
 static struct alloc_info {
-	u32	flags;
-	u32	base;
-	u32	size;
+	unsigned long	flags;
+	unsigned long	base;
+	unsigned long	size;
 } *alloc_tbl;
 
-static u32 tbl_entries;
-static u32 alloc_min;
-static u32 next_base;
-static u32 space_left;
+static unsigned long tbl_entries;
+static unsigned long alloc_min;
+static unsigned long next_base;
+static unsigned long space_left;
 
 /*
  * First time an entry is used, its base and size are set.
  * An entry can be freed and re-malloc'd but its base & size don't change.
  * Should be smart enough for needs of bootwrapper.
  */
-static void *simple_malloc(u32 size)
+static void *simple_malloc(unsigned long size)
 {
-	u32 i;
+	unsigned long i;
 	struct alloc_info *p = alloc_tbl;
 
 	if (size == 0)
@@ -67,13 +67,14 @@ err_out:
 
 static struct alloc_info *simple_find_entry(void *ptr)
 {
-	u32 i;
+	unsigned long i;
 	struct alloc_info *p = alloc_tbl;
 
 	for (i=0; i<tbl_entries; i++,p++) {
 		if (!(p->flags & ENTRY_BEEN_USED))
 			break;
-		if ((p->flags & ENTRY_IN_USE) && (p->base == (u32)ptr))
+		if ((p->flags & ENTRY_IN_USE) &&
+		    (p->base == (unsigned long)ptr))
 			return p;
 	}
 	return NULL;
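For context (not part of the commit): the comparison in simple_find_entry() is why the cast changes matter. On a 64-bit wrapper, casting a pointer to u32 discards the upper 32 bits, so p->base could never match an allocation that lives above 4 GB, whereas unsigned long is pointer-sized on both 32-bit and 64-bit powerpc. A minimal standalone C sketch of the truncation, using a made-up address value and host stdio (the bootwrapper itself is freestanding):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up 64-bit heap address, used only for illustration. */
	uintptr_t addr = (uintptr_t)0x123456789abcULL;
	void *ptr = (void *)addr;

	uint32_t as_u32 = (uint32_t)(uintptr_t)ptr;	/* upper bits lost on a 64-bit build */
	unsigned long as_ulong = (unsigned long)ptr;	/* pointer-sized on LP64 */

	printf("u32 cast:           0x%" PRIx32 "\n", as_u32);
	printf("unsigned long cast: 0x%lx\n", as_ulong);
	return 0;
}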
@@ -122,10 +123,10 @@ static void *simple_realloc(void *ptr, unsigned long size)
  * Returns addr of first byte after heap so caller can see if it took
  * too much space.  If so, change args & try again.
  */
-void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
-		u32 max_allocs)
+void *simple_alloc_init(char *base, unsigned long heap_size,
+			unsigned long granularity, unsigned long max_allocs)
 {
-	u32 heap_base, tbl_size;
+	unsigned long heap_base, tbl_size;
 
 	heap_size = _ALIGN_UP(heap_size, granularity);
 	alloc_min = granularity;
@@ -136,7 +137,7 @@ void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
 	alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8);
 	memset(alloc_tbl, 0, tbl_size);
 
-	heap_base = _ALIGN_UP((u32)alloc_tbl + tbl_size, alloc_min);
+	heap_base = _ALIGN_UP((unsigned long)alloc_tbl + tbl_size, alloc_min);
 
 	next_base = heap_base;
 	space_left = heap_size;
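As a usage sketch of the new simple_alloc_init() signature (not taken from the bootwrapper's platform code: HEAP_SIZE, heap_setup, top_of_ram and the 32/64 granularity and max_allocs values are invented for this example), a caller carves a heap just past the wrapper image and checks the returned end address against the memory actually available:

/* Sketch only: all names and sizes here are illustrative. */
#define HEAP_SIZE	(64 * 1024)

extern char _end[];	/* end of the wrapper image, a typical heap base */

/* Prototype as changed by this patch (normally provided by a header). */
void *simple_alloc_init(char *base, unsigned long heap_size,
			unsigned long granularity, unsigned long max_allocs);

void heap_setup(char *top_of_ram)
{
	/* 64 KiB heap just past the image, 32-byte allocation granularity,
	 * at most 64 table entries (slots are reused after free). */
	void *heap_end = simple_alloc_init(_end, HEAP_SIZE, 32, 64);

	if ((char *)heap_end > top_of_ram) {
		/* Heap took too much space: per the comment above, adjust
		 * the arguments and call simple_alloc_init() again. */
	}
}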