Diffstat (limited to 'lib')
-rw-r--r--  lib/genalloc.c | 263
1 file changed, 121 insertions(+), 142 deletions(-)
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9ce0a6a3b85a..71338b48e889 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -4,10 +4,6 @@
  * Uses for this includes on-device special memory, uncached memory
  * etc.
  *
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
- * and adapted for general purpose use.
- *
  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
  *
  * This source code is licensed under the GNU General Public License,
@@ -15,172 +11,155 @@
  */
 
 #include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
 #include <linux/genalloc.h>
 
-#include <asm/page.h>
-
 
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
-				 unsigned long (*fp)(struct gen_pool *),
-				 unsigned long data)
+/*
+ * Create a new special memory pool.
+ *
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 {
-	struct gen_pool *poolp;
-	unsigned long tmp;
-	int i;
-
-	/*
-	 * This is really an arbitrary limit, +10 is enough for
-	 * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
-	 * this can be increased without problems.
-	 */
-	if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
-	    ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
-		return NULL;
-
-	if (!max_chunk_shift)
-		max_chunk_shift = PAGE_SHIFT;
-
-	poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
-	if (!poolp)
-		return NULL;
-	memset(poolp, 0, sizeof(struct gen_pool));
-	poolp->h = kmalloc(sizeof(struct gen_pool_link) *
-			   (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
-			   GFP_KERNEL);
-	if (!poolp->h) {
-		printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
-		kfree(poolp);
-		return NULL;
-	}
-	memset(poolp->h, 0, sizeof(struct gen_pool_link) *
-	       (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
-
-	spin_lock_init(&poolp->lock);
-	poolp->get_new_chunk = fp;
-	poolp->max_chunk_shift = max_chunk_shift;
-	poolp->private = data;
-
-	for (i = 0; i < nr_chunks; i++) {
-		tmp = poolp->get_new_chunk(poolp);
-		printk(KERN_INFO "allocated %lx\n", tmp);
-		if (!tmp)
-			break;
-		gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
-	}
+	struct gen_pool *pool;
 
-	return poolp;
+	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+	if (pool != NULL) {
+		rwlock_init(&pool->lock);
+		INIT_LIST_HEAD(&pool->chunks);
+		pool->min_alloc_order = min_alloc_order;
+	}
+	return pool;
 }
 EXPORT_SYMBOL(gen_pool_create);
 
 
 /*
- * Simple power of two buddy-like generic allocator.
- * Provides naturally aligned memory chunks.
+ * Add a new chunk of memory to the specified pool.
+ *
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ *       allocated on, or -1
  */
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+		 int nid)
 {
-	int j, i, s, max_chunk_size;
-	unsigned long a, flags;
-	struct gen_pool_link *h = poolp->h;
+	struct gen_pool_chunk *chunk;
+	int nbits = size >> pool->min_alloc_order;
+	int nbytes = sizeof(struct gen_pool_chunk) +
+			(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	max_chunk_size = 1 << poolp->max_chunk_shift;
+	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	if (unlikely(chunk == NULL))
+		return -1;
 
-	if (size > max_chunk_size)
-		return 0;
+	memset(chunk, 0, nbytes);
+	spin_lock_init(&chunk->lock);
+	chunk->start_addr = addr;
+	chunk->end_addr = addr + size;
 
-	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	i = fls(size - 1);
-	s = 1 << i;
-	j = i -= ALLOC_MIN_SHIFT;
-
-	spin_lock_irqsave(&poolp->lock, flags);
-	while (!h[j].next) {
-		if (s == max_chunk_size) {
-			struct gen_pool_link *ptr;
-			spin_unlock_irqrestore(&poolp->lock, flags);
-			ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
-			spin_lock_irqsave(&poolp->lock, flags);
-			h[j].next = ptr;
-			if (h[j].next)
-				h[j].next->next = NULL;
-			break;
-		}
-		j++;
-		s <<= 1;
-	}
-	a = (unsigned long) h[j].next;
-	if (a) {
-		h[j].next = h[j].next->next;
-		/*
-		 * This should be split into a seperate function doing
-		 * the chunk split in order to support custom
-		 * handling memory not physically accessible by host
-		 */
-		while (j > i) {
-			j -= 1;
-			s >>= 1;
-			h[j].next = (struct gen_pool_link *) (a + s);
-			h[j].next->next = NULL;
-		}
-	}
-	spin_unlock_irqrestore(&poolp->lock, flags);
-	return a;
+	write_lock(&pool->lock);
+	list_add(&chunk->next_chunk, &pool->chunks);
+	write_unlock(&pool->lock);
+
+	return 0;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_add);
 
 
 /*
- * Counter-part of the generic allocator.
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ *
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
  */
-void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
-	struct gen_pool_link *q;
-	struct gen_pool_link *h = poolp->h;
-	unsigned long a, b, flags;
-	int i, s, max_chunk_size;
-
-	max_chunk_size = 1 << poolp->max_chunk_shift;
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	unsigned long addr, flags;
+	int order = pool->min_alloc_order;
+	int nbits, bit, start_bit, end_bit;
 
-	if (size > max_chunk_size)
-		return;
-
-	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	i = fls(size - 1);
-	s = 1 << i;
-	i -= ALLOC_MIN_SHIFT;
-
-	a = ptr;
+	if (size == 0)
+		return 0;
 
-	spin_lock_irqsave(&poolp->lock, flags);
-	while (1) {
-		if (s == max_chunk_size) {
-			((struct gen_pool_link *)a)->next = h[i].next;
-			h[i].next = (struct gen_pool_link *)a;
-			break;
+	nbits = (size + (1UL << order) - 1) >> order;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+		end_bit -= nbits + 1;
+
+		spin_lock_irqsave(&chunk->lock, flags);
+		bit = -1;
+		while (bit + 1 < end_bit) {
+			bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+			if (bit >= end_bit)
+				break;
+
+			start_bit = bit;
+			if (nbits > 1) {
+				bit = find_next_bit(chunk->bits, bit + nbits,
+						    bit + 1);
+				if (bit - start_bit < nbits)
+					continue;
+			}
+
+			addr = chunk->start_addr +
+				((unsigned long)start_bit << order);
+			while (nbits--)
+				__set_bit(start_bit++, &chunk->bits);
+			spin_unlock_irqrestore(&chunk->lock, flags);
+			read_unlock(&pool->lock);
+			return addr;
 		}
-		b = a ^ s;
-		q = &h[i];
+		spin_unlock_irqrestore(&chunk->lock, flags);
+	}
+	read_unlock(&pool->lock);
+	return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
 
-	while (q->next && q->next != (struct gen_pool_link *)b)
-		q = q->next;
 
-	if (!q->next) {
-		((struct gen_pool_link *)a)->next = h[i].next;
-		h[i].next = (struct gen_pool_link *)a;
+/*
+ * Free the specified memory back to the specified pool.
+ *
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+	struct list_head *_chunk;
+	struct gen_pool_chunk *chunk;
+	unsigned long flags;
+	int order = pool->min_alloc_order;
+	int bit, nbits;
+
+	nbits = (size + (1UL << order) - 1) >> order;
+
+	read_lock(&pool->lock);
+	list_for_each(_chunk, &pool->chunks) {
+		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			BUG_ON(addr + size > chunk->end_addr);
+			spin_lock_irqsave(&chunk->lock, flags);
+			bit = (addr - chunk->start_addr) >> order;
+			while (nbits--)
+				__clear_bit(bit++, &chunk->bits);
+			spin_unlock_irqrestore(&chunk->lock, flags);
 			break;
 		}
-		q->next = q->next->next;
-		a = a & b;
-		s <<= 1;
-		i++;
 	}
-	spin_unlock_irqrestore(&poolp->lock, flags);
+	BUG_ON(nbits > 0);
+	read_unlock(&pool->lock);
 }
 EXPORT_SYMBOL(gen_pool_free);
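
As a usage illustration (not part of the commit): a minimal sketch of how a driver could sit on top of the rewritten API, assuming a 1 MB block of on-device SRAM already mapped into the kernel at sram_virt. The names sram_pool, sram_init() and sram_demo() are made up for this example.

#include <linux/errno.h>
#include <linux/genalloc.h>

static struct gen_pool *sram_pool;

static int sram_init(unsigned long sram_virt)
{
	/* Each bitmap bit represents 2^5 = 32 bytes. */
	sram_pool = gen_pool_create(5, -1);
	if (sram_pool == NULL)
		return -ENOMEM;

	/*
	 * Hand the whole region to the pool; -1 = no NUMA preference.
	 * Note this version of the API has no pool-destroy call, so the
	 * pool structure is simply leaked if adding the chunk fails.
	 */
	if (gen_pool_add(sram_pool, sram_virt, 1024 * 1024, -1) < 0)
		return -ENOMEM;

	return 0;
}

static void sram_demo(void)
{
	unsigned long buf;

	/* A 100-byte request rounds up to 4 bits, i.e. 128 bytes. */
	buf = gen_pool_alloc(sram_pool, 100);
	if (buf == 0)
		return;	/* pool exhausted */

	/* ... use the memory at buf ... */

	/* The caller must pass the same size back when freeing. */
	gen_pool_free(sram_pool, buf, 100);
}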
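Two properties of the new scheme are worth calling out. Request sizes are rounded up to whole bitmap bits, so with min_alloc_order = 5 a 100-byte allocation marks (100 + 31) >> 5 = 4 bits and consumes 128 bytes. Locking is two-level: a pool-wide rwlock guards the chunk list (taken for write only in gen_pool_add()), while each chunk carries its own spinlock around bitmap searches and updates, so allocations against different chunks do not serialize on a single pool lock.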