Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c  57
1 file changed, 41 insertions, 16 deletions
diff --git a/mm/slob.c b/mm/slob.c
index 5adc29cb58dd..c6933bc19bcd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -150,15 +150,6 @@ static void slob_free(void *block, int size)
 	spin_unlock_irqrestore(&slob_lock, flags);
 }
 
-static int FASTCALL(find_order(int size));
-static int fastcall find_order(int size)
-{
-	int order = 0;
-	for ( ; size > 4096 ; size >>=1)
-		order++;
-	return order;
-}
-
 void *__kmalloc(size_t size, gfp_t gfp)
 {
 	slob_t *m;
@@ -174,7 +165,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
 	if (!bb)
 		return 0;
 
-	bb->order = find_order(size);
+	bb->order = get_order(size);
 	bb->pages = (void *)__get_free_pages(gfp, bb->order);
 
 	if (bb->pages) {
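For reference, the removed find_order() walked the size down a page at a time; get_order() is the kernel's generic helper for the same computation, the smallest order for which PAGE_SIZE << order covers the requested size. A rough userspace sketch of that relationship, assuming 4 KiB pages (order_for_size() is an illustrative stand-in, not the kernel's implementation):

	#include <assert.h>

	#define PAGE_SHIFT	12	/* assume 4 KiB pages for this sketch */

	/* Hypothetical stand-in for get_order(): smallest order such that
	 * (PAGE_SIZE << order) is at least 'size' bytes. */
	static int order_for_size(unsigned long size)
	{
		int order = 0;

		size = (size - 1) >> PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	int main(void)
	{
		assert(order_for_size(1) == 0);		/* one page is enough */
		assert(order_for_size(4096) == 0);
		assert(order_for_size(4097) == 1);	/* spills into a second page */
		assert(order_for_size(16384) == 2);	/* four pages */
		return 0;
	}

Dropping the open-coded loop also removes the hard-wired 4096, so the order is derived from the real PAGE_SIZE on every architecture.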
@@ -190,6 +181,39 @@ void *__kmalloc(size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	void *ret;
+
+	if (unlikely(!p))
+		return kmalloc_track_caller(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 void kfree(const void *block)
 {
 	bigblock_t *bb, **last = &bigblocks;
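The kerneldoc above pins down the three cases a caller sees: a %NULL @p behaves like kmalloc(), a zero @new_size frees the object, and otherwise the old contents survive up to the smaller of the two sizes. A minimal, hypothetical caller built on those rules (grow_buffer() is not part of the patch):

	#include <linux/slab.h>

	/* Hypothetical helper: grow a buffer while keeping its contents.
	 * As in the krealloc() body above, the old allocation is left intact
	 * when the new one cannot be obtained, so nothing is lost on failure. */
	static char *grow_buffer(char *buf, size_t new_len)
	{
		char *tmp;

		if (!new_len)
			return buf;	/* nothing to grow; keep the old buffer */

		tmp = krealloc(buf, new_len, GFP_KERNEL);
		if (!tmp)
			return buf;	/* allocation failed; old contents still valid */

		return tmp;		/* old bytes copied up to min(old size, new_len) */
	}

Passing a NULL buf here degenerates into a plain kmalloc(new_len, GFP_KERNEL), per the %NULL case documented above; on allocation failure the caller keeps its old, still-valid buffer.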
@@ -219,7 +243,7 @@ void kfree(const void *block)
 
 EXPORT_SYMBOL(kfree);
 
-unsigned int ksize(const void *block)
+size_t ksize(const void *block)
 {
 	bigblock_t *bb;
 	unsigned long flags;
@@ -262,10 +286,11 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
-	}
+	} else if (flags & SLAB_PANIC)
+		panic("Cannot create slab cache %s\n", name);
 
 	return c;
 }
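The new else branch gives SLAB_PANIC a meaning under SLOB: if the cache descriptor itself cannot be allocated, the kernel panics instead of handing back NULL, so early callers can drop their error checks. A hypothetical caller of this era's six-argument kmem_cache_create() (struct foo, foo_cachep and foo_init() are illustrative only), also showing SLAB_HWCACHE_ALIGN requesting SLOB_ALIGN alignment instead of the default 8-byte packing:

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo {
		unsigned long state;
		void *payload;
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/* SLAB_HWCACHE_ALIGN: force aligned objects rather than the
		 * default packing; SLAB_PANIC: panic rather than return NULL. */
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
					       SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					       NULL, NULL);
		return 0;
	}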
@@ -284,7 +309,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align);
 	else
-		b = (void *)__get_free_pages(flags, find_order(c->size));
+		b = (void *)__get_free_pages(flags, get_order(c->size));
 
 	if (c->ctor)
 		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
@@ -311,7 +336,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	if (c->size < PAGE_SIZE)
 		slob_free(b, c->size);
 	else
-		free_pages((unsigned long)b, find_order(c->size));
+		free_pages((unsigned long)b, get_order(c->size));
 }
 EXPORT_SYMBOL(kmem_cache_free);
 