about summary refs log tree commit diff stats
path: root/mm/slob.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c | 44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 4b1c0c1d63cb..7a3411524dac 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
126static LIST_HEAD(free_slob_large); 126static LIST_HEAD(free_slob_large);
127 127
128/* 128/*
129 * slob_page: True for all slob pages (false for bigblock pages) 129 * is_slob_page: True for all slob pages (false for bigblock pages)
130 */ 130 */
131static inline int slob_page(struct slob_page *sp) 131static inline int is_slob_page(struct slob_page *sp)
132{ 132{
133 return PageSlobPage((struct page *)sp); 133 return PageSlobPage((struct page *)sp);
134} 134}
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
143 __ClearPageSlobPage((struct page *)sp); 143 __ClearPageSlobPage((struct page *)sp);
144} 144}
145 145
146static inline struct slob_page *slob_page(const void *addr)
147{
148 return (struct slob_page *)virt_to_page(addr);
149}
150
146/* 151/*
147 * slob_page_free: true for pages on free_slob_pages list. 152 * slob_page_free: true for pages on free_slob_pages list.
148 */ 153 */
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
230 return !((unsigned long)slob_next(s) & ~PAGE_MASK); 235 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
231} 236}
232 237
233static void *slob_new_page(gfp_t gfp, int order, int node) 238static void *slob_new_pages(gfp_t gfp, int order, int node)
234{ 239{
235 void *page; 240 void *page;
236 241
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
247 return page_address(page); 252 return page_address(page);
248} 253}
249 254
255static void slob_free_pages(void *b, int order)
256{
257 free_pages((unsigned long)b, order);
258}
259
250/* 260/*
251 * Allocate a slob block within a given slob_page sp. 261 * Allocate a slob block within a given slob_page sp.
252 */ 262 */
253static void *slob_page_alloc(struct slob_page *sp, size_t size, int align) 263static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
254{ 264{
255 slob_t *prev, *cur, *aligned = 0; 265 slob_t *prev, *cur, *aligned = NULL;
256 int delta = 0, units = SLOB_UNITS(size); 266 int delta = 0, units = SLOB_UNITS(size);
257 267
258 for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) { 268 for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
349 359
350 /* Not enough space: must allocate a new page */ 360 /* Not enough space: must allocate a new page */
351 if (!b) { 361 if (!b) {
352 b = slob_new_page(gfp & ~__GFP_ZERO, 0, node); 362 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
353 if (!b) 363 if (!b)
354 return 0; 364 return NULL;
355 sp = (struct slob_page *)virt_to_page(b); 365 sp = slob_page(b);
356 set_slob_page(sp); 366 set_slob_page(sp);
357 367
358 spin_lock_irqsave(&slob_lock, flags); 368 spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
384 return; 394 return;
385 BUG_ON(!size); 395 BUG_ON(!size);
386 396
387 sp = (struct slob_page *)virt_to_page(block); 397 sp = slob_page(block);
388 units = SLOB_UNITS(size); 398 units = SLOB_UNITS(size);
389 399
390 spin_lock_irqsave(&slob_lock, flags); 400 spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
393 /* Go directly to page allocator. Do not pass slob allocator */ 403 /* Go directly to page allocator. Do not pass slob allocator */
394 if (slob_page_free(sp)) 404 if (slob_page_free(sp))
395 clear_slob_page_free(sp); 405 clear_slob_page_free(sp);
406 spin_unlock_irqrestore(&slob_lock, flags);
396 clear_slob_page(sp); 407 clear_slob_page(sp);
397 free_slob_page(sp); 408 free_slob_page(sp);
398 free_page((unsigned long)b); 409 free_page((unsigned long)b);
399 goto out; 410 return;
400 } 411 }
401 412
402 if (!slob_page_free(sp)) { 413 if (!slob_page_free(sp)) {
@@ -478,7 +489,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
478 } else { 489 } else {
479 void *ret; 490 void *ret;
480 491
481 ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node); 492 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
482 if (ret) { 493 if (ret) {
483 struct page *page; 494 struct page *page;
484 page = virt_to_page(ret); 495 page = virt_to_page(ret);
@@ -496,8 +507,8 @@ void kfree(const void *block)
496 if (unlikely(ZERO_OR_NULL_PTR(block))) 507 if (unlikely(ZERO_OR_NULL_PTR(block)))
497 return; 508 return;
498 509
499 sp = (struct slob_page *)virt_to_page(block); 510 sp = slob_page(block);
500 if (slob_page(sp)) { 511 if (is_slob_page(sp)) {
501 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); 512 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
502 unsigned int *m = (unsigned int *)(block - align); 513 unsigned int *m = (unsigned int *)(block - align);
503 slob_free(m, *m + align); 514 slob_free(m, *m + align);
@@ -515,14 +526,15 @@ size_t ksize(const void *block)
515 if (unlikely(block == ZERO_SIZE_PTR)) 526 if (unlikely(block == ZERO_SIZE_PTR))
516 return 0; 527 return 0;
517 528
518 sp = (struct slob_page *)virt_to_page(block); 529 sp = slob_page(block);
519 if (slob_page(sp)) { 530 if (is_slob_page(sp)) {
520 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); 531 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
521 unsigned int *m = (unsigned int *)(block - align); 532 unsigned int *m = (unsigned int *)(block - align);
522 return SLOB_UNITS(*m) * SLOB_UNIT; 533 return SLOB_UNITS(*m) * SLOB_UNIT;
523 } else 534 } else
524 return sp->page.private; 535 return sp->page.private;
525} 536}
537EXPORT_SYMBOL(ksize);
526 538
527struct kmem_cache { 539struct kmem_cache {
528 unsigned int size, align; 540 unsigned int size, align;
@@ -574,7 +586,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
574 if (c->size < PAGE_SIZE) 586 if (c->size < PAGE_SIZE)
575 b = slob_alloc(c->size, flags, c->align, node); 587 b = slob_alloc(c->size, flags, c->align, node);
576 else 588 else
577 b = slob_new_page(flags, get_order(c->size), node); 589 b = slob_new_pages(flags, get_order(c->size), node);
578 590
579 if (c->ctor) 591 if (c->ctor)
580 c->ctor(b); 592 c->ctor(b);
@@ -588,7 +600,7 @@ static void __kmem_cache_free(void *b, int size)
588 if (size < PAGE_SIZE) 600 if (size < PAGE_SIZE)
589 slob_free(b, size); 601 slob_free(b, size);
590 else 602 else
591 free_pages((unsigned long)b, get_order(size)); 603 slob_free_pages(b, get_order(size));
592} 604}
593 605
594static void kmem_rcu_free(struct rcu_head *head) 606static void kmem_rcu_free(struct rcu_head *head)