author		Nick Piggin <npiggin@suse.de>	2007-07-16 02:38:08 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-16 12:05:35 -0400
commit		d87a133fc21d842e3cc285e6bbff727181abec81
tree		1f89a5dab95c7813eca37039a94e39c73a797cdc
parent		95b35127f13661abb0dc3459042cdb417d21e692
slob: remove bigblock tracking
Remove the bigblock lists in favour of using compound pages and going directly
to the page allocator. Allocation size is stored in page->private, which also
makes ksize more accurate than it previously was.
Saves ~0.5K of code, and 12-24 bytes of overhead per >= PAGE_SIZE allocation.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slob.c | 103
1 file changed, 29 insertions(+), 74 deletions(-)
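The mechanism the changelog describes hinges on one property of compound pages: with __GFP_COMP, the page allocator records the allocation order in the head page itself, so a multi-page block can be freed from its head page alone with no caller-side bookkeeping, and the head page's ->private field is left free to carry the exact request size. A minimal kernel-style sketch of that round trip, condensing the new paths in the diff below (big_alloc/big_free are illustrative names, not part of the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate a compound page and stash the exact request size. */
static void *big_alloc(size_t size, gfp_t gfp)
{
        void *ret = (void *)__get_free_pages(gfp | __GFP_COMP,
                                             get_order(size));
        if (ret)
                virt_to_page(ret)->private = size; /* exact size, for ksize() */
        return ret;
}

/* Free it from the pointer alone: the compound head knows its order. */
static void big_free(const void *block)
{
        put_page(virt_to_page(block));
}

This is exactly why the bigblock list (and its spinlock) can go: every piece of state the list used to track now lives in the struct page of the allocation itself.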
@@ -18,9 +18,11 @@
  * Above this is an implementation of kmalloc/kfree. Blocks returned
  * from kmalloc are 4-byte aligned and prepended with a 4-byte header.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
- * __get_free_pages directly so that it can return page-aligned blocks
- * and keeps a linked list of such pages and their orders. These
- * objects are detected in kfree() by their page alignment.
+ * __get_free_pages directly, allocating compound pages so the page order
+ * does not have to be separately tracked, and also stores the exact
+ * allocation size in page->private so that it can be used to accurately
+ * provide ksize(). These objects are detected in kfree() because slob_page()
+ * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with the
@@ -29,7 +31,8 @@
  * alignment. Again, objects of page-size or greater are allocated by
  * calling __get_free_pages. As SLAB objects know their size, no separate
  * size bookkeeping is necessary and there is essentially no allocation
- * space overhead.
+ * space overhead, and compound pages aren't needed for multi-page
+ * allocations.
  */
 
 #include <linux/kernel.h>
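The new comment's detection claim ("slob_page() is false for them") rests on the parent commit, 95b35127, which marks every page owned by the SLOB heap with an overloaded page flag; pages handed out by __get_free_pages() never carry that mark. A sketch of the predicate, assuming the PG_active overload used in that version of slob.c:

#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Sketch of slob_page() from the parent commit.  Which flag is
 * overloaded is an implementation detail of that commit; PG_active
 * is assumed here.  Compound pages from __get_free_pages() never
 * have it set, which is the entire detection scheme.
 */
static inline int page_is_slob_heap(struct page *page)
{
        return test_bit(PG_active, &page->flags);
}

kfree() and ksize() below branch on exactly this predicate: heap pages are handled through the in-page SLOB headers, anything else is treated as a compound page.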
@@ -381,48 +384,26 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-struct bigblock {
-        int order;
-        void *pages;
-        struct bigblock *next;
-};
-typedef struct bigblock bigblock_t;
-
-static bigblock_t *bigblocks;
-
-static DEFINE_SPINLOCK(block_lock);
-
-
 void *__kmalloc(size_t size, gfp_t gfp)
 {
-        slob_t *m;
-        bigblock_t *bb;
-        unsigned long flags;
-
         if (size < PAGE_SIZE - SLOB_UNIT) {
+                slob_t *m;
                 m = slob_alloc(size + SLOB_UNIT, gfp, 0);
                 if (m)
                         m->units = size;
                 return m+1;
+        } else {
+                void *ret;
+
+                ret = (void *) __get_free_pages(gfp | __GFP_COMP,
+                                                get_order(size));
+                if (ret) {
+                        struct page *page;
+                        page = virt_to_page(ret);
+                        page->private = size;
+                }
+                return ret;
         }
-
-        bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
-        if (!bb)
-                return 0;
-
-        bb->order = get_order(size);
-        bb->pages = (void *)__get_free_pages(gfp, bb->order);
-
-        if (bb->pages) {
-                spin_lock_irqsave(&block_lock, flags);
-                bb->next = bigblocks;
-                bigblocks = bb;
-                spin_unlock_irqrestore(&block_lock, flags);
-                return bb->pages;
-        }
-
-        slob_free(bb, sizeof(bigblock_t));
-        return 0;
 }
 EXPORT_SYMBOL(__kmalloc);
 
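The routing condition in the new __kmalloc() is worth a worked example: a small object is stored together with its SLOB_UNIT-sized header, so only requests strictly smaller than PAGE_SIZE - SLOB_UNIT fit inside a SLOB page; everything else takes the compound-page path. A standalone illustration, assuming 4096-byte pages and the 4-byte header described in the file comment:

#include <stdio.h>

#define PAGE_SIZE 4096
#define SLOB_UNIT 4

int main(void)
{
        size_t sizes[] = { 32, 4091, 4092, 5000 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                if (sizes[i] < PAGE_SIZE - SLOB_UNIT)
                        printf("%5zu bytes -> SLOB heap (object + header in-page)\n",
                               sizes[i]);
                else
                        printf("%5zu bytes -> page allocator (compound page)\n",
                               sizes[i]);
        }
        return 0;
}

With these numbers, kmalloc(4091) is the largest request still served from the SLOB heap; kmalloc(4092) already goes to __get_free_pages().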
@@ -462,59 +443,33 @@ EXPORT_SYMBOL(krealloc);
 void kfree(const void *block)
 {
         struct slob_page *sp;
-        slob_t *m;
-        bigblock_t *bb, **last = &bigblocks;
-        unsigned long flags;
 
         if (!block)
                 return;
 
         sp = (struct slob_page *)virt_to_page(block);
-        if (!slob_page(sp)) {
-                /* on the big block list */
-                spin_lock_irqsave(&block_lock, flags);
-                for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
-                        if (bb->pages == block) {
-                                *last = bb->next;
-                                spin_unlock_irqrestore(&block_lock, flags);
-                                free_pages((unsigned long)block, bb->order);
-                                slob_free(bb, sizeof(bigblock_t));
-                                return;
-                        }
-                }
-                spin_unlock_irqrestore(&block_lock, flags);
-                WARN_ON(1);
-                return;
-        }
-
-        m = (slob_t *)block - 1;
-        slob_free(m, m->units + SLOB_UNIT);
-        return;
+        if (slob_page(sp)) {
+                slob_t *m = (slob_t *)block - 1;
+                slob_free(m, m->units + SLOB_UNIT);
+        } else
+                put_page(&sp->page);
 }
 
 EXPORT_SYMBOL(kfree);
 
+/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
         struct slob_page *sp;
-        bigblock_t *bb;
-        unsigned long flags;
 
         if (!block)
                 return 0;
 
         sp = (struct slob_page *)virt_to_page(block);
-        if (!slob_page(sp)) {
-                spin_lock_irqsave(&block_lock, flags);
-                for (bb = bigblocks; bb; bb = bb->next)
-                        if (bb->pages == block) {
-                                spin_unlock_irqrestore(&slob_lock, flags);
-                                return PAGE_SIZE << bb->order;
-                        }
-                spin_unlock_irqrestore(&block_lock, flags);
-        }
-
-        return ((slob_t *)block - 1)->units + SLOB_UNIT;
+        if (slob_page(sp))
+                return ((slob_t *)block - 1)->units + SLOB_UNIT;
+        else
+                return sp->page.private;
 }
 
 struct kmem_cache {
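The changelog's "makes ksize more accurate" claim, made concrete: the old implementation could only report PAGE_SIZE << bb->order, the rounded-up size of the underlying pages, whereas the new one reads the exact request back from page->private. A standalone illustration, assuming 4096-byte pages (get_order() reimplemented here just for the example):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Smallest order whose page run covers the request. */
static unsigned int get_order(size_t size)
{
        unsigned int order = 0;

        while (((size_t)PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        size_t requested = 5000;

        /* old: rounded up to whole pages (order 1 -> 8192 bytes) */
        printf("old ksize(): %zu\n", (size_t)PAGE_SIZE << get_order(requested));
        /* new: exact request, read back from page->private */
        printf("new ksize(): %zu\n", requested);
        return 0;
}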