Diffstat (limited to 'mm')
-rw-r--r--   mm/slab.c   169
1 file changed, 76 insertions(+), 93 deletions(-)
@@ -318,34 +318,18 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
-/*
- * This function must be completely optimized away if a constant is passed to
- * it. Mostly the same as what is in linux/slab.h except it returns an index.
- */
-static __always_inline int index_of(const size_t size)
-{
-        extern void __bad_size(void);
-
-        if (__builtin_constant_p(size)) {
-                int i = 0;
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_caches);
 
-#define CACHE(x) \
-        if (size <=x) \
-                return i; \
-        else \
-                i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-                __bad_size();
-        } else
-                __bad_size();
-        return 0;
-}
+#ifdef CONFIG_ZONE_DMA
+struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_dma_caches);
+#endif
 
 static int slab_early_init = 1;
 
-#define INDEX_AC index_of(sizeof(struct arraycache_init))
-#define INDEX_L3 index_of(sizeof(struct kmem_list3))
+#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
+#define INDEX_L3 kmalloc_index(sizeof(struct kmem_list3))
 
 static void kmem_list3_init(struct kmem_list3 *parent)
 {
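For context: the deleted index_of() duplicated, inside slab.c, the size-to-index mapping that the common code now provides as kmalloc_index() in <linux/slab.h>, and the two new arrays are what that index selects into. A simplified, illustrative sketch of the mapping (kmalloc_index_sketch is an invented name; the real helper also special-cases the 96- and 192-byte caches and is written so it constant-folds when the size is a compile-time constant):

#include <stddef.h>

/*
 * Illustrative only: assume index i serves objects up to (1 << i) bytes,
 * with the smallest general cache at 8 bytes (index 3). Then
 * kmalloc_caches[kmalloc_index(size)] is the cache backing kmalloc(size).
 */
static inline int kmalloc_index_sketch(size_t size)
{
        int i = 3;

        while (((size_t)1 << i) < size)
                i++;
        return i;
}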
@@ -524,30 +508,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
         return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
-/*
- * These are the default caches for kmalloc. Custom caches can have other sizes.
- */
-struct cache_sizes malloc_sizes[] = {
-#define CACHE(x) { .cs_size = (x) },
-#include <linux/kmalloc_sizes.h>
-        CACHE(ULONG_MAX)
-#undef CACHE
-};
-EXPORT_SYMBOL(malloc_sizes);
-
-/* Must match cache_sizes above. Out of line to keep cache footprint low. */
-struct cache_names {
-        char *name;
-        char *name_dma;
-};
-
-static struct cache_names __initdata cache_names[] = {
-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
-#include <linux/kmalloc_sizes.h>
-        {NULL,}
-#undef CACHE
-};
-
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
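For reference, the deleted malloc_sizes[]/cache_names[] pair was generated with an "x-macro": <linux/kmalloc_sizes.h> is nothing but a list of CACHE(n) invocations, and each includer defines CACHE() to expand every entry into whatever it needs. A self-contained miniature of the pattern (the size list is a local macro here rather than a shared header, and the struct and array names are invented for the example):

/* Stand-in for <linux/kmalloc_sizes.h>: one CACHE(n) per cache size. */
#define KMALLOC_SIZES \
        CACHE(32)     \
        CACHE(64)     \
        CACHE(128)

struct cache_size { unsigned long cs_size; };

/* First expansion of the list: a table of sizes. */
static struct cache_size sizes[] = {
#define CACHE(x) { .cs_size = (x) },
        KMALLOC_SIZES
#undef CACHE
};

/* Second expansion of the same list: a parallel table of names. */
static const char *names[] = {
#define CACHE(x) "size-" #x,
        KMALLOC_SIZES
#undef CACHE
};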
@@ -625,19 +585,23 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 
 static void init_node_lock_keys(int q)
 {
-        struct cache_sizes *s = malloc_sizes;
+        int i;
 
         if (slab_state < UP)
                 return;
 
-        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
                 struct kmem_list3 *l3;
+                struct kmem_cache *cache = kmalloc_caches[i];
+
+                if (!cache)
+                        continue;
 
-                l3 = s->cs_cachep->nodelists[q];
-                if (!l3 || OFF_SLAB(s->cs_cachep))
+                l3 = cache->nodelists[q];
+                if (!l3 || OFF_SLAB(cache))
                         continue;
 
-                slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+                slab_set_lock_classes(cache, &on_slab_l3_key,
                                 &on_slab_alc_key, q);
         }
 }
@@ -705,20 +669,19 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 static inline struct kmem_cache *__find_general_cachep(size_t size,
                                                         gfp_t gfpflags)
 {
-        struct cache_sizes *csizep = malloc_sizes;
+        int i;
 
 #if DEBUG
         /* This happens if someone tries to call
          * kmem_cache_create(), or __kmalloc(), before
          * the generic caches are initialized.
          */
-        BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
+        BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
 #endif
         if (!size)
                 return ZERO_SIZE_PTR;
 
-        while (size > csizep->cs_size)
-                csizep++;
+        i = kmalloc_index(size);
 
         /*
          * Really subtle: The last entry with cs->cs_size==ULONG_MAX
@@ -727,9 +690,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
          */
 #ifdef CONFIG_ZONE_DMA
         if (unlikely(gfpflags & GFP_DMA))
-                return csizep->cs_dmacachep;
+                return kmalloc_dma_caches[i];
 #endif
-        return csizep->cs_cachep;
+        return kmalloc_caches[i];
 }
 
 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
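The old lookup scanned malloc_sizes[] for the first slot at least as large as the request and relied on the trailing ULONG_MAX sentinel (whose cache pointers are NULL) to absorb oversized requests; the new code jumps straight to kmalloc_caches[kmalloc_index(size)], or kmalloc_dma_caches[] under GFP_DMA. A compressed sketch of the retired scan, with an invented three-entry table:

#include <stddef.h>
#include <limits.h>

struct cache_size { unsigned long cs_size; };

static const struct cache_size sizes[] = {
        { 32 }, { 64 }, { 128 },
        { ULONG_MAX },  /* sentinel: oversized requests stop here, caches stay NULL */
};

/*
 * Old scheme: linear scan terminated by the ULONG_MAX sentinel.
 * The patch replaces this with a direct i = kmalloc_index(size) lookup.
 */
static int index_by_scan(size_t size)
{
        int i = 0;

        while (size > sizes[i].cs_size)
                i++;
        return i;
}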
@@ -1602,8 +1565,6 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep)
  */
 void __init kmem_cache_init(void)
 {
-        struct cache_sizes *sizes;
-        struct cache_names *names;
         int i;
 
         kmem_cache = &kmem_cache_boot;
@@ -1657,8 +1618,6 @@ void __init kmem_cache_init(void)
         list_add(&kmem_cache->list, &slab_caches);
 
         /* 2+3) create the kmalloc caches */
-        sizes = malloc_sizes;
-        names = cache_names;
 
         /*
          * Initialize the caches that provide memory for the array cache and the
@@ -1666,35 +1625,39 @@ void __init kmem_cache_init(void)
          * bug.
          */
 
-        sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
-                                        sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+        kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
+                                        kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
 
         if (INDEX_AC != INDEX_L3)
-                sizes[INDEX_L3].cs_cachep =
-                        create_kmalloc_cache(names[INDEX_L3].name,
-                                sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
+                kmalloc_caches[INDEX_L3] =
+                        create_kmalloc_cache("kmalloc-l3",
+                                kmalloc_size(INDEX_L3), ARCH_KMALLOC_FLAGS);
 
         slab_early_init = 0;
 
-        while (sizes->cs_size != ULONG_MAX) {
-                /*
-                 * For performance, all the general caches are L1 aligned.
-                 * This should be particularly beneficial on SMP boxes, as it
-                 * eliminates "false sharing".
-                 * Note for systems short on memory removing the alignment will
-                 * allow tighter packing of the smaller caches.
-                 */
-                if (!sizes->cs_cachep)
-                        sizes->cs_cachep = create_kmalloc_cache(names->name,
-                                        sizes->cs_size, ARCH_KMALLOC_FLAGS);
+        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+                size_t cs_size = kmalloc_size(i);
+
+                if (cs_size < KMALLOC_MIN_SIZE)
+                        continue;
+
+                if (!kmalloc_caches[i]) {
+                        /*
+                         * For performance, all the general caches are L1 aligned.
+                         * This should be particularly beneficial on SMP boxes, as it
+                         * eliminates "false sharing".
+                         * Note for systems short on memory removing the alignment will
+                         * allow tighter packing of the smaller caches.
+                         */
+                        kmalloc_caches[i] = create_kmalloc_cache("kmalloc",
+                                        cs_size, ARCH_KMALLOC_FLAGS);
+                }
 
 #ifdef CONFIG_ZONE_DMA
-                sizes->cs_dmacachep = create_kmalloc_cache(
-                        names->name_dma, sizes->cs_size,
-                        SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
+                kmalloc_dma_caches[i] = create_kmalloc_cache(
+                        "kmalloc-dma", cs_size,
+                        SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
-                sizes++;
-                names++;
         }
         /* 4) Replace the bootstrap head arrays */
         {
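To make the new loop bound concrete, here is a small userspace sketch of which general caches the i = 1 .. PAGE_SHIFT + MAX_ORDER - 1 walk would create, using assumed values (4 KiB pages, MAX_ORDER = 11, KMALLOC_MIN_SIZE = 32) and ignoring both the 96/192-byte special cases of kmalloc_size() and the INDEX_AC/INDEX_L3 entries created earlier:

#include <stdio.h>

#define PAGE_SHIFT       12   /* assumed: 4 KiB pages */
#define MAX_ORDER        11   /* assumed: default buddy allocator limit */
#define KMALLOC_MIN_SIZE 32   /* assumed: arch minimum kmalloc alignment */

int main(void)
{
        int i;

        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
                unsigned long size = 1UL << i;  /* kmalloc_size(i), simplified */

                if (size < KMALLOC_MIN_SIZE)
                        continue;               /* too small for this arch */
                printf("kmalloc-%lu\n", size);  /* 32, 64, ..., 4194304 */
        }
        return 0;
}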
@@ -1713,17 +1676,16 @@ void __init kmem_cache_init(void)
 
                 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-                BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+                BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
                        != &initarray_generic.cache);
-                memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+                memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
                        sizeof(struct arraycache_init));
                 /*
                  * Do not assume that spinlocks can be initialized via memcpy:
                  */
                 spin_lock_init(&ptr->lock);
 
-                malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-                        ptr;
+                kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
         }
         /* 5) Replace the bootstrap kmem_list3's */
         {
@@ -1732,17 +1694,39 @@ void __init kmem_cache_init(void)
                 for_each_online_node(nid) {
                         init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
-                        init_list(malloc_sizes[INDEX_AC].cs_cachep,
+                        init_list(kmalloc_caches[INDEX_AC],
                                   &initkmem_list3[SIZE_AC + nid], nid);
 
                         if (INDEX_AC != INDEX_L3) {
-                                init_list(malloc_sizes[INDEX_L3].cs_cachep,
+                                init_list(kmalloc_caches[INDEX_L3],
                                           &initkmem_list3[SIZE_L3 + nid], nid);
                         }
                 }
         }
 
         slab_state = UP;
+
+        /* Create the proper names */
+        for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+                char *s;
+                struct kmem_cache *c = kmalloc_caches[i];
+
+                if (!c)
+                        continue;
+
+                s = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
+
+                BUG_ON(!s);
+                c->name = s;
+
+#ifdef CONFIG_ZONE_DMA
+                c = kmalloc_dma_caches[i];
+                BUG_ON(!c);
+                s = kasprintf(GFP_NOWAIT, "dma-kmalloc-%d", kmalloc_size(i));
+                BUG_ON(!s);
+                c->name = s;
+#endif
+        }
 }
 
 void __init kmem_cache_init_late(void)
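The naming pass runs only after slab_state = UP, i.e. once kmalloc() itself works, because kasprintf() has to allocate the name strings. A tiny illustration of the names it produces for one populated index (index 5 and the 1 << i size are assumed example values, ignoring the 96/192-byte caches):

#include <stdio.h>

int main(void)
{
        char name[32];
        int i = 5;                        /* assumed example index */
        unsigned long size = 1UL << i;    /* kmalloc_size(i), simplified */

        snprintf(name, sizeof(name), "kmalloc-%lu", size);
        printf("%s\n", name);             /* -> kmalloc-32 */
        snprintf(name, sizeof(name), "dma-kmalloc-%lu", size);
        printf("%s\n", name);             /* -> dma-kmalloc-32 */
        return 0;
}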
@@ -2428,10 +2412,9 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                 size += BYTES_PER_WORD;
         }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-        if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-            && cachep->object_size > cache_line_size()
-            && ALIGN(size, cachep->align) < PAGE_SIZE) {
-                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+        if (size >= kmalloc_size(INDEX_L3 + 1)
+            && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+                cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
                 size = PAGE_SIZE;
         }
 #endif
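A worked example of the FORCED_DEBUG/DEBUG_PAGEALLOC adjustment above, with illustrative numbers not taken from the patch (4 KiB pages, size = 200, align = 64); the idea is to push the object to the end of its page so an overrun spills onto the following page, where CONFIG_DEBUG_PAGEALLOC can catch it:

/*
 *      ALIGN(200, 64)             = 256
 *      obj_offset += 4096 - 256   = 3840
 *      size = PAGE_SIZE           = 4096
 *
 * The object now ends at a page boundary, so a write past its end
 * lands on the next page rather than silently corrupting a neighbour.
 */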