-rw-r--r--  include/linux/kmalloc_sizes.h |  45
-rw-r--r--  include/linux/slab_def.h      |  47
-rw-r--r--  mm/slab.c                     | 169
3 files changed, 88 insertions, 173 deletions
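At its core, the patch replaces the table-driven lookup through struct cache_sizes malloc_sizes[] (generated by repeatedly including kmalloc_sizes.h) with flat arrays of kmem_cache pointers indexed by the generic kmalloc_index() helper. A minimal before/after sketch using identifiers from the hunks below (illustrative only, not a compilable unit):

/* Old scheme: walk malloc_sizes[] until a slot is big enough. */
struct cache_sizes *csizep = malloc_sizes;
while (size > csizep->cs_size)
        csizep++;
cachep = csizep->cs_cachep;

/* New scheme: compute the slot directly from the request size. */
cachep = kmalloc_caches[kmalloc_index(size)];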
diff --git a/include/linux/kmalloc_sizes.h b/include/linux/kmalloc_sizes.h
deleted file mode 100644
index e576b848ce10..000000000000
--- a/include/linux/kmalloc_sizes.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#if (PAGE_SIZE == 4096)
-	CACHE(32)
-#endif
-	CACHE(64)
-#if L1_CACHE_BYTES < 64
-	CACHE(96)
-#endif
-	CACHE(128)
-#if L1_CACHE_BYTES < 128
-	CACHE(192)
-#endif
-	CACHE(256)
-	CACHE(512)
-	CACHE(1024)
-	CACHE(2048)
-	CACHE(4096)
-	CACHE(8192)
-	CACHE(16384)
-	CACHE(32768)
-	CACHE(65536)
-	CACHE(131072)
-#if KMALLOC_MAX_SIZE >= 262144
-	CACHE(262144)
-#endif
-#if KMALLOC_MAX_SIZE >= 524288
-	CACHE(524288)
-#endif
-#if KMALLOC_MAX_SIZE >= 1048576
-	CACHE(1048576)
-#endif
-#if KMALLOC_MAX_SIZE >= 2097152
-	CACHE(2097152)
-#endif
-#if KMALLOC_MAX_SIZE >= 4194304
-	CACHE(4194304)
-#endif
-#if KMALLOC_MAX_SIZE >= 8388608
-	CACHE(8388608)
-#endif
-#if KMALLOC_MAX_SIZE >= 16777216
-	CACHE(16777216)
-#endif
-#if KMALLOC_MAX_SIZE >= 33554432
-	CACHE(33554432)
-#endif
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8bb6e0eaf3c6..e0f30ef9525d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -11,8 +11,6 @@
  */
 
 #include <linux/init.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
 
 /*
@@ -104,15 +102,8 @@ struct kmem_cache {
	 */
 };
 
-/* Size description struct for general caches. */
-struct cache_sizes {
-	size_t cs_size;
-	struct kmem_cache *cs_cachep;
-#ifdef CONFIG_ZONE_DMA
-	struct kmem_cache *cs_dmacachep;
-#endif
-};
-extern struct cache_sizes malloc_sizes[];
+extern struct kmem_cache *kmalloc_caches[PAGE_SHIFT + MAX_ORDER];
+extern struct kmem_cache *kmalloc_dma_caches[PAGE_SHIFT + MAX_ORDER];
 
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
@@ -133,26 +124,19 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
	void *ret;
 
	if (__builtin_constant_p(size)) {
-		int i = 0;
+		int i;
 
		if (!size)
			return ZERO_SIZE_PTR;
 
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-		return NULL;
-found:
+		i = kmalloc_index(size);
+
 #ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
-			cachep = malloc_sizes[i].cs_dmacachep;
+			cachep = kmalloc_dma_caches[i];
		else
 #endif
-			cachep = malloc_sizes[i].cs_cachep;
+			cachep = kmalloc_caches[i];
 
		ret = kmem_cache_alloc_trace(cachep, flags, size);
 
@@ -186,26 +170,19 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
	struct kmem_cache *cachep;
 
	if (__builtin_constant_p(size)) {
-		int i = 0;
+		int i;
 
		if (!size)
			return ZERO_SIZE_PTR;
 
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-		return NULL;
-found:
+		i = kmalloc_index(size);
+
 #ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
-			cachep = malloc_sizes[i].cs_dmacachep;
+			cachep = kmalloc_dma_caches[i];
		else
 #endif
-			cachep = malloc_sizes[i].cs_cachep;
+			cachep = kmalloc_caches[i];
 
		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
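With kmalloc_caches[] and kmalloc_index() replacing the CACHE() expansion, a constant-size request folds down to one array load at compile time. A hedged illustration of the net effect (the size and flags below are only an example, not code from the patch):

/* Roughly what a constant-size kmalloc(128, GFP_KERNEL) now becomes:
 * kmalloc_index(128) is evaluated by the compiler, so picking the
 * cache is a single array dereference. */
void *p = kmem_cache_alloc_trace(kmalloc_caches[kmalloc_index(128)],
				 GFP_KERNEL, 128);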
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -318,34 +318,18 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
-/*
- * This function must be completely optimized away if a constant is passed to
- * it. Mostly the same as what is in linux/slab.h except it returns an index.
- */
-static __always_inline int index_of(const size_t size)
-{
-	extern void __bad_size(void);
-
-	if (__builtin_constant_p(size)) {
-		int i = 0;
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_caches);
 
-#define CACHE(x) \
-		if (size <=x) \
-			return i; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-		__bad_size();
-	} else
-		__bad_size();
-	return 0;
-}
+#ifdef CONFIG_ZONE_DMA
+struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_dma_caches);
+#endif
 
 static int slab_early_init = 1;
 
-#define INDEX_AC index_of(sizeof(struct arraycache_init))
-#define INDEX_L3 index_of(sizeof(struct kmem_list3))
+#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
+#define INDEX_L3 kmalloc_index(sizeof(struct kmem_list3))
 
 static void kmem_list3_init(struct kmem_list3 *parent)
 {
@@ -524,30 +508,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
-/*
- * These are the default caches for kmalloc. Custom caches can have other sizes.
- */
-struct cache_sizes malloc_sizes[] = {
-#define CACHE(x) { .cs_size = (x) },
-#include <linux/kmalloc_sizes.h>
-	CACHE(ULONG_MAX)
-#undef CACHE
-};
-EXPORT_SYMBOL(malloc_sizes);
-
-/* Must match cache_sizes above. Out of line to keep cache footprint low. */
-struct cache_names {
-	char *name;
-	char *name_dma;
-};
-
-static struct cache_names __initdata cache_names[] = {
-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
-#include <linux/kmalloc_sizes.h>
-	{NULL,}
-#undef CACHE
-};
-
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
@@ -625,19 +585,23 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 
 static void init_node_lock_keys(int q)
 {
-	struct cache_sizes *s = malloc_sizes;
+	int i;
 
	if (slab_state < UP)
		return;
 
-	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
		struct kmem_list3 *l3;
+		struct kmem_cache *cache = kmalloc_caches[i];
+
+		if (!cache)
+			continue;
 
-		l3 = s->cs_cachep->nodelists[q];
-		if (!l3 || OFF_SLAB(s->cs_cachep))
+		l3 = cache->nodelists[q];
+		if (!l3 || OFF_SLAB(cache))
			continue;
 
-		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+		slab_set_lock_classes(cache, &on_slab_l3_key,
				&on_slab_alc_key, q);
	}
 }
@@ -705,20 +669,19 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
 {
-	struct cache_sizes *csizep = malloc_sizes;
+	int i;
 
 #if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
-	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
+	BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
 #endif
	if (!size)
		return ZERO_SIZE_PTR;
 
-	while (size > csizep->cs_size)
-		csizep++;
+	i = kmalloc_index(size);
 
	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
@@ -727,9 +690,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
	 */
 #ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
-		return csizep->cs_dmacachep;
+		return kmalloc_dma_caches[i];
 #endif
-	return csizep->cs_cachep;
+	return kmalloc_caches[i];
 }
 
 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
@@ -1602,8 +1565,6 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep)
  */
 void __init kmem_cache_init(void)
 {
-	struct cache_sizes *sizes;
-	struct cache_names *names;
	int i;
 
	kmem_cache = &kmem_cache_boot;
@@ -1657,8 +1618,6 @@ void __init kmem_cache_init(void)
	list_add(&kmem_cache->list, &slab_caches);
 
	/* 2+3) create the kmalloc caches */
-	sizes = malloc_sizes;
-	names = cache_names;
 
	/*
	 * Initialize the caches that provide memory for the array cache and the
@@ -1666,35 +1625,39 @@ void __init kmem_cache_init(void)
	 * bug.
	 */
 
-	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
+					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
 
	if (INDEX_AC != INDEX_L3)
-		sizes[INDEX_L3].cs_cachep =
-			create_kmalloc_cache(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
+		kmalloc_caches[INDEX_L3] =
+			create_kmalloc_cache("kmalloc-l3",
+				kmalloc_size(INDEX_L3), ARCH_KMALLOC_FLAGS);
 
	slab_early_init = 0;
 
-	while (sizes->cs_size != ULONG_MAX) {
-		/*
-		 * For performance, all the general caches are L1 aligned.
-		 * This should be particularly beneficial on SMP boxes, as it
-		 * eliminates "false sharing".
-		 * Note for systems short on memory removing the alignment will
-		 * allow tighter packing of the smaller caches.
-		 */
-		if (!sizes->cs_cachep)
-			sizes->cs_cachep = create_kmalloc_cache(names->name,
-					sizes->cs_size, ARCH_KMALLOC_FLAGS);
+	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+		size_t cs_size = kmalloc_size(i);
+
+		if (cs_size < KMALLOC_MIN_SIZE)
+			continue;
+
+		if (!kmalloc_caches[i]) {
+			/*
+			 * For performance, all the general caches are L1 aligned.
+			 * This should be particularly beneficial on SMP boxes, as it
+			 * eliminates "false sharing".
+			 * Note for systems short on memory removing the alignment will
+			 * allow tighter packing of the smaller caches.
+			 */
+			kmalloc_caches[i] = create_kmalloc_cache("kmalloc",
+							cs_size, ARCH_KMALLOC_FLAGS);
+		}
 
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = create_kmalloc_cache(
-			names->name_dma, sizes->cs_size,
+		kmalloc_dma_caches[i] = create_kmalloc_cache(
+			"kmalloc-dma", cs_size,
			SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
 #endif
-		sizes++;
-		names++;
	}
	/* 4) Replace the bootstrap head arrays */
	{
@@ -1713,17 +1676,16 @@ void __init kmem_cache_init(void)
 
		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
		       != &initarray_generic.cache);
-		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);
 
-		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-				ptr;
+		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
	}
	/* 5) Replace the bootstrap kmem_list3's */
	{
@@ -1732,17 +1694,39 @@ void __init kmem_cache_init(void)
		for_each_online_node(nid) {
			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
-			init_list(malloc_sizes[INDEX_AC].cs_cachep,
+			init_list(kmalloc_caches[INDEX_AC],
				  &initkmem_list3[SIZE_AC + nid], nid);
 
			if (INDEX_AC != INDEX_L3) {
-				init_list(malloc_sizes[INDEX_L3].cs_cachep,
+				init_list(kmalloc_caches[INDEX_L3],
					  &initkmem_list3[SIZE_L3 + nid], nid);
			}
		}
	}
 
	slab_state = UP;
+
+	/* Create the proper names */
+	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+		char *s;
+		struct kmem_cache *c = kmalloc_caches[i];
+
+		if (!c)
+			continue;
+
+		s = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
+
+		BUG_ON(!s);
+		c->name = s;
+
+#ifdef CONFIG_ZONE_DMA
+		c = kmalloc_dma_caches[i];
+		BUG_ON(!c);
+		s = kasprintf(GFP_NOWAIT, "dma-kmalloc-%d", kmalloc_size(i));
+		BUG_ON(!s);
+		c->name = s;
+#endif
+	}
 }
 
 void __init kmem_cache_init_late(void)
@@ -2428,10 +2412,9 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
		size += BYTES_PER_WORD;
	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->object_size > cache_line_size()
-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+	if (size >= kmalloc_size(INDEX_L3 + 1)
+	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
		size = PAGE_SIZE;
	}
 #endif
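The rebuilt kmem_cache_init() loop above sizes and names each cache from its index through kmalloc_size(). A rough sketch of that helper as provided by include/linux/slab.h (assumed behaviour, not part of this diff; the name is suffixed to mark it as an illustration):

/* Sketch: map a cache index back to its size in bytes. */
static __always_inline int kmalloc_size_sketch(int n)
{
	if (n > 2)
		return 1 << n;		/* power-of-two caches */
	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;		/* the odd 96-byte cache */
	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;		/* the odd 192-byte cache */
	return 0;
}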