author		Pekka Enberg <penberg@kernel.org>	2013-05-07 02:19:47 -0400
committer	Pekka Enberg <penberg@kernel.org>	2013-05-07 02:19:47 -0400
commit		69df2ac1288b456a95aceadafbf88cd891a577c8 (patch)
tree		0f2e83a8c4bc826f12d3f3871ecc1d7be0c9e4e3 /mm
parent		c1be5a5b1b355d40e6cf79cc979eb66dafa24ad1 (diff)
parent		8a965b3baa89ffedc73c0fbc750006c631012ced (diff)
Merge branch 'slab/next' into slab/for-linus
Diffstat (limited to 'mm')

-rw-r--r--	mm/slab.c		790
-rw-r--r--	mm/slab.h		 43
-rw-r--r--	mm/slab_common.c	174
-rw-r--r--	mm/slub.c		221

4 files changed, 589 insertions, 639 deletions
@@ -286,68 +286,27 @@ struct arraycache_init {
 };
 
 /*
- * The slab lists for all objects.
- */
-struct kmem_list3 {
-	struct list_head slabs_partial;	/* partial list first, better asm code */
-	struct list_head slabs_full;
-	struct list_head slabs_free;
-	unsigned long free_objects;
-	unsigned int free_limit;
-	unsigned int colour_next;	/* Per-node cache coloring */
-	spinlock_t list_lock;
-	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
-	unsigned long next_reap;	/* updated without locking */
-	int free_touched;		/* updated without locking */
-};
-
-/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 #define	CACHE_CACHE 0
 #define	SIZE_AC MAX_NUMNODES
-#define	SIZE_L3 (2 * MAX_NUMNODES)
+#define	SIZE_NODE (2 * MAX_NUMNODES)
 
 static int drain_freelist(struct kmem_cache *cache,
-			struct kmem_list3 *l3, int tofree);
+			struct kmem_cache_node *n, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
 static void cache_reap(struct work_struct *unused);
 
-/*
- * This function must be completely optimized away if a constant is passed to
- * it. Mostly the same as what is in linux/slab.h except it returns an index.
- */
-static __always_inline int index_of(const size_t size)
-{
-	extern void __bad_size(void);
-
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-
-#define CACHE(x) \
-	if (size <=x) \
-		return i; \
-	else \
-		i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
-		__bad_size();
-	} else
-		__bad_size();
-	return 0;
-}
-
 static int slab_early_init = 1;
 
-#define INDEX_AC index_of(sizeof(struct arraycache_init))
-#define INDEX_L3 index_of(sizeof(struct kmem_list3))
+#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
+#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 
-static void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_cache_node_init(struct kmem_cache_node *parent)
 {
 	INIT_LIST_HEAD(&parent->slabs_full);
 	INIT_LIST_HEAD(&parent->slabs_partial);
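The structure deleted at the top of this hunk does not disappear: the series renames kmem_list3 to kmem_cache_node and moves the definition out of mm/slab.c so it can be shared with SLUB (the diffstat shows mm/slab.h growing by 43 lines). A sketch of the renamed structure, reconstructed from the fields removed above; the exact common definition lives in mm/slab.h:

/* Sketch: per-node slab state, field-for-field from the old kmem_list3. */
struct kmem_cache_node {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* per-node cache colouring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* objects freed from other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};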
@@ -363,7 +322,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
 	do {								\
 		INIT_LIST_HEAD(listp);					\
-		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
+		list_splice(&(cachep->node[nodeid]->slab), listp);	\
 	} while (0)
 
 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
@@ -524,30 +483,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
-/*
- * These are the default caches for kmalloc. Custom caches can have other sizes.
- */
-struct cache_sizes malloc_sizes[] = {
-#define CACHE(x) { .cs_size = (x) },
-#include <linux/kmalloc_sizes.h>
-	CACHE(ULONG_MAX)
-#undef CACHE
-};
-EXPORT_SYMBOL(malloc_sizes);
-
-/* Must match cache_sizes above. Out of line to keep cache footprint low. */
-struct cache_names {
-	char *name;
-	char *name_dma;
-};
-
-static struct cache_names __initdata cache_names[] = {
-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
-#include <linux/kmalloc_sizes.h>
-	{NULL,}
-#undef CACHE
-};
-
 static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
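For reference, the deleted tables were built with an x-macro: each CACHE(x) line in linux/kmalloc_sizes.h expanded to one array entry. With a hypothetical kmalloc_sizes.h containing only CACHE(32) and CACHE(64), the preprocessor would have produced roughly:

struct cache_sizes malloc_sizes[] = {
	{ .cs_size = 32 },
	{ .cs_size = 64 },
	{ .cs_size = ULONG_MAX },	/* sentinel added explicitly above */
};

static struct cache_names __initdata cache_names[] = {
	{ .name = "size-32", .name_dma = "size-32(DMA)" },
	{ .name = "size-64", .name_dma = "size-64(DMA)" },
	{ NULL, },
};

Both tables, and the header that generated them, are replaced by the common kmalloc_caches[] array indexed by kmalloc_index().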
@@ -586,15 +521,15 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 				int q)
 {
 	struct array_cache **alc;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *n;
 	int r;
 
-	l3 = cachep->nodelists[q];
-	if (!l3)
+	n = cachep->node[q];
+	if (!n)
 		return;
 
-	lockdep_set_class(&l3->list_lock, l3_key);
-	alc = l3->alien;
+	lockdep_set_class(&n->list_lock, l3_key);
+	alc = n->alien;
 	/*
 	 * FIXME: This check for BAD_ALIEN_MAGIC
 	 * should go away when common slab code is taught to
@@ -625,28 +560,30 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 
 static void init_node_lock_keys(int q)
 {
-	struct cache_sizes *s = malloc_sizes;
+	int i;
 
 	if (slab_state < UP)
 		return;
 
-	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-		struct kmem_list3 *l3;
+	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+		struct kmem_cache_node *n;
+		struct kmem_cache *cache = kmalloc_caches[i];
+
+		if (!cache)
+			continue;
 
-		l3 = s->cs_cachep->nodelists[q];
-		if (!l3 || OFF_SLAB(s->cs_cachep))
+		n = cache->node[q];
+		if (!n || OFF_SLAB(cache))
			continue;
 
-		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
+		slab_set_lock_classes(cache, &on_slab_l3_key,
 				&on_slab_alc_key, q);
 	}
 }
 
 static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
 {
-	struct kmem_list3 *l3;
-	l3 = cachep->nodelists[q];
-	if (!l3)
+	if (!cachep->node[q])
 		return;
 
 	slab_set_lock_classes(cachep, &on_slab_l3_key,
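The rewritten loop no longer walks a sentinel-terminated size table; it indexes kmalloc_caches[] directly, where slot i roughly holds the cache serving allocations up to 2^i bytes, hence the PAGE_SHIFT + MAX_ORDER bound. A hedged userspace sketch of that size-to-index mapping (the kernel's kmalloc_index() also special-cases the 96- and 192-byte caches, omitted here):

#include <stddef.h>

static int kmalloc_index_sketch(size_t size)
{
	int i = 3;	/* assume the smallest kmalloc cache is 8 bytes (2^3) */

	while (((size_t)1 << i) < size)
		i++;	/* first power of two that fits the request */
	return i;
}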
@@ -702,41 +639,6 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline struct kmem_cache *__find_general_cachep(size_t size,
-							gfp_t gfpflags)
-{
-	struct cache_sizes *csizep = malloc_sizes;
-
-#if DEBUG
-	/* This happens if someone tries to call
-	 * kmem_cache_create(), or __kmalloc(), before
-	 * the generic caches are initialized.
-	 */
-	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
-#endif
-	if (!size)
-		return ZERO_SIZE_PTR;
-
-	while (size > csizep->cs_size)
-		csizep++;
-
-	/*
-	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
-	 * has cs_{dma,}cachep==NULL. Thus no special case
-	 * for large kmalloc calls required.
-	 */
-#ifdef CONFIG_ZONE_DMA
-	if (unlikely(gfpflags & GFP_DMA))
-		return csizep->cs_dmacachep;
-#endif
-	return csizep->cs_cachep;
-}
-
-static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
-{
-	return __find_general_cachep(size, gfpflags);
-}
-
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
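The deleted lookup is not lost either: callers now go through kmalloc_slab() in mm/slab_common.c (visible in the CFLGS_OFF_SLAB hunk further down). Its rough shape, sketched from the behaviour the old code implemented — an assumption about the common code, not its exact text:

struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (!size)
		return ZERO_SIZE_PTR;

	index = kmalloc_index(size);	/* replaces the cs_size table walk */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(flags & GFP_DMA))
		return kmalloc_dma_caches[index];
#endif
	return kmalloc_caches[index];
}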
@@ -938,29 +840,29 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp)
 static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 						struct array_cache *ac)
 {
-	struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()];
+	struct kmem_cache_node *n = cachep->node[numa_mem_id()];
 	struct slab *slabp;
 	unsigned long flags;
 
 	if (!pfmemalloc_active)
 		return;
 
-	spin_lock_irqsave(&l3->list_lock, flags);
-	list_for_each_entry(slabp, &l3->slabs_full, list)
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(slabp, &n->slabs_full, list)
 		if (is_slab_pfmemalloc(slabp))
 			goto out;
 
-	list_for_each_entry(slabp, &l3->slabs_partial, list)
+	list_for_each_entry(slabp, &n->slabs_partial, list)
 		if (is_slab_pfmemalloc(slabp))
 			goto out;
 
-	list_for_each_entry(slabp, &l3->slabs_free, list)
+	list_for_each_entry(slabp, &n->slabs_free, list)
 		if (is_slab_pfmemalloc(slabp))
 			goto out;
 
 	pfmemalloc_active = false;
 out:
-	spin_unlock_irqrestore(&l3->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
@@ -971,7 +873,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 
 	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
 	if (unlikely(is_obj_pfmemalloc(objp))) {
-		struct kmem_list3 *l3;
+		struct kmem_cache_node *n;
 
 		if (gfp_pfmemalloc_allowed(flags)) {
 			clear_obj_pfmemalloc(&objp);
@@ -993,8 +895,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 		 * If there are empty slabs on the slabs_free list and we are
 		 * being forced to refill the cache, mark this one !pfmemalloc.
 		 */
-		l3 = cachep->nodelists[numa_mem_id()];
-		if (!list_empty(&l3->slabs_free) && force_refill) {
+		n = cachep->node[numa_mem_id()];
+		if (!list_empty(&n->slabs_free) && force_refill) {
 			struct slab *slabp = virt_to_slab(objp);
 			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
 			clear_obj_pfmemalloc(&objp);
@@ -1071,7 +973,7 @@ static int transfer_objects(struct array_cache *to,
 #ifndef CONFIG_NUMA
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3) do { } while (0)
+#define reap_alien(cachep, n) do { } while (0)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {
@@ -1143,33 +1045,33 @@ static void free_alien_cache(struct array_cache **ac_ptr)
 static void __drain_alien_cache(struct kmem_cache *cachep,
 				struct array_cache *ac, int node)
 {
-	struct kmem_list3 *rl3 = cachep->nodelists[node];
+	struct kmem_cache_node *n = cachep->node[node];
 
 	if (ac->avail) {
-		spin_lock(&rl3->list_lock);
+		spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
 		 * into the free lists and getting them back later.
 		 */
-		if (rl3->shared)
-			transfer_objects(rl3->shared, ac, ac->limit);
+		if (n->shared)
+			transfer_objects(n->shared, ac, ac->limit);
 
 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
-		spin_unlock(&rl3->list_lock);
+		spin_unlock(&n->list_lock);
 	}
 }
 
 /*
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
-static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 {
 	int node = __this_cpu_read(slab_reap_node);
 
-	if (l3->alien) {
-		struct array_cache *ac = l3->alien[node];
+	if (n->alien) {
+		struct array_cache *ac = n->alien[node];
 
 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
 			__drain_alien_cache(cachep, ac, node);
@@ -1199,7 +1101,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *n;
 	struct array_cache *alien = NULL;
 	int node;
 
@@ -1212,10 +1114,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[node];
+	n = cachep->node[node];
 	STATS_INC_NODEFREES(cachep);
-	if (l3->alien && l3->alien[nodeid]) {
-		alien = l3->alien[nodeid];
+	if (n->alien && n->alien[nodeid]) {
+		alien = n->alien[nodeid];
 		spin_lock(&alien->lock);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
@@ -1224,28 +1126,28 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 		ac_put_obj(cachep, alien, objp);
 		spin_unlock(&alien->lock);
 	} else {
-		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+		spin_lock(&(cachep->node[nodeid])->list_lock);
 		free_block(cachep, &objp, 1, nodeid);
-		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+		spin_unlock(&(cachep->node[nodeid])->list_lock);
 	}
 	return 1;
 }
 #endif
 
 /*
- * Allocates and initializes nodelists for a node on each slab cache, used for
- * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
+ * Allocates and initializes node for a node on each slab cache, used for
+ * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
  * will be allocated off-node since memory is not yet online for the new node.
- * When hotplugging memory or a cpu, existing nodelists are not replaced if
+ * When hotplugging memory or a cpu, existing node are not replaced if
  * already in use.
 *
 * Must hold slab_mutex.
 */
-static int init_cache_nodelists_node(int node)
+static int init_cache_node_node(int node)
 {
 	struct kmem_cache *cachep;
-	struct kmem_list3 *l3;
-	const int memsize = sizeof(struct kmem_list3);
+	struct kmem_cache_node *n;
+	const int memsize = sizeof(struct kmem_cache_node);
 
 	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
@@ -1253,12 +1155,12 @@ static int init_cache_nodelists_node(int node)
 		 * begin anything. Make sure some other cpu on this
 		 * node has not already allocated this
 		 */
-		if (!cachep->nodelists[node]) {
-			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
-			if (!l3)
+		if (!cachep->node[node]) {
+			n = kmalloc_node(memsize, GFP_KERNEL, node);
+			if (!n)
 				return -ENOMEM;
-			kmem_list3_init(l3);
-			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+			kmem_cache_node_init(n);
+			n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
 			/*
@@ -1266,14 +1168,14 @@ static int init_cache_nodelists_node(int node)
 			 * go.  slab_mutex is sufficient
 			 * protection here.
 			 */
-			cachep->nodelists[node] = l3;
+			cachep->node[node] = n;
 		}
 
-		spin_lock_irq(&cachep->nodelists[node]->list_lock);
-		cachep->nodelists[node]->free_limit =
+		spin_lock_irq(&cachep->node[node]->list_lock);
+		cachep->node[node]->free_limit =
 			(1 + nr_cpus_node(node)) *
 			cachep->batchcount + cachep->num;
-		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+		spin_unlock_irq(&cachep->node[node]->list_lock);
 	}
 	return 0;
 }
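The free_limit formula above caps how many free objects a node may hoard before drain_freelist() reclaims whole slabs. A standalone worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	int cpus_on_node = 4;	/* hypothetical node */
	int batchcount = 16;	/* hypothetical cache tuning */
	int num = 32;		/* objects per slab, hypothetical */

	/* same formula as init_cache_node_node() above */
	int free_limit = (1 + cpus_on_node) * batchcount + num;

	printf("free_limit = %d objects\n", free_limit);	/* prints 112 */
	return 0;
}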
@@ -1281,7 +1183,7 @@ static int init_cache_nodelists_node(int node)
 static void __cpuinit cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
-	struct kmem_list3 *l3 = NULL;
+	struct kmem_cache_node *n = NULL;
 	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
@@ -1293,34 +1195,34 @@ static void __cpuinit cpuup_canceled(long cpu)
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
-		l3 = cachep->nodelists[node];
+		n = cachep->node[node];
 
-		if (!l3)
+		if (!n)
 			goto free_array_cache;
 
-		spin_lock_irq(&l3->list_lock);
+		spin_lock_irq(&n->list_lock);
 
-		/* Free limit for this kmem_list3 */
-		l3->free_limit -= cachep->batchcount;
+		/* Free limit for this kmem_cache_node */
+		n->free_limit -= cachep->batchcount;
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
 		if (!cpumask_empty(mask)) {
-			spin_unlock_irq(&l3->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto free_array_cache;
 		}
 
-		shared = l3->shared;
+		shared = n->shared;
 		if (shared) {
 			free_block(cachep, shared->entry,
 				   shared->avail, node);
-			l3->shared = NULL;
+			n->shared = NULL;
 		}
 
-		alien = l3->alien;
-		l3->alien = NULL;
+		alien = n->alien;
+		n->alien = NULL;
 
-		spin_unlock_irq(&l3->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1336,17 +1238,17 @@ free_array_cache:
 	 * shrink each nodelist to its limit.
 	 */
 	list_for_each_entry(cachep, &slab_caches, list) {
-		l3 = cachep->nodelists[node];
-		if (!l3)
+		n = cachep->node[node];
+		if (!n)
 			continue;
-		drain_freelist(cachep, l3, l3->free_objects);
+		drain_freelist(cachep, n, n->free_objects);
 	}
 }
 
 static int __cpuinit cpuup_prepare(long cpu)
 {
 	struct kmem_cache *cachep;
-	struct kmem_list3 *l3 = NULL;
+	struct kmem_cache_node *n = NULL;
 	int node = cpu_to_mem(cpu);
 	int err;
 
@@ -1354,9 +1256,9 @@ static int __cpuinit cpuup_prepare(long cpu)
 	 * We need to do this right in the beginning since
 	 * alloc_arraycache's are going to use this list.
 	 * kmalloc_node allows us to add the slab to the right
-	 * kmem_list3 and not this cpu's kmem_list3
+	 * kmem_cache_node and not this cpu's kmem_cache_node
 	 */
-	err = init_cache_nodelists_node(node);
+	err = init_cache_node_node(node);
 	if (err < 0)
 		goto bad;
 
@@ -1391,25 +1293,25 @@ static int __cpuinit cpuup_prepare(long cpu)
 			}
 		}
 		cachep->array[cpu] = nc;
-		l3 = cachep->nodelists[node];
-		BUG_ON(!l3);
+		n = cachep->node[node];
+		BUG_ON(!n);
 
-		spin_lock_irq(&l3->list_lock);
-		if (!l3->shared) {
+		spin_lock_irq(&n->list_lock);
+		if (!n->shared) {
 			/*
 			 * We are serialised from CPU_DEAD or
 			 * CPU_UP_CANCELLED by the cpucontrol lock
 			 */
-			l3->shared = shared;
+			n->shared = shared;
 			shared = NULL;
 		}
 #ifdef CONFIG_NUMA
-		if (!l3->alien) {
-			l3->alien = alien;
+		if (!n->alien) {
+			n->alien = alien;
 			alien = NULL;
 		}
 #endif
-		spin_unlock_irq(&l3->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1464,9 +1366,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	case CPU_DEAD_FROZEN:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
-		 * kmem_list3 of any cache. This to avoid a race between
+		 * kmem_cache_node of any cache. This to avoid a race between
 		 * cpu_down, and a kmalloc allocation from another cpu for
-		 * memory from the node of the cpu going down.  The list3
+		 * memory from the node of the cpu going down.  The node
 		 * structure is usually allocated from kmem_cache_create() and
 		 * gets destroyed at kmem_cache_destroy().
 		 */
@@ -1494,22 +1396,22 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
 *
 * Must hold slab_mutex.
 */
-static int __meminit drain_cache_nodelists_node(int node)
+static int __meminit drain_cache_node_node(int node)
 {
 	struct kmem_cache *cachep;
 	int ret = 0;
 
 	list_for_each_entry(cachep, &slab_caches, list) {
-		struct kmem_list3 *l3;
+		struct kmem_cache_node *n;
 
-		l3 = cachep->nodelists[node];
-		if (!l3)
+		n = cachep->node[node];
+		if (!n)
 			continue;
 
-		drain_freelist(cachep, l3, l3->free_objects);
+		drain_freelist(cachep, n, n->free_objects);
 
-		if (!list_empty(&l3->slabs_full) ||
-		    !list_empty(&l3->slabs_partial)) {
+		if (!list_empty(&n->slabs_full) ||
+		    !list_empty(&n->slabs_partial)) {
 			ret = -EBUSY;
 			break;
 		}
@@ -1531,12 +1433,12 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 	switch (action) {
 	case MEM_GOING_ONLINE:
 		mutex_lock(&slab_mutex);
-		ret = init_cache_nodelists_node(nid);
+		ret = init_cache_node_node(nid);
 		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_GOING_OFFLINE:
 		mutex_lock(&slab_mutex);
-		ret = drain_cache_nodelists_node(nid);
+		ret = drain_cache_node_node(nid);
 		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_ONLINE:
@@ -1551,37 +1453,37 @@ out:
 #endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
 
 /*
- * swap the static kmem_list3 with kmalloced memory
+ * swap the static kmem_cache_node with kmalloced memory
 */
-static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
+static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
 				int nodeid)
 {
-	struct kmem_list3 *ptr;
+	struct kmem_cache_node *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	memcpy(ptr, list, sizeof(struct kmem_list3));
+	memcpy(ptr, list, sizeof(struct kmem_cache_node));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
 	spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
-	cachep->nodelists[nodeid] = ptr;
+	cachep->node[nodeid] = ptr;
 }
 
 /*
- * For setting up all the kmem_list3s for cache whose buffer_size is same as
- * size of kmem_list3.
+ * For setting up all the kmem_cache_node for cache whose buffer_size is same as
+ * size of kmem_cache_node.
 */
-static void __init set_up_list3s(struct kmem_cache *cachep, int index)
+static void __init set_up_node(struct kmem_cache *cachep, int index)
 {
 	int node;
 
 	for_each_online_node(node) {
-		cachep->nodelists[node] = &initkmem_list3[index + node];
-		cachep->nodelists[node]->next_reap = jiffies +
+		cachep->node[node] = &init_kmem_cache_node[index + node];
+		cachep->node[node]->next_reap = jiffies +
 		    REAPTIMEOUT_LIST3 +
 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 	}
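init_list() above shows why the bootstrap swap needs MAKE_ALL_LISTS and a fresh spin_lock_init(): memcpy() copies the list_head pointers verbatim, so they still point into the static init_kmem_cache_node[] entry. Per list, the macro expands to a re-initialize-and-splice pattern; a sketch with one list shown (at this point cachep->node[nodeid] still refers to the old static node):

/* after: memcpy(ptr, list, sizeof(struct kmem_cache_node)); */
INIT_LIST_HEAD(&ptr->slabs_full);		/* forget the stale copied head */
list_splice(&cachep->node[nodeid]->slabs_full,	/* move the old node's slabs */
	    &ptr->slabs_full);			/* onto the new, kmalloc'd node */
/* ...likewise for slabs_partial and slabs_free, then cachep->node[nodeid] = ptr */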
@@ -1589,11 +1491,11 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
 
 /*
  * The memory after the last cpu cache pointer is used for the
- * the nodelists pointer.
+ * the node pointer.
 */
-static void setup_nodelists_pointer(struct kmem_cache *cachep)
+static void setup_node_pointer(struct kmem_cache *cachep)
 {
-	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
 }
 
 /*
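setup_node_pointer() relies on kmem_cache being allocated with room for two variable-length arrays back to back: nr_cpu_ids per-cpu array_cache pointers, then nr_node_ids per-node pointers (the create_boot_cache() call in a later hunk sizes it exactly that way). A reduced sketch of the layout arithmetic; the struct and field names here are illustrative stand-ins, not the real kmem_cache definition:

#include <stddef.h>

struct array_cache;
struct kmem_cache_node;

struct kmem_cache_sketch {		/* reduced: only the tail matters here */
	unsigned int batchcount;	/* stand-in for the real header fields */
	struct array_cache *array[];	/* nr_cpu_ids entries, followed in */
};					/* memory by nr_node_ids node pointers */

static size_t boot_cache_size(size_t nr_cpu_ids, size_t nr_node_ids)
{
	return offsetof(struct kmem_cache_sketch, array) +
	       nr_cpu_ids * sizeof(struct array_cache *) +
	       nr_node_ids * sizeof(struct kmem_cache_node *);
}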
@@ -1602,20 +1504,18 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep)
 */
 void __init kmem_cache_init(void)
 {
-	struct cache_sizes *sizes;
-	struct cache_names *names;
 	int i;
 
 	kmem_cache = &kmem_cache_boot;
-	setup_nodelists_pointer(kmem_cache);
+	setup_node_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++)
-		kmem_list3_init(&initkmem_list3[i]);
+		kmem_cache_node_init(&init_kmem_cache_node[i]);
 
-	set_up_list3s(kmem_cache, CACHE_CACHE);
+	set_up_node(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1631,7 +1531,7 @@ void __init kmem_cache_init(void)
 	 *    kmem_cache structures of all caches, except kmem_cache itself:
 	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
-	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
+	 *    kmem_cache_node structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
 	 * 2) Create the first kmalloc cache.
 	 *    The struct kmem_cache for the new cache is allocated normally.
@@ -1640,7 +1540,7 @@ void __init kmem_cache_init(void)
 	 *    head arrays.
 	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
+	 * 5) Replace the __init data for kmem_cache_node for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
@@ -1652,50 +1552,28 @@ void __init kmem_cache_init(void)
 	 */
 	create_boot_cache(kmem_cache, "kmem_cache",
 		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
-				  nr_node_ids * sizeof(struct kmem_list3 *),
+				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN);
 	list_add(&kmem_cache->list, &slab_caches);
 
 	/* 2+3) create the kmalloc caches */
-	sizes = malloc_sizes;
-	names = cache_names;
 
 	/*
 	 * Initialize the caches that provide memory for the array cache and the
-	 * kmem_list3 structures first.  Without this, further allocations will
+	 * kmem_cache_node structures first.  Without this, further allocations will
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
+	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
+					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
 
-	if (INDEX_AC != INDEX_L3)
-		sizes[INDEX_L3].cs_cachep =
-			create_kmalloc_cache(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
+	if (INDEX_AC != INDEX_NODE)
+		kmalloc_caches[INDEX_NODE] =
+			create_kmalloc_cache("kmalloc-node",
+				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
 
 	slab_early_init = 0;
 
-	while (sizes->cs_size != ULONG_MAX) {
-		/*
-		 * For performance, all the general caches are L1 aligned.
-		 * This should be particularly beneficial on SMP boxes, as it
-		 * eliminates "false sharing".
-		 * Note for systems short on memory removing the alignment will
-		 * allow tighter packing of the smaller caches.
-		 */
-		if (!sizes->cs_cachep)
-			sizes->cs_cachep = create_kmalloc_cache(names->name,
-					sizes->cs_size, ARCH_KMALLOC_FLAGS);
-
-#ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = create_kmalloc_cache(
-			names->name_dma, sizes->cs_size,
-			SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
-#endif
-		sizes++;
-		names++;
-	}
 	/* 4) Replace the bootstrap head arrays */
 	{
 		struct array_cache *ptr;
@@ -1713,36 +1591,35 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
+		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
 		       != &initarray_generic.cache);
-		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
+		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
 		       sizeof(struct arraycache_init));
 		/*
 		 * Do not assume that spinlocks can be initialized via memcpy:
 		 */
 		spin_lock_init(&ptr->lock);
 
-		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
-				ptr;
+		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
 	}
-	/* 5) Replace the bootstrap kmem_list3's */
+	/* 5) Replace the bootstrap kmem_cache_node */
 	{
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
 
-			init_list(malloc_sizes[INDEX_AC].cs_cachep,
-				  &initkmem_list3[SIZE_AC + nid], nid);
+			init_list(kmalloc_caches[INDEX_AC],
+				  &init_kmem_cache_node[SIZE_AC + nid], nid);
 
-			if (INDEX_AC != INDEX_L3) {
-				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-					  &initkmem_list3[SIZE_L3 + nid], nid);
+			if (INDEX_AC != INDEX_NODE) {
+				init_list(kmalloc_caches[INDEX_NODE],
					  &init_kmem_cache_node[SIZE_NODE + nid], nid);
 			}
 		}
 	}
 
-	slab_state = UP;
+	create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
 }
 
 void __init kmem_cache_init_late(void)
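The per-size creation loop deleted above moves into common code: kmem_cache_init() now ends with create_kmalloc_caches(ARCH_KMALLOC_FLAGS), shared with SLUB (this is where mm/slab_common.c gains most of its 174 new lines). A sketch of its core loop — an assumption about the common code, which also handles the 96/192-byte caches, cache naming, and the DMA variants:

void __init create_kmalloc_caches_sketch(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])		/* skip caches made during early boot */
			kmalloc_caches[i] =
				create_kmalloc_cache(NULL, 1 << i, flags);
	}
	slab_state = UP;	/* presumably where the old "slab_state = UP" went */
}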
@@ -1773,7 +1650,7 @@ void __init kmem_cache_init_late(void)
 #ifdef CONFIG_NUMA
 	/*
 	 * Register a memory hotplug callback that initializes and frees
-	 * nodelists.
+	 * node.
 	 */
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 #endif
@@ -1803,7 +1680,7 @@ __initcall(cpucache_init);
 static noinline void
 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *n;
 	struct slab *slabp;
 	unsigned long flags;
 	int node;
@@ -1818,24 +1695,24 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
 		unsigned long active_slabs = 0, num_slabs = 0;
 
-		l3 = cachep->nodelists[node];
-		if (!l3)
+		n = cachep->node[node];
+		if (!n)
 			continue;
 
-		spin_lock_irqsave(&l3->list_lock, flags);
-		list_for_each_entry(slabp, &l3->slabs_full, list) {
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_for_each_entry(slabp, &n->slabs_full, list) {
 			active_objs += cachep->num;
 			active_slabs++;
 		}
-		list_for_each_entry(slabp, &l3->slabs_partial, list) {
+		list_for_each_entry(slabp, &n->slabs_partial, list) {
 			active_objs += slabp->inuse;
 			active_slabs++;
 		}
-		list_for_each_entry(slabp, &l3->slabs_free, list)
+		list_for_each_entry(slabp, &n->slabs_free, list)
 			num_slabs++;
 
-		free_objects += l3->free_objects;
-		spin_unlock_irqrestore(&l3->list_lock, flags);
+		free_objects += n->free_objects;
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		num_slabs += active_slabs;
 		num_objs = num_slabs * cachep->num;
@@ -2260,7 +2137,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 	if (slab_state == DOWN) {
 		/*
 		 * Note: Creation of first cache (kmem_cache).
-		 * The setup_list3s is taken care
+		 * The setup_node is taken care
 		 * of by the caller of __kmem_cache_create
 		 */
 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
@@ -2274,13 +2151,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
 
 		/*
-		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
-		 * the second cache, then we need to set up all its list3s,
+		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
+		 * the second cache, then we need to set up all its node/,
 		 * otherwise the creation of further caches will BUG().
 		 */
-		set_up_list3s(cachep, SIZE_AC);
-		if (INDEX_AC == INDEX_L3)
-			slab_state = PARTIAL_L3;
+		set_up_node(cachep, SIZE_AC);
+		if (INDEX_AC == INDEX_NODE)
+			slab_state = PARTIAL_NODE;
 		else
 			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
@@ -2289,20 +2166,20 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
 		if (slab_state == PARTIAL_ARRAYCACHE) {
-			set_up_list3s(cachep, SIZE_L3);
-			slab_state = PARTIAL_L3;
+			set_up_node(cachep, SIZE_NODE);
+			slab_state = PARTIAL_NODE;
 		} else {
 			int node;
 			for_each_online_node(node) {
-				cachep->nodelists[node] =
-				    kmalloc_node(sizeof(struct kmem_list3),
+				cachep->node[node] =
+				    kmalloc_node(sizeof(struct kmem_cache_node),
						gfp, node);
-				BUG_ON(!cachep->nodelists[node]);
-				kmem_list3_init(cachep->nodelists[node]);
+				BUG_ON(!cachep->node[node]);
+				kmem_cache_node_init(cachep->node[node]);
 			}
 		}
 	}
-	cachep->nodelists[numa_mem_id()]->next_reap =
+	cachep->node[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_LIST3 +
			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
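The next_reap initialization repeated here and in init_cache_node_node() deliberately adds ((unsigned long)cachep) % REAPTIMEOUT_LIST3: each cache's own address supplies a cheap pseudo-random phase, so the periodic cache_reap() work of many caches does not all expire on the same tick. Standalone illustration, all values hypothetical:

#include <stdio.h>

#define REAPTIMEOUT	400UL	/* stand-in for REAPTIMEOUT_LIST3 */

int main(void)
{
	unsigned long jiffies = 100000UL;		/* hypothetical */
	unsigned long cache_a = 0x8800331d2a40UL;	/* fake cache addresses */
	unsigned long cache_b = 0x8800331d5e80UL;

	printf("cache A first reap at %lu\n",
	       jiffies + REAPTIMEOUT + cache_a % REAPTIMEOUT);
	printf("cache B first reap at %lu\n",
	       jiffies + REAPTIMEOUT + cache_b % REAPTIMEOUT);
	return 0;	/* different offsets: the deadlines are staggered */
}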
@@ -2405,7 +2282,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2405 | else | 2282 | else |
2406 | gfp = GFP_NOWAIT; | 2283 | gfp = GFP_NOWAIT; |
2407 | 2284 | ||
2408 | setup_nodelists_pointer(cachep); | 2285 | setup_node_pointer(cachep); |
2409 | #if DEBUG | 2286 | #if DEBUG |
2410 | 2287 | ||
2411 | /* | 2288 | /* |
@@ -2428,7 +2305,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2428 | size += BYTES_PER_WORD; | 2305 | size += BYTES_PER_WORD; |
2429 | } | 2306 | } |
2430 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2307 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
2431 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size | 2308 | if (size >= kmalloc_size(INDEX_NODE + 1) |
2432 | && cachep->object_size > cache_line_size() | 2309 | && cachep->object_size > cache_line_size() |
2433 | && ALIGN(size, cachep->align) < PAGE_SIZE) { | 2310 | && ALIGN(size, cachep->align) < PAGE_SIZE) { |
2434 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); | 2311 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); |
@@ -2499,7 +2376,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2499 | cachep->reciprocal_buffer_size = reciprocal_value(size); | 2376 | cachep->reciprocal_buffer_size = reciprocal_value(size); |
2500 | 2377 | ||
2501 | if (flags & CFLGS_OFF_SLAB) { | 2378 | if (flags & CFLGS_OFF_SLAB) { |
2502 | cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); | 2379 | cachep->slabp_cache = kmalloc_slab(slab_size, 0u); |
2503 | /* | 2380 | /* |
2504 | * This is a possibility for one of the malloc_sizes caches. | 2381 | * This is a possibility for one of the malloc_sizes caches. |
2505 | * But since we go off slab only for object size greater than | 2382 | * But since we go off slab only for object size greater than |
@@ -2545,7 +2422,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep) | |||
2545 | { | 2422 | { |
2546 | #ifdef CONFIG_SMP | 2423 | #ifdef CONFIG_SMP |
2547 | check_irq_off(); | 2424 | check_irq_off(); |
2548 | assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock); | 2425 | assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock); |
2549 | #endif | 2426 | #endif |
2550 | } | 2427 | } |
2551 | 2428 | ||
@@ -2553,7 +2430,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |||
2553 | { | 2430 | { |
2554 | #ifdef CONFIG_SMP | 2431 | #ifdef CONFIG_SMP |
2555 | check_irq_off(); | 2432 | check_irq_off(); |
2556 | assert_spin_locked(&cachep->nodelists[node]->list_lock); | 2433 | assert_spin_locked(&cachep->node[node]->list_lock); |
2557 | #endif | 2434 | #endif |
2558 | } | 2435 | } |
2559 | 2436 | ||
@@ -2564,7 +2441,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |||
2564 | #define check_spinlock_acquired_node(x, y) do { } while(0) | 2441 | #define check_spinlock_acquired_node(x, y) do { } while(0) |
2565 | #endif | 2442 | #endif |
2566 | 2443 | ||
2567 | static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | 2444 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, |
2568 | struct array_cache *ac, | 2445 | struct array_cache *ac, |
2569 | int force, int node); | 2446 | int force, int node); |
2570 | 2447 | ||
@@ -2576,29 +2453,29 @@ static void do_drain(void *arg) | |||
2576 | 2453 | ||
2577 | check_irq_off(); | 2454 | check_irq_off(); |
2578 | ac = cpu_cache_get(cachep); | 2455 | ac = cpu_cache_get(cachep); |
2579 | spin_lock(&cachep->nodelists[node]->list_lock); | 2456 | spin_lock(&cachep->node[node]->list_lock); |
2580 | free_block(cachep, ac->entry, ac->avail, node); | 2457 | free_block(cachep, ac->entry, ac->avail, node); |
2581 | spin_unlock(&cachep->nodelists[node]->list_lock); | 2458 | spin_unlock(&cachep->node[node]->list_lock); |
2582 | ac->avail = 0; | 2459 | ac->avail = 0; |
2583 | } | 2460 | } |
2584 | 2461 | ||
2585 | static void drain_cpu_caches(struct kmem_cache *cachep) | 2462 | static void drain_cpu_caches(struct kmem_cache *cachep) |
2586 | { | 2463 | { |
2587 | struct kmem_list3 *l3; | 2464 | struct kmem_cache_node *n; |
2588 | int node; | 2465 | int node; |
2589 | 2466 | ||
2590 | on_each_cpu(do_drain, cachep, 1); | 2467 | on_each_cpu(do_drain, cachep, 1); |
2591 | check_irq_on(); | 2468 | check_irq_on(); |
2592 | for_each_online_node(node) { | 2469 | for_each_online_node(node) { |
2593 | l3 = cachep->nodelists[node]; | 2470 | n = cachep->node[node]; |
2594 | if (l3 && l3->alien) | 2471 | if (n && n->alien) |
2595 | drain_alien_cache(cachep, l3->alien); | 2472 | drain_alien_cache(cachep, n->alien); |
2596 | } | 2473 | } |
2597 | 2474 | ||
2598 | for_each_online_node(node) { | 2475 | for_each_online_node(node) { |
2599 | l3 = cachep->nodelists[node]; | 2476 | n = cachep->node[node]; |
2600 | if (l3) | 2477 | if (n) |
2601 | drain_array(cachep, l3, l3->shared, 1, node); | 2478 | drain_array(cachep, n, n->shared, 1, node); |
2602 | } | 2479 | } |
2603 | } | 2480 | } |
2604 | 2481 | ||
@@ -2609,19 +2486,19 @@ static void drain_cpu_caches(struct kmem_cache *cachep) | |||
2609 | * Returns the actual number of slabs released. | 2486 | * Returns the actual number of slabs released. |
2610 | */ | 2487 | */ |
2611 | static int drain_freelist(struct kmem_cache *cache, | 2488 | static int drain_freelist(struct kmem_cache *cache, |
2612 | struct kmem_list3 *l3, int tofree) | 2489 | struct kmem_cache_node *n, int tofree) |
2613 | { | 2490 | { |
2614 | struct list_head *p; | 2491 | struct list_head *p; |
2615 | int nr_freed; | 2492 | int nr_freed; |
2616 | struct slab *slabp; | 2493 | struct slab *slabp; |
2617 | 2494 | ||
2618 | nr_freed = 0; | 2495 | nr_freed = 0; |
2619 | while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { | 2496 | while (nr_freed < tofree && !list_empty(&n->slabs_free)) { |
2620 | 2497 | ||
2621 | spin_lock_irq(&l3->list_lock); | 2498 | spin_lock_irq(&n->list_lock); |
2622 | p = l3->slabs_free.prev; | 2499 | p = n->slabs_free.prev; |
2623 | if (p == &l3->slabs_free) { | 2500 | if (p == &n->slabs_free) { |
2624 | spin_unlock_irq(&l3->list_lock); | 2501 | spin_unlock_irq(&n->list_lock); |
2625 | goto out; | 2502 | goto out; |
2626 | } | 2503 | } |
2627 | 2504 | ||
@@ -2634,8 +2511,8 @@ static int drain_freelist(struct kmem_cache *cache, | |||
2634 | * Safe to drop the lock. The slab is no longer linked | 2511 | * Safe to drop the lock. The slab is no longer linked |
2635 | * to the cache. | 2512 | * to the cache. |
2636 | */ | 2513 | */ |
2637 | l3->free_objects -= cache->num; | 2514 | n->free_objects -= cache->num; |
2638 | spin_unlock_irq(&l3->list_lock); | 2515 | spin_unlock_irq(&n->list_lock); |
2639 | slab_destroy(cache, slabp); | 2516 | slab_destroy(cache, slabp); |
2640 | nr_freed++; | 2517 | nr_freed++; |
2641 | } | 2518 | } |
@@ -2647,20 +2524,20 @@ out: | |||
2647 | static int __cache_shrink(struct kmem_cache *cachep) | 2524 | static int __cache_shrink(struct kmem_cache *cachep) |
2648 | { | 2525 | { |
2649 | int ret = 0, i = 0; | 2526 | int ret = 0, i = 0; |
2650 | struct kmem_list3 *l3; | 2527 | struct kmem_cache_node *n; |
2651 | 2528 | ||
2652 | drain_cpu_caches(cachep); | 2529 | drain_cpu_caches(cachep); |
2653 | 2530 | ||
2654 | check_irq_on(); | 2531 | check_irq_on(); |
2655 | for_each_online_node(i) { | 2532 | for_each_online_node(i) { |
2656 | l3 = cachep->nodelists[i]; | 2533 | n = cachep->node[i]; |
2657 | if (!l3) | 2534 | if (!n) |
2658 | continue; | 2535 | continue; |
2659 | 2536 | ||
2660 | drain_freelist(cachep, l3, l3->free_objects); | 2537 | drain_freelist(cachep, n, n->free_objects); |
2661 | 2538 | ||
2662 | ret += !list_empty(&l3->slabs_full) || | 2539 | ret += !list_empty(&n->slabs_full) || |
2663 | !list_empty(&l3->slabs_partial); | 2540 | !list_empty(&n->slabs_partial); |
2664 | } | 2541 | } |
2665 | return (ret ? 1 : 0); | 2542 | return (ret ? 1 : 0); |
2666 | } | 2543 | } |
@@ -2689,7 +2566,7 @@ EXPORT_SYMBOL(kmem_cache_shrink); | |||
2689 | int __kmem_cache_shutdown(struct kmem_cache *cachep) | 2566 | int __kmem_cache_shutdown(struct kmem_cache *cachep) |
2690 | { | 2567 | { |
2691 | int i; | 2568 | int i; |
2692 | struct kmem_list3 *l3; | 2569 | struct kmem_cache_node *n; |
2693 | int rc = __cache_shrink(cachep); | 2570 | int rc = __cache_shrink(cachep); |
2694 | 2571 | ||
2695 | if (rc) | 2572 | if (rc) |
@@ -2698,13 +2575,13 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) | |||
2698 | for_each_online_cpu(i) | 2575 | for_each_online_cpu(i) |
2699 | kfree(cachep->array[i]); | 2576 | kfree(cachep->array[i]); |
2700 | 2577 | ||
2701 | /* NUMA: free the list3 structures */ | 2578 | /* NUMA: free the node structures */ |
2702 | for_each_online_node(i) { | 2579 | for_each_online_node(i) { |
2703 | l3 = cachep->nodelists[i]; | 2580 | n = cachep->node[i]; |
2704 | if (l3) { | 2581 | if (n) { |
2705 | kfree(l3->shared); | 2582 | kfree(n->shared); |
2706 | free_alien_cache(l3->alien); | 2583 | free_alien_cache(n->alien); |
2707 | kfree(l3); | 2584 | kfree(n); |
2708 | } | 2585 | } |
2709 | } | 2586 | } |
2710 | return 0; | 2587 | return 0; |
@@ -2886,7 +2763,7 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2886 | struct slab *slabp; | 2763 | struct slab *slabp; |
2887 | size_t offset; | 2764 | size_t offset; |
2888 | gfp_t local_flags; | 2765 | gfp_t local_flags; |
2889 | struct kmem_list3 *l3; | 2766 | struct kmem_cache_node *n; |
2890 | 2767 | ||
2891 | /* | 2768 | /* |
2892 | * Be lazy and only check for valid flags here, keeping it out of the | 2769 | * Be lazy and only check for valid flags here, keeping it out of the |
@@ -2895,17 +2772,17 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2895 | BUG_ON(flags & GFP_SLAB_BUG_MASK); | 2772 | BUG_ON(flags & GFP_SLAB_BUG_MASK); |
2896 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); | 2773 | local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); |
2897 | 2774 | ||
2898 | /* Take the l3 list lock to change the colour_next on this node */ | 2775 | /* Take the node list lock to change the colour_next on this node */ |
2899 | check_irq_off(); | 2776 | check_irq_off(); |
2900 | l3 = cachep->nodelists[nodeid]; | 2777 | n = cachep->node[nodeid]; |
2901 | spin_lock(&l3->list_lock); | 2778 | spin_lock(&n->list_lock); |
2902 | 2779 | ||
2903 | /* Get colour for the slab, and calculate the next value. */ | 2780 | /* Get colour for the slab, and calculate the next value. */ |
2904 | offset = l3->colour_next; | 2781 | offset = n->colour_next; |
2905 | l3->colour_next++; | 2782 | n->colour_next++; |
2906 | if (l3->colour_next >= cachep->colour) | 2783 | if (n->colour_next >= cachep->colour) |
2907 | l3->colour_next = 0; | 2784 | n->colour_next = 0; |
2908 | spin_unlock(&l3->list_lock); | 2785 | spin_unlock(&n->list_lock); |
2909 | 2786 | ||
2910 | offset *= cachep->colour_off; | 2787 | offset *= cachep->colour_off; |
2911 | 2788 | ||
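colour_next advances round-robin under the node's list lock so that consecutive slabs start their objects at different cache-line offsets, spreading them across the CPU cache. A standalone toy showing the offsets this produces (the colour and colour_off values are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int colour = 4;        /* cachep->colour: number of distinct offsets */
	unsigned int colour_off = 64;   /* cachep->colour_off: one cache line per step */
	unsigned int colour_next = 0;   /* n->colour_next, advanced under list_lock */

	for (int slab = 0; slab < 6; slab++) {
		unsigned int offset = colour_next++;
		if (colour_next >= colour)
			colour_next = 0;
		offset *= colour_off;
		printf("slab %d: objects start at offset %u\n", slab, offset);
	}
	return 0;                       /* offsets cycle 0, 64, 128, 192, 0, 64 */
}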
@@ -2942,13 +2819,13 @@ static int cache_grow(struct kmem_cache *cachep, | |||
2942 | if (local_flags & __GFP_WAIT) | 2819 | if (local_flags & __GFP_WAIT) |
2943 | local_irq_disable(); | 2820 | local_irq_disable(); |
2944 | check_irq_off(); | 2821 | check_irq_off(); |
2945 | spin_lock(&l3->list_lock); | 2822 | spin_lock(&n->list_lock); |
2946 | 2823 | ||
2947 | /* Make slab active. */ | 2824 | /* Make slab active. */ |
2948 | list_add_tail(&slabp->list, &(l3->slabs_free)); | 2825 | list_add_tail(&slabp->list, &(n->slabs_free)); |
2949 | STATS_INC_GROWN(cachep); | 2826 | STATS_INC_GROWN(cachep); |
2950 | l3->free_objects += cachep->num; | 2827 | n->free_objects += cachep->num; |
2951 | spin_unlock(&l3->list_lock); | 2828 | spin_unlock(&n->list_lock); |
2952 | return 1; | 2829 | return 1; |
2953 | opps1: | 2830 | opps1: |
2954 | kmem_freepages(cachep, objp); | 2831 | kmem_freepages(cachep, objp); |
@@ -3076,7 +2953,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, | |||
3076 | bool force_refill) | 2953 | bool force_refill) |
3077 | { | 2954 | { |
3078 | int batchcount; | 2955 | int batchcount; |
3079 | struct kmem_list3 *l3; | 2956 | struct kmem_cache_node *n; |
3080 | struct array_cache *ac; | 2957 | struct array_cache *ac; |
3081 | int node; | 2958 | int node; |
3082 | 2959 | ||
@@ -3095,14 +2972,14 @@ retry: | |||
3095 | */ | 2972 | */ |
3096 | batchcount = BATCHREFILL_LIMIT; | 2973 | batchcount = BATCHREFILL_LIMIT; |
3097 | } | 2974 | } |
3098 | l3 = cachep->nodelists[node]; | 2975 | n = cachep->node[node]; |
3099 | 2976 | ||
3100 | BUG_ON(ac->avail > 0 || !l3); | 2977 | BUG_ON(ac->avail > 0 || !n); |
3101 | spin_lock(&l3->list_lock); | 2978 | spin_lock(&n->list_lock); |
3102 | 2979 | ||
3103 | /* See if we can refill from the shared array */ | 2980 | /* See if we can refill from the shared array */ |
3104 | if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) { | 2981 | if (n->shared && transfer_objects(ac, n->shared, batchcount)) { |
3105 | l3->shared->touched = 1; | 2982 | n->shared->touched = 1; |
3106 | goto alloc_done; | 2983 | goto alloc_done; |
3107 | } | 2984 | } |
3108 | 2985 | ||
@@ -3110,11 +2987,11 @@ retry: | |||
3110 | struct list_head *entry; | 2987 | struct list_head *entry; |
3111 | struct slab *slabp; | 2988 | struct slab *slabp; |
3112 | /* Get the slab the allocation is to come from. */ | 2989 | /* Get the slab the allocation is to come from. */ |
3113 | entry = l3->slabs_partial.next; | 2990 | entry = n->slabs_partial.next; |
3114 | if (entry == &l3->slabs_partial) { | 2991 | if (entry == &n->slabs_partial) { |
3115 | l3->free_touched = 1; | 2992 | n->free_touched = 1; |
3116 | entry = l3->slabs_free.next; | 2993 | entry = n->slabs_free.next; |
3117 | if (entry == &l3->slabs_free) | 2994 | if (entry == &n->slabs_free) |
3118 | goto must_grow; | 2995 | goto must_grow; |
3119 | } | 2996 | } |
3120 | 2997 | ||
@@ -3142,15 +3019,15 @@ retry: | |||
3142 | /* move slabp to correct slabp list: */ | 3019 | /* move slabp to correct slabp list: */ |
3143 | list_del(&slabp->list); | 3020 | list_del(&slabp->list); |
3144 | if (slabp->free == BUFCTL_END) | 3021 | if (slabp->free == BUFCTL_END) |
3145 | list_add(&slabp->list, &l3->slabs_full); | 3022 | list_add(&slabp->list, &n->slabs_full); |
3146 | else | 3023 | else |
3147 | list_add(&slabp->list, &l3->slabs_partial); | 3024 | list_add(&slabp->list, &n->slabs_partial); |
3148 | } | 3025 | } |
3149 | 3026 | ||
3150 | must_grow: | 3027 | must_grow: |
3151 | l3->free_objects -= ac->avail; | 3028 | n->free_objects -= ac->avail; |
3152 | alloc_done: | 3029 | alloc_done: |
3153 | spin_unlock(&l3->list_lock); | 3030 | spin_unlock(&n->list_lock); |
3154 | 3031 | ||
3155 | if (unlikely(!ac->avail)) { | 3032 | if (unlikely(!ac->avail)) { |
3156 | int x; | 3033 | int x; |
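cache_alloc_refill() tries its sources cheapest first: the per-node shared array, then partial slabs, then free slabs, and only then cache_grow(). A minimal sketch of the first step, modelled on transfer_objects(); plain arrays stand in for struct array_cache, and none of this is the kernel layout:

#include <stdio.h>

#define ENTRIES 16
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct array_cache {                     /* illustrative, not the kernel struct */
	unsigned int avail, limit;
	void *entry[ENTRIES];
};

/* Move up to 'max' objects from the tail of 'from' into 'to', the way
 * the refill path bulk-transfers from the shared array. */
static int transfer_objects(struct array_cache *to,
			    struct array_cache *from, unsigned int max)
{
	unsigned int nr = MIN(MIN(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;
	for (unsigned int i = 0; i < nr; i++)
		to->entry[to->avail + i] = from->entry[from->avail - nr + i];
	from->avail -= nr;
	to->avail += nr;
	return nr;
}

int main(void)
{
	static int objs[5];
	struct array_cache shared = { .avail = 5, .limit = ENTRIES };
	struct array_cache ac = { .avail = 0, .limit = ENTRIES };

	for (int i = 0; i < 5; i++)
		shared.entry[i] = &objs[i];

	if (transfer_objects(&ac, &shared, 3))   /* batchcount = 3 */
		printf("ac.avail=%u shared.avail=%u\n", ac.avail, shared.avail);
	return 0;                                /* prints 3 and 2 */
}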
@@ -3317,7 +3194,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
3317 | /* | 3194 | /* |
3318 | * Fallback function if there was no memory available and no objects on a | 3195 | * Fallback function if there was no memory available and no objects on a |
3319 | * certain node and fall back is permitted. First we scan all the | 3196 | * certain node and fall back is permitted. First we scan all the |
3320 | * available nodelists for available objects. If that fails then we | 3197 | * available node lists for available objects. If that fails then we |
3321 | * perform an allocation without specifying a node. This allows the page | 3198 | * perform an allocation without specifying a node. This allows the page |
3322 | * allocator to do its reclaim / fallback magic. We then insert the | 3199 | * allocator to do its reclaim / fallback magic. We then insert the |
3323 | * slab into the proper nodelist and then allocate from it. | 3200 | * slab into the proper nodelist and then allocate from it. |
@@ -3351,8 +3228,8 @@ retry: | |||
3351 | nid = zone_to_nid(zone); | 3228 | nid = zone_to_nid(zone); |
3352 | 3229 | ||
3353 | if (cpuset_zone_allowed_hardwall(zone, flags) && | 3230 | if (cpuset_zone_allowed_hardwall(zone, flags) && |
3354 | cache->nodelists[nid] && | 3231 | cache->node[nid] && |
3355 | cache->nodelists[nid]->free_objects) { | 3232 | cache->node[nid]->free_objects) { |
3356 | obj = ____cache_alloc_node(cache, | 3233 | obj = ____cache_alloc_node(cache, |
3357 | flags | GFP_THISNODE, nid); | 3234 | flags | GFP_THISNODE, nid); |
3358 | if (obj) | 3235 | if (obj) |
@@ -3408,21 +3285,22 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | |||
3408 | { | 3285 | { |
3409 | struct list_head *entry; | 3286 | struct list_head *entry; |
3410 | struct slab *slabp; | 3287 | struct slab *slabp; |
3411 | struct kmem_list3 *l3; | 3288 | struct kmem_cache_node *n; |
3412 | void *obj; | 3289 | void *obj; |
3413 | int x; | 3290 | int x; |
3414 | 3291 | ||
3415 | l3 = cachep->nodelists[nodeid]; | 3292 | VM_BUG_ON(nodeid > num_online_nodes()); |
3416 | BUG_ON(!l3); | 3293 | n = cachep->node[nodeid]; |
3294 | BUG_ON(!n); | ||
3417 | 3295 | ||
3418 | retry: | 3296 | retry: |
3419 | check_irq_off(); | 3297 | check_irq_off(); |
3420 | spin_lock(&l3->list_lock); | 3298 | spin_lock(&n->list_lock); |
3421 | entry = l3->slabs_partial.next; | 3299 | entry = n->slabs_partial.next; |
3422 | if (entry == &l3->slabs_partial) { | 3300 | if (entry == &n->slabs_partial) { |
3423 | l3->free_touched = 1; | 3301 | n->free_touched = 1; |
3424 | entry = l3->slabs_free.next; | 3302 | entry = n->slabs_free.next; |
3425 | if (entry == &l3->slabs_free) | 3303 | if (entry == &n->slabs_free) |
3426 | goto must_grow; | 3304 | goto must_grow; |
3427 | } | 3305 | } |
3428 | 3306 | ||
@@ -3438,20 +3316,20 @@ retry: | |||
3438 | 3316 | ||
3439 | obj = slab_get_obj(cachep, slabp, nodeid); | 3317 | obj = slab_get_obj(cachep, slabp, nodeid); |
3440 | check_slabp(cachep, slabp); | 3318 | check_slabp(cachep, slabp); |
3441 | l3->free_objects--; | 3319 | n->free_objects--; |
3442 | /* move slabp to correct slabp list: */ | 3320 | /* move slabp to correct slabp list: */ |
3443 | list_del(&slabp->list); | 3321 | list_del(&slabp->list); |
3444 | 3322 | ||
3445 | if (slabp->free == BUFCTL_END) | 3323 | if (slabp->free == BUFCTL_END) |
3446 | list_add(&slabp->list, &l3->slabs_full); | 3324 | list_add(&slabp->list, &n->slabs_full); |
3447 | else | 3325 | else |
3448 | list_add(&slabp->list, &l3->slabs_partial); | 3326 | list_add(&slabp->list, &n->slabs_partial); |
3449 | 3327 | ||
3450 | spin_unlock(&l3->list_lock); | 3328 | spin_unlock(&n->list_lock); |
3451 | goto done; | 3329 | goto done; |
3452 | 3330 | ||
3453 | must_grow: | 3331 | must_grow: |
3454 | spin_unlock(&l3->list_lock); | 3332 | spin_unlock(&n->list_lock); |
3455 | x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); | 3333 | x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); |
3456 | if (x) | 3334 | if (x) |
3457 | goto retry; | 3335 | goto retry; |
@@ -3497,7 +3375,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, | |||
3497 | if (nodeid == NUMA_NO_NODE) | 3375 | if (nodeid == NUMA_NO_NODE) |
3498 | nodeid = slab_node; | 3376 | nodeid = slab_node; |
3499 | 3377 | ||
3500 | if (unlikely(!cachep->nodelists[nodeid])) { | 3378 | if (unlikely(!cachep->node[nodeid])) { |
3501 | /* Node not bootstrapped yet */ | 3379 | /* Node not bootstrapped yet */ |
3502 | ptr = fallback_alloc(cachep, flags); | 3380 | ptr = fallback_alloc(cachep, flags); |
3503 | goto out; | 3381 | goto out; |
@@ -3603,7 +3481,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3603 | int node) | 3481 | int node) |
3604 | { | 3482 | { |
3605 | int i; | 3483 | int i; |
3606 | struct kmem_list3 *l3; | 3484 | struct kmem_cache_node *n; |
3607 | 3485 | ||
3608 | for (i = 0; i < nr_objects; i++) { | 3486 | for (i = 0; i < nr_objects; i++) { |
3609 | void *objp; | 3487 | void *objp; |
@@ -3613,19 +3491,19 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3613 | objp = objpp[i]; | 3491 | objp = objpp[i]; |
3614 | 3492 | ||
3615 | slabp = virt_to_slab(objp); | 3493 | slabp = virt_to_slab(objp); |
3616 | l3 = cachep->nodelists[node]; | 3494 | n = cachep->node[node]; |
3617 | list_del(&slabp->list); | 3495 | list_del(&slabp->list); |
3618 | check_spinlock_acquired_node(cachep, node); | 3496 | check_spinlock_acquired_node(cachep, node); |
3619 | check_slabp(cachep, slabp); | 3497 | check_slabp(cachep, slabp); |
3620 | slab_put_obj(cachep, slabp, objp, node); | 3498 | slab_put_obj(cachep, slabp, objp, node); |
3621 | STATS_DEC_ACTIVE(cachep); | 3499 | STATS_DEC_ACTIVE(cachep); |
3622 | l3->free_objects++; | 3500 | n->free_objects++; |
3623 | check_slabp(cachep, slabp); | 3501 | check_slabp(cachep, slabp); |
3624 | 3502 | ||
3625 | /* fixup slab chains */ | 3503 | /* fixup slab chains */ |
3626 | if (slabp->inuse == 0) { | 3504 | if (slabp->inuse == 0) { |
3627 | if (l3->free_objects > l3->free_limit) { | 3505 | if (n->free_objects > n->free_limit) { |
3628 | l3->free_objects -= cachep->num; | 3506 | n->free_objects -= cachep->num; |
3629 | /* No need to drop any previously held | 3507 | /* No need to drop any previously held |
3630 | * lock here, even if we have an off-slab slab | 3508 | * lock here, even if we have an off-slab slab |
3631 | * descriptor it is guaranteed to come from | 3509 | * descriptor it is guaranteed to come from |
@@ -3634,14 +3512,14 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3634 | */ | 3512 | */ |
3635 | slab_destroy(cachep, slabp); | 3513 | slab_destroy(cachep, slabp); |
3636 | } else { | 3514 | } else { |
3637 | list_add(&slabp->list, &l3->slabs_free); | 3515 | list_add(&slabp->list, &n->slabs_free); |
3638 | } | 3516 | } |
3639 | } else { | 3517 | } else { |
3640 | /* Unconditionally move a slab to the end of the | 3518 | /* Unconditionally move a slab to the end of the |
3641 | * partial list on free - maximum time for the | 3519 | * partial list on free - maximum time for the |
3642 | * other objects to be freed, too. | 3520 | * other objects to be freed, too. |
3643 | */ | 3521 | */ |
3644 | list_add_tail(&slabp->list, &l3->slabs_partial); | 3522 | list_add_tail(&slabp->list, &n->slabs_partial); |
3645 | } | 3523 | } |
3646 | } | 3524 | } |
3647 | } | 3525 | } |
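The "fixup slab chains" step above reduces to a three-way decision per slab: a now-empty slab is destroyed when the node already holds too many free objects, otherwise parked on slabs_free; a still-used slab goes to the tail of slabs_partial. A compilable restatement (the thresholds are invented):

#include <stdio.h>

enum dest { DESTROY, SLABS_FREE, SLABS_PARTIAL_TAIL };

/* Decide where a slab goes after objects were freed back onto it. */
static enum dest fixup(unsigned int inuse, unsigned long free_objects,
		       unsigned long free_limit)
{
	if (inuse == 0)
		return free_objects > free_limit ? DESTROY : SLABS_FREE;
	return SLABS_PARTIAL_TAIL;      /* tail: gives other objects time to die */
}

int main(void)
{
	printf("%d\n", fixup(0, 130, 128));  /* 0: destroy, node holds too much */
	printf("%d\n", fixup(0,  60, 128));  /* 1: park on slabs_free */
	printf("%d\n", fixup(3,  60, 128));  /* 2: tail of slabs_partial */
	return 0;
}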
@@ -3649,7 +3527,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3649 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | 3527 | static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) |
3650 | { | 3528 | { |
3651 | int batchcount; | 3529 | int batchcount; |
3652 | struct kmem_list3 *l3; | 3530 | struct kmem_cache_node *n; |
3653 | int node = numa_mem_id(); | 3531 | int node = numa_mem_id(); |
3654 | 3532 | ||
3655 | batchcount = ac->batchcount; | 3533 | batchcount = ac->batchcount; |
@@ -3657,10 +3535,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | |||
3657 | BUG_ON(!batchcount || batchcount > ac->avail); | 3535 | BUG_ON(!batchcount || batchcount > ac->avail); |
3658 | #endif | 3536 | #endif |
3659 | check_irq_off(); | 3537 | check_irq_off(); |
3660 | l3 = cachep->nodelists[node]; | 3538 | n = cachep->node[node]; |
3661 | spin_lock(&l3->list_lock); | 3539 | spin_lock(&n->list_lock); |
3662 | if (l3->shared) { | 3540 | if (n->shared) { |
3663 | struct array_cache *shared_array = l3->shared; | 3541 | struct array_cache *shared_array = n->shared; |
3664 | int max = shared_array->limit - shared_array->avail; | 3542 | int max = shared_array->limit - shared_array->avail; |
3665 | if (max) { | 3543 | if (max) { |
3666 | if (batchcount > max) | 3544 | if (batchcount > max) |
@@ -3679,8 +3557,8 @@ free_done: | |||
3679 | int i = 0; | 3557 | int i = 0; |
3680 | struct list_head *p; | 3558 | struct list_head *p; |
3681 | 3559 | ||
3682 | p = l3->slabs_free.next; | 3560 | p = n->slabs_free.next; |
3683 | while (p != &(l3->slabs_free)) { | 3561 | while (p != &(n->slabs_free)) { |
3684 | struct slab *slabp; | 3562 | struct slab *slabp; |
3685 | 3563 | ||
3686 | slabp = list_entry(p, struct slab, list); | 3564 | slabp = list_entry(p, struct slab, list); |
@@ -3692,7 +3570,7 @@ free_done: | |||
3692 | STATS_SET_FREEABLE(cachep, i); | 3570 | STATS_SET_FREEABLE(cachep, i); |
3693 | } | 3571 | } |
3694 | #endif | 3572 | #endif |
3695 | spin_unlock(&l3->list_lock); | 3573 | spin_unlock(&n->list_lock); |
3696 | ac->avail -= batchcount; | 3574 | ac->avail -= batchcount; |
3697 | memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); | 3575 | memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); |
3698 | } | 3576 | } |
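cache_flusharray() splits the batch: whatever fits in the node's shared array stays cached, the remainder goes back to the slab lists via free_block(), and the surviving per-cpu entries are slid down with memmove(). A userspace walk-through of that arithmetic with made-up sizes (not kernel API):

#include <stdio.h>
#include <string.h>

int main(void)
{
	static int objs[8];
	void *entry[8];                              /* ac->entry */
	unsigned int avail = 6, batchcount = 4;      /* ac->avail, ac->batchcount */
	unsigned int shared_avail = 2, shared_limit = 4;

	for (unsigned int i = 0; i < avail; i++)
		entry[i] = &objs[i];

	unsigned int max = shared_limit - shared_avail;       /* room in shared */
	unsigned int to_shared = batchcount < max ? batchcount : max;
	shared_avail += to_shared;                   /* memcpy into shared->entry */
	unsigned int to_lists = batchcount - to_shared;   /* free_block() the rest */

	avail -= batchcount;
	memmove(entry, &entry[batchcount], sizeof(void *) * avail);

	printf("shared +%u, slab lists +%u, %u left in ac\n",
	       to_shared, to_lists, avail);          /* 2, 2, 2 */
	return 0;
}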
@@ -3802,7 +3680,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) | |||
3802 | { | 3680 | { |
3803 | struct kmem_cache *cachep; | 3681 | struct kmem_cache *cachep; |
3804 | 3682 | ||
3805 | cachep = kmem_find_general_cachep(size, flags); | 3683 | cachep = kmalloc_slab(size, flags); |
3806 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3684 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
3807 | return cachep; | 3685 | return cachep; |
3808 | return kmem_cache_alloc_node_trace(cachep, flags, node, size); | 3686 | return kmem_cache_alloc_node_trace(cachep, flags, node, size); |
@@ -3847,7 +3725,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, | |||
3847 | * Then kmalloc uses the uninlined functions instead of the inline | 3725 | * Then kmalloc uses the uninlined functions instead of the inline |
3848 | * functions. | 3726 | * functions. |
3849 | */ | 3727 | */ |
3850 | cachep = __find_general_cachep(size, flags); | 3728 | cachep = kmalloc_slab(size, flags); |
3851 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) | 3729 | if (unlikely(ZERO_OR_NULL_PTR(cachep))) |
3852 | return cachep; | 3730 | return cachep; |
3853 | ret = slab_alloc(cachep, flags, caller); | 3731 | ret = slab_alloc(cachep, flags, caller); |
@@ -3936,12 +3814,12 @@ void kfree(const void *objp) | |||
3936 | EXPORT_SYMBOL(kfree); | 3814 | EXPORT_SYMBOL(kfree); |
3937 | 3815 | ||
3938 | /* | 3816 | /* |
3939 | * This initializes kmem_list3 or resizes various caches for all nodes. | 3817 | * This initializes kmem_cache_node or resizes various caches for all nodes. |
3940 | */ | 3818 | */ |
3941 | static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) | 3819 | static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) |
3942 | { | 3820 | { |
3943 | int node; | 3821 | int node; |
3944 | struct kmem_list3 *l3; | 3822 | struct kmem_cache_node *n; |
3945 | struct array_cache *new_shared; | 3823 | struct array_cache *new_shared; |
3946 | struct array_cache **new_alien = NULL; | 3824 | struct array_cache **new_alien = NULL; |
3947 | 3825 | ||
@@ -3964,43 +3842,43 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp) | |||
3964 | } | 3842 | } |
3965 | } | 3843 | } |
3966 | 3844 | ||
3967 | l3 = cachep->nodelists[node]; | 3845 | n = cachep->node[node]; |
3968 | if (l3) { | 3846 | if (n) { |
3969 | struct array_cache *shared = l3->shared; | 3847 | struct array_cache *shared = n->shared; |
3970 | 3848 | ||
3971 | spin_lock_irq(&l3->list_lock); | 3849 | spin_lock_irq(&n->list_lock); |
3972 | 3850 | ||
3973 | if (shared) | 3851 | if (shared) |
3974 | free_block(cachep, shared->entry, | 3852 | free_block(cachep, shared->entry, |
3975 | shared->avail, node); | 3853 | shared->avail, node); |
3976 | 3854 | ||
3977 | l3->shared = new_shared; | 3855 | n->shared = new_shared; |
3978 | if (!l3->alien) { | 3856 | if (!n->alien) { |
3979 | l3->alien = new_alien; | 3857 | n->alien = new_alien; |
3980 | new_alien = NULL; | 3858 | new_alien = NULL; |
3981 | } | 3859 | } |
3982 | l3->free_limit = (1 + nr_cpus_node(node)) * | 3860 | n->free_limit = (1 + nr_cpus_node(node)) * |
3983 | cachep->batchcount + cachep->num; | 3861 | cachep->batchcount + cachep->num; |
3984 | spin_unlock_irq(&l3->list_lock); | 3862 | spin_unlock_irq(&n->list_lock); |
3985 | kfree(shared); | 3863 | kfree(shared); |
3986 | free_alien_cache(new_alien); | 3864 | free_alien_cache(new_alien); |
3987 | continue; | 3865 | continue; |
3988 | } | 3866 | } |
3989 | l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node); | 3867 | n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); |
3990 | if (!l3) { | 3868 | if (!n) { |
3991 | free_alien_cache(new_alien); | 3869 | free_alien_cache(new_alien); |
3992 | kfree(new_shared); | 3870 | kfree(new_shared); |
3993 | goto fail; | 3871 | goto fail; |
3994 | } | 3872 | } |
3995 | 3873 | ||
3996 | kmem_list3_init(l3); | 3874 | kmem_cache_node_init(n); |
3997 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + | 3875 | n->next_reap = jiffies + REAPTIMEOUT_LIST3 + |
3998 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; | 3876 | ((unsigned long)cachep) % REAPTIMEOUT_LIST3; |
3999 | l3->shared = new_shared; | 3877 | n->shared = new_shared; |
4000 | l3->alien = new_alien; | 3878 | n->alien = new_alien; |
4001 | l3->free_limit = (1 + nr_cpus_node(node)) * | 3879 | n->free_limit = (1 + nr_cpus_node(node)) * |
4002 | cachep->batchcount + cachep->num; | 3880 | cachep->batchcount + cachep->num; |
4003 | cachep->nodelists[node] = l3; | 3881 | cachep->node[node] = n; |
4004 | } | 3882 | } |
4005 | return 0; | 3883 | return 0; |
4006 | 3884 | ||
@@ -4009,13 +3887,13 @@ fail: | |||
4009 | /* Cache is not active yet. Roll back what we did */ | 3887 | /* Cache is not active yet. Roll back what we did */ |
4010 | node--; | 3888 | node--; |
4011 | while (node >= 0) { | 3889 | while (node >= 0) { |
4012 | if (cachep->nodelists[node]) { | 3890 | if (cachep->node[node]) { |
4013 | l3 = cachep->nodelists[node]; | 3891 | n = cachep->node[node]; |
4014 | 3892 | ||
4015 | kfree(l3->shared); | 3893 | kfree(n->shared); |
4016 | free_alien_cache(l3->alien); | 3894 | free_alien_cache(n->alien); |
4017 | kfree(l3); | 3895 | kfree(n); |
4018 | cachep->nodelists[node] = NULL; | 3896 | cachep->node[node] = NULL; |
4019 | } | 3897 | } |
4020 | node--; | 3898 | node--; |
4021 | } | 3899 | } |
@@ -4075,9 +3953,9 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, | |||
4075 | struct array_cache *ccold = new->new[i]; | 3953 | struct array_cache *ccold = new->new[i]; |
4076 | if (!ccold) | 3954 | if (!ccold) |
4077 | continue; | 3955 | continue; |
4078 | spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); | 3956 | spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); |
4079 | free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); | 3957 | free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); |
4080 | spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); | 3958 | spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); |
4081 | kfree(ccold); | 3959 | kfree(ccold); |
4082 | } | 3960 | } |
4083 | kfree(new); | 3961 | kfree(new); |
@@ -4178,11 +4056,11 @@ skip_setup: | |||
4178 | } | 4056 | } |
4179 | 4057 | ||
4180 | /* | 4058 | /* |
4181 | * Drain an array if it contains any elements taking the l3 lock only if | 4059 | * Drain an array if it contains any elements taking the node lock only if |
4182 | * necessary. Note that the l3 listlock also protects the array_cache | 4060 | * necessary. Note that the node listlock also protects the array_cache |
4183 | * if drain_array() is used on the shared array. | 4061 | * if drain_array() is used on the shared array. |
4184 | */ | 4062 | */ |
4185 | static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | 4063 | static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, |
4186 | struct array_cache *ac, int force, int node) | 4064 | struct array_cache *ac, int force, int node) |
4187 | { | 4065 | { |
4188 | int tofree; | 4066 | int tofree; |
@@ -4192,7 +4070,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
4192 | if (ac->touched && !force) { | 4070 | if (ac->touched && !force) { |
4193 | ac->touched = 0; | 4071 | ac->touched = 0; |
4194 | } else { | 4072 | } else { |
4195 | spin_lock_irq(&l3->list_lock); | 4073 | spin_lock_irq(&n->list_lock); |
4196 | if (ac->avail) { | 4074 | if (ac->avail) { |
4197 | tofree = force ? ac->avail : (ac->limit + 4) / 5; | 4075 | tofree = force ? ac->avail : (ac->limit + 4) / 5; |
4198 | if (tofree > ac->avail) | 4076 | if (tofree > ac->avail) |
@@ -4202,7 +4080,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
4202 | memmove(ac->entry, &(ac->entry[tofree]), | 4080 | memmove(ac->entry, &(ac->entry[tofree]), |
4203 | sizeof(void *) * ac->avail); | 4081 | sizeof(void *) * ac->avail); |
4204 | } | 4082 | } |
4205 | spin_unlock_irq(&l3->list_lock); | 4083 | spin_unlock_irq(&n->list_lock); |
4206 | } | 4084 | } |
4207 | } | 4085 | } |
4208 | 4086 | ||
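The sizing rule in drain_array() is easy to miss in the diff: an array that was not touched since the last tick gives back roughly a fifth of its limit, while a forced drain empties it, always capped at what is actually there. In isolation:

#include <stdio.h>

static int tofree(int avail, int limit, int force)
{
	int n = force ? avail : (limit + 4) / 5;   /* round up limit / 5 */

	return n > avail ? avail : n;
}

int main(void)
{
	printf("%d\n", tofree(30, 120, 0));   /* 24: one fifth of the limit */
	printf("%d\n", tofree(10, 120, 0));   /* 10: capped at what is there */
	printf("%d\n", tofree(30, 120, 1));   /* 30: forced drain empties it */
	return 0;
}

cache_reap() below applies the same one-fifth idea to whole slabs: its drain_freelist() target (n->free_limit + 5 * searchp->num - 1) / (5 * searchp->num) is just ceil(free_limit / 5) expressed in slabs.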
@@ -4221,7 +4099,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
4221 | static void cache_reap(struct work_struct *w) | 4099 | static void cache_reap(struct work_struct *w) |
4222 | { | 4100 | { |
4223 | struct kmem_cache *searchp; | 4101 | struct kmem_cache *searchp; |
4224 | struct kmem_list3 *l3; | 4102 | struct kmem_cache_node *n; |
4225 | int node = numa_mem_id(); | 4103 | int node = numa_mem_id(); |
4226 | struct delayed_work *work = to_delayed_work(w); | 4104 | struct delayed_work *work = to_delayed_work(w); |
4227 | 4105 | ||
@@ -4233,33 +4111,33 @@ static void cache_reap(struct work_struct *w) | |||
4233 | check_irq_on(); | 4111 | check_irq_on(); |
4234 | 4112 | ||
4235 | /* | 4113 | /* |
4236 | * We only take the l3 lock if absolutely necessary and we | 4114 | * We only take the node lock if absolutely necessary and we |
4237 | * have established with reasonable certainty that | 4115 | * have established with reasonable certainty that |
4238 | * we can do some work if the lock was obtained. | 4116 | * we can do some work if the lock was obtained. |
4239 | */ | 4117 | */ |
4240 | l3 = searchp->nodelists[node]; | 4118 | n = searchp->node[node]; |
4241 | 4119 | ||
4242 | reap_alien(searchp, l3); | 4120 | reap_alien(searchp, n); |
4243 | 4121 | ||
4244 | drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); | 4122 | drain_array(searchp, n, cpu_cache_get(searchp), 0, node); |
4245 | 4123 | ||
4246 | /* | 4124 | /* |
4247 | * These are racy checks but it does not matter | 4125 | * These are racy checks but it does not matter |
4248 | * if we skip one check or scan twice. | 4126 | * if we skip one check or scan twice. |
4249 | */ | 4127 | */ |
4250 | if (time_after(l3->next_reap, jiffies)) | 4128 | if (time_after(n->next_reap, jiffies)) |
4251 | goto next; | 4129 | goto next; |
4252 | 4130 | ||
4253 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; | 4131 | n->next_reap = jiffies + REAPTIMEOUT_LIST3; |
4254 | 4132 | ||
4255 | drain_array(searchp, l3, l3->shared, 0, node); | 4133 | drain_array(searchp, n, n->shared, 0, node); |
4256 | 4134 | ||
4257 | if (l3->free_touched) | 4135 | if (n->free_touched) |
4258 | l3->free_touched = 0; | 4136 | n->free_touched = 0; |
4259 | else { | 4137 | else { |
4260 | int freed; | 4138 | int freed; |
4261 | 4139 | ||
4262 | freed = drain_freelist(searchp, l3, (l3->free_limit + | 4140 | freed = drain_freelist(searchp, n, (n->free_limit + |
4263 | 5 * searchp->num - 1) / (5 * searchp->num)); | 4141 | 5 * searchp->num - 1) / (5 * searchp->num)); |
4264 | STATS_ADD_REAPED(searchp, freed); | 4142 | STATS_ADD_REAPED(searchp, freed); |
4265 | } | 4143 | } |
@@ -4285,25 +4163,25 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4285 | const char *name; | 4163 | const char *name; |
4286 | char *error = NULL; | 4164 | char *error = NULL; |
4287 | int node; | 4165 | int node; |
4288 | struct kmem_list3 *l3; | 4166 | struct kmem_cache_node *n; |
4289 | 4167 | ||
4290 | active_objs = 0; | 4168 | active_objs = 0; |
4291 | num_slabs = 0; | 4169 | num_slabs = 0; |
4292 | for_each_online_node(node) { | 4170 | for_each_online_node(node) { |
4293 | l3 = cachep->nodelists[node]; | 4171 | n = cachep->node[node]; |
4294 | if (!l3) | 4172 | if (!n) |
4295 | continue; | 4173 | continue; |
4296 | 4174 | ||
4297 | check_irq_on(); | 4175 | check_irq_on(); |
4298 | spin_lock_irq(&l3->list_lock); | 4176 | spin_lock_irq(&n->list_lock); |
4299 | 4177 | ||
4300 | list_for_each_entry(slabp, &l3->slabs_full, list) { | 4178 | list_for_each_entry(slabp, &n->slabs_full, list) { |
4301 | if (slabp->inuse != cachep->num && !error) | 4179 | if (slabp->inuse != cachep->num && !error) |
4302 | error = "slabs_full accounting error"; | 4180 | error = "slabs_full accounting error"; |
4303 | active_objs += cachep->num; | 4181 | active_objs += cachep->num; |
4304 | active_slabs++; | 4182 | active_slabs++; |
4305 | } | 4183 | } |
4306 | list_for_each_entry(slabp, &l3->slabs_partial, list) { | 4184 | list_for_each_entry(slabp, &n->slabs_partial, list) { |
4307 | if (slabp->inuse == cachep->num && !error) | 4185 | if (slabp->inuse == cachep->num && !error) |
4308 | error = "slabs_partial inuse accounting error"; | 4186 | error = "slabs_partial inuse accounting error"; |
4309 | if (!slabp->inuse && !error) | 4187 | if (!slabp->inuse && !error) |
@@ -4311,16 +4189,16 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4311 | active_objs += slabp->inuse; | 4189 | active_objs += slabp->inuse; |
4312 | active_slabs++; | 4190 | active_slabs++; |
4313 | } | 4191 | } |
4314 | list_for_each_entry(slabp, &l3->slabs_free, list) { | 4192 | list_for_each_entry(slabp, &n->slabs_free, list) { |
4315 | if (slabp->inuse && !error) | 4193 | if (slabp->inuse && !error) |
4316 | error = "slabs_free/inuse accounting error"; | 4194 | error = "slabs_free/inuse accounting error"; |
4317 | num_slabs++; | 4195 | num_slabs++; |
4318 | } | 4196 | } |
4319 | free_objects += l3->free_objects; | 4197 | free_objects += n->free_objects; |
4320 | if (l3->shared) | 4198 | if (n->shared) |
4321 | shared_avail += l3->shared->avail; | 4199 | shared_avail += n->shared->avail; |
4322 | 4200 | ||
4323 | spin_unlock_irq(&l3->list_lock); | 4201 | spin_unlock_irq(&n->list_lock); |
4324 | } | 4202 | } |
4325 | num_slabs += active_slabs; | 4203 | num_slabs += active_slabs; |
4326 | num_objs = num_slabs * cachep->num; | 4204 | num_objs = num_slabs * cachep->num; |
@@ -4346,7 +4224,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4346 | void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) | 4224 | void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) |
4347 | { | 4225 | { |
4348 | #if STATS | 4226 | #if STATS |
4349 | { /* list3 stats */ | 4227 | { /* node stats */ |
4350 | unsigned long high = cachep->high_mark; | 4228 | unsigned long high = cachep->high_mark; |
4351 | unsigned long allocs = cachep->num_allocations; | 4229 | unsigned long allocs = cachep->num_allocations; |
4352 | unsigned long grown = cachep->grown; | 4230 | unsigned long grown = cachep->grown; |
@@ -4499,9 +4377,9 @@ static int leaks_show(struct seq_file *m, void *p) | |||
4499 | { | 4377 | { |
4500 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); | 4378 | struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); |
4501 | struct slab *slabp; | 4379 | struct slab *slabp; |
4502 | struct kmem_list3 *l3; | 4380 | struct kmem_cache_node *n; |
4503 | const char *name; | 4381 | const char *name; |
4504 | unsigned long *n = m->private; | 4382 | unsigned long *x = m->private; |
4505 | int node; | 4383 | int node; |
4506 | int i; | 4384 | int i; |
4507 | 4385 | ||
@@ -4512,43 +4390,43 @@ static int leaks_show(struct seq_file *m, void *p) | |||
4512 | 4390 | ||
4513 | /* OK, we can do it */ | 4391 | /* OK, we can do it */ |
4514 | 4392 | ||
4515 | n[1] = 0; | 4393 | x[1] = 0; |
4516 | 4394 | ||
4517 | for_each_online_node(node) { | 4395 | for_each_online_node(node) { |
4518 | l3 = cachep->nodelists[node]; | 4396 | n = cachep->node[node]; |
4519 | if (!l3) | 4397 | if (!n) |
4520 | continue; | 4398 | continue; |
4521 | 4399 | ||
4522 | check_irq_on(); | 4400 | check_irq_on(); |
4523 | spin_lock_irq(&l3->list_lock); | 4401 | spin_lock_irq(&n->list_lock); |
4524 | 4402 | ||
4525 | list_for_each_entry(slabp, &l3->slabs_full, list) | 4403 | list_for_each_entry(slabp, &n->slabs_full, list) |
4526 | handle_slab(n, cachep, slabp); | 4404 | handle_slab(x, cachep, slabp); |
4527 | list_for_each_entry(slabp, &l3->slabs_partial, list) | 4405 | list_for_each_entry(slabp, &n->slabs_partial, list) |
4528 | handle_slab(n, cachep, slabp); | 4406 | handle_slab(x, cachep, slabp); |
4529 | spin_unlock_irq(&l3->list_lock); | 4407 | spin_unlock_irq(&n->list_lock); |
4530 | } | 4408 | } |
4531 | name = cachep->name; | 4409 | name = cachep->name; |
4532 | if (n[0] == n[1]) { | 4410 | if (x[0] == x[1]) { |
4533 | /* Increase the buffer size */ | 4411 | /* Increase the buffer size */ |
4534 | mutex_unlock(&slab_mutex); | 4412 | mutex_unlock(&slab_mutex); |
4535 | m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL); | 4413 | m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL); |
4536 | if (!m->private) { | 4414 | if (!m->private) { |
4537 | /* Too bad, we are really out */ | 4415 | /* Too bad, we are really out */ |
4538 | m->private = n; | 4416 | m->private = x; |
4539 | mutex_lock(&slab_mutex); | 4417 | mutex_lock(&slab_mutex); |
4540 | return -ENOMEM; | 4418 | return -ENOMEM; |
4541 | } | 4419 | } |
4542 | *(unsigned long *)m->private = n[0] * 2; | 4420 | *(unsigned long *)m->private = x[0] * 2; |
4543 | kfree(n); | 4421 | kfree(x); |
4544 | mutex_lock(&slab_mutex); | 4422 | mutex_lock(&slab_mutex); |
4545 | /* Now make sure this entry will be retried */ | 4423 | /* Now make sure this entry will be retried */ |
4546 | m->count = m->size; | 4424 | m->count = m->size; |
4547 | return 0; | 4425 | return 0; |
4548 | } | 4426 | } |
4549 | for (i = 0; i < n[1]; i++) { | 4427 | for (i = 0; i < x[1]; i++) { |
4550 | seq_printf(m, "%s: %lu ", name, n[2*i+3]); | 4428 | seq_printf(m, "%s: %lu ", name, x[2*i+3]); |
4551 | show_symbol(m, n[2*i+2]); | 4429 | show_symbol(m, x[2*i+2]); |
4552 | seq_putc(m, '\n'); | 4430 | seq_putc(m, '\n'); |
4553 | } | 4431 | } |
4554 | 4432 | ||
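leaks_show() uses a grow-and-retry scheme: when the record buffer fills (x[0] == x[1], capacity equals records and results may have been dropped), it is reallocated at double the capacity and the whole scan is redone on the next pass. The pattern on its own, with the record contents elided and only the two counters modelled:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long *x = calloc(4, sizeof(*x));
	int found = 3;                     /* records a full scan would produce */

	x[0] = 1;                          /* capacity, in records */
	for (;;) {
		x[1] = 0;                  /* records stored this pass */
		for (int rec = 0; rec < found; rec++)
			if (x[1] < x[0])
				x[1]++;    /* handle_slab() would store caller info */

		if (x[1] < x[0])           /* everything fit: done */
			break;
		unsigned long cap = x[0] * 2;              /* double and retry */
		free(x);
		x = calloc(2 + 2 * cap, sizeof(*x));
		x[0] = cap;
		printf("retrying with capacity %lu\n", cap);
	}
	printf("recorded %lu entries\n", x[1]);    /* recorded 3 entries */
	free(x);
	return 0;
}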
diff --git a/mm/slab.h b/mm/slab.h --- a/mm/slab.h +++ b/mm/slab.h | |||
@@ -16,7 +16,7 @@ enum slab_state { | |||
16 | DOWN, /* No slab functionality yet */ | 16 | DOWN, /* No slab functionality yet */ |
17 | PARTIAL, /* SLUB: kmem_cache_node available */ | 17 | PARTIAL, /* SLUB: kmem_cache_node available */ |
18 | PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ | 18 | PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ |
19 | PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */ | 19 | PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */ |
20 | UP, /* Slab caches usable but not all extras yet */ | 20 | UP, /* Slab caches usable but not all extras yet */ |
21 | FULL /* Everything is working */ | 21 | FULL /* Everything is working */ |
22 | }; | 22 | }; |
@@ -35,6 +35,15 @@ extern struct kmem_cache *kmem_cache; | |||
35 | unsigned long calculate_alignment(unsigned long flags, | 35 | unsigned long calculate_alignment(unsigned long flags, |
36 | unsigned long align, unsigned long size); | 36 | unsigned long align, unsigned long size); |
37 | 37 | ||
38 | #ifndef CONFIG_SLOB | ||
39 | /* Kmalloc array related functions */ | ||
40 | void create_kmalloc_caches(unsigned long); | ||
41 | |||
42 | /* Find the kmalloc slab corresponding to a certain size */ | ||
43 | struct kmem_cache *kmalloc_slab(size_t, gfp_t); | ||
44 | #endif | ||
45 | |||
46 | |||
38 | /* Functions provided by the slab allocators */ | 47 | /* Functions provided by the slab allocators */ |
39 | extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); | 48 | extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); |
40 | 49 | ||
@@ -230,3 +239,35 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) | |||
230 | return s; | 239 | return s; |
231 | } | 240 | } |
232 | #endif | 241 | #endif |
242 | |||
243 | |||
244 | /* | ||
245 | * The slab lists for all objects. | ||
246 | */ | ||
247 | struct kmem_cache_node { | ||
248 | spinlock_t list_lock; | ||
249 | |||
250 | #ifdef CONFIG_SLAB | ||
251 | struct list_head slabs_partial; /* partial list first, better asm code */ | ||
252 | struct list_head slabs_full; | ||
253 | struct list_head slabs_free; | ||
254 | unsigned long free_objects; | ||
255 | unsigned int free_limit; | ||
256 | unsigned int colour_next; /* Per-node cache coloring */ | ||
257 | struct array_cache *shared; /* shared per node */ | ||
258 | struct array_cache **alien; /* on other nodes */ | ||
259 | unsigned long next_reap; /* updated without locking */ | ||
260 | int free_touched; /* updated without locking */ | ||
261 | #endif | ||
262 | |||
263 | #ifdef CONFIG_SLUB | ||
264 | unsigned long nr_partial; | ||
265 | struct list_head partial; | ||
266 | #ifdef CONFIG_SLUB_DEBUG | ||
267 | atomic_long_t nr_slabs; | ||
268 | atomic_long_t total_objects; | ||
269 | struct list_head full; | ||
270 | #endif | ||
271 | #endif | ||
272 | |||
273 | }; | ||
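This structure is the heart of the merge: one struct kmem_cache_node definition whose tail is allocator-specific, so common code can be written against the shared head (the list lock) while SLAB and SLUB keep their own bookkeeping. A compilable toy of that shape - an int stands in for spinlock_t, and flipping the define models the SLUB build:

#include <stdio.h>

#define CONFIG_SLAB                     /* switch to CONFIG_SLUB for the other half */

struct list_head { struct list_head *next, *prev; };

struct kmem_cache_node {
	int list_lock;                  /* common head; spinlock_t in the kernel */

#ifdef CONFIG_SLAB
	struct list_head slabs_partial, slabs_full, slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#endif
};

int main(void)
{
	printf("sizeof(struct kmem_cache_node) = %zu\n",
	       sizeof(struct kmem_cache_node));
	return 0;
}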
diff --git a/mm/slab_common.c b/mm/slab_common.c index 3f3cd97d3fdf..d2517b05d5bc 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -299,7 +299,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz | |||
299 | err = __kmem_cache_create(s, flags); | 299 | err = __kmem_cache_create(s, flags); |
300 | 300 | ||
301 | if (err) | 301 | if (err) |
302 | panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n", | 302 | panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n", |
303 | name, size, err); | 303 | name, size, err); |
304 | 304 | ||
305 | s->refcount = -1; /* Exempt from merging for now */ | 305 | s->refcount = -1; /* Exempt from merging for now */ |
@@ -319,6 +319,178 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, | |||
319 | return s; | 319 | return s; |
320 | } | 320 | } |
321 | 321 | ||
322 | struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; | ||
323 | EXPORT_SYMBOL(kmalloc_caches); | ||
324 | |||
325 | #ifdef CONFIG_ZONE_DMA | ||
326 | struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; | ||
327 | EXPORT_SYMBOL(kmalloc_dma_caches); | ||
328 | #endif | ||
329 | |||
330 | /* | ||
331 | * Conversion table for small slab sizes / 8 to the index in the ||
332 | * kmalloc array. This is necessary for slabs < 192 since we have non power | ||
333 | * of two cache sizes there. The size of larger slabs can be determined using | ||
334 | * fls. | ||
335 | */ | ||
336 | static s8 size_index[24] = { | ||
337 | 3, /* 8 */ | ||
338 | 4, /* 16 */ | ||
339 | 5, /* 24 */ | ||
340 | 5, /* 32 */ | ||
341 | 6, /* 40 */ | ||
342 | 6, /* 48 */ | ||
343 | 6, /* 56 */ | ||
344 | 6, /* 64 */ | ||
345 | 1, /* 72 */ | ||
346 | 1, /* 80 */ | ||
347 | 1, /* 88 */ | ||
348 | 1, /* 96 */ | ||
349 | 7, /* 104 */ | ||
350 | 7, /* 112 */ | ||
351 | 7, /* 120 */ | ||
352 | 7, /* 128 */ | ||
353 | 2, /* 136 */ | ||
354 | 2, /* 144 */ | ||
355 | 2, /* 152 */ | ||
356 | 2, /* 160 */ | ||
357 | 2, /* 168 */ | ||
358 | 2, /* 176 */ | ||
359 | 2, /* 184 */ | ||
360 | 2 /* 192 */ | ||
361 | }; | ||
362 | |||
363 | static inline int size_index_elem(size_t bytes) | ||
364 | { | ||
365 | return (bytes - 1) / 8; | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * Find the kmem_cache structure that serves a given size of | ||
370 | * allocation | ||
371 | */ | ||
372 | struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) | ||
373 | { | ||
374 | int index; | ||
375 | |||
376 | if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE)) | ||
377 | return NULL; | ||
378 | |||
379 | if (size <= 192) { | ||
380 | if (!size) | ||
381 | return ZERO_SIZE_PTR; | ||
382 | |||
383 | index = size_index[size_index_elem(size)]; | ||
384 | } else | ||
385 | index = fls(size - 1); | ||
386 | |||
387 | #ifdef CONFIG_ZONE_DMA | ||
388 | if (unlikely((flags & GFP_DMA))) | ||
389 | return kmalloc_dma_caches[index]; | ||
390 | |||
391 | #endif | ||
392 | return kmalloc_caches[index]; | ||
393 | } | ||
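A worked example of the index computation above: sizes up to 192 go through the table (the non-power-of-two 96- and 192-byte caches live at indices 1 and 2), larger sizes use fls(size - 1). A self-contained replica with a hand-rolled fls, since userspace libc does not provide one:

#include <stdio.h>

static const signed char size_index[24] = {   /* copied from the patch */
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

static int fls_(unsigned long x)     /* position of highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int kmalloc_index(size_t size)
{
	if (size <= 192)
		return size_index[(size - 1) / 8];   /* size_index_elem() */
	return fls_(size - 1);
}

int main(void)
{
	size_t sizes[] = { 8, 24, 96, 100, 192, 200, 1024, 4000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc(%4zu) -> kmalloc_caches[%2d]\n",
		       sizes[i], kmalloc_index(sizes[i]));
	return 0;   /* 24 -> 5 (32B), 96 -> 1 (96B), 100 -> 7 (128B), 4000 -> 12 (4096B) */
}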
394 | |||
395 | /* | ||
396 | * Create the kmalloc array. Some of the regular kmalloc arrays | ||
397 | * may already have been created because they were needed to | ||
398 | * enable allocations for slab creation. | ||
399 | */ | ||
400 | void __init create_kmalloc_caches(unsigned long flags) | ||
401 | { | ||
402 | int i; | ||
403 | |||
404 | /* | ||
405 | * Patch up the size_index table if we have strange large alignment | ||
406 | * requirements for the kmalloc array. This is only the case for | ||
407 | * MIPS it seems. The standard arches will not generate any code here. | ||
408 | * | ||
409 | * Largest permitted alignment is 256 bytes due to the way we | ||
410 | * handle the index determination for the smaller caches. | ||
411 | * | ||
412 | * Make sure that nothing crazy happens if someone starts tinkering | ||
413 | * around with ARCH_KMALLOC_MINALIGN | ||
414 | */ | ||
415 | BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || | ||
416 | (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); | ||
417 | |||
418 | for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { | ||
419 | int elem = size_index_elem(i); | ||
420 | |||
421 | if (elem >= ARRAY_SIZE(size_index)) | ||
422 | break; | ||
423 | size_index[elem] = KMALLOC_SHIFT_LOW; | ||
424 | } | ||
425 | |||
426 | if (KMALLOC_MIN_SIZE >= 64) { | ||
427 | /* | ||
428 | * The 96 byte size cache is not used if the alignment ||
429 | * is 64 bytes. ||
430 | */ | ||
431 | for (i = 64 + 8; i <= 96; i += 8) | ||
432 | size_index[size_index_elem(i)] = 7; | ||
433 | |||
434 | } | ||
435 | |||
436 | if (KMALLOC_MIN_SIZE >= 128) { | ||
437 | /* | ||
438 | * The 192 byte sized cache is not used if the alignment ||
439 | * is 128 bytes. Redirect kmalloc to use the 256 byte cache ||
440 | * instead. | ||
441 | */ | ||
442 | for (i = 128 + 8; i <= 192; i += 8) | ||
443 | size_index[size_index_elem(i)] = 8; | ||
444 | } | ||
445 | for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { | ||
446 | if (!kmalloc_caches[i]) { | ||
447 | kmalloc_caches[i] = create_kmalloc_cache(NULL, | ||
448 | 1 << i, flags); | ||
449 | |||
450 | /* | ||
451 | * Caches that are not of a power-of-two size. ||
452 | * These have to be created immediately after the ||
453 | * earlier power-of-two caches. ||
454 | */ | ||
455 | if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6) | ||
456 | kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags); | ||
457 | |||
458 | if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7) | ||
459 | kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags); | ||
460 | } | ||
461 | } | ||
462 | |||
463 | /* Kmalloc array is now usable */ | ||
464 | slab_state = UP; | ||
465 | |||
466 | for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) { | ||
467 | struct kmem_cache *s = kmalloc_caches[i]; | ||
468 | char *n; | ||
469 | |||
470 | if (s) { | ||
471 | n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i)); | ||
472 | |||
473 | BUG_ON(!n); | ||
474 | s->name = n; | ||
475 | } | ||
476 | } | ||
477 | |||
478 | #ifdef CONFIG_ZONE_DMA | ||
479 | for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) { | ||
480 | struct kmem_cache *s = kmalloc_caches[i]; | ||
481 | |||
482 | if (s) { | ||
483 | int size = kmalloc_size(i); | ||
484 | char *n = kasprintf(GFP_NOWAIT, | ||
485 | "dma-kmalloc-%d", size); | ||
486 | |||
487 | BUG_ON(!n); | ||
488 | kmalloc_dma_caches[i] = create_kmalloc_cache(n, | ||
489 | size, SLAB_CACHE_DMA | flags); | ||
490 | } | ||
491 | } | ||
492 | #endif | ||
493 | } | ||
322 | #endif /* !CONFIG_SLOB */ | 494 | #endif /* !CONFIG_SLOB */ |
323 | 495 | ||
324 | 496 | ||
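The size_index patch-up in create_kmalloc_caches() is easiest to see by running it. With a hypothetical KMALLOC_MIN_SIZE of 64 (a MIPS-like configuration), every size below 64 is redirected to the 64-byte cache (index KMALLOC_SHIFT_LOW) and 72..96 to the 128-byte cache, since the 96-byte cache cannot honour the alignment:

#include <stdio.h>

#define KMALLOC_MIN_SIZE  64    /* hypothetical large minimum alignment */
#define KMALLOC_SHIFT_LOW 6     /* log2(KMALLOC_MIN_SIZE) */

static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

int main(void)
{
	/* Redirect all sizes below the minimum to the smallest usable cache. */
	for (int i = 8; i < KMALLOC_MIN_SIZE; i += 8)
		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

	/* The 96-byte cache cannot satisfy 64-byte alignment: use 128 instead. */
	if (KMALLOC_MIN_SIZE >= 64)
		for (int i = 64 + 8; i <= 96; i += 8)
			size_index[(i - 1) / 8] = 7;

	for (int i = 0; i < 24; i++)
		printf("size %3d -> kmalloc_caches[%d]\n", (i + 1) * 8, size_index[i]);
	return 0;
}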
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -1005,7 +1005,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) | |||
1005 | * dilemma by deferring the increment of the count during | 1005 | * dilemma by deferring the increment of the count during |
1006 | * bootstrap (see early_kmem_cache_node_alloc). | 1006 | * bootstrap (see early_kmem_cache_node_alloc). |
1007 | */ | 1007 | */ |
1008 | if (n) { | 1008 | if (likely(n)) { |
1009 | atomic_long_inc(&n->nr_slabs); | 1009 | atomic_long_inc(&n->nr_slabs); |
1010 | atomic_long_add(objects, &n->total_objects); | 1010 | atomic_long_add(objects, &n->total_objects); |
1011 | } | 1011 | } |
@@ -1493,7 +1493,7 @@ static inline void remove_partial(struct kmem_cache_node *n, | |||
1493 | */ | 1493 | */ |
1494 | static inline void *acquire_slab(struct kmem_cache *s, | 1494 | static inline void *acquire_slab(struct kmem_cache *s, |
1495 | struct kmem_cache_node *n, struct page *page, | 1495 | struct kmem_cache_node *n, struct page *page, |
1496 | int mode) | 1496 | int mode, int *objects) |
1497 | { | 1497 | { |
1498 | void *freelist; | 1498 | void *freelist; |
1499 | unsigned long counters; | 1499 | unsigned long counters; |
@@ -1507,6 +1507,7 @@ static inline void *acquire_slab(struct kmem_cache *s, | |||
1507 | freelist = page->freelist; | 1507 | freelist = page->freelist; |
1508 | counters = page->counters; | 1508 | counters = page->counters; |
1509 | new.counters = counters; | 1509 | new.counters = counters; |
1510 | *objects = new.objects - new.inuse; | ||
1510 | if (mode) { | 1511 | if (mode) { |
1511 | new.inuse = page->objects; | 1512 | new.inuse = page->objects; |
1512 | new.freelist = NULL; | 1513 | new.freelist = NULL; |
@@ -1528,7 +1529,7 @@ static inline void *acquire_slab(struct kmem_cache *s, | |||
1528 | return freelist; | 1529 | return freelist; |
1529 | } | 1530 | } |
1530 | 1531 | ||
1531 | static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); | 1532 | static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); |
1532 | static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); | 1533 | static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); |
1533 | 1534 | ||
1534 | /* | 1535 | /* |
@@ -1539,6 +1540,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, | |||
1539 | { | 1540 | { |
1540 | struct page *page, *page2; | 1541 | struct page *page, *page2; |
1541 | void *object = NULL; | 1542 | void *object = NULL; |
1543 | int available = 0; | ||
1544 | int objects; | ||
1542 | 1545 | ||
1543 | /* | 1546 | /* |
1544 | * Racy check. If we mistakenly see no partial slabs then we | 1547 | * Racy check. If we mistakenly see no partial slabs then we |
@@ -1552,22 +1555,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, | |||
1552 | spin_lock(&n->list_lock); | 1555 | spin_lock(&n->list_lock); |
1553 | list_for_each_entry_safe(page, page2, &n->partial, lru) { | 1556 | list_for_each_entry_safe(page, page2, &n->partial, lru) { |
1554 | void *t; | 1557 | void *t; |
1555 | int available; | ||
1556 | 1558 | ||
1557 | if (!pfmemalloc_match(page, flags)) | 1559 | if (!pfmemalloc_match(page, flags)) |
1558 | continue; | 1560 | continue; |
1559 | 1561 | ||
1560 | t = acquire_slab(s, n, page, object == NULL); | 1562 | t = acquire_slab(s, n, page, object == NULL, &objects); |
1561 | if (!t) | 1563 | if (!t) |
1562 | break; | 1564 | break; |
1563 | 1565 | ||
1566 | available += objects; | ||
1564 | if (!object) { | 1567 | if (!object) { |
1565 | c->page = page; | 1568 | c->page = page; |
1566 | stat(s, ALLOC_FROM_PARTIAL); | 1569 | stat(s, ALLOC_FROM_PARTIAL); |
1567 | object = t; | 1570 | object = t; |
1568 | available = page->objects - page->inuse; | ||
1569 | } else { | 1571 | } else { |
1570 | available = put_cpu_partial(s, page, 0); | 1572 | put_cpu_partial(s, page, 0); |
1571 | stat(s, CPU_PARTIAL_NODE); | 1573 | stat(s, CPU_PARTIAL_NODE); |
1572 | } | 1574 | } |
1573 | if (kmem_cache_debug(s) || available > s->cpu_partial / 2) | 1575 | if (kmem_cache_debug(s) || available > s->cpu_partial / 2) |
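The SLUB change threads an *objects out-parameter through acquire_slab() so that get_partial_node() can accumulate how many free objects it has actually stashed, instead of relying on put_cpu_partial()'s now-removed return value. The accumulation logic in isolation, with invented numbers:

#include <stdio.h>

int main(void)
{
	int free_per_slab[] = { 5, 9, 4, 12 };  /* page->objects - page->inuse */
	int cpu_partial = 30;                   /* s->cpu_partial */
	int available = 0;

	for (unsigned int i = 0; i < sizeof(free_per_slab) / sizeof(int); i++) {
		available += free_per_slab[i];  /* *objects from acquire_slab() */
		if (available > cpu_partial / 2) {
			printf("stop after %u slabs, %d objects available\n",
			       i + 1, available);  /* 3 slabs, 18 objects */
			break;
		}
	}
	return 0;
}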
@@ -1946,7 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s, | |||
1946 | * If we did not find a slot then simply move all the partials to the | 1948 | * If we did not find a slot then simply move all the partials to the |
1947 | * per node partial list. | 1949 | * per node partial list. |
1948 | */ | 1950 | */ |
1949 | static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | 1951 | static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) |
1950 | { | 1952 | { |
1951 | struct page *oldpage; | 1953 | struct page *oldpage; |
1952 | int pages; | 1954 | int pages; |
@@ -1984,7 +1986,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | |||
1984 | page->next = oldpage; | 1986 | page->next = oldpage; |
1985 | 1987 | ||
1986 | } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); | 1988 | } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); |
1987 | return pobjects; | ||
1988 | } | 1989 | } |
1989 | 1990 | ||
1990 | static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) | 1991 | static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) |
@@ -2041,7 +2042,7 @@ static void flush_all(struct kmem_cache *s) | |||
2041 | static inline int node_match(struct page *page, int node) | 2042 | static inline int node_match(struct page *page, int node) |
2042 | { | 2043 | { |
2043 | #ifdef CONFIG_NUMA | 2044 | #ifdef CONFIG_NUMA |
2044 | if (node != NUMA_NO_NODE && page_to_nid(page) != node) | 2045 | if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) |
2045 | return 0; | 2046 | return 0; |
2046 | #endif | 2047 | #endif |
2047 | return 1; | 2048 | return 1; |
@@ -2331,13 +2332,18 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, | |||
2331 | 2332 | ||
2332 | s = memcg_kmem_get_cache(s, gfpflags); | 2333 | s = memcg_kmem_get_cache(s, gfpflags); |
2333 | redo: | 2334 | redo: |
2334 | |||
2335 | /* | 2335 | /* |
2336 | * Must read kmem_cache cpu data via this cpu ptr. Preemption is | 2336 | * Must read kmem_cache cpu data via this cpu ptr. Preemption is |
2337 | * enabled. We may switch back and forth between cpus while | 2337 | * enabled. We may switch back and forth between cpus while |
2338 | * reading from one cpu area. That does not matter as long | 2338 | * reading from one cpu area. That does not matter as long |
2339 | * as we end up on the original cpu again when doing the cmpxchg. | 2339 | * as we end up on the original cpu again when doing the cmpxchg. |
2340 | * | ||
2341 | * Preemption is disabled for the retrieval of the tid because that | ||
2342 | * must occur from the current processor. We cannot allow rescheduling | ||
2343 | * on a different processor between the determination of the pointer | ||
2344 | * and the retrieval of the tid. | ||
2340 | */ | 2345 | */ |
2346 | preempt_disable(); | ||
2341 | c = __this_cpu_ptr(s->cpu_slab); | 2347 | c = __this_cpu_ptr(s->cpu_slab); |
2342 | 2348 | ||
2343 | /* | 2349 | /* |
@@ -2347,7 +2353,7 @@ redo: | |||
2347 | * linked list in between. | 2353 | * linked list in between. |
2348 | */ | 2354 | */ |
2349 | tid = c->tid; | 2355 | tid = c->tid; |
2350 | barrier(); | 2356 | preempt_enable(); |
2351 | 2357 | ||
2352 | object = c->freelist; | 2358 | object = c->freelist; |
2353 | page = c->page; | 2359 | page = c->page; |
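The barrier() to preempt_enable() change enforces that the per-cpu pointer and the tid are sampled on the same processor; everything after that may migrate, because the final this_cpu_cmpxchg_double() re-validates the tid. A loose userspace analogy of that optimistic protocol using C11 atomics, with a single compare-exchange standing in for the kernel's double-word cmpxchg:

#include <stdatomic.h>
#include <stdio.h>

struct cpu_slab {
	_Atomic unsigned long tid;   /* bumped on every operation on this cpu */
	int freelist;                /* stand-in for the real freelist pointer */
};

int main(void)
{
	struct cpu_slab c = { 100, 42 };

	/* These two reads must happen on one cpu; the preempt_disable()/
	 * preempt_enable() pair in the patch guarantees exactly that. */
	unsigned long tid = atomic_load(&c.tid);
	int object = c.freelist;

	/* Speculative work may migrate; the commit only succeeds if the
	 * tid is still the one we sampled. */
	if (atomic_compare_exchange_strong(&c.tid, &tid, tid + 1))
		printf("allocated %d, tid advanced to %lu\n",
		       object, (unsigned long)atomic_load(&c.tid));
	return 0;
}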
@@ -2594,10 +2600,11 @@ redo: | |||
2594 | * data is retrieved via this pointer. If we are on the same cpu | 2600 | * data is retrieved via this pointer. If we are on the same cpu |
2595 | * during the cmpxchg then the free will succeed. | 2601 | * during the cmpxchg then the free will succeed. |
2596 | */ | 2602 | */ |
2603 | preempt_disable(); | ||
2597 | c = __this_cpu_ptr(s->cpu_slab); | 2604 | c = __this_cpu_ptr(s->cpu_slab); |
2598 | 2605 | ||
2599 | tid = c->tid; | 2606 | tid = c->tid; |
2600 | barrier(); | 2607 | preempt_enable(); |
2601 | 2608 | ||
2602 | if (likely(page == c->page)) { | 2609 | if (likely(page == c->page)) { |
2603 | set_freepointer(s, object, c->freelist); | 2610 | set_freepointer(s, object, c->freelist); |
@@ -2775,7 +2782,7 @@ init_kmem_cache_node(struct kmem_cache_node *n) | |||
2775 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) | 2782 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) |
2776 | { | 2783 | { |
2777 | BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < | 2784 | BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < |
2778 | SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); | 2785 | KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu)); |
2779 | 2786 | ||
2780 | /* | 2787 | /* |
2781 | * Must align to double word boundary for the double cmpxchg | 2788 | * Must align to double word boundary for the double cmpxchg |
@@ -2982,7 +2989,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) | |||
2982 | s->allocflags |= __GFP_COMP; | 2989 | s->allocflags |= __GFP_COMP; |
2983 | 2990 | ||
2984 | if (s->flags & SLAB_CACHE_DMA) | 2991 | if (s->flags & SLAB_CACHE_DMA) |
2985 | s->allocflags |= SLUB_DMA; | 2992 | s->allocflags |= GFP_DMA; |
2986 | 2993 | ||
2987 | if (s->flags & SLAB_RECLAIM_ACCOUNT) | 2994 | if (s->flags & SLAB_RECLAIM_ACCOUNT) |
2988 | s->allocflags |= __GFP_RECLAIMABLE; | 2995 | s->allocflags |= __GFP_RECLAIMABLE; |
@@ -3174,13 +3181,6 @@ int __kmem_cache_shutdown(struct kmem_cache *s) | |||
3174 | * Kmalloc subsystem | 3181 | * Kmalloc subsystem |
3175 | *******************************************************************/ | 3182 | *******************************************************************/ |
3176 | 3183 | ||
3177 | struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; | ||
3178 | EXPORT_SYMBOL(kmalloc_caches); | ||
3179 | |||
3180 | #ifdef CONFIG_ZONE_DMA | ||
3181 | static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; | ||
3182 | #endif | ||
3183 | |||
3184 | static int __init setup_slub_min_order(char *str) | 3184 | static int __init setup_slub_min_order(char *str) |
3185 | { | 3185 | { |
3186 | get_option(&str, &slub_min_order); | 3186 | get_option(&str, &slub_min_order); |
@@ -3217,73 +3217,15 @@ static int __init setup_slub_nomerge(char *str) | |||
3217 | 3217 | ||
3218 | __setup("slub_nomerge", setup_slub_nomerge); | 3218 | __setup("slub_nomerge", setup_slub_nomerge); |
3219 | 3219 | ||
3220 | /* | ||
3221 | * Conversion table for small slab sizes / 8 to the index in the ||
3222 | * kmalloc array. This is necessary for slabs < 192 since we have non power | ||
3223 | * of two cache sizes there. The size of larger slabs can be determined using | ||
3224 | * fls. | ||
3225 | */ | ||
3226 | static s8 size_index[24] = { | ||
3227 | 3, /* 8 */ | ||
3228 | 4, /* 16 */ | ||
3229 | 5, /* 24 */ | ||
3230 | 5, /* 32 */ | ||
3231 | 6, /* 40 */ | ||
3232 | 6, /* 48 */ | ||
3233 | 6, /* 56 */ | ||
3234 | 6, /* 64 */ | ||
3235 | 1, /* 72 */ | ||
3236 | 1, /* 80 */ | ||
3237 | 1, /* 88 */ | ||
3238 | 1, /* 96 */ | ||
3239 | 7, /* 104 */ | ||
3240 | 7, /* 112 */ | ||
3241 | 7, /* 120 */ | ||
3242 | 7, /* 128 */ | ||
3243 | 2, /* 136 */ | ||
3244 | 2, /* 144 */ | ||
3245 | 2, /* 152 */ | ||
3246 | 2, /* 160 */ | ||
3247 | 2, /* 168 */ | ||
3248 | 2, /* 176 */ | ||
3249 | 2, /* 184 */ | ||
3250 | 2 /* 192 */ | ||
3251 | }; | ||
3252 | |||
3253 | static inline int size_index_elem(size_t bytes) | ||
3254 | { | ||
3255 | return (bytes - 1) / 8; | ||
3256 | } | ||
3257 | |||
3258 | static struct kmem_cache *get_slab(size_t size, gfp_t flags) | ||
3259 | { | ||
3260 | int index; | ||
3261 | |||
3262 | if (size <= 192) { | ||
3263 | if (!size) | ||
3264 | return ZERO_SIZE_PTR; | ||
3265 | |||
3266 | index = size_index[size_index_elem(size)]; | ||
3267 | } else | ||
3268 | index = fls(size - 1); | ||
3269 | |||
3270 | #ifdef CONFIG_ZONE_DMA | ||
3271 | if (unlikely((flags & SLUB_DMA))) | ||
3272 | return kmalloc_dma_caches[index]; | ||
3273 | |||
3274 | #endif | ||
3275 | return kmalloc_caches[index]; | ||
3276 | } | ||
3277 | |||
3278 | void *__kmalloc(size_t size, gfp_t flags) | 3220 | void *__kmalloc(size_t size, gfp_t flags) |
3279 | { | 3221 | { |
3280 | struct kmem_cache *s; | 3222 | struct kmem_cache *s; |
3281 | void *ret; | 3223 | void *ret; |
3282 | 3224 | ||
3283 | if (unlikely(size > SLUB_MAX_SIZE)) | 3225 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) |
3284 | return kmalloc_large(size, flags); | 3226 | return kmalloc_large(size, flags); |
3285 | 3227 | ||
3286 | s = get_slab(size, flags); | 3228 | s = kmalloc_slab(size, flags); |
3287 | 3229 | ||
3288 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3230 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3289 | return s; | 3231 | return s; |
@@ -3316,7 +3258,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) | |||
3316 | struct kmem_cache *s; | 3258 | struct kmem_cache *s; |
3317 | void *ret; | 3259 | void *ret; |
3318 | 3260 | ||
3319 | if (unlikely(size > SLUB_MAX_SIZE)) { | 3261 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { |
3320 | ret = kmalloc_large_node(size, flags, node); | 3262 | ret = kmalloc_large_node(size, flags, node); |
3321 | 3263 | ||
3322 | trace_kmalloc_node(_RET_IP_, ret, | 3264 | trace_kmalloc_node(_RET_IP_, ret, |
@@ -3326,7 +3268,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) | |||
3326 | return ret; | 3268 | return ret; |
3327 | } | 3269 | } |
3328 | 3270 | ||
3329 | s = get_slab(size, flags); | 3271 | s = kmalloc_slab(size, flags); |
3330 | 3272 | ||
3331 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3273 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3332 | return s; | 3274 | return s; |
@@ -3617,6 +3559,12 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) | |||
3617 | 3559 | ||
3618 | memcpy(s, static_cache, kmem_cache->object_size); | 3560 | memcpy(s, static_cache, kmem_cache->object_size); |
3619 | 3561 | ||
3562 | /* | ||
3563 | * This runs very early, and only the boot processor is supposed to be | ||
3564 | * up. Even if that were not true, IRQs are not yet enabled, so we | ||
3565 | * could not fire IPIs around. | ||
3566 | */ | ||
3567 | __flush_cpu_slab(s, smp_processor_id()); | ||
3620 | for_each_node_state(node, N_NORMAL_MEMORY) { | 3568 | for_each_node_state(node, N_NORMAL_MEMORY) { |
3621 | struct kmem_cache_node *n = get_node(s, node); | 3569 | struct kmem_cache_node *n = get_node(s, node); |
3622 | struct page *p; | 3570 | struct page *p; |
@@ -3639,8 +3587,6 @@ void __init kmem_cache_init(void) | |||
3639 | { | 3587 | { |
3640 | static __initdata struct kmem_cache boot_kmem_cache, | 3588 | static __initdata struct kmem_cache boot_kmem_cache, |
3641 | boot_kmem_cache_node; | 3589 | boot_kmem_cache_node; |
3642 | int i; | ||
3643 | int caches = 2; | ||
3644 | 3590 | ||
3645 | if (debug_guardpage_minorder()) | 3591 | if (debug_guardpage_minorder()) |
3646 | slub_max_order = 0; | 3592 | slub_max_order = 0; |
@@ -3671,103 +3617,16 @@ void __init kmem_cache_init(void) | |||
3671 | kmem_cache_node = bootstrap(&boot_kmem_cache_node); | 3617 | kmem_cache_node = bootstrap(&boot_kmem_cache_node); |
3672 | 3618 | ||
3673 | /* Now we can use the kmem_cache to allocate kmalloc slabs */ | 3619 | /* Now we can use the kmem_cache to allocate kmalloc slabs */ |
3674 | 3620 | create_kmalloc_caches(0); | |
3675 | /* | ||
3676 | * Patch up the size_index table if we have strange large alignment | ||
3677 | * requirements for the kmalloc array. This seems to be the case only | ||
3678 | * for MIPS. The standard arches will not generate any code here. | ||
3679 | * | ||
3680 | * Largest permitted alignment is 256 bytes due to the way we | ||
3681 | * handle the index determination for the smaller caches. | ||
3682 | * | ||
3683 | * Make sure that nothing crazy happens if someone starts tinkering | ||
3684 | * around with ARCH_KMALLOC_MINALIGN | ||
3685 | */ | ||
3686 | BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || | ||
3687 | (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1))); | ||
3688 | |||
3689 | for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { | ||
3690 | int elem = size_index_elem(i); | ||
3691 | if (elem >= ARRAY_SIZE(size_index)) | ||
3692 | break; | ||
3693 | size_index[elem] = KMALLOC_SHIFT_LOW; | ||
3694 | } | ||
3695 | |||
3696 | if (KMALLOC_MIN_SIZE == 64) { | ||
3697 | /* | ||
3698 | * The 96 byte cache is not used if the alignment | ||
3699 | * is 64 bytes. | ||
3700 | */ | ||
3701 | for (i = 64 + 8; i <= 96; i += 8) | ||
3702 | size_index[size_index_elem(i)] = 7; | ||
3703 | } else if (KMALLOC_MIN_SIZE == 128) { | ||
3704 | /* | ||
3705 | * The 192 byte cache is not used if the alignment | ||
3706 | * is 128 bytes. Redirect kmalloc to use the 256 byte cache | ||
3707 | * instead. | ||
3708 | */ | ||
3709 | for (i = 128 + 8; i <= 192; i += 8) | ||
3710 | size_index[size_index_elem(i)] = 8; | ||
3711 | } | ||
3712 | |||
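Concretely: 96 is not a multiple of 64, so with KMALLOC_MIN_SIZE == 64 the 96-byte cache (index 1) cannot honor the minimum alignment, and every table slot for sizes 72..96 is redirected to the 128-byte cache (index 7). A user-space sketch of that effect on a copy of the table, illustrative only:

#include <stdio.h>

static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

int main(void)
{
	int i;

	/* The KMALLOC_MIN_SIZE == 64 patch-up from the removed code. */
	for (i = 64 + 8; i <= 96; i += 8)
		size_index[(i - 1) / 8] = 7;

	/* An 80-byte request now maps to index 7 (128-byte cache)
	 * rather than index 1 (96-byte cache). */
	printf("index for 80 bytes: %d\n", size_index[(80 - 1) / 8]);
	return 0;
}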
3713 | /* Caches that are not power-of-two sized */ | ||
3714 | if (KMALLOC_MIN_SIZE <= 32) { | ||
3715 | kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); | ||
3716 | caches++; | ||
3717 | } | ||
3718 | |||
3719 | if (KMALLOC_MIN_SIZE <= 64) { | ||
3720 | kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); | ||
3721 | caches++; | ||
3722 | } | ||
3723 | |||
3724 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { | ||
3725 | kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); | ||
3726 | caches++; | ||
3727 | } | ||
3728 | |||
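The 96- and 192-byte caches exist because a pure power-of-two series can waste nearly half of each object in the worst case; a quick illustration with a hypothetical 65-byte request:

#include <stdio.h>

int main(void)
{
	unsigned int req = 65;

	/* Internal fragmentation with and without the 96-byte cache. */
	printf("waste in 128-byte cache: %u bytes (~49%%)\n", 128 - req);
	printf("waste in  96-byte cache: %u bytes (~32%%)\n",  96 - req);
	return 0;
}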
3729 | slab_state = UP; | ||
3730 | |||
3731 | /* Provide the correct kmalloc names now that the caches are up */ | ||
3732 | if (KMALLOC_MIN_SIZE <= 32) { | ||
3733 | kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT); | ||
3734 | BUG_ON(!kmalloc_caches[1]->name); | ||
3735 | } | ||
3736 | |||
3737 | if (KMALLOC_MIN_SIZE <= 64) { | ||
3738 | kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT); | ||
3739 | BUG_ON(!kmalloc_caches[2]->name); | ||
3740 | } | ||
3741 | |||
3742 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { | ||
3743 | char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); | ||
3744 | |||
3745 | BUG_ON(!s); | ||
3746 | kmalloc_caches[i]->name = s; | ||
3747 | } | ||
3748 | 3621 | ||
3749 | #ifdef CONFIG_SMP | 3622 | #ifdef CONFIG_SMP |
3750 | register_cpu_notifier(&slab_notifier); | 3623 | register_cpu_notifier(&slab_notifier); |
3751 | #endif | 3624 | #endif |
3752 | 3625 | ||
3753 | #ifdef CONFIG_ZONE_DMA | ||
3754 | for (i = 0; i < SLUB_PAGE_SHIFT; i++) { | ||
3755 | struct kmem_cache *s = kmalloc_caches[i]; | ||
3756 | |||
3757 | if (s && s->size) { | ||
3758 | char *name = kasprintf(GFP_NOWAIT, | ||
3759 | "dma-kmalloc-%d", s->object_size); | ||
3760 | |||
3761 | BUG_ON(!name); | ||
3762 | kmalloc_dma_caches[i] = create_kmalloc_cache(name, | ||
3763 | s->object_size, SLAB_CACHE_DMA); | ||
3764 | } | ||
3765 | } | ||
3766 | #endif | ||
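The ZONE_DMA clones are likewise created centrally by create_kmalloc_caches() now. For reference, a user-space sketch of the naming the removed loop performed, with asprintf() standing in for kasprintf(GFP_NOWAIT, ...) and a hypothetical 512-byte cache:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *name;
	int object_size = 512;	/* hypothetical cache size */

	if (asprintf(&name, "dma-kmalloc-%d", object_size) < 0)
		return 1;
	puts(name);		/* prints "dma-kmalloc-512" */
	free(name);
	return 0;
}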
3767 | printk(KERN_INFO | 3626 | printk(KERN_INFO |
3768 | "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," | 3627 | "SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d," |
3769 | " CPUs=%d, Nodes=%d\n", | 3628 | " CPUs=%d, Nodes=%d\n", |
3770 | caches, cache_line_size(), | 3629 | cache_line_size(), |
3771 | slub_min_order, slub_max_order, slub_min_objects, | 3630 | slub_min_order, slub_max_order, slub_min_objects, |
3772 | nr_cpu_ids, nr_node_ids); | 3631 | nr_cpu_ids, nr_node_ids); |
3773 | } | 3632 | } |
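With the caches counter removed above, the boot banner loses its Genslabs field. Filled in with hypothetical values (not taken from any real boot log), the new format would print something like:

	SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=4, Nodes=1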
@@ -3930,10 +3789,10 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
3930 | struct kmem_cache *s; | 3789 | struct kmem_cache *s; |
3931 | void *ret; | 3790 | void *ret; |
3932 | 3791 | ||
3933 | if (unlikely(size > SLUB_MAX_SIZE)) | 3792 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) |
3934 | return kmalloc_large(size, gfpflags); | 3793 | return kmalloc_large(size, gfpflags); |
3935 | 3794 | ||
3936 | s = get_slab(size, gfpflags); | 3795 | s = kmalloc_slab(size, gfpflags); |
3937 | 3796 | ||
3938 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3797 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3939 | return s; | 3798 | return s; |
@@ -3953,7 +3812,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3953 | struct kmem_cache *s; | 3812 | struct kmem_cache *s; |
3954 | void *ret; | 3813 | void *ret; |
3955 | 3814 | ||
3956 | if (unlikely(size > SLUB_MAX_SIZE)) { | 3815 | if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { |
3957 | ret = kmalloc_large_node(size, gfpflags, node); | 3816 | ret = kmalloc_large_node(size, gfpflags, node); |
3958 | 3817 | ||
3959 | trace_kmalloc_node(caller, ret, | 3818 | trace_kmalloc_node(caller, ret, |
@@ -3963,7 +3822,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3963 | return ret; | 3822 | return ret; |
3964 | } | 3823 | } |
3965 | 3824 | ||
3966 | s = get_slab(size, gfpflags); | 3825 | s = kmalloc_slab(size, gfpflags); |
3967 | 3826 | ||
3968 | if (unlikely(ZERO_OR_NULL_PTR(s))) | 3827 | if (unlikely(ZERO_OR_NULL_PTR(s))) |
3969 | return s; | 3828 | return s; |
@@ -4312,7 +4171,7 @@ static void resiliency_test(void) | |||
4312 | { | 4171 | { |
4313 | u8 *p; | 4172 | u8 *p; |
4314 | 4173 | ||
4315 | BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10); | 4174 | BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10); |
4316 | 4175 | ||
4317 | printk(KERN_ERR "SLUB resiliency testing\n"); | 4176 | printk(KERN_ERR "SLUB resiliency testing\n"); |
4318 | printk(KERN_ERR "-----------------------\n"); | 4177 | printk(KERN_ERR "-----------------------\n"); |