author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-24 15:47:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-24 15:47:55 -0400
commit		76c39e4fef73e42777c27d6b87a04f89ebd0ef66 (patch)
tree		40f490c46a409bfe4cecd99cf08ad709065d116b /mm
parent		1765a1fe5d6f82c0eceb1ad10594cfc83759b6d0 (diff)
parent		6d4121f6c20a0e86231d52f535f1c82423b3326f (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (27 commits)
SLUB: Fix memory hotplug with !NUMA
slub: Move functions to reduce #ifdefs
slub: Enable sysfs support for !CONFIG_SLUB_DEBUG
SLUB: Optimize slab_free() debug check
slub: Move NUMA-related functions under CONFIG_NUMA
slub: Add lock release annotation
slub: Fix signedness warnings
slub: extract common code to remove objects from partial list without locking
SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
slub: reduce differences between SMP and NUMA
Revert "Slub: UP bandaid"
percpu: clear memory allocated with the km allocator
percpu: use percpu allocator on UP too
percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
vmalloc: pcpu_get/free_vm_areas() aren't needed on UP
SLUB: Fix merged slab cache names
Slub: UP bandaid
slub: fix SLUB_RESILIENCY_TEST for dynamic kmalloc caches
slub: Fix up missing kmalloc_cache -> kmem_cache_node case for memoryhotplug
slub: Add dummy functions for the !SLUB_DEBUG case
...
Diffstat (limited to 'mm')
-rw-r--r--	mm/slob.c	4
-rw-r--r--	mm/slub.c	788
2 files changed, 419 insertions, 373 deletions
@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) | |||
500 | } else { | 500 | } else { |
501 | unsigned int order = get_order(size); | 501 | unsigned int order = get_order(size); |
502 | 502 | ||
503 | ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); | 503 | if (likely(order)) |
504 | gfp |= __GFP_COMP; | ||
505 | ret = slob_new_pages(gfp, order, node); | ||
504 | if (ret) { | 506 | if (ret) { |
505 | struct page *page; | 507 | struct page *page; |
506 | page = virt_to_page(ret); | 508 | page = virt_to_page(ret); |
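The slob.c change above only requests a compound page when the allocation order is non-zero: __GFP_COMP links the tail pages back to the head page so the allocation order can be recovered later, which is meaningless for a single (order-0) page. A condensed view of the large-allocation branch after the change, with the surrounding __kmalloc_node() context assumed:

	unsigned int order = get_order(size);

	/* Compound-page metadata only matters when the allocation
	 * actually spans more than one page, i.e. order > 0. */
	if (likely(order))
		gfp |= __GFP_COMP;
	ret = slob_new_pages(gfp, order, node);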
@@ -168,7 +168,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s) | |||
168 | 168 | ||
169 | /* Internal SLUB flags */ | 169 | /* Internal SLUB flags */ |
170 | #define __OBJECT_POISON 0x80000000UL /* Poison object */ | 170 | #define __OBJECT_POISON 0x80000000UL /* Poison object */ |
171 | #define __SYSFS_ADD_DEFERRED 0x40000000UL /* Not yet visible via sysfs */ | ||
172 | 171 | ||
173 | static int kmem_size = sizeof(struct kmem_cache); | 172 | static int kmem_size = sizeof(struct kmem_cache); |
174 | 173 | ||
@@ -178,7 +177,7 @@ static struct notifier_block slab_notifier; | |||
178 | 177 | ||
179 | static enum { | 178 | static enum { |
180 | DOWN, /* No slab functionality available */ | 179 | DOWN, /* No slab functionality available */ |
181 | PARTIAL, /* kmem_cache_open() works but kmalloc does not */ | 180 | PARTIAL, /* Kmem_cache_node works */ |
182 | UP, /* Everything works but does not show up in sysfs */ | 181 | UP, /* Everything works but does not show up in sysfs */ |
183 | SYSFS /* Sysfs up */ | 182 | SYSFS /* Sysfs up */ |
184 | } slab_state = DOWN; | 183 | } slab_state = DOWN; |
@@ -199,7 +198,7 @@ struct track { | |||
199 | 198 | ||
200 | enum track_item { TRACK_ALLOC, TRACK_FREE }; | 199 | enum track_item { TRACK_ALLOC, TRACK_FREE }; |
201 | 200 | ||
202 | #ifdef CONFIG_SLUB_DEBUG | 201 | #ifdef CONFIG_SYSFS |
203 | static int sysfs_slab_add(struct kmem_cache *); | 202 | static int sysfs_slab_add(struct kmem_cache *); |
204 | static int sysfs_slab_alias(struct kmem_cache *, const char *); | 203 | static int sysfs_slab_alias(struct kmem_cache *, const char *); |
205 | static void sysfs_slab_remove(struct kmem_cache *); | 204 | static void sysfs_slab_remove(struct kmem_cache *); |
@@ -210,6 +209,7 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) | |||
210 | { return 0; } | 209 | { return 0; } |
211 | static inline void sysfs_slab_remove(struct kmem_cache *s) | 210 | static inline void sysfs_slab_remove(struct kmem_cache *s) |
212 | { | 211 | { |
212 | kfree(s->name); | ||
213 | kfree(s); | 213 | kfree(s); |
214 | } | 214 | } |
215 | 215 | ||
@@ -233,11 +233,7 @@ int slab_is_available(void) | |||
233 | 233 | ||
234 | static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) | 234 | static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) |
235 | { | 235 | { |
236 | #ifdef CONFIG_NUMA | ||
237 | return s->node[node]; | 236 | return s->node[node]; |
238 | #else | ||
239 | return &s->local_node; | ||
240 | #endif | ||
241 | } | 237 | } |
242 | 238 | ||
243 | /* Verify that a pointer has an address that is valid within a slab page */ | 239 | /* Verify that a pointer has an address that is valid within a slab page */ |
@@ -494,7 +490,7 @@ static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) | |||
494 | dump_stack(); | 490 | dump_stack(); |
495 | } | 491 | } |
496 | 492 | ||
497 | static void init_object(struct kmem_cache *s, void *object, int active) | 493 | static void init_object(struct kmem_cache *s, void *object, u8 val) |
498 | { | 494 | { |
499 | u8 *p = object; | 495 | u8 *p = object; |
500 | 496 | ||
@@ -504,9 +500,7 @@ static void init_object(struct kmem_cache *s, void *object, int active) | |||
504 | } | 500 | } |
505 | 501 | ||
506 | if (s->flags & SLAB_RED_ZONE) | 502 | if (s->flags & SLAB_RED_ZONE) |
507 | memset(p + s->objsize, | 503 | memset(p + s->objsize, val, s->inuse - s->objsize); |
508 | active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE, | ||
509 | s->inuse - s->objsize); | ||
510 | } | 504 | } |
511 | 505 | ||
512 | static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) | 506 | static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes) |
@@ -641,17 +635,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page) | |||
641 | } | 635 | } |
642 | 636 | ||
643 | static int check_object(struct kmem_cache *s, struct page *page, | 637 | static int check_object(struct kmem_cache *s, struct page *page, |
644 | void *object, int active) | 638 | void *object, u8 val) |
645 | { | 639 | { |
646 | u8 *p = object; | 640 | u8 *p = object; |
647 | u8 *endobject = object + s->objsize; | 641 | u8 *endobject = object + s->objsize; |
648 | 642 | ||
649 | if (s->flags & SLAB_RED_ZONE) { | 643 | if (s->flags & SLAB_RED_ZONE) { |
650 | unsigned int red = | ||
651 | active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; | ||
652 | |||
653 | if (!check_bytes_and_report(s, page, object, "Redzone", | 644 | if (!check_bytes_and_report(s, page, object, "Redzone", |
654 | endobject, red, s->inuse - s->objsize)) | 645 | endobject, val, s->inuse - s->objsize)) |
655 | return 0; | 646 | return 0; |
656 | } else { | 647 | } else { |
657 | if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { | 648 | if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) { |
@@ -661,7 +652,7 @@ static int check_object(struct kmem_cache *s, struct page *page, | |||
661 | } | 652 | } |
662 | 653 | ||
663 | if (s->flags & SLAB_POISON) { | 654 | if (s->flags & SLAB_POISON) { |
664 | if (!active && (s->flags & __OBJECT_POISON) && | 655 | if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && |
665 | (!check_bytes_and_report(s, page, p, "Poison", p, | 656 | (!check_bytes_and_report(s, page, p, "Poison", p, |
666 | POISON_FREE, s->objsize - 1) || | 657 | POISON_FREE, s->objsize - 1) || |
667 | !check_bytes_and_report(s, page, p, "Poison", | 658 | !check_bytes_and_report(s, page, p, "Poison", |
@@ -673,7 +664,7 @@ static int check_object(struct kmem_cache *s, struct page *page, | |||
673 | check_pad_bytes(s, page, p); | 664 | check_pad_bytes(s, page, p); |
674 | } | 665 | } |
675 | 666 | ||
676 | if (!s->offset && active) | 667 | if (!s->offset && val == SLUB_RED_ACTIVE) |
677 | /* | 668 | /* |
678 | * Object and freepointer overlap. Cannot check | 669 | * Object and freepointer overlap. Cannot check |
679 | * freepointer while object is allocated. | 670 | * freepointer while object is allocated. |
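The init_object()/check_object() changes above drop the active/inactive boolean and pass the expected redzone byte (SLUB_RED_ACTIVE or SLUB_RED_INACTIVE, from the kernel's poison.h) straight through, so the helpers no longer have to re-derive the pattern. A sketch of how the debug paths use the new convention (mirroring alloc_debug_processing() and free_debug_processing() later in this diff):

	/* Allocation: the redzone must still carry the inactive pattern,
	 * then the object is repainted as active before it is handed out. */
	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		goto bad;
	init_object(s, object, SLUB_RED_ACTIVE);

	/* Free: the roles are simply reversed. */
	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;
	init_object(s, object, SLUB_RED_INACTIVE);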
@@ -792,6 +783,39 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, | |||
792 | } | 783 | } |
793 | 784 | ||
794 | /* | 785 | /* |
786 | * Hooks for other subsystems that check memory allocations. In a typical | ||
787 | * production configuration these hooks all should produce no code at all. | ||
788 | */ | ||
789 | static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) | ||
790 | { | ||
791 | flags &= gfp_allowed_mask; | ||
792 | lockdep_trace_alloc(flags); | ||
793 | might_sleep_if(flags & __GFP_WAIT); | ||
794 | |||
795 | return should_failslab(s->objsize, flags, s->flags); | ||
796 | } | ||
797 | |||
798 | static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) | ||
799 | { | ||
800 | flags &= gfp_allowed_mask; | ||
801 | kmemcheck_slab_alloc(s, flags, object, s->objsize); | ||
802 | kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); | ||
803 | } | ||
804 | |||
805 | static inline void slab_free_hook(struct kmem_cache *s, void *x) | ||
806 | { | ||
807 | kmemleak_free_recursive(x, s->flags); | ||
808 | } | ||
809 | |||
810 | static inline void slab_free_hook_irq(struct kmem_cache *s, void *object) | ||
811 | { | ||
812 | kmemcheck_slab_free(s, object, s->objsize); | ||
813 | debug_check_no_locks_freed(object, s->objsize); | ||
814 | if (!(s->flags & SLAB_DEBUG_OBJECTS)) | ||
815 | debug_check_no_obj_freed(object, s->objsize); | ||
816 | } | ||
817 | |||
818 | /* | ||
795 | * Tracking of fully allocated slabs for debugging purposes. | 819 | * Tracking of fully allocated slabs for debugging purposes. |
796 | */ | 820 | */ |
797 | static void add_full(struct kmem_cache_node *n, struct page *page) | 821 | static void add_full(struct kmem_cache_node *n, struct page *page) |
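The four new hooks collect the kmemcheck, kmemleak, lockdep and failslab calls that slab_alloc() and slab_free() used to open-code, and they compile away entirely when the corresponding debug options are off. Roughly how the allocation path reads once they are in place; pick_object() is a hypothetical stand-in for the real per-cpu freelist fast path plus the __slab_alloc() fallback:

	static __always_inline void *slab_alloc(struct kmem_cache *s,
			gfp_t gfpflags, int node, unsigned long addr)
	{
		void *object;

		if (slab_pre_alloc_hook(s, gfpflags))
			return NULL;	/* lockdep/might_sleep/failslab veto */

		object = pick_object(s, gfpflags, node, addr);

		slab_post_alloc_hook(s, gfpflags, object);	/* kmemcheck + kmemleak */
		return object;
	}

slab_free() is reshaped the same way further down: slab_free_hook() runs before interrupts are disabled and slab_free_hook_irq() runs with them off.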
@@ -838,7 +862,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) | |||
838 | * dilemma by deferring the increment of the count during | 862 | * dilemma by deferring the increment of the count during |
839 | * bootstrap (see early_kmem_cache_node_alloc). | 863 | * bootstrap (see early_kmem_cache_node_alloc). |
840 | */ | 864 | */ |
841 | if (!NUMA_BUILD || n) { | 865 | if (n) { |
842 | atomic_long_inc(&n->nr_slabs); | 866 | atomic_long_inc(&n->nr_slabs); |
843 | atomic_long_add(objects, &n->total_objects); | 867 | atomic_long_add(objects, &n->total_objects); |
844 | } | 868 | } |
@@ -858,11 +882,11 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, | |||
858 | if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) | 882 | if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) |
859 | return; | 883 | return; |
860 | 884 | ||
861 | init_object(s, object, 0); | 885 | init_object(s, object, SLUB_RED_INACTIVE); |
862 | init_tracking(s, object); | 886 | init_tracking(s, object); |
863 | } | 887 | } |
864 | 888 | ||
865 | static int alloc_debug_processing(struct kmem_cache *s, struct page *page, | 889 | static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, |
866 | void *object, unsigned long addr) | 890 | void *object, unsigned long addr) |
867 | { | 891 | { |
868 | if (!check_slab(s, page)) | 892 | if (!check_slab(s, page)) |
@@ -878,14 +902,14 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page, | |||
878 | goto bad; | 902 | goto bad; |
879 | } | 903 | } |
880 | 904 | ||
881 | if (!check_object(s, page, object, 0)) | 905 | if (!check_object(s, page, object, SLUB_RED_INACTIVE)) |
882 | goto bad; | 906 | goto bad; |
883 | 907 | ||
884 | /* Success perform special debug activities for allocs */ | 908 | /* Success perform special debug activities for allocs */ |
885 | if (s->flags & SLAB_STORE_USER) | 909 | if (s->flags & SLAB_STORE_USER) |
886 | set_track(s, object, TRACK_ALLOC, addr); | 910 | set_track(s, object, TRACK_ALLOC, addr); |
887 | trace(s, page, object, 1); | 911 | trace(s, page, object, 1); |
888 | init_object(s, object, 1); | 912 | init_object(s, object, SLUB_RED_ACTIVE); |
889 | return 1; | 913 | return 1; |
890 | 914 | ||
891 | bad: | 915 | bad: |
@@ -902,8 +926,8 @@ bad: | |||
902 | return 0; | 926 | return 0; |
903 | } | 927 | } |
904 | 928 | ||
905 | static int free_debug_processing(struct kmem_cache *s, struct page *page, | 929 | static noinline int free_debug_processing(struct kmem_cache *s, |
906 | void *object, unsigned long addr) | 930 | struct page *page, void *object, unsigned long addr) |
907 | { | 931 | { |
908 | if (!check_slab(s, page)) | 932 | if (!check_slab(s, page)) |
909 | goto fail; | 933 | goto fail; |
@@ -918,7 +942,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page, | |||
918 | goto fail; | 942 | goto fail; |
919 | } | 943 | } |
920 | 944 | ||
921 | if (!check_object(s, page, object, 1)) | 945 | if (!check_object(s, page, object, SLUB_RED_ACTIVE)) |
922 | return 0; | 946 | return 0; |
923 | 947 | ||
924 | if (unlikely(s != page->slab)) { | 948 | if (unlikely(s != page->slab)) { |
@@ -942,7 +966,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page, | |||
942 | if (s->flags & SLAB_STORE_USER) | 966 | if (s->flags & SLAB_STORE_USER) |
943 | set_track(s, object, TRACK_FREE, addr); | 967 | set_track(s, object, TRACK_FREE, addr); |
944 | trace(s, page, object, 0); | 968 | trace(s, page, object, 0); |
945 | init_object(s, object, 0); | 969 | init_object(s, object, SLUB_RED_INACTIVE); |
946 | return 1; | 970 | return 1; |
947 | 971 | ||
948 | fail: | 972 | fail: |
@@ -1046,7 +1070,7 @@ static inline int free_debug_processing(struct kmem_cache *s, | |||
1046 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 1070 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
1047 | { return 1; } | 1071 | { return 1; } |
1048 | static inline int check_object(struct kmem_cache *s, struct page *page, | 1072 | static inline int check_object(struct kmem_cache *s, struct page *page, |
1049 | void *object, int active) { return 1; } | 1073 | void *object, u8 val) { return 1; } |
1050 | static inline void add_full(struct kmem_cache_node *n, struct page *page) {} | 1074 | static inline void add_full(struct kmem_cache_node *n, struct page *page) {} |
1051 | static inline unsigned long kmem_cache_flags(unsigned long objsize, | 1075 | static inline unsigned long kmem_cache_flags(unsigned long objsize, |
1052 | unsigned long flags, const char *name, | 1076 | unsigned long flags, const char *name, |
@@ -1066,7 +1090,19 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, | |||
1066 | int objects) {} | 1090 | int objects) {} |
1067 | static inline void dec_slabs_node(struct kmem_cache *s, int node, | 1091 | static inline void dec_slabs_node(struct kmem_cache *s, int node, |
1068 | int objects) {} | 1092 | int objects) {} |
1069 | #endif | 1093 | |
1094 | static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) | ||
1095 | { return 0; } | ||
1096 | |||
1097 | static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, | ||
1098 | void *object) {} | ||
1099 | |||
1100 | static inline void slab_free_hook(struct kmem_cache *s, void *x) {} | ||
1101 | |||
1102 | static inline void slab_free_hook_irq(struct kmem_cache *s, | ||
1103 | void *object) {} | ||
1104 | |||
1105 | #endif /* CONFIG_SLUB_DEBUG */ | ||
1070 | 1106 | ||
1071 | /* | 1107 | /* |
1072 | * Slab allocation and freeing | 1108 | * Slab allocation and freeing |
@@ -1194,7 +1230,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page) | |||
1194 | slab_pad_check(s, page); | 1230 | slab_pad_check(s, page); |
1195 | for_each_object(p, s, page_address(page), | 1231 | for_each_object(p, s, page_address(page), |
1196 | page->objects) | 1232 | page->objects) |
1197 | check_object(s, page, p, 0); | 1233 | check_object(s, page, p, SLUB_RED_INACTIVE); |
1198 | } | 1234 | } |
1199 | 1235 | ||
1200 | kmemcheck_free_shadow(page, compound_order(page)); | 1236 | kmemcheck_free_shadow(page, compound_order(page)); |
@@ -1274,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n, | |||
1274 | spin_unlock(&n->list_lock); | 1310 | spin_unlock(&n->list_lock); |
1275 | } | 1311 | } |
1276 | 1312 | ||
1313 | static inline void __remove_partial(struct kmem_cache_node *n, | ||
1314 | struct page *page) | ||
1315 | { | ||
1316 | list_del(&page->lru); | ||
1317 | n->nr_partial--; | ||
1318 | } | ||
1319 | |||
1277 | static void remove_partial(struct kmem_cache *s, struct page *page) | 1320 | static void remove_partial(struct kmem_cache *s, struct page *page) |
1278 | { | 1321 | { |
1279 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); | 1322 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
1280 | 1323 | ||
1281 | spin_lock(&n->list_lock); | 1324 | spin_lock(&n->list_lock); |
1282 | list_del(&page->lru); | 1325 | __remove_partial(n, page); |
1283 | n->nr_partial--; | ||
1284 | spin_unlock(&n->list_lock); | 1326 | spin_unlock(&n->list_lock); |
1285 | } | 1327 | } |
1286 | 1328 | ||
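__remove_partial() factors the unlink-and-count bookkeeping out of remove_partial() so that paths which already hold n->list_lock (lock_and_freeze_slab(), free_partial() and kmem_cache_shrink() later in this diff) can share it instead of open-coding the two lines. The shape of the refactor, condensed from the hunks:

	/* Caller must already hold n->list_lock. */
	static inline void __remove_partial(struct kmem_cache_node *n,
						struct page *page)
	{
		list_del(&page->lru);
		n->nr_partial--;
	}

	static void remove_partial(struct kmem_cache *s, struct page *page)
	{
		struct kmem_cache_node *n = get_node(s, page_to_nid(page));

		spin_lock(&n->list_lock);
		__remove_partial(n, page);
		spin_unlock(&n->list_lock);
	}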
@@ -1293,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n, | |||
1293 | struct page *page) | 1335 | struct page *page) |
1294 | { | 1336 | { |
1295 | if (slab_trylock(page)) { | 1337 | if (slab_trylock(page)) { |
1296 | list_del(&page->lru); | 1338 | __remove_partial(n, page); |
1297 | n->nr_partial--; | ||
1298 | __SetPageSlubFrozen(page); | 1339 | __SetPageSlubFrozen(page); |
1299 | return 1; | 1340 | return 1; |
1300 | } | 1341 | } |
@@ -1405,6 +1446,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node) | |||
1405 | * On exit the slab lock will have been dropped. | 1446 | * On exit the slab lock will have been dropped. |
1406 | */ | 1447 | */ |
1407 | static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) | 1448 | static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) |
1449 | __releases(bitlock) | ||
1408 | { | 1450 | { |
1409 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); | 1451 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
1410 | 1452 | ||
@@ -1447,6 +1489,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) | |||
1447 | * Remove the cpu slab | 1489 | * Remove the cpu slab |
1448 | */ | 1490 | */ |
1449 | static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) | 1491 | static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) |
1492 | __releases(bitlock) | ||
1450 | { | 1493 | { |
1451 | struct page *page = c->page; | 1494 | struct page *page = c->page; |
1452 | int tail = 1; | 1495 | int tail = 1; |
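The __releases(bitlock) lines added to unfreeze_slab() and deactivate_slab() are annotations for sparse's lock-context checking (make C=1): they record that the function returns with the per-slab bit lock dropped even though it did not take it, so the checker does not report an unbalanced context. Outside a sparse run the macro expands to nothing. A generic example of the pattern, with hypothetical names (struct my_ctx, finish_and_unlock) and kernel context assumed:

	struct my_ctx {
		spinlock_t lock;
		int pending;
	};

	/* Entered with ctx->lock held; sparse is told this function is
	 * the one that releases it. */
	static void finish_and_unlock(struct my_ctx *ctx)
		__releases(&ctx->lock)
	{
		ctx->pending = 0;	/* last bookkeeping under the lock */
		spin_unlock(&ctx->lock);
	}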
@@ -1647,6 +1690,7 @@ new_slab: | |||
1647 | goto load_freelist; | 1690 | goto load_freelist; |
1648 | } | 1691 | } |
1649 | 1692 | ||
1693 | gfpflags &= gfp_allowed_mask; | ||
1650 | if (gfpflags & __GFP_WAIT) | 1694 | if (gfpflags & __GFP_WAIT) |
1651 | local_irq_enable(); | 1695 | local_irq_enable(); |
1652 | 1696 | ||
@@ -1674,7 +1718,7 @@ debug: | |||
1674 | 1718 | ||
1675 | c->page->inuse++; | 1719 | c->page->inuse++; |
1676 | c->page->freelist = get_freepointer(s, object); | 1720 | c->page->freelist = get_freepointer(s, object); |
1677 | c->node = -1; | 1721 | c->node = NUMA_NO_NODE; |
1678 | goto unlock_out; | 1722 | goto unlock_out; |
1679 | } | 1723 | } |
1680 | 1724 | ||
@@ -1695,12 +1739,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1695 | struct kmem_cache_cpu *c; | 1739 | struct kmem_cache_cpu *c; |
1696 | unsigned long flags; | 1740 | unsigned long flags; |
1697 | 1741 | ||
1698 | gfpflags &= gfp_allowed_mask; | 1742 | if (slab_pre_alloc_hook(s, gfpflags)) |
1699 | |||
1700 | lockdep_trace_alloc(gfpflags); | ||
1701 | might_sleep_if(gfpflags & __GFP_WAIT); | ||
1702 | |||
1703 | if (should_failslab(s->objsize, gfpflags, s->flags)) | ||
1704 | return NULL; | 1743 | return NULL; |
1705 | 1744 | ||
1706 | local_irq_save(flags); | 1745 | local_irq_save(flags); |
@@ -1719,8 +1758,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, | |||
1719 | if (unlikely(gfpflags & __GFP_ZERO) && object) | 1758 | if (unlikely(gfpflags & __GFP_ZERO) && object) |
1720 | memset(object, 0, s->objsize); | 1759 | memset(object, 0, s->objsize); |
1721 | 1760 | ||
1722 | kmemcheck_slab_alloc(s, gfpflags, object, s->objsize); | 1761 | slab_post_alloc_hook(s, gfpflags, object); |
1723 | kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags); | ||
1724 | 1762 | ||
1725 | return object; | 1763 | return object; |
1726 | } | 1764 | } |
@@ -1754,7 +1792,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) | |||
1754 | return ret; | 1792 | return ret; |
1755 | } | 1793 | } |
1756 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 1794 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
1757 | #endif | ||
1758 | 1795 | ||
1759 | #ifdef CONFIG_TRACING | 1796 | #ifdef CONFIG_TRACING |
1760 | void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | 1797 | void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, |
@@ -1765,6 +1802,7 @@ void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | |||
1765 | } | 1802 | } |
1766 | EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); | 1803 | EXPORT_SYMBOL(kmem_cache_alloc_node_notrace); |
1767 | #endif | 1804 | #endif |
1805 | #endif | ||
1768 | 1806 | ||
1769 | /* | 1807 | /* |
1770 | * Slow patch handling. This may still be called frequently since objects | 1808 | * Slow patch handling. This may still be called frequently since objects |
@@ -1850,14 +1888,14 @@ static __always_inline void slab_free(struct kmem_cache *s, | |||
1850 | struct kmem_cache_cpu *c; | 1888 | struct kmem_cache_cpu *c; |
1851 | unsigned long flags; | 1889 | unsigned long flags; |
1852 | 1890 | ||
1853 | kmemleak_free_recursive(x, s->flags); | 1891 | slab_free_hook(s, x); |
1892 | |||
1854 | local_irq_save(flags); | 1893 | local_irq_save(flags); |
1855 | c = __this_cpu_ptr(s->cpu_slab); | 1894 | c = __this_cpu_ptr(s->cpu_slab); |
1856 | kmemcheck_slab_free(s, object, s->objsize); | 1895 | |
1857 | debug_check_no_locks_freed(object, s->objsize); | 1896 | slab_free_hook_irq(s, x); |
1858 | if (!(s->flags & SLAB_DEBUG_OBJECTS)) | 1897 | |
1859 | debug_check_no_obj_freed(object, s->objsize); | 1898 | if (likely(page == c->page && c->node != NUMA_NO_NODE)) { |
1860 | if (likely(page == c->page && c->node >= 0)) { | ||
1861 | set_freepointer(s, object, c->freelist); | 1899 | set_freepointer(s, object, c->freelist); |
1862 | c->freelist = object; | 1900 | c->freelist = object; |
1863 | stat(s, FREE_FASTPATH); | 1901 | stat(s, FREE_FASTPATH); |
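The free fast path now tests against the named NUMA_NO_NODE sentinel instead of the bare 'c->node >= 0' check; the debug branch of __slab_alloc() above parks c->node at NUMA_NO_NODE, which deliberately fails this test and routes every free of a debug-enabled cache through the checked __slab_free() slow path. Condensed from the hunk, with the surrounding slab_free() context assumed:

	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
		/* Object belongs to the current cpu slab and no debug
		 * processing is needed: push it on the lockless freelist. */
		set_freepointer(s, object, c->freelist);
		c->freelist = object;
		stat(s, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr);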
@@ -2062,26 +2100,18 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) | |||
2062 | #endif | 2100 | #endif |
2063 | } | 2101 | } |
2064 | 2102 | ||
2065 | static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]); | 2103 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) |
2066 | |||
2067 | static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) | ||
2068 | { | 2104 | { |
2069 | if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches) | 2105 | BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < |
2070 | /* | 2106 | SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); |
2071 | * Boot time creation of the kmalloc array. Use static per cpu data | ||
2072 | * since the per cpu allocator is not available yet. | ||
2073 | */ | ||
2074 | s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches); | ||
2075 | else | ||
2076 | s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); | ||
2077 | 2107 | ||
2078 | if (!s->cpu_slab) | 2108 | s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); |
2079 | return 0; | ||
2080 | 2109 | ||
2081 | return 1; | 2110 | return s->cpu_slab != NULL; |
2082 | } | 2111 | } |
2083 | 2112 | ||
2084 | #ifdef CONFIG_NUMA | 2113 | static struct kmem_cache *kmem_cache_node; |
2114 | |||
2085 | /* | 2115 | /* |
2086 | * No kmalloc_node yet so do it by hand. We know that this is the first | 2116 | * No kmalloc_node yet so do it by hand. We know that this is the first |
2087 | * slab on the node for this slabcache. There are no concurrent accesses | 2117 | * slab on the node for this slabcache. There are no concurrent accesses |
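Since the percpu allocator is now available on UP as well ('percpu: use percpu allocator on UP too'), alloc_kmem_cache_cpus() no longer needs the static kmalloc_percpu[] bootstrap array or a gfp argument; the BUILD_BUG_ON asserts that the early percpu reserve can hold one struct kmem_cache_cpu per boot-time kmalloc cache, since those caches are created before the full percpu allocator is initialized. Condensed from the hunk:

	static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
	{
		BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
				SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));

		s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);

		return s->cpu_slab != NULL;
	}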
@@ -2091,15 +2121,15 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags) | |||
2091 | * when allocating for the kmalloc_node_cache. This is used for bootstrapping | 2121 | * when allocating for the kmalloc_node_cache. This is used for bootstrapping |
2092 | * memory on a fresh node that has no slab structures yet. | 2122 | * memory on a fresh node that has no slab structures yet. |
2093 | */ | 2123 | */ |
2094 | static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) | 2124 | static void early_kmem_cache_node_alloc(int node) |
2095 | { | 2125 | { |
2096 | struct page *page; | 2126 | struct page *page; |
2097 | struct kmem_cache_node *n; | 2127 | struct kmem_cache_node *n; |
2098 | unsigned long flags; | 2128 | unsigned long flags; |
2099 | 2129 | ||
2100 | BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node)); | 2130 | BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); |
2101 | 2131 | ||
2102 | page = new_slab(kmalloc_caches, gfpflags, node); | 2132 | page = new_slab(kmem_cache_node, GFP_NOWAIT, node); |
2103 | 2133 | ||
2104 | BUG_ON(!page); | 2134 | BUG_ON(!page); |
2105 | if (page_to_nid(page) != node) { | 2135 | if (page_to_nid(page) != node) { |
@@ -2111,15 +2141,15 @@ static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node) | |||
2111 | 2141 | ||
2112 | n = page->freelist; | 2142 | n = page->freelist; |
2113 | BUG_ON(!n); | 2143 | BUG_ON(!n); |
2114 | page->freelist = get_freepointer(kmalloc_caches, n); | 2144 | page->freelist = get_freepointer(kmem_cache_node, n); |
2115 | page->inuse++; | 2145 | page->inuse++; |
2116 | kmalloc_caches->node[node] = n; | 2146 | kmem_cache_node->node[node] = n; |
2117 | #ifdef CONFIG_SLUB_DEBUG | 2147 | #ifdef CONFIG_SLUB_DEBUG |
2118 | init_object(kmalloc_caches, n, 1); | 2148 | init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); |
2119 | init_tracking(kmalloc_caches, n); | 2149 | init_tracking(kmem_cache_node, n); |
2120 | #endif | 2150 | #endif |
2121 | init_kmem_cache_node(n, kmalloc_caches); | 2151 | init_kmem_cache_node(n, kmem_cache_node); |
2122 | inc_slabs_node(kmalloc_caches, node, page->objects); | 2152 | inc_slabs_node(kmem_cache_node, node, page->objects); |
2123 | 2153 | ||
2124 | /* | 2154 | /* |
2125 | * lockdep requires consistent irq usage for each lock | 2155 | * lockdep requires consistent irq usage for each lock |
@@ -2137,13 +2167,15 @@ static void free_kmem_cache_nodes(struct kmem_cache *s) | |||
2137 | 2167 | ||
2138 | for_each_node_state(node, N_NORMAL_MEMORY) { | 2168 | for_each_node_state(node, N_NORMAL_MEMORY) { |
2139 | struct kmem_cache_node *n = s->node[node]; | 2169 | struct kmem_cache_node *n = s->node[node]; |
2170 | |||
2140 | if (n) | 2171 | if (n) |
2141 | kmem_cache_free(kmalloc_caches, n); | 2172 | kmem_cache_free(kmem_cache_node, n); |
2173 | |||
2142 | s->node[node] = NULL; | 2174 | s->node[node] = NULL; |
2143 | } | 2175 | } |
2144 | } | 2176 | } |
2145 | 2177 | ||
2146 | static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) | 2178 | static int init_kmem_cache_nodes(struct kmem_cache *s) |
2147 | { | 2179 | { |
2148 | int node; | 2180 | int node; |
2149 | 2181 | ||
@@ -2151,11 +2183,11 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) | |||
2151 | struct kmem_cache_node *n; | 2183 | struct kmem_cache_node *n; |
2152 | 2184 | ||
2153 | if (slab_state == DOWN) { | 2185 | if (slab_state == DOWN) { |
2154 | early_kmem_cache_node_alloc(gfpflags, node); | 2186 | early_kmem_cache_node_alloc(node); |
2155 | continue; | 2187 | continue; |
2156 | } | 2188 | } |
2157 | n = kmem_cache_alloc_node(kmalloc_caches, | 2189 | n = kmem_cache_alloc_node(kmem_cache_node, |
2158 | gfpflags, node); | 2190 | GFP_KERNEL, node); |
2159 | 2191 | ||
2160 | if (!n) { | 2192 | if (!n) { |
2161 | free_kmem_cache_nodes(s); | 2193 | free_kmem_cache_nodes(s); |
@@ -2167,17 +2199,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) | |||
2167 | } | 2199 | } |
2168 | return 1; | 2200 | return 1; |
2169 | } | 2201 | } |
2170 | #else | ||
2171 | static void free_kmem_cache_nodes(struct kmem_cache *s) | ||
2172 | { | ||
2173 | } | ||
2174 | |||
2175 | static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) | ||
2176 | { | ||
2177 | init_kmem_cache_node(&s->local_node, s); | ||
2178 | return 1; | ||
2179 | } | ||
2180 | #endif | ||
2181 | 2202 | ||
2182 | static void set_min_partial(struct kmem_cache *s, unsigned long min) | 2203 | static void set_min_partial(struct kmem_cache *s, unsigned long min) |
2183 | { | 2204 | { |
@@ -2312,7 +2333,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) | |||
2312 | 2333 | ||
2313 | } | 2334 | } |
2314 | 2335 | ||
2315 | static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, | 2336 | static int kmem_cache_open(struct kmem_cache *s, |
2316 | const char *name, size_t size, | 2337 | const char *name, size_t size, |
2317 | size_t align, unsigned long flags, | 2338 | size_t align, unsigned long flags, |
2318 | void (*ctor)(void *)) | 2339 | void (*ctor)(void *)) |
@@ -2348,10 +2369,10 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, | |||
2348 | #ifdef CONFIG_NUMA | 2369 | #ifdef CONFIG_NUMA |
2349 | s->remote_node_defrag_ratio = 1000; | 2370 | s->remote_node_defrag_ratio = 1000; |
2350 | #endif | 2371 | #endif |
2351 | if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA)) | 2372 | if (!init_kmem_cache_nodes(s)) |
2352 | goto error; | 2373 | goto error; |
2353 | 2374 | ||
2354 | if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA)) | 2375 | if (alloc_kmem_cache_cpus(s)) |
2355 | return 1; | 2376 | return 1; |
2356 | 2377 | ||
2357 | free_kmem_cache_nodes(s); | 2378 | free_kmem_cache_nodes(s); |
@@ -2414,9 +2435,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, | |||
2414 | #ifdef CONFIG_SLUB_DEBUG | 2435 | #ifdef CONFIG_SLUB_DEBUG |
2415 | void *addr = page_address(page); | 2436 | void *addr = page_address(page); |
2416 | void *p; | 2437 | void *p; |
2417 | long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long), | 2438 | unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * |
2418 | GFP_ATOMIC); | 2439 | sizeof(long), GFP_ATOMIC); |
2419 | |||
2420 | if (!map) | 2440 | if (!map) |
2421 | return; | 2441 | return; |
2422 | slab_err(s, page, "%s", text); | 2442 | slab_err(s, page, "%s", text); |
@@ -2448,9 +2468,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) | |||
2448 | spin_lock_irqsave(&n->list_lock, flags); | 2468 | spin_lock_irqsave(&n->list_lock, flags); |
2449 | list_for_each_entry_safe(page, h, &n->partial, lru) { | 2469 | list_for_each_entry_safe(page, h, &n->partial, lru) { |
2450 | if (!page->inuse) { | 2470 | if (!page->inuse) { |
2451 | list_del(&page->lru); | 2471 | __remove_partial(n, page); |
2452 | discard_slab(s, page); | 2472 | discard_slab(s, page); |
2453 | n->nr_partial--; | ||
2454 | } else { | 2473 | } else { |
2455 | list_slab_objects(s, page, | 2474 | list_slab_objects(s, page, |
2456 | "Objects remaining on kmem_cache_close()"); | 2475 | "Objects remaining on kmem_cache_close()"); |
@@ -2507,9 +2526,15 @@ EXPORT_SYMBOL(kmem_cache_destroy); | |||
2507 | * Kmalloc subsystem | 2526 | * Kmalloc subsystem |
2508 | *******************************************************************/ | 2527 | *******************************************************************/ |
2509 | 2528 | ||
2510 | struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned; | 2529 | struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; |
2511 | EXPORT_SYMBOL(kmalloc_caches); | 2530 | EXPORT_SYMBOL(kmalloc_caches); |
2512 | 2531 | ||
2532 | static struct kmem_cache *kmem_cache; | ||
2533 | |||
2534 | #ifdef CONFIG_ZONE_DMA | ||
2535 | static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; | ||
2536 | #endif | ||
2537 | |||
2513 | static int __init setup_slub_min_order(char *str) | 2538 | static int __init setup_slub_min_order(char *str) |
2514 | { | 2539 | { |
2515 | get_option(&str, &slub_min_order); | 2540 | get_option(&str, &slub_min_order); |
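kmalloc_caches changes from a fixed array of struct kmem_cache into an array of pointers that kmem_cache_init() fills in via create_kmalloc_cache(), with the DMA variants kept in their own kmalloc_dma_caches[] table; get_slab() further down therefore returns kmalloc_caches[index] rather than &kmalloc_caches[index]. Schematically, condensed from this and the later hunks:

	struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];	/* filled at boot */
	#ifdef CONFIG_ZONE_DMA
	static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
	#endif

	/* lookup inside get_slab() */
	if (unlikely(flags & SLUB_DMA))
		return kmalloc_dma_caches[index];
	return kmalloc_caches[index];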
@@ -2546,116 +2571,29 @@ static int __init setup_slub_nomerge(char *str) | |||
2546 | 2571 | ||
2547 | __setup("slub_nomerge", setup_slub_nomerge); | 2572 | __setup("slub_nomerge", setup_slub_nomerge); |
2548 | 2573 | ||
2549 | static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s, | 2574 | static struct kmem_cache *__init create_kmalloc_cache(const char *name, |
2550 | const char *name, int size, gfp_t gfp_flags) | 2575 | int size, unsigned int flags) |
2551 | { | 2576 | { |
2552 | unsigned int flags = 0; | 2577 | struct kmem_cache *s; |
2553 | 2578 | ||
2554 | if (gfp_flags & SLUB_DMA) | 2579 | s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); |
2555 | flags = SLAB_CACHE_DMA; | ||
2556 | 2580 | ||
2557 | /* | 2581 | /* |
2558 | * This function is called with IRQs disabled during early-boot on | 2582 | * This function is called with IRQs disabled during early-boot on |
2559 | * single CPU so there's no need to take slub_lock here. | 2583 | * single CPU so there's no need to take slub_lock here. |
2560 | */ | 2584 | */ |
2561 | if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN, | 2585 | if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, |
2562 | flags, NULL)) | 2586 | flags, NULL)) |
2563 | goto panic; | 2587 | goto panic; |
2564 | 2588 | ||
2565 | list_add(&s->list, &slab_caches); | 2589 | list_add(&s->list, &slab_caches); |
2566 | |||
2567 | if (sysfs_slab_add(s)) | ||
2568 | goto panic; | ||
2569 | return s; | 2590 | return s; |
2570 | 2591 | ||
2571 | panic: | 2592 | panic: |
2572 | panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); | 2593 | panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); |
2594 | return NULL; | ||
2573 | } | 2595 | } |
2574 | 2596 | ||
2575 | #ifdef CONFIG_ZONE_DMA | ||
2576 | static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT]; | ||
2577 | |||
2578 | static void sysfs_add_func(struct work_struct *w) | ||
2579 | { | ||
2580 | struct kmem_cache *s; | ||
2581 | |||
2582 | down_write(&slub_lock); | ||
2583 | list_for_each_entry(s, &slab_caches, list) { | ||
2584 | if (s->flags & __SYSFS_ADD_DEFERRED) { | ||
2585 | s->flags &= ~__SYSFS_ADD_DEFERRED; | ||
2586 | sysfs_slab_add(s); | ||
2587 | } | ||
2588 | } | ||
2589 | up_write(&slub_lock); | ||
2590 | } | ||
2591 | |||
2592 | static DECLARE_WORK(sysfs_add_work, sysfs_add_func); | ||
2593 | |||
2594 | static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) | ||
2595 | { | ||
2596 | struct kmem_cache *s; | ||
2597 | char *text; | ||
2598 | size_t realsize; | ||
2599 | unsigned long slabflags; | ||
2600 | int i; | ||
2601 | |||
2602 | s = kmalloc_caches_dma[index]; | ||
2603 | if (s) | ||
2604 | return s; | ||
2605 | |||
2606 | /* Dynamically create dma cache */ | ||
2607 | if (flags & __GFP_WAIT) | ||
2608 | down_write(&slub_lock); | ||
2609 | else { | ||
2610 | if (!down_write_trylock(&slub_lock)) | ||
2611 | goto out; | ||
2612 | } | ||
2613 | |||
2614 | if (kmalloc_caches_dma[index]) | ||
2615 | goto unlock_out; | ||
2616 | |||
2617 | realsize = kmalloc_caches[index].objsize; | ||
2618 | text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", | ||
2619 | (unsigned int)realsize); | ||
2620 | |||
2621 | s = NULL; | ||
2622 | for (i = 0; i < KMALLOC_CACHES; i++) | ||
2623 | if (!kmalloc_caches[i].size) | ||
2624 | break; | ||
2625 | |||
2626 | BUG_ON(i >= KMALLOC_CACHES); | ||
2627 | s = kmalloc_caches + i; | ||
2628 | |||
2629 | /* | ||
2630 | * Must defer sysfs creation to a workqueue because we don't know | ||
2631 | * what context we are called from. Before sysfs comes up, we don't | ||
2632 | * need to do anything because our sysfs initcall will start by | ||
2633 | * adding all existing slabs to sysfs. | ||
2634 | */ | ||
2635 | slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK; | ||
2636 | if (slab_state >= SYSFS) | ||
2637 | slabflags |= __SYSFS_ADD_DEFERRED; | ||
2638 | |||
2639 | if (!text || !kmem_cache_open(s, flags, text, | ||
2640 | realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) { | ||
2641 | s->size = 0; | ||
2642 | kfree(text); | ||
2643 | goto unlock_out; | ||
2644 | } | ||
2645 | |||
2646 | list_add(&s->list, &slab_caches); | ||
2647 | kmalloc_caches_dma[index] = s; | ||
2648 | |||
2649 | if (slab_state >= SYSFS) | ||
2650 | schedule_work(&sysfs_add_work); | ||
2651 | |||
2652 | unlock_out: | ||
2653 | up_write(&slub_lock); | ||
2654 | out: | ||
2655 | return kmalloc_caches_dma[index]; | ||
2656 | } | ||
2657 | #endif | ||
2658 | |||
2659 | /* | 2597 | /* |
2660 | * Conversion table for small slabs sizes / 8 to the index in the | 2598 | * Conversion table for small slabs sizes / 8 to the index in the |
2661 | * kmalloc array. This is necessary for slabs < 192 since we have non power | 2599 | * kmalloc array. This is necessary for slabs < 192 since we have non power |
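All of the lazy DMA-cache machinery removed above (dma_kmalloc_cache(), the deferred sysfs_add_func() work item, and the __SYSFS_ADD_DEFERRED flag dropped at the top of this diff) is gone; DMA kmalloc caches are instead created up front at the end of kmem_cache_init(), one per populated kmalloc size, as a later hunk shows. Condensed:

	#ifdef CONFIG_ZONE_DMA
	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s && s->size) {
			char *name = kasprintf(GFP_NOWAIT,
					"dma-kmalloc-%d", s->objsize);

			BUG_ON(!name);
			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
					s->objsize, SLAB_CACHE_DMA);
		}
	}
	#endif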
@@ -2708,10 +2646,10 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags) | |||
2708 | 2646 | ||
2709 | #ifdef CONFIG_ZONE_DMA | 2647 | #ifdef CONFIG_ZONE_DMA |
2710 | if (unlikely((flags & SLUB_DMA))) | 2648 | if (unlikely((flags & SLUB_DMA))) |
2711 | return dma_kmalloc_cache(index, flags); | 2649 | return kmalloc_dma_caches[index]; |
2712 | 2650 | ||
2713 | #endif | 2651 | #endif |
2714 | return &kmalloc_caches[index]; | 2652 | return kmalloc_caches[index]; |
2715 | } | 2653 | } |
2716 | 2654 | ||
2717 | void *__kmalloc(size_t size, gfp_t flags) | 2655 | void *__kmalloc(size_t size, gfp_t flags) |
@@ -2735,6 +2673,7 @@ void *__kmalloc(size_t size, gfp_t flags) | |||
2735 | } | 2673 | } |
2736 | EXPORT_SYMBOL(__kmalloc); | 2674 | EXPORT_SYMBOL(__kmalloc); |
2737 | 2675 | ||
2676 | #ifdef CONFIG_NUMA | ||
2738 | static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | 2677 | static void *kmalloc_large_node(size_t size, gfp_t flags, int node) |
2739 | { | 2678 | { |
2740 | struct page *page; | 2679 | struct page *page; |
@@ -2749,7 +2688,6 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) | |||
2749 | return ptr; | 2688 | return ptr; |
2750 | } | 2689 | } |
2751 | 2690 | ||
2752 | #ifdef CONFIG_NUMA | ||
2753 | void *__kmalloc_node(size_t size, gfp_t flags, int node) | 2691 | void *__kmalloc_node(size_t size, gfp_t flags, int node) |
2754 | { | 2692 | { |
2755 | struct kmem_cache *s; | 2693 | struct kmem_cache *s; |
@@ -2889,8 +2827,7 @@ int kmem_cache_shrink(struct kmem_cache *s) | |||
2889 | * may have freed the last object and be | 2827 | * may have freed the last object and be |
2890 | * waiting to release the slab. | 2828 | * waiting to release the slab. |
2891 | */ | 2829 | */ |
2892 | list_del(&page->lru); | 2830 | __remove_partial(n, page); |
2893 | n->nr_partial--; | ||
2894 | slab_unlock(page); | 2831 | slab_unlock(page); |
2895 | discard_slab(s, page); | 2832 | discard_slab(s, page); |
2896 | } else { | 2833 | } else { |
@@ -2914,7 +2851,7 @@ int kmem_cache_shrink(struct kmem_cache *s) | |||
2914 | } | 2851 | } |
2915 | EXPORT_SYMBOL(kmem_cache_shrink); | 2852 | EXPORT_SYMBOL(kmem_cache_shrink); |
2916 | 2853 | ||
2917 | #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) | 2854 | #if defined(CONFIG_MEMORY_HOTPLUG) |
2918 | static int slab_mem_going_offline_callback(void *arg) | 2855 | static int slab_mem_going_offline_callback(void *arg) |
2919 | { | 2856 | { |
2920 | struct kmem_cache *s; | 2857 | struct kmem_cache *s; |
@@ -2956,7 +2893,7 @@ static void slab_mem_offline_callback(void *arg) | |||
2956 | BUG_ON(slabs_node(s, offline_node)); | 2893 | BUG_ON(slabs_node(s, offline_node)); |
2957 | 2894 | ||
2958 | s->node[offline_node] = NULL; | 2895 | s->node[offline_node] = NULL; |
2959 | kmem_cache_free(kmalloc_caches, n); | 2896 | kmem_cache_free(kmem_cache_node, n); |
2960 | } | 2897 | } |
2961 | } | 2898 | } |
2962 | up_read(&slub_lock); | 2899 | up_read(&slub_lock); |
@@ -2989,7 +2926,7 @@ static int slab_mem_going_online_callback(void *arg) | |||
2989 | * since memory is not yet available from the node that | 2926 | * since memory is not yet available from the node that |
2990 | * is brought up. | 2927 | * is brought up. |
2991 | */ | 2928 | */ |
2992 | n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL); | 2929 | n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); |
2993 | if (!n) { | 2930 | if (!n) { |
2994 | ret = -ENOMEM; | 2931 | ret = -ENOMEM; |
2995 | goto out; | 2932 | goto out; |
@@ -3035,46 +2972,92 @@ static int slab_memory_callback(struct notifier_block *self, | |||
3035 | * Basic setup of slabs | 2972 | * Basic setup of slabs |
3036 | *******************************************************************/ | 2973 | *******************************************************************/ |
3037 | 2974 | ||
2975 | /* | ||
2976 | * Used for early kmem_cache structures that were allocated using | ||
2977 | * the page allocator | ||
2978 | */ | ||
2979 | |||
2980 | static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) | ||
2981 | { | ||
2982 | int node; | ||
2983 | |||
2984 | list_add(&s->list, &slab_caches); | ||
2985 | s->refcount = -1; | ||
2986 | |||
2987 | for_each_node_state(node, N_NORMAL_MEMORY) { | ||
2988 | struct kmem_cache_node *n = get_node(s, node); | ||
2989 | struct page *p; | ||
2990 | |||
2991 | if (n) { | ||
2992 | list_for_each_entry(p, &n->partial, lru) | ||
2993 | p->slab = s; | ||
2994 | |||
2995 | #ifdef CONFIG_SLAB_DEBUG | ||
2996 | list_for_each_entry(p, &n->full, lru) | ||
2997 | p->slab = s; | ||
2998 | #endif | ||
2999 | } | ||
3000 | } | ||
3001 | } | ||
3002 | |||
3038 | void __init kmem_cache_init(void) | 3003 | void __init kmem_cache_init(void) |
3039 | { | 3004 | { |
3040 | int i; | 3005 | int i; |
3041 | int caches = 0; | 3006 | int caches = 0; |
3007 | struct kmem_cache *temp_kmem_cache; | ||
3008 | int order; | ||
3009 | struct kmem_cache *temp_kmem_cache_node; | ||
3010 | unsigned long kmalloc_size; | ||
3011 | |||
3012 | kmem_size = offsetof(struct kmem_cache, node) + | ||
3013 | nr_node_ids * sizeof(struct kmem_cache_node *); | ||
3014 | |||
3015 | /* Allocate two kmem_caches from the page allocator */ | ||
3016 | kmalloc_size = ALIGN(kmem_size, cache_line_size()); | ||
3017 | order = get_order(2 * kmalloc_size); | ||
3018 | kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order); | ||
3042 | 3019 | ||
3043 | #ifdef CONFIG_NUMA | ||
3044 | /* | 3020 | /* |
3045 | * Must first have the slab cache available for the allocations of the | 3021 | * Must first have the slab cache available for the allocations of the |
3046 | * struct kmem_cache_node's. There is special bootstrap code in | 3022 | * struct kmem_cache_node's. There is special bootstrap code in |
3047 | * kmem_cache_open for slab_state == DOWN. | 3023 | * kmem_cache_open for slab_state == DOWN. |
3048 | */ | 3024 | */ |
3049 | create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", | 3025 | kmem_cache_node = (void *)kmem_cache + kmalloc_size; |
3050 | sizeof(struct kmem_cache_node), GFP_NOWAIT); | 3026 | |
3051 | kmalloc_caches[0].refcount = -1; | 3027 | kmem_cache_open(kmem_cache_node, "kmem_cache_node", |
3052 | caches++; | 3028 | sizeof(struct kmem_cache_node), |
3029 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | ||
3053 | 3030 | ||
3054 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); | 3031 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); |
3055 | #endif | ||
3056 | 3032 | ||
3057 | /* Able to allocate the per node structures */ | 3033 | /* Able to allocate the per node structures */ |
3058 | slab_state = PARTIAL; | 3034 | slab_state = PARTIAL; |
3059 | 3035 | ||
3060 | /* Caches that are not of the two-to-the-power-of size */ | 3036 | temp_kmem_cache = kmem_cache; |
3061 | if (KMALLOC_MIN_SIZE <= 32) { | 3037 | kmem_cache_open(kmem_cache, "kmem_cache", kmem_size, |
3062 | create_kmalloc_cache(&kmalloc_caches[1], | 3038 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); |
3063 | "kmalloc-96", 96, GFP_NOWAIT); | 3039 | kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); |
3064 | caches++; | 3040 | memcpy(kmem_cache, temp_kmem_cache, kmem_size); |
3065 | } | ||
3066 | if (KMALLOC_MIN_SIZE <= 64) { | ||
3067 | create_kmalloc_cache(&kmalloc_caches[2], | ||
3068 | "kmalloc-192", 192, GFP_NOWAIT); | ||
3069 | caches++; | ||
3070 | } | ||
3071 | 3041 | ||
3072 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { | 3042 | /* |
3073 | create_kmalloc_cache(&kmalloc_caches[i], | 3043 | * Allocate kmem_cache_node properly from the kmem_cache slab. |
3074 | "kmalloc", 1 << i, GFP_NOWAIT); | 3044 | * kmem_cache_node is separately allocated so no need to |
3075 | caches++; | 3045 | * update any list pointers. |
3076 | } | 3046 | */ |
3047 | temp_kmem_cache_node = kmem_cache_node; | ||
3048 | |||
3049 | kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); | ||
3050 | memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); | ||
3051 | |||
3052 | kmem_cache_bootstrap_fixup(kmem_cache_node); | ||
3077 | 3053 | ||
3054 | caches++; | ||
3055 | kmem_cache_bootstrap_fixup(kmem_cache); | ||
3056 | caches++; | ||
3057 | /* Free temporary boot structure */ | ||
3058 | free_pages((unsigned long)temp_kmem_cache, order); | ||
3059 | |||
3060 | /* Now we can use the kmem_cache to allocate kmalloc slabs */ | ||
3078 | 3061 | ||
3079 | /* | 3062 | /* |
3080 | * Patch up the size_index table if we have strange large alignment | 3063 | * Patch up the size_index table if we have strange large alignment |
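kmem_cache_init() now bootstraps in two stages: the kmem_cache and kmem_cache_node structures are first placed in raw pages from the page allocator (nothing exists yet that could allocate them), then, once those caches are open, both are re-allocated from the real kmem_cache slab, the page->slab back-pointers of slabs created during bootstrap are repaired by kmem_cache_bootstrap_fixup(), and the temporary pages are freed. A compressed outline of the sequence, condensed from the hunk above with declarations and the hotplug notifier registration omitted:

	/* 1. Carve two kmem_cache structures out of raw pages. */
	kmalloc_size = ALIGN(kmem_size, cache_line_size());
	order = get_order(2 * kmalloc_size);
	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
	kmem_cache_node = (void *)kmem_cache + kmalloc_size;

	/* 2. Open kmem_cache_node first: slab_state == DOWN makes
	 *    kmem_cache_open() use early_kmem_cache_node_alloc(). */
	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
		sizeof(struct kmem_cache_node),
		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	slab_state = PARTIAL;

	/* 3. Open kmem_cache itself, then migrate both structures into
	 *    objects allocated from the now-working kmem_cache slab. */
	temp_kmem_cache = kmem_cache;
	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
	memcpy(kmem_cache, temp_kmem_cache, kmem_size);

	temp_kmem_cache_node = kmem_cache_node;
	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);

	/* 4. Repair page->slab back-pointers, then drop the raw pages. */
	kmem_cache_bootstrap_fixup(kmem_cache_node);
	kmem_cache_bootstrap_fixup(kmem_cache);
	free_pages((unsigned long)temp_kmem_cache, order);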
@@ -3114,26 +3097,60 @@ void __init kmem_cache_init(void) | |||
3114 | size_index[size_index_elem(i)] = 8; | 3097 | size_index[size_index_elem(i)] = 8; |
3115 | } | 3098 | } |
3116 | 3099 | ||
3100 | /* Caches that are not of the two-to-the-power-of size */ | ||
3101 | if (KMALLOC_MIN_SIZE <= 32) { | ||
3102 | kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0); | ||
3103 | caches++; | ||
3104 | } | ||
3105 | |||
3106 | if (KMALLOC_MIN_SIZE <= 64) { | ||
3107 | kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0); | ||
3108 | caches++; | ||
3109 | } | ||
3110 | |||
3111 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { | ||
3112 | kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0); | ||
3113 | caches++; | ||
3114 | } | ||
3115 | |||
3117 | slab_state = UP; | 3116 | slab_state = UP; |
3118 | 3117 | ||
3119 | /* Provide the correct kmalloc names now that the caches are up */ | 3118 | /* Provide the correct kmalloc names now that the caches are up */ |
3119 | if (KMALLOC_MIN_SIZE <= 32) { | ||
3120 | kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT); | ||
3121 | BUG_ON(!kmalloc_caches[1]->name); | ||
3122 | } | ||
3123 | |||
3124 | if (KMALLOC_MIN_SIZE <= 64) { | ||
3125 | kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT); | ||
3126 | BUG_ON(!kmalloc_caches[2]->name); | ||
3127 | } | ||
3128 | |||
3120 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { | 3129 | for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) { |
3121 | char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); | 3130 | char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i); |
3122 | 3131 | ||
3123 | BUG_ON(!s); | 3132 | BUG_ON(!s); |
3124 | kmalloc_caches[i].name = s; | 3133 | kmalloc_caches[i]->name = s; |
3125 | } | 3134 | } |
3126 | 3135 | ||
3127 | #ifdef CONFIG_SMP | 3136 | #ifdef CONFIG_SMP |
3128 | register_cpu_notifier(&slab_notifier); | 3137 | register_cpu_notifier(&slab_notifier); |
3129 | #endif | 3138 | #endif |
3130 | #ifdef CONFIG_NUMA | ||
3131 | kmem_size = offsetof(struct kmem_cache, node) + | ||
3132 | nr_node_ids * sizeof(struct kmem_cache_node *); | ||
3133 | #else | ||
3134 | kmem_size = sizeof(struct kmem_cache); | ||
3135 | #endif | ||
3136 | 3139 | ||
3140 | #ifdef CONFIG_ZONE_DMA | ||
3141 | for (i = 0; i < SLUB_PAGE_SHIFT; i++) { | ||
3142 | struct kmem_cache *s = kmalloc_caches[i]; | ||
3143 | |||
3144 | if (s && s->size) { | ||
3145 | char *name = kasprintf(GFP_NOWAIT, | ||
3146 | "dma-kmalloc-%d", s->objsize); | ||
3147 | |||
3148 | BUG_ON(!name); | ||
3149 | kmalloc_dma_caches[i] = create_kmalloc_cache(name, | ||
3150 | s->objsize, SLAB_CACHE_DMA); | ||
3151 | } | ||
3152 | } | ||
3153 | #endif | ||
3137 | printk(KERN_INFO | 3154 | printk(KERN_INFO |
3138 | "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," | 3155 | "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," |
3139 | " CPUs=%d, Nodes=%d\n", | 3156 | " CPUs=%d, Nodes=%d\n", |
@@ -3211,6 +3228,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
3211 | size_t align, unsigned long flags, void (*ctor)(void *)) | 3228 | size_t align, unsigned long flags, void (*ctor)(void *)) |
3212 | { | 3229 | { |
3213 | struct kmem_cache *s; | 3230 | struct kmem_cache *s; |
3231 | char *n; | ||
3214 | 3232 | ||
3215 | if (WARN_ON(!name)) | 3233 | if (WARN_ON(!name)) |
3216 | return NULL; | 3234 | return NULL; |
@@ -3234,19 +3252,25 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
3234 | return s; | 3252 | return s; |
3235 | } | 3253 | } |
3236 | 3254 | ||
3255 | n = kstrdup(name, GFP_KERNEL); | ||
3256 | if (!n) | ||
3257 | goto err; | ||
3258 | |||
3237 | s = kmalloc(kmem_size, GFP_KERNEL); | 3259 | s = kmalloc(kmem_size, GFP_KERNEL); |
3238 | if (s) { | 3260 | if (s) { |
3239 | if (kmem_cache_open(s, GFP_KERNEL, name, | 3261 | if (kmem_cache_open(s, n, |
3240 | size, align, flags, ctor)) { | 3262 | size, align, flags, ctor)) { |
3241 | list_add(&s->list, &slab_caches); | 3263 | list_add(&s->list, &slab_caches); |
3242 | if (sysfs_slab_add(s)) { | 3264 | if (sysfs_slab_add(s)) { |
3243 | list_del(&s->list); | 3265 | list_del(&s->list); |
3266 | kfree(n); | ||
3244 | kfree(s); | 3267 | kfree(s); |
3245 | goto err; | 3268 | goto err; |
3246 | } | 3269 | } |
3247 | up_write(&slub_lock); | 3270 | up_write(&slub_lock); |
3248 | return s; | 3271 | return s; |
3249 | } | 3272 | } |
3273 | kfree(n); | ||
3250 | kfree(s); | 3274 | kfree(s); |
3251 | } | 3275 | } |
3252 | up_write(&slub_lock); | 3276 | up_write(&slub_lock); |
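kmem_cache_create() now kstrdup()s the caller-supplied name before passing it to kmem_cache_open(), and the matching kfree(s->name) appears in the sysfs_slab_remove() stub near the top of this diff; the intent (per 'SLUB: Fix merged slab cache names') appears to be that the cache owns its name, since with merging/aliasing the cache and its sysfs entry can outlive the caller's string. A sketch of the ownership rule, with locking and sysfs registration elided:

	char *n = kstrdup(name, GFP_KERNEL);	/* private copy owned by the cache */

	if (!n)
		goto err;

	s = kmalloc(kmem_size, GFP_KERNEL);
	if (s && kmem_cache_open(s, n, size, align, flags, ctor)) {
		list_add(&s->list, &slab_caches);
		return s;	/* teardown later kfree()s s->name */
	}

	kfree(n);		/* failure: release the copy again */
	kfree(s);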
@@ -3318,6 +3342,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | |||
3318 | return ret; | 3342 | return ret; |
3319 | } | 3343 | } |
3320 | 3344 | ||
3345 | #ifdef CONFIG_NUMA | ||
3321 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | 3346 | void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
3322 | int node, unsigned long caller) | 3347 | int node, unsigned long caller) |
3323 | { | 3348 | { |
@@ -3346,8 +3371,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, | |||
3346 | 3371 | ||
3347 | return ret; | 3372 | return ret; |
3348 | } | 3373 | } |
3374 | #endif | ||
3349 | 3375 | ||
3350 | #ifdef CONFIG_SLUB_DEBUG | 3376 | #ifdef CONFIG_SYSFS |
3351 | static int count_inuse(struct page *page) | 3377 | static int count_inuse(struct page *page) |
3352 | { | 3378 | { |
3353 | return page->inuse; | 3379 | return page->inuse; |
@@ -3357,7 +3383,9 @@ static int count_total(struct page *page) | |||
3357 | { | 3383 | { |
3358 | return page->objects; | 3384 | return page->objects; |
3359 | } | 3385 | } |
3386 | #endif | ||
3360 | 3387 | ||
3388 | #ifdef CONFIG_SLUB_DEBUG | ||
3361 | static int validate_slab(struct kmem_cache *s, struct page *page, | 3389 | static int validate_slab(struct kmem_cache *s, struct page *page, |
3362 | unsigned long *map) | 3390 | unsigned long *map) |
3363 | { | 3391 | { |
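The #ifdefs in this area split along a new line: anything the sysfs interface itself needs (sysfs_slab_add() and friends, count_inuse(), count_total()) now lives under CONFIG_SYSFS, while validate_slab() and the alloc/free location tracking stay under CONFIG_SLUB_DEBUG, so /sys/kernel/slab remains usable in !CONFIG_SLUB_DEBUG builds ('slub: Enable sysfs support for !CONFIG_SLUB_DEBUG'). Schematically:

	#ifdef CONFIG_SYSFS		/* needed whenever /sys/kernel/slab exists */
	static int count_inuse(struct page *page)  { return page->inuse; }
	static int count_total(struct page *page)  { return page->objects; }
	#endif

	#ifdef CONFIG_SLUB_DEBUG	/* needs the debug metadata in each slab */
	static int validate_slab(struct kmem_cache *s, struct page *page,
					unsigned long *map);
	#endif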
@@ -3448,65 +3476,6 @@ static long validate_slab_cache(struct kmem_cache *s) | |||
3448 | kfree(map); | 3476 | kfree(map); |
3449 | return count; | 3477 | return count; |
3450 | } | 3478 | } |
3451 | |||
3452 | #ifdef SLUB_RESILIENCY_TEST | ||
3453 | static void resiliency_test(void) | ||
3454 | { | ||
3455 | u8 *p; | ||
3456 | |||
3457 | printk(KERN_ERR "SLUB resiliency testing\n"); | ||
3458 | printk(KERN_ERR "-----------------------\n"); | ||
3459 | printk(KERN_ERR "A. Corruption after allocation\n"); | ||
3460 | |||
3461 | p = kzalloc(16, GFP_KERNEL); | ||
3462 | p[16] = 0x12; | ||
3463 | printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" | ||
3464 | " 0x12->0x%p\n\n", p + 16); | ||
3465 | |||
3466 | validate_slab_cache(kmalloc_caches + 4); | ||
3467 | |||
3468 | /* Hmmm... The next two are dangerous */ | ||
3469 | p = kzalloc(32, GFP_KERNEL); | ||
3470 | p[32 + sizeof(void *)] = 0x34; | ||
3471 | printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" | ||
3472 | " 0x34 -> -0x%p\n", p); | ||
3473 | printk(KERN_ERR | ||
3474 | "If allocated object is overwritten then not detectable\n\n"); | ||
3475 | |||
3476 | validate_slab_cache(kmalloc_caches + 5); | ||
3477 | p = kzalloc(64, GFP_KERNEL); | ||
3478 | p += 64 + (get_cycles() & 0xff) * sizeof(void *); | ||
3479 | *p = 0x56; | ||
3480 | printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", | ||
3481 | p); | ||
3482 | printk(KERN_ERR | ||
3483 | "If allocated object is overwritten then not detectable\n\n"); | ||
3484 | validate_slab_cache(kmalloc_caches + 6); | ||
3485 | |||
3486 | printk(KERN_ERR "\nB. Corruption after free\n"); | ||
3487 | p = kzalloc(128, GFP_KERNEL); | ||
3488 | kfree(p); | ||
3489 | *p = 0x78; | ||
3490 | printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); | ||
3491 | validate_slab_cache(kmalloc_caches + 7); | ||
3492 | |||
3493 | p = kzalloc(256, GFP_KERNEL); | ||
3494 | kfree(p); | ||
3495 | p[50] = 0x9a; | ||
3496 | printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", | ||
3497 | p); | ||
3498 | validate_slab_cache(kmalloc_caches + 8); | ||
3499 | |||
3500 | p = kzalloc(512, GFP_KERNEL); | ||
3501 | kfree(p); | ||
3502 | p[512] = 0xab; | ||
3503 | printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); | ||
3504 | validate_slab_cache(kmalloc_caches + 9); | ||
3505 | } | ||
3506 | #else | ||
3507 | static void resiliency_test(void) {}; | ||
3508 | #endif | ||
3509 | |||
3510 | /* | 3479 | /* |
3511 | * Generate lists of code addresses where slabcache objects are allocated | 3480 | * Generate lists of code addresses where slabcache objects are allocated |
3512 | * and freed. | 3481 | * and freed. |
@@ -3635,7 +3604,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s, | |||
3635 | 3604 | ||
3636 | static void process_slab(struct loc_track *t, struct kmem_cache *s, | 3605 | static void process_slab(struct loc_track *t, struct kmem_cache *s, |
3637 | struct page *page, enum track_item alloc, | 3606 | struct page *page, enum track_item alloc, |
3638 | long *map) | 3607 | unsigned long *map) |
3639 | { | 3608 | { |
3640 | void *addr = page_address(page); | 3609 | void *addr = page_address(page); |
3641 | void *p; | 3610 | void *p; |
@@ -3735,7 +3704,71 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3735 | len += sprintf(buf, "No data\n"); | 3704 | len += sprintf(buf, "No data\n"); |
3736 | return len; | 3705 | return len; |
3737 | } | 3706 | } |
3707 | #endif | ||
3708 | |||
3709 | #ifdef SLUB_RESILIENCY_TEST | ||
3710 | static void resiliency_test(void) | ||
3711 | { | ||
3712 | u8 *p; | ||
3738 | 3713 | ||
3714 | BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10); | ||
3715 | |||
3716 | printk(KERN_ERR "SLUB resiliency testing\n"); | ||
3717 | printk(KERN_ERR "-----------------------\n"); | ||
3718 | printk(KERN_ERR "A. Corruption after allocation\n"); | ||
3719 | |||
3720 | p = kzalloc(16, GFP_KERNEL); | ||
3721 | p[16] = 0x12; | ||
3722 | printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer" | ||
3723 | " 0x12->0x%p\n\n", p + 16); | ||
3724 | |||
3725 | validate_slab_cache(kmalloc_caches[4]); | ||
3726 | |||
3727 | /* Hmmm... The next two are dangerous */ | ||
3728 | p = kzalloc(32, GFP_KERNEL); | ||
3729 | p[32 + sizeof(void *)] = 0x34; | ||
3730 | printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab" | ||
3731 | " 0x34 -> -0x%p\n", p); | ||
3732 | printk(KERN_ERR | ||
3733 | "If allocated object is overwritten then not detectable\n\n"); | ||
3734 | |||
3735 | validate_slab_cache(kmalloc_caches[5]); | ||
3736 | p = kzalloc(64, GFP_KERNEL); | ||
3737 | p += 64 + (get_cycles() & 0xff) * sizeof(void *); | ||
3738 | *p = 0x56; | ||
3739 | printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", | ||
3740 | p); | ||
3741 | printk(KERN_ERR | ||
3742 | "If allocated object is overwritten then not detectable\n\n"); | ||
3743 | validate_slab_cache(kmalloc_caches[6]); | ||
3744 | |||
3745 | printk(KERN_ERR "\nB. Corruption after free\n"); | ||
3746 | p = kzalloc(128, GFP_KERNEL); | ||
3747 | kfree(p); | ||
3748 | *p = 0x78; | ||
3749 | printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); | ||
3750 | validate_slab_cache(kmalloc_caches[7]); | ||
3751 | |||
3752 | p = kzalloc(256, GFP_KERNEL); | ||
3753 | kfree(p); | ||
3754 | p[50] = 0x9a; | ||
3755 | printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", | ||
3756 | p); | ||
3757 | validate_slab_cache(kmalloc_caches[8]); | ||
3758 | |||
3759 | p = kzalloc(512, GFP_KERNEL); | ||
3760 | kfree(p); | ||
3761 | p[512] = 0xab; | ||
3762 | printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); | ||
3763 | validate_slab_cache(kmalloc_caches[9]); | ||
3764 | } | ||
3765 | #else | ||
3766 | #ifdef CONFIG_SYSFS | ||
3767 | static void resiliency_test(void) {}; | ||
3768 | #endif | ||
3769 | #endif | ||
3770 | |||
3771 | #ifdef CONFIG_SYSFS | ||
3739 | enum slab_stat_type { | 3772 | enum slab_stat_type { |
3740 | SL_ALL, /* All slabs */ | 3773 | SL_ALL, /* All slabs */ |
3741 | SL_PARTIAL, /* Only partially allocated slabs */ | 3774 | SL_PARTIAL, /* Only partially allocated slabs */ |
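The re-added resiliency_test() now sits just ahead of the CONFIG_SYSFS section, still guarded by SLUB_RESILIENCY_TEST, while its empty stub gains an extra CONFIG_SYSFS wrapper: the test's only caller appears to be the sysfs initialization path, so a stub is needed exactly when sysfs support is built without the test, and no definition (and no "defined but not used" warning) is emitted when neither is configured. A sketch of the assumed call site, for orientation only; the body of slab_sysfs_init() is not part of this hunk:

	static int __init slab_sysfs_init(void)
	{
		/* ... register /sys/kernel/slab/<cache>/ entries ... */

		resiliency_test();	/* real test or empty stub, never undefined */
		return 0;
	}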
@@ -3788,6 +3821,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
3788 | } | 3821 | } |
3789 | } | 3822 | } |
3790 | 3823 | ||
3824 | down_read(&slub_lock); | ||
3825 | #ifdef CONFIG_SLUB_DEBUG | ||
3791 | if (flags & SO_ALL) { | 3826 | if (flags & SO_ALL) { |
3792 | for_each_node_state(node, N_NORMAL_MEMORY) { | 3827 | for_each_node_state(node, N_NORMAL_MEMORY) { |
3793 | struct kmem_cache_node *n = get_node(s, node); | 3828 | struct kmem_cache_node *n = get_node(s, node); |
@@ -3804,7 +3839,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
3804 | nodes[node] += x; | 3839 | nodes[node] += x; |
3805 | } | 3840 | } |
3806 | 3841 | ||
3807 | } else if (flags & SO_PARTIAL) { | 3842 | } else |
3843 | #endif | ||
3844 | if (flags & SO_PARTIAL) { | ||
3808 | for_each_node_state(node, N_NORMAL_MEMORY) { | 3845 | for_each_node_state(node, N_NORMAL_MEMORY) { |
3809 | struct kmem_cache_node *n = get_node(s, node); | 3846 | struct kmem_cache_node *n = get_node(s, node); |
3810 | 3847 | ||
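show_slab_objects() now does its walk after taking slub_lock for reading (presumably paired with an up_read() later in the function, not visible in this hunk), and the "} else" / "#endif" / "if (flags & SO_PARTIAL)" arrangement keeps the two branches chained only when CONFIG_SLUB_DEBUG provides the SO_ALL walk. The preprocessed shape in the two configurations, condensed for illustration:

	/* CONFIG_SLUB_DEBUG=y: the branches chain into an else-if */
	if (flags & SO_ALL) {
		/* count via the per-node slab/object counters */
	} else
	if (flags & SO_PARTIAL) {
		/* count via the partial lists */
	}

	/* CONFIG_SLUB_DEBUG unset: only the partial-list branch is compiled */
	if (flags & SO_PARTIAL) {
		/* count via the partial lists */
	}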
@@ -3829,6 +3866,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
3829 | return x + sprintf(buf + x, "\n"); | 3866 | return x + sprintf(buf + x, "\n"); |
3830 | } | 3867 | } |
3831 | 3868 | ||
3869 | #ifdef CONFIG_SLUB_DEBUG | ||
3832 | static int any_slab_objects(struct kmem_cache *s) | 3870 | static int any_slab_objects(struct kmem_cache *s) |
3833 | { | 3871 | { |
3834 | int node; | 3872 | int node; |
@@ -3844,6 +3882,7 @@ static int any_slab_objects(struct kmem_cache *s) | |||
3844 | } | 3882 | } |
3845 | return 0; | 3883 | return 0; |
3846 | } | 3884 | } |
3885 | #endif | ||
3847 | 3886 | ||
3848 | #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) | 3887 | #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) |
3849 | #define to_slab(n) container_of(n, struct kmem_cache, kobj); | 3888 | #define to_slab(n) container_of(n, struct kmem_cache, kobj); |
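any_slab_objects() reads the per-node object counters and therefore moves under CONFIG_SLUB_DEBUG together with them; the to_slab_attr()/to_slab() helpers below it are what every attribute handler in the rest of the file relies on. For orientation, a sketch of the attribute plumbing those handlers plug into; the real definitions live earlier in slub.c and are reproduced here from memory, so treat them as assumed rather than quoted:

	struct slab_attribute {
		struct attribute attr;
		ssize_t (*show)(struct kmem_cache *s, char *buf);
		ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
	};

	#define SLAB_ATTR_RO(_name) \
		static struct slab_attribute _name##_attr = __ATTR_RO(_name)

	#define SLAB_ATTR(_name) \
		static struct slab_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)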
@@ -3945,12 +3984,6 @@ static ssize_t aliases_show(struct kmem_cache *s, char *buf) | |||
3945 | } | 3984 | } |
3946 | SLAB_ATTR_RO(aliases); | 3985 | SLAB_ATTR_RO(aliases); |
3947 | 3986 | ||
3948 | static ssize_t slabs_show(struct kmem_cache *s, char *buf) | ||
3949 | { | ||
3950 | return show_slab_objects(s, buf, SO_ALL); | ||
3951 | } | ||
3952 | SLAB_ATTR_RO(slabs); | ||
3953 | |||
3954 | static ssize_t partial_show(struct kmem_cache *s, char *buf) | 3987 | static ssize_t partial_show(struct kmem_cache *s, char *buf) |
3955 | { | 3988 | { |
3956 | return show_slab_objects(s, buf, SO_PARTIAL); | 3989 | return show_slab_objects(s, buf, SO_PARTIAL); |
@@ -3975,93 +4008,83 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) | |||
3975 | } | 4008 | } |
3976 | SLAB_ATTR_RO(objects_partial); | 4009 | SLAB_ATTR_RO(objects_partial); |
3977 | 4010 | ||
3978 | static ssize_t total_objects_show(struct kmem_cache *s, char *buf) | 4011 | static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) |
3979 | { | ||
3980 | return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); | ||
3981 | } | ||
3982 | SLAB_ATTR_RO(total_objects); | ||
3983 | |||
3984 | static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) | ||
3985 | { | 4012 | { |
3986 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); | 4013 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); |
3987 | } | 4014 | } |
3988 | 4015 | ||
3989 | static ssize_t sanity_checks_store(struct kmem_cache *s, | 4016 | static ssize_t reclaim_account_store(struct kmem_cache *s, |
3990 | const char *buf, size_t length) | 4017 | const char *buf, size_t length) |
3991 | { | 4018 | { |
3992 | s->flags &= ~SLAB_DEBUG_FREE; | 4019 | s->flags &= ~SLAB_RECLAIM_ACCOUNT; |
3993 | if (buf[0] == '1') | 4020 | if (buf[0] == '1') |
3994 | s->flags |= SLAB_DEBUG_FREE; | 4021 | s->flags |= SLAB_RECLAIM_ACCOUNT; |
3995 | return length; | 4022 | return length; |
3996 | } | 4023 | } |
3997 | SLAB_ATTR(sanity_checks); | 4024 | SLAB_ATTR(reclaim_account); |
3998 | 4025 | ||
3999 | static ssize_t trace_show(struct kmem_cache *s, char *buf) | 4026 | static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) |
4000 | { | 4027 | { |
4001 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); | 4028 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); |
4002 | } | 4029 | } |
4030 | SLAB_ATTR_RO(hwcache_align); | ||
4003 | 4031 | ||
4004 | static ssize_t trace_store(struct kmem_cache *s, const char *buf, | 4032 | #ifdef CONFIG_ZONE_DMA |
4005 | size_t length) | 4033 | static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) |
4006 | { | 4034 | { |
4007 | s->flags &= ~SLAB_TRACE; | 4035 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); |
4008 | if (buf[0] == '1') | ||
4009 | s->flags |= SLAB_TRACE; | ||
4010 | return length; | ||
4011 | } | 4036 | } |
4012 | SLAB_ATTR(trace); | 4037 | SLAB_ATTR_RO(cache_dma); |
4038 | #endif | ||
4013 | 4039 | ||
4014 | #ifdef CONFIG_FAILSLAB | 4040 | static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) |
4015 | static ssize_t failslab_show(struct kmem_cache *s, char *buf) | ||
4016 | { | 4041 | { |
4017 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); | 4042 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); |
4018 | } | 4043 | } |
4044 | SLAB_ATTR_RO(destroy_by_rcu); | ||
4019 | 4045 | ||
4020 | static ssize_t failslab_store(struct kmem_cache *s, const char *buf, | 4046 | #ifdef CONFIG_SLUB_DEBUG |
4021 | size_t length) | 4047 | static ssize_t slabs_show(struct kmem_cache *s, char *buf) |
4022 | { | 4048 | { |
4023 | s->flags &= ~SLAB_FAILSLAB; | 4049 | return show_slab_objects(s, buf, SO_ALL); |
4024 | if (buf[0] == '1') | ||
4025 | s->flags |= SLAB_FAILSLAB; | ||
4026 | return length; | ||
4027 | } | 4050 | } |
4028 | SLAB_ATTR(failslab); | 4051 | SLAB_ATTR_RO(slabs); |
4029 | #endif | ||
4030 | 4052 | ||
4031 | static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) | 4053 | static ssize_t total_objects_show(struct kmem_cache *s, char *buf) |
4032 | { | 4054 | { |
4033 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); | 4055 | return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); |
4034 | } | 4056 | } |
4057 | SLAB_ATTR_RO(total_objects); | ||
4035 | 4058 | ||
4036 | static ssize_t reclaim_account_store(struct kmem_cache *s, | 4059 | static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) |
4037 | const char *buf, size_t length) | ||
4038 | { | 4060 | { |
4039 | s->flags &= ~SLAB_RECLAIM_ACCOUNT; | 4061 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); |
4040 | if (buf[0] == '1') | ||
4041 | s->flags |= SLAB_RECLAIM_ACCOUNT; | ||
4042 | return length; | ||
4043 | } | 4062 | } |
4044 | SLAB_ATTR(reclaim_account); | ||
4045 | 4063 | ||
4046 | static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) | 4064 | static ssize_t sanity_checks_store(struct kmem_cache *s, |
4065 | const char *buf, size_t length) | ||
4047 | { | 4066 | { |
4048 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); | 4067 | s->flags &= ~SLAB_DEBUG_FREE; |
4068 | if (buf[0] == '1') | ||
4069 | s->flags |= SLAB_DEBUG_FREE; | ||
4070 | return length; | ||
4049 | } | 4071 | } |
4050 | SLAB_ATTR_RO(hwcache_align); | 4072 | SLAB_ATTR(sanity_checks); |
4051 | 4073 | ||
4052 | #ifdef CONFIG_ZONE_DMA | 4074 | static ssize_t trace_show(struct kmem_cache *s, char *buf) |
4053 | static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) | ||
4054 | { | 4075 | { |
4055 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); | 4076 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); |
4056 | } | 4077 | } |
4057 | SLAB_ATTR_RO(cache_dma); | ||
4058 | #endif | ||
4059 | 4078 | ||
4060 | static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) | 4079 | static ssize_t trace_store(struct kmem_cache *s, const char *buf, |
4080 | size_t length) | ||
4061 | { | 4081 | { |
4062 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); | 4082 | s->flags &= ~SLAB_TRACE; |
4083 | if (buf[0] == '1') | ||
4084 | s->flags |= SLAB_TRACE; | ||
4085 | return length; | ||
4063 | } | 4086 | } |
4064 | SLAB_ATTR_RO(destroy_by_rcu); | 4087 | SLAB_ATTR(trace); |
4065 | 4088 | ||
4066 | static ssize_t red_zone_show(struct kmem_cache *s, char *buf) | 4089 | static ssize_t red_zone_show(struct kmem_cache *s, char *buf) |
4067 | { | 4090 | { |
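The handlers reshuffled in this hunk all follow one boolean-flag pattern: show prints !!(s->flags & FLAG), store clears the flag and re-sets it when the first character written is '1'. From userspace they appear as per-cache files such as /sys/kernel/slab/<cache>/trace, readable as 0 or 1 and writable where the attribute is not read-only. A consolidated form of the pattern, purely hypothetical (SLUB keeps one handler pair per flag rather than a shared helper):

	static ssize_t slab_flag_show(struct kmem_cache *s, char *buf,
					unsigned long flag)
	{
		return sprintf(buf, "%d\n", !!(s->flags & flag));
	}

	static ssize_t slab_flag_store(struct kmem_cache *s, const char *buf,
					size_t length, unsigned long flag)
	{
		s->flags &= ~flag;		/* clear, then honour "1" */
		if (buf[0] == '1')
			s->flags |= flag;
		return length;
	}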
@@ -4139,6 +4162,40 @@ static ssize_t validate_store(struct kmem_cache *s, | |||
4139 | } | 4162 | } |
4140 | SLAB_ATTR(validate); | 4163 | SLAB_ATTR(validate); |
4141 | 4164 | ||
4165 | static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) | ||
4166 | { | ||
4167 | if (!(s->flags & SLAB_STORE_USER)) | ||
4168 | return -ENOSYS; | ||
4169 | return list_locations(s, buf, TRACK_ALLOC); | ||
4170 | } | ||
4171 | SLAB_ATTR_RO(alloc_calls); | ||
4172 | |||
4173 | static ssize_t free_calls_show(struct kmem_cache *s, char *buf) | ||
4174 | { | ||
4175 | if (!(s->flags & SLAB_STORE_USER)) | ||
4176 | return -ENOSYS; | ||
4177 | return list_locations(s, buf, TRACK_FREE); | ||
4178 | } | ||
4179 | SLAB_ATTR_RO(free_calls); | ||
4180 | #endif /* CONFIG_SLUB_DEBUG */ | ||
4181 | |||
4182 | #ifdef CONFIG_FAILSLAB | ||
4183 | static ssize_t failslab_show(struct kmem_cache *s, char *buf) | ||
4184 | { | ||
4185 | return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); | ||
4186 | } | ||
4187 | |||
4188 | static ssize_t failslab_store(struct kmem_cache *s, const char *buf, | ||
4189 | size_t length) | ||
4190 | { | ||
4191 | s->flags &= ~SLAB_FAILSLAB; | ||
4192 | if (buf[0] == '1') | ||
4193 | s->flags |= SLAB_FAILSLAB; | ||
4194 | return length; | ||
4195 | } | ||
4196 | SLAB_ATTR(failslab); | ||
4197 | #endif | ||
4198 | |||
4142 | static ssize_t shrink_show(struct kmem_cache *s, char *buf) | 4199 | static ssize_t shrink_show(struct kmem_cache *s, char *buf) |
4143 | { | 4200 | { |
4144 | return 0; | 4201 | return 0; |
@@ -4158,22 +4215,6 @@ static ssize_t shrink_store(struct kmem_cache *s, | |||
4158 | } | 4215 | } |
4159 | SLAB_ATTR(shrink); | 4216 | SLAB_ATTR(shrink); |
4160 | 4217 | ||
4161 | static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) | ||
4162 | { | ||
4163 | if (!(s->flags & SLAB_STORE_USER)) | ||
4164 | return -ENOSYS; | ||
4165 | return list_locations(s, buf, TRACK_ALLOC); | ||
4166 | } | ||
4167 | SLAB_ATTR_RO(alloc_calls); | ||
4168 | |||
4169 | static ssize_t free_calls_show(struct kmem_cache *s, char *buf) | ||
4170 | { | ||
4171 | if (!(s->flags & SLAB_STORE_USER)) | ||
4172 | return -ENOSYS; | ||
4173 | return list_locations(s, buf, TRACK_FREE); | ||
4174 | } | ||
4175 | SLAB_ATTR_RO(free_calls); | ||
4176 | |||
4177 | #ifdef CONFIG_NUMA | 4218 | #ifdef CONFIG_NUMA |
4178 | static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) | 4219 | static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) |
4179 | { | 4220 | { |
@@ -4279,25 +4320,27 @@ static struct attribute *slab_attrs[] = { | |||
4279 | &min_partial_attr.attr, | 4320 | &min_partial_attr.attr, |
4280 | &objects_attr.attr, | 4321 | &objects_attr.attr, |
4281 | &objects_partial_attr.attr, | 4322 | &objects_partial_attr.attr, |
4282 | &total_objects_attr.attr, | ||
4283 | &slabs_attr.attr, | ||
4284 | &partial_attr.attr, | 4323 | &partial_attr.attr, |
4285 | &cpu_slabs_attr.attr, | 4324 | &cpu_slabs_attr.attr, |
4286 | &ctor_attr.attr, | 4325 | &ctor_attr.attr, |
4287 | &aliases_attr.attr, | 4326 | &aliases_attr.attr, |
4288 | &align_attr.attr, | 4327 | &align_attr.attr, |
4289 | &sanity_checks_attr.attr, | ||
4290 | &trace_attr.attr, | ||
4291 | &hwcache_align_attr.attr, | 4328 | &hwcache_align_attr.attr, |
4292 | &reclaim_account_attr.attr, | 4329 | &reclaim_account_attr.attr, |
4293 | &destroy_by_rcu_attr.attr, | 4330 | &destroy_by_rcu_attr.attr, |
4331 | &shrink_attr.attr, | ||
4332 | #ifdef CONFIG_SLUB_DEBUG | ||
4333 | &total_objects_attr.attr, | ||
4334 | &slabs_attr.attr, | ||
4335 | &sanity_checks_attr.attr, | ||
4336 | &trace_attr.attr, | ||
4294 | &red_zone_attr.attr, | 4337 | &red_zone_attr.attr, |
4295 | &poison_attr.attr, | 4338 | &poison_attr.attr, |
4296 | &store_user_attr.attr, | 4339 | &store_user_attr.attr, |
4297 | &validate_attr.attr, | 4340 | &validate_attr.attr, |
4298 | &shrink_attr.attr, | ||
4299 | &alloc_calls_attr.attr, | 4341 | &alloc_calls_attr.attr, |
4300 | &free_calls_attr.attr, | 4342 | &free_calls_attr.attr, |
4343 | #endif | ||
4301 | #ifdef CONFIG_ZONE_DMA | 4344 | #ifdef CONFIG_ZONE_DMA |
4302 | &cache_dma_attr.attr, | 4345 | &cache_dma_attr.attr, |
4303 | #endif | 4346 | #endif |
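In slab_attrs[] the entries that depend on debug metadata are now grouped in one CONFIG_SLUB_DEBUG block, and shrink moves into the unconditional part since kmem_cache_shrink() needs no debugging support. The grouped entries are debug-only because the counters and tracking they report exist only in the debug variant of the per-node structure; a sketch of that layout, assumed from the slub headers of this era rather than quoted from this diff:

	struct kmem_cache_node {
		spinlock_t list_lock;		/* protects the partial list */
		unsigned long nr_partial;
		struct list_head partial;
	#ifdef CONFIG_SLUB_DEBUG
		atomic_long_t nr_slabs;		/* feeds slabs/total_objects */
		atomic_long_t total_objects;
		struct list_head full;		/* needed for validation */
	#endif
	};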
@@ -4377,6 +4420,7 @@ static void kmem_cache_release(struct kobject *kobj) | |||
4377 | { | 4420 | { |
4378 | struct kmem_cache *s = to_slab(kobj); | 4421 | struct kmem_cache *s = to_slab(kobj); |
4379 | 4422 | ||
4423 | kfree(s->name); | ||
4380 | kfree(s); | 4424 | kfree(s); |
4381 | } | 4425 | } |
4382 | 4426 | ||
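kmem_cache_release() now frees s->name as well as the cache: elsewhere in this series the name string is duplicated so the cache owns its own copy, and the kobject release callback is the natural place to drop it. A hedged sketch of the allocation side this pairs with; the helper name and exact placement are assumed, only the kfree(s->name) above is part of this hunk:

	struct kmem_cache *example_cache_setup(const char *name)
	{
		struct kmem_cache *s;
		char *n = kstrdup(name, GFP_KERNEL);	/* cache-owned copy */

		if (!n)
			return NULL;
		s = kmalloc(sizeof(*s), GFP_KERNEL);
		if (!s) {
			kfree(n);
			return NULL;
		}
		s->name = n;	/* released later by kmem_cache_release() */
		/* ... kmem_cache_open(), sysfs_slab_add(), ... */
		return s;
	}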
@@ -4579,7 +4623,7 @@ static int __init slab_sysfs_init(void) | |||
4579 | } | 4623 | } |
4580 | 4624 | ||
4581 | __initcall(slab_sysfs_init); | 4625 | __initcall(slab_sysfs_init); |
4582 | #endif | 4626 | #endif /* CONFIG_SYSFS */ |
4583 | 4627 | ||
4584 | /* | 4628 | /* |
4585 | * The /proc/slabinfo ABI | 4629 | * The /proc/slabinfo ABI |