author		Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:28:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-12-30 20:28:09 -0500
commit		db5e53fbf0abf5cadc83be57032242e5e7c6c394 (patch)
tree		e391aebab8b81a68fe36e5fef8a729062f643259
parent		3f4b5c5d275608d42ff54c4981307f9a5c75ea4a (diff)
parent		3c506efd7e0f615bd9603ce8c06bc4a896952599 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slub: avoid leaking caches or refcounts on sysfs error
slab: Fix comment on #endif
slab: remove GFP_THISNODE clearing from alloc_slabmgmt()
slub: Add might_sleep_if() to slab_alloc()
SLUB: failslab support
slub: Fix incorrect use of loose
slab: Update the kmem_cache_create documentation regarding the name parameter
slub: make early_kmem_cache_node_alloc void
slab: unsigned slabp->inuse cannot be less than 0
slub - fix get_object_page comment
SLUB: Replace __builtin_return_address(0) with _RET_IP_.
SLUB: cleanup - define macros instead of hardcoded numbers
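
The common thread in the failslab commits above is that the fault-injection hook moves out of mm/slab.c into a new mm/failslab.c so that SLAB and SLUB share one implementation. A minimal sketch of the resulting call pattern (the wrapper below is hypothetical; the real call sites are in the mm/slab.c and mm/slub.c hunks further down):

    /* Hypothetical caller, for illustration only. should_failslab() never
     * fails __GFP_NOFAIL requests and, by default, ignores requests that
     * are allowed to sleep (__GFP_WAIT). */
    static void *my_alloc(struct my_cache *cache, size_t size, gfp_t gfpflags)
    {
            if (should_failslab(size, gfpflags))
                    return NULL;            /* injected failure */

            return my_real_alloc(cache, size, gfpflags);
    }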
-rw-r--r--	include/linux/fault-inject.h	 9
-rw-r--r--	include/linux/slab.h		10
-rw-r--r--	lib/Kconfig.debug		 1
-rw-r--r--	mm/Makefile			 1
-rw-r--r--	mm/failslab.c			59
-rw-r--r--	mm/slab.c			89
-rw-r--r--	mm/slub.c			98
7 files changed, 145 insertions(+), 122 deletions(-)
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 32368c4f0326..06ca9b21dad2 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 
 #endif /* CONFIG_FAULT_INJECTION */
 
+#ifdef CONFIG_FAILSLAB
+extern bool should_failslab(size_t size, gfp_t gfpflags);
+#else
+static inline bool should_failslab(size_t size, gfp_t gfpflags)
+{
+        return false;
+}
+#endif /* CONFIG_FAILSLAB */
+
 #endif /* _LINUX_FAULT_INJECT_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 000da12b5cf0..f96d13c281e8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
-        __kmalloc_track_caller(size, flags, __builtin_return_address(0))
+        __kmalloc_track_caller(size, flags, _RET_IP_)
 #else
 #define kmalloc_track_caller(size, flags) \
         __kmalloc(size, flags)
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
         __kmalloc_node_track_caller(size, flags, node, \
-                        __builtin_return_address(0))
+                        _RET_IP_)
 #else
 #define kmalloc_node_track_caller(size, flags, node) \
         __kmalloc_node(size, flags, node)
@@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 #define kmalloc_node_track_caller(size, flags, node) \
         kmalloc_track_caller(size, flags)
 
-#endif /* DEBUG_SLAB */
+#endif /* CONFIG_NUMA */
 
 /*
  * Shortcuts
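
The track-caller prototypes above switch the caller argument from void * to unsigned long and pass _RET_IP_ instead of __builtin_return_address(0). For reference, _RET_IP_ comes from include/linux/kernel.h and is, to the best of my recollection, simply:

    /* Approximate definition, not part of this diff: */
    #define _RET_IP_        (unsigned long)__builtin_return_address(0)

Keeping the return address as an unsigned long lets the allocators store and compare it without casts; only printing with %pS still needs the (void *) cast seen later in the mm/slub.c hunks.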
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index eae594cb6ea9..2e75478e9c69 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -730,6 +730,7 @@ config FAULT_INJECTION
 config FAILSLAB
         bool "Fault-injection capability for kmalloc"
         depends on FAULT_INJECTION
+        depends on SLAB || SLUB
         help
           Provide fault-injection capability for kmalloc.
 
diff --git a/mm/Makefile b/mm/Makefile
index c06b45a1ff5f..51c27709cc7c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
diff --git a/mm/failslab.c b/mm/failslab.c
new file mode 100644
index 000000000000..7c6ea6493f80
--- /dev/null
+++ b/mm/failslab.c
@@ -0,0 +1,59 @@
+#include <linux/fault-inject.h>
+
+static struct {
+        struct fault_attr attr;
+        u32 ignore_gfp_wait;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+        struct dentry *ignore_gfp_wait_file;
+#endif
+} failslab = {
+        .attr = FAULT_ATTR_INITIALIZER,
+        .ignore_gfp_wait = 1,
+};
+
+bool should_failslab(size_t size, gfp_t gfpflags)
+{
+        if (gfpflags & __GFP_NOFAIL)
+                return false;
+
+        if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+                return false;
+
+        return should_fail(&failslab.attr, size);
+}
+
+static int __init setup_failslab(char *str)
+{
+        return setup_fault_attr(&failslab.attr, str);
+}
+__setup("failslab=", setup_failslab);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init failslab_debugfs_init(void)
+{
+        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+        struct dentry *dir;
+        int err;
+
+        err = init_fault_attr_dentries(&failslab.attr, "failslab");
+        if (err)
+                return err;
+        dir = failslab.attr.dentries.dir;
+
+        failslab.ignore_gfp_wait_file =
+                debugfs_create_bool("ignore-gfp-wait", mode, dir,
+                                    &failslab.ignore_gfp_wait);
+
+        if (!failslab.ignore_gfp_wait_file) {
+                err = -ENOMEM;
+                debugfs_remove(failslab.ignore_gfp_wait_file);
+                cleanup_fault_attr_dentries(&failslab.attr);
+        }
+
+        return err;
+}
+
+late_initcall(failslab_debugfs_init);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
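
A note on behaviour, not part of the patch: with the default ignore_gfp_wait = 1, only allocations that cannot sleep are candidates for injected failure, and __GFP_NOFAIL requests are never failed. The failslab= boot parameter and the debugfs ignore-gfp-wait knob feed the same generic struct fault_attr (see Documentation/fault-injection/fault-injection.txt for the parameter format). A small hypothetical illustration (the helper function is made up for this example):

    static void failslab_behaviour_example(void)
    {
            /* may be failed: GFP_ATOMIC does not include __GFP_WAIT */
            bool a = should_failslab(64, GFP_ATOMIC);

            /* never failed by default: __GFP_WAIT is set in GFP_KERNEL */
            bool b = should_failslab(64, GFP_KERNEL);

            /* never failed: __GFP_NOFAIL wins unconditionally */
            bool c = should_failslab(64, GFP_KERNEL | __GFP_NOFAIL);

            (void)a; (void)b; (void)c;
    }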
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2609,7 +2611,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
         if (OFF_SLAB(cachep)) {
                 /* Slab management obj is off-slab. */
                 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-                                              local_flags & ~GFP_THISNODE, nodeid);
+                                              local_flags, nodeid);
                 if (!slabp)
                         return NULL;
         } else {
@@ -2997,7 +2999,7 @@ retry:
                  * there must be at least one object available for
                  * allocation.
                  */
-                BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+                BUG_ON(slabp->inuse >= cachep->num);
 
                 while (slabp->inuse < cachep->num && batchcount--) {
                         STATS_INC_ALLOCED(cachep);
@@ -3106,79 +3108,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-        struct fault_attr attr;
-
-        u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-        struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-        .attr = FAULT_ATTR_INITIALIZER,
-        .ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-        return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
         if (cachep == &cache_cache)
-                return 0;
-        if (flags & __GFP_NOFAIL)
-                return 0;
-        if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-                return 0;
+                return false;
 
-        return should_fail(&failslab.attr, obj_size(cachep));
+        return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-        struct dentry *dir;
-        int err;
-
-        err = init_fault_attr_dentries(&failslab.attr, "failslab");
-        if (err)
-                return err;
-        dir = failslab.attr.dentries.dir;
-
-        failslab.ignore_gfp_wait_file =
-                debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                                   &failslab.ignore_gfp_wait);
-
-        if (!failslab.ignore_gfp_wait_file) {
-                err = -ENOMEM;
-                debugfs_remove(failslab.ignore_gfp_wait_file);
-                cleanup_fault_attr_dentries(&failslab.attr);
-        }
-
-        return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-        return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         void *objp;
@@ -3381,7 +3318,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
         unsigned long save_flags;
         void *ptr;
 
-        if (should_failslab(cachep, flags))
+        if (slab_should_failslab(cachep, flags))
                 return NULL;
 
         cache_alloc_debugcheck_before(cachep, flags);
@@ -3457,7 +3394,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
         unsigned long save_flags;
         void *objp;
 
-        if (should_failslab(cachep, flags))
+        if (slab_should_failslab(cachep, flags))
                 return NULL;
 
         cache_alloc_debugcheck_before(cachep, flags);
@@ -3686,9 +3623,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-                int node, void *caller)
+                int node, unsigned long caller)
 {
-        return __do_kmalloc_node(size, flags, node, caller);
+        return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3667,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-        return __do_kmalloc(size, flags, caller);
+        return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/fault-inject.h>
 
 /*
  * Lock order:
@@ -153,6 +154,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT        16
+#define OO_MASK         ((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE       65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON         0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED    0x40000000 /* Not yet visible via sysfs */
@@ -178,7 +183,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-        void *addr;             /* Called from address */
+        unsigned long addr;     /* Called from address */
         int cpu;                /* Was running on cpu */
         int pid;                /* Pid context */
         unsigned long when;     /* When did the operation occur */
@@ -290,7 +295,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
                                                 unsigned long size)
 {
         struct kmem_cache_order_objects x = {
-                (order << 16) + (PAGE_SIZE << order) / size
+                (order << OO_SHIFT) + (PAGE_SIZE << order) / size
         };
 
         return x;
@@ -298,12 +303,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-        return x.x >> 16;
+        return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-        return x.x & ((1 << 16) - 1);
+        return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
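
The OO_* macros introduced above document the packing of struct kmem_cache_order_objects: the page order lives in the high bits and the number of objects per slab in the low 16 bits. A worked example with hypothetical numbers (assuming PAGE_SIZE is 4096):

    static void oo_packing_example(void)
    {
            /* 256-byte objects on an order-1 (8 KiB) slab:
             * x.x = (1 << OO_SHIFT) + 8192 / 256 = 0x10020 */
            struct kmem_cache_order_objects x = oo_make(1, 256);

            BUG_ON(oo_order(x) != 1);       /* 0x10020 >> OO_SHIFT */
            BUG_ON(oo_objects(x) != 32);    /* 0x10020 & OO_MASK   */
    }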
@@ -367,7 +372,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-                        enum track_item alloc, void *addr)
+                        enum track_item alloc, unsigned long addr)
 {
         struct track *p;
 
@@ -391,8 +396,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
         if (!(s->flags & SLAB_STORE_USER))
                 return;
 
-        set_track(s, object, TRACK_FREE, NULL);
-        set_track(s, object, TRACK_ALLOC, NULL);
+        set_track(s, object, TRACK_FREE, 0UL);
+        set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -401,7 +406,7 @@ static void print_track(const char *s, struct track *t)
                 return;
 
         printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-                s, t->addr, jiffies - t->when, t->cpu, t->pid);
+                s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -692,7 +697,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
         if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                 object_err(s, page, p, "Freepointer corrupt");
                 /*
-                 * No choice but to zap it and thus loose the remainder
+                 * No choice but to zap it and thus lose the remainder
                  * of the free objects in this slab. May cause
                  * another error because the object count is now wrong.
                  */
@@ -764,8 +769,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
         }
 
         max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-        if (max_objects > 65535)
-                max_objects = 65535;
+        if (max_objects > MAX_OBJS_PER_PAGE)
+                max_objects = MAX_OBJS_PER_PAGE;
 
         if (page->objects != max_objects) {
                 slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -866,7 +871,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-                                        void *object, void *addr)
+                                        void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
                 goto bad;
@@ -906,7 +911,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-                                        void *object, void *addr)
+                                        void *object, unsigned long addr)
 {
         if (!check_slab(s, page))
                 goto fail;
@@ -1029,10 +1034,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
                         struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, void *addr) { return 0; }
+        struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, void *addr) { return 0; }
+        struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                         { return 1; }
@@ -1499,8 +1504,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-                gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+                          unsigned long addr, struct kmem_cache_cpu *c)
 {
         void **object;
         struct page *new;
@@ -1584,13 +1589,18 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-                gfp_t gfpflags, int node, void *addr)
+                gfp_t gfpflags, int node, unsigned long addr)
 {
         void **object;
         struct kmem_cache_cpu *c;
         unsigned long flags;
         unsigned int objsize;
 
+        might_sleep_if(gfpflags & __GFP_WAIT);
+
+        if (should_failslab(s->objsize, gfpflags))
+                return NULL;
+
         local_irq_save(flags);
         c = get_cpu_slab(s, smp_processor_id());
         objsize = c->objsize;
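
The slab_alloc() hunk above adds two early checks to SLUB's fast path: might_sleep_if() makes sleeping GFP masks trip the sleep-in-atomic debug check even when the fast path would not actually sleep (matching what SLAB already does in its debug checks), and should_failslab() lets fault injection fail the allocation before any per-CPU state is touched. A hypothetical example of the kind of bug the first check now flags:

    /* Made-up caller: allocating with a sleeping mask under a spinlock.
     * With sleep-in-atomic debugging enabled, might_sleep_if() now warns
     * at the top of slab_alloc() regardless of whether a new slab would
     * actually have been needed. */
    static void *buggy_alloc(struct kmem_cache *cache, spinlock_t *lock)
    {
            void *obj;

            spin_lock(lock);
            obj = kmem_cache_alloc(cache, GFP_KERNEL);  /* should be GFP_ATOMIC */
            spin_unlock(lock);

            return obj;
    }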
@@ -1613,14 +1623,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-        return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+        return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-        return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+        return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1634,7 +1644,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-                                void *x, void *addr, unsigned int offset)
+                        void *x, unsigned long addr, unsigned int offset)
 {
         void *prior;
         void **object = (void *)x;
@@ -1704,7 +1714,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-                        struct page *page, void *x, void *addr)
+                        struct page *page, void *x, unsigned long addr)
 {
         void **object = (void *)x;
         struct kmem_cache_cpu *c;
@@ -1731,11 +1741,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
         page = virt_to_head_page(x);
 
-        slab_free(s, page, x, __builtin_return_address(0));
+        slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
         struct page *page = virt_to_head_page(x);
@@ -1807,8 +1817,8 @@ static inline int slab_order(int size, int min_objects,
         int rem;
         int min_order = slub_min_order;
 
-        if ((PAGE_SIZE << min_order) / size > 65535)
-                return get_order(size * 65535) - 1;
+        if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+                return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
         for (order = max(min_order,
                                 fls(min_objects * size - 1) - PAGE_SHIFT);
@@ -2073,8 +2083,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
 */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-                                                           int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
         struct page *page;
         struct kmem_cache_node *n;
@@ -2112,7 +2121,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
         local_irq_save(flags);
         add_partial(n, page, 0);
         local_irq_restore(flags);
-        return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2144,8 +2152,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
                         n = &s->local_node;
                 else {
                         if (slab_state == DOWN) {
-                                n = early_kmem_cache_node_alloc(gfpflags,
-                                                                node);
+                                early_kmem_cache_node_alloc(gfpflags, node);
                                 continue;
                         }
                         n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2659,7 +2666,7 @@ void *__kmalloc(size_t size, gfp_t flags)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, -1, __builtin_return_address(0));
+        return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,7 +2694,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
         if (unlikely(ZERO_OR_NULL_PTR(s)))
                 return s;
 
-        return slab_alloc(s, flags, node, __builtin_return_address(0));
+        return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2744,7 +2751,7 @@ void kfree(const void *x)
                 put_page(page);
                 return;
         }
-        slab_free(page->slab, page, object, __builtin_return_address(0));
+        slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3123,8 +3130,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                 up_write(&slub_lock);
 
-                if (sysfs_slab_alias(s, name))
+                if (sysfs_slab_alias(s, name)) {
+                        down_write(&slub_lock);
+                        s->refcount--;
+                        up_write(&slub_lock);
                         goto err;
+                }
                 return s;
         }
 
@@ -3134,8 +3145,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                         size, align, flags, ctor)) {
                 list_add(&s->list, &slab_caches);
                 up_write(&slub_lock);
-                if (sysfs_slab_add(s))
+                if (sysfs_slab_add(s)) {
+                        down_write(&slub_lock);
+                        list_del(&s->list);
+                        up_write(&slub_lock);
+                        kfree(s);
                         goto err;
+                }
                 return s;
         }
         kfree(s);
@@ -3202,7 +3218,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
         struct kmem_cache *s;
 
@@ -3218,7 +3234,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-                                        int node, void *caller)
+                                        int node, unsigned long caller)
 {
         struct kmem_cache *s;
 
@@ -3429,7 +3445,7 @@ static void resiliency_test(void) {};
 
 struct location {
         unsigned long count;
-        void *addr;
+        unsigned long addr;
         long long sum_time;
         long min_time;
         long max_time;
@@ -3477,7 +3493,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
         long start, end, pos;
         struct location *l;
-        void *caddr;
+        unsigned long caddr;
         unsigned long age = jiffies - track->when;
 
         start = -1;
@@ -4345,7 +4361,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
         struct kmem_cache *s;