about summary refs log tree commit diff stats
path: root/mm/slub.c
diff options
context:
space:
mode:
author Dmitry Torokhov <dmitry.torokhov@gmail.com> 2013-12-16 05:04:49 -0500
committer Dmitry Torokhov <dmitry.torokhov@gmail.com> 2013-12-16 05:04:49 -0500
commit348324c5b10bcba8d9daabdfb85a6927311be34f (patch)
treed06ca3a264407a14a1f36c1b798d6dc0dc1582d8 /mm/slub.c
parent1e63bd9cc43db5400a1423a7ec8266b4e7c54bd0 (diff)
parent319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge tag 'v3.13-rc4' into next
Synchronize with mainline to bring in the new keycode definitions and new hwmon API.
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- mm/slub.c 49
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c3eb3d3ca835..545a170ebf9f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
155/* 155/*
156 * Maximum number of desirable partial slabs. 156 * Maximum number of desirable partial slabs.
157 * The existence of more partial slabs makes kmem_cache_shrink 157 * The existence of more partial slabs makes kmem_cache_shrink
158 * sort the partial list by the number of objects in the. 158 * sort the partial list by the number of objects in use.
159 */ 159 */
160#define MAX_PARTIAL 10 160#define MAX_PARTIAL 10
161 161
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
933 * Hooks for other subsystems that check memory allocations. In a typical 933 * Hooks for other subsystems that check memory allocations. In a typical
934 * production configuration these hooks all should produce no code at all. 934 * production configuration these hooks all should produce no code at all.
935 */ 935 */
936static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
937{
938 kmemleak_alloc(ptr, size, 1, flags);
939}
940
941static inline void kfree_hook(const void *x)
942{
943 kmemleak_free(x);
944}
945
936static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 946static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
937{ 947{
938 flags &= gfp_allowed_mask; 948 flags &= gfp_allowed_mask;
@@ -955,7 +965,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
955 kmemleak_free_recursive(x, s->flags); 965 kmemleak_free_recursive(x, s->flags);
956 966
957 /* 967 /*
958 * Trouble is that we may no longer disable interupts in the fast path 968 * Trouble is that we may no longer disable interrupts in the fast path
959 * So in order to make the debug calls that expect irqs to be 969 * So in order to make the debug calls that expect irqs to be
960 * disabled we need to disable interrupts temporarily. 970 * disabled we need to disable interrupts temporarily.
961 */ 971 */
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
1217 /* 1227 /*
1218 * Enable debugging if selected on the kernel commandline. 1228 * Enable debugging if selected on the kernel commandline.
1219 */ 1229 */
1220 if (slub_debug && (!slub_debug_slabs || 1230 if (slub_debug && (!slub_debug_slabs || (name &&
1221 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))) 1231 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1222 flags |= slub_debug; 1232 flags |= slub_debug;
1223 1233
1224 return flags; 1234 return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
1260static inline void dec_slabs_node(struct kmem_cache *s, int node, 1270static inline void dec_slabs_node(struct kmem_cache *s, int node,
1261 int objects) {} 1271 int objects) {}
1262 1272
1273static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1274{
1275 kmemleak_alloc(ptr, size, 1, flags);
1276}
1277
1278static inline void kfree_hook(const void *x)
1279{
1280 kmemleak_free(x);
1281}
1282
1263static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 1283static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
1264 { return 0; } 1284 { return 0; }
1265 1285
1266static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, 1286static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1267 void *object) {} 1287 void *object)
1288{
1289 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
1290 flags & gfp_allowed_mask);
1291}
1268 1292
1269static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1293static inline void slab_free_hook(struct kmem_cache *s, void *x)
1294{
1295 kmemleak_free_recursive(x, s->flags);
1296}
1270 1297
1271#endif /* CONFIG_SLUB_DEBUG */ 1298#endif /* CONFIG_SLUB_DEBUG */
1272 1299
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
2829 * slab on the node for this slabcache. There are no concurrent accesses 2856 * slab on the node for this slabcache. There are no concurrent accesses
2830 * possible. 2857 * possible.
2831 * 2858 *
2832 * Note that this function only works on the kmalloc_node_cache 2859 * Note that this function only works on the kmem_cache_node
2833 * when allocating for the kmalloc_node_cache. This is used for bootstrapping 2860 * when allocating for the kmem_cache_node. This is used for bootstrapping
2834 * memory on a fresh node that has no slab structures yet. 2861 * memory on a fresh node that has no slab structures yet.
2835 */ 2862 */
2836static void early_kmem_cache_node_alloc(int node) 2863static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3272 if (page) 3299 if (page)
3273 ptr = page_address(page); 3300 ptr = page_address(page);
3274 3301
3275 kmemleak_alloc(ptr, size, 1, flags); 3302 kmalloc_large_node_hook(ptr, size, flags);
3276 return ptr; 3303 return ptr;
3277} 3304}
3278 3305
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
3336 page = virt_to_head_page(x); 3363 page = virt_to_head_page(x);
3337 if (unlikely(!PageSlab(page))) { 3364 if (unlikely(!PageSlab(page))) {
3338 BUG_ON(!PageCompound(page)); 3365 BUG_ON(!PageCompound(page));
3339 kmemleak_free(x); 3366 kfree_hook(x);
3340 __free_memcg_kmem_pages(page, compound_order(page)); 3367 __free_memcg_kmem_pages(page, compound_order(page));
3341 return; 3368 return;
3342 } 3369 }
@@ -4983,7 +5010,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
4983 * through the descendants with best-effort propagation. 5010 * through the descendants with best-effort propagation.
4984 */ 5011 */
4985 for_each_memcg_cache_index(i) { 5012 for_each_memcg_cache_index(i) {
4986 struct kmem_cache *c = cache_from_memcg(s, i); 5013 struct kmem_cache *c = cache_from_memcg_idx(s, i);
4987 if (c) 5014 if (c)
4988 attribute->store(c, buf, len); 5015 attribute->store(c, buf, len);
4989 } 5016 }