Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  64
1 file changed, 41 insertions, 23 deletions
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1321,8 +1321,10 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 		return NULL;
 
 	if (node == NUMA_NO_NODE) {
+#ifdef CONFIG_SCHED_DEBUG_TRACE
 		if (flags&GFP_COLOR)
 			printk(KERN_INFO "alloc_pages calls with GFP_COLOR order = %d\n", order);
+#endif
 		page = alloc_pages(flags, order);
 	}
 	else
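Every hunk in this commit makes the same change: the GFP_COLOR trace printks become conditional on CONFIG_SCHED_DEBUG_TRACE, so non-debug builds compile them out. As an aside, the repeated #ifdef/printk/#endif pattern could be folded into one helper macro; the sketch below only illustrates that alternative, and the name trace_color_printk is hypothetical, not something this tree defines.

    /* Hypothetical helper, not part of this patch: centralizes the
     * CONFIG_SCHED_DEBUG_TRACE guard and the GFP_COLOR test that each
     * hunk below repeats by hand. */
    #ifdef CONFIG_SCHED_DEBUG_TRACE
    #define trace_color_printk(gfp, fmt, ...)                          \
            do {                                                       \
                    if ((gfp) & GFP_COLOR)                             \
                            printk(KERN_INFO fmt, ##__VA_ARGS__);      \
            } while (0)
    #else
    /* no_printk() emits no code but keeps printf-format checking */
    #define trace_color_printk(gfp, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
    #endif

With such a helper, the hunk above would shrink to a single unguarded line: trace_color_printk(flags, "alloc_pages calls with GFP_COLOR order = %d\n", order);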
@@ -1340,8 +1342,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 
-	if (flags&GFP_COLOR)
-		printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask);
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&GFP_COLOR)
+		printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask);
+#endif
 
 	flags &= gfp_allowed_mask;
 
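Printing gfp_allowed_mask just before the flags &= gfp_allowed_mask line is informative because that mask can silently strip caller bits: during early boot the kernel runs with a restricted gfp_allowed_mask, so a coloring request may be dropped before it ever reaches the page allocator. A minimal sketch of the case this trace helps spot, assuming GFP_COLOR carries a __GFP_COLOR bit defined elsewhere in this tree:

    #include <linux/gfp.h>
    #include <linux/printk.h>

    /* Sketch only: detects the masking effect the trace above exposes.
     * GFP_COLOR / __GFP_COLOR are assumed to come from this tree. */
    static void show_color_masking(void)
    {
            gfp_t requested = GFP_KERNEL | GFP_COLOR;
            gfp_t effective = requested & gfp_allowed_mask;

            if ((requested & __GFP_COLOR) && !(effective & __GFP_COLOR))
                    printk(KERN_INFO "GFP_COLOR masked off before allocation\n");
    }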
@@ -1355,9 +1359,12 @@ if (flags&GFP_COLOR)
 	 * so we fall-back to the minimum order allocation.
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
-	if (flags&__GFP_COLOR) {
-		printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp);
-	}
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&__GFP_COLOR)
+		printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp);
+#endif
+
 	page = alloc_slab_page(s, alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
@@ -2232,9 +2239,10 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 	page = new_slab(s, flags, node);
 
-	if (flags&GFP_COLOR) {
-		printk(KERN_INFO "new_slab_objects(): gets page %p\n", page);
-	}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags&GFP_COLOR)
+		printk(KERN_INFO "new_slab_objects(): gets page %p\n", page);
+#endif
 
 	if (page) {
 		c = raw_cpu_ptr(s->cpu_slab);
@@ -2321,8 +2329,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
-	if (gfpflags&GFP_COLOR)
-		printk(KERN_INFO "__slab_alloc slow_path\n");
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (gfpflags&GFP_COLOR)
+		printk(KERN_INFO "__slab_alloc slow_path\n");
+#endif
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2334,10 +2345,10 @@ if (gfpflags&GFP_COLOR)
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-
-	if (gfpflags&GFP_COLOR) {
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (gfpflags&GFP_COLOR)
 		printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial);
-	}
+#endif
 
 	page = c->page;
 	if (!page)
@@ -3328,22 +3339,29 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (flags & GFP_COLOR) {
-		printk(KERN_INFO "kmalloc size %d\n", size);
-	}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "kmalloc size %d\n", size);
+#endif
+
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = kmalloc_slab(size, flags);
-	if (flags & GFP_COLOR) {
-		printk(KERN_INFO "kmalloc_slab %p\n", s);
-	}
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "kmalloc_slab %p\n", s);
+#endif
+
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	if (flags & GFP_COLOR) {
-		printk(KERN_INFO "slab_alloc calls!!\n");
-	}
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "slab_alloc calls!!\n");
+#endif
+
 	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
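Taken together, one colored allocation exercises every trace point touched above: __kmalloc and slab_alloc on the fast path, then __slab_alloc, new_slab_objects, allocate_slab, and alloc_slab_page when the per-CPU slab misses. The sketch below is not part of the patch; it assumes a build with CONFIG_SCHED_DEBUG_TRACE=y and this tree's GFP_COLOR flag, and the expected lines are taken from the format strings in the hunks above.

    #include <linux/slab.h>

    /* Sketch: drives the traced path end to end. A run-time size is used
     * because SLUB may shortcut a compile-time-constant kmalloc() to
     * kmem_cache_alloc_trace(), bypassing __kmalloc() and its prints. */
    static noinline void color_trace_smoke_test(size_t len)
    {
            void *buf = kmalloc(len, GFP_KERNEL | GFP_COLOR);

            /* Fast path prints (per the format strings above):
             *   kmalloc size <len>
             *   kmalloc_slab <cache ptr>
             *   slab_alloc calls!!
             * If a new slab must be allocated, the slow path adds:
             *   __slab_alloc slow_path
             *   __slab_alloc : page <ptr>, partial <ptr>
             *   gfp_allowed_mask = <mask>
             *   allocate_slab with GFP_COLOR alloc_gfp = <flags>
             *   alloc_pages calls with GFP_COLOR order = <order>
             *   new_slab_objects(): gets page <ptr>
             */
            kfree(buf);
    }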