Diffstat (limited to 'mm/slub.c')

-rw-r--r--  mm/slub.c  36  ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)
@@ -1320,8 +1320,11 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	if (memcg_charge_slab(s, flags, order))
 		return NULL;
 
-	if (node == NUMA_NO_NODE)
+	if (node == NUMA_NO_NODE) {
+		if (flags & GFP_COLOR)
+			printk(KERN_INFO "alloc_pages calls with GFP_COLOR order = %d\n", order);
 		page = alloc_pages(flags, order);
+	}
 	else
 		page = alloc_pages_exact_node(node, flags, order);
 
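A note on this hunk: the new print runs once per colored slab-page allocation, so a busy workload can flood the console. If that becomes a problem, the stock printk_ratelimited() helper is a drop-in replacement; a minimal sketch of the same message, capped by the kernel's default ratelimit:

	/* Rate-limited variant of the print added above. */
	if (flags & GFP_COLOR)
		printk_ratelimited(KERN_INFO
			"alloc_pages calls with GFP_COLOR order = %d\n", order);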
@@ -1337,6 +1340,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 
+	if (flags & GFP_COLOR)
+		printk(KERN_INFO "gfp_allowed_mask = %08x\n", gfp_allowed_mask);
+
 	flags &= gfp_allowed_mask;
 
 	if (flags & __GFP_WAIT)
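The point of dumping gfp_allowed_mask right before flags &= gfp_allowed_mask: any bit not covered by the mask is silently stripped there. The mask starts out as GFP_BOOT_MASK during early boot and becomes __GFP_BITS_MASK afterwards, so if this series added __GFP_COLOR above the existing __GFP_BITS_SHIFT without widening the mask, the color bit would be cleared here on every allocation with no other symptom. A more targeted check (hypothetical, not in the patch) would make the loss explicit instead of requiring the reader to eyeball a hex dump:

	/* Hypothetical check: complain once if the color bit is about
	 * to be masked off, assuming __GFP_COLOR is the single bit
	 * introduced by this series. */
	if ((flags & __GFP_COLOR) && !(gfp_allowed_mask & __GFP_COLOR))
		WARN_ONCE(1, "GFP_COLOR stripped by gfp_allowed_mask");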
@@ -1349,7 +1355,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	 * so we fall-back to the minimum order allocation.
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
-
+	if (flags & __GFP_COLOR) {
+		printk(KERN_INFO "allocate_slab with GFP_COLOR alloc_gfp = %08x\n", alloc_gfp);
+	}
 	page = alloc_slab_page(s, alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
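One inconsistency worth flagging: this hunk tests __GFP_COLOR where every other hunk tests GFP_COLOR. If the two differ, and in particular if GFP_COLOR is a composite of several bits the way GFP_KERNEL is, then (flags & GFP_COLOR) is nonzero whenever any constituent bit is set, and the other prints would fire for ordinary allocations too. Illustrative definitions showing the pitfall (hypothetical values and names, not from this series):

	/* Hypothetical definitions, for illustration only: */
	#define ___GFP_COLOR	0x2000000u			/* single new bit */
	#define __GFP_COLOR	((__force gfp_t)___GFP_COLOR)
	#define GFP_COLOR	(GFP_KERNEL | __GFP_COLOR)	/* composite */

	/* With these, (flags & GFP_COLOR) is already nonzero for a plain
	 * GFP_KERNEL allocation, so prints gated on GFP_COLOR would fire
	 * for uncolored requests; (flags & __GFP_COLOR) would not. */

Testing the single bit everywhere sidesteps the ambiguity.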
@@ -1419,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}
 
 	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK | GFP_COLOR), node);
 	if (!page)
 		goto out;
 
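This hunk is the one functional change in the patch; everything else is tracing. Previously new_slab() masked the caller's flags down to GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK, which dropped the color request before allocate_slab() ever saw it. With GFP_COLOR added to the mask, a colored request can now travel from kmalloc() all the way to alloc_pages(). Assuming GFP_COLOR is defined by an earlier patch in this series, a caller would look like:

	/* Sketch: request a cache-colored buffer. GFP_COLOR now survives
	 * the flag mask in new_slab() and reaches the page allocator. */
	void *buf = kmalloc(128, GFP_KERNEL | GFP_COLOR);
	if (!buf)
		return -ENOMEM;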
@@ -2223,6 +2231,11 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		return freelist;
 
 	page = new_slab(s, flags, node);
+
+	if (flags & GFP_COLOR) {
+		printk(KERN_INFO "new_slab_objects(): gets page %p\n", page);
+	}
+
 	if (page) {
 		c = raw_cpu_ptr(s->cpu_slab);
 		if (c->page)
@@ -2308,6 +2321,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
+	if (gfpflags & GFP_COLOR)
+		printk(KERN_INFO "__slab_alloc slow_path\n");
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2319,6 +2334,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
+
+	if (gfpflags & GFP_COLOR) {
+		printk(KERN_INFO "__slab_alloc : page %p, partial %p\n", c->page, c->partial);
+	}
+
 	page = c->page;
 	if (!page)
 		goto new_slab;
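The page/partial print above runs with interrupts disabled (it sits after the local_irq_save() earlier in the function), and printk() can take console locks there, stretching the interrupts-off window on every traced slow-path allocation. For hot-path tracing the usual lower-overhead choice is trace_printk(), which logs to the ftrace ring buffer instead of the console; a drop-in sketch:

	/* Lower-overhead alternative inside the irq-off region; output
	 * is read later via /sys/kernel/debug/tracing/trace. */
	if (gfpflags & GFP_COLOR)
		trace_printk("__slab_alloc: page %p, partial %p\n",
			     c->page, c->partial);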
@@ -3308,14 +3328,22 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
+	if (flags & GFP_COLOR) {
+		printk(KERN_INFO "kmalloc size %zu\n", size);
+	}
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = kmalloc_slab(size, flags);
-
+	if (flags & GFP_COLOR) {
+		printk(KERN_INFO "kmalloc_slab %p\n", s);
+	}
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
+	if (flags & GFP_COLOR) {
+		printk(KERN_INFO "slab_alloc calls!!\n");
+	}
 	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
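Taken together, the prints trace one colored request through the whole slab slow path: __kmalloc() → kmalloc_slab() → slab_alloc() → __slab_alloc() → new_slab_objects() → new_slab() → allocate_slab() → alloc_slab_page(). If this instrumentation is meant to outlive a debugging session, folding the repeated test-and-print pattern into one helper keeps every call site to a single line. A minimal sketch; the color_dbg name and the DEBUG guard are illustrative, not part of the patch:

/* Hypothetical helper: trace colored allocations, compiled out
 * unless DEBUG is defined. */
#ifdef DEBUG
#define color_dbg(gfp, fmt, ...)					\
	do {								\
		if ((gfp) & GFP_COLOR)					\
			printk(KERN_INFO "%s: " fmt,			\
			       __func__, ##__VA_ARGS__);		\
	} while (0)
#else
#define color_dbg(gfp, fmt, ...) do { } while (0)
#endif

Call sites then collapse to, e.g., color_dbg(flags, "alloc_gfp = %08x\n", alloc_gfp); and the function name is prepended automatically.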