diff options
author | Steven Whitehouse <swhiteho@redhat.com> | 2006-04-21 12:52:36 -0400 |
---|---|---|
committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-04-21 12:52:36 -0400 |
commit | a748422ee45725e04e1d3792fa19dfa90ddfd116 (patch) | |
tree | 978e12895468baaa9f7ab2747b9f7d50beaf1717 /mm/slab.c | |
parent | c63e31c2cc1ec67372920b5e1aff8204d04dd172 (diff) | |
parent | f4ffaa452e71495a06376f12f772342bc57051fc (diff) |
Merge branch 'master'
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 30 |
1 files changed, 23 insertions, 7 deletions
@@ -420,6 +420,7 @@ struct kmem_cache { | |||
420 | unsigned long max_freeable; | 420 | unsigned long max_freeable; |
421 | unsigned long node_allocs; | 421 | unsigned long node_allocs; |
422 | unsigned long node_frees; | 422 | unsigned long node_frees; |
423 | unsigned long node_overflow; | ||
423 | atomic_t allochit; | 424 | atomic_t allochit; |
424 | atomic_t allocmiss; | 425 | atomic_t allocmiss; |
425 | atomic_t freehit; | 426 | atomic_t freehit; |
@@ -465,6 +466,7 @@ struct kmem_cache { | |||
465 | #define STATS_INC_ERR(x) ((x)->errors++) | 466 | #define STATS_INC_ERR(x) ((x)->errors++) |
466 | #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) | 467 | #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) |
467 | #define STATS_INC_NODEFREES(x) ((x)->node_frees++) | 468 | #define STATS_INC_NODEFREES(x) ((x)->node_frees++) |
469 | #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) | ||
468 | #define STATS_SET_FREEABLE(x, i) \ | 470 | #define STATS_SET_FREEABLE(x, i) \ |
469 | do { \ | 471 | do { \ |
470 | if ((x)->max_freeable < i) \ | 472 | if ((x)->max_freeable < i) \ |
@@ -484,6 +486,7 @@ struct kmem_cache { | |||
484 | #define STATS_INC_ERR(x) do { } while (0) | 486 | #define STATS_INC_ERR(x) do { } while (0) |
485 | #define STATS_INC_NODEALLOCS(x) do { } while (0) | 487 | #define STATS_INC_NODEALLOCS(x) do { } while (0) |
486 | #define STATS_INC_NODEFREES(x) do { } while (0) | 488 | #define STATS_INC_NODEFREES(x) do { } while (0) |
489 | #define STATS_INC_ACOVERFLOW(x) do { } while (0) | ||
487 | #define STATS_SET_FREEABLE(x, i) do { } while (0) | 490 | #define STATS_SET_FREEABLE(x, i) do { } while (0) |
488 | #define STATS_INC_ALLOCHIT(x) do { } while (0) | 491 | #define STATS_INC_ALLOCHIT(x) do { } while (0) |
489 | #define STATS_INC_ALLOCMISS(x) do { } while (0) | 492 | #define STATS_INC_ALLOCMISS(x) do { } while (0) |
@@ -1453,7 +1456,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
1453 | int i; | 1456 | int i; |
1454 | 1457 | ||
1455 | flags |= cachep->gfpflags; | 1458 | flags |= cachep->gfpflags; |
1459 | #ifndef CONFIG_MMU | ||
1460 | /* nommu uses slab's for process anonymous memory allocations, so | ||
1461 | * requires __GFP_COMP to properly refcount higher order allocations ||
1462 | */ | ||
1463 | page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder); | ||
1464 | #else | ||
1456 | page = alloc_pages_node(nodeid, flags, cachep->gfporder); | 1465 | page = alloc_pages_node(nodeid, flags, cachep->gfporder); |
1466 | #endif | ||
1457 | if (!page) | 1467 | if (!page) |
1458 | return NULL; | 1468 | return NULL; |
1459 | addr = page_address(page); | 1469 | addr = page_address(page); |
@@ -2318,13 +2328,15 @@ EXPORT_SYMBOL(kmem_cache_destroy); | |||
2318 | 2328 | ||
2319 | /* Get the memory for a slab management obj. */ | 2329 | /* Get the memory for a slab management obj. */ |
2320 | static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, | 2330 | static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, |
2321 | int colour_off, gfp_t local_flags) | 2331 | int colour_off, gfp_t local_flags, |
2332 | int nodeid) | ||
2322 | { | 2333 | { |
2323 | struct slab *slabp; | 2334 | struct slab *slabp; |
2324 | 2335 | ||
2325 | if (OFF_SLAB(cachep)) { | 2336 | if (OFF_SLAB(cachep)) { |
2326 | /* Slab management obj is off-slab. */ | 2337 | /* Slab management obj is off-slab. */ |
2327 | slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags); | 2338 | slabp = kmem_cache_alloc_node(cachep->slabp_cache, |
2339 | local_flags, nodeid); | ||
2328 | if (!slabp) | 2340 | if (!slabp) |
2329 | return NULL; | 2341 | return NULL; |
2330 | } else { | 2342 | } else { |
@@ -2334,6 +2346,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, | |||
2334 | slabp->inuse = 0; | 2346 | slabp->inuse = 0; |
2335 | slabp->colouroff = colour_off; | 2347 | slabp->colouroff = colour_off; |
2336 | slabp->s_mem = objp + colour_off; | 2348 | slabp->s_mem = objp + colour_off; |
2349 | slabp->nodeid = nodeid; | ||
2337 | return slabp; | 2350 | return slabp; |
2338 | } | 2351 | } |
2339 | 2352 | ||
@@ -2519,7 +2532,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
2519 | goto failed; | 2532 | goto failed; |
2520 | 2533 | ||
2521 | /* Get slab management. */ | 2534 | /* Get slab management. */ |
2522 | slabp = alloc_slabmgmt(cachep, objp, offset, local_flags); | 2535 | slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid); |
2523 | if (!slabp) | 2536 | if (!slabp) |
2524 | goto opps1; | 2537 | goto opps1; |
2525 | 2538 | ||
@@ -3080,9 +3093,11 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) | |||
3080 | if (l3->alien && l3->alien[nodeid]) { | 3093 | if (l3->alien && l3->alien[nodeid]) { |
3081 | alien = l3->alien[nodeid]; | 3094 | alien = l3->alien[nodeid]; |
3082 | spin_lock(&alien->lock); | 3095 | spin_lock(&alien->lock); |
3083 | if (unlikely(alien->avail == alien->limit)) | 3096 | if (unlikely(alien->avail == alien->limit)) { |
3097 | STATS_INC_ACOVERFLOW(cachep); | ||
3084 | __drain_alien_cache(cachep, | 3098 | __drain_alien_cache(cachep, |
3085 | alien, nodeid); | 3099 | alien, nodeid); |
3100 | } | ||
3086 | alien->entry[alien->avail++] = objp; | 3101 | alien->entry[alien->avail++] = objp; |
3087 | spin_unlock(&alien->lock); | 3102 | spin_unlock(&alien->lock); |
3088 | } else { | 3103 | } else { |
@@ -3760,7 +3775,7 @@ static void print_slabinfo_header(struct seq_file *m) | |||
3760 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); | 3775 | seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); |
3761 | #if STATS | 3776 | #if STATS |
3762 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " | 3777 | seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> " |
3763 | "<error> <maxfreeable> <nodeallocs> <remotefrees>"); | 3778 | "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); |
3764 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); | 3779 | seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); |
3765 | #endif | 3780 | #endif |
3766 | seq_putc(m, '\n'); | 3781 | seq_putc(m, '\n'); |
@@ -3874,11 +3889,12 @@ static int s_show(struct seq_file *m, void *p) | |||
3874 | unsigned long max_freeable = cachep->max_freeable; | 3889 | unsigned long max_freeable = cachep->max_freeable; |
3875 | unsigned long node_allocs = cachep->node_allocs; | 3890 | unsigned long node_allocs = cachep->node_allocs; |
3876 | unsigned long node_frees = cachep->node_frees; | 3891 | unsigned long node_frees = cachep->node_frees; |
3892 | unsigned long overflows = cachep->node_overflow; | ||
3877 | 3893 | ||
3878 | seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ | 3894 | seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ |
3879 | %4lu %4lu %4lu %4lu", allocs, high, grown, | 3895 | %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, |
3880 | reaped, errors, max_freeable, node_allocs, | 3896 | reaped, errors, max_freeable, node_allocs, |
3881 | node_frees); | 3897 | node_frees, overflows); |
3882 | } | 3898 | } |
3883 | /* cpu stats */ | 3899 | /* cpu stats */ |
3884 | { | 3900 | { |