author		Curt Wohlgemuth <curtw@google.com>	2010-10-27 21:29:12 -0400
committer	Theodore Ts'o <tytso@mit.edu>		2010-10-27 21:29:12 -0400
commit		fb1813f4a8a27bbd4735967e46931e61fc837a3e (patch)
tree		c9d7c9d851c81663a8e501ba5c14f2a4b332f893
parent		b853fd364810a241050778124842a8c415c72a69 (diff)
ext4: use dedicated slab caches for group_info structures
ext4_group_info structures are currently allocated with kmalloc().
With a typical 4K block size, these are 136 bytes each -- meaning
they'll each consume a 256-byte slab object.  On a system with many
large ext4 partitions, that's a lot of wasted kernel slab space.
(E.g., a single 1TB partition will have about 8000 block groups, using
about 2MB of slab, of which nearly 1MB is wasted.)

This patch creates an array of slab pointers created as needed --
depending on the superblock block size -- and uses these slabs to
allocate the group info objects.

Google-Bug-Id: 2980809

Signed-off-by: Curt Wohlgemuth <curtw@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-rw-r--r--	fs/ext4/ext4.h		|  1
-rw-r--r--	fs/ext4/mballoc.c	| 98
2 files changed, 78 insertions, 21 deletions
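
The arithmetic behind the waste estimate is easy to check. Below is a
minimal userspace sketch, assuming the default 4K-block layout of 32768
blocks (128MB) per block group and the 136-byte object / 256-byte kmalloc
size class cited in the commit message:

    #include <stdio.h>

    int main(void)
    {
            /* Figures from the commit message (4K block size). */
            const long long obj_size  = 136;  /* one ext4_group_info */
            const long long slab_size = 256;  /* kmalloc rounds 136 up to 256 */

            /* 4K blocks, 32768 blocks per group => 128MB per group,
             * so a 1TB partition has about 8192 block groups. */
            const long long groups =
                    (1024LL * 1024 * 1024 * 1024) / (32768LL * 4096);

            printf("groups:      %lld\n", groups);               /* 8192 */
            printf("slab used:   %lld KB\n",
                   groups * slab_size / 1024);                   /* 2048 KB */
            printf("slab wasted: %lld KB\n",
                   groups * (slab_size - obj_size) / 1024);      /* 960 KB */
            return 0;
    }

This reproduces the "about 2MB of slab, of which nearly 1MB is wasted"
figures for a single 1TB partition.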
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 889ec9d5e6ad..b364b9df09b3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -205,6 +205,7 @@ typedef struct ext4_io_end {
 #define EXT4_MIN_BLOCK_SIZE		1024
 #define	EXT4_MAX_BLOCK_SIZE		65536
 #define EXT4_MIN_BLOCK_LOG_SIZE	10
+#define	EXT4_MAX_BLOCK_LOG_SIZE	16
 #ifdef __KERNEL__
 # define EXT4_BLOCK_SIZE(s)		((s)->s_blocksize)
 #else
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4b4ad4b7ce57..d4714d6cf7d9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -338,6 +338,14 @@
 static struct kmem_cache *ext4_pspace_cachep;
 static struct kmem_cache *ext4_ac_cachep;
 static struct kmem_cache *ext4_free_ext_cachep;
+
+/* We create slab caches for groupinfo data structures based on the
+ * superblock block size.  There will be one per mounted filesystem for
+ * each unique s_blocksize_bits */
+#define NR_GRPINFO_CACHES	\
+	(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
+
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
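
For reference, with the EXT4_MAX_BLOCK_LOG_SIZE bound added to ext4.h
above, NR_GRPINFO_CACHES works out to 16 - 10 + 1 = 7: one potential
cache per legal s_blocksize_bits value. A standalone sketch of the
mapping from block size to array slot:

    #include <assert.h>
    #include <stdio.h>

    #define EXT4_MIN_BLOCK_LOG_SIZE 10      /* 1K blocks */
    #define EXT4_MAX_BLOCK_LOG_SIZE 16      /* 64K blocks */
    #define NR_GRPINFO_CACHES \
            (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)

    int main(void)
    {
            int bits;

            assert(NR_GRPINFO_CACHES == 7);
            for (bits = EXT4_MIN_BLOCK_LOG_SIZE;
                 bits <= EXT4_MAX_BLOCK_LOG_SIZE; bits++)
                    printf("blocksize %6d -> cache index %d\n",
                           1 << bits, bits - EXT4_MIN_BLOCK_LOG_SIZE);
            return 0;
    }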
@@ -2233,15 +2241,24 @@ static const struct file_operations ext4_mb_seq_groups_fops = {
 	.release	= seq_release,
 };
 
+static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
+{
+	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
+
+	BUG_ON(!cachep);
+	return cachep;
+}
 
 /* Create and initialize ext4_group_info data for the given group. */
 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 			  struct ext4_group_desc *desc)
 {
-	int i, len;
+	int i;
 	int metalen = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_info **meta_group_info;
+	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
 	/*
 	 * First check if this group is the first of a reserved block.
@@ -2261,22 +2278,16 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 			meta_group_info;
 	}
 
-	/*
-	 * calculate needed size. if change bb_counters size,
-	 * don't forget about ext4_mb_generate_buddy()
-	 */
-	len = offsetof(typeof(**meta_group_info),
-		       bb_counters[sb->s_blocksize_bits + 2]);
-
 	meta_group_info =
 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
 
-	meta_group_info[i] = kzalloc(len, GFP_KERNEL);
+	meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
 	if (meta_group_info[i] == NULL) {
 		printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
 		goto exit_group_info;
 	}
+	memset(meta_group_info[i], 0, kmem_cache_size(cachep));
 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
 		&(meta_group_info[i]->bb_state));
 
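
The size that kzalloc() used to be handed -- and that each new cache is
created with later in this patch -- is the ext4_group_info header plus a
bb_counters[] flexible array of s_blocksize_bits + 2 entries (buddy
orders 0 through bits + 1). A toy userspace model of that sizing; the
struct fields here are illustrative stand-ins, not the real kernel
layout (which comes to 136 bytes at 4K block size), and the
offsetof-on-array-element form is a GCC extension the kernel itself
relies on:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-in for struct ext4_group_info: a fixed header plus a
     * flexible array of per-order buddy counters. */
    struct group_info {
            unsigned long state;
            int first_free;
            int free;
            int fragments;
            int counters[];     /* bits + 2 entries, orders 0..bits+1 */
    };

    int main(void)
    {
            int bits;

            /* Mirrors the kernel's offsetof(..., bb_counters[bits + 2]). */
            for (bits = 10; bits <= 16; bits++)
                    printf("blocksize_bits %2d -> object size %zu bytes\n",
                           bits,
                           offsetof(struct group_info, counters[bits + 2]));
            return 0;
    }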
@@ -2331,6 +2342,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	int num_meta_group_infos_max;
 	int array_size;
 	struct ext4_group_desc *desc;
+	struct kmem_cache *cachep;
 
 	/* This is the number of blocks used by GDT */
 	num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
@@ -2388,8 +2400,9 @@ static int ext4_mb_init_backend(struct super_block *sb)
 		return 0;
 
 err_freebuddy:
+	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 	while (i-- > 0)
-		kfree(ext4_get_group_info(sb, i));
+		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
 	i = num_meta_group_infos;
 	while (i-- > 0)
 		kfree(sbi->s_group_info[i]);
@@ -2406,19 +2419,48 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	unsigned offset;
 	unsigned max;
 	int ret;
+	int cache_index;
+	struct kmem_cache *cachep;
+	char *namep = NULL;
 
 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
 
 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
 	if (sbi->s_mb_offsets == NULL) {
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
 	if (sbi->s_mb_maxs == NULL) {
-		kfree(sbi->s_mb_offsets);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+	cachep = ext4_groupinfo_caches[cache_index];
+	if (!cachep) {
+		char name[32];
+		int len = offsetof(struct ext4_group_info,
+				bb_counters[sb->s_blocksize_bits + 2]);
+
+		sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
+		namep = kstrdup(name, GFP_KERNEL);
+		if (!namep) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		/* Need to free the kmem_cache_name() when we
+		 * destroy the slab */
+		cachep = kmem_cache_create(namep, len, 0,
+					     SLAB_RECLAIM_ACCOUNT, NULL);
+		if (!cachep) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ext4_groupinfo_caches[cache_index] = cachep;
 	}
 
 	/* order 0 is regular bitmap */
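
The block above is the heart of the patch: the cache for a given block
size is created lazily at mount time, keyed by cache_index, and the name
string is kstrdup()'ed because kmem_cache_create() of this era keeps
only the pointer, not a copy (hence the comment about freeing it at
destroy time). A userspace sketch of the same create-on-first-use
pattern; struct cache and cache_create() are hypothetical stand-ins for
struct kmem_cache and kmem_cache_create(), not real kernel APIs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for struct kmem_cache. */
    struct cache { char *name; size_t obj_size; };

    #define MIN_LOG   10
    #define NR_CACHES 7

    static struct cache *caches[NR_CACHES];

    static struct cache *cache_create(char *name, size_t size)
    {
            struct cache *c = malloc(sizeof(*c));

            if (c) {
                    c->name = name;         /* keeps the pointer, no copy */
                    c->obj_size = size;
            }
            return c;
    }

    /* Create-on-first-use, keyed by blocksize_bits - MIN_LOG. */
    static struct cache *get_cache(int bits, size_t obj_size)
    {
            int idx = bits - MIN_LOG;

            if (!caches[idx]) {
                    char name[32];
                    char *namep;

                    snprintf(name, sizeof(name), "ext4_groupinfo_%d", bits);
                    namep = strdup(name);   /* must outlive the cache */
                    if (!namep)
                            return NULL;
                    caches[idx] = cache_create(namep, obj_size);
                    if (!caches[idx])
                            free(namep);
            }
            return caches[idx];
    }

    int main(void)
    {
            struct cache *c = get_cache(12, 136);   /* 4K block size */

            if (c)
                    printf("%s: %zu-byte objects\n", c->name, c->obj_size);
            return 0;
    }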
@@ -2439,9 +2481,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	/* init file for buddy data */
 	ret = ext4_mb_init_backend(sb);
 	if (ret != 0) {
-		kfree(sbi->s_mb_offsets);
-		kfree(sbi->s_mb_maxs);
-		return ret;
+		goto out;
 	}
 
 	spin_lock_init(&sbi->s_md_lock);
@@ -2456,9 +2496,8 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 
 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
 	if (sbi->s_locality_groups == NULL) {
-		kfree(sbi->s_mb_offsets);
-		kfree(sbi->s_mb_maxs);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	for_each_possible_cpu(i) {
 		struct ext4_locality_group *lg;
@@ -2475,7 +2514,13 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 
 	if (sbi->s_journal)
 		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
-	return 0;
+out:
+	if (ret) {
+		kfree(sbi->s_mb_offsets);
+		kfree(sbi->s_mb_maxs);
+		kfree(namep);
+	}
+	return ret;
 }
 
 /* need to called with the ext4 group lock held */
@@ -2503,6 +2548,7 @@ int ext4_mb_release(struct super_block *sb)
 	int num_meta_group_infos;
 	struct ext4_group_info *grinfo;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
 	if (sbi->s_group_info) {
 		for (i = 0; i < ngroups; i++) {
@@ -2513,7 +2559,7 @@ int ext4_mb_release(struct super_block *sb)
 			ext4_lock_group(sb, i);
 			ext4_mb_cleanup_pa(grinfo);
 			ext4_unlock_group(sb, i);
-			kfree(grinfo);
+			kmem_cache_free(cachep, grinfo);
 		}
 		num_meta_group_infos = (ngroups +
 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
@@ -2691,6 +2737,7 @@ int __init init_ext4_mballoc(void)
 
 void exit_ext4_mballoc(void)
 {
+	int i;
 	/*
 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
 	 * before destroying the slab cache.
@@ -2699,6 +2746,15 @@ void exit_ext4_mballoc(void)
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 	kmem_cache_destroy(ext4_free_ext_cachep);
+
+	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+		struct kmem_cache *cachep = ext4_groupinfo_caches[i];
+		if (cachep) {
+			char *name = (char *)kmem_cache_name(cachep);
+			kmem_cache_destroy(cachep);
+			kfree(name);
+		}
+	}
 	ext4_remove_debugfs_entry();
 }
 
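
Module exit mirrors the lazy creation: each cache that was actually
instantiated is destroyed and its kstrdup()'ed name freed, which is why
the loop fetches the name via kmem_cache_name() before
kmem_cache_destroy() invalidates the cache. Continuing the userspace
model from the mount-time sketch above, the matching teardown might
look like:

    /* Teardown matching get_cache() in the earlier sketch: fetch the
     * name before tearing the cache down, then free both. */
    static void destroy_caches(void)
    {
            int i;

            for (i = 0; i < NR_CACHES; i++) {
                    struct cache *c = caches[i];

                    if (c) {
                            char *name = c->name;   /* grab before destroy */

                            free(c);                /* "kmem_cache_destroy" */
                            free(name);
                            caches[i] = NULL;
                    }
            }
    }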