author     Mingming Cao <cmm@us.ibm.com>      2007-10-16 18:38:25 -0400
committer  Theodore Ts'o <tytso@mit.edu>      2007-10-17 18:49:56 -0400
commit     af1e76d6b3f37cb89d9192eaf83588adaf4728eb (patch)
tree       8b30dd421361a61c3f2e9c96bd574986b4e78c9e /fs
parent     c089d490dfbf53bc0893dc9ef57cf3ee6448314d (diff)
JBD2: jbd2 slab allocation cleanups
JBD2: Replace slab allocations with page allocations

JBD2 allocates memory for committed_data and frozen_data from slab. However,
JBD2 should not pass slab pages down to the block layer. Use page allocator
pages instead. This will also prepare JBD for the large blocksize patchset.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/jbd2/commit.c        6
-rw-r--r--  fs/jbd2/journal.c      88
-rw-r--r--  fs/jbd2/transaction.c  14
3 files changed, 14 insertions, 94 deletions
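
Note: the jbd2_alloc()/jbd2_free() helpers that replace the slab calls are
defined outside fs/ and so do not appear in this diffstat. A minimal sketch of
what page-allocator-backed versions might look like, assuming they simply round
the blocksize up to whole pages (the names match the callers below, but the
bodies here are illustrative only, not the definitions added by this patch):

/* Sketch, not part of this diff: back committed_data/frozen_data with whole
 * pages so the buffers can safely be passed down to the block layer. */
#include <linux/gfp.h>
#include <linux/mm.h>

static inline void *jbd2_alloc(size_t size, gfp_t flags)
{
	/* get_order() rounds size up to a power-of-two number of pages,
	 * so the allocation never shares or crosses a slab page. */
	return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd2_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}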
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index c0f59d1b13dc..2cac34ac756a 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -384,7 +384,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		struct buffer_head *bh = jh2bh(jh);
 
 		jbd_lock_bh_state(bh);
-		jbd2_slab_free(jh->b_committed_data, bh->b_size);
+		jbd2_free(jh->b_committed_data, bh->b_size);
 		jh->b_committed_data = NULL;
 		jbd_unlock_bh_state(bh);
 	}
@@ -801,14 +801,14 @@ restart_loop:
 		 * Otherwise, we can just throw away the frozen data now.
 		 */
 		if (jh->b_committed_data) {
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			if (jh->b_frozen_data) {
 				jh->b_committed_data = jh->b_frozen_data;
 				jh->b_frozen_data = NULL;
 			}
 		} else if (jh->b_frozen_data) {
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 			jh->b_frozen_data = NULL;
 		}
 
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f37324aee817..2d9ecca74f19 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -84,7 +84,6 @@ EXPORT_SYMBOL(jbd2_journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int jbd2_journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -335,10 +334,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd2_slab_free(tmp, bh_in->b_size);
+			jbd2_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -1096,13 +1095,6 @@ int jbd2_journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (jbd2_journal_recover(journal))
@@ -1636,77 +1628,6 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 }
 
 /*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size) (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
-};
-
-static void jbd2_journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int jbd2_journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd2_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd2_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
-/*
  * Journal_head storage management
  */
 static struct kmem_cache *jbd2_journal_head_cache;
@@ -1893,13 +1814,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 			printk(KERN_WARNING "%s: freeing "
 					"b_frozen_data\n",
 					__FUNCTION__);
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 		}
 		if (jh->b_committed_data) {
 			printk(KERN_WARNING "%s: freeing "
 					"b_committed_data\n",
 					__FUNCTION__);
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
@@ -2040,7 +1961,6 @@ static void jbd2_journal_destroy_caches(void)
 	jbd2_journal_destroy_revoke_caches();
 	jbd2_journal_destroy_jbd2_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
-	jbd2_journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7946ff43fc40..bd047f9af8e7 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -236,7 +236,7 @@ out:
 /* Allocate a new handle. This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
 	if (!handle)
 		return NULL;
 	memset(handle, 0, sizeof(*handle));
@@ -282,7 +282,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 
 	err = start_this_handle(journal, handle);
 	if (err < 0) {
-		jbd_free_handle(handle);
+		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
 	}
@@ -668,7 +668,7 @@ repeat:
 			JBUFFER_TRACE(jh, "allocate memory for buffer");
 			jbd_unlock_bh_state(bh);
 			frozen_buffer =
-				jbd2_slab_alloc(jh2bh(jh)->b_size,
+				jbd2_alloc(jh2bh(jh)->b_size,
 						GFP_NOFS);
 			if (!frozen_buffer) {
 				printk(KERN_EMERG
@@ -728,7 +728,7 @@ done:
 
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd2_slab_free(frozen_buffer, bh->b_size);
+		jbd2_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -881,7 +881,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 			       __FUNCTION__);
@@ -908,7 +908,7 @@ repeat:
 out:
 	jbd2_journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd2_slab_free(committed_data, bh->b_size);
+		jbd2_free(committed_data, bh->b_size);
 	return err;
 }
 
@@ -1411,7 +1411,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	jbd_free_handle(handle);
+	jbd2_free_handle(handle);
 	return err;
 }
 