author		Mingming Cao <cmm@us.ibm.com>		2007-10-16 18:38:25 -0400
committer	Theodore Ts'o <tytso@mit.edu>		2007-10-17 18:49:56 -0400
commit		c089d490dfbf53bc0893dc9ef57cf3ee6448314d (patch)
tree		8faffea3bdcfdd48ce175ac92d5088ced4f1c969 /fs/jbd/journal.c
parent		d85714d81cc0408daddb68c10f7fd69eafe7c213 (diff)
JBD: JBD slab allocation cleanups
JBD: Replace slab allocations with page allocations

JBD allocates memory for committed_data and frozen_data from slab. However,
JBD should not pass slab pages down to the block layer. Use page allocator
pages instead. This will also prepare JBD for the large blocksize patchset.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
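Note: the diff below only removes the per-blocksize slab machinery from
fs/jbd/journal.c and switches the call sites over to jbd_alloc()/jbd_free().
Those helpers are introduced elsewhere in the same patch (presumably in
include/linux/jbd.h, which is not part of this file's diff). The following is
a minimal sketch of what page-allocator-backed helpers of that shape look
like; it is an illustration under that assumption, not the patch's exact code:

/*
 * Illustrative sketch only -- the real helpers are defined outside this
 * file.  Buffers come straight from the page allocator (never from slab),
 * so they are page-aligned and safe to hand down to the block layer.
 */
#include <linux/gfp.h>
#include <asm/page.h>

static inline void *jbd_alloc(size_t size, gfp_t flags)
{
	/* get_order() rounds the block size up to a whole number of pages. */
	return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}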
Diffstat (limited to 'fs/jbd/journal.c')
-rw-r--r--	fs/jbd/journal.c	88
1 file changed, 4 insertions(+), 84 deletions(-)
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index a6be78c05dce..7edf3fdfdadd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -334,10 +333,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd_slab_free(tmp, bh_in->b_size);
+			jbd_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -1095,13 +1094,6 @@ int journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (journal_recover(journal))
@@ -1624,77 +1616,6 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 }
 
 /*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size)  (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
-};
-
-static void journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
-/*
  * Journal_head storage management
  */
 static struct kmem_cache *journal_head_cache;
@@ -1881,13 +1802,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 			printk(KERN_WARNING "%s: freeing "
 					"b_frozen_data\n",
 					__FUNCTION__);
-			jbd_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd_free(jh->b_frozen_data, bh->b_size);
 		}
 		if (jh->b_committed_data) {
 			printk(KERN_WARNING "%s: freeing "
 					"b_committed_data\n",
 					__FUNCTION__);
-			jbd_slab_free(jh->b_committed_data, bh->b_size);
+			jbd_free(jh->b_committed_data, bh->b_size);
 		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
@@ -2042,7 +1963,6 @@ static void journal_destroy_caches(void)
 	journal_destroy_revoke_caches();
 	journal_destroy_journal_head_cache();
 	journal_destroy_handle_cache();
-	journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)