Diffstat (limited to 'fs/jbd2/journal.c')
 fs/jbd2/journal.c | 88 ++++------------------------------------------------------------------------------------
 1 file changed, 4 insertions(+), 84 deletions(-)
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f37324aee817..2d9ecca74f19 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -84,7 +84,6 @@ EXPORT_SYMBOL(jbd2_journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int jbd2_journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -335,10 +334,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd2_slab_free(tmp, bh_in->b_size);
+			jbd2_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -1096,13 +1095,6 @@ int jbd2_journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (jbd2_journal_recover(journal))
@@ -1636,77 +1628,6 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 }
 
 /*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size)  (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
-};
-
-static void jbd2_journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int jbd2_journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd2_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd2_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
-/*
  * Journal_head storage management
  */
 static struct kmem_cache *jbd2_journal_head_cache;
@@ -1893,13 +1814,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 				printk(KERN_WARNING "%s: freeing "
 						"b_frozen_data\n",
 						__FUNCTION__);
-				jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+				jbd2_free(jh->b_frozen_data, bh->b_size);
 			}
 			if (jh->b_committed_data) {
 				printk(KERN_WARNING "%s: freeing "
 						"b_committed_data\n",
 						__FUNCTION__);
-				jbd2_slab_free(jh->b_committed_data, bh->b_size);
+				jbd2_free(jh->b_committed_data, bh->b_size);
 			}
 			bh->b_private = NULL;
 			jh->b_bh = NULL;	/* debug, really */
@@ -2040,7 +1961,6 @@ static void jbd2_journal_destroy_caches(void)
 	jbd2_journal_destroy_revoke_caches();
 	jbd2_journal_destroy_jbd2_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
-	jbd2_journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
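
Note: the jbd2_alloc()/jbd2_free() helpers that the hunks above switch to are not defined in this file's diff; they are introduced elsewhere in the same series (presumably in include/linux/jbd2.h). The following is only a sketch of what a page-allocator-backed replacement could look like, assuming the helpers keep the (size, flags) / (ptr, size) call signatures seen above and round each allocation up to whole pages:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch only -- not the definition from this patch series.
 * get_order() rounds the buffer size up to a power-of-two number of
 * pages, so an allocation of up to PAGE_SIZE is a single, page-aligned
 * page and can never straddle a page boundary.
 */
static inline void *jbd2_alloc(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd2_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}

Allocating whole pages this way sidesteps the issue described in the removed comment: with slab debugging enabled, slab objects gain red zones and lose their size-based alignment, so a slab-backed buffer handed to the block layer could cross a page boundary.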