Diffstat (limited to 'fs/jbd/journal.c')
-rw-r--r--  fs/jbd/journal.c | 108
1 file changed, 10 insertions(+), 98 deletions(-)
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index a6be78c05dce..5d9fec0b7ebd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -218,7 +217,7 @@ static int journal_start_thread(journal_t *journal)
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 
-	wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+	wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
 	return 0;
 }
 
@@ -230,7 +229,8 @@ static void journal_kill_thread(journal_t *journal)
 	while (journal->j_task) {
 		wake_up(&journal->j_wait_commit);
 		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+		wait_event(journal->j_wait_done_commit,
+				journal->j_task == NULL);
 		spin_lock(&journal->j_state_lock);
 	}
 	spin_unlock(&journal->j_state_lock);
@@ -334,10 +334,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd_slab_free(tmp, bh_in->b_size);
+			jbd_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -654,7 +654,7 @@ static journal_t * journal_init_common (void)
 	journal_t *journal;
 	int err;
 
-	journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+	journal = kmalloc(sizeof(*journal), GFP_KERNEL);
 	if (!journal)
 		goto fail;
 	memset(journal, 0, sizeof(*journal));
@@ -1095,13 +1095,6 @@ int journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (journal_recover(journal))
@@ -1615,86 +1608,6 @@ int journal_blocks_per_page(struct inode *inode)
 }
 
 /*
- * Simple support for retrying memory allocations. Introduced to help to
- * debug different VM deadlock avoidance strategies.
- */
-void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
-{
-	return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
-}
-
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size)  (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
-};
-
-static void journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
-/*
  * Journal_head storage management
  */
 static struct kmem_cache *journal_head_cache;
@@ -1739,14 +1652,14 @@ static struct journal_head *journal_alloc_journal_head(void)
 	atomic_inc(&nr_journal_heads);
 #endif
 	ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
-	if (ret == 0) {
+	if (ret == NULL) {
 		jbd_debug(1, "out of memory for journal_head\n");
 		if (time_after(jiffies, last_warning + 5*HZ)) {
 			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
 			       __FUNCTION__);
 			last_warning = jiffies;
 		}
-		while (ret == 0) {
+		while (ret == NULL) {
 			yield();
 			ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
 		}
@@ -1881,13 +1794,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 				printk(KERN_WARNING "%s: freeing "
 						"b_frozen_data\n",
 						__FUNCTION__);
-				jbd_slab_free(jh->b_frozen_data, bh->b_size);
+				jbd_free(jh->b_frozen_data, bh->b_size);
 			}
 			if (jh->b_committed_data) {
 				printk(KERN_WARNING "%s: freeing "
 						"b_committed_data\n",
 						__FUNCTION__);
-				jbd_slab_free(jh->b_committed_data, bh->b_size);
+				jbd_free(jh->b_committed_data, bh->b_size);
 			}
 			bh->b_private = NULL;
 			jh->b_bh = NULL;	/* debug, really */
@@ -2042,7 +1955,6 @@ static void journal_destroy_caches(void)
 	journal_destroy_revoke_caches();
 	journal_destroy_journal_head_cache();
 	journal_destroy_handle_cache();
-	journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
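
Note: the jbd_alloc()/jbd_free() helpers that replace jbd_slab_alloc()/jbd_slab_free() above are not defined in fs/jbd/journal.c, so their definitions do not appear in this diff; presumably they are introduced in include/linux/jbd.h by the same change. A minimal sketch, assuming they simply hand block-sized buffers out of the page allocator (whole pages are naturally aligned, so a buffer can never straddle a page boundary, which was the stated reason for the custom slabs being removed here):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch (assumption, not shown in this diff): allocate a journal buffer
 * straight from the page allocator.  get_order() rounds the block size up
 * to a power-of-two number of pages, so 1k-8k buffers each fit within a
 * single naturally aligned allocation. */
static inline void *jbd_alloc(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}

Helpers along these lines would remove the need for the per-blocksize kmem caches and their setup in journal_load() and teardown in journal_destroy_caches(), which is what the deletions above accomplish.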