author     Mingming Cao <cmm@us.ibm.com>      2007-10-16 18:38:25 -0400
committer  Theodore Ts'o <tytso@mit.edu>      2007-10-17 18:49:56 -0400
commit     c089d490dfbf53bc0893dc9ef57cf3ee6448314d
tree       8faffea3bdcfdd48ce175ac92d5088ced4f1c969
parent     d85714d81cc0408daddb68c10f7fd69eafe7c213
JBD: JBD slab allocation cleanups

JBD: Replace slab allocations with page allocations

JBD allocates memory for committed_data and frozen_data from slab. However,
JBD should not pass slab pages down to the block layer. Use page allocator
pages instead. This will also prepare JBD for the large blocksize patchset.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
-rw-r--r--  fs/jbd/commit.c      |  6
-rw-r--r--  fs/jbd/journal.c     | 88
-rw-r--r--  fs/jbd/transaction.c |  8
-rw-r--r--  include/linux/jbd.h  | 13
4 files changed, 21 insertions(+), 94 deletions(-)
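
For context (not part of the patch): the new jbd_alloc()/jbd_free() helpers hand the request to the page allocator via __get_free_pages(flags, get_order(size)), so every committed_data/frozen_data buffer is page-aligned and can never straddle a page boundary, which is what makes it safe to pass down to the block layer. Below is a minimal userspace sketch of the order arithmetic, assuming 4 KiB pages and a local re-implementation of get_order() purely for illustration:

/*
 * Illustrative userspace sketch (not part of the patch): mirrors the
 * kernel's get_order() rounding so the allocation sizes used by
 * jbd_alloc()/jbd_free() can be checked for the block sizes JBD cares
 * about. PAGE_SHIFT of 12 (4 KiB pages) is an assumption here.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order whose page span covers the requested size. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1024, 2048, 4096, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("blocksize %5lu -> order %d (%lu bytes allocated)\n",
		       sizes[i], get_order(sizes[i]),
		       PAGE_SIZE << get_order(sizes[i]));
	return 0;
}

For the block sizes JBD supports this means one page for 1k-4k blocks and two pages for 8k blocks, at the cost of some unused space for sub-page block sizes.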
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index a003d50edcdb..a263d82761df 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -375,7 +375,7 @@ void journal_commit_transaction(journal_t *journal)
 			struct buffer_head *bh = jh2bh(jh);
 
 			jbd_lock_bh_state(bh);
-			jbd_slab_free(jh->b_committed_data, bh->b_size);
+			jbd_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			jbd_unlock_bh_state(bh);
 		}
@@ -792,14 +792,14 @@ restart_loop:
 		 * Otherwise, we can just throw away the frozen data now.
 		 */
 		if (jh->b_committed_data) {
-			jbd_slab_free(jh->b_committed_data, bh->b_size);
+			jbd_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			if (jh->b_frozen_data) {
 				jh->b_committed_data = jh->b_frozen_data;
 				jh->b_frozen_data = NULL;
 			}
 		} else if (jh->b_frozen_data) {
-			jbd_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd_free(jh->b_frozen_data, bh->b_size);
 			jh->b_frozen_data = NULL;
 		}
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index a6be78c05dce..7edf3fdfdadd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -334,10 +333,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd_slab_free(tmp, bh_in->b_size);
+			jbd_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -1095,13 +1094,6 @@ int journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (journal_recover(journal))
@@ -1624,77 +1616,6 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 }
 
 /*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size) (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
-};
-
-static void journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
-/*
  * Journal_head storage management
  */
 static struct kmem_cache *journal_head_cache;
@@ -1881,13 +1802,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 			printk(KERN_WARNING "%s: freeing "
 					"b_frozen_data\n",
 					__FUNCTION__);
-			jbd_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd_free(jh->b_frozen_data, bh->b_size);
 		}
 		if (jh->b_committed_data) {
 			printk(KERN_WARNING "%s: freeing "
 					"b_committed_data\n",
 					__FUNCTION__);
-			jbd_slab_free(jh->b_committed_data, bh->b_size);
+			jbd_free(jh->b_committed_data, bh->b_size);
 		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
@@ -2042,7 +1963,6 @@ static void journal_destroy_caches(void)
 	journal_destroy_revoke_caches();
 	journal_destroy_journal_head_cache();
 	journal_destroy_handle_cache();
-	journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 8df5bac0b7a5..db8404514c92 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -675,7 +675,7 @@ repeat:
 			JBUFFER_TRACE(jh, "allocate memory for buffer");
 			jbd_unlock_bh_state(bh);
 			frozen_buffer =
-				jbd_slab_alloc(jh2bh(jh)->b_size,
+				jbd_alloc(jh2bh(jh)->b_size,
 						 GFP_NOFS);
 			if (!frozen_buffer) {
 				printk(KERN_EMERG
@@ -735,7 +735,7 @@ done:
 
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd_slab_free(frozen_buffer, bh->b_size);
+		jbd_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -888,7 +888,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 				__FUNCTION__);
@@ -915,7 +915,7 @@ repeat:
 out:
 	journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd_slab_free(committed_data, bh->b_size);
+		jbd_free(committed_data, bh->b_size);
 	return err;
 }
 
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 72f522372924..1db3b684f557 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -73,14 +73,21 @@ extern int journal_enable_debug;
 #endif
 
 extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd_slab_alloc(size_t size, gfp_t flags);
-extern void jbd_slab_free(void *ptr, size_t size);
-
 #define jbd_kmalloc(size, flags) \
 	__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \
 	__jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
 
+static inline void *jbd_alloc(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd_free(void *ptr, size_t size)
+{
+	free_pages((unsigned long)ptr, get_order(size));
+};
+
 #define JFS_MIN_JOURNAL_BLOCKS 1024
 
 #ifdef __KERNEL__