path: root/fs/jbd2/transaction.c
author	Mingming Cao <cmm@us.ibm.com>	2007-10-16 18:38:25 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2007-10-17 18:49:56 -0400
commit	af1e76d6b3f37cb89d9192eaf83588adaf4728eb (patch)
tree	8b30dd421361a61c3f2e9c96bd574986b4e78c9e /fs/jbd2/transaction.c
parent	c089d490dfbf53bc0893dc9ef57cf3ee6448314d (diff)
JBD2: jbd2 slab allocation cleanups
JBD2: Replace slab allocations with page allocations

JBD2 allocates memory for committed_data and frozen_data from slab.
However, JBD2 should not pass slab pages down to the block layer. Use
page allocator pages instead. This will also prepare JBD for the large
blocksize patchset.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
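For context, the jbd2_alloc()/jbd2_free() helpers that the hunks below switch to are defined outside this file and are not shown in this diff (the page is limited to fs/jbd2/transaction.c). A minimal sketch of what such helpers could look like, assuming they simply wrap the page allocator as the commit message describes:

	/* Sketch only: assumed shape of the page-allocator-backed helpers.
	 * Needs <linux/gfp.h> (__get_free_pages/free_pages) and
	 * <asm/page.h> (get_order). Unlike slab objects, these buffers are
	 * whole pages and are therefore safe to hand to the block layer. */
	static inline void *jbd2_alloc(size_t size, gfp_t flags)
	{
		return (void *)__get_free_pages(flags, get_order(size));
	}

	static inline void jbd2_free(void *ptr, size_t size)
	{
		free_pages((unsigned long)ptr, get_order(size));
	}

Callers pass the buffer size (here, the block size) to both functions so the matching page order can be recomputed on free.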
Diffstat (limited to 'fs/jbd2/transaction.c')
-rw-r--r--	fs/jbd2/transaction.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7946ff43fc40..bd047f9af8e7 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -236,7 +236,7 @@ out:
 /* Allocate a new handle. This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
 	if (!handle)
 		return NULL;
 	memset(handle, 0, sizeof(*handle));
@@ -282,7 +282,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 
 	err = start_this_handle(journal, handle);
 	if (err < 0) {
-		jbd_free_handle(handle);
+		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
 	}
@@ -668,7 +668,7 @@ repeat:
 			JBUFFER_TRACE(jh, "allocate memory for buffer");
 			jbd_unlock_bh_state(bh);
 			frozen_buffer =
-				jbd2_slab_alloc(jh2bh(jh)->b_size,
+				jbd2_alloc(jh2bh(jh)->b_size,
 						 GFP_NOFS);
 			if (!frozen_buffer) {
 				printk(KERN_EMERG
@@ -728,7 +728,7 @@ done:
 
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd2_slab_free(frozen_buffer, bh->b_size);
+		jbd2_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -881,7 +881,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 				__FUNCTION__);
@@ -908,7 +908,7 @@ repeat:
 out:
 	jbd2_journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd2_slab_free(committed_data, bh->b_size);
+		jbd2_free(committed_data, bh->b_size);
 	return err;
 }
 
@@ -1411,7 +1411,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	jbd_free_handle(handle);
+	jbd2_free_handle(handle);
 	return err;
 }
 