Diffstat (limited to 'fs/jbd2')

 -rw-r--r--  fs/jbd2/commit.c       |  16
 -rw-r--r--  fs/jbd2/journal.c      | 128
 -rw-r--r--  fs/jbd2/recovery.c     |   2
 -rw-r--r--  fs/jbd2/revoke.c       |   4
 -rw-r--r--  fs/jbd2/transaction.c  |  19

 5 files changed, 35 insertions, 134 deletions
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index c0f59d1b13dc..6986f334c643 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -278,7 +278,7 @@ static inline void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
 				   unsigned long long block)
 {
 	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
-	if (tag_bytes > JBD_TAG_SIZE32)
+	if (tag_bytes > JBD2_TAG_SIZE32)
 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
 }
 
@@ -384,7 +384,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 			struct buffer_head *bh = jh2bh(jh);
 
 			jbd_lock_bh_state(bh);
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			jbd_unlock_bh_state(bh);
 		}
@@ -475,7 +475,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	spin_unlock(&journal->j_list_lock);
 
 	if (err)
-		__jbd2_journal_abort_hard(journal);
+		jbd2_journal_abort(journal, err);
 
 	jbd2_journal_write_revoke_records(journal, commit_transaction);
 
@@ -533,7 +533,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
 			descriptor = jbd2_journal_get_descriptor_buffer(journal);
 			if (!descriptor) {
-				__jbd2_journal_abort_hard(journal);
+				jbd2_journal_abort(journal, -EIO);
 				continue;
 			}
 
@@ -566,7 +566,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		   and repeat this loop: we'll fall into the
 		   refile-on-abort condition above. */
 		if (err) {
-			__jbd2_journal_abort_hard(journal);
+			jbd2_journal_abort(journal, err);
 			continue;
 		}
 
@@ -757,7 +757,7 @@ wait_for_iobuf:
 			err = -EIO;
 
 	if (err)
-		__jbd2_journal_abort_hard(journal);
+		jbd2_journal_abort(journal, err);
 
 	/* End of a transaction! Finally, we can do checkpoint
 	   processing: any buffers committed as a result of this
@@ -801,14 +801,14 @@ restart_loop:
 		 * Otherwise, we can just throw away the frozen data now.
 		 */
 		if (jh->b_committed_data) {
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			if (jh->b_frozen_data) {
 				jh->b_committed_data = jh->b_frozen_data;
 				jh->b_frozen_data = NULL;
 			}
 		} else if (jh->b_frozen_data) {
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 			jh->b_frozen_data = NULL;
 		}
 
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f37324aee817..6ddc5531587c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -84,7 +84,6 @@ EXPORT_SYMBOL(jbd2_journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int jbd2_journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -335,10 +334,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd2_slab_free(tmp, bh_in->b_size);
+			jbd2_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -655,10 +654,9 @@ static journal_t * journal_init_common (void)
 	journal_t *journal;
 	int err;
 
-	journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+	journal = kzalloc(sizeof(*journal), GFP_KERNEL|__GFP_NOFAIL);
 	if (!journal)
 		goto fail;
-	memset(journal, 0, sizeof(*journal));
 
 	init_waitqueue_head(&journal->j_wait_transaction_locked);
 	init_waitqueue_head(&journal->j_wait_logspace);
@@ -672,7 +670,7 @@ static journal_t * journal_init_common (void)
 	spin_lock_init(&journal->j_list_lock);
 	spin_lock_init(&journal->j_state_lock);
 
-	journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
+	journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
 
 	/* The journal is marked for error until we succeed with recovery! */
 	journal->j_flags = JBD2_ABORT;
@@ -1096,13 +1094,6 @@ int jbd2_journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (jbd2_journal_recover(journal))
@@ -1621,89 +1612,9 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
 size_t journal_tag_bytes(journal_t *journal)
 {
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
-		return JBD_TAG_SIZE64;
+		return JBD2_TAG_SIZE64;
 	else
-		return JBD_TAG_SIZE32;
-}
-
-/*
- * Simple support for retrying memory allocations. Introduced to help to
- * debug different VM deadlock avoidance strategies.
- */
-void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
-{
-	return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
-}
-
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size) (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
-};
-
-static void jbd2_journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int jbd2_journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd2_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd2_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
 }
 
 /*
@@ -1770,7 +1681,7 @@ static void journal_free_journal_head(struct journal_head *jh)
 {
 #ifdef CONFIG_JBD2_DEBUG
 	atomic_dec(&nr_journal_heads);
-	memset(jh, JBD_POISON_FREE, sizeof(*jh));
+	memset(jh, JBD2_POISON_FREE, sizeof(*jh));
 #endif
 	kmem_cache_free(jbd2_journal_head_cache, jh);
 }
@@ -1893,13 +1804,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 			printk(KERN_WARNING "%s: freeing "
 					"b_frozen_data\n",
 					__FUNCTION__);
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 		}
 		if (jh->b_committed_data) {
 			printk(KERN_WARNING "%s: freeing "
 					"b_committed_data\n",
 					__FUNCTION__);
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
@@ -1953,16 +1864,14 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
 /*
  * debugfs tunables
  */
-#if defined(CONFIG_JBD2_DEBUG)
-u8 jbd2_journal_enable_debug;
+#ifdef CONFIG_JBD2_DEBUG
+u8 jbd2_journal_enable_debug __read_mostly;
 EXPORT_SYMBOL(jbd2_journal_enable_debug);
-#endif
-
-#if defined(CONFIG_JBD2_DEBUG) && defined(CONFIG_DEBUG_FS)
 
 #define JBD2_DEBUG_NAME "jbd2-debug"
 
-struct dentry *jbd2_debugfs_dir, *jbd2_debug;
+static struct dentry *jbd2_debugfs_dir;
+static struct dentry *jbd2_debug;
 
 static void __init jbd2_create_debugfs_entry(void)
 {
@@ -1975,24 +1884,18 @@ static void __init jbd2_create_debugfs_entry(void)
 
 static void __exit jbd2_remove_debugfs_entry(void)
 {
-	if (jbd2_debug)
-		debugfs_remove(jbd2_debug);
-	if (jbd2_debugfs_dir)
-		debugfs_remove(jbd2_debugfs_dir);
+	debugfs_remove(jbd2_debug);
+	debugfs_remove(jbd2_debugfs_dir);
 }
 
 #else
 
 static void __init jbd2_create_debugfs_entry(void)
 {
-	do {
-	} while (0);
 }
 
 static void __exit jbd2_remove_debugfs_entry(void)
 {
-	do {
-	} while (0);
 }
 
 #endif
@@ -2040,7 +1943,6 @@ static void jbd2_journal_destroy_caches(void)
 	jbd2_journal_destroy_revoke_caches();
 	jbd2_journal_destroy_jbd2_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
-	jbd2_journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
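
The per-blocksize slab machinery deleted above is replaced by jbd2_alloc()/jbd2_free() helpers that are defined outside fs/jbd2 (the jbd2 header is not part of this diffstat), so their definitions do not appear here. As a rough sketch of the intent — bh-sized allocations taken straight from the page allocator, so a buffer can never straddle a page boundary even with slab debugging enabled — they could look like the following; this is an assumption, not code shown by the diff:

/*
 * Sketch only: assumed shape of the replacement helpers.  The real
 * definitions live in the jbd2 header, outside this fs/jbd2 diff.
 * Backing bh-sized buffers with whole pages (rather than per-blocksize
 * slabs) keeps an allocation from crossing a page boundary.
 */
static inline void *jbd2_alloc(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd2_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}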
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index b50be8a044eb..d0ce627539ef 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -311,7 +311,7 @@ int jbd2_journal_skip_recovery(journal_t *journal)
 static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
 {
 	unsigned long long block = be32_to_cpu(tag->t_blocknr);
-	if (tag_bytes > JBD_TAG_SIZE32)
+	if (tag_bytes > JBD2_TAG_SIZE32)
 		block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
 	return block;
 }
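
write_tag_block() in the commit.c hunk above and read_tag_block() here are the two halves of the same on-disk encoding: the low 32 bits of the block number go into t_blocknr, and when the 64-bit incompat feature widens the tag beyond JBD2_TAG_SIZE32 the upper bits go into t_blocknr_high (written as (block >> 31) >> 1 rather than block >> 32, presumably to sidestep shift-width warnings in configurations where the value could be only 32 bits wide). A stand-alone illustration of the round trip, using a plain-integer stand-in for journal_block_tag_t and omitting the cpu_to_be32()/be32_to_cpu() byte swaps:

/* Illustration only: fake_tag stands in for journal_block_tag_t and the
 * big-endian conversions are left out; the split/recombine expressions
 * mirror write_tag_block()/read_tag_block() from this diff. */
#include <assert.h>
#include <stdint.h>

struct fake_tag {
	uint32_t t_blocknr;		/* low 32 bits */
	uint32_t t_blocknr_high;	/* high bits, only with the 64-bit feature */
};

int main(void)
{
	unsigned long long block = 0x123456789ULL;	/* needs more than 32 bits */
	unsigned long long out;
	struct fake_tag tag;

	/* write side, cf. write_tag_block() with tag_bytes > JBD2_TAG_SIZE32 */
	tag.t_blocknr = (uint32_t)(block & (uint32_t)~0);
	tag.t_blocknr_high = (uint32_t)((block >> 31) >> 1);

	/* read side, cf. read_tag_block() */
	out = tag.t_blocknr;
	out |= (uint64_t)tag.t_blocknr_high << 32;

	assert(out == block);
	return 0;
}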
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 01d88975e0c5..3595fd432d5b 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -352,7 +352,7 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
 		if (bh)
 			BUFFER_TRACE(bh, "found on hash");
 	}
-#ifdef JBD_EXPENSIVE_CHECKING
+#ifdef JBD2_EXPENSIVE_CHECKING
 	else {
 		struct buffer_head *bh2;
 
@@ -453,7 +453,7 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
 		}
 	}
 
-#ifdef JBD_EXPENSIVE_CHECKING
+#ifdef JBD2_EXPENSIVE_CHECKING
 	/* There better not be one left behind by now! */
 	record = find_revoke_record(journal, bh->b_blocknr);
 	J_ASSERT_JH(jh, record == NULL);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7946ff43fc40..b1fcf2b3dca3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -96,13 +96,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle)
 
 alloc_transaction:
 	if (!journal->j_running_transaction) {
-		new_transaction = jbd_kmalloc(sizeof(*new_transaction),
-						GFP_NOFS);
+		new_transaction = kzalloc(sizeof(*new_transaction),
+						GFP_NOFS|__GFP_NOFAIL);
 		if (!new_transaction) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		memset(new_transaction, 0, sizeof(*new_transaction));
 	}
 
 	jbd_debug(3, "New handle %p going live.\n", handle);
@@ -236,7 +235,7 @@ out:
 /* Allocate a new handle. This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
 	if (!handle)
 		return NULL;
 	memset(handle, 0, sizeof(*handle));
@@ -282,7 +281,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 
 	err = start_this_handle(journal, handle);
 	if (err < 0) {
-		jbd_free_handle(handle);
+		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
 	}
@@ -668,7 +667,7 @@ repeat:
 			JBUFFER_TRACE(jh, "allocate memory for buffer");
 			jbd_unlock_bh_state(bh);
 			frozen_buffer =
-				jbd2_slab_alloc(jh2bh(jh)->b_size,
+				jbd2_alloc(jh2bh(jh)->b_size,
 						 GFP_NOFS);
 			if (!frozen_buffer) {
 				printk(KERN_EMERG
@@ -728,7 +727,7 @@ done:
 
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd2_slab_free(frozen_buffer, bh->b_size);
+		jbd2_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -881,7 +880,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 				__FUNCTION__);
@@ -908,7 +907,7 @@ repeat:
 out:
 	jbd2_journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd2_slab_free(committed_data, bh->b_size);
+		jbd2_free(committed_data, bh->b_size);
 	return err;
 }
 
@@ -1411,7 +1410,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	jbd_free_handle(handle);
+	jbd2_free_handle(handle);
 	return err;
 }
 
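
The jbd_alloc_handle()/jbd_free_handle() calls renamed above to jbd2_alloc_handle()/jbd2_free_handle() are, like jbd2_alloc()/jbd2_free(), defined outside this diff. A sketch of the assumed shape, on the premise that they remain thin wrappers around the handle kmem_cache that journal.c already tears down via jbd2_journal_destroy_handle_cache() in the hunk above; the cache name and exact definitions are assumptions, not shown by this diff:

/* Sketch only: assumed wrappers around the jbd2 handle cache; the real
 * definitions are not part of this fs/jbd2 diff. */
static struct kmem_cache *jbd2_handle_cache;

static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
{
	return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
}

static inline void jbd2_free_handle(handle_t *handle)
{
	kmem_cache_free(jbd2_handle_cache, handle);
}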