diff options
author | Mingming Cao <cmm@us.ibm.com> | 2007-10-16 18:38:25 -0400 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2007-10-17 18:49:56 -0400 |
commit | af1e76d6b3f37cb89d9192eaf83588adaf4728eb (patch) | |
tree | 8b30dd421361a61c3f2e9c96bd574986b4e78c9e /include/linux/jbd2.h | |
parent | c089d490dfbf53bc0893dc9ef57cf3ee6448314d (diff) |
JBD2: jbd2 slab allocation cleanups
JBD2: Replace slab allocations with page allocations
JBD2 allocates memory for committed_data and frozen_data from slab. However,
JBD2 should not pass slab pages down to the block layer. Use page allocator
pages instead. This will also prepare JBD2 for the large blocksize patchset.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Diffstat (limited to 'include/linux/jbd2.h')
-rw-r--r-- | include/linux/jbd2.h | 18 |
1 file changed, 13 insertions, 5 deletions
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 260d6d76c5f3..e3677929884a 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -72,14 +72,22 @@ extern u8 jbd2_journal_enable_debug; | |||
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry); | 74 | extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry); |
75 | extern void * jbd2_slab_alloc(size_t size, gfp_t flags); | ||
76 | extern void jbd2_slab_free(void *ptr, size_t size); | ||
77 | |||
78 | #define jbd_kmalloc(size, flags) \ | 75 | #define jbd_kmalloc(size, flags) \ |
79 | __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) | 76 | __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) |
80 | #define jbd_rep_kmalloc(size, flags) \ | 77 | #define jbd_rep_kmalloc(size, flags) \ |
81 | __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1) | 78 | __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1) |
82 | 79 | ||
80 | |||
81 | static inline void *jbd2_alloc(size_t size, gfp_t flags) | ||
82 | { | ||
83 | return (void *)__get_free_pages(flags, get_order(size)); | ||
84 | } | ||
85 | |||
86 | static inline void jbd2_free(void *ptr, size_t size) | ||
87 | { | ||
88 | free_pages((unsigned long)ptr, get_order(size)); | ||
89 | }; | ||
90 | |||
83 | #define JBD2_MIN_JOURNAL_BLOCKS 1024 | 91 | #define JBD2_MIN_JOURNAL_BLOCKS 1024 |
84 | 92 | ||
85 | #ifdef __KERNEL__ | 93 | #ifdef __KERNEL__ |
@@ -959,12 +967,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh); | |||
959 | */ | 967 | */ |
960 | extern struct kmem_cache *jbd2_handle_cache; | 968 | extern struct kmem_cache *jbd2_handle_cache; |
961 | 969 | ||
962 | static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) | 970 | static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) |
963 | { | 971 | { |
964 | return kmem_cache_alloc(jbd2_handle_cache, gfp_flags); | 972 | return kmem_cache_alloc(jbd2_handle_cache, gfp_flags); |
965 | } | 973 | } |
966 | 974 | ||
967 | static inline void jbd_free_handle(handle_t *handle) | 975 | static inline void jbd2_free_handle(handle_t *handle) |
968 | { | 976 | { |
969 | kmem_cache_free(jbd2_handle_cache, handle); | 977 | kmem_cache_free(jbd2_handle_cache, handle); |
970 | } | 978 | } |