aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/xfs_buf.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@infradead.org>2011-10-10 12:52:48 -0400
committerAlex Elder <aelder@sgi.com>2011-10-11 22:15:10 -0400
commit4347b9d7ad4223474d315c3ab6bc1ce7cce7fa2d (patch)
treee82674b33a3a86528749d4960e0b0f97395c309e /fs/xfs/xfs_buf.c
parentaf5c4bee499eb68bc36ca046030394d82d0e3669 (diff)
xfs: clean up buffer allocation
Change _xfs_buf_initialize to allocate the buffer directly and rename it to xfs_buf_alloc now that is the only buffer allocation routine. Also remove the xfs_buf_deallocate wrapper around the kmem_zone_free calls for buffers. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--fs/xfs/xfs_buf.c50
1 file changed, 18 insertions(+), 32 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 3df7d0a2b245..1f24ee5f0d7a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -65,10 +65,6 @@ struct workqueue_struct *xfsconvertd_workqueue;
65#define xb_to_km(flags) \ 65#define xb_to_km(flags) \
66 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP) 66 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
67 67
68#define xfs_buf_allocate(flags) \
69 kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
70#define xfs_buf_deallocate(bp) \
71 kmem_zone_free(xfs_buf_zone, (bp));
72 68
73static inline int 69static inline int
74xfs_buf_is_vmapped( 70xfs_buf_is_vmapped(
@@ -167,14 +163,19 @@ xfs_buf_stale(
167 ASSERT(atomic_read(&bp->b_hold) >= 1); 163 ASSERT(atomic_read(&bp->b_hold) >= 1);
168} 164}
169 165
170STATIC void 166struct xfs_buf *
171_xfs_buf_initialize( 167xfs_buf_alloc(
172 xfs_buf_t *bp, 168 struct xfs_buftarg *target,
173 xfs_buftarg_t *target,
174 xfs_off_t range_base, 169 xfs_off_t range_base,
175 size_t range_length, 170 size_t range_length,
176 xfs_buf_flags_t flags) 171 xfs_buf_flags_t flags)
177{ 172{
173 struct xfs_buf *bp;
174
175 bp = kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags));
176 if (unlikely(!bp))
177 return NULL;
178
178 /* 179 /*
179 * We don't want certain flags to appear in b_flags. 180 * We don't want certain flags to appear in b_flags.
180 */ 181 */
@@ -203,8 +204,9 @@ _xfs_buf_initialize(
203 init_waitqueue_head(&bp->b_waiters); 204 init_waitqueue_head(&bp->b_waiters);
204 205
205 XFS_STATS_INC(xb_create); 206 XFS_STATS_INC(xb_create);
206
207 trace_xfs_buf_init(bp, _RET_IP_); 207 trace_xfs_buf_init(bp, _RET_IP_);
208
209 return bp;
208} 210}
209 211
210/* 212/*
@@ -277,7 +279,7 @@ xfs_buf_free(
277 } else if (bp->b_flags & _XBF_KMEM) 279 } else if (bp->b_flags & _XBF_KMEM)
278 kmem_free(bp->b_addr); 280 kmem_free(bp->b_addr);
279 _xfs_buf_free_pages(bp); 281 _xfs_buf_free_pages(bp);
280 xfs_buf_deallocate(bp); 282 kmem_zone_free(xfs_buf_zone, bp);
281} 283}
282 284
283/* 285/*
@@ -539,16 +541,14 @@ xfs_buf_get(
539 if (likely(bp)) 541 if (likely(bp))
540 goto found; 542 goto found;
541 543
542 new_bp = xfs_buf_allocate(flags); 544 new_bp = xfs_buf_alloc(target, ioff << BBSHIFT, isize << BBSHIFT,
545 flags);
543 if (unlikely(!new_bp)) 546 if (unlikely(!new_bp))
544 return NULL; 547 return NULL;
545 548
546 _xfs_buf_initialize(new_bp, target,
547 ioff << BBSHIFT, isize << BBSHIFT, flags);
548
549 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); 549 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
550 if (!bp) { 550 if (!bp) {
551 xfs_buf_deallocate(new_bp); 551 kmem_zone_free(xfs_buf_zone, new_bp);
552 return NULL; 552 return NULL;
553 } 553 }
554 554
@@ -557,7 +557,7 @@ xfs_buf_get(
557 if (error) 557 if (error)
558 goto no_buffer; 558 goto no_buffer;
559 } else 559 } else
560 xfs_buf_deallocate(new_bp); 560 kmem_zone_free(xfs_buf_zone, new_bp);
561 561
562 /* 562 /*
563 * Now we have a workable buffer, fill in the block number so 563 * Now we have a workable buffer, fill in the block number so
@@ -694,19 +694,6 @@ xfs_buf_read_uncached(
694 return bp; 694 return bp;
695} 695}
696 696
697xfs_buf_t *
698xfs_buf_get_empty(
699 size_t len,
700 xfs_buftarg_t *target)
701{
702 xfs_buf_t *bp;
703
704 bp = xfs_buf_allocate(0);
705 if (bp)
706 _xfs_buf_initialize(bp, target, 0, len, 0);
707 return bp;
708}
709
710/* 697/*
711 * Return a buffer allocated as an empty buffer and associated to external 698 * Return a buffer allocated as an empty buffer and associated to external
712 * memory via xfs_buf_associate_memory() back to it's empty state. 699 * memory via xfs_buf_associate_memory() back to it's empty state.
@@ -792,10 +779,9 @@ xfs_buf_get_uncached(
792 int error, i; 779 int error, i;
793 xfs_buf_t *bp; 780 xfs_buf_t *bp;
794 781
795 bp = xfs_buf_allocate(0); 782 bp = xfs_buf_alloc(target, 0, len, 0);
796 if (unlikely(bp == NULL)) 783 if (unlikely(bp == NULL))
797 goto fail; 784 goto fail;
798 _xfs_buf_initialize(bp, target, 0, len, 0);
799 785
800 error = _xfs_buf_get_pages(bp, page_count, 0); 786 error = _xfs_buf_get_pages(bp, page_count, 0);
801 if (error) 787 if (error)
@@ -823,7 +809,7 @@ xfs_buf_get_uncached(
823 __free_page(bp->b_pages[i]); 809 __free_page(bp->b_pages[i]);
824 _xfs_buf_free_pages(bp); 810 _xfs_buf_free_pages(bp);
825 fail_free_buf: 811 fail_free_buf:
826 xfs_buf_deallocate(bp); 812 kmem_zone_free(xfs_buf_zone, bp);
827 fail: 813 fail:
828 return NULL; 814 return NULL;
829} 815}