author    Alex Elder <aelder@sgi.com>    2010-03-16 14:55:56 -0400
committer Alex Elder <aelder@sgi.com>    2010-03-16 16:40:36 -0400
commit    8a262e573d30187b32b5534ec489446931239cc5
tree      e510327146e3329d19fac8269cc51d437c8a650f    /fs/xfs/linux-2.6/xfs_buf.c
parent    cd9640a70d542ca026a812ac34733799da0a39c9
xfs: use scalable vmap API
Re-apply a commit that had been reverted due to regressions that have
since been fixed.

    From 95f8e302c04c0b0c6de35ab399a5551605eeb006 Mon Sep 17 00:00:00 2001
    From: Nick Piggin <npiggin@suse.de>
    Date: Tue, 6 Jan 2009 14:43:09 +1100

    Implement XFS's large buffer support with the new vmap APIs. See the
    vmap rewrite (db64fe02) for some numbers. The biggest improvement that
    comes from using the new APIs is avoiding the global KVA allocation
    lock on every call.

    Signed-off-by: Nick Piggin <npiggin@suse.de>
    Reviewed-by: Christoph Hellwig <hch@infradead.org>
    Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>

Only modifications here were a minor reformat, plus making the patch
apply given the new use of xfs_buf_is_vmapped().

Modified-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
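[Context, not part of the commit: the sketch below illustrates the
vmap()/vunmap() to vm_map_ram()/vm_unmap_ram() conversion pattern the
patch applies. The helper names map_pages_scalably() and
unmap_pages_scalably() are hypothetical; only vm_map_ram(),
vm_unmap_ram(), and PAGE_KERNEL are real kernel API of this era.]

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_pages_scalably(struct page **pages, unsigned int count)
{
	/*
	 * The third argument is the preferred NUMA node; -1 means no
	 * preference.  vm_map_ram() hands out kernel virtual address
	 * space from per-CPU caches instead of taking the single
	 * global KVA allocation lock that vmap() contends on.
	 */
	return vm_map_ram(pages, count, -1, PAGE_KERNEL);
}

static void unmap_pages_scalably(void *addr, unsigned int count)
{
	/*
	 * Unlike vunmap(), vm_unmap_ram() cannot derive the mapping
	 * size from the address alone, so the caller must pass back
	 * the page count it mapped -- which is why the patch hands
	 * bp->b_page_count to the free path in xfs_buf_free().
	 */
	vm_unmap_ram(addr, count);
}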
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 81f4ef27de3e..bd111b7e1daa 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -268,7 +268,8 @@ xfs_buf_free(
 	uint		i;
 
 	if (xfs_buf_is_vmapped(bp))
-		vunmap(bp->b_addr - bp->b_offset);
+		vm_unmap_ram(bp->b_addr - bp->b_offset,
+				bp->b_page_count);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -388,8 +389,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
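[Another contextual note, not from the commit: both hunks share one
subtlety. bp->b_addr stores the mapped base address plus bp->b_offset,
the byte offset of the buffer's data within its first page, so the free
path must subtract that offset to recover exactly the address that
vm_map_ram() returned. A hypothetical, self-contained illustration of
that pairing; all names (demo_buf, demo_map, demo_unmap) are invented
for this sketch.]

#include <linux/mm.h>
#include <linux/vmalloc.h>

struct demo_buf {
	void		*addr;		/* mapped base + data offset */
	unsigned int	offset;		/* data offset in first page */
	unsigned int	page_count;	/* pages backing the mapping */
};

static int demo_map(struct demo_buf *b, struct page **pages)
{
	void *base = vm_map_ram(pages, b->page_count, -1, PAGE_KERNEL);

	if (base == NULL)
		return -ENOMEM;
	b->addr = base + b->offset;	/* mirrors bp->b_addr += bp->b_offset */
	return 0;
}

static void demo_unmap(struct demo_buf *b)
{
	/* mirrors vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count) */
	vm_unmap_ram(b->addr - b->offset, b->page_count);
}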