author     James Bottomley <James.Bottomley@suse.de>                2010-01-25 12:42:24 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2010-02-05 13:32:35 -0500
commit     73c77e2ccc14413c232c3e0b3aa43a0c4b72ec70 (patch)
tree       31ff85de9d06c07d1e06114274239c75560ff15c
parent     c9334f6067dbe0380141fc75b122e0a533878838 (diff)
xfs: fix xfs to work with Virtually Indexed architectures
xfs_buf.c includes what is essentially a hand-rolled version of
blk_rq_map_kern().  In order to work properly with the vmalloc buffers
that xfs uses, this hand-rolled routine must also implement the flushing
API for vmap/vmalloc areas.

[style updates from hch@lst.de]

Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	30
1 files changed, 29 insertions, 1 deletions
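
For context, the pattern the patch implements looks roughly like the sketch below. It is a minimal illustration, not code from the patch: the example_* helpers and the plain buf/len parameters are hypothetical, while flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the vmap flushing API the commit message refers to. On a virtually indexed cache architecture, dirty lines cached under the vmap alias are written back before the device touches the pages, and stale lines under that alias are invalidated after a read completes so the CPU sees the DMA'd data.

/*
 * Minimal sketch of the vmap flushing pattern on virtually indexed
 * cache architectures.  The example_* helpers are hypothetical; only
 * flush_kernel_vmap_range()/invalidate_kernel_vmap_range() are the
 * real kernel API used by this patch.
 */
#include <linux/highmem.h>

/* Called before submitting I/O on a vmalloc/vmap-backed buffer. */
static void example_pre_submit(void *buf, int len)
{
	/*
	 * The device accesses the pages through their physical addresses,
	 * so write back any dirty lines cached under the vmap alias
	 * before the I/O starts.
	 */
	flush_kernel_vmap_range(buf, len);
}

/* Called from the completion path of a READ on such a buffer. */
static void example_read_complete(void *buf, int len)
{
	/*
	 * The device updated the pages behind the CPU's back; discard
	 * stale lines cached under the vmap alias before the CPU reads
	 * the new data through it.
	 */
	invalidate_kernel_vmap_range(buf, len);
}

In the patch itself, these two calls land in the bio submission path just before submit_bio() and in xfs_buf_bio_end_io() for successful reads, respectively, as the hunks below show.
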
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 77b8be81c769..6f3ebb634b8b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -76,6 +76,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
 #define xfs_buf_deallocate(bp) \
 	kmem_zone_free(xfs_buf_zone, (bp));
 
+static inline int
+xfs_buf_is_vmapped(
+	struct xfs_buf	*bp)
+{
+	/*
+	 * Return true if the buffer is vmapped.
+	 *
+	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+	 * code is clever enough to know it doesn't have to map a single page,
+	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+	 */
+	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+	struct xfs_buf	*bp)
+{
+	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
 /*
  * Page Region interfaces.
  *
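
For illustration only (the numbers below are hypothetical, not from the patch): because bp->b_addr points bp->b_offset bytes into the first mapped page (the free path frees bp->b_addr - bp->b_offset), xfs_buf_vmap_len() yields exactly the number of bytes from bp->b_addr to the end of the vmap mapping. A standalone sketch of the arithmetic, assuming a 4 KiB page size:

/* Hypothetical worked example of the xfs_buf_vmap_len() arithmetic. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	const size_t page_size    = 4096;	/* assumed PAGE_SIZE */
	const size_t b_page_count = 2;		/* pages backing the buffer */
	const size_t b_offset     = 512;	/* data starts 512 bytes into page 0 */

	/* Same formula as xfs_buf_vmap_len(): b_addr .. end of mapping. */
	const size_t vmap_len = (b_page_count * page_size) - b_offset;

	assert(vmap_len == 7680);
	return 0;
}
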
@@ -314,7 +335,7 @@ xfs_buf_free(
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
-		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+		if (xfs_buf_is_vmapped(bp))
 			free_address(bp->b_addr - bp->b_offset);
 
 		for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1128,9 @@ xfs_buf_bio_end_io(
 
 	xfs_buf_ioerror(bp, -error);
 
+	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
 	do {
 		struct page	*page = bvec->bv_page;
 
@@ -1216,6 +1240,10 @@ next_chunk:
 
 submit_io:
 	if (likely(bio->bi_size)) {
+		if (xfs_buf_is_vmapped(bp)) {
+			flush_kernel_vmap_range(bp->b_addr,
+						xfs_buf_vmap_len(bp));
+		}
 		submit_bio(rw, bio);
 		if (size)
 			goto next_chunk;