diff options
| author | Mark Tinguely <tinguely@sgi.com> | 2012-12-04 18:18:02 -0500 |
|---|---|---|
| committer | Ben Myers <bpm@sgi.com> | 2013-01-16 17:07:11 -0500 |
| commit | d44d9bc68e32ad5881b105f82bd259d261d1ef74 (patch) | |
| tree | 263045fc7f9efcaaa70e01cb4800193bc4aa1ce9 | |
| parent | a49f0d1ea3ec94fc7cf33a7c36a16343b74bd565 (diff) | |
xfs: use b_maps[] for discontiguous buffers
xfs: use b_maps[] for discontiguous buffers
Commits starting at 77c1a08 introduced multiple segment support
to xfs_buf. xfs_trans_buf_item_match() could not find a multi-segment
buffer in the transaction because it was looking at the single segment
block number rather than the multi-segment b_maps[0].bm.bn. This
results in a recursive buffer lock that can never be satisfied.
This patch:
1) Changes the remaining b_map accesses to be b_maps[0] accesses.
2) Renames the single segment b_map structure to __b_map to avoid
future confusion.
Signed-off-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
| -rw-r--r-- | fs/xfs/xfs_buf.c | 12 | ||||
| -rw-r--r-- | fs/xfs/xfs_buf.h | 6 |
2 files changed, 9 insertions, 9 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 26673a0b20e7..56d1614760cf 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
| @@ -175,7 +175,7 @@ xfs_buf_get_maps( | |||
| 175 | bp->b_map_count = map_count; | 175 | bp->b_map_count = map_count; |
| 176 | 176 | ||
| 177 | if (map_count == 1) { | 177 | if (map_count == 1) { |
| 178 | bp->b_maps = &bp->b_map; | 178 | bp->b_maps = &bp->__b_map; |
| 179 | return 0; | 179 | return 0; |
| 180 | } | 180 | } |
| 181 | 181 | ||
| @@ -193,7 +193,7 @@ static void | |||
| 193 | xfs_buf_free_maps( | 193 | xfs_buf_free_maps( |
| 194 | struct xfs_buf *bp) | 194 | struct xfs_buf *bp) |
| 195 | { | 195 | { |
| 196 | if (bp->b_maps != &bp->b_map) { | 196 | if (bp->b_maps != &bp->__b_map) { |
| 197 | kmem_free(bp->b_maps); | 197 | kmem_free(bp->b_maps); |
| 198 | bp->b_maps = NULL; | 198 | bp->b_maps = NULL; |
| 199 | } | 199 | } |
| @@ -377,8 +377,8 @@ xfs_buf_allocate_memory( | |||
| 377 | } | 377 | } |
| 378 | 378 | ||
| 379 | use_alloc_page: | 379 | use_alloc_page: |
| 380 | start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT; | 380 | start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; |
| 381 | end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1) | 381 | end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) |
| 382 | >> PAGE_SHIFT; | 382 | >> PAGE_SHIFT; |
| 383 | page_count = end - start; | 383 | page_count = end - start; |
| 384 | error = _xfs_buf_get_pages(bp, page_count, flags); | 384 | error = _xfs_buf_get_pages(bp, page_count, flags); |
| @@ -640,7 +640,7 @@ _xfs_buf_read( | |||
| 640 | xfs_buf_flags_t flags) | 640 | xfs_buf_flags_t flags) |
| 641 | { | 641 | { |
| 642 | ASSERT(!(flags & XBF_WRITE)); | 642 | ASSERT(!(flags & XBF_WRITE)); |
| 643 | ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL); | 643 | ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); |
| 644 | 644 | ||
| 645 | bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); | 645 | bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); |
| 646 | bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); | 646 | bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); |
| @@ -1709,7 +1709,7 @@ xfs_buf_cmp( | |||
| 1709 | struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); | 1709 | struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); |
| 1710 | xfs_daddr_t diff; | 1710 | xfs_daddr_t diff; |
| 1711 | 1711 | ||
| 1712 | diff = ap->b_map.bm_bn - bp->b_map.bm_bn; | 1712 | diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; |
| 1713 | if (diff < 0) | 1713 | if (diff < 0) |
| 1714 | return -1; | 1714 | return -1; |
| 1715 | if (diff > 0) | 1715 | if (diff > 0) |
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 23f5642480bb..433a12ed7b17 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
| @@ -151,7 +151,7 @@ typedef struct xfs_buf { | |||
| 151 | struct page **b_pages; /* array of page pointers */ | 151 | struct page **b_pages; /* array of page pointers */ |
| 152 | struct page *b_page_array[XB_PAGES]; /* inline pages */ | 152 | struct page *b_page_array[XB_PAGES]; /* inline pages */ |
| 153 | struct xfs_buf_map *b_maps; /* compound buffer map */ | 153 | struct xfs_buf_map *b_maps; /* compound buffer map */ |
| 154 | struct xfs_buf_map b_map; /* inline compound buffer map */ | 154 | struct xfs_buf_map __b_map; /* inline compound buffer map */ |
| 155 | int b_map_count; | 155 | int b_map_count; |
| 156 | int b_io_length; /* IO size in BBs */ | 156 | int b_io_length; /* IO size in BBs */ |
| 157 | atomic_t b_pin_count; /* pin count */ | 157 | atomic_t b_pin_count; /* pin count */ |
| @@ -330,8 +330,8 @@ void xfs_buf_stale(struct xfs_buf *bp); | |||
| 330 | * In future, uncached buffers will pass the block number directly to the io | 330 | * In future, uncached buffers will pass the block number directly to the io |
| 331 | * request function and hence these macros will go away at that point. | 331 | * request function and hence these macros will go away at that point. |
| 332 | */ | 332 | */ |
| 333 | #define XFS_BUF_ADDR(bp) ((bp)->b_map.bm_bn) | 333 | #define XFS_BUF_ADDR(bp) ((bp)->b_maps[0].bm_bn) |
| 334 | #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno)) | 334 | #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno)) |
| 335 | 335 | ||
| 336 | static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) | 336 | static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) |
| 337 | { | 337 | { |
