about summary refs log tree commit diff stats
path: root/fs/xfs/xfs_buf.c
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2012-04-23 01:58:53 -0400
committerBen Myers <bpm@sgi.com>2012-05-14 17:20:49 -0400
commit795cac72e902496adac399389f9affe5d1ab821a (patch)
tree120f20faeb98e4f01a9fa1eb6eab26392b3a5e75 /fs/xfs/xfs_buf.c
parentaa0e8833b05cbd9d34d6a1ddaf23a74a58d76a03 (diff)
xfs: kill xfs_buf_btoc
xfs_buf_btoc and friends are simple macros that do basic block to page index conversion and vice versa. These aren't widely used, and we use open coded masking and shifting everywhere else. Hence remove the macros and open code the work they do. Also, use of PAGE_CACHE_{SIZE|SHIFT|MASK} for these macros is now incorrect - we are using pages directly and not the page cache, so use PAGE_{SIZE|MASK|SHIFT} instead. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Mark Tinguely <tinguely@sgi.com> Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--  fs/xfs/xfs_buf.c  39
1 files changed, 21 insertions, 18 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ab3c4491777b..942cf5051ab4 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -306,7 +306,7 @@ xfs_buf_allocate_memory(
306 size_t nbytes, offset; 306 size_t nbytes, offset;
307 gfp_t gfp_mask = xb_to_gfp(flags); 307 gfp_t gfp_mask = xb_to_gfp(flags);
308 unsigned short page_count, i; 308 unsigned short page_count, i;
309 xfs_off_t end; 309 xfs_off_t start, end;
310 int error; 310 int error;
311 311
312 /* 312 /*
@@ -314,15 +314,15 @@ xfs_buf_allocate_memory(
314 * the memory from the heap - there's no need for the complexity of 314 * the memory from the heap - there's no need for the complexity of
315 * page arrays to keep allocation down to order 0. 315 * page arrays to keep allocation down to order 0.
316 */ 316 */
317 if (bp->b_length < BTOBB(PAGE_SIZE)) { 317 size = BBTOB(bp->b_length);
318 bp->b_addr = kmem_alloc(BBTOB(bp->b_length), xb_to_km(flags)); 318 if (size < PAGE_SIZE) {
319 bp->b_addr = kmem_alloc(size, xb_to_km(flags));
319 if (!bp->b_addr) { 320 if (!bp->b_addr) {
320 /* low memory - use alloc_page loop instead */ 321 /* low memory - use alloc_page loop instead */
321 goto use_alloc_page; 322 goto use_alloc_page;
322 } 323 }
323 324
324 if (((unsigned long)(bp->b_addr + BBTOB(bp->b_length) - 1) & 325 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
325 PAGE_MASK) !=
326 ((unsigned long)bp->b_addr & PAGE_MASK)) { 326 ((unsigned long)bp->b_addr & PAGE_MASK)) {
327 /* b_addr spans two pages - use alloc_page instead */ 327 /* b_addr spans two pages - use alloc_page instead */
328 kmem_free(bp->b_addr); 328 kmem_free(bp->b_addr);
@@ -338,14 +338,14 @@ xfs_buf_allocate_memory(
338 } 338 }
339 339
340use_alloc_page: 340use_alloc_page:
341 end = BBTOB(bp->b_bn + bp->b_length); 341 start = BBTOB(bp->b_bn) >> PAGE_SHIFT;
342 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(BBTOB(bp->b_bn)); 342 end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
343 page_count = end - start;
343 error = _xfs_buf_get_pages(bp, page_count, flags); 344 error = _xfs_buf_get_pages(bp, page_count, flags);
344 if (unlikely(error)) 345 if (unlikely(error))
345 return error; 346 return error;
346 347
347 offset = bp->b_offset; 348 offset = bp->b_offset;
348 size = BBTOB(bp->b_length);
349 bp->b_flags |= _XBF_PAGES; 349 bp->b_flags |= _XBF_PAGES;
350 350
351 for (i = 0; i < bp->b_page_count; i++) { 351 for (i = 0; i < bp->b_page_count; i++) {
@@ -1320,27 +1320,30 @@ xfs_buf_iomove(
1320 void *data, /* data address */ 1320 void *data, /* data address */
1321 xfs_buf_rw_t mode) /* read/write/zero flag */ 1321 xfs_buf_rw_t mode) /* read/write/zero flag */
1322{ 1322{
1323 size_t bend, cpoff, csize; 1323 size_t bend;
1324 struct page *page;
1325 1324
1326 bend = boff + bsize; 1325 bend = boff + bsize;
1327 while (boff < bend) { 1326 while (boff < bend) {
1328 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; 1327 struct page *page;
1329 cpoff = xfs_buf_poff(boff + bp->b_offset); 1328 int page_index, page_offset, csize;
1330 csize = min_t(size_t, 1329
1331 PAGE_SIZE - cpoff, BBTOB(bp->b_io_length) - boff); 1330 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1331 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1332 page = bp->b_pages[page_index];
1333 csize = min_t(size_t, PAGE_SIZE - page_offset,
1334 BBTOB(bp->b_io_length) - boff);
1332 1335
1333 ASSERT(((csize + cpoff) <= PAGE_SIZE)); 1336 ASSERT((csize + page_offset) <= PAGE_SIZE);
1334 1337
1335 switch (mode) { 1338 switch (mode) {
1336 case XBRW_ZERO: 1339 case XBRW_ZERO:
1337 memset(page_address(page) + cpoff, 0, csize); 1340 memset(page_address(page) + page_offset, 0, csize);
1338 break; 1341 break;
1339 case XBRW_READ: 1342 case XBRW_READ:
1340 memcpy(data, page_address(page) + cpoff, csize); 1343 memcpy(data, page_address(page) + page_offset, csize);
1341 break; 1344 break;
1342 case XBRW_WRITE: 1345 case XBRW_WRITE:
1343 memcpy(page_address(page) + cpoff, data, csize); 1346 memcpy(page_address(page) + page_offset, data, csize);
1344 } 1347 }
1345 1348
1346 boff += csize; 1349 boff += csize;