aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/linux-2.6/xfs_buf.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-07 22:12:12 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-07 22:12:12 -0500
commit0b61a2ba5dfd1620731e717d686e6ade657fd975 (patch)
treedea84efd43934a7d6139048f87c4ba86d68d4b6d /fs/xfs/linux-2.6/xfs_buf.c
parenta13ff0bb3feda8b1fcffc69951320277ed7c4101 (diff)
parentde2eeea609b55e8c3994133a565b39edeaaaaf69 (diff)
Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6
* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (62 commits)
  [XFS] add __init/__exit mark to specific init/cleanup functions
  [XFS] Fix oops in xfs_file_readdir()
  [XFS] kill xfs_root
  [XFS] keep i_nlink updated and use proper accessors
  [XFS] stop updating inode->i_blocks
  [XFS] Make xfs_ail_check check less by default
  [XFS] Move AIL pushing into it's own thread
  [XFS] use generic_permission
  [XFS] stop re-checking permissions in xfs_swapext
  [XFS] clean up xfs_swapext
  [XFS] remove permission check from xfs_change_file_space
  [XFS] prevent panic during log recovery due to bogus op_hdr length
  [XFS] Cleanup various fid related bits:
  [XFS] Fix xfs_lowbit64
  [XFS] Remove CFORK macros and use code directly in IFORK and DFORK macros.
  [XFS] kill superflous buffer locking (2nd attempt)
  [XFS] Use kernel-supplied "roundup_pow_of_two" for simplicity
  [XFS] Remove the BPCSHIFT and NB* based macros from XFS.
  [XFS] Remove bogus assert
  [XFS] optimize XFS_IS_REALTIME_INODE w/o realtime config
  ...
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 57
1 file changed, 6 insertions, 51 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 0382c19d6523..e347bfd47c91 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -387,8 +387,6 @@ _xfs_buf_lookup_pages(
 		if (unlikely(page == NULL)) {
 			if (flags & XBF_READ_AHEAD) {
 				bp->b_page_count = i;
-				for (i = 0; i < bp->b_page_count; i++)
-					unlock_page(bp->b_pages[i]);
 				return -ENOMEM;
 			}
 
@@ -418,24 +416,17 @@ _xfs_buf_lookup_pages(
 		ASSERT(!PagePrivate(page));
 		if (!PageUptodate(page)) {
 			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_locked = 1;
-			} else if (!PagePrivate(page)) {
+			if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
 		}
 
+		unlock_page(page);
 		bp->b_pages[i] = page;
 		offset = 0;
 	}
 
-	if (!bp->b_locked) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
 	if (page_count == bp->b_page_count)
 		bp->b_flags |= XBF_DONE;
 
@@ -751,7 +742,6 @@ xfs_buf_associate_memory(
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 		pageaddr += PAGE_CACHE_SIZE;
 	}
-	bp->b_locked = 0;
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
@@ -1098,25 +1088,13 @@ xfs_buf_iostart(
 	return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-	xfs_buf_t		*bp)
-{
-	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-	if (bp->b_flags & XBF_READ)
-		return bp->b_locked;
-	return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_locked = 0;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
@@ -1147,10 +1125,6 @@ xfs_buf_bio_end_io(
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (_xfs_buf_iolocked(bp)) {
-			unlock_page(page);
-		}
 	} while (bvec >= bio->bi_io_vec);
 
 	_xfs_buf_ioend(bp, 1);
@@ -1161,13 +1135,12 @@ STATIC void
 _xfs_buf_ioapply(
 	xfs_buf_t		*bp)
 {
-	int			i, rw, map_i, total_nr_pages, nr_pages;
+	int			rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
 	unsigned int		blocksize = bp->b_target->bt_bsize;
-	int			locking = _xfs_buf_iolocked(bp);
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1190,7 +1163,7 @@ _xfs_buf_ioapply(
 	 * filesystem block size is not smaller than the page size.
 	 */
 	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    (bp->b_flags & XBF_READ) && locking &&
+	    (bp->b_flags & XBF_READ) &&
 	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
 
@@ -1207,24 +1180,6 @@ _xfs_buf_ioapply(
 		goto submit_io;
 	}
 
-	/* Lock down the pages which we need to for the request */
-	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-		for (i = 0; size; i++) {
-			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = bp->b_pages[i];
-
-			if (nbytes > size)
-				nbytes = size;
-
-			lock_page(page);
-
-			size -= nbytes;
-			offset = 0;
-		}
-		offset = bp->b_offset;
-		size = bp->b_count_desired;
-	}
-
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
@@ -1571,7 +1526,7 @@ xfs_alloc_delwrite_queue(
 
 	INIT_LIST_HEAD(&btp->bt_list);
 	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
-	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+	spin_lock_init(&btp->bt_delwrite_lock);
 	btp->bt_flags = 0;
 	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
 	if (IS_ERR(btp->bt_task)) {