author		Christoph Hellwig <hch@infradead.org>	2007-10-11 21:12:39 -0400
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 02:12:07 -0500
commit		c40ea74101ab75a8f320d057e7cf4b772b090110
tree		047b725cf7347e4111b65edd532cf9b1ee8010d1 /fs/xfs/linux-2.6/xfs_buf.c
parent		0771fb4515229821b7d74865b87a430de9fc1113
[XFS] kill superfluous buffer locking
There is no need to lock any page in xfs_buf.c because we operate on our
own address_space and all locking is covered by the buffer semaphore.  If
we ever switch back to the main blockdevice address_space with a similar
scheme, as suggested e.g. for fsblock, the locking will have to be totally
revised anyway, because the current scheme is neither correct nor coherent
with itself.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29845a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
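The model the message relies on can be pictured with a short sketch. This is
a hypothetical, cut-down illustration, not the real xfs_buf_t: the struct
name and the *_sketch helpers are invented here, b_sema/b_pages merely mirror
the era's field names, and on kernels of this vintage the semaphore API lived
in <asm/semaphore.h>. It shows the one invariant that makes the per-page
locks redundant: a single semaphore per buffer serialises all access to pages
that live in an address_space private to the buffer target.

	#include <asm/semaphore.h>	/* down()/up(); <linux/semaphore.h> on later kernels */
	#include <linux/mm_types.h>	/* struct page */

	/*
	 * Hypothetical cut-down buffer, not the real xfs_buf_t.  The pages in
	 * b_pages[] are allocated from an address_space owned by the buffer
	 * target, so no other kernel path can look them up behind our back.
	 * That makes b_sema the one and only lock covering them.
	 */
	struct buf_sketch {
		struct semaphore	b_sema;		/* covers every page below */
		struct page		**b_pages;
		unsigned int		b_page_count;
	};

	static void buf_lock_sketch(struct buf_sketch *bp)
	{
		down(&bp->b_sema);	/* sole serialisation point; no lock_page() */
	}

	static void buf_unlock_sketch(struct buf_sketch *bp)
	{
		up(&bp->b_sema);
	}

Given that invariant, every lock_page()/unlock_page() pair in the I/O paths
is pure overhead, which is exactly what the diff below removes.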
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	53
1 file changed, 5 insertions(+), 48 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 522cfaa70258..a7c7cb27fa5a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -409,6 +409,7 @@ _xfs_buf_lookup_pages(
 			congestion_wait(WRITE, HZ/50);
 			goto retry;
 		}
+		unlock_page(page);
 
 		XFS_STATS_INC(xb_page_found);
 
@@ -418,10 +419,7 @@ _xfs_buf_lookup_pages(
 		ASSERT(!PagePrivate(page));
 		if (!PageUptodate(page)) {
 			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_locked = 1;
-			} else if (!PagePrivate(page)) {
+			if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
@@ -431,11 +429,6 @@ _xfs_buf_lookup_pages(
 		offset = 0;
 	}
 
-	if (!bp->b_locked) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
 	if (page_count == bp->b_page_count)
 		bp->b_flags |= XBF_DONE;
 
@@ -752,7 +745,6 @@ xfs_buf_associate_memory(
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 		pageaddr += PAGE_CACHE_SIZE;
 	}
-	bp->b_locked = 0;
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
@@ -1099,25 +1091,13 @@ xfs_buf_iostart(
 	return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-	xfs_buf_t		*bp)
-{
-	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-	if (bp->b_flags & XBF_READ)
-		return bp->b_locked;
-	return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_locked = 0;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
@@ -1148,10 +1128,6 @@ xfs_buf_bio_end_io(
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (_xfs_buf_iolocked(bp)) {
-			unlock_page(page);
-		}
 	} while (bvec >= bio->bi_io_vec);
 
 	_xfs_buf_ioend(bp, 1);
@@ -1162,13 +1138,12 @@ STATIC void
 _xfs_buf_ioapply(
 	xfs_buf_t		*bp)
 {
-	int			i, rw, map_i, total_nr_pages, nr_pages;
+	int			rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
 	unsigned int		blocksize = bp->b_target->bt_bsize;
-	int			locking = _xfs_buf_iolocked(bp);
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1191,7 +1166,7 @@ _xfs_buf_ioapply(
 	 * filesystem block size is not smaller than the page size.
 	 */
 	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    (bp->b_flags & XBF_READ) && locking &&
+	    (bp->b_flags & XBF_READ) &&
 	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
 
@@ -1208,24 +1183,6 @@ _xfs_buf_ioapply(
 		goto submit_io;
 	}
 
-	/* Lock down the pages which we need to for the request */
-	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-		for (i = 0; size; i++) {
-			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = bp->b_pages[i];
-
-			if (nbytes > size)
-				nbytes = size;
-
-			lock_page(page);
-
-			size -= nbytes;
-			offset = 0;
-		}
-		offset = bp->b_offset;
-		size = bp->b_count_desired;
-	}
-
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);