author		Christoph Hellwig <hch@infradead.org>	2007-12-06 22:07:08 -0500
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 02:18:50 -0500
commit		a9759f2de38a3443d5107bddde03b4f3f550060e
tree		3a61d9818f5e457bc073958363a3136ac229f532 /fs/xfs/linux-2.6
parent		40ebd81d1a7635cf92a59c387a599fce4863206b
[XFS] kill superfluous buffer locking (2nd attempt)
There is no need to lock any page in xfs_buf.c because we operate on our
own address_space and all locking is covered by the buffer semaphore. If
we ever switch back to the main block device address_space with a similar
scheme, as suggested e.g. for fsblock, the locking will have to be revised
completely anyway, because the current scheme is neither correct nor
coherent with itself.
SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:30156a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
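The reasoning above (a single per-buffer semaphore covers all access to the buffer's pages, so per-page locking is redundant) can be illustrated with a small userspace sketch. This is a toy model, not XFS code: toy_buf, toy_buf_lock() and toy_buf_unlock() are invented names, and a POSIX semaphore merely stands in for the buffer semaphore the commit message refers to. Every thread must take the buffer's semaphore before touching the page array, so the individual pages never need locks of their own.

/*
 * Toy userspace model of the locking argument above -- NOT XFS code.
 * Every accessor takes the buffer's own semaphore before touching the
 * page array, so no per-page lock is needed.  All names here
 * (toy_buf, toy_buf_lock, ...) are invented for this sketch.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define TOY_PAGES	4
#define TOY_PAGE_SIZE	64

struct toy_buf {
	sem_t	sema;					/* stands in for the buffer semaphore */
	char	pages[TOY_PAGES][TOY_PAGE_SIZE];	/* stands in for the page array */
};

static struct toy_buf buf;

static void toy_buf_lock(struct toy_buf *bp)   { sem_wait(&bp->sema); }
static void toy_buf_unlock(struct toy_buf *bp) { sem_post(&bp->sema); }

/* Each writer touches every page, but only while holding the buffer lock. */
static void *writer(void *arg)
{
	long id = (long)arg;

	toy_buf_lock(&buf);
	for (int i = 0; i < TOY_PAGES; i++)
		snprintf(buf.pages[i], TOY_PAGE_SIZE, "written by %ld", id);
	toy_buf_unlock(&buf);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];

	sem_init(&buf.sema, 0, 1);	/* binary semaphore: one owner at a time */
	for (long i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, writer, (void *)i);
	for (int i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);

	/* All pages carry the same writer id: access was serialized per buffer. */
	printf("page 0: %s\n", buf.pages[0]);
	return 0;
}

Built with cc -pthread, the sketch always ends with a consistent page array, because serialization happens at buffer granularity rather than page granularity, which is the property the removed page-locking code was providing redundantly.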
Diffstat (limited to 'fs/xfs/linux-2.6')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	55
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.h	1
2 files changed, 5 insertions, 51 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 522cfaa70258..302273f8e2a9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -387,8 +387,6 @@ _xfs_buf_lookup_pages(
 		if (unlikely(page == NULL)) {
 			if (flags & XBF_READ_AHEAD) {
 				bp->b_page_count = i;
-				for (i = 0; i < bp->b_page_count; i++)
-					unlock_page(bp->b_pages[i]);
 				return -ENOMEM;
 			}
 
@@ -418,24 +416,17 @@ _xfs_buf_lookup_pages(
 		ASSERT(!PagePrivate(page));
 		if (!PageUptodate(page)) {
 			page_count--;
-			if (blocksize >= PAGE_CACHE_SIZE) {
-				if (flags & XBF_READ)
-					bp->b_locked = 1;
-			} else if (!PagePrivate(page)) {
+			if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
 				if (test_page_region(page, offset, nbytes))
 					page_count++;
 			}
 		}
 
+		unlock_page(page);
 		bp->b_pages[i] = page;
 		offset = 0;
 	}
 
-	if (!bp->b_locked) {
-		for (i = 0; i < bp->b_page_count; i++)
-			unlock_page(bp->b_pages[i]);
-	}
-
 	if (page_count == bp->b_page_count)
 		bp->b_flags |= XBF_DONE;
 
@@ -752,7 +743,6 @@ xfs_buf_associate_memory(
 		bp->b_pages[i] = mem_to_page((void *)pageaddr);
 		pageaddr += PAGE_CACHE_SIZE;
 	}
-	bp->b_locked = 0;
 
 	bp->b_count_desired = len;
 	bp->b_buffer_length = buflen;
@@ -1099,25 +1089,13 @@ xfs_buf_iostart(
 	return status;
 }
 
-STATIC_INLINE int
-_xfs_buf_iolocked(
-	xfs_buf_t		*bp)
-{
-	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
-	if (bp->b_flags & XBF_READ)
-		return bp->b_locked;
-	return 0;
-}
-
 STATIC_INLINE void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
-		bp->b_locked = 0;
+	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
 		xfs_buf_ioend(bp, schedule);
-	}
 }
 
 STATIC void
@@ -1148,10 +1126,6 @@ xfs_buf_bio_end_io(
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (_xfs_buf_iolocked(bp)) {
-			unlock_page(page);
-		}
 	} while (bvec >= bio->bi_io_vec);
 
 	_xfs_buf_ioend(bp, 1);
@@ -1162,13 +1136,12 @@ STATIC void
 _xfs_buf_ioapply(
 	xfs_buf_t		*bp)
 {
-	int			i, rw, map_i, total_nr_pages, nr_pages;
+	int			rw, map_i, total_nr_pages, nr_pages;
 	struct bio		*bio;
 	int			offset = bp->b_offset;
 	int			size = bp->b_count_desired;
 	sector_t		sector = bp->b_bn;
 	unsigned int		blocksize = bp->b_target->bt_bsize;
-	int			locking = _xfs_buf_iolocked(bp);
 
 	total_nr_pages = bp->b_page_count;
 	map_i = 0;
@@ -1191,7 +1164,7 @@ _xfs_buf_ioapply(
 	 * filesystem block size is not smaller than the page size.
 	 */
 	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
-	    (bp->b_flags & XBF_READ) && locking &&
+	    (bp->b_flags & XBF_READ) &&
 	    (blocksize >= PAGE_CACHE_SIZE)) {
 		bio = bio_alloc(GFP_NOIO, 1);
 
@@ -1208,24 +1181,6 @@ _xfs_buf_ioapply(
 		goto submit_io;
 	}
 
-	/* Lock down the pages which we need to for the request */
-	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
-		for (i = 0; size; i++) {
-			int		nbytes = PAGE_CACHE_SIZE - offset;
-			struct page	*page = bp->b_pages[i];
-
-			if (nbytes > size)
-				nbytes = size;
-
-			lock_page(page);
-
-			size -= nbytes;
-			offset = 0;
-		}
-		offset = bp->b_offset;
-		size = bp->b_count_desired;
-	}
-
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
 	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index b5908a34b15d..a3d207de48b8 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -143,7 +143,6 @@ typedef struct xfs_buf {
 	void			*b_fspriv2;
 	void			*b_fspriv3;
 	unsigned short		b_error;	/* error code on I/O */
-	unsigned short		b_locked;	/* page array is locked */
 	unsigned int		b_page_count;	/* size of page array */
 	unsigned int		b_offset;	/* page offset in first page */
 	struct page		**b_pages;	/* array of page pointers */