author		Dave Chinner <dchinner@redhat.com>	2012-06-22 04:50:09 -0400
committer	Ben Myers <bpm@sgi.com>			2012-07-01 15:50:05 -0400
commit		3e85c868a697805a3d4c7800a6bacdfc81d15cdf (patch)
tree		c4c1ee345e2e84a6ff9abdef782cccc6293fc7c7
parent		cbb7baab285a540f173ef1ec3d5bcf9d0ad29d16 (diff)
xfs: convert internal buffer functions to pass maps
While the external interface currently uses separate blockno/length variables, we need to move internal interfaces to passing and parsing vector maps. This will then allow us to add external interfaces to support discontiguous buffer maps, as the internal code will already support them.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
-rw-r--r--	fs/xfs/xfs_buf.c	202
-rw-r--r--	fs/xfs/xfs_buf.h	 43
2 files changed, 191 insertions(+), 54 deletions(-)
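Before the diff itself, here is a hedged, user-space sketch of the pattern the patch introduces: a buffer described by an array of {block, length} maps, whose total length is the sum of the map lengths, and whose IO is issued one map at a time. The names mirror the patch (bm_bn, bm_len) but struct buf_map, total_blocks() and the block numbers are illustrative stand-ins, not the kernel code below.

/*
 * Standalone illustration (not kernel code) of the vector-map idea.
 */
#include <stdio.h>

struct buf_map {
	long long	bm_bn;	/* start block of this vector */
	int		bm_len;	/* length of this vector in blocks */
};

/* Sum the map lengths, as _xfs_buf_alloc() does to set bp->b_length. */
static int total_blocks(const struct buf_map *map, int nmaps)
{
	int i, len = 0;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;
	return len;
}

int main(void)
{
	/* A discontiguous buffer built from two separate block ranges. */
	struct buf_map map[] = {
		{ .bm_bn = 1024, .bm_len = 8 },
		{ .bm_bn = 4096, .bm_len = 4 },
	};
	int nmaps = sizeof(map) / sizeof(map[0]);
	int remaining = total_blocks(map, nmaps);
	int i;

	/*
	 * Issue "IO" one vector at a time, capping each chunk at the vector
	 * length, the way the patch's _xfs_buf_ioapply() walks bp->b_maps[]
	 * and calls xfs_buf_ioapply_map() for each entry.
	 */
	for (i = 0; i < nmaps && remaining > 0; i++) {
		int size = map[i].bm_len < remaining ? map[i].bm_len : remaining;

		printf("io: blkno %lld, %d blocks\n", map[i].bm_bn, size);
		remaining -= size;
	}
	return 0;
}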
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a843873b0954..82bb8123ab2b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -164,14 +164,49 @@ xfs_buf_stale(
 	ASSERT(atomic_read(&bp->b_hold) >= 1);
 }
 
+static int
+xfs_buf_get_maps(
+	struct xfs_buf		*bp,
+	int			map_count)
+{
+	ASSERT(bp->b_maps == NULL);
+	bp->b_map_count = map_count;
+
+	if (map_count == 1) {
+		bp->b_maps = &bp->b_map;
+		return 0;
+	}
+
+	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
+				KM_NOFS);
+	if (!bp->b_maps)
+		return ENOMEM;
+	return 0;
+}
+
+/*
+ *	Frees b_pages if it was allocated.
+ */
+static void
+xfs_buf_free_maps(
+	struct xfs_buf	*bp)
+{
+	if (bp->b_maps != &bp->b_map) {
+		kmem_free(bp->b_maps);
+		bp->b_maps = NULL;
+	}
+}
+
 struct xfs_buf *
-xfs_buf_alloc(
+_xfs_buf_alloc(
 	struct xfs_buftarg	*target,
-	xfs_daddr_t		blkno,
-	size_t			numblks,
+	struct xfs_buf_map	*map,
+	int			nmaps,
 	xfs_buf_flags_t		flags)
 {
 	struct xfs_buf		*bp;
+	int			error;
+	int			i;
 
 	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 	if (unlikely(!bp))
@@ -192,18 +227,28 @@ xfs_buf_alloc(
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
 	XB_SET_OWNER(bp);
 	bp->b_target = target;
+	bp->b_flags = flags;
 
 	/*
 	 * Set length and io_length to the same value initially.
 	 * I/O routines should use io_length, which will be the same in
 	 * most cases but may be reset (e.g. XFS recovery).
 	 */
-	bp->b_length = numblks;
-	bp->b_io_length = numblks;
-	bp->b_flags = flags;
-	bp->b_bn = blkno;
-	bp->b_map.bm_bn = blkno;
-	bp->b_map.bm_len = numblks;
+	error = xfs_buf_get_maps(bp, nmaps);
+	if (error) {
+		kmem_zone_free(xfs_buf_zone, bp);
+		return NULL;
+	}
+
+	bp->b_bn = map[0].bm_bn;
+	bp->b_length = 0;
+	for (i = 0; i < nmaps; i++) {
+		bp->b_maps[i].bm_bn = map[i].bm_bn;
+		bp->b_maps[i].bm_len = map[i].bm_len;
+		bp->b_length += map[i].bm_len;
+	}
+	bp->b_io_length = bp->b_length;
+
 	atomic_set(&bp->b_pin_count, 0);
 	init_waitqueue_head(&bp->b_waiters);
 
@@ -282,6 +327,7 @@ xfs_buf_free(
 	} else if (bp->b_flags & _XBF_KMEM)
 		kmem_free(bp->b_addr);
 	_xfs_buf_free_pages(bp);
+	xfs_buf_free_maps(bp);
 	kmem_zone_free(xfs_buf_zone, bp);
 }
 
@@ -428,8 +474,8 @@ _xfs_buf_map_pages(
 xfs_buf_t *
 _xfs_buf_find(
 	struct xfs_buftarg	*btp,
-	xfs_daddr_t		blkno,
-	size_t			numblks,
+	struct xfs_buf_map	*map,
+	int			nmaps,
 	xfs_buf_flags_t		flags,
 	xfs_buf_t		*new_bp)
 {
@@ -438,7 +484,12 @@ _xfs_buf_find(
 	struct rb_node		**rbp;
 	struct rb_node		*parent;
 	xfs_buf_t		*bp;
+	xfs_daddr_t		blkno = map[0].bm_bn;
+	int			numblks = 0;
+	int			i;
 
+	for (i = 0; i < nmaps; i++)
+		numblks += map[i].bm_len;
 	numbytes = BBTOB(numblks);
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
@@ -539,22 +590,23 @@ xfs_buf_get(
 	struct xfs_buf		*bp;
 	struct xfs_buf		*new_bp;
 	int			error = 0;
+	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 
-	bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
+	bp = _xfs_buf_find(target, &map, 1, flags, NULL);
 	if (likely(bp))
 		goto found;
 
-	new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
+	new_bp = _xfs_buf_alloc(target, &map, 1, flags);
 	if (unlikely(!new_bp))
 		return NULL;
 
 	error = xfs_buf_allocate_memory(new_bp, flags);
 	if (error) {
-		kmem_zone_free(xfs_buf_zone, new_bp);
+		xfs_buf_free(new_bp);
 		return NULL;
 	}
 
-	bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
+	bp = _xfs_buf_find(target, &map, 1, flags, new_bp);
 	if (!bp) {
 		xfs_buf_free(new_bp);
 		return NULL;
@@ -666,7 +718,9 @@ xfs_buf_read_uncached(
 		return NULL;
 
 	/* set up the buffer for a read IO */
-	bp->b_map.bm_bn = daddr;
+	ASSERT(bp->b_map_count == 1);
+	bp->b_bn = daddr;
+	bp->b_maps[0].bm_bn = daddr;
 	bp->b_flags |= XBF_READ;
 
 	xfsbdstrat(target->bt_mount, bp);
@@ -695,9 +749,11 @@ xfs_buf_set_empty(
 	bp->b_addr = NULL;
 	bp->b_length = numblks;
 	bp->b_io_length = numblks;
+
+	ASSERT(bp->b_map_count == 1);
 	bp->b_bn = XFS_BUF_DADDR_NULL;
-	bp->b_map.bm_bn = XFS_BUF_DADDR_NULL;
-	bp->b_map.bm_len = bp->b_length;
+	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
+	bp->b_maps[0].bm_len = bp->b_length;
 }
 
 static inline struct page *
@@ -761,9 +817,10 @@ xfs_buf_get_uncached(
 {
 	unsigned long		page_count;
 	int			error, i;
-	xfs_buf_t		*bp;
+	struct xfs_buf		*bp;
+	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
-	bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
+	bp = _xfs_buf_alloc(target, &map, 1, 0);
 	if (unlikely(bp == NULL))
 		goto fail;
 
@@ -794,6 +851,7 @@ xfs_buf_get_uncached(
 		__free_page(bp->b_pages[i]);
 	_xfs_buf_free_pages(bp);
  fail_free_buf:
+	xfs_buf_free_maps(bp);
 	kmem_zone_free(xfs_buf_zone, bp);
  fail:
 	return NULL;
@@ -1154,36 +1212,39 @@ xfs_buf_bio_end_io(
 	bio_put(bio);
 }
 
-STATIC void
-_xfs_buf_ioapply(
-	xfs_buf_t		*bp)
+static void
+xfs_buf_ioapply_map(
+	struct xfs_buf	*bp,
+	int		map,
+	int		*buf_offset,
+	int		*count,
+	int		rw)
 {
-	int			rw, map_i, total_nr_pages, nr_pages;
-	struct bio		*bio;
-	int			offset = bp->b_offset;
-	int			size = BBTOB(bp->b_io_length);
-	sector_t		sector = bp->b_map.bm_bn;
+	int		page_index;
+	int		total_nr_pages = bp->b_page_count;
+	int		nr_pages;
+	struct bio	*bio;
+	sector_t	sector = bp->b_maps[map].bm_bn;
+	int		size;
+	int		offset;
 
 	total_nr_pages = bp->b_page_count;
-	map_i = 0;
 
-	if (bp->b_flags & XBF_WRITE) {
-		if (bp->b_flags & XBF_SYNCIO)
-			rw = WRITE_SYNC;
-		else
-			rw = WRITE;
-		if (bp->b_flags & XBF_FUA)
-			rw |= REQ_FUA;
-		if (bp->b_flags & XBF_FLUSH)
-			rw |= REQ_FLUSH;
-	} else if (bp->b_flags & XBF_READ_AHEAD) {
-		rw = READA;
-	} else {
-		rw = READ;
-	}
+	/* skip the pages in the buffer before the start offset */
+	page_index = 0;
+	offset = *buf_offset;
+	while (offset >= PAGE_SIZE) {
+		page_index++;
+		offset -= PAGE_SIZE;
+	}
 
-	/* we only use the buffer cache for meta-data */
-	rw |= REQ_META;
+	/*
+	 * Limit the IO size to the length of the current vector, and update the
+	 * remaining IO count for the next time around.
+	 */
+	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
+	*count -= size;
+	*buf_offset += size;
 
 next_chunk:
 	atomic_inc(&bp->b_io_remaining);
@@ -1198,13 +1259,14 @@ next_chunk:
 	bio->bi_private = bp;
 
 
-	for (; size && nr_pages; nr_pages--, map_i++) {
+	for (; size && nr_pages; nr_pages--, page_index++) {
 		int	rbytes, nbytes = PAGE_SIZE - offset;
 
 		if (nbytes > size)
 			nbytes = size;
 
-		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
+		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
+				      offset);
 		if (rbytes < nbytes)
 			break;
 
@@ -1226,6 +1288,54 @@ next_chunk:
 		xfs_buf_ioerror(bp, EIO);
 		bio_put(bio);
 	}
+
+}
+
+STATIC void
+_xfs_buf_ioapply(
+	struct xfs_buf	*bp)
+{
+	struct blk_plug	plug;
+	int		rw;
+	int		offset;
+	int		size;
+	int		i;
+
+	if (bp->b_flags & XBF_WRITE) {
+		if (bp->b_flags & XBF_SYNCIO)
+			rw = WRITE_SYNC;
+		else
+			rw = WRITE;
+		if (bp->b_flags & XBF_FUA)
+			rw |= REQ_FUA;
+		if (bp->b_flags & XBF_FLUSH)
+			rw |= REQ_FLUSH;
+	} else if (bp->b_flags & XBF_READ_AHEAD) {
+		rw = READA;
+	} else {
+		rw = READ;
+	}
+
+	/* we only use the buffer cache for meta-data */
+	rw |= REQ_META;
+
+	/*
+	 * Walk all the vectors issuing IO on them. Set up the initial offset
+	 * into the buffer and the desired IO size before we start -
+	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
+	 * subsequent call.
+	 */
+	offset = bp->b_offset;
+	size = BBTOB(bp->b_io_length);
+	blk_start_plug(&plug);
+	for (i = 0; i < bp->b_map_count; i++) {
+		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
+		if (bp->b_error)
+			break;
+		if (size <= 0)
+			break;	/* all done */
+	}
+	blk_finish_plug(&plug);
 }
 
 void
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c9c2ba90c53c..67d134994ae4 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -105,6 +105,9 @@ struct xfs_buf_map {
 	int			bm_len;	/* size of I/O */
 };
 
+#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
+	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
+
 typedef struct xfs_buf {
 	/*
 	 * first cacheline holds all the fields needed for an uncontended cache
@@ -134,7 +137,9 @@ typedef struct xfs_buf {
 	struct xfs_trans	*b_transp;
 	struct page		**b_pages;	/* array of page pointers */
 	struct page		*b_page_array[XB_PAGES]; /* inline pages */
-	struct xfs_buf_map	b_map;		/* compound buffer map */
+	struct xfs_buf_map	*b_maps;	/* compound buffer map */
+	struct xfs_buf_map	b_map;		/* inline compound buffer map */
+	int			b_map_count;
 	int			b_io_length;	/* IO size in BBs */
 	atomic_t		b_pin_count;	/* pin count */
 	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
@@ -149,11 +154,35 @@ typedef struct xfs_buf {
 
 
 /* Finding and Reading Buffers */
-struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno,
-				size_t numblks, xfs_buf_flags_t flags,
-				struct xfs_buf *new_bp);
-#define xfs_incore(buftarg,blkno,len,lockit) \
-	_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
+struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
+			      struct xfs_buf_map *map, int nmaps,
+			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);
+
+static inline struct xfs_buf *
+xfs_incore(
+	struct xfs_buftarg	*target,
+	xfs_daddr_t		blkno,
+	size_t			numblks,
+	xfs_buf_flags_t		flags)
+{
+	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+	return _xfs_buf_find(target, &map, 1, flags, NULL);
+}
+
+struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
+			       struct xfs_buf_map *map, int nmaps,
+			       xfs_buf_flags_t flags);
+
+static inline struct xfs_buf *
+xfs_buf_alloc(
+	struct xfs_buftarg	*target,
+	xfs_daddr_t		blkno,
+	size_t			numblks,
+	xfs_buf_flags_t		flags)
+{
+	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+	return _xfs_buf_alloc(target, &map, 1, flags);
+}
 
 struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno,
 			size_t numblks, xfs_buf_flags_t flags);
@@ -163,8 +192,6 @@ void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno,
 			size_t numblks);
 
 struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
-struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
-			size_t numblks, xfs_buf_flags_t flags);
 void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
 int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
 
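The header change above keeps the old single-block call signatures by wrapping them around the new vector interface. A hedged, user-space sketch of that shim pattern follows; struct buf_map, DEFINE_SINGLE_MAP(), find_vec() and find_block() are illustrative stand-ins, not the kernel API.

/* Standalone illustration of the single-map wrapper pattern. */
#include <stdio.h>

struct buf_map {
	long long	bm_bn;
	int		bm_len;
};

/* Mirrors DEFINE_SINGLE_BUF_MAP(): build a one-element map on the stack. */
#define DEFINE_SINGLE_MAP(map, blkno, numblks) \
	struct buf_map map = { .bm_bn = (blkno), .bm_len = (numblks) }

/* Stand-in for the new vector-based internal interface. */
static void find_vec(const struct buf_map *map, int nmaps)
{
	int i;

	for (i = 0; i < nmaps; i++)
		printf("lookup: blkno %lld, len %d\n", map[i].bm_bn, map[i].bm_len);
}

/* Old-style caller: unchanged signature, forwards a single-entry map. */
static void find_block(long long blkno, int numblks)
{
	DEFINE_SINGLE_MAP(map, blkno, numblks);
	find_vec(&map, 1);
}

int main(void)
{
	find_block(1024, 8);
	return 0;
}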