author		Christoph Hellwig <hch@infradead.org>	2010-12-10 03:42:20 -0500
committer	Alex Elder <aelder@sgi.com>		2010-12-16 17:05:51 -0500
commit		a206c817c864583c44e2f418db8e6c7a000fbc38 (patch)
tree		71c7327482299de93b4183c23d118939e8bd30fc
parent		405f80429436d38ab4e6b4c0d99861a1f00648fd (diff)
xfs: kill xfs_iomap
Opencode the xfs_iomap code in its two callers. The overlap of
passed flags was already minimal and will be further reduced in the
next patch.

As a side effect, the BMAPI_* flags for xfs_bmapi and the IO_* flags
for I/O end processing are merged into a single set of flags, which
should be a bit more descriptive of the operation we perform.

Also improve the tracing by giving each caller its own set of
tracepoints.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
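
For orientation, a minimal sketch of the consolidated I/O type set and of how a
writeback caller now picks one type per buffer instead of building a BMAPI_* mask.
The enum and the xfs_map_blocks() call shape are copied from the hunks below; the
helper function and its name are only illustrative and not part of the patch.

#include <linux/buffer_head.h>

/* Types of I/O for bmap clustering and I/O completion tracking. */
enum {
	IO_DIRECT = 0,	/* special case for direct I/O ioends */
	IO_DELALLOC,	/* mapping covers delalloc region */
	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
	IO_OVERWRITE,	/* mapping covers already allocated extent */
};

/*
 * Illustrative only: mirrors the buffer-state checks xfs_vm_writepage()
 * does inline.  The chosen type is then passed straight to
 * xfs_map_blocks() together with a nonblocking flag, e.g.:
 *
 *	err = xfs_map_blocks(inode, offset, len, &imap, type, nonblocking);
 */
static int xfs_wb_io_type(struct buffer_head *bh)
{
	if (buffer_unwritten(bh))
		return IO_UNWRITTEN;	/* needs unwritten extent conversion */
	if (buffer_delay(bh))
		return IO_DELALLOC;	/* needs delayed allocation conversion */
	return IO_OVERWRITE;		/* writing over an allocated extent */
}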
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	| 211
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.h	|  16
-rw-r--r--	fs/xfs/linux-2.6/xfs_trace.h	|  28
-rw-r--r--	fs/xfs/xfs_iomap.c		| 122
-rw-r--r--	fs/xfs/xfs_iomap.h		|  27
5 files changed, 191 insertions(+), 213 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 1ace78bfbea7..365040f61d89 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,15 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- */
-enum {
-	IO_READ,	/* mapping for a read */
-	IO_DELAY,	/* mapping covers delalloc region */
-	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
-	IO_NEW		/* just allocated */
-};
 
 /*
  * Prime number of hash buckets since address is used as the key.
@@ -182,9 +173,6 @@ xfs_setfilesize(
 	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(ioend->io_type != IO_READ);
-
 	if (unlikely(ioend->io_error))
 		return 0;
 
@@ -244,10 +232,8 @@ xfs_end_io(
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	if (ioend->io_type != IO_READ) {
-		error = xfs_setfilesize(ioend);
-		ASSERT(!error || error == EAGAIN);
-	}
+	error = xfs_setfilesize(ioend);
+	ASSERT(!error || error == EAGAIN);
 
 	/*
 	 * If we didn't complete processing of the ioend, requeue it to the
@@ -320,12 +306,88 @@ xfs_map_blocks(
 	loff_t			offset,
 	ssize_t			count,
 	struct xfs_bmbt_irec	*imap,
-	int			flags)
+	int			type,
+	int			nonblocking)
 {
-	int			nmaps = 1;
-	int			new = 0;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	switch (type) {
+	case IO_OVERWRITE:
+		lockmode = xfs_ilock_map_shared(ip);
+		break;
+	case IO_UNWRITTEN:
+		lockmode = XFS_ILOCK_EXCL;
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
+		xfs_ilock(ip, lockmode);
+		break;
+	case IO_DELALLOC:
+		lockmode = XFS_ILOCK_SHARED;
+
+		if (!xfs_ilock_nowait(ip, lockmode)) {
+			if (nonblocking)
+				return -XFS_ERROR(EAGAIN);
+			xfs_ilock(ip, lockmode);
+		}
+		break;
+	}
+
+	ASSERT(offset <= mp->m_maxioffset);
+	if (offset + count > mp->m_maxioffset)
+		count = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
+	if (error)
+		goto out;
+
+	switch (type) {
+	case IO_UNWRITTEN:
+		/* If we found an extent, return it */
+		if (nimaps &&
+		    (imap->br_startblock != HOLESTARTBLOCK) &&
+		    (imap->br_startblock != DELAYSTARTBLOCK)) {
+			trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+			break;
+		}
+
+		error = xfs_iomap_write_delay(ip, offset, count, imap);
+		if (!error)
+			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+		break;
+	case IO_DELALLOC:
+		/* If we found an extent, return it */
+		xfs_iunlock(ip, lockmode);
+		lockmode = 0;
+
+		if (nimaps && !isnullstartblock(imap->br_startblock)) {
+			trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+			break;
+		}
+
+		error = xfs_iomap_write_allocate(ip, offset, count, imap);
+		if (!error)
+			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+		break;
+	default:
+		if (nimaps)
+			trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+	}
 
-	return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
+out:
+	if (lockmode)
+		xfs_iunlock(ip, lockmode);
+	return -XFS_ERROR(error);
 }
 
 STATIC int
@@ -722,9 +784,9 @@ xfs_is_delayed_page(
 			if (buffer_unwritten(bh))
 				acceptable = (type == IO_UNWRITTEN);
 			else if (buffer_delay(bh))
-				acceptable = (type == IO_DELAY);
+				acceptable = (type == IO_DELALLOC);
 			else if (buffer_dirty(bh) && buffer_mapped(bh))
-				acceptable = (type == IO_NEW);
+				acceptable = (type == IO_OVERWRITE);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
@@ -809,7 +871,7 @@ xfs_convert_page(
 		if (buffer_unwritten(bh))
 			type = IO_UNWRITTEN;
 		else
-			type = IO_DELAY;
+			type = IO_DELALLOC;
 
 		if (!xfs_imap_valid(inode, imap, offset)) {
 			done = 1;
@@ -826,7 +888,7 @@ xfs_convert_page(
 			page_dirty--;
 			count++;
 		} else {
-			type = IO_NEW;
+			type = IO_OVERWRITE;
 			if (buffer_mapped(bh) && all_bh) {
 				lock_buffer(bh);
 				xfs_add_to_ioend(inode, bh, offset,
@@ -926,7 +988,7 @@ xfs_aops_discard_page(
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_is_delayed_page(page, IO_DELAY))
+	if (!xfs_is_delayed_page(page, IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -994,9 +1056,10 @@ xfs_vm_writepage(
 	__uint64_t		end_offset;
 	pgoff_t			end_index, last_index;
 	ssize_t			size, len;
-	int			flags, err, imap_valid = 0, uptodate = 1;
+	int			err, imap_valid = 0, uptodate = 1;
 	int			count = 0;
 	int			all_bh = 0;
+	int			nonblocking = 0;
 
 	trace_xfs_writepage(inode, page, 0);
 
@@ -1047,8 +1110,10 @@ xfs_vm_writepage(
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	flags = BMAPI_READ;
-	type = IO_NEW;
+	type = IO_OVERWRITE;
+
+	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+		nonblocking = 1;
 
 	do {
 		int new_ioend = 0;
@@ -1078,16 +1143,11 @@ xfs_vm_writepage(
 				type = IO_UNWRITTEN;
 				imap_valid = 0;
 			}
-			flags = BMAPI_WRITE | BMAPI_IGNSTATE;
 		} else if (buffer_delay(bh)) {
-			if (type != IO_DELAY) {
-				type = IO_DELAY;
+			if (type != IO_DELALLOC) {
+				type = IO_DELALLOC;
 				imap_valid = 0;
 			}
-			flags = BMAPI_ALLOCATE;
-
-			if (wbc->sync_mode == WB_SYNC_NONE)
-				flags |= BMAPI_TRYLOCK;
 		}
 
 		if (!imap_valid) {
@@ -1100,8 +1160,8 @@ xfs_vm_writepage(
 			 * for unwritten extent conversion.
 			 */
 			new_ioend = 1;
-			err = xfs_map_blocks(inode, offset, len,
-					&imap, flags);
+			err = xfs_map_blocks(inode, offset, len, &imap,
+					     type, nonblocking);
 			if (err)
 				goto error;
 			imap_valid = xfs_imap_valid(inode, &imap,
@@ -1119,30 +1179,21 @@ xfs_vm_writepage(
 			 * That means it must already have extents allocated
 			 * underneath it. Map the extent by reading it.
 			 */
-			if (flags != BMAPI_READ) {
-				flags = BMAPI_READ;
+			if (type != IO_OVERWRITE) {
+				type = IO_OVERWRITE;
 				imap_valid = 0;
 			}
 			if (!imap_valid) {
 				new_ioend = 1;
 				size = xfs_probe_cluster(inode, page, bh, head);
 				err = xfs_map_blocks(inode, offset, size,
-						&imap, flags);
+						&imap, type, nonblocking);
 				if (err)
 					goto error;
 				imap_valid = xfs_imap_valid(inode, &imap,
 							    offset);
 			}
 
-			/*
-			 * We set the type to IO_NEW in case we are doing a
-			 * small write at EOF that is extending the file but
-			 * without needing an allocation. We need to update the
-			 * file size on I/O completion in this case so it is
-			 * the same case as having just allocated a new extent
-			 * that we are writing into for the first time.
-			 */
-			type = IO_NEW;
 			if (imap_valid) {
 				all_bh = 1;
 				lock_buffer(bh);
@@ -1250,13 +1301,19 @@ __xfs_get_blocks(
 	int			create,
 	int			direct)
 {
-	int			flags = create ? BMAPI_WRITE : BMAPI_READ;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
 	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			nimap = 1;
 	int			new = 0;
-	int			error;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
@@ -1265,15 +1322,45 @@ __xfs_get_blocks(
 	if (!create && direct && offset >= i_size_read(inode))
 		return 0;
 
-	if (direct && create)
-		flags |= BMAPI_DIRECT;
+	if (create) {
+		lockmode = XFS_ILOCK_EXCL;
+		xfs_ilock(ip, lockmode);
+	} else {
+		lockmode = xfs_ilock_map_shared(ip);
+	}
+
+	ASSERT(offset <= mp->m_maxioffset);
+	if (offset + size > mp->m_maxioffset)
+		size = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
-	error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
-			  &new);
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
 	if (error)
-		return -error;
-	if (nimap == 0)
-		return 0;
+		goto out_unlock;
+
+	if (create &&
+	    (!nimaps ||
+	     (imap.br_startblock == HOLESTARTBLOCK ||
+	      imap.br_startblock == DELAYSTARTBLOCK))) {
+		if (direct) {
+			error = xfs_iomap_write_direct(ip, offset, size,
+						       &imap, nimaps);
+		} else {
+			error = xfs_iomap_write_delay(ip, offset, size, &imap);
+		}
+		if (error)
+			goto out_unlock;
+
+		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+	} else if (nimaps) {
+		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+	} else {
+		trace_xfs_get_blocks_notfound(ip, offset, size);
+		goto out_unlock;
+	}
+	xfs_iunlock(ip, lockmode);
 
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1340,6 +1427,10 @@ __xfs_get_blocks(
 	}
 
 	return 0;
+
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+	return -error;
 }
 
 int
@@ -1427,7 +1518,7 @@ xfs_vm_direct_IO(
 	ssize_t			ret;
 
 	if (rw & WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IO_NEW);
+		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
 
 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
 						offset, nr_segs,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index c5057fb6237a..71f721e1a71f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,6 +23,22 @@ extern struct workqueue_struct *xfsconvertd_workqueue;
 extern mempool_t *xfs_ioend_pool;
 
 /*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+	IO_DIRECT = 0,	/* special case for direct I/O ioends */
+	IO_DELALLOC,	/* mapping covers delalloc region */
+	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
+	IO_OVERWRITE,	/* mapping covers already allocated extent */
+};
+
+#define XFS_IO_TYPES \
+	{ 0,			"" }, \
+	{ IO_DELALLOC,		"delalloc" }, \
+	{ IO_UNWRITTEN,		"unwritten" }, \
+	{ IO_OVERWRITE,		"overwrite" }
+
+/*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
  */
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index acef2e98c594..f56431c916a0 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -935,10 +935,10 @@ DEFINE_PAGE_EVENT(xfs_writepage);
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);
 
-DECLARE_EVENT_CLASS(xfs_iomap_class,
+DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
-		 int flags, struct xfs_bmbt_irec *irec),
-	TP_ARGS(ip, offset, count, flags, irec),
+		 int type, struct xfs_bmbt_irec *irec),
+	TP_ARGS(ip, offset, count, type, irec),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
@@ -946,7 +946,7 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
 		__field(loff_t, new_size)
 		__field(loff_t, offset)
 		__field(size_t, count)
-		__field(int, flags)
+		__field(int, type)
 		__field(xfs_fileoff_t, startoff)
 		__field(xfs_fsblock_t, startblock)
 		__field(xfs_filblks_t, blockcount)
@@ -958,13 +958,13 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
 		__entry->new_size = ip->i_new_size;
 		__entry->offset = offset;
 		__entry->count = count;
-		__entry->flags = flags;
+		__entry->type = type;
 		__entry->startoff = irec ? irec->br_startoff : 0;
 		__entry->startblock = irec ? irec->br_startblock : 0;
 		__entry->blockcount = irec ? irec->br_blockcount : 0;
 	),
 	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
-		  "offset 0x%llx count %zd flags %s "
+		  "offset 0x%llx count %zd type %s "
 		  "startoff 0x%llx startblock %lld blockcount 0x%llx",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
@@ -972,20 +972,21 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
 		  __entry->new_size,
 		  __entry->offset,
 		  __entry->count,
-		  __print_flags(__entry->flags, "|", BMAPI_FLAGS),
+		  __print_symbolic(__entry->type, XFS_IO_TYPES),
 		  __entry->startoff,
 		  (__int64_t)__entry->startblock,
 		  __entry->blockcount)
 )
 
 #define DEFINE_IOMAP_EVENT(name)	\
-DEFINE_EVENT(xfs_iomap_class, name,	\
+DEFINE_EVENT(xfs_imap_class, name,	\
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
-		 int flags, struct xfs_bmbt_irec *irec), \
-	TP_ARGS(ip, offset, count, flags, irec))
-DEFINE_IOMAP_EVENT(xfs_iomap_enter);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+		 int type, struct xfs_bmbt_irec *irec), \
+	TP_ARGS(ip, offset, count, type, irec))
+DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1022,6 +1023,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
 	TP_ARGS(ip, offset, count))
 DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 
 
 TRACE_EVENT(xfs_itruncate_start,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 991291068378..22b62a179e89 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,124 +47,8 @@
 
 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
 						<< mp->m_writeio_log)
-#define XFS_STRAT_WRITE_IMAPS	2
 #define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
 
-STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
-				  struct xfs_bmbt_irec *, int);
-STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
-				 struct xfs_bmbt_irec *);
-STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
-				    struct xfs_bmbt_irec *);
-
-int
-xfs_iomap(
-	struct xfs_inode	*ip,
-	xfs_off_t		offset,
-	ssize_t			count,
-	int			flags,
-	struct xfs_bmbt_irec	*imap,
-	int			*nimaps,
-	int			*new)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		offset_fsb, end_fsb;
-	int			error = 0;
-	int			lockmode = 0;
-	int			bmapi_flags = 0;
-
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-
-	*new = 0;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
-
-	trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
-
-	switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
-	case BMAPI_READ:
-		lockmode = xfs_ilock_map_shared(ip);
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-		break;
-	case BMAPI_WRITE:
-		lockmode = XFS_ILOCK_EXCL;
-		if (flags & BMAPI_IGNSTATE)
-			bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
-		xfs_ilock(ip, lockmode);
-		break;
-	case BMAPI_ALLOCATE:
-		lockmode = XFS_ILOCK_SHARED;
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-
-		/* Attempt non-blocking lock */
-		if (flags & BMAPI_TRYLOCK) {
-			if (!xfs_ilock_nowait(ip, lockmode))
-				return XFS_ERROR(EAGAIN);
-		} else {
-			xfs_ilock(ip, lockmode);
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	ASSERT(offset <= mp->m_maxioffset);
-	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
-		count = mp->m_maxioffset - offset;
-	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
-	error = xfs_bmapi(NULL, ip, offset_fsb,
-			(xfs_filblks_t)(end_fsb - offset_fsb),
-			bmapi_flags, NULL, 0, imap,
-			nimaps, NULL);
-
-	if (error)
-		goto out;
-
-	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
-	case BMAPI_WRITE:
-		/* If we found an extent, return it */
-		if (*nimaps &&
-		    (imap->br_startblock != HOLESTARTBLOCK) &&
-		    (imap->br_startblock != DELAYSTARTBLOCK)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		if (flags & BMAPI_DIRECT) {
-			error = xfs_iomap_write_direct(ip, offset, count, imap,
-						       *nimaps);
-		} else {
-			error = xfs_iomap_write_delay(ip, offset, count, imap);
-		}
-
-		if (!error) {
-			trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
-		}
-		*new = 1;
-		break;
-	case BMAPI_ALLOCATE:
-		/* If we found an extent, return it */
-		xfs_iunlock(ip, lockmode);
-		lockmode = 0;
-
-		if (*nimaps && !isnullstartblock(imap->br_startblock)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		error = xfs_iomap_write_allocate(ip, offset, count, imap);
-		break;
-	}
-
-out:
-	if (lockmode)
-		xfs_iunlock(ip, lockmode);
-	return XFS_ERROR(error);
-}
-
 STATIC int
 xfs_iomap_eof_align_last_fsb(
 	xfs_mount_t	*mp,
@@ -233,7 +117,7 @@ xfs_cmn_err_fsblock_zero(
 	return EFSCORRUPTED;
 }
 
-STATIC int
+int
 xfs_iomap_write_direct(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
@@ -428,7 +312,7 @@ xfs_iomap_eof_want_preallocate(
 	return 0;
 }
 
-STATIC int
+int
 xfs_iomap_write_delay(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
@@ -527,7 +411,7 @@ retry:
  * We no longer bother to look at the incoming map - all we have to
  * guarantee is that whatever we allocate fills the required range.
  */
-STATIC int
+int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7748a430f50d..80615760959a 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,30 +18,15 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-/* base extent manipulation calls */
-#define BMAPI_READ	(1 << 0)	/* read extents */
-#define BMAPI_WRITE	(1 << 1)	/* create extents */
-#define BMAPI_ALLOCATE	(1 << 2)	/* delayed allocate to real extents */
-
-/* modifiers */
-#define BMAPI_IGNSTATE	(1 << 4)	/* ignore unwritten state on read */
-#define BMAPI_DIRECT	(1 << 5)	/* direct instead of buffered write */
-#define BMAPI_MMA	(1 << 6)	/* allocate for mmap write */
-#define BMAPI_TRYLOCK	(1 << 7)	/* non-blocking request */
-
-#define BMAPI_FLAGS \
-	{ BMAPI_READ,		"READ" }, \
-	{ BMAPI_WRITE,		"WRITE" }, \
-	{ BMAPI_ALLOCATE,	"ALLOCATE" }, \
-	{ BMAPI_IGNSTATE,	"IGNSTATE" }, \
-	{ BMAPI_DIRECT,		"DIRECT" }, \
-	{ BMAPI_TRYLOCK,	"TRYLOCK" }
-
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
-		     struct xfs_bmbt_irec *, int *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/