author		Christoph Hellwig <hch@infradead.org>	2010-04-28 08:28:57 -0400
committer	Alex Elder <aelder@sgi.com>	2010-05-19 10:58:17 -0400
commit		34a52c6c064fb9f1fd1310407ce076a4bb049734 (patch)
tree		2b05a8d21f644ab733f7e242971bd0314c46b355 /fs
parent		207d041602cead1c1a16288f6225aea9da1f5bc4 (diff)
xfs: move I/O type flags into xfs_aops.c
The IOMAP_ flags are now only used inside xfs_aops.c for extent probing and I/O completion tracking, so move them here, and rename them to IO_* as there's no mapping involved at all.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	51
-rw-r--r--	fs/xfs/xfs_iomap.h	8
2 files changed, 30 insertions, 29 deletions
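In the code this patch touches, the I/O type is only ever assigned and tested for equality, so the bit-valued iomap_flags_t can be collapsed into a plain file-local enum. Below is a minimal, self-contained C sketch of the resulting pattern; the enum values and workqueue names mirror the patch, but struct ioend and completion_queue() are simplified stand-ins for illustration, not the real xfs_ioend_t or XFS completion code.

#include <stdio.h>

enum {
	IO_READ,	/* mapping for a read */
	IO_DELAY,	/* mapping covers delalloc region */
	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
	IO_NEW		/* just allocated */
};

/* reduced stand-in for xfs_ioend_t, keeping only the field used here */
struct ioend {
	int	io_type;
};

/* completion paths only ever compare the type, so plain enumerators suffice */
static const char *completion_queue(const struct ioend *ioend)
{
	return ioend->io_type == IO_UNWRITTEN ?
		"xfsconvertd_workqueue" : "xfsdatad_workqueue";
}

int main(void)
{
	struct ioend unwritten = { .io_type = IO_UNWRITTEN };
	struct ioend fresh = { .io_type = IO_NEW };

	printf("%s\n", completion_queue(&unwritten));	/* xfsconvertd_workqueue */
	printf("%s\n", completion_queue(&fresh));	/* xfsdatad_workqueue */
	return 0;
}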
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 70ce1da73d01..f1dd70e201cf 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -45,6 +45,15 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
+/*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+	IO_READ,	/* mapping for a read */
+	IO_DELAY,	/* mapping covers delalloc region */
+	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
+	IO_NEW		/* just allocated */
+};
 
 /*
  * Prime number of hash buckets since address is used as the key.
@@ -184,7 +193,7 @@ xfs_setfilesize(
 	xfs_fsize_t		isize;
 
 	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(ioend->io_type != IOMAP_READ);
+	ASSERT(ioend->io_type != IO_READ);
 
 	if (unlikely(ioend->io_error))
 		return 0;
@@ -215,7 +224,7 @@ xfs_finish_ioend(
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
 		struct workqueue_struct *wq;
 
-		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+		wq = (ioend->io_type == IO_UNWRITTEN) ?
 			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
@@ -238,7 +247,7 @@ xfs_end_io(
 	 * For unwritten extents we need to issue transactions to convert a
 	 * range to normal written extens after the data I/O has finished.
 	 */
-	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	if (ioend->io_type == IO_UNWRITTEN &&
 	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
 
 		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
@@ -251,7 +260,7 @@ xfs_end_io(
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	if (ioend->io_type != IOMAP_READ) {
+	if (ioend->io_type != IO_READ) {
 		error = xfs_setfilesize(ioend);
 		ASSERT(!error || error == EAGAIN);
 	}
@@ -724,11 +733,11 @@ xfs_is_delayed_page(
 	bh = head = page_buffers(page);
 	do {
 		if (buffer_unwritten(bh))
-			acceptable = (type == IOMAP_UNWRITTEN);
+			acceptable = (type == IO_UNWRITTEN);
 		else if (buffer_delay(bh))
-			acceptable = (type == IOMAP_DELAY);
+			acceptable = (type == IO_DELAY);
 		else if (buffer_dirty(bh) && buffer_mapped(bh))
-			acceptable = (type == IOMAP_NEW);
+			acceptable = (type == IO_NEW);
 		else
 			break;
 	} while ((bh = bh->b_this_page) != head);
@@ -812,9 +821,9 @@ xfs_convert_page(
 
 		if (buffer_unwritten(bh) || buffer_delay(bh)) {
 			if (buffer_unwritten(bh))
-				type = IOMAP_UNWRITTEN;
+				type = IO_UNWRITTEN;
 			else
-				type = IOMAP_DELAY;
+				type = IO_DELAY;
 
 			if (!xfs_iomap_valid(inode, imap, offset)) {
 				done = 1;
@@ -836,7 +845,7 @@ xfs_convert_page(
 			page_dirty--;
 			count++;
 		} else {
-			type = IOMAP_NEW;
+			type = IO_NEW;
 			if (buffer_mapped(bh) && all_bh && startio) {
 				lock_buffer(bh);
 				xfs_add_to_ioend(inode, bh, offset,
@@ -940,7 +949,7 @@ xfs_aops_discard_page(
 	loff_t			offset = page_offset(page);
 	ssize_t			len = 1 << inode->i_blkbits;
 
-	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+	if (!xfs_is_delayed_page(page, IO_DELAY))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1107,7 +1116,7 @@ xfs_page_state_convert(
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
 	flags = BMAPI_READ;
-	type = IOMAP_NEW;
+	type = IO_NEW;
 
 	/* TODO: cleanup count and page_dirty */
 
@@ -1150,13 +1159,13 @@ xfs_page_state_convert(
 			iomap_valid = 0;
 
 		if (buffer_unwritten(bh)) {
-			type = IOMAP_UNWRITTEN;
+			type = IO_UNWRITTEN;
 			flags = BMAPI_WRITE | BMAPI_IGNSTATE;
 		} else if (buffer_delay(bh)) {
-			type = IOMAP_DELAY;
+			type = IO_DELAY;
 			flags = BMAPI_ALLOCATE | trylock;
 		} else {
-			type = IOMAP_NEW;
+			type = IO_NEW;
 			flags = BMAPI_WRITE | BMAPI_MMAP;
 		}
 
@@ -1170,7 +1179,7 @@ xfs_page_state_convert(
 			 * for unwritten extent conversion.
 			 */
 			new_ioend = 1;
-			if (type == IOMAP_NEW) {
+			if (type == IO_NEW) {
 				size = xfs_probe_cluster(inode,
 						page, bh, head, 0);
 			} else {
@@ -1215,14 +1224,14 @@ xfs_page_state_convert(
 			}
 
 			/*
-			 * We set the type to IOMAP_NEW in case we are doing a
+			 * We set the type to IO_NEW in case we are doing a
 			 * small write at EOF that is extending the file but
 			 * without needing an allocation. We need to update the
 			 * file size on I/O completion in this case so it is
 			 * the same case as having just allocated a new extent
 			 * that we are writing into for the first time.
 			 */
-			type = IOMAP_NEW;
+			type = IO_NEW;
 			if (trylock_buffer(bh)) {
 				ASSERT(buffer_mapped(bh));
 				if (iomap_valid)
@@ -1594,7 +1603,7 @@ xfs_end_io_direct(
 	 */
 	ioend->io_offset = offset;
 	ioend->io_size = size;
-	if (ioend->io_type == IOMAP_READ) {
+	if (ioend->io_type == IO_READ) {
 		xfs_finish_ioend(ioend, 0);
 	} else if (private && size > 0) {
 		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
@@ -1605,7 +1614,7 @@ xfs_end_io_direct(
 		 * didn't map an unwritten extent so switch it's completion
 		 * handler.
 		 */
-		ioend->io_type = IOMAP_NEW;
+		ioend->io_type = IO_NEW;
 		xfs_finish_ioend(ioend, 0);
 	}
 
@@ -1633,7 +1642,7 @@ xfs_vm_direct_IO(
 	bdev = xfs_find_bdev_for_inode(inode);
 
 	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
-					IOMAP_UNWRITTEN : IOMAP_READ);
+					IO_UNWRITTEN : IO_READ);
 
 	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
 						offset, nr_segs,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index ba49a4fd1b3f..41e32d20a405 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,14 +18,6 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-typedef enum {				/* iomap_flags values */
-	IOMAP_READ =		0,	/* mapping for a read */
-	IOMAP_DELAY =		0x04,	/* mapping covers delalloc region */
-	IOMAP_UNWRITTEN =	0x20,	/* mapping covers allocated */
-					/* but uninitialized file data */
-	IOMAP_NEW =		0x40	/* just allocate */
-} iomap_flags_t;
-
 typedef enum {
 	/* base extent manipulation calls */
 	BMAPI_READ = (1 << 0),		/* read extents */