author	Alain Renaud <arenaud@sgi.com>	2012-05-22 16:56:21 -0400
committer	Ben Myers <bpm@sgi.com>	2012-07-22 12:00:55 -0400
commit	0d882a360b9012bc7a7e921c935774c3fba1bfd9 (patch)
tree	a5730ef5d51f49116a7776de21810ab9208b2e8d
parent	129dbc9a2d93bab823e57fe47f53d098a0d350f3 (diff)
Prefix IO_XX flags with XFS_IO_XX to avoid namespace collision.
Add an XFS_ prefix to IO_DIRECT, IO_DELALLOC, IO_UNWRITTEN and IO_OVERWRITE, giving XFS_IO_DIRECT, XFS_IO_DELALLOC, XFS_IO_UNWRITTEN and XFS_IO_OVERWRITE. This avoids namespace conflicts with other modules.

Signed-off-by: Alain Renaud <arenaud@sgi.com>
Reviewed-by: Rich Johnston <rjohnston@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
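To illustrate the kind of clash the prefix guards against: a name as generic as IO_DIRECT is easy for an unrelated header to claim as a macro, which the preprocessor would then substitute into the XFS enum. The sketch below is hypothetical and not part of this patch; the io_needs_conversion() helper is invented purely for illustration.

/*
 * Hypothetical sketch, not part of this patch.  If some other header did
 * "#define IO_DIRECT 0x01", the old unprefixed enumerator would be
 * rewritten by the preprocessor and break the build; the XFS_ prefix
 * keeps these names out of that shared namespace.
 */
enum {
	XFS_IO_DIRECT = 0,	/* special case for direct I/O ioends */
	XFS_IO_DELALLOC,	/* covers delalloc region */
	XFS_IO_UNWRITTEN,	/* covers allocated but uninitialized data */
	XFS_IO_OVERWRITE,	/* covers already allocated extent */
};

/* Invented caller: code now spells out the prefixed names. */
static int io_needs_conversion(int io_type)
{
	return io_type == XFS_IO_UNWRITTEN;
}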
-rw-r--r--	fs/xfs/xfs_aops.c	48
-rw-r--r--	fs/xfs/xfs_aops.h	14
2 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 91d77ac51bba..15052ff916ec 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -179,7 +179,7 @@ xfs_finish_ioend(
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
 		struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
 
-		if (ioend->io_type == IO_UNWRITTEN)
+		if (ioend->io_type == XFS_IO_UNWRITTEN)
 			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
 		else if (ioend->io_append_trans)
 			queue_work(mp->m_data_workqueue, &ioend->io_work);
@@ -210,7 +210,7 @@ xfs_end_io(
 	 * For unwritten extents we need to issue transactions to convert a
 	 * range to normal written extens after the data I/O has finished.
 	 */
-	if (ioend->io_type == IO_UNWRITTEN) {
+	if (ioend->io_type == XFS_IO_UNWRITTEN) {
 		/*
 		 * For buffered I/O we never preallocate a transaction when
 		 * doing the unwritten extent conversion, but for direct I/O
@@ -312,7 +312,7 @@ xfs_map_blocks(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -XFS_ERROR(EIO);
 
-	if (type == IO_UNWRITTEN)
+	if (type == XFS_IO_UNWRITTEN)
 		bmapi_flags |= XFS_BMAPI_IGSTATE;
 
 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
@@ -336,7 +336,7 @@ xfs_map_blocks(
 	if (error)
 		return -XFS_ERROR(error);
 
-	if (type == IO_DELALLOC &&
+	if (type == XFS_IO_DELALLOC &&
 	    (!nimaps || isnullstartblock(imap->br_startblock))) {
 		error = xfs_iomap_write_allocate(ip, offset, count, imap);
 		if (!error)
@@ -345,7 +345,7 @@ xfs_map_blocks(
 	}
 
 #ifdef DEBUG
-	if (type == IO_UNWRITTEN) {
+	if (type == XFS_IO_UNWRITTEN) {
 		ASSERT(nimaps);
 		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
@@ -634,11 +634,11 @@ xfs_check_page_type(
 	bh = head = page_buffers(page);
 	do {
 		if (buffer_unwritten(bh))
-			acceptable += (type == IO_UNWRITTEN);
+			acceptable += (type == XFS_IO_UNWRITTEN);
 		else if (buffer_delay(bh))
-			acceptable += (type == IO_DELALLOC);
+			acceptable += (type == XFS_IO_DELALLOC);
 		else if (buffer_dirty(bh) && buffer_mapped(bh))
-			acceptable += (type == IO_OVERWRITE);
+			acceptable += (type == XFS_IO_OVERWRITE);
 		else
 			break;
 	} while ((bh = bh->b_this_page) != head);
@@ -721,11 +721,11 @@ xfs_convert_page(
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    buffer_mapped(bh)) {
 			if (buffer_unwritten(bh))
-				type = IO_UNWRITTEN;
+				type = XFS_IO_UNWRITTEN;
 			else if (buffer_delay(bh))
-				type = IO_DELALLOC;
+				type = XFS_IO_DELALLOC;
 			else
-				type = IO_OVERWRITE;
+				type = XFS_IO_OVERWRITE;
 
 			if (!xfs_imap_valid(inode, imap, offset)) {
 				done = 1;
@@ -733,7 +733,7 @@ xfs_convert_page(
 			}
 
 			lock_buffer(bh);
-			if (type != IO_OVERWRITE)
+			if (type != XFS_IO_OVERWRITE)
 				xfs_map_at_offset(inode, bh, imap, offset);
 			xfs_add_to_ioend(inode, bh, offset, type,
 					 ioendp, done);
@@ -831,7 +831,7 @@ xfs_aops_discard_page(
 	struct buffer_head *bh, *head;
 	loff_t offset = page_offset(page);
 
-	if (!xfs_check_page_type(page, IO_DELALLOC))
+	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -956,7 +956,7 @@ xfs_vm_writepage(
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	type = IO_OVERWRITE;
+	type = XFS_IO_OVERWRITE;
 
 	if (wbc->sync_mode == WB_SYNC_NONE)
 		nonblocking = 1;
@@ -981,18 +981,18 @@ xfs_vm_writepage(
 		}
 
 		if (buffer_unwritten(bh)) {
-			if (type != IO_UNWRITTEN) {
-				type = IO_UNWRITTEN;
+			if (type != XFS_IO_UNWRITTEN) {
+				type = XFS_IO_UNWRITTEN;
 				imap_valid = 0;
 			}
 		} else if (buffer_delay(bh)) {
-			if (type != IO_DELALLOC) {
-				type = IO_DELALLOC;
+			if (type != XFS_IO_DELALLOC) {
+				type = XFS_IO_DELALLOC;
 				imap_valid = 0;
 			}
 		} else if (buffer_uptodate(bh)) {
-			if (type != IO_OVERWRITE) {
-				type = IO_OVERWRITE;
+			if (type != XFS_IO_OVERWRITE) {
+				type = XFS_IO_OVERWRITE;
 				imap_valid = 0;
 			}
 		} else {
@@ -1028,7 +1028,7 @@ xfs_vm_writepage(
 		}
 		if (imap_valid) {
 			lock_buffer(bh);
-			if (type != IO_OVERWRITE)
+			if (type != XFS_IO_OVERWRITE)
 				xfs_map_at_offset(inode, bh, &imap, offset);
 			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
 					 new_ioend);
@@ -1069,7 +1069,7 @@ xfs_vm_writepage(
 		 * Reserve log space if we might write beyond the on-disk
 		 * inode size.
 		 */
-		if (ioend->io_type != IO_UNWRITTEN &&
+		if (ioend->io_type != XFS_IO_UNWRITTEN &&
 		    xfs_ioend_is_append(ioend)) {
 			err = xfs_setfilesize_trans_alloc(ioend);
 			if (err)
@@ -1366,7 +1366,7 @@ xfs_end_io_direct_write(
 	ioend->io_iocb = iocb;
 	ioend->io_result = ret;
 	if (private && size > 0)
-		ioend->io_type = IO_UNWRITTEN;
+		ioend->io_type = XFS_IO_UNWRITTEN;
 
 	if (is_async) {
 		ioend->io_isasync = 1;
@@ -1398,7 +1398,7 @@ xfs_vm_direct_IO(
 		 * and converts at least on unwritten extent we will cancel
 		 * the still clean transaction after the I/O has finished.
 		 */
-		iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT);
+		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
 		if (offset + size > XFS_I(inode)->i_d.di_size) {
 			ret = xfs_setfilesize_trans_alloc(ioend);
 			if (ret)
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 84eafbcb0d9d..c325abb8d61a 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -24,17 +24,17 @@ extern mempool_t *xfs_ioend_pool
  * Types of I/O for bmap clustering and I/O completion tracking.
  */
 enum {
-	IO_DIRECT = 0,	/* special case for direct I/O ioends */
-	IO_DELALLOC,	/* mapping covers delalloc region */
-	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
-	IO_OVERWRITE,	/* mapping covers already allocated extent */
+	XFS_IO_DIRECT = 0,	/* special case for direct I/O ioends */
+	XFS_IO_DELALLOC,	/* covers delalloc region */
+	XFS_IO_UNWRITTEN,	/* covers allocated but uninitialized data */
+	XFS_IO_OVERWRITE,	/* covers already allocated extent */
 };
 
 #define XFS_IO_TYPES \
 	{ 0,			"" }, \
-	{ IO_DELALLOC,		"delalloc" }, \
-	{ IO_UNWRITTEN,		"unwritten" }, \
-	{ IO_OVERWRITE,		"overwrite" }
+	{ XFS_IO_DELALLOC,	"delalloc" }, \
+	{ XFS_IO_UNWRITTEN,	"unwritten" }, \
+	{ XFS_IO_OVERWRITE,	"overwrite" }
 
 /*
  * xfs_ioend struct manages large extent writes for XFS.
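The { value, "name" } pairs laid out by XFS_IO_TYPES keep the trace labels in step with the renamed enumerators; in the XFS tracing code such a table is typically handed to the kernel's __print_symbolic() helper to turn an io_type into readable trace output. Below is a simplified, user-space sketch of the same lookup; the xfs_io_type_name() helper is invented for illustration and is not part of the patch.

#include <stdio.h>

/* Mirror of the XFS_IO_TYPES pairs (XFS_IO_DELALLOC = 1, and so on). */
static const struct {
	int		type;
	const char	*name;
} io_type_names[] = {
	{ 1, "delalloc" },	/* XFS_IO_DELALLOC */
	{ 2, "unwritten" },	/* XFS_IO_UNWRITTEN */
	{ 3, "overwrite" },	/* XFS_IO_OVERWRITE */
};

/* Invented helper: translate an io_type into its trace label. */
static const char *xfs_io_type_name(int type)
{
	for (size_t i = 0; i < sizeof(io_type_names) / sizeof(io_type_names[0]); i++)
		if (io_type_names[i].type == type)
			return io_type_names[i].name;
	return "";
}

int main(void)
{
	printf("%s\n", xfs_io_type_name(2));	/* prints "unwritten" */
	return 0;
}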