-rw-r--r--  fs/xfs/xfs_aops.c     | 39
-rw-r--r--  fs/xfs/xfs_aops.h     |  3
-rw-r--r--  fs/xfs/xfs_file.c     |  8
-rw-r--r--  fs/xfs/xfs_iget.c     |  2
-rw-r--r--  fs/xfs/xfs_inode.h    |  1
-rw-r--r--  fs/xfs/xfs_iops.c     |  4
-rw-r--r--  fs/xfs/xfs_super.c    |  7
-rw-r--r--  fs/xfs/xfs_sync.c     |  8
-rw-r--r--  fs/xfs/xfs_vnodeops.c |  4
9 files changed, 9 insertions(+), 67 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index e1ff0770784e..46bba3e0af47 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -38,40 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-
-/*
- * Prime number of hash buckets since address is used as the key.
- */
-#define NVSYNC		37
-#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
-static wait_queue_head_t xfs_ioend_wq[NVSYNC];
-
-void __init
-xfs_ioend_init(void)
-{
-	int i;
-
-	for (i = 0; i < NVSYNC; i++)
-		init_waitqueue_head(&xfs_ioend_wq[i]);
-}
-
-void
-xfs_ioend_wait(
-	xfs_inode_t	*ip)
-{
-	wait_queue_head_t *wq = to_ioend_wq(ip);
-
-	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
-}
-
-STATIC void
-xfs_ioend_wake(
-	xfs_inode_t	*ip)
-{
-	if (atomic_dec_and_test(&ip->i_iocount))
-		wake_up(to_ioend_wq(ip));
-}
-
 void
 xfs_count_page_state(
 	struct page		*page,
@@ -115,7 +81,6 @@ xfs_destroy_ioend(
 	xfs_ioend_t		*ioend)
 {
 	struct buffer_head	*bh, *next;
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 
 	for (bh = ioend->io_buffer_head; bh; bh = next) {
 		next = bh->b_private;
@@ -127,7 +92,7 @@ xfs_destroy_ioend(
 		aio_complete(ioend->io_iocb, ioend->io_result, 0);
 		inode_dio_done(ioend->io_inode);
 	}
-	xfs_ioend_wake(ip);
+
 	mempool_free(ioend, xfs_ioend_pool);
 }
 
@@ -298,7 +263,6 @@ xfs_alloc_ioend(
 	ioend->io_inode = inode;
 	ioend->io_buffer_head = NULL;
 	ioend->io_buffer_tail = NULL;
-	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 	ioend->io_iocb = NULL;
@@ -558,7 +522,6 @@ xfs_cancel_ioend(
 			unlock_buffer(bh);
 		} while ((bh = next_bh) != NULL);
 
-		xfs_ioend_wake(XFS_I(ioend->io_inode));
 		mempool_free(ioend, xfs_ioend_pool);
 	} while ((ioend = next) != NULL);
 }
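
The hunks above drop XFS's private I/O-completion tracking: the per-inode i_iocount counter and the NVSYNC-bucket hashed wait queues it was paired with. Waiters are expected to rely on the generic per-inode direct-I/O accounting instead; the completion side is already visible above, where xfs_destroy_ioend() calls inode_dio_done(). A minimal sketch of the waiting side, assuming only the stock VFS helpers (the wrapper name below is illustrative and not something this patch adds):

	#include <linux/fs.h>

	/*
	 * Sketch only, not part of this patch: block until every direct I/O
	 * in flight against @inode has completed.  inode_dio_wait() sleeps
	 * until the inode's DIO count, raised at submission and dropped by
	 * inode_dio_done() at completion, reaches zero.  The caller is
	 * assumed to hold a lock that keeps new direct I/O from being
	 * issued, otherwise the wait can race with fresh submissions.
	 */
	static void wait_for_pending_dio(struct inode *inode)
	{
		inode_dio_wait(inode);
	}
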
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index ce3dcb50762e..116dd5c37034 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -61,9 +61,6 @@ typedef struct xfs_ioend {
 extern const struct address_space_operations xfs_address_space_operations;
 extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
 
-extern void xfs_ioend_init(void);
-extern void xfs_ioend_wait(struct xfs_inode *);
-
 extern void xfs_count_page_state(struct page *, int *, int *);
 
 #endif	/* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index cbbac5cc9c26..ee63c4fb3639 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -149,10 +149,6 @@ xfs_file_fsync(
 
 	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	xfs_ioend_wait(ip);
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
 	if (mp->m_flags & XFS_MOUNT_BARRIER) {
 		/*
 		 * If we have an RT and/or log subvolume we need to make sure
@@ -758,7 +754,7 @@ restart:
  * the dio layer. To avoid the problem with aio, we also need to wait for
  * outstanding IOs to complete so that unwritten extent conversion is completed
  * before we try to map the overlapping block. This is currently implemented by
- * hitting it with a big hammer (i.e. xfs_ioend_wait()).
+ * hitting it with a big hammer (i.e. inode_dio_wait()).
  *
  * Returns with locks held indicated by @iolock and errors indicated by
  * negative return values.
@@ -821,7 +817,7 @@ xfs_file_dio_aio_write(
 	 * otherwise demote the lock if we had to flush cached pages
 	 */
 	if (unaligned_io)
-		xfs_ioend_wait(ip);
+		inode_dio_wait(inode);
 	else if (*iolock == XFS_IOLOCK_EXCL) {
 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 		*iolock = XFS_IOLOCK_SHARED;
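
As the comment in the earlier hunk explains, an unaligned direct write cannot run concurrently with other direct I/O to the same blocks because the dio layer zeroes sub-block regions, so the write path keeps the "big hammer" but now expresses it through the VFS helper: drain all in-flight direct I/O while still holding the iolock exclusively, otherwise demote to a shared lock. A condensed sketch of that decision (the wrapper function is illustrative only; the real logic lives inline in xfs_file_dio_aio_write() as shown above):

	/*
	 * Sketch, not the exact XFS code: serialize an unaligned direct
	 * write against everything already in flight, or let aligned
	 * writers proceed concurrently under a shared iolock.
	 */
	static void xfs_dio_write_serialize(struct xfs_inode *ip, struct inode *inode,
					    int unaligned_io, int *iolock)
	{
		if (unaligned_io)
			inode_dio_wait(inode);		/* wait for all outstanding direct I/O */
		else if (*iolock == XFS_IOLOCK_EXCL) {
			xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
			*iolock = XFS_IOLOCK_SHARED;	/* caller now holds the shared iolock */
		}
	}
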
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 7759812c1bbe..0fa98b1c70ea 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -75,7 +75,6 @@ xfs_inode_alloc(
 		return NULL;
 	}
 
-	ASSERT(atomic_read(&ip->i_iocount) == 0);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
@@ -150,7 +149,6 @@ xfs_inode_free(
 	}
 
 	/* asserts to verify all state is correct here */
-	ASSERT(atomic_read(&ip->i_iocount) == 0);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 2380a4bcbece..760140d1dd66 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -257,7 +257,6 @@ typedef struct xfs_inode {
 
 	xfs_fsize_t		i_size;		/* in-memory size */
 	xfs_fsize_t		i_new_size;	/* size when write completes */
-	atomic_t		i_iocount;	/* outstanding I/O count */
 
 	/* VFS inode */
 	struct inode		i_vnode;	/* embedded VFS inode */
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 32aca87bde5e..e041e917c1d9 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -840,9 +840,9 @@ xfs_setattr_size(
 	}
 
 	/*
-	 * Wait for all I/O to complete.
+	 * Wait for all direct I/O to complete.
 	 */
-	xfs_ioend_wait(ip);
+	inode_dio_wait(inode);
 
 	error = -block_truncate_page(inode->i_mapping, iattr->ia_size,
 				     xfs_get_blocks);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2366c54cc4fa..54d5e102ffe1 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -796,8 +796,6 @@ xfs_fs_destroy_inode(
 	if (is_bad_inode(inode))
 		goto out_reclaim;
 
-	xfs_ioend_wait(ip);
-
 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
 
 	/*
@@ -837,7 +835,6 @@ xfs_fs_inode_init_once(
 	inode_init_once(VFS_I(ip));
 
 	/* xfs inode */
-	atomic_set(&ip->i_iocount, 0);
 	atomic_set(&ip->i_pincount, 0);
 	spin_lock_init(&ip->i_flags_lock);
 	init_waitqueue_head(&ip->i_ipin_wait);
@@ -914,9 +911,8 @@ xfs_fs_write_inode(
 		 * of forcing it all the way to stable storage using a
 		 * synchronous transaction we let the log force inside the
 		 * ->sync_fs call do that for thus, which reduces the number
-		 * of synchronous log foces dramatically.
+		 * of synchronous log forces dramatically.
 		 */
-		xfs_ioend_wait(ip);
 		error = xfs_log_inode(ip);
 		if (error)
 			goto out;
@@ -1681,7 +1677,6 @@ init_xfs_fs(void)
 	printk(KERN_INFO XFS_VERSION_STRING " with "
 			 XFS_BUILD_OPTIONS " enabled\n");
 
-	xfs_ioend_init();
 	xfs_dir_startup();
 
 	error = xfs_init_zones();
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 90cc197e0433..bf2b38c21caa 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -227,21 +227,17 @@ xfs_sync_inode_data(
 	int			error = 0;
 
 	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-		goto out_wait;
+		return 0;
 
 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
 		if (flags & SYNC_TRYLOCK)
-			goto out_wait;
+			return 0;
 		xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	}
 
 	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
 				0 : XBF_ASYNC, FI_NONE);
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
- out_wait:
-	if (flags & SYNC_WAIT)
-		xfs_ioend_wait(ip);
 	return error;
 }
 
247 243
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 51fc429527bc..c2ff0fc86567 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -647,8 +647,6 @@ xfs_inactive(
 	if (truncate) {
 		xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
-		xfs_ioend_wait(ip);
-
 		error = xfs_trans_reserve(tp, 0,
 					  XFS_ITRUNCATE_LOG_RES(mp),
 					  0, XFS_TRANS_PERM_LOG_RES,
@@ -2076,7 +2074,7 @@ xfs_free_file_space(
 	if (need_iolock) {
 		xfs_ilock(ip, XFS_IOLOCK_EXCL);
 		/* wait for the completion of any pending DIOs */
-		xfs_ioend_wait(ip);
+		inode_dio_wait(VFS_I(ip));
 	}
 
 	rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);