path: root/fs/xfs/linux-2.6/xfs_sync.c
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_sync.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 316
1 file changed, 150 insertions, 166 deletions
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 7eb9d9cca52a..9798643feb3b 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -49,6 +49,123 @@
 #include <linux/freezer.h>
 
 
+STATIC xfs_inode_t *
+xfs_inode_ag_lookup(
+	struct xfs_mount	*mp,
+	struct xfs_perag	*pag,
+	uint32_t		*first_index,
+	int			tag)
+{
+	int			nr_found;
+	struct xfs_inode	*ip;
+
+	/*
+	 * use a gang lookup to find the next inode in the tree
+	 * as the tree is sparse and a gang lookup walks to find
+	 * the number of objects requested.
+	 */
+	read_lock(&pag->pag_ici_lock);
+	if (tag == XFS_ICI_NO_TAG) {
+		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+				(void **)&ip, *first_index, 1);
+	} else {
+		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
+				(void **)&ip, *first_index, 1, tag);
+	}
+	if (!nr_found)
+		goto unlock;
+
+	/*
+	 * Update the index for the next lookup. Catch overflows
+	 * into the next AG range which can occur if we have inodes
+	 * in the last block of the AG and we are currently
+	 * pointing to the last inode.
+	 */
+	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+		goto unlock;
+
+	return ip;
+
+unlock:
+	read_unlock(&pag->pag_ici_lock);
+	return NULL;
+}
+
+STATIC int
+xfs_inode_ag_walk(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		ag,
+	int			(*execute)(struct xfs_inode *ip,
+					   struct xfs_perag *pag, int flags),
+	int			flags,
+	int			tag)
+{
+	struct xfs_perag	*pag = &mp->m_perag[ag];
+	uint32_t		first_index;
+	int			last_error = 0;
+	int			skipped;
+
+restart:
+	skipped = 0;
+	first_index = 0;
+	do {
+		int		error = 0;
+		xfs_inode_t	*ip;
+
+		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
+		if (!ip)
+			break;
+
+		error = execute(ip, pag, flags);
+		if (error == EAGAIN) {
+			skipped++;
+			continue;
+		}
+		if (error)
+			last_error = error;
+		/*
+		 * bail out if the filesystem is corrupted.
+		 */
+		if (error == EFSCORRUPTED)
+			break;
+
+	} while (1);
+
+	if (skipped) {
+		delay(1);
+		goto restart;
+	}
+
+	xfs_put_perag(mp, pag);
+	return last_error;
+}
+
+STATIC int
+xfs_inode_ag_iterator(
+	struct xfs_mount	*mp,
+	int			(*execute)(struct xfs_inode *ip,
+					   struct xfs_perag *pag, int flags),
+	int			flags,
+	int			tag)
+{
+	int			error = 0;
+	int			last_error = 0;
+	xfs_agnumber_t		ag;
+
+	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+		if (!mp->m_perag[ag].pag_ici_init)
+			continue;
+		error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
+		if (error) {
+			last_error = error;
+			if (error == EFSCORRUPTED)
+				break;
+		}
+	}
+	return XFS_ERROR(last_error);
+}
+
 /* must be called with pag_ici_lock held and releases it */
 STATIC int
 xfs_sync_inode_valid(
@@ -85,12 +202,17 @@ xfs_sync_inode_valid(
 STATIC int
 xfs_sync_inode_data(
 	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
 	int			flags)
 {
 	struct inode		*inode = VFS_I(ip);
 	struct address_space	*mapping = inode->i_mapping;
 	int			error = 0;
 
+	error = xfs_sync_inode_valid(ip, pag);
+	if (error)
+		return error;
+
 	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 		goto out_wait;
 
@@ -107,16 +229,22 @@ xfs_sync_inode_data(
  out_wait:
 	if (flags & SYNC_IOWAIT)
 		xfs_ioend_wait(ip);
+	IRELE(ip);
 	return error;
 }
 
 STATIC int
 xfs_sync_inode_attr(
 	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
 	int			flags)
 {
 	int			error = 0;
 
+	error = xfs_sync_inode_valid(ip, pag);
+	if (error)
+		return error;
+
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 	if (xfs_inode_clean(ip))
 		goto out_unlock;
@@ -136,117 +264,33 @@ xfs_sync_inode_attr(
 
  out_unlock:
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	IRELE(ip);
 	return error;
 }
 
-/*
- * Sync all the inodes in the given AG according to the
- * direction given by the flags.
- */
-STATIC int
-xfs_sync_inodes_ag(
-	xfs_mount_t	*mp,
-	int		ag,
-	int		flags)
-{
-	xfs_perag_t	*pag = &mp->m_perag[ag];
-	int		nr_found;
-	uint32_t	first_index = 0;
-	int		error = 0;
-	int		last_error = 0;
-
-	do {
-		xfs_inode_t	*ip = NULL;
-
-		/*
-		 * use a gang lookup to find the next inode in the tree
-		 * as the tree is sparse and a gang lookup walks to find
-		 * the number of objects requested.
-		 */
-		read_lock(&pag->pag_ici_lock);
-		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
-				(void**)&ip, first_index, 1);
-
-		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		/*
-		 * Update the index for the next lookup. Catch overflows
-		 * into the next AG range which can occur if we have inodes
-		 * in the last block of the AG and we are currently
-		 * pointing to the last inode.
-		 */
-		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		error = xfs_sync_inode_valid(ip, pag);
-		if (error) {
-			if (error == EFSCORRUPTED)
-				return 0;
-			continue;
-		}
-
-		/*
-		 * If we have to flush data or wait for I/O completion
-		 * we need to hold the iolock.
-		 */
-		if (flags & SYNC_DELWRI)
-			error = xfs_sync_inode_data(ip, flags);
-
-		if (flags & SYNC_ATTR)
-			error = xfs_sync_inode_attr(ip, flags);
-
-		IRELE(ip);
-
-		if (error)
-			last_error = error;
-		/*
-		 * bail out if the filesystem is corrupted.
-		 */
-		if (error == EFSCORRUPTED)
-			return XFS_ERROR(error);
-
-	} while (nr_found);
-
-	return last_error;
-}
-
 int
 xfs_sync_inodes(
 	xfs_mount_t	*mp,
 	int		flags)
 {
-	int		error;
-	int		last_error;
-	int		i;
+	int		error = 0;
 	int		lflags = XFS_LOG_FORCE;
 
 	if (mp->m_flags & XFS_MOUNT_RDONLY)
 		return 0;
-	error = 0;
-	last_error = 0;
 
 	if (flags & SYNC_WAIT)
 		lflags |= XFS_LOG_SYNC;
 
-	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
-		if (!mp->m_perag[i].pag_ici_init)
-			continue;
-		error = xfs_sync_inodes_ag(mp, i, flags);
-		if (error)
-			last_error = error;
-		if (error == EFSCORRUPTED)
-			break;
-	}
 	if (flags & SYNC_DELWRI)
-		xfs_log_force(mp, 0, lflags);
+		error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, XFS_ICI_NO_TAG);
 
-	return XFS_ERROR(last_error);
+	if (flags & SYNC_ATTR)
+		error = xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, XFS_ICI_NO_TAG);
+
+	if (!error && (flags & SYNC_DELWRI))
+		xfs_log_force(mp, 0, lflags);
+	return XFS_ERROR(error);
 }
 
 STATIC int
@@ -613,7 +657,7 @@ xfs_reclaim_inode(
 			xfs_ifunlock(ip);
 			xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		}
-		return 1;
+		return -EAGAIN;
 	}
 	__xfs_iflags_set(ip, XFS_IRECLAIM);
 	spin_unlock(&ip->i_flags_lock);
@@ -698,72 +742,20 @@ xfs_inode_clear_reclaim_tag(
 	xfs_put_perag(mp, pag);
 }
 
-
-STATIC void
-xfs_reclaim_inodes_ag(
-	xfs_mount_t	*mp,
-	int		ag,
-	int		mode)
+STATIC int
+xfs_reclaim_inode_now(
+	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
+	int			flags)
 {
-	xfs_inode_t	*ip = NULL;
-	xfs_perag_t	*pag = &mp->m_perag[ag];
-	int		nr_found;
-	uint32_t	first_index;
-	int		skipped;
-
-restart:
-	first_index = 0;
-	skipped = 0;
-	do {
-		/*
-		 * use a gang lookup to find the next inode in the tree
-		 * as the tree is sparse and a gang lookup walks to find
-		 * the number of objects requested.
-		 */
-		read_lock(&pag->pag_ici_lock);
-		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
-					(void**)&ip, first_index, 1,
-					XFS_ICI_RECLAIM_TAG);
-
-		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		/*
-		 * Update the index for the next lookup. Catch overflows
-		 * into the next AG range which can occur if we have inodes
-		 * in the last block of the AG and we are currently
-		 * pointing to the last inode.
-		 */
-		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
-			read_unlock(&pag->pag_ici_lock);
-			break;
-		}
-
-		/* ignore if already under reclaim */
-		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
-			read_unlock(&pag->pag_ici_lock);
-			continue;
-		}
-
+	/* ignore if already under reclaim */
+	if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
 		read_unlock(&pag->pag_ici_lock);
-
-		/*
-		 * hmmm - this is an inode already in reclaim. Do
-		 * we even bother catching it here?
-		 */
-		if (xfs_reclaim_inode(ip, 0, mode))
-			skipped++;
-	} while (nr_found);
-
-	if (skipped) {
-		delay(1);
-		goto restart;
+		return 0;
 	}
-	return;
+	read_unlock(&pag->pag_ici_lock);
 
+	return xfs_reclaim_inode(ip, 0, flags);
 }
 
 int
@@ -771,14 +763,6 @@ xfs_reclaim_inodes(
 	xfs_mount_t	*mp,
 	int		mode)
 {
-	int		i;
-
-	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
-		if (!mp->m_perag[i].pag_ici_init)
-			continue;
-		xfs_reclaim_inodes_ag(mp, i, mode);
-	}
-	return 0;
+	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
+					XFS_ICI_RECLAIM_TAG);
 }
-
-
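
For reference, below is a minimal, stand-alone user-space sketch (not part of the patch) of the callback-driven per-AG walk that xfs_inode_ag_iterator() introduces above. All demo_* names are hypothetical stand-ins; only the control flow is modelled: skip uninitialised AGs, invoke a function pointer for each remaining one, remember the last error, and abort the walk on a fatal error.

/*
 * User-space analogue of the per-AG iterator added in this patch.
 * demo_* identifiers are invented for illustration only.
 */
#include <stdio.h>

#define DEMO_EFSCORRUPTED	117	/* stand-in for EFSCORRUPTED */

struct demo_ag {
	int	initialised;		/* mirrors pag_ici_init */
	int	id;
};

struct demo_mount {
	struct demo_ag	ags[4];
	int		agcount;	/* mirrors m_sb.sb_agcount */
};

/* same shape as the execute() callback taken by xfs_inode_ag_iterator() */
typedef int (*demo_execute_t)(struct demo_ag *ag, int flags);

static int
demo_ag_iterator(struct demo_mount *mp, demo_execute_t execute, int flags)
{
	int	last_error = 0;

	for (int i = 0; i < mp->agcount; i++) {
		int	error;

		if (!mp->ags[i].initialised)
			continue;			/* nothing to visit */
		error = execute(&mp->ags[i], flags);
		if (error) {
			last_error = error;
			if (error == DEMO_EFSCORRUPTED)
				break;			/* fatal, stop the walk */
		}
	}
	return last_error;
}

/* trivial callback: just report which AG was visited */
static int
demo_print_ag(struct demo_ag *ag, int flags)
{
	printf("visiting AG %d (flags 0x%x)\n", ag->id, (unsigned)flags);
	return 0;
}

int
main(void)
{
	struct demo_mount mp = {
		.ags = { { 1, 0 }, { 0, 1 }, { 1, 2 }, { 1, 3 } },
		.agcount = 4,
	};

	/* AGs 0, 2 and 3 are visited; AG 1 is skipped as uninitialised */
	return demo_ag_iterator(&mp, demo_print_ag, 0x1);
}

The same shape is what lets xfs_sync_inodes() and xfs_reclaim_inodes() in the patch share one walker and differ only in the callback and radix-tree tag they pass in.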