author	Lachlan McIlroy <lachlan@sgi.com>	2007-09-14 01:21:08 -0400
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-02-07 00:40:53 -0500
commit	44866d39282d0782b15fa4cb62aad937bf0a0897 (patch)
tree	12cb75bb94a86952106ee58c5e83709b93afddf7
parent	9ef9dc69d4167276c04590d67ee55de8380bc1ad (diff)
[XFS] remove dead SYNC_BDFLUSH case in xfs_sync_inodes
A large part of xfs_sync_inodes is conditional on the SYNC_BDFLUSH flag,
which is never passed to it.  This patch removes that code and adds an
assert that triggers in case some new code tries to pass SYNC_BDFLUSH to it.

SGI-PV: 970242
SGI-Modid: xfs-linux-melb:xfs-kern:29630a

Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Tim Shimmin <tes@sgi.com>
-rw-r--r--	fs/xfs/xfs_vfsops.c	198
1 file changed, 23 insertions(+), 175 deletions(-)
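The heart of the change shows up in the first hunks below: instead of carrying code that only runs when SYNC_BDFLUSH is set, xfs_sync_inodes now asserts that the flag is never passed. As a rough, self-contained sketch of that "assert the precondition instead of keeping a dead branch" pattern in plain userspace C (the flag values and the sync_inodes() function here are invented for illustration and are not the XFS definitions):

#include <assert.h>
#include <stdio.h>

/* Invented flag values for illustration only; the real SYNC_* flags
 * are defined in the XFS headers. */
#define SYNC_ATTR	0x0001
#define SYNC_WAIT	0x0002
#define SYNC_DELWRI	0x0004
#define SYNC_BDFLUSH	0x0008

static int sync_inodes(int flags)
{
	/*
	 * No caller passes SYNC_BDFLUSH any more, so rather than keeping
	 * a dead code path for it, trap any future misuse up front.
	 */
	assert(!(flags & SYNC_BDFLUSH));

	printf("syncing inodes, flags 0x%x\n", flags);
	return 0;
}

int main(void)
{
	sync_inodes(SYNC_ATTR | SYNC_WAIT);	/* fine */
	/* sync_inodes(SYNC_BDFLUSH); */	/* would trip the assert */
	return 0;
}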
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index a1544597bcd3..0f237f9f0c3a 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -981,8 +981,6 @@ xfs_sync_inodes(
 	int		*bypassed)
 {
 	xfs_inode_t	*ip = NULL;
-	xfs_inode_t	*ip_next;
-	xfs_buf_t	*bp;
 	bhv_vnode_t	*vp = NULL;
 	int		error;
 	int		last_error;
@@ -992,7 +990,6 @@ xfs_sync_inodes(
 	boolean_t	mount_locked;
 	boolean_t	vnode_refed;
 	int		preempt;
-	xfs_dinode_t	*dip;
 	xfs_iptr_t	*ipointer;
 #ifdef DEBUG
 	boolean_t	ipointer_in = B_FALSE;
@@ -1045,6 +1042,8 @@ xfs_sync_inodes(
 
 #define XFS_PREEMPT_MASK	0x7f
 
+	ASSERT(!(flags & SYNC_BDFLUSH));
+
 	if (bypassed)
 		*bypassed = 0;
 	if (mp->m_flags & XFS_MOUNT_RDONLY)
@@ -1057,7 +1056,7 @@ xfs_sync_inodes(
 	ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP);
 
 	fflag = XFS_B_ASYNC;		/* default is don't wait */
-	if (flags & (SYNC_BDFLUSH | SYNC_DELWRI))
+	if (flags & SYNC_DELWRI)
 		fflag = XFS_B_DELWRI;
 	if (flags & SYNC_WAIT)
 		fflag = 0;		/* synchronous overrides all */
@@ -1147,24 +1146,6 @@ xfs_sync_inodes(
 		}
 
 		/*
-		 * If this is just vfs_sync() or pflushd() calling
-		 * then we can skip inodes for which it looks like
-		 * there is nothing to do.  Since we don't have the
-		 * inode locked this is racy, but these are periodic
-		 * calls so it doesn't matter.  For the others we want
-		 * to know for sure, so we at least try to lock them.
-		 */
-		if (flags & SYNC_BDFLUSH) {
-			if (((ip->i_itemp == NULL) ||
-			     !(ip->i_itemp->ili_format.ilf_fields &
-			       XFS_ILOG_ALL)) &&
-			    (ip->i_update_core == 0)) {
-				ip = ip->i_mnext;
-				continue;
-			}
-		}
-
-		/*
 		 * Try to lock without sleeping.  We're out of order with
 		 * the inode list lock here, so if we fail we need to drop
 		 * the mount lock and try again.  If we're called from
@@ -1181,7 +1162,7 @@ xfs_sync_inodes(
 		 * it.
 		 */
 		if (xfs_ilock_nowait(ip, lock_flags) == 0) {
-			if ((flags & SYNC_BDFLUSH) || (vp == NULL)) {
+			if (vp == NULL) {
 				ip = ip->i_mnext;
 				continue;
 			}
@@ -1242,160 +1223,27 @@ xfs_sync_inodes(
 			xfs_ilock(ip, XFS_ILOCK_SHARED);
 		}
 
-		if (flags & SYNC_BDFLUSH) {
-			if ((flags & SYNC_ATTR) &&
-			    ((ip->i_update_core) ||
-			     ((ip->i_itemp != NULL) &&
-			      (ip->i_itemp->ili_format.ilf_fields != 0)))) {
-
-				/* Insert marker and drop lock if not already
-				 * done.
-				 */
-				if (mount_locked) {
-					IPOINTER_INSERT(ip, mp);
-				}
-
-				/*
-				 * We don't want the periodic flushing of the
-				 * inodes by vfs_sync() to interfere with
-				 * I/O to the file, especially read I/O
-				 * where it is only the access time stamp
-				 * that is being flushed out.  To prevent
-				 * long periods where we have both inode
-				 * locks held shared here while reading the
-				 * inode's buffer in from disk, we drop the
-				 * inode lock while reading in the inode
-				 * buffer.  We have to release the buffer
-				 * and reacquire the inode lock so that they
-				 * are acquired in the proper order (inode
-				 * locks first).  The buffer will go at the
-				 * end of the lru chain, though, so we can
-				 * expect it to still be there when we go
-				 * for it again in xfs_iflush().
-				 */
-				if ((xfs_ipincount(ip) == 0) &&
-				     xfs_iflock_nowait(ip)) {
-
-					xfs_ifunlock(ip);
-					xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
-					error = xfs_itobp(mp, NULL, ip,
-							  &dip, &bp, 0, 0);
-					if (!error) {
-						xfs_buf_relse(bp);
-					} else {
-						/* Bailing out, remove the
-						 * marker and free it.
-						 */
-						XFS_MOUNT_ILOCK(mp);
-						IPOINTER_REMOVE(ip, mp);
-						XFS_MOUNT_IUNLOCK(mp);
-
-						ASSERT(!(lock_flags &
-							XFS_IOLOCK_SHARED));
-
-						kmem_free(ipointer,
-							sizeof(xfs_iptr_t));
-						return (0);
-					}
-
-					/*
-					 * Since we dropped the inode lock,
-					 * the inode may have been reclaimed.
-					 * Therefore, we reacquire the mount
-					 * lock and check to see if we were the
-					 * inode reclaimed.  If this happened
-					 * then the ipointer marker will no
-					 * longer point back at us.  In this
-					 * case, move ip along to the inode
-					 * after the marker, remove the marker
-					 * and continue.
-					 */
-					XFS_MOUNT_ILOCK(mp);
-					mount_locked = B_TRUE;
-
-					if (ip != ipointer->ip_mprev) {
-						IPOINTER_REMOVE(ip, mp);
-
-						ASSERT(!vnode_refed);
-						ASSERT(!(lock_flags &
-							XFS_IOLOCK_SHARED));
-						continue;
-					}
-
-					ASSERT(ip->i_mount == mp);
-
-					if (xfs_ilock_nowait(ip,
-						    XFS_ILOCK_SHARED) == 0) {
-						ASSERT(ip->i_mount == mp);
-						/*
-						 * We failed to reacquire
-						 * the inode lock without
-						 * sleeping, so just skip
-						 * the inode for now.  We
-						 * clear the ILOCK bit from
-						 * the lock_flags so that we
-						 * won't try to drop a lock
-						 * we don't hold below.
-						 */
-						lock_flags &= ~XFS_ILOCK_SHARED;
-						IPOINTER_REMOVE(ip_next, mp);
-					} else if ((xfs_ipincount(ip) == 0) &&
-						   xfs_iflock_nowait(ip)) {
-						ASSERT(ip->i_mount == mp);
-						/*
-						 * Since this is vfs_sync()
-						 * calling we only flush the
-						 * inode out if we can lock
-						 * it without sleeping and
-						 * it is not pinned.  Drop
-						 * the mount lock here so
-						 * that we don't hold it for
-						 * too long.  We already have
-						 * a marker in the list here.
-						 */
-						XFS_MOUNT_IUNLOCK(mp);
-						mount_locked = B_FALSE;
-						error = xfs_iflush(ip,
-							   XFS_IFLUSH_DELWRI);
-					} else {
-						ASSERT(ip->i_mount == mp);
-						IPOINTER_REMOVE(ip_next, mp);
-					}
-				}
-
-			}
-
-		} else {
-			if ((flags & SYNC_ATTR) &&
-			    ((ip->i_update_core) ||
-			     ((ip->i_itemp != NULL) &&
-			      (ip->i_itemp->ili_format.ilf_fields != 0)))) {
-				if (mount_locked) {
-					IPOINTER_INSERT(ip, mp);
-				}
-
-				if (flags & SYNC_WAIT) {
-					xfs_iflock(ip);
-					error = xfs_iflush(ip,
-							XFS_IFLUSH_SYNC);
-				} else {
-					/*
-					 * If we can't acquire the flush
-					 * lock, then the inode is already
-					 * being flushed so don't bother
-					 * waiting.  If we can lock it then
-					 * do a delwri flush so we can
-					 * combine multiple inode flushes
-					 * in each disk write.
-					 */
-					if (xfs_iflock_nowait(ip)) {
-						error = xfs_iflush(ip,
-							      XFS_IFLUSH_DELWRI);
-					}
-					else if (bypassed)
-						(*bypassed)++;
-				}
-			}
-		}
+		if ((flags & SYNC_ATTR) &&
+		    (ip->i_update_core ||
+		     (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) {
+			if (mount_locked)
+				IPOINTER_INSERT(ip, mp);
+
+			if (flags & SYNC_WAIT) {
+				xfs_iflock(ip);
+				error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
+
+			/*
+			 * If we can't acquire the flush lock, then the inode
+			 * is already being flushed so don't bother waiting.
+			 *
+			 * If we can lock it then do a delwri flush so we can
+			 * combine multiple inode flushes in each disk write.
+			 */
+			} else if (xfs_iflock_nowait(ip)) {
+				error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
+			} else if (bypassed) {
+				(*bypassed)++;
+			}
 			}
 		}
 