author:    Al Viro <viro@zeniv.linux.org.uk>  2011-03-11 04:44:53 -0500
committer: Al Viro <viro@zeniv.linux.org.uk>  2011-03-14 09:15:27 -0400
commit:    5a18fff2090c3af830d699c8ccb230498a1e37e5
tree:      388675113818a8d14e7cd7dc25185e1be55354f9
parent:    40b39136f07279fdc868a36cba050f4e84ce0ace
untangle do_lookup()
That thing has devolved into a rat's nest of gotos; sane use of unlikely() gets rid of that horror and gives a much more readable structure:

* make a fast attempt to find a dentry; false negatives are OK. In RCU mode, if everything went fine, we are done; otherwise just drop out of RCU. If we'd done (RCU) ->d_revalidate() and it had not refused outright (i.e. didn't give us -ECHILD), remember its result.
* now we are not in RCU mode and hopefully have a dentry. If we do not, lock the parent, do a full d_lookup(), and if that has not found anything, allocate and call ->lookup(). If we'd done that ->lookup(), remember that the dentry is good and we don't need to revalidate it.
* now we have a dentry. If it has ->d_revalidate() and we can't skip it, call it.
* hopefully the dentry is good; if not, either fail (in case of error) or try to invalidate it. If d_invalidate() has succeeded, drop it and retry everything as if the original attempt had not found a dentry.
* now we can finish it up: deal with mountpoint crossing and automount.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
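For orientation, the shape do_lookup() ends up with after this patch can be condensed as follows. This is a non-compilable outline distilled from the new function body in the diff below, not separate code: the parts reduced to comments and "..." are exactly the RCU bookkeeping, locking and error handling that the full diff shows.

static int do_lookup(struct nameidata *nd, struct qstr *name,
		     struct path *path, struct inode **inode)
{
	struct dentry *dentry, *parent = nd->path.dentry;
	int need_reval = 1;	/* may ->d_revalidate() be skipped?  0 = yes */
	int status = 1;		/* last ->d_revalidate() result; 1 = valid */

	if (nd->flags & LOOKUP_RCU) {
		dentry = __d_lookup_rcu(parent, name, ...);
		/*
		 * Fast attempt: if the dentry is there, (RCU-)revalidates fine
		 * and the mount crossing succeeds, return 0 without ever
		 * leaving RCU mode.  Otherwise drop out of RCU ("unlazy:"),
		 * keeping the ->d_revalidate() result in status/need_reval.
		 */
	} else {
		dentry = __d_lookup(parent, name);
	}
retry:
	if (!dentry) {
		/*
		 * Slow path: take parent->d_inode->i_mutex, repeat d_lookup(),
		 * and if there is still nothing, d_alloc_and_lookup().  A
		 * freshly created dentry is known good, so need_reval = 0.
		 */
	}
	if (need_reval && (dentry->d_flags & DCACHE_OP_REVALIDATE))
		status = d_revalidate(dentry, nd);
	if (status <= 0) {
		if (status < 0)
			return status;	/* ->d_revalidate() reported an error */
		if (!d_invalidate(dentry)) {
			dput(dentry);
			dentry = NULL;	/* stale entry thrown away... */
			need_reval = 1;
			goto retry;	/* ...redo the lookup from scratch */
		}
	}
	/* finish up: follow_managed() deals with mountpoint crossing/automount */
	...
}

The old need_lookup/found/done/fail goto maze collapses into this single retry loop driven by two flags, which is the readability gain the commit message describes.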
Diffstat (limited to 'fs/namei.c')
-rw-r--r--  fs/namei.c  141
1 file changed, 56 insertions(+), 85 deletions(-)
diff --git a/fs/namei.c b/fs/namei.c
index ca9a06a65704..0bebd13e5cb7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -589,29 +589,6 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
 	return dentry;
 }
 
-static inline struct dentry *
-do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd)
-{
-	int status = d_revalidate(dentry, nd);
-	if (likely(status > 0))
-		return dentry;
-	if (status == -ECHILD) {
-		if (nameidata_dentry_drop_rcu(nd, dentry))
-			return ERR_PTR(-ECHILD);
-		return do_revalidate(dentry, nd);
-	}
-	if (status < 0)
-		return ERR_PTR(status);
-	/* Don't d_invalidate in rcu-walk mode */
-	if (nameidata_dentry_drop_rcu(nd, dentry))
-		return ERR_PTR(-ECHILD);
-	if (!d_invalidate(dentry)) {
-		dput(dentry);
-		dentry = NULL;
-	}
-	return dentry;
-}
-
 /*
  * handle_reval_path - force revalidation of a dentry
  *
@@ -1213,7 +1190,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
 {
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
-	struct inode *dir;
+	int need_reval = 1;
+	int status = 1;
 	int err;
 
 	/*
@@ -1223,48 +1201,74 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
 	 */
 	if (nd->flags & LOOKUP_RCU) {
 		unsigned seq;
-
 		*inode = nd->inode;
 		dentry = __d_lookup_rcu(parent, name, &seq, inode);
-		if (!dentry) {
-			if (nameidata_drop_rcu(nd))
-				return -ECHILD;
-			goto need_lookup;
-		}
+		if (!dentry)
+			goto unlazy;
+
 		/* Memory barrier in read_seqcount_begin of child is enough */
 		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
 			return -ECHILD;
-
 		nd->seq = seq;
+
 		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
-			dentry = do_revalidate_rcu(dentry, nd);
-			if (!dentry)
-				goto need_lookup;
-			if (IS_ERR(dentry))
-				goto fail;
-			if (!(nd->flags & LOOKUP_RCU))
-				goto done;
+			status = d_revalidate(dentry, nd);
+			if (unlikely(status <= 0)) {
+				if (status != -ECHILD)
+					need_reval = 0;
+				goto unlazy;
+			}
 		}
 		path->mnt = mnt;
 		path->dentry = dentry;
 		if (likely(__follow_mount_rcu(nd, path, inode, false)))
 			return 0;
-		if (nameidata_drop_rcu(nd))
-			return -ECHILD;
-		/* fallthru */
+unlazy:
+		if (dentry) {
+			if (nameidata_dentry_drop_rcu(nd, dentry))
+				return -ECHILD;
+		} else {
+			if (nameidata_drop_rcu(nd))
+				return -ECHILD;
+		}
+	} else {
+		dentry = __d_lookup(parent, name);
 	}
-	dentry = __d_lookup(parent, name);
-	if (!dentry)
-		goto need_lookup;
-found:
-	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
-		dentry = do_revalidate(dentry, nd);
-		if (!dentry)
-			goto need_lookup;
-		if (IS_ERR(dentry))
-			goto fail;
+
+retry:
+	if (unlikely(!dentry)) {
+		struct inode *dir = parent->d_inode;
+		BUG_ON(nd->inode != dir);
+
+		mutex_lock(&dir->i_mutex);
+		dentry = d_lookup(parent, name);
+		if (likely(!dentry)) {
+			dentry = d_alloc_and_lookup(parent, name, nd);
+			if (IS_ERR(dentry)) {
+				mutex_unlock(&dir->i_mutex);
+				return PTR_ERR(dentry);
+			}
+			/* known good */
+			need_reval = 0;
+			status = 1;
+		}
+		mutex_unlock(&dir->i_mutex);
+	}
+	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
+		status = d_revalidate(dentry, nd);
+	if (unlikely(status <= 0)) {
+		if (status < 0) {
+			dput(dentry);
+			return status;
+		}
+		if (!d_invalidate(dentry)) {
+			dput(dentry);
+			dentry = NULL;
+			need_reval = 1;
+			goto retry;
+		}
 	}
-done:
+
 	path->mnt = mnt;
 	path->dentry = dentry;
 	err = follow_managed(path, nd->flags);
@@ -1274,39 +1278,6 @@ done:
 	}
 	*inode = path->dentry->d_inode;
 	return 0;
-
-need_lookup:
-	dir = parent->d_inode;
-	BUG_ON(nd->inode != dir);
-
-	mutex_lock(&dir->i_mutex);
-	/*
-	 * First re-do the cached lookup just in case it was created
-	 * while we waited for the directory semaphore, or the first
-	 * lookup failed due to an unrelated rename.
-	 *
-	 * This could use version numbering or similar to avoid unnecessary
-	 * cache lookups, but then we'd have to do the first lookup in the
-	 * non-racy way. However in the common case here, everything should
-	 * be hot in cache, so would it be a big win?
-	 */
-	dentry = d_lookup(parent, name);
-	if (likely(!dentry)) {
-		dentry = d_alloc_and_lookup(parent, name, nd);
-		mutex_unlock(&dir->i_mutex);
-		if (IS_ERR(dentry))
-			goto fail;
-		goto done;
-	}
-	/*
-	 * Uhhuh! Nasty case: the cache was re-populated while
-	 * we waited on the semaphore. Need to revalidate.
-	 */
-	mutex_unlock(&dir->i_mutex);
-	goto found;
-
-fail:
-	return PTR_ERR(dentry);
 }
 
 static inline int may_lookup(struct nameidata *nd)