Diffstat (limited to 'fs/namei.c')
-rw-r--r--  fs/namei.c  10
1 file changed, 2 insertions, 8 deletions
diff --git a/fs/namei.c b/fs/namei.c
index 8f77a8cea289..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -513,8 +513,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 
 	if (!lockref_get_not_dead(&parent->d_lockref)) {
 		nd->path.dentry = NULL;
-		rcu_read_unlock();
-		return -ECHILD;
+		goto out;
 	}
 
 	/*
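
The hunk above replaces an explicit rcu_read_unlock()/return -ECHILD pair with a jump to the function's shared out: label. That label sits outside the lines shown here, so its exact body is not visible in this diff; presumably it performs the same unlock-and-fail sequence for every failure site. A minimal user-space sketch of that goto-cleanup pattern, with hypothetical demo_unlock()/demo_grab() stand-ins rather than the real RCU and lockref calls:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for rcu_read_unlock() and lockref_get_not_dead(). */
static void demo_unlock(void)        { }
static int  demo_grab(int will_work) { return will_work; }

/* Every failure site jumps to one label that does the shared unlock + error. */
static int unlazy_sketch(int dentry_ok, int parent_ok)
{
	if (!demo_grab(dentry_ok))
		goto out;		/* was: demo_unlock(); return -ECHILD; */
	if (!demo_grab(parent_ok))
		goto out;		/* a second failure site reuses the same exit */
	demo_unlock();
	return 0;
out:
	demo_unlock();			/* unlock and error code live in one place */
	return -ECHILD;
}

int main(void)
{
	printf("both ok:      %d\n", unlazy_sketch(1, 1));	/* prints 0 */
	printf("parent fails: %d\n", unlazy_sketch(1, 0));	/* prints -ECHILD */
	return 0;
}
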
@@ -1599,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
  *   do a "get_unaligned()" if this helps and is sufficiently
  *   fast.
  *
- * - Little-endian machines (so that we can generate the mask
- *   of low bytes efficiently). Again, we *could* do a byte
- *   swapping load on big-endian architectures if that is not
- *   expensive enough to make the optimization worthless.
- *
  * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
  *   do not trap on the (extremely unlikely) case of a page
  *   crossing operation.
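
The deleted comment lines drop the little-endian requirement for the word-at-a-time path; the endianness handling moves into a helper, bytemask_from_count(), whose definition lives outside fs/namei.c and is therefore not part of this diff. The sketch below only illustrates the underlying idea, under the assumption that the helper picks the mask by byte order: the bytes holding real name data are the low bytes of a loaded word on a little-endian CPU and the high bytes on a big-endian CPU, so the two masks differ only in shift direction.

#include <stdio.h>

#define LE_BYTEMASK(len)	(~(~0ul << (len) * 8))	/* old fs/namei.c expression */
#define BE_BYTEMASK(len)	(~(~0ul >> (len) * 8))	/* assumed big-endian counterpart */

int main(void)
{
	/* len stays below the word size, as in the kernel's tail handling. */
	for (unsigned int len = 1; len < sizeof(unsigned long); len++)
		printf("len=%u  LE mask=%016lx  BE mask=%016lx\n",
		       len, LE_BYTEMASK(len), BE_BYTEMASK(len));
	return 0;
}
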
@@ -1647,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 		if (!len)
 			goto done;
 	}
-	mask = ~(~0ul << len*8);
+	mask = bytemask_from_count(len);
 	hash += mask & a;
 done:
 	return fold_hash(hash);
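
In this last hunk the final word a has been loaded in full, so it can contain bytes past the end of the name; masking with the byte mask zeroes that junk before it is added into the hash. A small user-space illustration of the masking step only (little-endian host assumed, memcpy standing in for the kernel's unaligned load, and no attempt to reproduce fold_hash() or the real hash mixing):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 8-byte buffer: 3 valid name bytes followed by junk. */
	unsigned char buf[8] = { 'f', 'o', 'o', 0xde, 0xad, 0xbe, 0xef, 0x99 };
	unsigned long a, mask, len = 3;

	memcpy(&a, buf, sizeof(a));	/* whole-word load, junk included */
	mask = ~(~0ul << len * 8);	/* little-endian low-byte mask */

	printf("raw word:    %016lx\n", a);
	printf("masked word: %016lx\n", mask & a);	/* only 'f','o','o' survive */
	return 0;
}
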