Diffstat (limited to 'fs/namei.c')
-rw-r--r--  fs/namei.c  7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/fs/namei.c b/fs/namei.c
index c53d3a9547f9..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1598,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
  * do a "get_unaligned()" if this helps and is sufficiently
  * fast.
  *
- * - Little-endian machines (so that we can generate the mask
- *   of low bytes efficiently). Again, we *could* do a byte
- *   swapping load on big-endian architectures if that is not
- *   expensive enough to make the optimization worthless.
- *
  * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
  *   do not trap on the (extremely unlikely) case of a page
  *   crossing operation.
@@ -1646,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 		if (!len)
 			goto done;
 	}
-	mask = ~(~0ul << len*8);
+	mask = bytemask_from_count(len);
 	hash += mask & a;
 done:
 	return fold_hash(hash);
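
The change replaces the open-coded little-endian mask ~(~0ul << len*8) with bytemask_from_count(), letting each architecture supply an endianness-appropriate definition, which is why the little-endian-only restriction in the removed comment no longer applies. The standalone sketch below is illustrative only, not kernel code: bytemask_from_count here is a local stand-in that assumes a little-endian layout, where the first len bytes of the loaded word are its low-order bytes; a big-endian port would presumably use a mirrored expression that keeps the high-order bytes instead.

/*
 * Illustrative sketch, not the kernel implementation: what the byte
 * mask for the final, partial word of a name keeps.
 */
#include <stdio.h>

/* Local stand-in for the per-architecture macro, little-endian form. */
static unsigned long bytemask_from_count(unsigned int cnt)
{
        return ~(~0ul << cnt * 8);
}

int main(void)
{
        /* The 8 bytes of "pathname" viewed as a little-endian 64-bit word. */
        unsigned long a = 0x656d616e68746170ul;
        unsigned int len;

        for (len = 0; len < sizeof(unsigned long); len++) {
                unsigned long mask = bytemask_from_count(len);

                /* Only the first "len" name bytes survive the mask. */
                printf("len=%u mask=%016lx kept=%016lx\n",
                       len, mask, a & mask);
        }
        return 0;
}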