author		Will Deacon <will.deacon@arm.com>	2013-12-12 12:40:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-12-12 13:39:01 -0500
commit		a5c21dcefa1c3d759457a604b3cfc4af29c8713f (patch)
tree		6832dfcb836f8d5043ba70f17a0ea9c2c428bc4d /fs
parent		319720f534d88039615bceb88d4bc094a7cd4ce9 (diff)
dcache: allow word-at-a-time name hashing with big-endian CPUs
When explicitly hashing the end of a string with the word-at-a-time
interface, we have to be careful which end of the word we pick up. On
big-endian CPUs, the upper bits will contain the data we're after, so
ensure we generate our masks accordingly (and avoid hashing whatever
random junk may have been sitting after the string).

This patch adds a new dcache helper, bytemask_from_count, which creates
a mask appropriate for the CPU endianness.

Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
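The helper itself is added to include/linux/dcache.h, which falls outside
this fs/-limited view of the diff. A minimal sketch of what the definition
presumably looks like, assuming the usual __LITTLE_ENDIAN/__BIG_ENDIAN
split ('cnt' being the number of valid bytes in the final word):

	#ifdef __LITTLE_ENDIAN
	/* valid bytes occupy the low end of the loaded word */
	#define bytemask_from_count(cnt)	(~(~0ul << (cnt)*8))
	#else
	/* big-endian: valid bytes occupy the high end of the word */
	#define bytemask_from_count(cnt)	(~(~0ul >> (cnt)*8))
	#endif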
Diffstat (limited to 'fs')
-rw-r--r--	fs/dcache.c	2
-rw-r--r--	fs/namei.c	7
2 files changed, 2 insertions, 7 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 4bdb300b16e2..6055d61811d3 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -192,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 		if (!tcount)
 			return 0;
 	}
-	mask = ~(~0ul << tcount*8);
+	mask = bytemask_from_count(tcount);
 	return unlikely(!!((a ^ b) & mask));
 }
 
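To see why the old mask was wrong on big-endian, compare the two
constructions directly. The standalone sketch below (illustrative values
only, not kernel code) assumes a 64-bit unsigned long and a three-byte
tail; a big-endian load puts those three name bytes in the top of the
word, so the old low-byte mask would have kept the trailing junk instead:

	#include <stdio.h>

	int main(void)
	{
		unsigned long cnt = 3;	/* valid name bytes in the final word */

		/* old mask: keeps the low cnt bytes (little-endian loads) */
		unsigned long le_mask = ~(~0ul << cnt * 8);	/* 0x0000000000ffffff */

		/* big-endian variant: keeps the high cnt bytes instead */
		unsigned long be_mask = ~(~0ul >> cnt * 8);	/* 0xffffff0000000000 */

		printf("low-byte mask:  %#018lx\n", le_mask);
		printf("high-byte mask: %#018lx\n", be_mask);
		return 0;
	}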
diff --git a/fs/namei.c b/fs/namei.c
index c53d3a9547f9..3531deebad30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1598,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
  * do a "get_unaligned()" if this helps and is sufficiently
  * fast.
  *
- * - Little-endian machines (so that we can generate the mask
- *   of low bytes efficiently). Again, we *could* do a byte
- *   swapping load on big-endian architectures if that is not
- *   expensive enough to make the optimization worthless.
- *
  * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
  *   do not trap on the (extremely unlikely) case of a page
  *   crossing operation.
@@ -1646,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 		if (!len)
 			goto done;
 	}
-	mask = ~(~0ul << len*8);
+	mask = bytemask_from_count(len);
 	hash += mask & a;
 done:
 	return fold_hash(hash);
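For context, a user-space sketch of how the tail mask slots into a
word-at-a-time hash loop. load_word() and the multiply-by-9 mixing step
are hypothetical stand-ins rather than the kernel's full_name_hash() and
fold_hash(), and the little-endian form of the mask is assumed:

	#include <stdio.h>
	#include <string.h>

	/* little-endian form; stand-in for the new kernel helper */
	#define bytemask_from_count(cnt)	(~(~0ul << (cnt) * 8))

	/* stand-in for the kernel's unaligned word load */
	static unsigned long load_word(const char *p)
	{
		unsigned long w;

		memcpy(&w, p, sizeof(w));
		return w;
	}

	/* toy hash: whole words first, then one masked partial word */
	static unsigned long toy_name_hash(const char *name, unsigned long len)
	{
		unsigned long hash = 0;

		while (len >= sizeof(unsigned long)) {
			hash = (hash + load_word(name)) * 9;
			name += sizeof(unsigned long);
			len -= sizeof(unsigned long);
		}
		if (len)	/* mask off whatever junk follows the string */
			hash += load_word(name) & bytemask_from_count(len);
		return hash;
	}

	int main(void)
	{
		char buf[16] = "dcache";	/* padded: tail load stays in bounds */

		printf("%#lx\n", toy_name_hash(buf, 6));
		return 0;
	}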