author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-03 13:16:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-03 17:01:40 -0400
commit     e419b4cc585680940bc42f8ca8a071d6023fb1bb
tree       8fce0f12b7b2a0fdca7a937af137910011efa783  /fs/dcache.c
parent     ac001e76546523ec2ef05b2f7001d8fdc588d069
vfs: make word-at-a-time accesses handle a non-existing page
It turns out that there are more cases than CONFIG_DEBUG_PAGEALLOC that can have holes in the kernel address space: it seems to happen easily with Xen, and it looks like the AMD gart64 code will also punch holes dynamically.

Actually hitting that case is still very unlikely, so just do the access, and take an exception and fix it up for the very unlikely case of it being a page-crosser with no next page.

And hey, this abstraction might even help other architectures that have other issues with unaligned word accesses than the possible missing next page. IOW, this could do the byte order magic too.

Peter Anvin fixed a thinko in the shifting for the exception case.

Reported-and-tested-by: Jana Saout <jana@saout.de>
Cc: Peter Anvin <hpa@zytor.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
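The helper the message builds on, load_unaligned_zeropad(), is easiest to understand by its contract rather than its implementation. Below is a rough user-space model of what a caller may assume; zeropad_model and its valid parameter are illustrative only, not kernel API, and the real x86 helper instead issues a single unaligned word load and zero-fills the missing bytes in an exception fixup when the load crosses into a non-existent page, as described above. Little-endian byte order is assumed.

#include <stddef.h>

/*
 * Conceptual model only (hypothetical helper, little-endian assumed):
 * bytes beyond the 'valid' readable ones come back as zero, which is
 * the guarantee the word-at-a-time compare relies on when a name ends
 * just before a hole in the kernel address space.
 */
static unsigned long zeropad_model(const unsigned char *addr, size_t valid)
{
	unsigned long word = 0;
	size_t i, n = valid < sizeof(word) ? valid : sizeof(word);

	for (i = 0; i < n; i++)
		word |= (unsigned long)addr[i] << (8 * i);
	return word;	/* high bytes past 'valid' stay zero */
}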
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index b60ddc41d783..b80531c91779 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -141,18 +141,29 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
  * The strings are both count bytes long, and count is non-zero.
  */
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#include <asm/word-at-a-time.h>
+/*
+ * NOTE! 'cs' and 'scount' come from a dentry, so it has a
+ * aligned allocation for this particular component. We don't
+ * strictly need the load_unaligned_zeropad() safety, but it
+ * doesn't hurt either.
+ *
+ * In contrast, 'ct' and 'tcount' can be from a pathname, and do
+ * need the careful unaligned handling.
+ */
 static inline int dentry_cmp(const unsigned char *cs, size_t scount,
 				const unsigned char *ct, size_t tcount)
 {
-#ifdef CONFIG_DCACHE_WORD_ACCESS
 	unsigned long a,b,mask;
 
 	if (unlikely(scount != tcount))
 		return 1;
 
 	for (;;) {
-		a = *(unsigned long *)cs;
-		b = *(unsigned long *)ct;
+		a = load_unaligned_zeropad(cs);
+		b = load_unaligned_zeropad(ct);
 		if (tcount < sizeof(unsigned long))
 			break;
 		if (unlikely(a != b))
@@ -165,7 +176,13 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
 	}
 	mask = ~(~0ul << tcount*8);
 	return unlikely(!!((a ^ b) & mask));
+}
+
 #else
+
+static inline int dentry_cmp(const unsigned char *cs, size_t scount,
+				const unsigned char *ct, size_t tcount)
+{
 	if (scount != tcount)
 		return 1;
 
@@ -177,9 +194,10 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
 		tcount--;
 	} while (tcount);
 	return 0;
-#endif
 }
 
+#endif
+
 static void __d_free(struct rcu_head *head)
 {
 	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
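For readers who want to check the masking logic in the new word-at-a-time dentry_cmp(), here is a stand-alone user-space sketch (my own illustration, not kernel code). It assumes a little-endian machine, pads the test buffers to a full word, and substitutes memcpy() for load_unaligned_zeropad(), so it exercises the same ~(~0ul << tcount*8) mask without needing the exception-table machinery; wordwise_cmp is a hypothetical name.

#include <assert.h>
#include <string.h>

/* Illustrative stand-in for the kernel routine above (names are mine). */
static int wordwise_cmp(const unsigned char *cs, size_t scount,
			const unsigned char *ct, size_t tcount)
{
	unsigned long a, b, mask;

	if (scount != tcount)
		return 1;

	for (;;) {
		memcpy(&a, cs, sizeof(a));	/* stands in for load_unaligned_zeropad(cs) */
		memcpy(&b, ct, sizeof(b));	/* stands in for load_unaligned_zeropad(ct) */
		if (tcount < sizeof(unsigned long))
			break;
		if (a != b)
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
	}
	/* Keep only the low 'tcount' bytes: tcount == 3 gives 0x00ffffff. */
	mask = ~(~0ul << tcount * 8);
	return !!((a ^ b) & mask);
}

int main(void)
{
	/* Buffers padded to a full word so the memcpy() reads stay in bounds. */
	static const unsigned char s1[8] = "fooAAAA";
	static const unsigned char s2[8] = "foXAAAA";	/* differs inside the mask */
	static const unsigned char s3[8] = "fooBBBB";	/* differs only past it */

	assert(wordwise_cmp(s1, 3, s2, 3) == 1);	/* mismatch detected */
	assert(wordwise_cmp(s1, 3, s3, 3) == 0);	/* trailing bytes masked off */
	return 0;
}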