author	Andrey Ryabinin <aryabinin@virtuozzo.com>	2017-07-10 18:50:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-10 19:32:33 -0400
commit	c634d807d98e3e7def43e72d28528c84c612ab98 (patch)
tree	f716e5888283eda8dcfa084b28c4badd29bdfe09 /mm/kasan
parent	458f7920f9b1e6c313944d498c440f8599b8a136 (diff)
mm/kasan: get rid of speculative shadow checks
For some unaligned memory accesses we have to check an additional byte of
the shadow memory. Currently we load that byte speculatively to have only
a single load + branch on the optimistic fast path.

However, this approach has some downsides:

- It's an unaligned access, so it prevents porting KASAN to architectures
  which don't support unaligned accesses.

- We have to map an additional shadow page to prevent a crash if the
  speculative load happens near the end of the mapped memory. This would
  significantly complicate upcoming memory hotplug support.

I wasn't able to notice any performance degradation with this patch, so
these speculative loads are just a pain with no gain; let's remove them.

Link: http://lkml.kernel.org/r/20170601162338.23540-1-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
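For background, generic KASAN maps every 8 (KASAN_SHADOW_SCALE_SIZE) bytes of memory
to one shadow byte, so an access that straddles an 8-byte boundary is covered by two
shadow bytes. The following is a minimal user-space sketch of that mapping, not kernel
code: the shadow[] array, shadow_of() helper and SHADOW_* constants are simplified
stand-ins for kasan_mem_to_shadow() and the real constants, and the partial-granule
encoding handled by memory_is_poisoned_1() is ignored (any nonzero shadow byte counts
as poisoned here). It illustrates why a 2-, 4- or 8-byte access sometimes needs the
extra shadow byte that the old code loaded speculatively, and when the patched code
decides to look at it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's KASAN constants. */
#define SHADOW_SCALE_SHIFT	3	/* one shadow byte covers 8 bytes */
#define SHADOW_MASK		((1UL << SHADOW_SCALE_SHIFT) - 1)

/* Toy shadow array indexed by (addr >> 3); 0 means "fully addressable". */
static uint8_t shadow[16];

static uint8_t *shadow_of(unsigned long addr)
{
	return &shadow[addr >> SHADOW_SCALE_SHIFT];
}

/*
 * Rough model of the check after this patch: look at the shadow byte of the
 * first accessed byte only when the access actually crosses an 8-byte
 * boundary; otherwise one shadow byte covers the whole access.
 */
static bool is_poisoned(unsigned long addr, unsigned long size)
{
	unsigned long last = addr + size - 1;

	if ((last & SHADOW_MASK) < size - 1)	/* access crosses a granule */
		return *shadow_of(addr) || *shadow_of(last);

	return *shadow_of(last) != 0;
}

int main(void)
{
	shadow[1] = 0xFF;	/* poison bytes 8..15 of the toy address space */

	/* 4-byte access at offset 6 touches bytes 6..9: needs both shadow bytes. */
	printf("access(6, 4) poisoned: %d\n", is_poisoned(6, 4));	/* prints 1 */
	/* 4-byte access at offset 0 stays within bytes 0..7: one shadow byte. */
	printf("access(0, 4) poisoned: %d\n", is_poisoned(0, 4));	/* prints 0 */
	return 0;
}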
Diffstat (limited to 'mm/kasan')
-rw-r--r--	mm/kasan/kasan.c	| 98
1 file changed, 16 insertions(+), 82 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index c81549d5c833..212bc62041de 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -134,94 +134,30 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
 	return false;
 }
 
-static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+						unsigned long size)
 {
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 1))
-			return true;
-
-		/*
-		 * If single shadow byte covers 2-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
 
-	return false;
-}
-
-static __always_inline bool memory_is_poisoned_4(unsigned long addr)
-{
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 3))
-			return true;
-
-		/*
-		 * If single shadow byte covers 4-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
-
-	return false;
-}
-
-static __always_inline bool memory_is_poisoned_8(unsigned long addr)
-{
-	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		if (memory_is_poisoned_1(addr + 7))
-			return true;
-
-		/*
-		 * If single shadow byte covers 8-byte access, we don't
-		 * need to do anything more. Otherwise, test the first
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
-
-		return unlikely(*(u8 *)shadow_addr);
-	}
+	/*
+	 * Access crosses 8(shadow size)-byte boundary. Such access maps
+	 * into 2 shadow bytes, so we need to check them both.
+	 */
+	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
-	return false;
+	return memory_is_poisoned_1(addr + size - 1);
 }
 
 static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 {
-	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
-
-	if (unlikely(*shadow_addr)) {
-		u16 shadow_first_bytes = *(u16 *)shadow_addr;
-
-		if (unlikely(shadow_first_bytes))
-			return true;
-
-		/*
-		 * If two shadow bytes covers 16-byte access, we don't
-		 * need to do anything more. Otherwise, test the last
-		 * shadow byte.
-		 */
-		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-			return false;
+	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
-		return memory_is_poisoned_1(addr + 15);
-	}
+	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
+	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+		return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
-	return false;
+	return *shadow_addr;
 }
 
 static __always_inline unsigned long bytes_is_zero(const u8 *start,
@@ -292,11 +228,9 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
 		case 1:
 			return memory_is_poisoned_1(addr);
 		case 2:
-			return memory_is_poisoned_2(addr);
 		case 4:
-			return memory_is_poisoned_4(addr);
 		case 8:
-			return memory_is_poisoned_8(addr);
+			return memory_is_poisoned_2_4_8(addr, size);
 		case 16:
 			return memory_is_poisoned_16(addr);
 		default:
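The branch condition used above, ((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1,
is meant to fire exactly when a 2-, 4- or 8-byte access straddles an 8-byte shadow
granule. Below is a throwaway user-space sketch (assuming KASAN_SHADOW_MASK is 7,
i.e. one shadow byte per 8 bytes, as in generic KASAN) that compares it against the
plain "first and last byte land in different granules" definition:

#include <assert.h>
#include <stdio.h>

#define SHADOW_MASK 7UL	/* assumed 8-byte shadow granules */

int main(void)
{
	unsigned long addr, size;

	for (size = 2; size <= 8; size <<= 1) {
		for (addr = 0; addr < 64; addr++) {
			unsigned long last = addr + size - 1;
			/* Condition used by memory_is_poisoned_2_4_8(). */
			int crosses_fast = ((last & SHADOW_MASK) < size - 1);
			/* Reference: first and last byte map to different granules. */
			int crosses_ref = (addr >> 3) != (last >> 3);

			assert(crosses_fast == crosses_ref);
		}
	}
	printf("boundary-crossing check matches for sizes 2, 4 and 8\n");
	return 0;
}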