-rw-r--r--   Documentation/x86/x86_64/mm.txt   |   2
-rw-r--r--   arch/x86/Kconfig                  |   1
-rw-r--r--   arch/x86/boot/Makefile            |   2
-rw-r--r--   arch/x86/boot/compressed/Makefile |   2
-rw-r--r--   arch/x86/include/asm/kasan.h      |  31
-rw-r--r--   arch/x86/kernel/Makefile          |   2
-rw-r--r--   arch/x86/kernel/dumpstack.c       |   5
-rw-r--r--   arch/x86/kernel/head64.c          |   9
-rw-r--r--   arch/x86/kernel/head_64.S         |  30
-rw-r--r--   arch/x86/kernel/setup.c           |   3
-rw-r--r--   arch/x86/mm/Makefile              |   3
-rw-r--r--   arch/x86/mm/kasan_init_64.c       | 199
-rw-r--r--   arch/x86/realmode/Makefile        |   2
-rw-r--r--   arch/x86/realmode/rm/Makefile     |   1
-rw-r--r--   arch/x86/vdso/Makefile            |   1
-rw-r--r--   lib/Kconfig.kasan                 |   1
16 files changed, 290 insertions, 4 deletions
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 052ee643a32e..05712ac83e38 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,6 +12,8 @@ ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
 ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
+ffffec0000000000 - fffffc0000000000 (=44 bits) kasan shadow memory (16TB)
+... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
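
The size of the new region follows directly from KASAN's 1:8 shadow scale: the 128 TB kernel half of the 47-bit address space needs 128 TB / 8 = 16 TB of shadow, i.e. the 2^44-byte range ffffec0000000000 - fffffc0000000000 added above. A minimal userspace sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0xffffec0000000000ULL;	/* kasan shadow start (mm.txt above) */
	uint64_t end   = 0xfffffc0000000000ULL;	/* kasan shadow end */
	uint64_t size  = end - start;

	/* 1 shadow byte covers 8 bytes, so 128 TB of kernel space -> 16 TB of shadow */
	printf("shadow region: 2^%d bytes = %llu TB\n",
	       63 - __builtin_clzll(size), (unsigned long long)(size >> 40));
	return 0;
}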
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 33ce9a344e38..eb1cf898ed3c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -85,6 +85,7 @@ config X86
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_ARCH_KMEMCHECK
+	select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
 	select HAVE_USER_RETURN_NOTIFIER
 	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	select HAVE_ARCH_JUMP_LABEL
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 3db07f30636f..57bbf2fb21f6 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -14,6 +14,8 @@
 # Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
 # The number is the same as you would ordinarily press at bootup.
 
+KASAN_SANITIZE := n
+
 SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
 
 targets := vmlinux.bin setup.bin setup.elf bzImage
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index ad754b4411f7..843feb3eb20b 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -16,6 +16,8 @@
 # (see scripts/Makefile.lib size_append)
 # compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
 
+KASAN_SANITIZE := n
+
 targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
new file mode 100644
index 000000000000..8b22422fbad8
--- /dev/null
+++ b/arch/x86/include/asm/kasan.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_X86_KASAN_H
+#define _ASM_X86_KASAN_H
+
+/*
+ * Compiler uses shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so shadow
+ * for kernel really starts from compiler's shadow offset +
+ * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT
+ */
+#define KASAN_SHADOW_START	(KASAN_SHADOW_OFFSET + \
+				(0xffff800000000000ULL >> 3))
+/* 47 bits for kernel address -> (47 - 3) bits for shadow */
+#define KASAN_SHADOW_END	(KASAN_SHADOW_START + (1ULL << (47 - 3)))
+
+#ifndef __ASSEMBLY__
+
+extern pte_t kasan_zero_pte[];
+extern pte_t kasan_zero_pmd[];
+extern pte_t kasan_zero_pud[];
+
+#ifdef CONFIG_KASAN
+void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_init(void);
+#else
+static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_init(void) { }
+#endif
+
+#endif
+
+#endif
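
The KASAN_SHADOW_START definition above can be checked against the layout documented in mm.txt: shadow(addr) = KASAN_SHADOW_OFFSET + (addr >> KASAN_SHADOW_SCALE_SHIFT), and with the X86_64 offset of 0xdffffc0000000000 set in lib/Kconfig.kasan at the end of this patch, the shadow of the first kernel address 0xffff800000000000 lands at 0xffffec0000000000. A small standalone sketch (userspace C, offset taken from the Kconfig hunk; not kernel code):

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_OFFSET		0xdffffc0000000000ULL	/* X86_64 default from lib/Kconfig.kasan */
#define KASAN_SHADOW_SCALE_SHIFT	3

static uint64_t shadow(uint64_t addr)
{
	return KASAN_SHADOW_OFFSET + (addr >> KASAN_SHADOW_SCALE_SHIFT);
}

int main(void)
{
	/* Prints 0xffffec0000000000, the "kasan shadow memory" start in mm.txt */
	printf("%#llx\n", (unsigned long long)shadow(0xffff800000000000ULL));
	return 0;
}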
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 732223496968..b13b70634124 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,6 +16,8 @@ CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 endif
 
+KASAN_SANITIZE_head$(BITS).o := n
+
 CFLAGS_irq.o := -I$(src)/../include/asm/trace
 
 obj-y := process_$(BITS).o signal.o entry_$(BITS).o
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b74ebc7c4402..cf3df1d8d039 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,7 +265,10 @@ int __die(const char *str, struct pt_regs *regs, long err)
265 printk("SMP "); 265 printk("SMP ");
266#endif 266#endif
267#ifdef CONFIG_DEBUG_PAGEALLOC 267#ifdef CONFIG_DEBUG_PAGEALLOC
268 printk("DEBUG_PAGEALLOC"); 268 printk("DEBUG_PAGEALLOC ");
269#endif
270#ifdef CONFIG_KASAN
271 printk("KASAN");
269#endif 272#endif
270 printk("\n"); 273 printk("\n");
271 if (notify_die(DIE_OOPS, str, regs, err, 274 if (notify_die(DIE_OOPS, str, regs, err,
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index eda1a865641e..efcddfaf05f9 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -27,6 +27,7 @@
 #include <asm/bios_ebda.h>
 #include <asm/bootparam_utils.h>
 #include <asm/microcode.h>
+#include <asm/kasan.h>
 
 /*
  * Manage page tables very early on.
@@ -46,7 +47,7 @@ static void __init reset_early_page_tables(void)
 
 	next_early_pgt = 0;
 
-	write_cr3(__pa(early_level4_pgt));
+	write_cr3(__pa_nodebug(early_level4_pgt));
 }
 
 /* Create a new PMD entry */
@@ -59,7 +60,7 @@ int __init early_make_pgtable(unsigned long address)
 	pmdval_t pmd, *pmd_p;
 
 	/* Invalid address or early pgt is done ? */
-	if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt))
+	if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
 		return -1;
 
 again:
@@ -158,6 +159,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	/* Kill off the identity-map trampoline */
 	reset_early_page_tables();
 
+	kasan_map_early_shadow(early_level4_pgt);
+
 	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
 
@@ -179,6 +182,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	/* set init_level4_pgt kernel high mapping*/
 	init_level4_pgt[511] = early_level4_pgt[511];
 
+	kasan_map_early_shadow(init_level4_pgt);
+
 	x86_64_start_reservations(real_mode_data);
 }
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a468c0a65c42..6fd514d9f69a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -514,8 +514,38 @@ ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad   0x0000000000000000
 
+#ifdef CONFIG_KASAN
+#define FILL(VAL, COUNT)			\
+	.rept (COUNT) ;				\
+	.quad	(VAL) ;				\
+	.endr
+
+NEXT_PAGE(kasan_zero_pte)
+	FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
+NEXT_PAGE(kasan_zero_pmd)
+	FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
+NEXT_PAGE(kasan_zero_pud)
+	FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
+
+#undef FILL
+#endif
+
+
 #include "../../x86/xen/xen-head.S"
 
 	__PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
+
+#ifdef CONFIG_KASAN
+/*
+ * This page used as early shadow. We don't use empty_zero_page
+ * at early stages, stack instrumentation could write some garbage
+ * to this page.
+ * Latter we reuse it as zero shadow for large ranges of memory
+ * that allowed to access, but not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+NEXT_PAGE(kasan_zero_page)
+	.skip PAGE_SIZE
+#endif
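
Each FILL() above expands to 512 identical page-table entries; the value is the page's link-time address minus __START_KERNEL_map (giving its physical address) plus the _KERNPG_TABLE flag bits, so the whole early shadow resolves to the single kasan_zero_page. A userspace model of one such table (the link address and flag value are assumptions for illustration, not taken from a real build):

#include <stdint.h>
#include <stdio.h>

#define __START_KERNEL_map	0xffffffff80000000ULL
#define _KERNPG_TABLE		0x063ULL	/* PRESENT | RW | ACCESSED | DIRTY */

int main(void)
{
	uint64_t kasan_zero_page = 0xffffffff81e0c000ULL;	/* hypothetical link address */
	uint64_t pte[512];
	int i;

	/* .rept 512 ; .quad kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE ; .endr */
	for (i = 0; i < 512; i++)
		pte[i] = kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE;

	/* every entry points at the same physical zero page */
	printf("pte[0] = pte[511] = %#llx\n", (unsigned long long)pte[0]);
	return 0;
}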
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4648adadd7d..27d200929864 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -89,6 +89,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/bugs.h>
+#include <asm/kasan.h>
 
 #include <asm/vsyscall.h>
 #include <asm/cpu.h>
@@ -1174,6 +1175,8 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.paging.pagetable_init();
 
+	kasan_init();
+
 	if (boot_cpu_data.cpuid_level >= 0) {
 		/* A CPU has %cr4 if and only if it has CPUID */
 		mmu_cr4_features = read_cr4();
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index ecfdc46a024a..c4cc74006c61 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -20,6 +20,9 @@ obj-$(CONFIG_HIGHMEM) += highmem_32.o
 
 obj-$(CONFIG_KMEMCHECK) += kmemcheck/
 
+KASAN_SANITIZE_kasan_init_$(BITS).o := n
+obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o
+
 obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
 mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
new file mode 100644
index 000000000000..3e4d9a1a39fa
--- /dev/null
+++ b/arch/x86/mm/kasan_init_64.c
@@ -0,0 +1,199 @@
+#include <linux/bootmem.h>
+#include <linux/kasan.h>
+#include <linux/kdebug.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+
+extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern struct range pfn_mapped[E820_X_MAX];
+
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+
+static int __init map_range(struct range *range)
+{
+	unsigned long start;
+	unsigned long end;
+
+	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
+	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
+
+	/*
+	 * end + 1 here is intentional. We check several shadow bytes in advance
+	 * to slightly speed up fastpath. In some rare cases we could cross
+	 * boundary of mapped shadow, so we just map some more here.
+	 */
+	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+}
+
+static void __init clear_pgds(unsigned long start,
+			unsigned long end)
+{
+	for (; start < end; start += PGDIR_SIZE)
+		pgd_clear(pgd_offset_k(start));
+}
+
+void __init kasan_map_early_shadow(pgd_t *pgd)
+{
+	int i;
+	unsigned long start = KASAN_SHADOW_START;
+	unsigned long end = KASAN_SHADOW_END;
+
+	for (i = pgd_index(start); start < end; i++) {
+		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
+				| _KERNPG_TABLE);
+		start += PGDIR_SIZE;
+	}
+}
+
+static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+				unsigned long end)
+{
+	pte_t *pte = pte_offset_kernel(pmd, addr);
+
+	while (addr + PAGE_SIZE <= end) {
+		WARN_ON(!pte_none(*pte));
+		set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
+					| __PAGE_KERNEL_RO));
+		addr += PAGE_SIZE;
+		pte = pte_offset_kernel(pmd, addr);
+	}
+	return 0;
+}
+
+static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+				unsigned long end)
+{
+	int ret = 0;
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
+		WARN_ON(!pmd_none(*pmd));
+		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
+					| __PAGE_KERNEL_RO));
+		addr += PMD_SIZE;
+		pmd = pmd_offset(pud, addr);
+	}
+	if (addr < end) {
+		if (pmd_none(*pmd)) {
+			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+			if (!p)
+				return -ENOMEM;
+			set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
+		}
+		ret = zero_pte_populate(pmd, addr, end);
+	}
+	return ret;
+}
+
+
+static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+				unsigned long end)
+{
+	int ret = 0;
+	pud_t *pud = pud_offset(pgd, addr);
+
+	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
+		WARN_ON(!pud_none(*pud));
+		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
+					| __PAGE_KERNEL_RO));
+		addr += PUD_SIZE;
+		pud = pud_offset(pgd, addr);
+	}
+
+	if (addr < end) {
+		if (pud_none(*pud)) {
+			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+			if (!p)
+				return -ENOMEM;
+			set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
+		}
+		ret = zero_pmd_populate(pud, addr, end);
+	}
+	return ret;
+}
+
+static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
+{
+	int ret = 0;
+	pgd_t *pgd = pgd_offset_k(addr);
+
+	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
+		WARN_ON(!pgd_none(*pgd));
+		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
+					| __PAGE_KERNEL_RO));
+		addr += PGDIR_SIZE;
+		pgd = pgd_offset_k(addr);
+	}
+
+	if (addr < end) {
+		if (pgd_none(*pgd)) {
+			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
+			if (!p)
+				return -ENOMEM;
+			set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
+		}
+		ret = zero_pud_populate(pgd, addr, end);
+	}
+	return ret;
+}
+
+
+static void __init populate_zero_shadow(const void *start, const void *end)
+{
+	if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
+		panic("kasan: unable to map zero shadow!");
+}
+
+
+#ifdef CONFIG_KASAN_INLINE
+static int kasan_die_handler(struct notifier_block *self,
+			     unsigned long val,
+			     void *data)
+{
+	if (val == DIE_GPF) {
+		pr_emerg("CONFIG_KASAN_INLINE enabled");
+		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block kasan_die_notifier = {
+	.notifier_call = kasan_die_handler,
+};
+#endif
+
+void __init kasan_init(void)
+{
+	int i;
+
+#ifdef CONFIG_KASAN_INLINE
+	register_die_notifier(&kasan_die_notifier);
+#endif
+
+	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
+	load_cr3(early_level4_pgt);
+
+	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+	populate_zero_shadow((void *)KASAN_SHADOW_START,
+			kasan_mem_to_shadow((void *)PAGE_OFFSET));
+
+	for (i = 0; i < E820_X_MAX; i++) {
+		if (pfn_mapped[i].end == 0)
+			break;
+
+		if (map_range(&pfn_mapped[i]))
+			panic("kasan: unable to allocate shadow!");
+	}
+
+	populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+			(void *)KASAN_SHADOW_END);
+
+	memset(kasan_zero_page, 0, PAGE_SIZE);
+
+	load_cr3(init_level4_pgt);
+}
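
kasan_init() above ends up with three shadow sub-ranges: zero shadow from KASAN_SHADOW_START up to the shadow of PAGE_OFFSET, real (vmemmap-populated) shadow for every mapped pfn range, and zero shadow again from the shadow of PAGE_OFFSET + MAXMEM up to KASAN_SHADOW_END. A rough userspace sketch of those boundaries, assuming the classic 4-level PAGE_OFFSET of 0xffff880000000000 and a 64 TB MAXMEM (both assumptions, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_OFFSET	0xdffffc0000000000ULL	/* from lib/Kconfig.kasan */
#define KASAN_SHADOW_START	0xffffec0000000000ULL
#define KASAN_SHADOW_END	0xfffffc0000000000ULL
#define PAGE_OFFSET		0xffff880000000000ULL	/* assumed: classic 4-level direct map base */
#define MAXMEM			(1ULL << 46)		/* assumed: 64 TB */

static uint64_t shadow(uint64_t addr)
{
	return KASAN_SHADOW_OFFSET + (addr >> 3);
}

int main(void)
{
	printf("zero shadow: %#llx - %#llx\n",
	       (unsigned long long)KASAN_SHADOW_START,
	       (unsigned long long)shadow(PAGE_OFFSET));
	printf("real shadow: %#llx - %#llx (only for mapped pfn ranges)\n",
	       (unsigned long long)shadow(PAGE_OFFSET),
	       (unsigned long long)shadow(PAGE_OFFSET + MAXMEM));
	printf("zero shadow: %#llx - %#llx\n",
	       (unsigned long long)shadow(PAGE_OFFSET + MAXMEM),
	       (unsigned long long)KASAN_SHADOW_END);
	return 0;
}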
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 94f7fbe97b08..e02c2c6c56a5 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,7 @@
 # for more details.
 #
 #
-
+KASAN_SANITIZE := n
 subdir- := rm
 
 obj-y += init.o
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 7c0d7be176a5..2730d775ef9a 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,6 +6,7 @@
 # for more details.
 #
 #
+KASAN_SANITIZE := n
 
 always := realmode.bin realmode.relocs
 
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 09297c8e1fcd..7b9be9822724 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -3,6 +3,7 @@
 #
 
 KBUILD_CFLAGS += $(DISABLE_LTO)
+KASAN_SANITIZE := n
 
 VDSO64-$(CONFIG_X86_64) := y
 VDSOX32-$(CONFIG_X86_X32_ABI) := y
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index e5b3fbe5560f..0052b1b9aadd 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -15,6 +15,7 @@ config KASAN
 
 config KASAN_SHADOW_OFFSET
 	hex
+	default 0xdffffc0000000000 if X86_64
 
 choice
 	prompt "Instrumentation type"