diff options
-rw-r--r-- | Documentation/kernel-parameters.txt | 4 | ||||
-rw-r--r-- | arch/x86/Kconfig | 59 | ||||
-rw-r--r-- | arch/x86/boot/Makefile | 2 | ||||
-rw-r--r-- | arch/x86/boot/boot.h | 10 | ||||
-rw-r--r-- | arch/x86/boot/compressed/Makefile | 2 | ||||
-rw-r--r-- | arch/x86/boot/compressed/aslr.c | 316 | ||||
-rw-r--r-- | arch/x86/boot/compressed/cmdline.c | 2 | ||||
-rw-r--r-- | arch/x86/boot/compressed/cpuflags.c | 12 | ||||
-rw-r--r-- | arch/x86/boot/compressed/head_32.S | 10 | ||||
-rw-r--r-- | arch/x86/boot/compressed/head_64.S | 16 | ||||
-rw-r--r-- | arch/x86/boot/compressed/misc.c | 18 | ||||
-rw-r--r-- | arch/x86/boot/compressed/misc.h | 37 | ||||
-rw-r--r-- | arch/x86/boot/cpucheck.c | 100 | ||||
-rw-r--r-- | arch/x86/boot/cpuflags.c | 104 | ||||
-rw-r--r-- | arch/x86/boot/cpuflags.h | 19 | ||||
-rw-r--r-- | arch/x86/include/asm/archrandom.h | 21 | ||||
-rw-r--r-- | arch/x86/include/asm/page_64_types.h | 15 | ||||
-rw-r--r-- | arch/x86/include/asm/pgtable_64_types.h | 2 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/rdrand.c | 14 | ||||
-rw-r--r-- | arch/x86/kernel/setup.c | 26 | ||||
-rw-r--r-- | arch/x86/mm/init_32.c | 3 | ||||
-rw-r--r-- | arch/x86/tools/relocs.c | 20 |
22 files changed, 654 insertions, 158 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 092cfd139065..4252af6ffda1 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2017,6 +2017,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2017 | noapic [SMP,APIC] Tells the kernel to not make use of any | 2017 | noapic [SMP,APIC] Tells the kernel to not make use of any |
2018 | IOAPICs that may be present in the system. | 2018 | IOAPICs that may be present in the system. |
2019 | 2019 | ||
2020 | nokaslr [X86] | ||
2021 | Disable kernel base offset ASLR (Address Space | ||
2022 | Layout Randomization) if built into the kernel. | ||
2023 | |||
2020 | noautogroup Disable scheduler automatic task group creation. | 2024 | noautogroup Disable scheduler automatic task group creation. |
2021 | 2025 | ||
2022 | nobats [PPC] Do not use BATs for mapping kernel lowmem | 2026 | nobats [PPC] Do not use BATs for mapping kernel lowmem |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5216e283820d..cd18b8393400 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1693,16 +1693,67 @@ config RELOCATABLE | |||
1693 | 1693 | ||
1694 | Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address | 1694 | Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address |
1695 | it has been loaded at and the compile time physical address | 1695 | it has been loaded at and the compile time physical address |
1696 | (CONFIG_PHYSICAL_START) is ignored. | 1696 | (CONFIG_PHYSICAL_START) is used as the minimum location. |
1697 | 1697 | ||
1698 | # Relocation on x86-32 needs some additional build support | 1698 | config RANDOMIZE_BASE |
1699 | bool "Randomize the address of the kernel image" | ||
1700 | depends on RELOCATABLE | ||
1701 | depends on !HIBERNATION | ||
1702 | default n | ||
1703 | ---help--- | ||
1704 | Randomizes the physical and virtual address at which the | ||
1705 | kernel image is decompressed, as a security feature that | ||
1706 | deters exploit attempts relying on knowledge of the location | ||
1707 | of kernel internals. | ||
1708 | |||
1709 | Entropy is generated using the RDRAND instruction if it is | ||
1710 | supported. If RDTSC is supported, it is used as well. If | ||
1711 | neither RDRAND nor RDTSC are supported, then randomness is | ||
1712 | read from the i8254 timer. | ||
1713 | |||
1714 | The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET, | ||
1715 | and aligned according to PHYSICAL_ALIGN. Since the kernel is | ||
1716 | built using 2GiB addressing, and PHYSICAL_ALGIN must be at a | ||
1717 | minimum of 2MiB, only 10 bits of entropy is theoretically | ||
1718 | possible. At best, due to page table layouts, 64-bit can use | ||
1719 | 9 bits of entropy and 32-bit uses 8 bits. | ||
1720 | |||
1721 | If unsure, say N. | ||
1722 | |||
1723 | config RANDOMIZE_BASE_MAX_OFFSET | ||
1724 | hex "Maximum kASLR offset allowed" if EXPERT | ||
1725 | depends on RANDOMIZE_BASE | ||
1726 | range 0x0 0x20000000 if X86_32 | ||
1727 | default "0x20000000" if X86_32 | ||
1728 | range 0x0 0x40000000 if X86_64 | ||
1729 | default "0x40000000" if X86_64 | ||
1730 | ---help--- | ||
1731 | The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical | ||
1732 | memory is used to determine the maximal offset in bytes that will | ||
1733 | be applied to the kernel when kernel Address Space Layout | ||
1734 | Randomization (kASLR) is active. This must be a multiple of | ||
1735 | PHYSICAL_ALIGN. | ||
1736 | |||
1737 | On 32-bit this is limited to 512MiB by page table layouts. The | ||
1738 | default is 512MiB. | ||
1739 | |||
1740 | On 64-bit this is limited by how the kernel fixmap page table is | ||
1741 | positioned, so this cannot be larger than 1GiB currently. Without | ||
1742 | RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel | ||
1743 | and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the | ||
1744 | modules area will shrink to compensate, up to the current maximum | ||
1745 | 1GiB to 1GiB split. The default is 1GiB. | ||
1746 | |||
1747 | If unsure, leave at the default value. | ||
1748 | |||
1749 | # Relocation on x86 needs some additional build support | ||
1699 | config X86_NEED_RELOCS | 1750 | config X86_NEED_RELOCS |
1700 | def_bool y | 1751 | def_bool y |
1701 | depends on X86_32 && RELOCATABLE | 1752 | depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE) |
1702 | 1753 | ||
1703 | config PHYSICAL_ALIGN | 1754 | config PHYSICAL_ALIGN |
1704 | hex "Alignment value to which kernel should be aligned" | 1755 | hex "Alignment value to which kernel should be aligned" |
1705 | default "0x1000000" | 1756 | default "0x200000" |
1706 | range 0x2000 0x1000000 if X86_32 | 1757 | range 0x2000 0x1000000 if X86_32 |
1707 | range 0x200000 0x1000000 if X86_64 | 1758 | range 0x200000 0x1000000 if X86_64 |
1708 | ---help--- | 1759 | ---help--- |
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index d9c11956fce0..de7066918005 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
@@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage | |||
20 | targets += fdimage fdimage144 fdimage288 image.iso mtools.conf | 20 | targets += fdimage fdimage144 fdimage288 image.iso mtools.conf |
21 | subdir- := compressed | 21 | subdir- := compressed |
22 | 22 | ||
23 | setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o | 23 | setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o |
24 | setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o | 24 | setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o |
25 | setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o | 25 | setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o |
26 | setup-y += video-mode.o version.o | 26 | setup-y += video-mode.o version.o |
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index ef72baeff484..50f8c5e0f37e 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h | |||
@@ -26,9 +26,8 @@ | |||
26 | #include <asm/boot.h> | 26 | #include <asm/boot.h> |
27 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
28 | #include "bitops.h" | 28 | #include "bitops.h" |
29 | #include <asm/cpufeature.h> | ||
30 | #include <asm/processor-flags.h> | ||
31 | #include "ctype.h" | 29 | #include "ctype.h" |
30 | #include "cpuflags.h" | ||
32 | 31 | ||
33 | /* Useful macros */ | 32 | /* Useful macros */ |
34 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) | 33 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
@@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option) | |||
307 | return __cmdline_find_option_bool(cmd_line_ptr, option); | 306 | return __cmdline_find_option_bool(cmd_line_ptr, option); |
308 | } | 307 | } |
309 | 308 | ||
310 | |||
311 | /* cpu.c, cpucheck.c */ | 309 | /* cpu.c, cpucheck.c */ |
312 | struct cpu_features { | ||
313 | int level; /* Family, or 64 for x86-64 */ | ||
314 | int model; | ||
315 | u32 flags[NCAPINTS]; | ||
316 | }; | ||
317 | extern struct cpu_features cpu; | ||
318 | int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); | 310 | int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); |
319 | int validate_cpu(void); | 311 | int validate_cpu(void); |
320 | 312 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index c8a6792e7842..0fcd9133790c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -28,7 +28,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include | |||
28 | 28 | ||
29 | VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | 29 | VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ |
30 | $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ | 30 | $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ |
31 | $(obj)/piggy.o | 31 | $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o |
32 | 32 | ||
33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
34 | 34 | ||
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c new file mode 100644 index 000000000000..90a21f430117 --- /dev/null +++ b/arch/x86/boot/compressed/aslr.c | |||
@@ -0,0 +1,316 @@ | |||
1 | #include "misc.h" | ||
2 | |||
3 | #ifdef CONFIG_RANDOMIZE_BASE | ||
4 | #include <asm/msr.h> | ||
5 | #include <asm/archrandom.h> | ||
6 | #include <asm/e820.h> | ||
7 | |||
8 | #include <generated/compile.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/uts.h> | ||
11 | #include <linux/utsname.h> | ||
12 | #include <generated/utsrelease.h> | ||
13 | |||
14 | /* Simplified build-specific string for starting entropy. */ | ||
15 | static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" | ||
16 | LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; | ||
17 | |||
18 | #define I8254_PORT_CONTROL 0x43 | ||
19 | #define I8254_PORT_COUNTER0 0x40 | ||
20 | #define I8254_CMD_READBACK 0xC0 | ||
21 | #define I8254_SELECT_COUNTER0 0x02 | ||
22 | #define I8254_STATUS_NOTREADY 0x40 | ||
23 | static inline u16 i8254(void) | ||
24 | { | ||
25 | u16 status, timer; | ||
26 | |||
27 | do { | ||
28 | outb(I8254_PORT_CONTROL, | ||
29 | I8254_CMD_READBACK | I8254_SELECT_COUNTER0); | ||
30 | status = inb(I8254_PORT_COUNTER0); | ||
31 | timer = inb(I8254_PORT_COUNTER0); | ||
32 | timer |= inb(I8254_PORT_COUNTER0) << 8; | ||
33 | } while (status & I8254_STATUS_NOTREADY); | ||
34 | |||
35 | return timer; | ||
36 | } | ||
37 | |||
38 | static unsigned long rotate_xor(unsigned long hash, const void *area, | ||
39 | size_t size) | ||
40 | { | ||
41 | size_t i; | ||
42 | unsigned long *ptr = (unsigned long *)area; | ||
43 | |||
44 | for (i = 0; i < size / sizeof(hash); i++) { | ||
45 | /* Rotate by odd number of bits and XOR. */ | ||
46 | hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); | ||
47 | hash ^= ptr[i]; | ||
48 | } | ||
49 | |||
50 | return hash; | ||
51 | } | ||
52 | |||
53 | /* Attempt to create a simple but unpredictable starting entropy. */ | ||
54 | static unsigned long get_random_boot(void) | ||
55 | { | ||
56 | unsigned long hash = 0; | ||
57 | |||
58 | hash = rotate_xor(hash, build_str, sizeof(build_str)); | ||
59 | hash = rotate_xor(hash, real_mode, sizeof(*real_mode)); | ||
60 | |||
61 | return hash; | ||
62 | } | ||
63 | |||
64 | static unsigned long get_random_long(void) | ||
65 | { | ||
66 | #ifdef CONFIG_X86_64 | ||
67 | const unsigned long mix_const = 0x5d6008cbf3848dd3UL; | ||
68 | #else | ||
69 | const unsigned long mix_const = 0x3f39e593UL; | ||
70 | #endif | ||
71 | unsigned long raw, random = get_random_boot(); | ||
72 | bool use_i8254 = true; | ||
73 | |||
74 | debug_putstr("KASLR using"); | ||
75 | |||
76 | if (has_cpuflag(X86_FEATURE_RDRAND)) { | ||
77 | debug_putstr(" RDRAND"); | ||
78 | if (rdrand_long(&raw)) { | ||
79 | random ^= raw; | ||
80 | use_i8254 = false; | ||
81 | } | ||
82 | } | ||
83 | |||
84 | if (has_cpuflag(X86_FEATURE_TSC)) { | ||
85 | debug_putstr(" RDTSC"); | ||
86 | rdtscll(raw); | ||
87 | |||
88 | random ^= raw; | ||
89 | use_i8254 = false; | ||
90 | } | ||
91 | |||
92 | if (use_i8254) { | ||
93 | debug_putstr(" i8254"); | ||
94 | random ^= i8254(); | ||
95 | } | ||
96 | |||
97 | /* Circular multiply for better bit diffusion */ | ||
98 | asm("mul %3" | ||
99 | : "=a" (random), "=d" (raw) | ||
100 | : "a" (random), "rm" (mix_const)); | ||
101 | random += raw; | ||
102 | |||
103 | debug_putstr("...\n"); | ||
104 | |||
105 | return random; | ||
106 | } | ||
107 | |||
108 | struct mem_vector { | ||
109 | unsigned long start; | ||
110 | unsigned long size; | ||
111 | }; | ||
112 | |||
113 | #define MEM_AVOID_MAX 5 | ||
114 | struct mem_vector mem_avoid[MEM_AVOID_MAX]; | ||
115 | |||
116 | static bool mem_contains(struct mem_vector *region, struct mem_vector *item) | ||
117 | { | ||
118 | /* Item at least partially before region. */ | ||
119 | if (item->start < region->start) | ||
120 | return false; | ||
121 | /* Item at least partially after region. */ | ||
122 | if (item->start + item->size > region->start + region->size) | ||
123 | return false; | ||
124 | return true; | ||
125 | } | ||
126 | |||
127 | static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two) | ||
128 | { | ||
129 | /* Item one is entirely before item two. */ | ||
130 | if (one->start + one->size <= two->start) | ||
131 | return false; | ||
132 | /* Item one is entirely after item two. */ | ||
133 | if (one->start >= two->start + two->size) | ||
134 | return false; | ||
135 | return true; | ||
136 | } | ||
137 | |||
138 | static void mem_avoid_init(unsigned long input, unsigned long input_size, | ||
139 | unsigned long output, unsigned long output_size) | ||
140 | { | ||
141 | u64 initrd_start, initrd_size; | ||
142 | u64 cmd_line, cmd_line_size; | ||
143 | unsigned long unsafe, unsafe_len; | ||
144 | char *ptr; | ||
145 | |||
146 | /* | ||
147 | * Avoid the region that is unsafe to overlap during | ||
148 | * decompression (see calculations at top of misc.c). | ||
149 | */ | ||
150 | unsafe_len = (output_size >> 12) + 32768 + 18; | ||
151 | unsafe = (unsigned long)input + input_size - unsafe_len; | ||
152 | mem_avoid[0].start = unsafe; | ||
153 | mem_avoid[0].size = unsafe_len; | ||
154 | |||
155 | /* Avoid initrd. */ | ||
156 | initrd_start = (u64)real_mode->ext_ramdisk_image << 32; | ||
157 | initrd_start |= real_mode->hdr.ramdisk_image; | ||
158 | initrd_size = (u64)real_mode->ext_ramdisk_size << 32; | ||
159 | initrd_size |= real_mode->hdr.ramdisk_size; | ||
160 | mem_avoid[1].start = initrd_start; | ||
161 | mem_avoid[1].size = initrd_size; | ||
162 | |||
163 | /* Avoid kernel command line. */ | ||
164 | cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32; | ||
165 | cmd_line |= real_mode->hdr.cmd_line_ptr; | ||
166 | /* Calculate size of cmd_line. */ | ||
167 | ptr = (char *)(unsigned long)cmd_line; | ||
168 | for (cmd_line_size = 0; ptr[cmd_line_size++]; ) | ||
169 | ; | ||
170 | mem_avoid[2].start = cmd_line; | ||
171 | mem_avoid[2].size = cmd_line_size; | ||
172 | |||
173 | /* Avoid heap memory. */ | ||
174 | mem_avoid[3].start = (unsigned long)free_mem_ptr; | ||
175 | mem_avoid[3].size = BOOT_HEAP_SIZE; | ||
176 | |||
177 | /* Avoid stack memory. */ | ||
178 | mem_avoid[4].start = (unsigned long)free_mem_end_ptr; | ||
179 | mem_avoid[4].size = BOOT_STACK_SIZE; | ||
180 | } | ||
181 | |||
182 | /* Does this memory vector overlap a known avoided area? */ | ||
183 | bool mem_avoid_overlap(struct mem_vector *img) | ||
184 | { | ||
185 | int i; | ||
186 | |||
187 | for (i = 0; i < MEM_AVOID_MAX; i++) { | ||
188 | if (mem_overlaps(img, &mem_avoid[i])) | ||
189 | return true; | ||
190 | } | ||
191 | |||
192 | return false; | ||
193 | } | ||
194 | |||
195 | unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; | ||
196 | unsigned long slot_max = 0; | ||
197 | |||
198 | static void slots_append(unsigned long addr) | ||
199 | { | ||
200 | /* Overflowing the slots list should be impossible. */ | ||
201 | if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET / | ||
202 | CONFIG_PHYSICAL_ALIGN) | ||
203 | return; | ||
204 | |||
205 | slots[slot_max++] = addr; | ||
206 | } | ||
207 | |||
208 | static unsigned long slots_fetch_random(void) | ||
209 | { | ||
210 | /* Handle case of no slots stored. */ | ||
211 | if (slot_max == 0) | ||
212 | return 0; | ||
213 | |||
214 | return slots[get_random_long() % slot_max]; | ||
215 | } | ||
216 | |||
217 | static void process_e820_entry(struct e820entry *entry, | ||
218 | unsigned long minimum, | ||
219 | unsigned long image_size) | ||
220 | { | ||
221 | struct mem_vector region, img; | ||
222 | |||
223 | /* Skip non-RAM entries. */ | ||
224 | if (entry->type != E820_RAM) | ||
225 | return; | ||
226 | |||
227 | /* Ignore entries entirely above our maximum. */ | ||
228 | if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET) | ||
229 | return; | ||
230 | |||
231 | /* Ignore entries entirely below our minimum. */ | ||
232 | if (entry->addr + entry->size < minimum) | ||
233 | return; | ||
234 | |||
235 | region.start = entry->addr; | ||
236 | region.size = entry->size; | ||
237 | |||
238 | /* Potentially raise address to minimum location. */ | ||
239 | if (region.start < minimum) | ||
240 | region.start = minimum; | ||
241 | |||
242 | /* Potentially raise address to meet alignment requirements. */ | ||
243 | region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); | ||
244 | |||
245 | /* Did we raise the address above the bounds of this e820 region? */ | ||
246 | if (region.start > entry->addr + entry->size) | ||
247 | return; | ||
248 | |||
249 | /* Reduce size by any delta from the original address. */ | ||
250 | region.size -= region.start - entry->addr; | ||
251 | |||
252 | /* Reduce maximum size to fit end of image within maximum limit. */ | ||
253 | if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET) | ||
254 | region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start; | ||
255 | |||
256 | /* Walk each aligned slot and check for avoided areas. */ | ||
257 | for (img.start = region.start, img.size = image_size ; | ||
258 | mem_contains(®ion, &img) ; | ||
259 | img.start += CONFIG_PHYSICAL_ALIGN) { | ||
260 | if (mem_avoid_overlap(&img)) | ||
261 | continue; | ||
262 | slots_append(img.start); | ||
263 | } | ||
264 | } | ||
265 | |||
266 | static unsigned long find_random_addr(unsigned long minimum, | ||
267 | unsigned long size) | ||
268 | { | ||
269 | int i; | ||
270 | unsigned long addr; | ||
271 | |||
272 | /* Make sure minimum is aligned. */ | ||
273 | minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); | ||
274 | |||
275 | /* Verify potential e820 positions, appending to slots list. */ | ||
276 | for (i = 0; i < real_mode->e820_entries; i++) { | ||
277 | process_e820_entry(&real_mode->e820_map[i], minimum, size); | ||
278 | } | ||
279 | |||
280 | return slots_fetch_random(); | ||
281 | } | ||
282 | |||
283 | unsigned char *choose_kernel_location(unsigned char *input, | ||
284 | unsigned long input_size, | ||
285 | unsigned char *output, | ||
286 | unsigned long output_size) | ||
287 | { | ||
288 | unsigned long choice = (unsigned long)output; | ||
289 | unsigned long random; | ||
290 | |||
291 | if (cmdline_find_option_bool("nokaslr")) { | ||
292 | debug_putstr("KASLR disabled...\n"); | ||
293 | goto out; | ||
294 | } | ||
295 | |||
296 | /* Record the various known unsafe memory ranges. */ | ||
297 | mem_avoid_init((unsigned long)input, input_size, | ||
298 | (unsigned long)output, output_size); | ||
299 | |||
300 | /* Walk e820 and find a random address. */ | ||
301 | random = find_random_addr(choice, output_size); | ||
302 | if (!random) { | ||
303 | debug_putstr("KASLR could not find suitable E820 region...\n"); | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | /* Always enforce the minimum. */ | ||
308 | if (random < choice) | ||
309 | goto out; | ||
310 | |||
311 | choice = random; | ||
312 | out: | ||
313 | return (unsigned char *)choice; | ||
314 | } | ||
315 | |||
316 | #endif /* CONFIG_RANDOMIZE_BASE */ | ||
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c index bffd73b45b1f..b68e3033e6b9 100644 --- a/arch/x86/boot/compressed/cmdline.c +++ b/arch/x86/boot/compressed/cmdline.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include "misc.h" | 1 | #include "misc.h" |
2 | 2 | ||
3 | #ifdef CONFIG_EARLY_PRINTK | 3 | #if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE |
4 | 4 | ||
5 | static unsigned long fs; | 5 | static unsigned long fs; |
6 | static inline void set_fs(unsigned long seg) | 6 | static inline void set_fs(unsigned long seg) |
diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c new file mode 100644 index 000000000000..aa313466118b --- /dev/null +++ b/arch/x86/boot/compressed/cpuflags.c | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifdef CONFIG_RANDOMIZE_BASE | ||
2 | |||
3 | #include "../cpuflags.c" | ||
4 | |||
5 | bool has_cpuflag(int flag) | ||
6 | { | ||
7 | get_cpuflags(); | ||
8 | |||
9 | return test_bit(flag, cpu.flags); | ||
10 | } | ||
11 | |||
12 | #endif | ||
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 5d6f6891b188..9116aac232c7 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
@@ -117,9 +117,11 @@ preferred_addr: | |||
117 | addl %eax, %ebx | 117 | addl %eax, %ebx |
118 | notl %eax | 118 | notl %eax |
119 | andl %eax, %ebx | 119 | andl %eax, %ebx |
120 | #else | 120 | cmpl $LOAD_PHYSICAL_ADDR, %ebx |
121 | movl $LOAD_PHYSICAL_ADDR, %ebx | 121 | jge 1f |
122 | #endif | 122 | #endif |
123 | movl $LOAD_PHYSICAL_ADDR, %ebx | ||
124 | 1: | ||
123 | 125 | ||
124 | /* Target address to relocate to for decompression */ | 126 | /* Target address to relocate to for decompression */ |
125 | addl $z_extract_offset, %ebx | 127 | addl $z_extract_offset, %ebx |
@@ -191,14 +193,14 @@ relocated: | |||
191 | leal boot_heap(%ebx), %eax | 193 | leal boot_heap(%ebx), %eax |
192 | pushl %eax /* heap area */ | 194 | pushl %eax /* heap area */ |
193 | pushl %esi /* real mode pointer */ | 195 | pushl %esi /* real mode pointer */ |
194 | call decompress_kernel | 196 | call decompress_kernel /* returns kernel location in %eax */ |
195 | addl $24, %esp | 197 | addl $24, %esp |
196 | 198 | ||
197 | /* | 199 | /* |
198 | * Jump to the decompressed kernel. | 200 | * Jump to the decompressed kernel. |
199 | */ | 201 | */ |
200 | xorl %ebx, %ebx | 202 | xorl %ebx, %ebx |
201 | jmp *%ebp | 203 | jmp *%eax |
202 | 204 | ||
203 | /* | 205 | /* |
204 | * Stack and heap for uncompression | 206 | * Stack and heap for uncompression |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index c337422b575d..c5c1ae0997e7 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -94,9 +94,11 @@ ENTRY(startup_32) | |||
94 | addl %eax, %ebx | 94 | addl %eax, %ebx |
95 | notl %eax | 95 | notl %eax |
96 | andl %eax, %ebx | 96 | andl %eax, %ebx |
97 | #else | 97 | cmpl $LOAD_PHYSICAL_ADDR, %ebx |
98 | movl $LOAD_PHYSICAL_ADDR, %ebx | 98 | jge 1f |
99 | #endif | 99 | #endif |
100 | movl $LOAD_PHYSICAL_ADDR, %ebx | ||
101 | 1: | ||
100 | 102 | ||
101 | /* Target address to relocate to for decompression */ | 103 | /* Target address to relocate to for decompression */ |
102 | addl $z_extract_offset, %ebx | 104 | addl $z_extract_offset, %ebx |
@@ -269,9 +271,11 @@ preferred_addr: | |||
269 | addq %rax, %rbp | 271 | addq %rax, %rbp |
270 | notq %rax | 272 | notq %rax |
271 | andq %rax, %rbp | 273 | andq %rax, %rbp |
272 | #else | 274 | cmpq $LOAD_PHYSICAL_ADDR, %rbp |
273 | movq $LOAD_PHYSICAL_ADDR, %rbp | 275 | jge 1f |
274 | #endif | 276 | #endif |
277 | movq $LOAD_PHYSICAL_ADDR, %rbp | ||
278 | 1: | ||
275 | 279 | ||
276 | /* Target address to relocate to for decompression */ | 280 | /* Target address to relocate to for decompression */ |
277 | leaq z_extract_offset(%rbp), %rbx | 281 | leaq z_extract_offset(%rbp), %rbx |
@@ -339,13 +343,13 @@ relocated: | |||
339 | movl $z_input_len, %ecx /* input_len */ | 343 | movl $z_input_len, %ecx /* input_len */ |
340 | movq %rbp, %r8 /* output target address */ | 344 | movq %rbp, %r8 /* output target address */ |
341 | movq $z_output_len, %r9 /* decompressed length */ | 345 | movq $z_output_len, %r9 /* decompressed length */ |
342 | call decompress_kernel | 346 | call decompress_kernel /* returns kernel location in %rax */ |
343 | popq %rsi | 347 | popq %rsi |
344 | 348 | ||
345 | /* | 349 | /* |
346 | * Jump to the decompressed kernel. | 350 | * Jump to the decompressed kernel. |
347 | */ | 351 | */ |
348 | jmp *%rbp | 352 | jmp *%rax |
349 | 353 | ||
350 | .code32 | 354 | .code32 |
351 | no_longmode: | 355 | no_longmode: |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 434f077d2c4d..196eaf373a06 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */ | |||
112 | void *memset(void *s, int c, size_t n); | 112 | void *memset(void *s, int c, size_t n); |
113 | void *memcpy(void *dest, const void *src, size_t n); | 113 | void *memcpy(void *dest, const void *src, size_t n); |
114 | 114 | ||
115 | #ifdef CONFIG_X86_64 | 115 | memptr free_mem_ptr; |
116 | #define memptr long | 116 | memptr free_mem_end_ptr; |
117 | #else | ||
118 | #define memptr unsigned | ||
119 | #endif | ||
120 | |||
121 | static memptr free_mem_ptr; | ||
122 | static memptr free_mem_end_ptr; | ||
123 | 117 | ||
124 | static char *vidmem; | 118 | static char *vidmem; |
125 | static int vidport; | 119 | static int vidport; |
@@ -395,7 +389,7 @@ static void parse_elf(void *output) | |||
395 | free(phdrs); | 389 | free(phdrs); |
396 | } | 390 | } |
397 | 391 | ||
398 | asmlinkage void decompress_kernel(void *rmode, memptr heap, | 392 | asmlinkage void *decompress_kernel(void *rmode, memptr heap, |
399 | unsigned char *input_data, | 393 | unsigned char *input_data, |
400 | unsigned long input_len, | 394 | unsigned long input_len, |
401 | unsigned char *output, | 395 | unsigned char *output, |
@@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
422 | free_mem_ptr = heap; /* Heap */ | 416 | free_mem_ptr = heap; /* Heap */ |
423 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; | 417 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; |
424 | 418 | ||
419 | output = choose_kernel_location(input_data, input_len, | ||
420 | output, output_len); | ||
421 | |||
422 | /* Validate memory location choices. */ | ||
425 | if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) | 423 | if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) |
426 | error("Destination address inappropriately aligned"); | 424 | error("Destination address inappropriately aligned"); |
427 | #ifdef CONFIG_X86_64 | 425 | #ifdef CONFIG_X86_64 |
@@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
441 | parse_elf(output); | 439 | parse_elf(output); |
442 | handle_relocations(output, output_len); | 440 | handle_relocations(output, output_len); |
443 | debug_putstr("done.\nBooting the kernel.\n"); | 441 | debug_putstr("done.\nBooting the kernel.\n"); |
444 | return; | 442 | return output; |
445 | } | 443 | } |
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 674019d8e235..24e3e569a13c 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
@@ -23,7 +23,15 @@ | |||
23 | #define BOOT_BOOT_H | 23 | #define BOOT_BOOT_H |
24 | #include "../ctype.h" | 24 | #include "../ctype.h" |
25 | 25 | ||
26 | #ifdef CONFIG_X86_64 | ||
27 | #define memptr long | ||
28 | #else | ||
29 | #define memptr unsigned | ||
30 | #endif | ||
31 | |||
26 | /* misc.c */ | 32 | /* misc.c */ |
33 | extern memptr free_mem_ptr; | ||
34 | extern memptr free_mem_end_ptr; | ||
27 | extern struct boot_params *real_mode; /* Pointer to real-mode data */ | 35 | extern struct boot_params *real_mode; /* Pointer to real-mode data */ |
28 | void __putstr(const char *s); | 36 | void __putstr(const char *s); |
29 | #define error_putstr(__x) __putstr(__x) | 37 | #define error_putstr(__x) __putstr(__x) |
@@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s) | |||
39 | 47 | ||
40 | #endif | 48 | #endif |
41 | 49 | ||
42 | #ifdef CONFIG_EARLY_PRINTK | 50 | #if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE |
43 | |||
44 | /* cmdline.c */ | 51 | /* cmdline.c */ |
45 | int cmdline_find_option(const char *option, char *buffer, int bufsize); | 52 | int cmdline_find_option(const char *option, char *buffer, int bufsize); |
46 | int cmdline_find_option_bool(const char *option); | 53 | int cmdline_find_option_bool(const char *option); |
54 | #endif | ||
47 | 55 | ||
48 | /* early_serial_console.c */ | ||
49 | extern int early_serial_base; | ||
50 | void console_init(void); | ||
51 | 56 | ||
57 | #if CONFIG_RANDOMIZE_BASE | ||
58 | /* aslr.c */ | ||
59 | unsigned char *choose_kernel_location(unsigned char *input, | ||
60 | unsigned long input_size, | ||
61 | unsigned char *output, | ||
62 | unsigned long output_size); | ||
63 | /* cpuflags.c */ | ||
64 | bool has_cpuflag(int flag); | ||
52 | #else | 65 | #else |
66 | static inline | ||
67 | unsigned char *choose_kernel_location(unsigned char *input, | ||
68 | unsigned long input_size, | ||
69 | unsigned char *output, | ||
70 | unsigned long output_size) | ||
71 | { | ||
72 | return output; | ||
73 | } | ||
74 | #endif | ||
53 | 75 | ||
76 | #ifdef CONFIG_EARLY_PRINTK | ||
54 | /* early_serial_console.c */ | 77 | /* early_serial_console.c */ |
78 | extern int early_serial_base; | ||
79 | void console_init(void); | ||
80 | #else | ||
55 | static const int early_serial_base; | 81 | static const int early_serial_base; |
56 | static inline void console_init(void) | 82 | static inline void console_init(void) |
57 | { } | 83 | { } |
58 | |||
59 | #endif | 84 | #endif |
60 | 85 | ||
61 | #endif | 86 | #endif |
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4d3ff037201f..100a9a10076a 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <asm/required-features.h> | 28 | #include <asm/required-features.h> |
29 | #include <asm/msr-index.h> | 29 | #include <asm/msr-index.h> |
30 | 30 | ||
31 | struct cpu_features cpu; | ||
32 | static u32 cpu_vendor[3]; | ||
33 | static u32 err_flags[NCAPINTS]; | 31 | static u32 err_flags[NCAPINTS]; |
34 | 32 | ||
35 | static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; | 33 | static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; |
@@ -69,92 +67,8 @@ static int is_transmeta(void) | |||
69 | cpu_vendor[2] == A32('M', 'x', '8', '6'); | 67 | cpu_vendor[2] == A32('M', 'x', '8', '6'); |
70 | } | 68 | } |
71 | 69 | ||
72 | static int has_fpu(void) | ||
73 | { | ||
74 | u16 fcw = -1, fsw = -1; | ||
75 | u32 cr0; | ||
76 | |||
77 | asm("movl %%cr0,%0" : "=r" (cr0)); | ||
78 | if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { | ||
79 | cr0 &= ~(X86_CR0_EM|X86_CR0_TS); | ||
80 | asm volatile("movl %0,%%cr0" : : "r" (cr0)); | ||
81 | } | ||
82 | |||
83 | asm volatile("fninit ; fnstsw %0 ; fnstcw %1" | ||
84 | : "+m" (fsw), "+m" (fcw)); | ||
85 | |||
86 | return fsw == 0 && (fcw & 0x103f) == 0x003f; | ||
87 | } | ||
88 | |||
89 | static int has_eflag(u32 mask) | ||
90 | { | ||
91 | u32 f0, f1; | ||
92 | |||
93 | asm("pushfl ; " | ||
94 | "pushfl ; " | ||
95 | "popl %0 ; " | ||
96 | "movl %0,%1 ; " | ||
97 | "xorl %2,%1 ; " | ||
98 | "pushl %1 ; " | ||
99 | "popfl ; " | ||
100 | "pushfl ; " | ||
101 | "popl %1 ; " | ||
102 | "popfl" | ||
103 | : "=&r" (f0), "=&r" (f1) | ||
104 | : "ri" (mask)); | ||
105 | |||
106 | return !!((f0^f1) & mask); | ||
107 | } | ||
108 | |||
109 | static void get_flags(void) | ||
110 | { | ||
111 | u32 max_intel_level, max_amd_level; | ||
112 | u32 tfms; | ||
113 | |||
114 | if (has_fpu()) | ||
115 | set_bit(X86_FEATURE_FPU, cpu.flags); | ||
116 | |||
117 | if (has_eflag(X86_EFLAGS_ID)) { | ||
118 | asm("cpuid" | ||
119 | : "=a" (max_intel_level), | ||
120 | "=b" (cpu_vendor[0]), | ||
121 | "=d" (cpu_vendor[1]), | ||
122 | "=c" (cpu_vendor[2]) | ||
123 | : "a" (0)); | ||
124 | |||
125 | if (max_intel_level >= 0x00000001 && | ||
126 | max_intel_level <= 0x0000ffff) { | ||
127 | asm("cpuid" | ||
128 | : "=a" (tfms), | ||
129 | "=c" (cpu.flags[4]), | ||
130 | "=d" (cpu.flags[0]) | ||
131 | : "a" (0x00000001) | ||
132 | : "ebx"); | ||
133 | cpu.level = (tfms >> 8) & 15; | ||
134 | cpu.model = (tfms >> 4) & 15; | ||
135 | if (cpu.level >= 6) | ||
136 | cpu.model += ((tfms >> 16) & 0xf) << 4; | ||
137 | } | ||
138 | |||
139 | asm("cpuid" | ||
140 | : "=a" (max_amd_level) | ||
141 | : "a" (0x80000000) | ||
142 | : "ebx", "ecx", "edx"); | ||
143 | |||
144 | if (max_amd_level >= 0x80000001 && | ||
145 | max_amd_level <= 0x8000ffff) { | ||
146 | u32 eax = 0x80000001; | ||
147 | asm("cpuid" | ||
148 | : "+a" (eax), | ||
149 | "=c" (cpu.flags[6]), | ||
150 | "=d" (cpu.flags[1]) | ||
151 | : : "ebx"); | ||
152 | } | ||
153 | } | ||
154 | } | ||
155 | |||
156 | /* Returns a bitmask of which words we have error bits in */ | 70 | /* Returns a bitmask of which words we have error bits in */ |
157 | static int check_flags(void) | 71 | static int check_cpuflags(void) |
158 | { | 72 | { |
159 | u32 err; | 73 | u32 err; |
160 | int i; | 74 | int i; |
@@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
187 | if (has_eflag(X86_EFLAGS_AC)) | 101 | if (has_eflag(X86_EFLAGS_AC)) |
188 | cpu.level = 4; | 102 | cpu.level = 4; |
189 | 103 | ||
190 | get_flags(); | 104 | get_cpuflags(); |
191 | err = check_flags(); | 105 | err = check_cpuflags(); |
192 | 106 | ||
193 | if (test_bit(X86_FEATURE_LM, cpu.flags)) | 107 | if (test_bit(X86_FEATURE_LM, cpu.flags)) |
194 | cpu.level = 64; | 108 | cpu.level = 64; |
@@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
207 | eax &= ~(1 << 15); | 121 | eax &= ~(1 << 15); |
208 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); | 122 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); |
209 | 123 | ||
210 | get_flags(); /* Make sure it really did something */ | 124 | get_cpuflags(); /* Make sure it really did something */ |
211 | err = check_flags(); | 125 | err = check_cpuflags(); |
212 | } else if (err == 0x01 && | 126 | } else if (err == 0x01 && |
213 | !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && | 127 | !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && |
214 | is_centaur() && cpu.model >= 6) { | 128 | is_centaur() && cpu.model >= 6) { |
@@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
223 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); | 137 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); |
224 | 138 | ||
225 | set_bit(X86_FEATURE_CX8, cpu.flags); | 139 | set_bit(X86_FEATURE_CX8, cpu.flags); |
226 | err = check_flags(); | 140 | err = check_cpuflags(); |
227 | } else if (err == 0x01 && is_transmeta()) { | 141 | } else if (err == 0x01 && is_transmeta()) { |
228 | /* Transmeta might have masked feature bits in word 0 */ | 142 | /* Transmeta might have masked feature bits in word 0 */ |
229 | 143 | ||
@@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
238 | : : "ecx", "ebx"); | 152 | : : "ecx", "ebx"); |
239 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); | 153 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); |
240 | 154 | ||
241 | err = check_flags(); | 155 | err = check_cpuflags(); |
242 | } | 156 | } |
243 | 157 | ||
244 | if (err_flags_ptr) | 158 | if (err_flags_ptr) |
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c new file mode 100644 index 000000000000..a9fcb7cfb241 --- /dev/null +++ b/arch/x86/boot/cpuflags.c | |||
@@ -0,0 +1,104 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include "bitops.h" | ||
3 | |||
4 | #include <asm/processor-flags.h> | ||
5 | #include <asm/required-features.h> | ||
6 | #include <asm/msr-index.h> | ||
7 | #include "cpuflags.h" | ||
8 | |||
/* CPU identity filled in by get_cpuflags(); shared with cpucheck.c. */
struct cpu_features cpu;
/* CPUID leaf 0 vendor registers, stored as EBX, EDX, ECX (see get_cpuflags()). */
u32 cpu_vendor[3];

/* Set after the first get_cpuflags() call so the probing runs only once. */
static bool loaded_flags;
13 | |||
/*
 * Detect an x87 FPU by executing FPU instructions directly rather
 * than trusting any feature flags. Must run in ring 0 (writes CR0).
 */
static int has_fpu(void)
{
	/* Preload with -1: if the fnstsw/fnstcw stores have no effect,
	 * both stay 0xffff and the check below fails. */
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		/* Clear EM (emulate) and TS (task switched) so FPU
		 * instructions execute instead of trapping. */
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	/* After fninit a real FPU reports status 0 and control 0x37f;
	 * masking with 0x103f expects exactly 0x003f. */
	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
30 | |||
/*
 * Return nonzero if the EFLAGS bits in @mask can be toggled, i.e. the
 * CPU actually implements them (classic test for the ID bit to probe
 * CPUID support). The original flags are restored by the final popf.
 * Operand-size-generic pushf/popf/mov work for both 32- and 64-bit.
 */
int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile("pushf	\n\t"	/* save original flags */
		     "pushf	\n\t"	/* copy to read into f0 */
		     "pop %0	\n\t"
		     "mov %0,%1	\n\t"
		     "xor %2,%1	\n\t"	/* flip the requested bits */
		     "push %1	\n\t"
		     "popf	\n\t"	/* try to load the flipped value */
		     "pushf	\n\t"
		     "pop %1	\n\t"	/* read back what stuck */
		     "popf"		/* restore original flags */
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	/* Nonzero iff at least one masked bit actually changed. */
	return !!((f0^f1) & mask);
}
50 | |||
/*
 * Handle x86_32 PIC using ebx: when compiling 32-bit PIC code, %ebx
 * holds the GOT pointer and cannot be named as an asm output, so let
 * the compiler pick any register instead.
 */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

/*
 * Execute CPUID leaf @id and return the four result registers.
 * The .ifnc directives emit the mov/xchg only when the compiler's
 * chosen register for %3 is not %ebx itself, preserving %ebx across
 * the cpuid in the PIC case.
 */
static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile(".ifnc %%ebx,%3 ; movl  %%ebx,%3 ; .endif	\n\t"
		     "cpuid					\n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif	\n\t"
		    : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		    : "a" (id)
	);
}
67 | |||
68 | void get_cpuflags(void) | ||
69 | { | ||
70 | u32 max_intel_level, max_amd_level; | ||
71 | u32 tfms; | ||
72 | u32 ignored; | ||
73 | |||
74 | if (loaded_flags) | ||
75 | return; | ||
76 | loaded_flags = true; | ||
77 | |||
78 | if (has_fpu()) | ||
79 | set_bit(X86_FEATURE_FPU, cpu.flags); | ||
80 | |||
81 | if (has_eflag(X86_EFLAGS_ID)) { | ||
82 | cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2], | ||
83 | &cpu_vendor[1]); | ||
84 | |||
85 | if (max_intel_level >= 0x00000001 && | ||
86 | max_intel_level <= 0x0000ffff) { | ||
87 | cpuid(0x1, &tfms, &ignored, &cpu.flags[4], | ||
88 | &cpu.flags[0]); | ||
89 | cpu.level = (tfms >> 8) & 15; | ||
90 | cpu.model = (tfms >> 4) & 15; | ||
91 | if (cpu.level >= 6) | ||
92 | cpu.model += ((tfms >> 16) & 0xf) << 4; | ||
93 | } | ||
94 | |||
95 | cpuid(0x80000000, &max_amd_level, &ignored, &ignored, | ||
96 | &ignored); | ||
97 | |||
98 | if (max_amd_level >= 0x80000001 && | ||
99 | max_amd_level <= 0x8000ffff) { | ||
100 | cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6], | ||
101 | &cpu.flags[1]); | ||
102 | } | ||
103 | } | ||
104 | } | ||
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h new file mode 100644 index 000000000000..ea97697e51e4 --- /dev/null +++ b/arch/x86/boot/cpuflags.h | |||
@@ -0,0 +1,19 @@ | |||
#ifndef BOOT_CPUFLAGS_H
#define BOOT_CPUFLAGS_H

#include <asm/cpufeature.h>
#include <asm/processor-flags.h>

/* CPU identity collected by get_cpuflags() in cpuflags.c. */
struct cpu_features {
	int level;		/* Family, or 64 for x86-64 */
	int model;
	u32 flags[NCAPINTS];	/* X86_FEATURE_* bitmap words */
};

extern struct cpu_features cpu;
/* CPUID leaf 0 vendor registers (EBX, EDX, ECX order). */
extern u32 cpu_vendor[3];

/* Nonzero if the EFLAGS bits in mask are toggleable on this CPU. */
int has_eflag(unsigned long mask);
/* Populate cpu and cpu_vendor; idempotent after the first call. */
void get_cpuflags(void);

#endif
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 0d9ec770f2f8..e6a92455740e 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h | |||
@@ -39,6 +39,20 @@ | |||
39 | 39 | ||
40 | #ifdef CONFIG_ARCH_RANDOM | 40 | #ifdef CONFIG_ARCH_RANDOM |
41 | 41 | ||
/* Instead of arch_get_random_long() when alternatives haven't run. */
static inline int rdrand_long(unsigned long *v)
{
	/*
	 * RDRAND sets CF on success. Retry up to RDRAND_RETRY_LOOPS
	 * times; "ok" counts down from that and is nonzero exactly when
	 * the jc exit was taken, i.e. *v holds a valid random value.
	 */
	int ok;
	asm volatile("1: " RDRAND_LONG "\n\t"
		     "jc 2f\n\t"
		     "decl %0\n\t"
		     "jnz 1b\n\t"
		     "2:"
		     : "=r" (ok), "=a" (*v)
		     : "0" (RDRAND_RETRY_LOOPS));
	return ok;	/* nonzero on success, 0 if all retries failed */
}
55 | |||
42 | #define GET_RANDOM(name, type, rdrand, nop) \ | 56 | #define GET_RANDOM(name, type, rdrand, nop) \ |
43 | static inline int name(type *v) \ | 57 | static inline int name(type *v) \ |
44 | { \ | 58 | { \ |
@@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); | |||
68 | 82 | ||
69 | #endif /* CONFIG_X86_64 */ | 83 | #endif /* CONFIG_X86_64 */ |
70 | 84 | ||
85 | #else | ||
86 | |||
/*
 * CONFIG_ARCH_RANDOM is off: always report failure (0) so callers
 * take their non-RDRAND fallback path; *v is left untouched.
 */
static inline int rdrand_long(unsigned long *v)
{
	return 0;
}
91 | |||
71 | #endif /* CONFIG_ARCH_RANDOM */ | 92 | #endif /* CONFIG_ARCH_RANDOM */ |
72 | 93 | ||
73 | extern void x86_init_rdrand(struct cpuinfo_x86 *c); | 94 | extern void x86_init_rdrand(struct cpuinfo_x86 *c); |
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 43dcd804ebd5..8de6d9cf3b95 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h | |||
@@ -39,9 +39,18 @@ | |||
39 | #define __VIRTUAL_MASK_SHIFT 47 | 39 | #define __VIRTUAL_MASK_SHIFT 47 |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Kernel image size is limited to 512 MB (see level2_kernel_pgt in | 42 | * Kernel image size is limited to 1GiB due to the fixmap living in the |
43 | * arch/x86/kernel/head_64.S), and it is mapped here: | 43 | * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use |
44 | * 512MiB by default, leaving 1.5GiB for modules once the page tables | ||
45 | * are fully set up. If kernel ASLR is configured, it can extend the | ||
46 | * kernel page table mapping, reducing the size of the modules area. | ||
44 | */ | 47 | */ |
45 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) | 48 | #define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024) |
49 | #if defined(CONFIG_RANDOMIZE_BASE) && \ | ||
50 | CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT | ||
51 | #define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET | ||
52 | #else | ||
53 | #define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT | ||
54 | #endif | ||
46 | 55 | ||
47 | #endif /* _ASM_X86_PAGE_64_DEFS_H */ | 56 | #endif /* _ASM_X86_PAGE_64_DEFS_H */ |
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 2d883440cb9a..c883bf726398 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h | |||
@@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t; | |||
58 | #define VMALLOC_START _AC(0xffffc90000000000, UL) | 58 | #define VMALLOC_START _AC(0xffffc90000000000, UL) |
59 | #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) | 59 | #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) |
60 | #define VMEMMAP_START _AC(0xffffea0000000000, UL) | 60 | #define VMEMMAP_START _AC(0xffffea0000000000, UL) |
61 | #define MODULES_VADDR _AC(0xffffffffa0000000, UL) | 61 | #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) |
62 | #define MODULES_END _AC(0xffffffffff000000, UL) | 62 | #define MODULES_END _AC(0xffffffffff000000, UL) |
63 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 63 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
64 | 64 | ||
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index 88db010845cb..384df5105fbc 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c | |||
@@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s) | |||
31 | } | 31 | } |
32 | __setup("nordrand", x86_rdrand_setup); | 32 | __setup("nordrand", x86_rdrand_setup); |
33 | 33 | ||
34 | /* We can't use arch_get_random_long() here since alternatives haven't run */ | ||
35 | static inline int rdrand_long(unsigned long *v) | ||
36 | { | ||
37 | int ok; | ||
38 | asm volatile("1: " RDRAND_LONG "\n\t" | ||
39 | "jc 2f\n\t" | ||
40 | "decl %0\n\t" | ||
41 | "jnz 1b\n\t" | ||
42 | "2:" | ||
43 | : "=r" (ok), "=a" (*v) | ||
44 | : "0" (RDRAND_RETRY_LOOPS)); | ||
45 | return ok; | ||
46 | } | ||
47 | |||
48 | /* | 34 | /* |
49 | * Force a reseed cycle; we are architecturally guaranteed a reseed | 35 | * Force a reseed cycle; we are architecturally guaranteed a reseed |
50 | * after no more than 512 128-bit chunks of random data. This also | 36 | * after no more than 512 128-bit chunks of random data. This also |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 182b3f97dcf4..06853e670354 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -828,6 +828,20 @@ static void __init trim_low_memory_range(void) | |||
828 | } | 828 | } |
829 | 829 | ||
830 | /* | 830 | /* |
831 | * Dump out kernel offset information on panic. | ||
832 | */ | ||
833 | static int | ||
834 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) | ||
835 | { | ||
836 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx " | ||
837 | "(relocation range: 0x%lx-0x%lx)\n", | ||
838 | (unsigned long)&_text - __START_KERNEL, __START_KERNEL, | ||
839 | __START_KERNEL_map, MODULES_VADDR-1); | ||
840 | |||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | /* | ||
831 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 845 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
832 | * passed the efi memmap, systab, etc., so we should use these data structures | 846 | * passed the efi memmap, systab, etc., so we should use these data structures |
833 | * for initialization. Note, the efi init code path is determined by the | 847 | * for initialization. Note, the efi init code path is determined by the |
@@ -1252,3 +1266,15 @@ void __init i386_reserve_resources(void) | |||
1252 | } | 1266 | } |
1253 | 1267 | ||
1254 | #endif /* CONFIG_X86_32 */ | 1268 | #endif /* CONFIG_X86_32 */ |
1269 | |||
/* Hook dump_kernel_offset() into the panic notifier chain. */
static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

/* Runs once at boot via __initcall; always succeeds. */
static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4287f1ffba7e..5bdc5430597c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -806,6 +806,9 @@ void __init mem_init(void) | |||
806 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); | 806 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); |
807 | #undef high_memory | 807 | #undef high_memory |
808 | #undef __FIXADDR_TOP | 808 | #undef __FIXADDR_TOP |
809 | #ifdef CONFIG_RANDOMIZE_BASE | ||
810 | BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE); | ||
811 | #endif | ||
809 | 812 | ||
810 | #ifdef CONFIG_HIGHMEM | 813 | #ifdef CONFIG_HIGHMEM |
811 | BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); | 814 | BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); |
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index f7bab68a4b83..11f9285a2ff6 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c | |||
@@ -722,15 +722,25 @@ static void percpu_init(void) | |||
722 | 722 | ||
723 | /* | 723 | /* |
724 | * Check to see if a symbol lies in the .data..percpu section. | 724 | * Check to see if a symbol lies in the .data..percpu section. |
725 | * For some as yet not understood reason the "__init_begin" | 725 | * |
726 | * symbol which immediately preceeds the .data..percpu section | 726 | * The linker incorrectly associates some symbols with the |
727 | * also shows up as it it were part of it so we do an explict | 727 | * .data..percpu section so we also need to check the symbol |
728 | * check for that symbol name and ignore it. | 728 | * name to make sure that we classify the symbol correctly. |
729 | * | ||
730 | * The GNU linker incorrectly associates: | ||
731 | * __init_begin | ||
732 | * __per_cpu_load | ||
733 | * | ||
734 | * The "gold" linker incorrectly associates: | ||
735 | * init_per_cpu__irq_stack_union | ||
736 | * init_per_cpu__gdt_page | ||
729 | */ | 737 | */ |
730 | static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) | 738 | static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) |
731 | { | 739 | { |
732 | return (sym->st_shndx == per_cpu_shndx) && | 740 | return (sym->st_shndx == per_cpu_shndx) && |
733 | strcmp(symname, "__init_begin"); | 741 | strcmp(symname, "__init_begin") && |
742 | strcmp(symname, "__per_cpu_load") && | ||
743 | strncmp(symname, "init_per_cpu_", 13); | ||
734 | } | 744 | } |
735 | 745 | ||
736 | 746 | ||