Diffstat (limited to 'arch/x86_64/kernel/setup64.c')
-rw-r--r--  arch/x86_64/kernel/setup64.c | 45 ++++++++++++++++++++-------------------------
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 417de564456e..8c4b80fe71a1 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -24,7 +24,7 @@
 #include <asm/proto.h>
 #include <asm/sections.h>
 
-char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
+char x86_boot_params[BOOT_PARAM_SIZE] __initdata;
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
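The first hunk is a pure C-semantics cleanup: objects with static storage duration are zero-initialized by the language, so the "= {0,}" initializer changes nothing about the array's contents and can only keep it out of the zero-fill section. A minimal illustration (generic C, not kernel code):

	static char a[64];		/* zero-filled by C rules; eligible for .bss */
	static char b[64] = {0,};	/* identical contents; the explicit initializer
					 * may land it in initialized data instead */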
@@ -46,8 +46,10 @@ Control non executable mappings for 64bit processes.
 on	Enable(default)
 off	Disable
 */
-int __init nonx_setup(char *str)
+static int __init nonx_setup(char *str)
 {
+	if (!str)
+		return -EINVAL;
 	if (!strncmp(str, "on", 2)) {
 		__supported_pte_mask |= _PAGE_NX;
 		do_not_nx = 0;
@@ -55,9 +57,9 @@ int __init nonx_setup(char *str)
 		do_not_nx = 1;
 		__supported_pte_mask &= ~_PAGE_NX;
 	}
-	return 1;
+	return 0;
 }
-__setup("noexec=", nonx_setup);	/* parsed early actually */
+early_param("noexec", nonx_setup);
 
 int force_personality32 = 0;
 
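The two hunks above convert the handler from __setup to early_param, which changes its contract: early parameters are processed from parse_early_param() before ordinary __setup handlers run, the callback can receive a NULL argument when the option is given without "=value", and success is signalled by returning 0 (nonzero makes the core print a malformed-option warning) rather than __setup's 1. Hence the new NULL check and return value. A minimal sketch of the same pattern for a hypothetical "myopt=" boot option:

	static int enable_myopt;	/* hypothetical flag, for illustration */

	static int __init myopt_setup(char *str)
	{
		if (!str)		/* early_param may pass NULL */
			return -EINVAL;
		if (!strncmp(str, "on", 2))
			enable_myopt = 1;
		else if (!strncmp(str, "off", 3))
			enable_myopt = 0;
		return 0;		/* 0 = success for an early_param */
	}
	early_param("myopt", myopt_setup);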
@@ -93,12 +95,9 @@ void __init setup_per_cpu_areas(void)
 #endif
 
 	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	size = PERCPU_ENOUGH_ROOM;
 
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
 	for_each_cpu_mask (i, cpu_possible_map) {
 		char *ptr;
 
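This hunk stops sizing the per-CPU area from the section itself and unconditionally reserves PERCPU_ENOUGH_ROOM, the upper bound that already accounts for per-CPU data declared by modules; the CONFIG_MODULES special case disappears and the new printk makes the fixed cost visible at boot. For orientation, the rest of the loop (beyond the context shown here) allocates and populates each CPU's copy; a sketch reconstructed from context, not verbatim kernel code:

	char *ptr = alloc_bootmem(size);	/* this CPU's copy of the per-CPU section */
	if (!ptr)
		panic("Cannot allocate cpu data for CPU %d\n", i);
	cpu_pda(i)->data_offset = ptr - __per_cpu_start;	/* base for per-CPU address fixups */
	memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);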
@@ -122,7 +121,10 @@ void pda_init(int cpu)
 
 	/* Setup up data that may be needed in __get_free_pages early */
 	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
+	/* Memory clobbers used to order PDA accessed */
+	mb();
 	wrmsrl(MSR_GS_BASE, pda);
+	mb();
 
 	pda->cpunumber = cpu;
 	pda->irqcount = -1;
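The pair of mb() calls brackets the MSR_GS_BASE write so that neither the compiler nor the CPU moves %gs-relative PDA accesses across the point where the %gs base actually changes. For reference, on x86-64 of this vintage mb() is a full fence carrying a compiler-level memory clobber, which is what the added comment alludes to:

	/* include/asm-x86_64/system.h (same era): */
	#define mb()	asm volatile("mfence":::"memory")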
@@ -178,6 +180,8 @@ void __cpuinit check_efer(void)
 	}
 }
 
+unsigned long kernel_eflags;
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -235,28 +239,17 @@ void __cpuinit cpu_init (void)
 	 * set up and load the per-CPU TSS
 	 */
 	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
+		static const unsigned int order[N_EXCEPTION_STACKS] = {
+			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		};
 		if (cpu) {
-			static const unsigned int order[N_EXCEPTION_STACKS] = {
-				[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-				[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-			};
-
 			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
 			if (!estacks)
 				panic("Cannot allocate exception stack %ld %d\n",
 					v, cpu);
 		}
-		switch (v + 1) {
-#if DEBUG_STKSZ > EXCEPTION_STKSZ
-		case DEBUG_STACK:
-			cpu_pda(cpu)->debugstack = (unsigned long)estacks;
-			estacks += DEBUG_STKSZ;
-			break;
-#endif
-		default:
-			estacks += EXCEPTION_STKSZ;
-			break;
-		}
+		estacks += PAGE_SIZE << order[v];
 		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
 	}
 
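The rewrite above hoists the order[] table out of the boot-CPU special case and collapses the switch into a single increment. Assuming the usual definitions EXCEPTION_STKSZ == PAGE_SIZE << EXCEPTION_STACK_ORDER and DEBUG_STKSZ == PAGE_SIZE << DEBUG_STACK_ORDER, the equivalence is direct:

	/* order[v] == EXCEPTION_STACK_ORDER for every slot except
	 * order[DEBUG_STACK - 1] == DEBUG_STACK_ORDER, so
	 *	estacks += PAGE_SIZE << order[v];
	 * advances by EXCEPTION_STKSZ for ordinary IST stacks and by
	 * DEBUG_STKSZ for the debug stack, exactly as the removed switch
	 * did; the cpu_pda(cpu)->debugstack bookkeeping is dropped
	 * along with it. */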
@@ -290,4 +283,6 @@ void __cpuinit cpu_init (void)
 	set_debugreg(0UL, 7);
 
 	fpu_init();
+
+	raw_local_save_flags(kernel_eflags);
 }
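The closing hunk pairs with the kernel_eflags declaration added earlier in the diff: once cpu_init() has the CPU fully set up, it snapshots EFLAGS so that code outside this file (e.g. the fork return path) can restore a known-good flags image for new tasks. A sketch of what the snapshot boils down to, assuming the era's x86-64 irqflags definitions:

	unsigned long flags;
	/* raw_local_save_flags(flags) is roughly: */
	asm volatile("pushfq ; popq %0" : "=g" (flags));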