diff options
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/asm-offsets_64.c | 11 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/amd_64.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/common_64.c | 7 | ||||
-rw-r--r-- | arch/x86/kernel/entry_64.S | 106 | ||||
-rw-r--r-- | arch/x86/kernel/head64.c | 11 | ||||
-rw-r--r-- | arch/x86/kernel/head_64.S | 1 | ||||
-rw-r--r-- | arch/x86/kernel/irq_32.c | 7 | ||||
-rw-r--r-- | arch/x86/kernel/paravirt.c | 4 | ||||
-rw-r--r-- | arch/x86/kernel/process_64.c | 56 | ||||
-rw-r--r-- | arch/x86/kernel/setup.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/smpboot.c | 2 |
11 files changed, 168 insertions, 42 deletions
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index bacf5deeec2d..aa89387006fe 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
19 | #include <asm/bootparam.h> | 19 | #include <asm/bootparam.h> |
20 | 20 | ||
21 | #include <xen/interface/xen.h> | ||
22 | |||
21 | #define __NO_STUBS 1 | 23 | #define __NO_STUBS 1 |
22 | #undef __SYSCALL | 24 | #undef __SYSCALL |
23 | #undef _ASM_X86_64_UNISTD_H_ | 25 | #undef _ASM_X86_64_UNISTD_H_ |
@@ -131,5 +133,14 @@ int main(void) | |||
131 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | 133 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); |
132 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | 134 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); |
133 | OFFSET(BP_version, boot_params, hdr.version); | 135 | OFFSET(BP_version, boot_params, hdr.version); |
136 | |||
137 | BLANK(); | ||
138 | DEFINE(PAGE_SIZE_asm, PAGE_SIZE); | ||
139 | #ifdef CONFIG_XEN | ||
140 | BLANK(); | ||
141 | OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); | ||
142 | OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending); | ||
143 | #undef ENTRY | ||
144 | #endif | ||
134 | return 0; | 145 | return 0; |
135 | } | 146 | } |
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c index 7c36fb8a28d4..d1692b2a41ff 100644 --- a/arch/x86/kernel/cpu/amd_64.c +++ b/arch/x86/kernel/cpu/amd_64.c | |||
@@ -115,6 +115,8 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | |||
115 | /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ | 115 | /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ |
116 | if (c->x86_power & (1<<8)) | 116 | if (c->x86_power & (1<<8)) |
117 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 117 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
118 | |||
119 | set_cpu_cap(c, X86_FEATURE_SYSCALL32); | ||
118 | } | 120 | } |
119 | 121 | ||
120 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 122 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c index 7b8cc72feb40..736f50fa433d 100644 --- a/arch/x86/kernel/cpu/common_64.c +++ b/arch/x86/kernel/cpu/common_64.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/i387.h> | 16 | #include <asm/i387.h> |
17 | #include <asm/msr.h> | 17 | #include <asm/msr.h> |
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
19 | #include <asm/linkage.h> | ||
19 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
20 | #include <asm/mtrr.h> | 21 | #include <asm/mtrr.h> |
21 | #include <asm/mce.h> | 22 | #include <asm/mce.h> |
@@ -316,9 +317,6 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) | |||
316 | c->x86_phys_bits = eax & 0xff; | 317 | c->x86_phys_bits = eax & 0xff; |
317 | } | 318 | } |
318 | 319 | ||
319 | /* Assume all 64-bit CPUs support 32-bit syscall */ | ||
320 | set_cpu_cap(c, X86_FEATURE_SYSCALL32); | ||
321 | |||
322 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && | 320 | if (c->x86_vendor != X86_VENDOR_UNKNOWN && |
323 | cpu_devs[c->x86_vendor]->c_early_init) | 321 | cpu_devs[c->x86_vendor]->c_early_init) |
324 | cpu_devs[c->x86_vendor]->c_early_init(c); | 322 | cpu_devs[c->x86_vendor]->c_early_init(c); |
@@ -517,8 +515,7 @@ void pda_init(int cpu) | |||
517 | } | 515 | } |
518 | 516 | ||
519 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | 517 | char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + |
520 | DEBUG_STKSZ] | 518 | DEBUG_STKSZ] __page_aligned_bss; |
521 | __attribute__((section(".bss.page_aligned"))); | ||
522 | 519 | ||
523 | extern asmlinkage void ignore_sysret(void); | 520 | extern asmlinkage void ignore_sysret(void); |
524 | 521 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index ae63e584c340..80d5663db3bc 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -1189,6 +1189,7 @@ END(device_not_available) | |||
1189 | /* runs on exception stack */ | 1189 | /* runs on exception stack */ |
1190 | KPROBE_ENTRY(debug) | 1190 | KPROBE_ENTRY(debug) |
1191 | INTR_FRAME | 1191 | INTR_FRAME |
1192 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
1192 | pushq $0 | 1193 | pushq $0 |
1193 | CFI_ADJUST_CFA_OFFSET 8 | 1194 | CFI_ADJUST_CFA_OFFSET 8 |
1194 | paranoidentry do_debug, DEBUG_STACK | 1195 | paranoidentry do_debug, DEBUG_STACK |
@@ -1198,6 +1199,7 @@ KPROBE_END(debug) | |||
1198 | /* runs on exception stack */ | 1199 | /* runs on exception stack */ |
1199 | KPROBE_ENTRY(nmi) | 1200 | KPROBE_ENTRY(nmi) |
1200 | INTR_FRAME | 1201 | INTR_FRAME |
1202 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
1201 | pushq $-1 | 1203 | pushq $-1 |
1202 | CFI_ADJUST_CFA_OFFSET 8 | 1204 | CFI_ADJUST_CFA_OFFSET 8 |
1203 | paranoidentry do_nmi, 0, 0 | 1205 | paranoidentry do_nmi, 0, 0 |
@@ -1211,6 +1213,7 @@ KPROBE_END(nmi) | |||
1211 | 1213 | ||
1212 | KPROBE_ENTRY(int3) | 1214 | KPROBE_ENTRY(int3) |
1213 | INTR_FRAME | 1215 | INTR_FRAME |
1216 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
1214 | pushq $0 | 1217 | pushq $0 |
1215 | CFI_ADJUST_CFA_OFFSET 8 | 1218 | CFI_ADJUST_CFA_OFFSET 8 |
1216 | paranoidentry do_int3, DEBUG_STACK | 1219 | paranoidentry do_int3, DEBUG_STACK |
@@ -1237,6 +1240,7 @@ END(coprocessor_segment_overrun) | |||
1237 | /* runs on exception stack */ | 1240 | /* runs on exception stack */ |
1238 | ENTRY(double_fault) | 1241 | ENTRY(double_fault) |
1239 | XCPT_FRAME | 1242 | XCPT_FRAME |
1243 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
1240 | paranoidentry do_double_fault | 1244 | paranoidentry do_double_fault |
1241 | jmp paranoid_exit1 | 1245 | jmp paranoid_exit1 |
1242 | CFI_ENDPROC | 1246 | CFI_ENDPROC |
@@ -1253,6 +1257,7 @@ END(segment_not_present) | |||
1253 | /* runs on exception stack */ | 1257 | /* runs on exception stack */ |
1254 | ENTRY(stack_segment) | 1258 | ENTRY(stack_segment) |
1255 | XCPT_FRAME | 1259 | XCPT_FRAME |
1260 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
1256 | paranoidentry do_stack_segment | 1261 | paranoidentry do_stack_segment |
1257 | jmp paranoid_exit1 | 1262 | jmp paranoid_exit1 |
1258 | CFI_ENDPROC | 1263 | CFI_ENDPROC |
@@ -1278,6 +1283,7 @@ END(spurious_interrupt_bug) | |||
1278 | /* runs on exception stack */ | 1283 | /* runs on exception stack */ |
1279 | ENTRY(machine_check) | 1284 | ENTRY(machine_check) |
1280 | INTR_FRAME | 1285 | INTR_FRAME |
1286 | PARAVIRT_ADJUST_EXCEPTION_FRAME | ||
1281 | pushq $0 | 1287 | pushq $0 |
1282 | CFI_ADJUST_CFA_OFFSET 8 | 1288 | CFI_ADJUST_CFA_OFFSET 8 |
1283 | paranoidentry do_machine_check | 1289 | paranoidentry do_machine_check |
@@ -1312,3 +1318,103 @@ KPROBE_ENTRY(ignore_sysret) | |||
1312 | sysret | 1318 | sysret |
1313 | CFI_ENDPROC | 1319 | CFI_ENDPROC |
1314 | ENDPROC(ignore_sysret) | 1320 | ENDPROC(ignore_sysret) |
1321 | |||
1322 | #ifdef CONFIG_XEN | ||
1323 | ENTRY(xen_hypervisor_callback) | ||
1324 | zeroentry xen_do_hypervisor_callback | ||
1325 | END(xen_hypervisor_callback) | ||
1326 | |||
1327 | /* | ||
1328 | # A note on the "critical region" in our callback handler. | ||
1329 | # We want to avoid stacking callback handlers due to events occurring | ||
1330 | # during handling of the last event. To do this, we keep events disabled | ||
1331 | # until we've done all processing. HOWEVER, we must enable events before | ||
1332 | # popping the stack frame (can't be done atomically) and so it would still | ||
1333 | # be possible to get enough handler activations to overflow the stack. | ||
1334 | # Although unlikely, bugs of that kind are hard to track down, so we'd | ||
1335 | # like to avoid the possibility. | ||
1336 | # So, on entry to the handler we detect whether we interrupted an | ||
1337 | # existing activation in its critical region -- if so, we pop the current | ||
1338 | # activation and restart the handler using the previous one. | ||
1339 | */ | ||
1340 | ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) | ||
1341 | CFI_STARTPROC | ||
1342 | /* Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will | ||
1343 | see the correct pointer to the pt_regs */ | ||
1344 | movq %rdi, %rsp # we don't return, adjust the stack frame | ||
1345 | CFI_ENDPROC | ||
1346 | CFI_DEFAULT_STACK | ||
1347 | 11: incl %gs:pda_irqcount | ||
1348 | movq %rsp,%rbp | ||
1349 | CFI_DEF_CFA_REGISTER rbp | ||
1350 | cmovzq %gs:pda_irqstackptr,%rsp | ||
1351 | pushq %rbp # backlink for old unwinder | ||
1352 | call xen_evtchn_do_upcall | ||
1353 | popq %rsp | ||
1354 | CFI_DEF_CFA_REGISTER rsp | ||
1355 | decl %gs:pda_irqcount | ||
1356 | jmp error_exit | ||
1357 | CFI_ENDPROC | ||
1358 | END(do_hypervisor_callback) | ||
1359 | |||
1360 | /* | ||
1361 | # Hypervisor uses this for application faults while it executes. | ||
1362 | # We get here for two reasons: | ||
1363 | # 1. Fault while reloading DS, ES, FS or GS | ||
1364 | # 2. Fault while executing IRET | ||
1365 | # Category 1 we do not need to fix up as Xen has already reloaded all segment | ||
1366 | # registers that could be reloaded and zeroed the others. | ||
1367 | # Category 2 we fix up by killing the current process. We cannot use the | ||
1368 | # normal Linux return path in this case because if we use the IRET hypercall | ||
1369 | # to pop the stack frame we end up in an infinite loop of failsafe callbacks. | ||
1370 | # We distinguish between categories by comparing each saved segment register | ||
1371 | # with its current contents: any discrepancy means we are in category 1. | ||
1372 | */ | ||
1373 | ENTRY(xen_failsafe_callback) | ||
1374 | framesz = (RIP-0x30) /* workaround buggy gas */ | ||
1375 | _frame framesz | ||
1376 | CFI_REL_OFFSET rcx, 0 | ||
1377 | CFI_REL_OFFSET r11, 8 | ||
1378 | movw %ds,%cx | ||
1379 | cmpw %cx,0x10(%rsp) | ||
1380 | CFI_REMEMBER_STATE | ||
1381 | jne 1f | ||
1382 | movw %es,%cx | ||
1383 | cmpw %cx,0x18(%rsp) | ||
1384 | jne 1f | ||
1385 | movw %fs,%cx | ||
1386 | cmpw %cx,0x20(%rsp) | ||
1387 | jne 1f | ||
1388 | movw %gs,%cx | ||
1389 | cmpw %cx,0x28(%rsp) | ||
1390 | jne 1f | ||
1391 | /* All segments match their saved values => Category 2 (Bad IRET). */ | ||
1392 | movq (%rsp),%rcx | ||
1393 | CFI_RESTORE rcx | ||
1394 | movq 8(%rsp),%r11 | ||
1395 | CFI_RESTORE r11 | ||
1396 | addq $0x30,%rsp | ||
1397 | CFI_ADJUST_CFA_OFFSET -0x30 | ||
1398 | pushq $0 | ||
1399 | CFI_ADJUST_CFA_OFFSET 8 | ||
1400 | pushq %r11 | ||
1401 | CFI_ADJUST_CFA_OFFSET 8 | ||
1402 | pushq %rcx | ||
1403 | CFI_ADJUST_CFA_OFFSET 8 | ||
1404 | jmp general_protection | ||
1405 | CFI_RESTORE_STATE | ||
1406 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ | ||
1407 | movq (%rsp),%rcx | ||
1408 | CFI_RESTORE rcx | ||
1409 | movq 8(%rsp),%r11 | ||
1410 | CFI_RESTORE r11 | ||
1411 | addq $0x30,%rsp | ||
1412 | CFI_ADJUST_CFA_OFFSET -0x30 | ||
1413 | pushq $0 | ||
1414 | CFI_ADJUST_CFA_OFFSET 8 | ||
1415 | SAVE_ALL | ||
1416 | jmp error_exit | ||
1417 | CFI_ENDPROC | ||
1418 | END(xen_failsafe_callback) | ||
1419 | |||
1420 | #endif /* CONFIG_XEN */ | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index c97819829146..1b318e903bf6 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -39,6 +39,13 @@ static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata; | |||
39 | static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly; | 39 | static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly; |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | void __init x86_64_init_pda(void) | ||
43 | { | ||
44 | _cpu_pda = __cpu_pda; | ||
45 | cpu_pda(0) = &_boot_cpu_pda; | ||
46 | pda_init(0); | ||
47 | } | ||
48 | |||
42 | static void __init zap_identity_mappings(void) | 49 | static void __init zap_identity_mappings(void) |
43 | { | 50 | { |
44 | pgd_t *pgd = pgd_offset_k(0UL); | 51 | pgd_t *pgd = pgd_offset_k(0UL); |
@@ -102,9 +109,7 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
102 | 109 | ||
103 | early_printk("Kernel alive\n"); | 110 | early_printk("Kernel alive\n"); |
104 | 111 | ||
105 | _cpu_pda = __cpu_pda; | 112 | x86_64_init_pda(); |
106 | cpu_pda(0) = &_boot_cpu_pda; | ||
107 | pda_init(0); | ||
108 | 113 | ||
109 | early_printk("Kernel really alive\n"); | 114 | early_printk("Kernel really alive\n"); |
110 | 115 | ||
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index b07ac7b217cb..db3280afe886 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -407,6 +407,7 @@ ENTRY(phys_base) | |||
407 | /* This must match the first entry in level2_kernel_pgt */ | 407 | /* This must match the first entry in level2_kernel_pgt */ |
408 | .quad 0x0000000000000000 | 408 | .quad 0x0000000000000000 |
409 | 409 | ||
410 | #include "../../x86/xen/xen-head.S" | ||
410 | 411 | ||
411 | .section .bss, "aw", @nobits | 412 | .section .bss, "aw", @nobits |
412 | .align L1_CACHE_BYTES | 413 | .align L1_CACHE_BYTES |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 47a6f6f12478..1cf8c1fcc088 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -83,11 +83,8 @@ union irq_ctx { | |||
83 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; | 83 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; |
84 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | 84 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; |
85 | 85 | ||
86 | static char softirq_stack[NR_CPUS * THREAD_SIZE] | 86 | static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; |
87 | __attribute__((__section__(".bss.page_aligned"))); | 87 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; |
88 | |||
89 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] | ||
90 | __attribute__((__section__(".bss.page_aligned"))); | ||
91 | 88 | ||
92 | static void call_on_stack(void *func, void *stack) | 89 | static void call_on_stack(void *func, void *stack) |
93 | { | 90 | { |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e0f571d58c19..2963ab5d91ee 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/desc.h> | 29 | #include <asm/desc.h> |
30 | #include <asm/setup.h> | 30 | #include <asm/setup.h> |
31 | #include <asm/arch_hooks.h> | 31 | #include <asm/arch_hooks.h> |
32 | #include <asm/pgtable.h> | ||
32 | #include <asm/time.h> | 33 | #include <asm/time.h> |
33 | #include <asm/pgalloc.h> | 34 | #include <asm/pgalloc.h> |
34 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
@@ -373,6 +374,9 @@ struct pv_mmu_ops pv_mmu_ops = { | |||
373 | #ifndef CONFIG_X86_64 | 374 | #ifndef CONFIG_X86_64 |
374 | .pagetable_setup_start = native_pagetable_setup_start, | 375 | .pagetable_setup_start = native_pagetable_setup_start, |
375 | .pagetable_setup_done = native_pagetable_setup_done, | 376 | .pagetable_setup_done = native_pagetable_setup_done, |
377 | #else | ||
378 | .pagetable_setup_start = paravirt_nop, | ||
379 | .pagetable_setup_done = paravirt_nop, | ||
376 | #endif | 380 | #endif |
377 | 381 | ||
378 | .read_cr2 = native_read_cr2, | 382 | .read_cr2 = native_read_cr2, |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a8e53626ac9a..e8a8e1b99817 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -537,8 +537,8 @@ static inline void __switch_to_xtra(struct task_struct *prev_p, | |||
537 | struct task_struct * | 537 | struct task_struct * |
538 | __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | 538 | __switch_to(struct task_struct *prev_p, struct task_struct *next_p) |
539 | { | 539 | { |
540 | struct thread_struct *prev = &prev_p->thread, | 540 | struct thread_struct *prev = &prev_p->thread; |
541 | *next = &next_p->thread; | 541 | struct thread_struct *next = &next_p->thread; |
542 | int cpu = smp_processor_id(); | 542 | int cpu = smp_processor_id(); |
543 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 543 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
544 | unsigned fsindex, gsindex; | 544 | unsigned fsindex, gsindex; |
@@ -586,35 +586,34 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
586 | 586 | ||
587 | /* | 587 | /* |
588 | * Switch FS and GS. | 588 | * Switch FS and GS. |
589 | * | ||
590 | * Segment register != 0 always requires a reload. Also | ||
591 | * reload when it has changed. When prev process used 64bit | ||
592 | * base always reload to avoid an information leak. | ||
589 | */ | 593 | */ |
590 | { | 594 | if (unlikely(fsindex | next->fsindex | prev->fs)) { |
591 | /* segment register != 0 always requires a reload. | 595 | loadsegment(fs, next->fsindex); |
592 | also reload when it has changed. | 596 | /* |
593 | when prev process used 64bit base always reload | 597 | * Check if the user used a selector != 0; if yes |
594 | to avoid an information leak. */ | 598 | * clear 64bit base, since overloaded base is always |
595 | if (unlikely(fsindex | next->fsindex | prev->fs)) { | 599 | * mapped to the Null selector |
596 | loadsegment(fs, next->fsindex); | 600 | */ |
597 | /* check if the user used a selector != 0 | 601 | if (fsindex) |
598 | * if yes clear 64bit base, since overloaded base | ||
599 | * is always mapped to the Null selector | ||
600 | */ | ||
601 | if (fsindex) | ||
602 | prev->fs = 0; | 602 | prev->fs = 0; |
603 | } | 603 | } |
604 | /* when next process has a 64bit base use it */ | 604 | /* when next process has a 64bit base use it */ |
605 | if (next->fs) | 605 | if (next->fs) |
606 | wrmsrl(MSR_FS_BASE, next->fs); | 606 | wrmsrl(MSR_FS_BASE, next->fs); |
607 | prev->fsindex = fsindex; | 607 | prev->fsindex = fsindex; |
608 | 608 | ||
609 | if (unlikely(gsindex | next->gsindex | prev->gs)) { | 609 | if (unlikely(gsindex | next->gsindex | prev->gs)) { |
610 | load_gs_index(next->gsindex); | 610 | load_gs_index(next->gsindex); |
611 | if (gsindex) | 611 | if (gsindex) |
612 | prev->gs = 0; | 612 | prev->gs = 0; |
613 | } | ||
614 | if (next->gs) | ||
615 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | ||
616 | prev->gsindex = gsindex; | ||
617 | } | 613 | } |
614 | if (next->gs) | ||
615 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | ||
616 | prev->gsindex = gsindex; | ||
618 | 617 | ||
619 | /* Must be after DS reload */ | 618 | /* Must be after DS reload */ |
620 | unlazy_fpu(prev_p); | 619 | unlazy_fpu(prev_p); |
@@ -627,7 +626,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
627 | write_pda(pcurrent, next_p); | 626 | write_pda(pcurrent, next_p); |
628 | 627 | ||
629 | write_pda(kernelstack, | 628 | write_pda(kernelstack, |
630 | (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET); | 629 | (unsigned long)task_stack_page(next_p) + |
630 | THREAD_SIZE - PDA_STACKOFFSET); | ||
631 | #ifdef CONFIG_CC_STACKPROTECTOR | 631 | #ifdef CONFIG_CC_STACKPROTECTOR |
632 | write_pda(stack_canary, next_p->stack_canary); | 632 | write_pda(stack_canary, next_p->stack_canary); |
633 | /* | 633 | /* |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 531b55b8e81a..c9010f82141d 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -824,7 +824,10 @@ void __init setup_arch(char **cmdline_p) | |||
824 | vmi_init(); | 824 | vmi_init(); |
825 | #endif | 825 | #endif |
826 | 826 | ||
827 | paravirt_pagetable_setup_start(swapper_pg_dir); | ||
827 | paging_init(); | 828 | paging_init(); |
829 | paravirt_pagetable_setup_done(swapper_pg_dir); | ||
830 | paravirt_post_allocator_init(); | ||
828 | 831 | ||
829 | #ifdef CONFIG_X86_64 | 832 | #ifdef CONFIG_X86_64 |
830 | map_vsyscall(); | 833 | map_vsyscall(); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 687376ab07e8..1deb3b624a79 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -768,7 +768,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work) | |||
768 | * | 768 | * |
769 | * Must be called after the _cpu_pda pointer table is initialized. | 769 | * Must be called after the _cpu_pda pointer table is initialized. |
770 | */ | 770 | */ |
771 | static int __cpuinit get_local_pda(int cpu) | 771 | int __cpuinit get_local_pda(int cpu) |
772 | { | 772 | { |
773 | struct x8664_pda *oldpda, *newpda; | 773 | struct x8664_pda *oldpda, *newpda; |
774 | unsigned long size = sizeof(struct x8664_pda); | 774 | unsigned long size = sizeof(struct x8664_pda); |