/*
*
* Trampoline.S Derived from Setup.S by Linus Torvalds
*
* 4 Jan 1997 Michael Chastain: changed to gnu as.
*
* Entry: CS:IP point to the start of our code, we are
* in real mode with no stack, but the rest of the
* trampoline page to make our stack and everything else
* is a mystery.
*
* In fact we don't actually need a stack so we don't
* set one up.
*
* On entry to trampoline_data, the processor is in real mode
* with 16-bit addressing and 16-bit data. CS has some value
* and IP is zero. Thus, data addresses need to be absolute
* (no relocation) and are taken with regard to r_base.
*
* If you work on this file, check the object module with objdump
* --full-contents --reloc to make sure there are no relocation
* entries. For the GDT entry we do hand relocation in smpboot.c
* because of 64bit linker limitations.
*/
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
/*
 * NOTE: this lives in .data (not .text) because smpboot.c copies the
 * whole trampoline page to low memory and hand-patches the GDT pointer
 * below before an AP ever executes it.
 */
.data
.code16
ENTRY(trampoline_data)
r_base = .
wbinvd # write back and invalidate caches before the mode switch
mov %cs, %ax # Code and data in the same place
mov %ax, %ds # %ds = %cs: all data refs below are r_base-relative
cli # We should be safe anyway
movl $0xA5A5A5A5, trampoline_data - r_base
# write marker so the master (boot) CPU knows we're running
lidt idt_48 - r_base # load idt with 0, 0
lgdt gdt_48 - r_base # load gdt with whatever is appropriate
xor %ax, %ax
inc %ax # protected mode (PE) bit
lmsw %ax # into protected mode (lmsw sets CR0.PE without needing 32-bit ops)
# flush prefetch queue and jump to startup_32 in arch/x86_64/kernel/head.S
ljmpl $__KERNEL32_CS, $(startup_32-__START_KERNEL_map)
# Careful: these need to be in the same 64K segment as the above;
# they are addressed r_base-relative through 16-bit %ds.
idt_48:
.word 0 # idt limit = 0
.word 0, 0 # idt base = 0L
gdt_48:
.short __KERNEL32_CS + 7 # gdt limit: covers entries up through __KERNEL32_CS
.long cpu_gdt_table-__START_KERNEL_map # gdt base, hand-relocated by smpboot.c
.globl trampoline_end
trampoline_end: