Diffstat (limited to 'include/asm-x86/system_64.h')
-rw-r--r--	include/asm-x86/system_64.h	180
1 file changed, 180 insertions, 0 deletions
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
new file mode 100644
index 000000000000..02175aa1d16a
--- /dev/null
+++ b/include/asm-x86/system_64.h
@@ -0,0 +1,180 @@
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

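The two-level expansion is what lets STR() stringify the value of a macro
rather than its name. A minimal sketch, using a hypothetical macro N to
show the difference:

	#define N 42
	const char *raw = __STR(N);	/* "N"  -- argument stringified as written */
	const char *val = STR(N);	/* "42" -- N expands first, then stringifies */
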
#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags across the switch to clear a leaking NT flag */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
		     "call __switch_to\n\t"				\
		     ".globl thread_return\n"				\
		     "thread_return:\n\t"				\
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		\
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		\
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t"				\
		     "jc ret_from_fork\n\t"				\
		     RESTORE_CONTEXT					\
		     : "=a" (last)					\
		     : [next] "S" (next), [prev] "D" (prev),		\
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			\
		       [thread_info] "i" (offsetof(struct task_struct, stack)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

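The three-argument shape matches the macro's call site in the scheduler:
the asm block returns, in %rax, the task that was actually running before
the switch, and the "=a" output writes it to "last". A hedged sketch of
the intended use (context_switch() stands in for the scheduler core and
is not part of this header):

	/* inside the scheduler's context_switch(), roughly: */
	switch_to(prev, next, prev);
	/* after this line "prev" names the task we just switched away
	   from, which can differ from the value prev held before the
	   macro ran if this task slept for a long time */
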
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

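The first argument is a bare segment-register name, not a string, because
the macro pastes it via #seg; the .fixup entry loads selector 0 when the
given value faults. A usage sketch, clearing %fs:

	loadsegment(fs, 0);	/* load the null selector into %fs; a
				   faulting value falls back to 0 anyway */
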
/*
 * clts() clears the TS bit in CR0; stts(), defined below, sets it.
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr2(void)
{
	unsigned long cr2;
	asm("movq %%cr2,%0" : "=r" (cr2));
	return cr2;
}

static inline void write_cr2(unsigned long val)
{
	asm volatile("movq %0,%%cr2" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}

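These accessor pairs exist to support read-modify-write updates of the
control registers. A sketch of the pattern, setting the global-pages
enable bit (CR4.PGE, bit 7 per the architecture manuals):

	unsigned long cr4 = read_cr4();
	write_cr4(cr4 | (1UL << 7));	/* enable global pages */
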
#define stts() write_cr0(8 | read_cr0())	/* set TS (bit 3) in CR0 */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

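On SMP these map to the hardware fences defined below; on UP a compiler
barrier suffices because a single CPU always observes its own program
order. A minimal producer/consumer sketch of the smp_wmb()/smp_rmb()
pairing, with hypothetical shared variables data and ready:

	/* CPU 0 (producer) */
	data = 42;
	smp_wmb();		/* data store visible before the flag store */
	ready = 1;

	/* CPU 1 (consumer) */
	while (!ready)
		cpu_relax();
	smp_rmb();		/* flag load ordered before the data load */
	/* data is guaranteed to read 42 here */
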
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)

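set_mb() folds a store and a full memory barrier into one step: xchg with
a memory operand is implicitly locked on x86, so the assignment itself
orders all earlier and later accesses. A sketch of the equivalence, with
a hypothetical flag variable:

	set_mb(flag, 1);	/* behaves like: flag = 1; mb(); */
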
/*
 * Compile-time type check: if x is not unsigned long, the pointer
 * comparison triggers a "comparison of distinct pointer types" warning.
 */
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif