about summary refs log tree commit diff stats
path: root/include/asm-x86/processor.h
diff options
context:
space:
mode:
authorGlauber de Oliveira Costa <gcosta@redhat.com>2008-01-30 07:31:27 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:31:27 -0500
commit683e0253dbd12554b2ee969b15e68105252bff57 (patch)
treeb1b2df43f7fcdf48bc69789d81c437be0cdd639b /include/asm-x86/processor.h
parent62d7d7ed11760a0fea40e4fc6f0553e721d00443 (diff)
x86: unify common parts of processor.h
This patch moves the pieces of processor_32.h and processor_64 that are equal to processor.h. Only what's exactly the same is moved around, the rest not being touched.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/processor.h')
-rw-r--r--  include/asm-x86/processor.h  120
1 file changed, 120 insertions, 0 deletions
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 8b7794766884..52e3637ef59e 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,6 +3,10 @@
3 3
4#include <asm/processor-flags.h> 4#include <asm/processor-flags.h>
5 5
6/* Forward declaration, a strange C thing */
7struct task_struct;
8struct mm_struct;
9
6#include <asm/page.h> 10#include <asm/page.h>
7#include <asm/system.h> 11#include <asm/system.h>
8 12
@@ -29,6 +33,11 @@ static inline void load_cr3(pgd_t *pgdir)
29# include "processor_64.h" 33# include "processor_64.h"
30#endif 34#endif
31 35
36extern void print_cpu_info(struct cpuinfo_x86 *);
37extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
38extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
39extern unsigned short num_cache_leaves;
40
32static inline unsigned long native_get_debugreg(int regno) 41static inline unsigned long native_get_debugreg(int regno)
33{ 42{
34 unsigned long val = 0; /* Damn you, gcc! */ 43 unsigned long val = 0; /* Damn you, gcc! */
@@ -138,7 +147,53 @@ static inline void clear_in_cr4(unsigned long mask)
138 write_cr4(cr4); 147 write_cr4(cr4);
139} 148}
140 149
150struct microcode_header {
151 unsigned int hdrver;
152 unsigned int rev;
153 unsigned int date;
154 unsigned int sig;
155 unsigned int cksum;
156 unsigned int ldrver;
157 unsigned int pf;
158 unsigned int datasize;
159 unsigned int totalsize;
160 unsigned int reserved[3];
161};
162
163struct microcode {
164 struct microcode_header hdr;
165 unsigned int bits[0];
166};
167
168typedef struct microcode microcode_t;
169typedef struct microcode_header microcode_header_t;
170
171/* microcode format is extended from prescott processors */
172struct extended_signature {
173 unsigned int sig;
174 unsigned int pf;
175 unsigned int cksum;
176};
177
178struct extended_sigtable {
179 unsigned int count;
180 unsigned int cksum;
181 unsigned int reserved[3];
182 struct extended_signature sigs[0];
183};
184
185/*
186 * create a kernel thread without removing it from tasklists
187 */
188extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
189
190/* Free all resources held by a thread. */
191extern void release_thread(struct task_struct *);
192
193/* Prepare to copy thread state - unlazy all lazy status */
194extern void prepare_to_copy(struct task_struct *tsk);
141 195
196unsigned long get_wchan(struct task_struct *p);
142 197
143/* 198/*
144 * Generic CPUID function 199 * Generic CPUID function
@@ -196,4 +251,69 @@ static inline unsigned int cpuid_edx(unsigned int op)
196 return edx; 251 return edx;
197} 252}
198 253
254/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
255static inline void rep_nop(void)
256{
257 __asm__ __volatile__("rep;nop": : :"memory");
258}
259
260/* Stop speculative execution */
261static inline void sync_core(void)
262{
263 int tmp;
264 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
265 : "ebx", "ecx", "edx", "memory");
266}
267
268#define cpu_relax() rep_nop()
269
270static inline void __monitor(const void *eax, unsigned long ecx,
271 unsigned long edx)
272{
273 /* "monitor %eax,%ecx,%edx;" */
274 asm volatile(
275 ".byte 0x0f,0x01,0xc8;"
276 : :"a" (eax), "c" (ecx), "d"(edx));
277}
278
279static inline void __mwait(unsigned long eax, unsigned long ecx)
280{
281 /* "mwait %eax,%ecx;" */
282 asm volatile(
283 ".byte 0x0f,0x01,0xc9;"
284 : :"a" (eax), "c" (ecx));
285}
286
287static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
288{
289 /* "mwait %eax,%ecx;" */
290 asm volatile(
291 "sti; .byte 0x0f,0x01,0xc9;"
292 : :"a" (eax), "c" (ecx));
293}
294
295extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
296
297extern int force_mwait;
298
299extern void select_idle_routine(const struct cpuinfo_x86 *c);
300
301extern unsigned long boot_option_idle_override;
302
303/* Boot loader type from the setup header */
304extern int bootloader_type;
305#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
306
307#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
308#define ARCH_HAS_PREFETCHW
309#define ARCH_HAS_SPINLOCK_PREFETCH
310
311#define spin_lock_prefetch(x) prefetchw(x)
312/* This decides where the kernel will search for a free chunk of vm
313 * space during mmap's.
314 */
315#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
316
317#define KSTK_EIP(task) (task_pt_regs(task)->ip)
318
199#endif 319#endif