author    Glauber de Oliveira Costa <gcosta@redhat.com>    2008-01-30 07:31:57 -0500
committer Ingo Molnar <mingo@elte.hu>                      2008-01-30 07:31:57 -0500
commit    2f66dcc933f012cd487c5ebf5d400e50b1d5c488 (patch)
tree      b3c7076cc488c79c60af302b582c32ecf088ac3b /include/asm-x86/processor.h
parent    7ba65c7e17a292fe1e6e48fd38d1b1ecb626b586 (diff)
x86: finish processor.h integration
What is left in processor_32.h and processor_64.h cannot be cleanly integrated; however, it is only a couple of definitions. They are moved into processor.h, wrapped in #ifdefs, and the original files are deleted. Note that the final version includes far fewer headers.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
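For orientation, here is a minimal sketch (not the literal file contents) of how the 32-bit and 64-bit leftovers end up arranged inside the unified processor.h after this patch. Every name shown is taken from the hunks below; the elided definitions are marked as comments.

/* Illustrative layout only -- the real definitions are in the diff below. */
#ifdef CONFIG_X86_32
/* formerly processor_32.h */
DECLARE_PER_CPU(u8, cpu_llc_id);
#define TASK_SIZE	(PAGE_OFFSET)			/* 3GB user space */
/* ... plus INIT_THREAD, INIT_TSS, start_thread(), task_pt_regs(), KSTK_ESP() */
#else
/* formerly processor_64.h */
DECLARE_PER_CPU(struct orig_ist, orig_ist);
#define TASK_SIZE64	(0x800000000000UL - 4096)	/* 47 bits minus a guard page */
/* ... plus INIT_THREAD, INIT_TSS, start_thread(), task_pt_regs(), KSTK_ESP() */
#endif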
Diffstat (limited to 'include/asm-x86/processor.h')
-rw-r--r--   include/asm-x86/processor.h  |  140
1 file changed, 137 insertions(+), 3 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index ea222cfe7b00..e6fa06fee72a 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -7,12 +7,24 @@
 struct task_struct;
 struct mm_struct;
 
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
 #include <asm/page.h>
-#include <asm/percpu.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/current.h>
+#include <asm/cpufeature.h>
 #include <asm/system.h>
+#include <asm/page.h>
 #include <asm/percpu.h>
+#include <asm/msr.h>
+#include <asm/desc_defs.h>
+#include <linux/personality.h>
 #include <linux/cpumask.h>
 #include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/init.h>
 
 /*
  * Default implementation of macro that returns current
@@ -285,9 +297,13 @@ union i387_union {
 };
 
 #ifdef CONFIG_X86_32
-# include "processor_32.h"
+/*
+ * the following now lives in the per cpu area:
+ * extern int cpu_llc_id[NR_CPUS];
+ */
+DECLARE_PER_CPU(u8, cpu_llc_id);
 #else
-# include "processor_64.h"
+DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
 extern void print_cpu_info(struct cpuinfo_x86 *);
@@ -770,6 +786,124 @@ static inline void prefetchw(const void *x)
 }
 
 #define spin_lock_prefetch(x)	prefetchw(x)
+#ifdef CONFIG_X86_32
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE	(PAGE_OFFSET)
+
+#define INIT_THREAD  {						\
+	.sp0 = sizeof(init_stack) + (long)&init_stack,		\
+	.vm86_info = NULL,					\
+	.sysenter_cs = __KERNEL_CS,				\
+	.io_bitmap_ptr = NULL,					\
+	.fs = __KERNEL_PERCPU,					\
+}
+
+/*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+#define INIT_TSS  {						\
+	.x86_tss = {						\
+		.sp0		= sizeof(init_stack) + (long)&init_stack, \
+		.ss0		= __KERNEL_DS,			\
+		.ss1		= __KERNEL_CS,			\
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,	\
+	},							\
+	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
+}
+
+#define start_thread(regs, new_eip, new_esp) do {		\
+	__asm__("movl %0,%%gs": :"r" (0));			\
+	regs->fs = 0;						\
+	set_fs(USER_DS);					\
+	regs->ds = __USER_DS;					\
+	regs->es = __USER_DS;					\
+	regs->ss = __USER_DS;					\
+	regs->cs = __USER_CS;					\
+	regs->ip = new_eip;					\
+	regs->sp = new_esp;					\
+} while (0)
+
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
+#define KSTK_TOP(info)						\
+({								\
+	unsigned long *__ptr = (unsigned long *)(info);		\
+	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
+})
+
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the ss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
+#define task_pt_regs(task)					\
+({								\
+	struct pt_regs *__regs__;				\
+	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
+	__regs__ - 1;						\
+})
+
+#define KSTK_ESP(task)	(task_pt_regs(task)->sp)
+
+#else
+/*
+ * User space process size. 47 bits minus one guard page.
+ */
+#define TASK_SIZE64	(0x800000000000UL - 4096)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
+				 0xc0000000 : 0xFFFFe000)
+
+#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
+				 IA32_PAGE_OFFSET : TASK_SIZE64)
+#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
+				 IA32_PAGE_OFFSET : TASK_SIZE64)
+
+#define INIT_THREAD  { \
+	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define INIT_TSS  { \
+	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define start_thread(regs, new_rip, new_rsp) do {			     \
+	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
+	load_gs_index(0);						     \
+	(regs)->ip = (new_rip);						     \
+	(regs)->sp = (new_rsp);						     \
+	write_pda(oldrsp, (new_rsp));					     \
+	(regs)->cs = __USER_CS;						     \
+	(regs)->ss = __USER_DS;						     \
+	(regs)->flags = 0x200;						     \
+	set_fs(USER_DS);						     \
+} while (0)
+
+/*
+ * Return saved PC of a blocked thread.
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ */
+#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
+
+#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
+#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
+#endif /* CONFIG_X86_64 */
+
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
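
To make the 32-bit stack arithmetic in the hunk above concrete, here is a small standalone userspace C program (not part of the patch) that mimics KSTK_TOP() and task_pt_regs(). The THREAD_SIZE value, the simplified struct pt_regs, and the malloc'd buffer standing in for task_stack_page() are all assumptions made for this sketch.

/*
 * Standalone illustration (userspace only, NOT part of the patch) of the
 * 32-bit KSTK_TOP()/task_pt_regs() arithmetic: pt_regs is placed at the top
 * of the thread stack, below an 8-byte reservation.
 */
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE		8192UL	/* assumed 8 KiB thread stack */
#define THREAD_SIZE_LONGS	(THREAD_SIZE / sizeof(unsigned long))

/* Simplified stand-in; the real field layout lives in the kernel headers. */
struct pt_regs {
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned long ds, es, fs, orig_ax, ip, cs, flags, sp, ss;
};

/* KSTK_TOP(): address one past the last unsigned long of the thread stack. */
static unsigned long kstk_top(void *stack)
{
	unsigned long *ptr = stack;

	return (unsigned long)&ptr[THREAD_SIZE_LONGS];
}

/* task_pt_regs(): pt_regs sits just below the top, minus the 8-byte pad. */
static struct pt_regs *task_pt_regs(void *stack)
{
	return (struct pt_regs *)(kstk_top(stack) - 8) - 1;
}

int main(void)
{
	void *stack = malloc(THREAD_SIZE);	/* stands in for task_stack_page() */
	struct pt_regs *regs = task_pt_regs(stack);

	printf("stack base: %p\n", stack);
	printf("stack top:  %#lx\n", kstk_top(stack));
	printf("pt_regs at: %p (sizeof pt_regs = %zu, plus 8-byte pad below the top)\n",
	       (void *)regs, sizeof(*regs));

	free(stack);
	return 0;
}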