Diffstat (limited to 'include/asm-i386')
203 files changed, 18805 insertions, 0 deletions
diff --git a/include/asm-i386/8253pit.h b/include/asm-i386/8253pit.h
new file mode 100644
index 000000000000..96c7c3592daf
--- /dev/null
+++ b/include/asm-i386/8253pit.h
@@ -0,0 +1,12 @@
1 | /* | ||
2 | * 8253/8254 Programmable Interval Timer | ||
3 | */ | ||
4 | |||
5 | #ifndef _8253PIT_H | ||
6 | #define _8253PIT_H | ||
7 | |||
8 | #include <asm/timex.h> | ||
9 | |||
10 | #define PIT_TICK_RATE CLOCK_TICK_RATE | ||
11 | |||
12 | #endif | ||
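For illustration, a minimal sketch of how PIT_TICK_RATE is typically consumed: deriving the channel 0 reload value for a HZ-periodic tick and programming the counter. The port numbers (0x43 command, 0x40 channel 0 data) and the mode byte are standard 8254 values assumed here, not definitions from this header.

#include <asm/8253pit.h>
#include <asm/io.h>
#include <asm/param.h>		/* HZ */

/* Rounded divisor: PIT input clock ticks per timer interrupt. */
#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)

static void setup_pit_channel0(void)
{
	outb_p(0x34, 0x43);			/* ch 0, lo/hi byte, mode 2, binary */
	outb_p(PIT_LATCH & 0xff, 0x40);		/* low byte of the divisor */
	outb(PIT_LATCH >> 8, 0x40);		/* high byte of the divisor */
}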
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
new file mode 100644
index 000000000000..ab17bb8e5465
--- /dev/null
+++ b/include/asm-i386/a.out.h
@@ -0,0 +1,26 @@
1 | #ifndef __I386_A_OUT_H__ | ||
2 | #define __I386_A_OUT_H__ | ||
3 | |||
4 | struct exec | ||
5 | { | ||
6 | unsigned long a_info; /* Use macros N_MAGIC, etc for access */ | ||
7 | unsigned a_text; /* length of text, in bytes */ | ||
8 | unsigned a_data; /* length of data, in bytes */ | ||
9 | unsigned a_bss; /* length of uninitialized data area for file, in bytes */ | ||
10 | unsigned a_syms; /* length of symbol table data in file, in bytes */ | ||
11 | unsigned a_entry; /* start address */ | ||
12 | unsigned a_trsize; /* length of relocation info for text, in bytes */ | ||
13 | unsigned a_drsize; /* length of relocation info for data, in bytes */ | ||
14 | }; | ||
15 | |||
16 | #define N_TRSIZE(a) ((a).a_trsize) | ||
17 | #define N_DRSIZE(a) ((a).a_drsize) | ||
18 | #define N_SYMSIZE(a) ((a).a_syms) | ||
19 | |||
20 | #ifdef __KERNEL__ | ||
21 | |||
22 | #define STACK_TOP TASK_SIZE | ||
23 | |||
24 | #endif | ||
25 | |||
26 | #endif /* __I386_A_OUT_H__ */ | ||
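As a hedged illustration of how the struct fields and accessor macros are used, a user-space style sketch that reads the header from a file descriptor and reports the segment sizes. The read()/printf() plumbing is illustrative only, and the magic-number macros (N_MAGIC and friends) live in <linux/a.out.h>, not in this header.

#include <stdio.h>
#include <unistd.h>

static int dump_aout_header(int fd)
{
	struct exec ex;

	if (read(fd, &ex, sizeof(ex)) != sizeof(ex))
		return -1;

	printf("text %u  data %u  bss %u  entry %#x\n",
	       ex.a_text, ex.a_data, ex.a_bss, ex.a_entry);
	printf("relocs: text %u  data %u  symbols %u\n",
	       N_TRSIZE(ex), N_DRSIZE(ex), N_SYMSIZE(ex));
	return 0;
}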
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
new file mode 100644
index 000000000000..c976c1dadece
--- /dev/null
+++ b/include/asm-i386/acpi.h
@@ -0,0 +1,190 @@
1 | /* | ||
2 | * asm-i386/acpi.h | ||
3 | * | ||
4 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
5 | * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | * | ||
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
24 | */ | ||
25 | |||
26 | #ifndef _ASM_ACPI_H | ||
27 | #define _ASM_ACPI_H | ||
28 | |||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | #include <asm/system.h> /* defines cmpxchg */ | ||
32 | |||
33 | #define COMPILER_DEPENDENT_INT64 long long | ||
34 | #define COMPILER_DEPENDENT_UINT64 unsigned long long | ||
35 | |||
36 | /* | ||
37 | * Calling conventions: | ||
38 | * | ||
39 | * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) | ||
40 | * ACPI_EXTERNAL_XFACE - External ACPI interfaces | ||
41 | * ACPI_INTERNAL_XFACE - Internal ACPI interfaces | ||
42 | * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces | ||
43 | */ | ||
44 | #define ACPI_SYSTEM_XFACE | ||
45 | #define ACPI_EXTERNAL_XFACE | ||
46 | #define ACPI_INTERNAL_XFACE | ||
47 | #define ACPI_INTERNAL_VAR_XFACE | ||
48 | |||
49 | /* Asm macros */ | ||
50 | |||
51 | #define ACPI_ASM_MACROS | ||
52 | #define BREAKPOINT3 | ||
53 | #define ACPI_DISABLE_IRQS() local_irq_disable() | ||
54 | #define ACPI_ENABLE_IRQS() local_irq_enable() | ||
55 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() | ||
56 | |||
57 | |||
58 | static inline int | ||
59 | __acpi_acquire_global_lock (unsigned int *lock) | ||
60 | { | ||
61 | unsigned int old, new, val; | ||
62 | do { | ||
63 | old = *lock; | ||
64 | new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); | ||
65 | val = cmpxchg(lock, old, new); | ||
66 | } while (unlikely (val != old)); | ||
67 | return (new < 3) ? -1 : 0; | ||
68 | } | ||
69 | |||
70 | static inline int | ||
71 | __acpi_release_global_lock (unsigned int *lock) | ||
72 | { | ||
73 | unsigned int old, new, val; | ||
74 | do { | ||
75 | old = *lock; | ||
76 | new = old & ~0x3; | ||
77 | val = cmpxchg(lock, old, new); | ||
78 | } while (unlikely (val != old)); | ||
79 | return old & 0x1; | ||
80 | } | ||
81 | |||
82 | #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ | ||
83 | ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) | ||
84 | |||
85 | #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ | ||
86 | ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) | ||
87 | |||
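A sketch of the calling pattern these two macros are designed for, following the ACPI firmware global-lock protocol: if the acquire attempt had to leave the pending bit set, the caller waits for the lock-release event; if the release finds the pending bit set, the caller must signal the firmware. The lock pointer and the two wait/signal helpers are illustrative placeholders, not kernel APIs.

static void example_with_global_lock(unsigned int *facs_global_lock)
{
	int acquired;

	ACPI_ACQUIRE_GLOBAL_LOCK(facs_global_lock, acquired);
	if (!acquired)
		wait_for_global_lock_event();	/* hypothetical helper */

	/* ... touch hardware shared with the firmware ... */

	ACPI_RELEASE_GLOBAL_LOCK(facs_global_lock, acquired);
	if (acquired)				/* pending bit was set */
		signal_global_lock_release();	/* hypothetical helper */
}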
88 | /* | ||
89 | * Math helper asm macros | ||
90 | */ | ||
91 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ | ||
92 | asm("divl %2;" \ | ||
93 | :"=a"(q32), "=d"(r32) \ | ||
94 | :"r"(d32), \ | ||
95 | "0"(n_lo), "1"(n_hi)) | ||
96 | |||
97 | |||
98 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ | ||
99 | asm("shrl $1,%2;" \ | ||
100 | "rcrl $1,%3;" \ | ||
101 | :"=r"(n_hi), "=r"(n_lo) \ | ||
102 | :"0"(n_hi), "1"(n_lo)) | ||
103 | |||
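To illustrate the divide helper, a small sketch that splits a 64-bit value and feeds it through ACPI_DIV_64_BY_32. Because the macro is a bare divl, the caller must guarantee the quotient fits in 32 bits (n_hi < divisor) or the CPU raises a divide fault; the function name and u64 plumbing are illustrative and assume <linux/types.h> is available.

static u32 example_div_u64_by_u32(u64 value, u32 divisor)
{
	u32 n_hi = (u32)(value >> 32);
	u32 n_lo = (u32)value;
	u32 quotient, remainder;

	ACPI_DIV_64_BY_32(n_hi, n_lo, divisor, quotient, remainder);
	return quotient;	/* the remainder is available as well */
}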
104 | /* | ||
105 | * Refer to the Intel ACPI _PDC support document for bit definitions | ||
106 | */ | ||
107 | #define ACPI_PDC_EST_CAPABILITY_SMP 0xa | ||
108 | #define ACPI_PDC_EST_CAPABILITY_MSR 0x1 | ||
109 | |||
110 | #ifdef CONFIG_ACPI_BOOT | ||
111 | extern int acpi_lapic; | ||
112 | extern int acpi_ioapic; | ||
113 | extern int acpi_noirq; | ||
114 | extern int acpi_strict; | ||
115 | extern int acpi_disabled; | ||
116 | extern int acpi_ht; | ||
117 | extern int acpi_pci_disabled; | ||
118 | static inline void disable_acpi(void) | ||
119 | { | ||
120 | acpi_disabled = 1; | ||
121 | acpi_ht = 0; | ||
122 | acpi_pci_disabled = 1; | ||
123 | acpi_noirq = 1; | ||
124 | } | ||
125 | |||
126 | /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ | ||
127 | #define FIX_ACPI_PAGES 4 | ||
128 | |||
129 | extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); | ||
130 | |||
131 | #ifdef CONFIG_X86_IO_APIC | ||
132 | extern int skip_ioapic_setup; | ||
133 | extern int acpi_skip_timer_override; | ||
134 | |||
135 | extern void check_acpi_pci(void); | ||
136 | |||
137 | static inline void disable_ioapic_setup(void) | ||
138 | { | ||
139 | skip_ioapic_setup = 1; | ||
140 | } | ||
141 | |||
142 | static inline int ioapic_setup_disabled(void) | ||
143 | { | ||
144 | return skip_ioapic_setup; | ||
145 | } | ||
146 | |||
147 | #else | ||
148 | static inline void disable_ioapic_setup(void) { } | ||
149 | static inline void check_acpi_pci(void) { } | ||
150 | |||
151 | #endif | ||
152 | |||
153 | #else /* CONFIG_ACPI_BOOT */ | ||
154 | # define acpi_lapic 0 | ||
155 | # define acpi_ioapic 0 | ||
156 | |||
157 | #endif | ||
158 | |||
159 | #ifdef CONFIG_ACPI_PCI | ||
160 | static inline void acpi_noirq_set(void) { acpi_noirq = 1; } | ||
161 | static inline void acpi_disable_pci(void) | ||
162 | { | ||
163 | acpi_pci_disabled = 1; | ||
164 | acpi_noirq_set(); | ||
165 | } | ||
166 | extern int acpi_irq_balance_set(char *str); | ||
167 | #else | ||
168 | static inline void acpi_noirq_set(void) { } | ||
169 | static inline void acpi_disable_pci(void) { } | ||
170 | static inline int acpi_irq_balance_set(char *str) { return 0; } | ||
171 | #endif | ||
172 | |||
173 | #ifdef CONFIG_ACPI_SLEEP | ||
174 | |||
175 | /* routines for saving/restoring kernel state */ | ||
176 | extern int acpi_save_state_mem(void); | ||
177 | extern void acpi_restore_state_mem(void); | ||
178 | |||
179 | extern unsigned long acpi_wakeup_address; | ||
180 | |||
181 | /* early initialization routine */ | ||
182 | extern void acpi_reserve_bootmem(void); | ||
183 | |||
184 | #endif /*CONFIG_ACPI_SLEEP*/ | ||
185 | |||
186 | extern u8 x86_acpiid_to_apicid[]; | ||
187 | |||
188 | #endif /*__KERNEL__*/ | ||
189 | |||
190 | #endif /*_ASM_ACPI_H*/ | ||
diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
new file mode 100644
index 000000000000..a917ff50354f
--- /dev/null
+++ b/include/asm-i386/agp.h
@@ -0,0 +1,24 @@
1 | #ifndef AGP_H | ||
2 | #define AGP_H 1 | ||
3 | |||
4 | #include <asm/pgtable.h> | ||
5 | #include <asm/cacheflush.h> | ||
6 | |||
7 | /* | ||
8 | * Functions to keep the agpgart mappings coherent with the MMU. | ||
9 | * The GART gives the CPU a physical alias of pages in memory. The alias region is | ||
10 | * mapped uncacheable. Make sure there are no conflicting mappings | ||
11 | * with different cacheability attributes for the same page. This avoids | ||
12 | * data corruption on some CPUs. | ||
13 | */ | ||
14 | |||
15 | int map_page_into_agp(struct page *page); | ||
16 | int unmap_page_from_agp(struct page *page); | ||
17 | #define flush_agp_mappings() global_flush_tlb() | ||
18 | |||
19 | /* Could use CLFLUSH here if the cpu supports it. But then it would | ||
20 | need to be called for each cacheline of the whole page so it may not be | ||
21 | worth it. Would need a page for it. */ | ||
22 | #define flush_agp_cache() asm volatile("wbinvd":::"memory") | ||
23 | |||
24 | #endif | ||
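A hedged sketch of the call sequence an AGP backend is expected to follow with these helpers: remap each page it hands to the GART as uncacheable, then do one global flush. alloc_page()/__free_page() come from the mm headers, which are assumed to be included; the function itself is illustrative.

static struct page *example_agp_prepare_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;

	if (map_page_into_agp(page)) {		/* change kernel mapping to uncached */
		__free_page(page);
		return NULL;
	}
	flush_agp_mappings();			/* i.e. global_flush_tlb() */
	return page;
}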
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
new file mode 100644
index 000000000000..e1de67483f38
--- /dev/null
+++ b/include/asm-i386/apic.h
@@ -0,0 +1,126 @@
1 | #ifndef __ASM_APIC_H | ||
2 | #define __ASM_APIC_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/pm.h> | ||
6 | #include <asm/fixmap.h> | ||
7 | #include <asm/apicdef.h> | ||
8 | #include <asm/system.h> | ||
9 | |||
10 | #define Dprintk(x...) | ||
11 | |||
12 | /* | ||
13 | * Debugging macros | ||
14 | */ | ||
15 | #define APIC_QUIET 0 | ||
16 | #define APIC_VERBOSE 1 | ||
17 | #define APIC_DEBUG 2 | ||
18 | |||
19 | extern int apic_verbosity; | ||
20 | |||
21 | /* | ||
22 | * Define the default level of output to be very little. | ||
23 | * This can be turned up by using apic=verbose for more | ||
24 | * information and apic=debug for _lots_ of information. | ||
25 | * apic_verbosity is defined in apic.c | ||
26 | */ | ||
27 | #define apic_printk(v, s, a...) do { \ | ||
28 | if ((v) <= apic_verbosity) \ | ||
29 | printk(s, ##a); \ | ||
30 | } while (0) | ||
31 | |||
32 | |||
33 | #ifdef CONFIG_X86_LOCAL_APIC | ||
34 | |||
35 | /* | ||
36 | * Basic functions accessing APICs. | ||
37 | */ | ||
38 | |||
39 | static __inline void apic_write(unsigned long reg, unsigned long v) | ||
40 | { | ||
41 | *((volatile unsigned long *)(APIC_BASE+reg)) = v; | ||
42 | } | ||
43 | |||
44 | static __inline void apic_write_atomic(unsigned long reg, unsigned long v) | ||
45 | { | ||
46 | xchg((volatile unsigned long *)(APIC_BASE+reg), v); | ||
47 | } | ||
48 | |||
49 | static __inline unsigned long apic_read(unsigned long reg) | ||
50 | { | ||
51 | return *((volatile unsigned long *)(APIC_BASE+reg)); | ||
52 | } | ||
53 | |||
54 | static __inline__ void apic_wait_icr_idle(void) | ||
55 | { | ||
56 | while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY ) | ||
57 | cpu_relax(); | ||
58 | } | ||
59 | |||
60 | int get_physical_broadcast(void); | ||
61 | |||
62 | #ifdef CONFIG_X86_GOOD_APIC | ||
63 | # define FORCE_READ_AROUND_WRITE 0 | ||
64 | # define apic_read_around(x) | ||
65 | # define apic_write_around(x,y) apic_write((x),(y)) | ||
66 | #else | ||
67 | # define FORCE_READ_AROUND_WRITE 1 | ||
68 | # define apic_read_around(x) apic_read(x) | ||
69 | # define apic_write_around(x,y) apic_write_atomic((x),(y)) | ||
70 | #endif | ||
71 | |||
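As a sketch of how the accessors above combine, here is the canonical i386 sequence for sending a fixed-vector IPI to one CPU in physical destination mode: wait for the ICR to go idle, program the destination, then write the command word. The parameter values are illustrative; the register and field macros come from apicdef.h, which this header includes.

static void example_send_ipi(unsigned int dest_apicid, unsigned int vector)
{
	apic_wait_icr_idle();
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(dest_apicid));
	/* physical destination mode: the APIC_DEST_LOGICAL bit stays clear */
	apic_write_around(APIC_ICR, APIC_DM_FIXED | vector);
}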
72 | static inline void ack_APIC_irq(void) | ||
73 | { | ||
74 | /* | ||
75 | * ack_APIC_irq() actually gets compiled as a single instruction: | ||
76 | * - a single rmw on Pentium/82489DX | ||
77 | * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) | ||
78 | * ... yummie. | ||
79 | */ | ||
80 | |||
81 | /* Docs say use 0 for future compatibility */ | ||
82 | apic_write_around(APIC_EOI, 0); | ||
83 | } | ||
84 | |||
85 | extern void (*wait_timer_tick)(void); | ||
86 | |||
87 | extern int get_maxlvt(void); | ||
88 | extern void clear_local_APIC(void); | ||
89 | extern void connect_bsp_APIC (void); | ||
90 | extern void disconnect_bsp_APIC (void); | ||
91 | extern void disable_local_APIC (void); | ||
92 | extern void lapic_shutdown (void); | ||
93 | extern int verify_local_APIC (void); | ||
94 | extern void cache_APIC_registers (void); | ||
95 | extern void sync_Arb_IDs (void); | ||
96 | extern void init_bsp_APIC (void); | ||
97 | extern void setup_local_APIC (void); | ||
98 | extern void init_apic_mappings (void); | ||
99 | extern void smp_local_timer_interrupt (struct pt_regs * regs); | ||
100 | extern void setup_boot_APIC_clock (void); | ||
101 | extern void setup_secondary_APIC_clock (void); | ||
102 | extern void setup_apic_nmi_watchdog (void); | ||
103 | extern int reserve_lapic_nmi(void); | ||
104 | extern void release_lapic_nmi(void); | ||
105 | extern void disable_timer_nmi_watchdog(void); | ||
106 | extern void enable_timer_nmi_watchdog(void); | ||
107 | extern void nmi_watchdog_tick (struct pt_regs * regs); | ||
108 | extern int APIC_init_uniprocessor (void); | ||
109 | extern void disable_APIC_timer(void); | ||
110 | extern void enable_APIC_timer(void); | ||
111 | |||
112 | extern int check_nmi_watchdog (void); | ||
113 | extern void enable_NMI_through_LVT0 (void * dummy); | ||
114 | |||
115 | extern unsigned int nmi_watchdog; | ||
116 | #define NMI_NONE 0 | ||
117 | #define NMI_IO_APIC 1 | ||
118 | #define NMI_LOCAL_APIC 2 | ||
119 | #define NMI_INVALID 3 | ||
120 | |||
121 | #else /* !CONFIG_X86_LOCAL_APIC */ | ||
122 | static inline void lapic_shutdown(void) { } | ||
123 | |||
124 | #endif /* !CONFIG_X86_LOCAL_APIC */ | ||
125 | |||
126 | #endif /* __ASM_APIC_H */ | ||
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
new file mode 100644
index 000000000000..c689554ad5b9
--- /dev/null
+++ b/include/asm-i386/apicdef.h
@@ -0,0 +1,377 @@
1 | #ifndef __ASM_APICDEF_H | ||
2 | #define __ASM_APICDEF_H | ||
3 | |||
4 | /* | ||
5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) | ||
6 | * | ||
7 | * Alan Cox <Alan.Cox@linux.org>, 1995. | ||
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | ||
9 | */ | ||
10 | |||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | ||
12 | |||
13 | #define APIC_ID 0x20 | ||
14 | #define APIC_LVR 0x30 | ||
15 | #define APIC_LVR_MASK 0xFF00FF | ||
16 | #define GET_APIC_VERSION(x) ((x)&0xFF) | ||
17 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) | ||
18 | #define APIC_INTEGRATED(x) ((x)&0xF0) | ||
19 | #define APIC_TASKPRI 0x80 | ||
20 | #define APIC_TPRI_MASK 0xFF | ||
21 | #define APIC_ARBPRI 0x90 | ||
22 | #define APIC_ARBPRI_MASK 0xFF | ||
23 | #define APIC_PROCPRI 0xA0 | ||
24 | #define APIC_EOI 0xB0 | ||
25 | #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ | ||
26 | #define APIC_RRR 0xC0 | ||
27 | #define APIC_LDR 0xD0 | ||
28 | #define APIC_LDR_MASK (0xFF<<24) | ||
29 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) | ||
30 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | ||
31 | #define APIC_ALL_CPUS 0xFF | ||
32 | #define APIC_DFR 0xE0 | ||
33 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | ||
34 | #define APIC_DFR_FLAT 0xFFFFFFFFul | ||
35 | #define APIC_SPIV 0xF0 | ||
36 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | ||
37 | #define APIC_SPIV_APIC_ENABLED (1<<8) | ||
38 | #define APIC_ISR 0x100 | ||
39 | #define APIC_TMR 0x180 | ||
40 | #define APIC_IRR 0x200 | ||
41 | #define APIC_ESR 0x280 | ||
42 | #define APIC_ESR_SEND_CS 0x00001 | ||
43 | #define APIC_ESR_RECV_CS 0x00002 | ||
44 | #define APIC_ESR_SEND_ACC 0x00004 | ||
45 | #define APIC_ESR_RECV_ACC 0x00008 | ||
46 | #define APIC_ESR_SENDILL 0x00020 | ||
47 | #define APIC_ESR_RECVILL 0x00040 | ||
48 | #define APIC_ESR_ILLREGA 0x00080 | ||
49 | #define APIC_ICR 0x300 | ||
50 | #define APIC_DEST_SELF 0x40000 | ||
51 | #define APIC_DEST_ALLINC 0x80000 | ||
52 | #define APIC_DEST_ALLBUT 0xC0000 | ||
53 | #define APIC_ICR_RR_MASK 0x30000 | ||
54 | #define APIC_ICR_RR_INVALID 0x00000 | ||
55 | #define APIC_ICR_RR_INPROG 0x10000 | ||
56 | #define APIC_ICR_RR_VALID 0x20000 | ||
57 | #define APIC_INT_LEVELTRIG 0x08000 | ||
58 | #define APIC_INT_ASSERT 0x04000 | ||
59 | #define APIC_ICR_BUSY 0x01000 | ||
60 | #define APIC_DEST_LOGICAL 0x00800 | ||
61 | #define APIC_DM_FIXED 0x00000 | ||
62 | #define APIC_DM_LOWEST 0x00100 | ||
63 | #define APIC_DM_SMI 0x00200 | ||
64 | #define APIC_DM_REMRD 0x00300 | ||
65 | #define APIC_DM_NMI 0x00400 | ||
66 | #define APIC_DM_INIT 0x00500 | ||
67 | #define APIC_DM_STARTUP 0x00600 | ||
68 | #define APIC_DM_EXTINT 0x00700 | ||
69 | #define APIC_VECTOR_MASK 0x000FF | ||
70 | #define APIC_ICR2 0x310 | ||
71 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | ||
72 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | ||
73 | #define APIC_LVTT 0x320 | ||
74 | #define APIC_LVTTHMR 0x330 | ||
75 | #define APIC_LVTPC 0x340 | ||
76 | #define APIC_LVT0 0x350 | ||
77 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | ||
78 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | ||
79 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | ||
80 | #define APIC_TIMER_BASE_CLKIN 0x0 | ||
81 | #define APIC_TIMER_BASE_TMBASE 0x1 | ||
82 | #define APIC_TIMER_BASE_DIV 0x2 | ||
83 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | ||
84 | #define APIC_LVT_MASKED (1<<16) | ||
85 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | ||
86 | #define APIC_LVT_REMOTE_IRR (1<<14) | ||
87 | #define APIC_INPUT_POLARITY (1<<13) | ||
88 | #define APIC_SEND_PENDING (1<<12) | ||
89 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | ||
90 | #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) | ||
91 | #define APIC_MODE_FIXED 0x0 | ||
92 | #define APIC_MODE_NMI 0x4 | ||
93 | #define APIC_MODE_EXINT 0x7 | ||
94 | #define APIC_LVT1 0x360 | ||
95 | #define APIC_LVTERR 0x370 | ||
96 | #define APIC_TMICT 0x380 | ||
97 | #define APIC_TMCCT 0x390 | ||
98 | #define APIC_TDCR 0x3E0 | ||
99 | #define APIC_TDR_DIV_TMBASE (1<<2) | ||
100 | #define APIC_TDR_DIV_1 0xB | ||
101 | #define APIC_TDR_DIV_2 0x0 | ||
102 | #define APIC_TDR_DIV_4 0x1 | ||
103 | #define APIC_TDR_DIV_8 0x2 | ||
104 | #define APIC_TDR_DIV_16 0x3 | ||
105 | #define APIC_TDR_DIV_32 0x8 | ||
106 | #define APIC_TDR_DIV_64 0x9 | ||
107 | #define APIC_TDR_DIV_128 0xA | ||
108 | |||
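Since the divide-configuration encoding above is not a plain binary field, a helper along these lines (illustrative, not part of the header) is what callers need to translate a power-of-two divisor into the value written to APIC_TDCR:

static unsigned int lvtt_divisor_to_tdcr(unsigned int divisor)
{
	switch (divisor) {
	case 1:		return APIC_TDR_DIV_1;		/* 0xB */
	case 2:		return APIC_TDR_DIV_2;		/* 0x0 */
	case 4:		return APIC_TDR_DIV_4;
	case 8:		return APIC_TDR_DIV_8;
	case 16:	return APIC_TDR_DIV_16;
	case 32:	return APIC_TDR_DIV_32;
	case 64:	return APIC_TDR_DIV_64;
	case 128:	return APIC_TDR_DIV_128;
	default:	return APIC_TDR_DIV_16;		/* arbitrary fallback */
	}
}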
109 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | ||
110 | |||
111 | #ifdef CONFIG_NUMA | ||
112 | #define MAX_IO_APICS 32 | ||
113 | #else | ||
114 | #define MAX_IO_APICS 8 | ||
115 | #endif | ||
116 | |||
117 | /* | ||
118 | * the local APIC register structure, memory mapped. Not terribly well | ||
119 | * tested, but we might eventually use this one in the future - the | ||
120 | * reason we cannot use it right now is the P5 APIC: it has an | ||
121 | * erratum and cannot take 8-bit reads and writes, only 32-bit ones ... | ||
122 | */ | ||
123 | #define u32 unsigned int | ||
124 | |||
125 | #define lapic ((volatile struct local_apic *)APIC_BASE) | ||
126 | |||
127 | struct local_apic { | ||
128 | |||
129 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; | ||
130 | |||
131 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; | ||
132 | |||
133 | /*020*/ struct { /* APIC ID Register */ | ||
134 | u32 __reserved_1 : 24, | ||
135 | phys_apic_id : 4, | ||
136 | __reserved_2 : 4; | ||
137 | u32 __reserved[3]; | ||
138 | } id; | ||
139 | |||
140 | /*030*/ const | ||
141 | struct { /* APIC Version Register */ | ||
142 | u32 version : 8, | ||
143 | __reserved_1 : 8, | ||
144 | max_lvt : 8, | ||
145 | __reserved_2 : 8; | ||
146 | u32 __reserved[3]; | ||
147 | } version; | ||
148 | |||
149 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; | ||
150 | |||
151 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; | ||
152 | |||
153 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; | ||
154 | |||
155 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; | ||
156 | |||
157 | /*080*/ struct { /* Task Priority Register */ | ||
158 | u32 priority : 8, | ||
159 | __reserved_1 : 24; | ||
160 | u32 __reserved_2[3]; | ||
161 | } tpr; | ||
162 | |||
163 | /*090*/ const | ||
164 | struct { /* Arbitration Priority Register */ | ||
165 | u32 priority : 8, | ||
166 | __reserved_1 : 24; | ||
167 | u32 __reserved_2[3]; | ||
168 | } apr; | ||
169 | |||
170 | /*0A0*/ const | ||
171 | struct { /* Processor Priority Register */ | ||
172 | u32 priority : 8, | ||
173 | __reserved_1 : 24; | ||
174 | u32 __reserved_2[3]; | ||
175 | } ppr; | ||
176 | |||
177 | /*0B0*/ struct { /* End Of Interrupt Register */ | ||
178 | u32 eoi; | ||
179 | u32 __reserved[3]; | ||
180 | } eoi; | ||
181 | |||
182 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; | ||
183 | |||
184 | /*0D0*/ struct { /* Logical Destination Register */ | ||
185 | u32 __reserved_1 : 24, | ||
186 | logical_dest : 8; | ||
187 | u32 __reserved_2[3]; | ||
188 | } ldr; | ||
189 | |||
190 | /*0E0*/ struct { /* Destination Format Register */ | ||
191 | u32 __reserved_1 : 28, | ||
192 | model : 4; | ||
193 | u32 __reserved_2[3]; | ||
194 | } dfr; | ||
195 | |||
196 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ | ||
197 | u32 spurious_vector : 8, | ||
198 | apic_enabled : 1, | ||
199 | focus_cpu : 1, | ||
200 | __reserved_2 : 22; | ||
201 | u32 __reserved_3[3]; | ||
202 | } svr; | ||
203 | |||
204 | /*100*/ struct { /* In Service Register */ | ||
205 | /*170*/ u32 bitfield; | ||
206 | u32 __reserved[3]; | ||
207 | } isr [8]; | ||
208 | |||
209 | /*180*/ struct { /* Trigger Mode Register */ | ||
210 | /*1F0*/ u32 bitfield; | ||
211 | u32 __reserved[3]; | ||
212 | } tmr [8]; | ||
213 | |||
214 | /*200*/ struct { /* Interrupt Request Register */ | ||
215 | /*270*/ u32 bitfield; | ||
216 | u32 __reserved[3]; | ||
217 | } irr [8]; | ||
218 | |||
219 | /*280*/ union { /* Error Status Register */ | ||
220 | struct { | ||
221 | u32 send_cs_error : 1, | ||
222 | receive_cs_error : 1, | ||
223 | send_accept_error : 1, | ||
224 | receive_accept_error : 1, | ||
225 | __reserved_1 : 1, | ||
226 | send_illegal_vector : 1, | ||
227 | receive_illegal_vector : 1, | ||
228 | illegal_register_address : 1, | ||
229 | __reserved_2 : 24; | ||
230 | u32 __reserved_3[3]; | ||
231 | } error_bits; | ||
232 | struct { | ||
233 | u32 errors; | ||
234 | u32 __reserved_3[3]; | ||
235 | } all_errors; | ||
236 | } esr; | ||
237 | |||
238 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; | ||
239 | |||
240 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; | ||
241 | |||
242 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; | ||
243 | |||
244 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; | ||
245 | |||
246 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; | ||
247 | |||
248 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; | ||
249 | |||
250 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; | ||
251 | |||
252 | /*300*/ struct { /* Interrupt Command Register 1 */ | ||
253 | u32 vector : 8, | ||
254 | delivery_mode : 3, | ||
255 | destination_mode : 1, | ||
256 | delivery_status : 1, | ||
257 | __reserved_1 : 1, | ||
258 | level : 1, | ||
259 | trigger : 1, | ||
260 | __reserved_2 : 2, | ||
261 | shorthand : 2, | ||
262 | __reserved_3 : 12; | ||
263 | u32 __reserved_4[3]; | ||
264 | } icr1; | ||
265 | |||
266 | /*310*/ struct { /* Interrupt Command Register 2 */ | ||
267 | union { | ||
268 | u32 __reserved_1 : 24, | ||
269 | phys_dest : 4, | ||
270 | __reserved_2 : 4; | ||
271 | u32 __reserved_3 : 24, | ||
272 | logical_dest : 8; | ||
273 | } dest; | ||
274 | u32 __reserved_4[3]; | ||
275 | } icr2; | ||
276 | |||
277 | /*320*/ struct { /* LVT - Timer */ | ||
278 | u32 vector : 8, | ||
279 | __reserved_1 : 4, | ||
280 | delivery_status : 1, | ||
281 | __reserved_2 : 3, | ||
282 | mask : 1, | ||
283 | timer_mode : 1, | ||
284 | __reserved_3 : 14; | ||
285 | u32 __reserved_4[3]; | ||
286 | } lvt_timer; | ||
287 | |||
288 | /*330*/ struct { /* LVT - Thermal Sensor */ | ||
289 | u32 vector : 8, | ||
290 | delivery_mode : 3, | ||
291 | __reserved_1 : 1, | ||
292 | delivery_status : 1, | ||
293 | __reserved_2 : 3, | ||
294 | mask : 1, | ||
295 | __reserved_3 : 15; | ||
296 | u32 __reserved_4[3]; | ||
297 | } lvt_thermal; | ||
298 | |||
299 | /*340*/ struct { /* LVT - Performance Counter */ | ||
300 | u32 vector : 8, | ||
301 | delivery_mode : 3, | ||
302 | __reserved_1 : 1, | ||
303 | delivery_status : 1, | ||
304 | __reserved_2 : 3, | ||
305 | mask : 1, | ||
306 | __reserved_3 : 15; | ||
307 | u32 __reserved_4[3]; | ||
308 | } lvt_pc; | ||
309 | |||
310 | /*350*/ struct { /* LVT - LINT0 */ | ||
311 | u32 vector : 8, | ||
312 | delivery_mode : 3, | ||
313 | __reserved_1 : 1, | ||
314 | delivery_status : 1, | ||
315 | polarity : 1, | ||
316 | remote_irr : 1, | ||
317 | trigger : 1, | ||
318 | mask : 1, | ||
319 | __reserved_2 : 15; | ||
320 | u32 __reserved_3[3]; | ||
321 | } lvt_lint0; | ||
322 | |||
323 | /*360*/ struct { /* LVT - LINT1 */ | ||
324 | u32 vector : 8, | ||
325 | delivery_mode : 3, | ||
326 | __reserved_1 : 1, | ||
327 | delivery_status : 1, | ||
328 | polarity : 1, | ||
329 | remote_irr : 1, | ||
330 | trigger : 1, | ||
331 | mask : 1, | ||
332 | __reserved_2 : 15; | ||
333 | u32 __reserved_3[3]; | ||
334 | } lvt_lint1; | ||
335 | |||
336 | /*370*/ struct { /* LVT - Error */ | ||
337 | u32 vector : 8, | ||
338 | __reserved_1 : 4, | ||
339 | delivery_status : 1, | ||
340 | __reserved_2 : 3, | ||
341 | mask : 1, | ||
342 | __reserved_3 : 15; | ||
343 | u32 __reserved_4[3]; | ||
344 | } lvt_error; | ||
345 | |||
346 | /*380*/ struct { /* Timer Initial Count Register */ | ||
347 | u32 initial_count; | ||
348 | u32 __reserved_2[3]; | ||
349 | } timer_icr; | ||
350 | |||
351 | /*390*/ const | ||
352 | struct { /* Timer Current Count Register */ | ||
353 | u32 curr_count; | ||
354 | u32 __reserved_2[3]; | ||
355 | } timer_ccr; | ||
356 | |||
357 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; | ||
358 | |||
359 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; | ||
360 | |||
361 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; | ||
362 | |||
363 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; | ||
364 | |||
365 | /*3E0*/ struct { /* Timer Divide Configuration Register */ | ||
366 | u32 divisor : 4, | ||
367 | __reserved_1 : 28; | ||
368 | u32 __reserved_2[3]; | ||
369 | } timer_dcr; | ||
370 | |||
371 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; | ||
372 | |||
373 | } __attribute__ ((packed)); | ||
374 | |||
375 | #undef u32 | ||
376 | |||
377 | #endif | ||
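For illustration, two trivial accessors using the lapic pointer and the bitfield layout above. As the comment before the struct warns, production code goes through apic_read()/apic_write() with whole 32-bit accesses; this sketch only shows what the layout expresses.

static inline unsigned int example_lapic_phys_id(void)
{
	return lapic->id.phys_apic_id;
}

static inline unsigned int example_lapic_max_lvt(void)
{
	return lapic->version.max_lvt;
}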
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
new file mode 100644
index 000000000000..28b96a6fb9fa
--- /dev/null
+++ b/include/asm-i386/arch_hooks.h
@@ -0,0 +1,27 @@
1 | #ifndef _ASM_ARCH_HOOKS_H | ||
2 | #define _ASM_ARCH_HOOKS_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | |||
6 | /* | ||
7 | * linux/include/asm/arch_hooks.h | ||
8 | * | ||
9 | * define the architecture specific hooks | ||
10 | */ | ||
11 | |||
12 | /* these aren't arch hooks, they are generic routines | ||
13 | * that can be used by the hooks */ | ||
14 | extern void init_ISA_irqs(void); | ||
15 | extern void apic_intr_init(void); | ||
16 | extern void smp_intr_init(void); | ||
17 | extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs); | ||
18 | |||
19 | /* these are the defined hooks */ | ||
20 | extern void intr_init_hook(void); | ||
21 | extern void pre_intr_init_hook(void); | ||
22 | extern void pre_setup_arch_hook(void); | ||
23 | extern void trap_init_hook(void); | ||
24 | extern void time_init_hook(void); | ||
25 | extern void mca_nmi_hook(void); | ||
26 | |||
27 | #endif | ||
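A hedged sketch of what a sub-architecture might provide for two of these hooks, loosely modelled on default PC behaviour; the bodies are illustrative and are not the mach-default implementations verbatim.

#include <linux/init.h>
#include <asm/arch_hooks.h>

void __init pre_intr_init_hook(void)
{
	init_ISA_irqs();		/* generic helper declared above */
}

void __init intr_init_hook(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	apic_intr_init();
#endif
	/* a real sub-arch would also wire up its platform IRQs here */
}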
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
new file mode 100644
index 000000000000..509720be772a
--- /dev/null
+++ b/include/asm-i386/atomic.h
@@ -0,0 +1,236 @@
1 | #ifndef __ARCH_I386_ATOMIC__ | ||
2 | #define __ARCH_I386_ATOMIC__ | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/compiler.h> | ||
6 | #include <asm/processor.h> | ||
7 | |||
8 | /* | ||
9 | * Atomic operations that C can't guarantee us. Useful for | ||
10 | * resource counting etc.. | ||
11 | */ | ||
12 | |||
13 | #ifdef CONFIG_SMP | ||
14 | #define LOCK "lock ; " | ||
15 | #else | ||
16 | #define LOCK "" | ||
17 | #endif | ||
18 | |||
19 | /* | ||
20 | * Make sure gcc doesn't try to be clever and move things around | ||
21 | * on us. We need to use _exactly_ the address the user gave us, | ||
22 | * not some alias that contains the same information. | ||
23 | */ | ||
24 | typedef struct { volatile int counter; } atomic_t; | ||
25 | |||
26 | #define ATOMIC_INIT(i) { (i) } | ||
27 | |||
28 | /** | ||
29 | * atomic_read - read atomic variable | ||
30 | * @v: pointer of type atomic_t | ||
31 | * | ||
32 | * Atomically reads the value of @v. | ||
33 | */ | ||
34 | #define atomic_read(v) ((v)->counter) | ||
35 | |||
36 | /** | ||
37 | * atomic_set - set atomic variable | ||
38 | * @v: pointer of type atomic_t | ||
39 | * @i: required value | ||
40 | * | ||
41 | * Atomically sets the value of @v to @i. | ||
42 | */ | ||
43 | #define atomic_set(v,i) (((v)->counter) = (i)) | ||
44 | |||
45 | /** | ||
46 | * atomic_add - add integer to atomic variable | ||
47 | * @i: integer value to add | ||
48 | * @v: pointer of type atomic_t | ||
49 | * | ||
50 | * Atomically adds @i to @v. | ||
51 | */ | ||
52 | static __inline__ void atomic_add(int i, atomic_t *v) | ||
53 | { | ||
54 | __asm__ __volatile__( | ||
55 | LOCK "addl %1,%0" | ||
56 | :"=m" (v->counter) | ||
57 | :"ir" (i), "m" (v->counter)); | ||
58 | } | ||
59 | |||
60 | /** | ||
61 | * atomic_sub - subtract integer from atomic variable | ||
62 | * @i: integer value to subtract | ||
63 | * @v: pointer of type atomic_t | ||
64 | * | ||
65 | * Atomically subtracts @i from @v. | ||
66 | */ | ||
67 | static __inline__ void atomic_sub(int i, atomic_t *v) | ||
68 | { | ||
69 | __asm__ __volatile__( | ||
70 | LOCK "subl %1,%0" | ||
71 | :"=m" (v->counter) | ||
72 | :"ir" (i), "m" (v->counter)); | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * atomic_sub_and_test - subtract value from variable and test result | ||
77 | * @i: integer value to subtract | ||
78 | * @v: pointer of type atomic_t | ||
79 | * | ||
80 | * Atomically subtracts @i from @v and returns | ||
81 | * true if the result is zero, or false for all | ||
82 | * other cases. | ||
83 | */ | ||
84 | static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | ||
85 | { | ||
86 | unsigned char c; | ||
87 | |||
88 | __asm__ __volatile__( | ||
89 | LOCK "subl %2,%0; sete %1" | ||
90 | :"=m" (v->counter), "=qm" (c) | ||
91 | :"ir" (i), "m" (v->counter) : "memory"); | ||
92 | return c; | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * atomic_inc - increment atomic variable | ||
97 | * @v: pointer of type atomic_t | ||
98 | * | ||
99 | * Atomically increments @v by 1. | ||
100 | */ | ||
101 | static __inline__ void atomic_inc(atomic_t *v) | ||
102 | { | ||
103 | __asm__ __volatile__( | ||
104 | LOCK "incl %0" | ||
105 | :"=m" (v->counter) | ||
106 | :"m" (v->counter)); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * atomic_dec - decrement atomic variable | ||
111 | * @v: pointer of type atomic_t | ||
112 | * | ||
113 | * Atomically decrements @v by 1. | ||
114 | */ | ||
115 | static __inline__ void atomic_dec(atomic_t *v) | ||
116 | { | ||
117 | __asm__ __volatile__( | ||
118 | LOCK "decl %0" | ||
119 | :"=m" (v->counter) | ||
120 | :"m" (v->counter)); | ||
121 | } | ||
122 | |||
123 | /** | ||
124 | * atomic_dec_and_test - decrement and test | ||
125 | * @v: pointer of type atomic_t | ||
126 | * | ||
127 | * Atomically decrements @v by 1 and | ||
128 | * returns true if the result is 0, or false for all other | ||
129 | * cases. | ||
130 | */ | ||
131 | static __inline__ int atomic_dec_and_test(atomic_t *v) | ||
132 | { | ||
133 | unsigned char c; | ||
134 | |||
135 | __asm__ __volatile__( | ||
136 | LOCK "decl %0; sete %1" | ||
137 | :"=m" (v->counter), "=qm" (c) | ||
138 | :"m" (v->counter) : "memory"); | ||
139 | return c != 0; | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * atomic_inc_and_test - increment and test | ||
144 | * @v: pointer of type atomic_t | ||
145 | * | ||
146 | * Atomically increments @v by 1 | ||
147 | * and returns true if the result is zero, or false for all | ||
148 | * other cases. | ||
149 | */ | ||
150 | static __inline__ int atomic_inc_and_test(atomic_t *v) | ||
151 | { | ||
152 | unsigned char c; | ||
153 | |||
154 | __asm__ __volatile__( | ||
155 | LOCK "incl %0; sete %1" | ||
156 | :"=m" (v->counter), "=qm" (c) | ||
157 | :"m" (v->counter) : "memory"); | ||
158 | return c != 0; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * atomic_add_negative - add and test if negative | ||
163 | * @v: pointer of type atomic_t | ||
164 | * @i: integer value to add | ||
165 | * | ||
166 | * Atomically adds @i to @v and returns true | ||
167 | * if the result is negative, or false when | ||
168 | * result is greater than or equal to zero. | ||
169 | */ | ||
170 | static __inline__ int atomic_add_negative(int i, atomic_t *v) | ||
171 | { | ||
172 | unsigned char c; | ||
173 | |||
174 | __asm__ __volatile__( | ||
175 | LOCK "addl %2,%0; sets %1" | ||
176 | :"=m" (v->counter), "=qm" (c) | ||
177 | :"ir" (i), "m" (v->counter) : "memory"); | ||
178 | return c; | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * atomic_add_return - add and return | ||
183 | * @v: pointer of type atomic_t | ||
184 | * @i: integer value to add | ||
185 | * | ||
186 | * Atomically adds @i to @v and returns @i + @v | ||
187 | */ | ||
188 | static __inline__ int atomic_add_return(int i, atomic_t *v) | ||
189 | { | ||
190 | int __i; | ||
191 | #ifdef CONFIG_M386 | ||
192 | if(unlikely(boot_cpu_data.x86==3)) | ||
193 | goto no_xadd; | ||
194 | #endif | ||
195 | /* Modern 486+ processor */ | ||
196 | __i = i; | ||
197 | __asm__ __volatile__( | ||
198 | LOCK "xaddl %0, %1;" | ||
199 | :"=r"(i) | ||
200 | :"m"(v->counter), "0"(i)); | ||
201 | return i + __i; | ||
202 | |||
203 | #ifdef CONFIG_M386 | ||
204 | no_xadd: /* Legacy 386 processor */ | ||
205 | local_irq_disable(); | ||
206 | __i = atomic_read(v); | ||
207 | atomic_set(v, i + __i); | ||
208 | local_irq_enable(); | ||
209 | return i + __i; | ||
210 | #endif | ||
211 | } | ||
212 | |||
213 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | ||
214 | { | ||
215 | return atomic_add_return(-i,v); | ||
216 | } | ||
217 | |||
218 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | ||
219 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | ||
220 | |||
221 | /* These are x86-specific, used by some header files */ | ||
222 | #define atomic_clear_mask(mask, addr) \ | ||
223 | __asm__ __volatile__(LOCK "andl %0,%1" \ | ||
224 | : : "r" (~(mask)),"m" (*addr) : "memory") | ||
225 | |||
226 | #define atomic_set_mask(mask, addr) \ | ||
227 | __asm__ __volatile__(LOCK "orl %0,%1" \ | ||
228 | : : "r" (mask),"m" (*(addr)) : "memory") | ||
229 | |||
230 | /* Atomic operations are already serializing on x86 */ | ||
231 | #define smp_mb__before_atomic_dec() barrier() | ||
232 | #define smp_mb__after_atomic_dec() barrier() | ||
233 | #define smp_mb__before_atomic_inc() barrier() | ||
234 | #define smp_mb__after_atomic_inc() barrier() | ||
235 | |||
236 | #endif | ||
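The primitives above exist largely for patterns like the classic reference count sketched below; the object type and its destructor are illustrative names, not kernel APIs.

struct my_object {
	atomic_t refcount;
	/* ... payload ... */
};

static void my_object_get(struct my_object *obj)
{
	atomic_inc(&obj->refcount);
}

static void my_object_put(struct my_object *obj)
{
	/* atomic_dec_and_test() is true only for the final reference,
	 * so exactly one caller ends up freeing the object. */
	if (atomic_dec_and_test(&obj->refcount))
		my_object_destroy(obj);		/* hypothetical destructor */
}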
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
new file mode 100644
index 000000000000..9db0b712d57a
--- /dev/null
+++ b/include/asm-i386/bitops.h
@@ -0,0 +1,462 @@
1 | #ifndef _I386_BITOPS_H | ||
2 | #define _I386_BITOPS_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 1992, Linus Torvalds. | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/compiler.h> | ||
10 | |||
11 | /* | ||
12 | * These have to be done with inline assembly: that way the bit-setting | ||
13 | * is guaranteed to be atomic. All bit operations return 0 if the bit | ||
14 | * was cleared before the operation and != 0 if it was not. | ||
15 | * | ||
16 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | ||
17 | */ | ||
18 | |||
19 | #ifdef CONFIG_SMP | ||
20 | #define LOCK_PREFIX "lock ; " | ||
21 | #else | ||
22 | #define LOCK_PREFIX "" | ||
23 | #endif | ||
24 | |||
25 | #define ADDR (*(volatile long *) addr) | ||
26 | |||
27 | /** | ||
28 | * set_bit - Atomically set a bit in memory | ||
29 | * @nr: the bit to set | ||
30 | * @addr: the address to start counting from | ||
31 | * | ||
32 | * This function is atomic and may not be reordered. See __set_bit() | ||
33 | * if you do not require the atomic guarantees. | ||
34 | * | ||
35 | * Note: there are no guarantees that this function will not be reordered | ||
36 | * on non-x86 architectures, so if you are writing portable code, | ||
37 | * make sure not to rely on its reordering guarantees. | ||
38 | * | ||
39 | * Note that @nr may be almost arbitrarily large; this function is not | ||
40 | * restricted to acting on a single-word quantity. | ||
41 | */ | ||
42 | static inline void set_bit(int nr, volatile unsigned long * addr) | ||
43 | { | ||
44 | __asm__ __volatile__( LOCK_PREFIX | ||
45 | "btsl %1,%0" | ||
46 | :"=m" (ADDR) | ||
47 | :"Ir" (nr)); | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * __set_bit - Set a bit in memory | ||
52 | * @nr: the bit to set | ||
53 | * @addr: the address to start counting from | ||
54 | * | ||
55 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
56 | * If it's called on the same region of memory simultaneously, the effect | ||
57 | * may be that only one operation succeeds. | ||
58 | */ | ||
59 | static inline void __set_bit(int nr, volatile unsigned long * addr) | ||
60 | { | ||
61 | __asm__( | ||
62 | "btsl %1,%0" | ||
63 | :"=m" (ADDR) | ||
64 | :"Ir" (nr)); | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * clear_bit - Clears a bit in memory | ||
69 | * @nr: Bit to clear | ||
70 | * @addr: Address to start counting from | ||
71 | * | ||
72 | * clear_bit() is atomic and may not be reordered. However, it does | ||
73 | * not contain a memory barrier, so if it is used for locking purposes, | ||
74 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
75 | * in order to ensure changes are visible on other processors. | ||
76 | */ | ||
77 | static inline void clear_bit(int nr, volatile unsigned long * addr) | ||
78 | { | ||
79 | __asm__ __volatile__( LOCK_PREFIX | ||
80 | "btrl %1,%0" | ||
81 | :"=m" (ADDR) | ||
82 | :"Ir" (nr)); | ||
83 | } | ||
84 | |||
85 | static inline void __clear_bit(int nr, volatile unsigned long * addr) | ||
86 | { | ||
87 | __asm__ __volatile__( | ||
88 | "btrl %1,%0" | ||
89 | :"=m" (ADDR) | ||
90 | :"Ir" (nr)); | ||
91 | } | ||
92 | #define smp_mb__before_clear_bit() barrier() | ||
93 | #define smp_mb__after_clear_bit() barrier() | ||
94 | |||
95 | /** | ||
96 | * __change_bit - Toggle a bit in memory | ||
97 | * @nr: the bit to change | ||
98 | * @addr: the address to start counting from | ||
99 | * | ||
100 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
101 | * If it's called on the same region of memory simultaneously, the effect | ||
102 | * may be that only one operation succeeds. | ||
103 | */ | ||
104 | static inline void __change_bit(int nr, volatile unsigned long * addr) | ||
105 | { | ||
106 | __asm__ __volatile__( | ||
107 | "btcl %1,%0" | ||
108 | :"=m" (ADDR) | ||
109 | :"Ir" (nr)); | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * change_bit - Toggle a bit in memory | ||
114 | * @nr: Bit to change | ||
115 | * @addr: Address to start counting from | ||
116 | * | ||
117 | * change_bit() is atomic and may not be reordered. It may be | ||
118 | * reordered on architectures other than x86. | ||
119 | * Note that @nr may be almost arbitrarily large; this function is not | ||
120 | * restricted to acting on a single-word quantity. | ||
121 | */ | ||
122 | static inline void change_bit(int nr, volatile unsigned long * addr) | ||
123 | { | ||
124 | __asm__ __volatile__( LOCK_PREFIX | ||
125 | "btcl %1,%0" | ||
126 | :"=m" (ADDR) | ||
127 | :"Ir" (nr)); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * test_and_set_bit - Set a bit and return its old value | ||
132 | * @nr: Bit to set | ||
133 | * @addr: Address to count from | ||
134 | * | ||
135 | * This operation is atomic and cannot be reordered. | ||
136 | * It may be reordered on architectures other than x86. | ||
137 | * It also implies a memory barrier. | ||
138 | */ | ||
139 | static inline int test_and_set_bit(int nr, volatile unsigned long * addr) | ||
140 | { | ||
141 | int oldbit; | ||
142 | |||
143 | __asm__ __volatile__( LOCK_PREFIX | ||
144 | "btsl %2,%1\n\tsbbl %0,%0" | ||
145 | :"=r" (oldbit),"=m" (ADDR) | ||
146 | :"Ir" (nr) : "memory"); | ||
147 | return oldbit; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * __test_and_set_bit - Set a bit and return its old value | ||
152 | * @nr: Bit to set | ||
153 | * @addr: Address to count from | ||
154 | * | ||
155 | * This operation is non-atomic and can be reordered. | ||
156 | * If two examples of this operation race, one can appear to succeed | ||
157 | * but actually fail. You must protect multiple accesses with a lock. | ||
158 | */ | ||
159 | static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) | ||
160 | { | ||
161 | int oldbit; | ||
162 | |||
163 | __asm__( | ||
164 | "btsl %2,%1\n\tsbbl %0,%0" | ||
165 | :"=r" (oldbit),"=m" (ADDR) | ||
166 | :"Ir" (nr)); | ||
167 | return oldbit; | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * test_and_clear_bit - Clear a bit and return its old value | ||
172 | * @nr: Bit to clear | ||
173 | * @addr: Address to count from | ||
174 | * | ||
175 | * This operation is atomic and cannot be reordered. | ||
176 | * It can be reordered on architectures other than x86. | ||
177 | * It also implies a memory barrier. | ||
178 | */ | ||
179 | static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) | ||
180 | { | ||
181 | int oldbit; | ||
182 | |||
183 | __asm__ __volatile__( LOCK_PREFIX | ||
184 | "btrl %2,%1\n\tsbbl %0,%0" | ||
185 | :"=r" (oldbit),"=m" (ADDR) | ||
186 | :"Ir" (nr) : "memory"); | ||
187 | return oldbit; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * __test_and_clear_bit - Clear a bit and return its old value | ||
192 | * @nr: Bit to clear | ||
193 | * @addr: Address to count from | ||
194 | * | ||
195 | * This operation is non-atomic and can be reordered. | ||
196 | * If two examples of this operation race, one can appear to succeed | ||
197 | * but actually fail. You must protect multiple accesses with a lock. | ||
198 | */ | ||
199 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
200 | { | ||
201 | int oldbit; | ||
202 | |||
203 | __asm__( | ||
204 | "btrl %2,%1\n\tsbbl %0,%0" | ||
205 | :"=r" (oldbit),"=m" (ADDR) | ||
206 | :"Ir" (nr)); | ||
207 | return oldbit; | ||
208 | } | ||
209 | |||
210 | /* WARNING: non atomic and it can be reordered! */ | ||
211 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
212 | { | ||
213 | int oldbit; | ||
214 | |||
215 | __asm__ __volatile__( | ||
216 | "btcl %2,%1\n\tsbbl %0,%0" | ||
217 | :"=r" (oldbit),"=m" (ADDR) | ||
218 | :"Ir" (nr) : "memory"); | ||
219 | return oldbit; | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * test_and_change_bit - Change a bit and return its old value | ||
224 | * @nr: Bit to change | ||
225 | * @addr: Address to count from | ||
226 | * | ||
227 | * This operation is atomic and cannot be reordered. | ||
228 | * It also implies a memory barrier. | ||
229 | */ | ||
230 | static inline int test_and_change_bit(int nr, volatile unsigned long* addr) | ||
231 | { | ||
232 | int oldbit; | ||
233 | |||
234 | __asm__ __volatile__( LOCK_PREFIX | ||
235 | "btcl %2,%1\n\tsbbl %0,%0" | ||
236 | :"=r" (oldbit),"=m" (ADDR) | ||
237 | :"Ir" (nr) : "memory"); | ||
238 | return oldbit; | ||
239 | } | ||
240 | |||
241 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | ||
242 | /** | ||
243 | * test_bit - Determine whether a bit is set | ||
244 | * @nr: bit number to test | ||
245 | * @addr: Address to start counting from | ||
246 | */ | ||
247 | static int test_bit(int nr, const volatile void * addr); | ||
248 | #endif | ||
249 | |||
250 | static inline int constant_test_bit(int nr, const volatile unsigned long *addr) | ||
251 | { | ||
252 | return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; | ||
253 | } | ||
254 | |||
255 | static inline int variable_test_bit(int nr, const volatile unsigned long * addr) | ||
256 | { | ||
257 | int oldbit; | ||
258 | |||
259 | __asm__ __volatile__( | ||
260 | "btl %2,%1\n\tsbbl %0,%0" | ||
261 | :"=r" (oldbit) | ||
262 | :"m" (ADDR),"Ir" (nr)); | ||
263 | return oldbit; | ||
264 | } | ||
265 | |||
266 | #define test_bit(nr,addr) \ | ||
267 | (__builtin_constant_p(nr) ? \ | ||
268 | constant_test_bit((nr),(addr)) : \ | ||
269 | variable_test_bit((nr),(addr))) | ||
270 | |||
271 | #undef ADDR | ||
272 | |||
273 | /** | ||
274 | * find_first_zero_bit - find the first zero bit in a memory region | ||
275 | * @addr: The address to start the search at | ||
276 | * @size: The maximum size to search | ||
277 | * | ||
278 | * Returns the bit-number of the first zero bit, not the number of the byte | ||
279 | * containing a bit. | ||
280 | */ | ||
281 | static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) | ||
282 | { | ||
283 | int d0, d1, d2; | ||
284 | int res; | ||
285 | |||
286 | if (!size) | ||
287 | return 0; | ||
288 | /* This looks at memory. Mark it volatile to tell gcc not to move it around */ | ||
289 | __asm__ __volatile__( | ||
290 | "movl $-1,%%eax\n\t" | ||
291 | "xorl %%edx,%%edx\n\t" | ||
292 | "repe; scasl\n\t" | ||
293 | "je 1f\n\t" | ||
294 | "xorl -4(%%edi),%%eax\n\t" | ||
295 | "subl $4,%%edi\n\t" | ||
296 | "bsfl %%eax,%%edx\n" | ||
297 | "1:\tsubl %%ebx,%%edi\n\t" | ||
298 | "shll $3,%%edi\n\t" | ||
299 | "addl %%edi,%%edx" | ||
300 | :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) | ||
301 | :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); | ||
302 | return res; | ||
303 | } | ||
304 | |||
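A sketch of a small ID allocator built on these primitives. The find/set pair is not atomic as a whole, so the result of test_and_set_bit() is rechecked in a loop; MAX_IDS and the bitmap are illustrative.

#define MAX_IDS 1024
static unsigned long id_bitmap[MAX_IDS / (8 * sizeof(unsigned long))];

static int alloc_id(void)
{
	int id;

	do {
		id = find_first_zero_bit(id_bitmap, MAX_IDS);
		if (id >= MAX_IDS)
			return -1;			/* all IDs in use */
	} while (test_and_set_bit(id, id_bitmap));	/* lost a race: retry */

	return id;
}

static void free_id(int id)
{
	clear_bit(id, id_bitmap);
}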
305 | /** | ||
306 | * find_next_zero_bit - find the first zero bit in a memory region | ||
307 | * @addr: The address to base the search on | ||
308 | * @offset: The bitnumber to start searching at | ||
309 | * @size: The maximum size to search | ||
310 | */ | ||
311 | int find_next_zero_bit(const unsigned long *addr, int size, int offset); | ||
312 | |||
313 | /** | ||
314 | * find_first_bit - find the first set bit in a memory region | ||
315 | * @addr: The address to start the search at | ||
316 | * @size: The maximum size to search | ||
317 | * | ||
318 | * Returns the bit-number of the first set bit, not the number of the byte | ||
319 | * containing a bit. | ||
320 | */ | ||
321 | static inline int find_first_bit(const unsigned long *addr, unsigned size) | ||
322 | { | ||
323 | int d0, d1; | ||
324 | int res; | ||
325 | |||
326 | /* This looks at memory. Mark it volatile to tell gcc not to move it around */ | ||
327 | __asm__ __volatile__( | ||
328 | "xorl %%eax,%%eax\n\t" | ||
329 | "repe; scasl\n\t" | ||
330 | "jz 1f\n\t" | ||
331 | "leal -4(%%edi),%%edi\n\t" | ||
332 | "bsfl (%%edi),%%eax\n" | ||
333 | "1:\tsubl %%ebx,%%edi\n\t" | ||
334 | "shll $3,%%edi\n\t" | ||
335 | "addl %%edi,%%eax" | ||
336 | :"=a" (res), "=&c" (d0), "=&D" (d1) | ||
337 | :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); | ||
338 | return res; | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * find_next_bit - find the first set bit in a memory region | ||
343 | * @addr: The address to base the search on | ||
344 | * @offset: The bitnumber to start searching at | ||
345 | * @size: The maximum size to search | ||
346 | */ | ||
347 | int find_next_bit(const unsigned long *addr, int size, int offset); | ||
348 | |||
349 | /** | ||
350 | * ffz - find first zero in word. | ||
351 | * @word: The word to search | ||
352 | * | ||
353 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
354 | */ | ||
355 | static inline unsigned long ffz(unsigned long word) | ||
356 | { | ||
357 | __asm__("bsfl %1,%0" | ||
358 | :"=r" (word) | ||
359 | :"r" (~word)); | ||
360 | return word; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * __ffs - find first bit in word. | ||
365 | * @word: The word to search | ||
366 | * | ||
367 | * Undefined if no bit exists, so code should check against 0 first. | ||
368 | */ | ||
369 | static inline unsigned long __ffs(unsigned long word) | ||
370 | { | ||
371 | __asm__("bsfl %1,%0" | ||
372 | :"=r" (word) | ||
373 | :"rm" (word)); | ||
374 | return word; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * fls: find last bit set. | ||
379 | */ | ||
380 | |||
381 | #define fls(x) generic_fls(x) | ||
382 | |||
383 | #ifdef __KERNEL__ | ||
384 | |||
385 | /* | ||
386 | * Every architecture must define this function. It's the fastest | ||
387 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
388 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
389 | * bits is cleared. | ||
390 | */ | ||
391 | static inline int sched_find_first_bit(const unsigned long *b) | ||
392 | { | ||
393 | if (unlikely(b[0])) | ||
394 | return __ffs(b[0]); | ||
395 | if (unlikely(b[1])) | ||
396 | return __ffs(b[1]) + 32; | ||
397 | if (unlikely(b[2])) | ||
398 | return __ffs(b[2]) + 64; | ||
399 | if (b[3]) | ||
400 | return __ffs(b[3]) + 96; | ||
401 | return __ffs(b[4]) + 128; | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * ffs - find first bit set | ||
406 | * @x: the word to search | ||
407 | * | ||
408 | * This is defined the same way as | ||
409 | * the libc and compiler builtin ffs routines, therefore | ||
410 | * differs in spirit from the above ffz (man ffs). | ||
411 | */ | ||
412 | static inline int ffs(int x) | ||
413 | { | ||
414 | int r; | ||
415 | |||
416 | __asm__("bsfl %1,%0\n\t" | ||
417 | "jnz 1f\n\t" | ||
418 | "movl $-1,%0\n" | ||
419 | "1:" : "=r" (r) : "rm" (x)); | ||
420 | return r+1; | ||
421 | } | ||
422 | |||
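A short sketch contrasting the three lookup helpers, since their conventions differ: ffs() is 1-based and returns 0 when no bit is set, while __ffs() and ffz() are 0-based and undefined for an all-zeroes / all-ones word respectively.

static void example_bit_search(void)
{
	unsigned long w = 0x50;		/* bits 4 and 6 set */
	int first_1based;
	unsigned long first_0based, first_zero;

	first_1based = ffs(w);		/* 5: 1-based, 0 would mean "none set" */
	first_0based = __ffs(w);	/* 4: 0-based, undefined for w == 0 */
	first_zero   = ffz(w);		/* 0: lowest clear bit, undefined for ~0UL */
	/* a real caller would now use these positions */
}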
423 | /** | ||
424 | * hweightN - returns the hamming weight of a N-bit word | ||
425 | * @x: the word to weigh | ||
426 | * | ||
427 | * The Hamming Weight of a number is the total number of bits set in it. | ||
428 | */ | ||
429 | |||
430 | #define hweight32(x) generic_hweight32(x) | ||
431 | #define hweight16(x) generic_hweight16(x) | ||
432 | #define hweight8(x) generic_hweight8(x) | ||
433 | |||
434 | #endif /* __KERNEL__ */ | ||
435 | |||
436 | #ifdef __KERNEL__ | ||
437 | |||
438 | #define ext2_set_bit(nr,addr) \ | ||
439 | __test_and_set_bit((nr),(unsigned long*)addr) | ||
440 | #define ext2_set_bit_atomic(lock,nr,addr) \ | ||
441 | test_and_set_bit((nr),(unsigned long*)addr) | ||
442 | #define ext2_clear_bit(nr, addr) \ | ||
443 | __test_and_clear_bit((nr),(unsigned long*)addr) | ||
444 | #define ext2_clear_bit_atomic(lock,nr, addr) \ | ||
445 | test_and_clear_bit((nr),(unsigned long*)addr) | ||
446 | #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) | ||
447 | #define ext2_find_first_zero_bit(addr, size) \ | ||
448 | find_first_zero_bit((unsigned long*)addr, size) | ||
449 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
450 | find_next_zero_bit((unsigned long*)addr, size, off) | ||
451 | |||
452 | /* Bitmap functions for the minix filesystem. */ | ||
453 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) | ||
454 | #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) | ||
455 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) | ||
456 | #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) | ||
457 | #define minix_find_first_zero_bit(addr,size) \ | ||
458 | find_first_zero_bit((void*)addr,size) | ||
459 | |||
460 | #endif /* __KERNEL__ */ | ||
461 | |||
462 | #endif /* _I386_BITOPS_H */ | ||
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
new file mode 100644
index 000000000000..96b228e6e79c
--- /dev/null
+++ b/include/asm-i386/boot.h
@@ -0,0 +1,15 @@
1 | #ifndef _LINUX_BOOT_H | ||
2 | #define _LINUX_BOOT_H | ||
3 | |||
4 | /* Don't touch these, unless you really know what you're doing. */ | ||
5 | #define DEF_INITSEG 0x9000 | ||
6 | #define DEF_SYSSEG 0x1000 | ||
7 | #define DEF_SETUPSEG 0x9020 | ||
8 | #define DEF_SYSSIZE 0x7F00 | ||
9 | |||
10 | /* Internal svga startup constants */ | ||
11 | #define NORMAL_VGA 0xffff /* 80x25 mode */ | ||
12 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ | ||
13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ | ||
14 | |||
15 | #endif | ||
diff --git a/include/asm-i386/bug.h b/include/asm-i386/bug.h
new file mode 100644
index 000000000000..706eb511c330
--- /dev/null
+++ b/include/asm-i386/bug.h
@@ -0,0 +1,25 @@
1 | #ifndef _I386_BUG_H | ||
2 | #define _I386_BUG_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | /* | ||
7 | * Tell the user there is some problem. | ||
8 | * The offending file and line are encoded after the "officially | ||
9 | * undefined" opcode for parsing in the trap handler. | ||
10 | */ | ||
11 | |||
12 | #ifdef CONFIG_DEBUG_BUGVERBOSE | ||
13 | #define BUG() \ | ||
14 | __asm__ __volatile__( "ud2\n" \ | ||
15 | "\t.word %c0\n" \ | ||
16 | "\t.long %c1\n" \ | ||
17 | : : "i" (__LINE__), "i" (__FILE__)) | ||
18 | #else | ||
19 | #define BUG() __asm__ __volatile__("ud2\n") | ||
20 | #endif | ||
21 | |||
22 | #define HAVE_ARCH_BUG | ||
23 | #include <asm-generic/bug.h> | ||
24 | |||
25 | #endif | ||
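For illustration, a sketch of the byte layout the verbose BUG() variant leaves behind and how a trap handler could pick it up. The struct and function are illustrative (the real invalid-opcode path decodes the bytes by hand), and eip is assumed here to point just past the two-byte ud2 opcode.

#include <linux/kernel.h>

struct bug_frame {
	unsigned short line;		/* from "\t.word %c0" (__LINE__) */
	const char *file;		/* from "\t.long %c1" (__FILE__ pointer) */
} __attribute__((packed));

static void example_report_bug(unsigned long eip)
{
	const struct bug_frame *f = (const struct bug_frame *)eip;

	printk(KERN_CRIT "kernel BUG at %s:%u!\n", f->file, f->line);
}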
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
new file mode 100644
index 000000000000..6789fc275da3
--- /dev/null
+++ b/include/asm-i386/bugs.h
@@ -0,0 +1,213 @@
1 | /* | ||
2 | * include/asm-i386/bugs.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | * | ||
6 | * Cyrix stuff, June 1998 by: | ||
7 | * - Rafael R. Reilova (moved everything from head.S), | ||
8 | * <rreilova@ececs.uc.edu> | ||
9 | * - Channing Corn (tests & fixes), | ||
10 | * - Andrew D. Balsa (code cleanup). | ||
11 | * | ||
12 | * Pentium III FXSR, SSE support | ||
13 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * This is included by init/main.c to check for architecture-dependent bugs. | ||
18 | * | ||
19 | * Needs: | ||
20 | * void check_bugs(void); | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/i387.h> | ||
27 | #include <asm/msr.h> | ||
28 | |||
29 | static int __init no_halt(char *s) | ||
30 | { | ||
31 | boot_cpu_data.hlt_works_ok = 0; | ||
32 | return 1; | ||
33 | } | ||
34 | |||
35 | __setup("no-hlt", no_halt); | ||
36 | |||
37 | static int __init mca_pentium(char *s) | ||
38 | { | ||
39 | mca_pentium_flag = 1; | ||
40 | return 1; | ||
41 | } | ||
42 | |||
43 | __setup("mca-pentium", mca_pentium); | ||
44 | |||
45 | static int __init no_387(char *s) | ||
46 | { | ||
47 | boot_cpu_data.hard_math = 0; | ||
48 | write_cr0(0xE | read_cr0()); | ||
49 | return 1; | ||
50 | } | ||
51 | |||
52 | __setup("no387", no_387); | ||
53 | |||
54 | static double __initdata x = 4195835.0; | ||
55 | static double __initdata y = 3145727.0; | ||
56 | |||
57 | /* | ||
58 | * This used to check for exceptions.. | ||
59 | * However, it turns out that to support that, | ||
60 | * the XMM trap handlers basically had to | ||
61 | * be buggy. So let's have a correct XMM trap | ||
62 | * handler, and forget about printing out | ||
63 | * some status at boot. | ||
64 | * | ||
65 | * We should really only care about bugs here | ||
66 | * anyway. Not features. | ||
67 | */ | ||
68 | static void __init check_fpu(void) | ||
69 | { | ||
70 | if (!boot_cpu_data.hard_math) { | ||
71 | #ifndef CONFIG_MATH_EMULATION | ||
72 | printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); | ||
73 | printk(KERN_EMERG "Giving up.\n"); | ||
74 | for (;;) ; | ||
75 | #endif | ||
76 | return; | ||
77 | } | ||
78 | |||
79 | /* Enable FXSR and company _before_ testing for FP problems. */ | ||
80 | /* | ||
81 | * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. | ||
82 | */ | ||
83 | if (offsetof(struct task_struct, thread.i387.fxsave) & 15) { | ||
84 | extern void __buggy_fxsr_alignment(void); | ||
85 | __buggy_fxsr_alignment(); | ||
86 | } | ||
87 | if (cpu_has_fxsr) { | ||
88 | printk(KERN_INFO "Enabling fast FPU save and restore... "); | ||
89 | set_in_cr4(X86_CR4_OSFXSR); | ||
90 | printk("done.\n"); | ||
91 | } | ||
92 | if (cpu_has_xmm) { | ||
93 | printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... "); | ||
94 | set_in_cr4(X86_CR4_OSXMMEXCPT); | ||
95 | printk("done.\n"); | ||
96 | } | ||
97 | |||
98 | /* Test for the divl bug.. */ | ||
99 | __asm__("fninit\n\t" | ||
100 | "fldl %1\n\t" | ||
101 | "fdivl %2\n\t" | ||
102 | "fmull %2\n\t" | ||
103 | "fldl %1\n\t" | ||
104 | "fsubp %%st,%%st(1)\n\t" | ||
105 | "fistpl %0\n\t" | ||
106 | "fwait\n\t" | ||
107 | "fninit" | ||
108 | : "=m" (*&boot_cpu_data.fdiv_bug) | ||
109 | : "m" (*&x), "m" (*&y)); | ||
110 | if (boot_cpu_data.fdiv_bug) | ||
111 | printk("Hmm, FPU with FDIV bug.\n"); | ||
112 | } | ||
113 | |||
114 | static void __init check_hlt(void) | ||
115 | { | ||
116 | printk(KERN_INFO "Checking 'hlt' instruction... "); | ||
117 | if (!boot_cpu_data.hlt_works_ok) { | ||
118 | printk("disabled\n"); | ||
119 | return; | ||
120 | } | ||
121 | __asm__ __volatile__("hlt ; hlt ; hlt ; hlt"); | ||
122 | printk("OK.\n"); | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * Most 386 processors have a bug where a POPAD can lock the | ||
127 | * machine even from user space. | ||
128 | */ | ||
129 | |||
130 | static void __init check_popad(void) | ||
131 | { | ||
132 | #ifndef CONFIG_X86_POPAD_OK | ||
133 | int res, inp = (int) &res; | ||
134 | |||
135 | printk(KERN_INFO "Checking for popad bug... "); | ||
136 | __asm__ __volatile__( | ||
137 | "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx " | ||
138 | : "=&a" (res) | ||
139 | : "d" (inp) | ||
140 | : "ecx", "edi" ); | ||
141 | /* If this fails, it means that any user program may lock the CPU hard. Too bad. */ | ||
142 | if (res != 12345678) printk( "Buggy.\n" ); | ||
143 | else printk( "OK.\n" ); | ||
144 | #endif | ||
145 | } | ||
146 | |||
147 | /* | ||
148 | * Check whether we are able to run this kernel safely on SMP. | ||
149 | * | ||
150 | * - In order to run on a i386, we need to be compiled for i386 | ||
151 | * (due to the lack of "invlpg" and working WP on a i386) | ||
152 | * - In order to run on anything without a TSC, we need to be | ||
153 | * compiled for a i486. | ||
154 | * - In order to support the local APIC on a buggy Pentium machine, | ||
155 | * we need to be compiled with CONFIG_X86_GOOD_APIC disabled, | ||
156 | * which happens implicitly if compiled for a Pentium or lower | ||
157 | * (unless an advanced selection of CPU features is used), since | ||
158 | * any other config implies a properly working local APIC without | ||
159 | * the need to do extra reads from the APIC. | ||
160 | */ | ||
161 | |||
162 | static void __init check_config(void) | ||
163 | { | ||
164 | /* | ||
165 | * We'd better not be a i386 if we're configured to use some | ||
166 | * i486+ only features! (WP works in supervisor mode and the | ||
167 | * new "invlpg" and "bswap" instructions) | ||
168 | */ | ||
169 | #if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP) | ||
170 | if (boot_cpu_data.x86 == 3) | ||
171 | panic("Kernel requires i486+ for 'invlpg' and other features"); | ||
172 | #endif | ||
173 | |||
174 | /* | ||
175 | * If we configured ourselves for a TSC, we'd better have one! | ||
176 | */ | ||
177 | #ifdef CONFIG_X86_TSC | ||
178 | if (!cpu_has_tsc) | ||
179 | panic("Kernel compiled for Pentium+, requires TSC feature!"); | ||
180 | #endif | ||
181 | |||
182 | /* | ||
183 | * If we were told we had a good local APIC, check for buggy Pentia, | ||
184 | * i.e. all B steppings and the C2 stepping of P54C when using their | ||
185 | * integrated APIC (see 11AP erratum in "Pentium Processor | ||
186 | * Specification Update"). | ||
187 | */ | ||
188 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC) | ||
189 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL | ||
190 | && cpu_has_apic | ||
191 | && boot_cpu_data.x86 == 5 | ||
192 | && boot_cpu_data.x86_model == 2 | ||
193 | && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11)) | ||
194 | panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!"); | ||
195 | #endif | ||
196 | } | ||
197 | |||
198 | extern void alternative_instructions(void); | ||
199 | |||
200 | static void __init check_bugs(void) | ||
201 | { | ||
202 | identify_cpu(&boot_cpu_data); | ||
203 | #ifndef CONFIG_SMP | ||
204 | printk("CPU: "); | ||
205 | print_cpu_info(&boot_cpu_data); | ||
206 | #endif | ||
207 | check_config(); | ||
208 | check_fpu(); | ||
209 | check_hlt(); | ||
210 | check_popad(); | ||
211 | system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); | ||
212 | alternative_instructions(); | ||
213 | } | ||
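[Editor's note] The check_fpu() test above divides 4195835 by 3145727, multiplies back, and subtracts: on a correct FPU the residue is exactly zero, on a Pentium with the FDIV erratum it is not. The same probe can be run from user space; this is a hedged, stand-alone sketch (build without optimization so the division really happens at run time, and note that a compiler emitting SSE math would bypass the x87 divider entirely).

    #include <stdio.h>

    int main(void)
    {
            volatile double x = 4195835.0, y = 3145727.0;
            double residue = x - (x / y) * y;       /* exactly 0.0 on a good FPU */

            if (residue != 0.0)
                    printf("FDIV bug present (residue %g)\n", residue);
            else
                    printf("FPU divides correctly\n");
            return 0;
    }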
diff --git a/include/asm-i386/byteorder.h b/include/asm-i386/byteorder.h new file mode 100644 index 000000000000..a0d73f48d5be --- /dev/null +++ b/include/asm-i386/byteorder.h | |||
@@ -0,0 +1,59 @@ | |||
1 | #ifndef _I386_BYTEORDER_H | ||
2 | #define _I386_BYTEORDER_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | |||
7 | #ifdef __GNUC__ | ||
8 | |||
9 | /* For avoiding bswap on i386 */ | ||
10 | #ifdef __KERNEL__ | ||
11 | #include <linux/config.h> | ||
12 | #endif | ||
13 | |||
14 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | ||
15 | { | ||
16 | #ifdef CONFIG_X86_BSWAP | ||
17 | __asm__("bswap %0" : "=r" (x) : "0" (x)); | ||
18 | #else | ||
19 | __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ | ||
20 | "rorl $16,%0\n\t" /* swap words */ | ||
21 | "xchgb %b0,%h0" /* swap higher bytes */ | ||
22 | :"=q" (x) | ||
23 | : "0" (x)); | ||
24 | #endif | ||
25 | return x; | ||
26 | } | ||
27 | |||
28 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) | ||
29 | { | ||
30 | union { | ||
31 | struct { __u32 a,b; } s; | ||
32 | __u64 u; | ||
33 | } v; | ||
34 | v.u = val; | ||
35 | #ifdef CONFIG_X86_BSWAP | ||
36 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | ||
37 | : "=r" (v.s.a), "=r" (v.s.b) | ||
38 | : "0" (v.s.a), "1" (v.s.b)); | ||
39 | #else | ||
40 | v.s.a = ___arch__swab32(v.s.a); | ||
41 | v.s.b = ___arch__swab32(v.s.b); | ||
42 | asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); | ||
43 | #endif | ||
44 | return v.u; | ||
45 | } | ||
46 | |||
47 | /* Do not define swab16. Gcc is smart enough to recognize "C" version and | ||
48 | convert it into rotation or exchange. */ | ||
49 | |||
50 | #define __arch__swab64(x) ___arch__swab64(x) | ||
51 | #define __arch__swab32(x) ___arch__swab32(x) | ||
52 | |||
53 | #define __BYTEORDER_HAS_U64__ | ||
54 | |||
55 | #endif /* __GNUC__ */ | ||
56 | |||
57 | #include <linux/byteorder/little_endian.h> | ||
58 | |||
59 | #endif /* _I386_BYTEORDER_H */ | ||
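[Editor's note] The comment above leaves swab16 to the compiler because the plain-C form already compiles down to a rotate or xchg. For reference, these are the portable C equivalents of the swaps in this header; the ___swab16/___swab32 names here are illustrative and not part of the file.

    static inline __u16 ___swab16(__u16 x)
    {
            return (x << 8) | (x >> 8);
    }

    static inline __u32 ___swab32(__u32 x)          /* what the asm above computes */
    {
            return (x << 24) | ((x & 0xff00) << 8) |
                   ((x >> 8) & 0xff00) | (x >> 24);
    }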
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h new file mode 100644 index 000000000000..849788710feb --- /dev/null +++ b/include/asm-i386/cache.h | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * include/asm-i386/cache.h | ||
3 | */ | ||
4 | #ifndef __ARCH_I386_CACHE_H | ||
5 | #define __ARCH_I386_CACHE_H | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | |||
9 | /* L1 cache line size */ | ||
10 | #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) | ||
11 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | ||
12 | |||
13 | #define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */ | ||
14 | |||
15 | #endif | ||
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h new file mode 100644 index 000000000000..2ea36dea37d9 --- /dev/null +++ b/include/asm-i386/cacheflush.h | |||
@@ -0,0 +1,34 @@ | |||
1 | #ifndef _I386_CACHEFLUSH_H | ||
2 | #define _I386_CACHEFLUSH_H | ||
3 | |||
4 | /* Keep includes the same across arches. */ | ||
5 | #include <linux/mm.h> | ||
6 | |||
7 | /* Caches aren't brain-dead on the intel. */ | ||
8 | #define flush_cache_all() do { } while (0) | ||
9 | #define flush_cache_mm(mm) do { } while (0) | ||
10 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
11 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
12 | #define flush_dcache_page(page) do { } while (0) | ||
13 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
14 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
15 | #define flush_icache_range(start, end) do { } while (0) | ||
16 | #define flush_icache_page(vma,pg) do { } while (0) | ||
17 | #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) | ||
18 | #define flush_cache_vmap(start, end) do { } while (0) | ||
19 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
20 | |||
21 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | ||
22 | memcpy(dst, src, len) | ||
23 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
24 | memcpy(dst, src, len) | ||
25 | |||
26 | void global_flush_tlb(void); | ||
27 | int change_page_attr(struct page *page, int numpages, pgprot_t prot); | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
30 | /* internal debugging function */ | ||
31 | void kernel_map_pages(struct page *page, int numpages, int enable); | ||
32 | #endif | ||
33 | |||
34 | #endif /* _I386_CACHEFLUSH_H */ | ||
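[Editor's note] The only real work in this header is the pair of prototypes for changing page attributes. A hedged usage sketch, assuming the PAGE_KERNEL_NOCACHE protection from <asm/pgtable.h> and an illustrative helper name: attribute changes are applied lazily, so the global TLB flush is what makes them visible.

    static int make_page_uncached(struct page *page)
    {
            int err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);

            if (!err)
                    global_flush_tlb();     /* propagate the new attribute */
            return err;
    }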
diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h new file mode 100644 index 000000000000..d76a5f081c91 --- /dev/null +++ b/include/asm-i386/checksum.h | |||
@@ -0,0 +1,192 @@ | |||
1 | #ifndef _I386_CHECKSUM_H | ||
2 | #define _I386_CHECKSUM_H | ||
3 | |||
4 | #include <linux/in6.h> | ||
5 | |||
6 | /* | ||
7 | * computes the checksum of a memory block at buff, length len, | ||
8 | * and adds in "sum" (32-bit) | ||
9 | * | ||
10 | * returns a 32-bit number suitable for feeding into itself | ||
11 | * or csum_tcpudp_magic | ||
12 | * | ||
13 | * this function must be called with even lengths, except | ||
14 | * for the last fragment, which may be odd | ||
15 | * | ||
16 | * it's best to have buff aligned on a 32-bit boundary | ||
17 | */ | ||
18 | asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); | ||
19 | |||
20 | /* | ||
21 | * the same as csum_partial, but copies from src while it | ||
22 | * checksums, and handles user-space pointer exceptions correctly, when needed. | ||
23 | * | ||
24 | * here it is even more important to align src and dst on a 32-bit (or even | ||
25 | * better 64-bit) boundary | ||
26 | */ | ||
27 | |||
28 | asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst, | ||
29 | int len, int sum, int *src_err_ptr, int *dst_err_ptr); | ||
30 | |||
31 | /* | ||
32 | * Note: when you get a NULL pointer exception here this means someone | ||
33 | * passed in an incorrect kernel address to one of these functions. | ||
34 | * | ||
35 | * If you use these functions directly please don't forget the | ||
36 | * verify_area(). | ||
37 | */ | ||
38 | static __inline__ | ||
39 | unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, | ||
40 | int len, int sum) | ||
41 | { | ||
42 | return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); | ||
43 | } | ||
44 | |||
45 | static __inline__ | ||
46 | unsigned int csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, | ||
47 | int len, int sum, int *err_ptr) | ||
48 | { | ||
49 | might_sleep(); | ||
50 | return csum_partial_copy_generic((__force unsigned char *)src, dst, | ||
51 | len, sum, err_ptr, NULL); | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
56 | * which always checksum on 4 octet boundaries. | ||
57 | * | ||
58 | * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by | ||
59 | * Arnt Gulbrandsen. | ||
60 | */ | ||
61 | static inline unsigned short ip_fast_csum(unsigned char * iph, | ||
62 | unsigned int ihl) | ||
63 | { | ||
64 | unsigned int sum; | ||
65 | |||
66 | __asm__ __volatile__( | ||
67 | "movl (%1), %0 ;\n" | ||
68 | "subl $4, %2 ;\n" | ||
69 | "jbe 2f ;\n" | ||
70 | "addl 4(%1), %0 ;\n" | ||
71 | "adcl 8(%1), %0 ;\n" | ||
72 | "adcl 12(%1), %0 ;\n" | ||
73 | "1: adcl 16(%1), %0 ;\n" | ||
74 | "lea 4(%1), %1 ;\n" | ||
75 | "decl %2 ;\n" | ||
76 | "jne 1b ;\n" | ||
77 | "adcl $0, %0 ;\n" | ||
78 | "movl %0, %2 ;\n" | ||
79 | "shrl $16, %0 ;\n" | ||
80 | "addw %w2, %w0 ;\n" | ||
81 | "adcl $0, %0 ;\n" | ||
82 | "notl %0 ;\n" | ||
83 | "2: ;\n" | ||
84 | /* Since the input registers which are loaded with iph and ihl | ||
85 | are modified, we must also specify them as outputs, or gcc | ||
86 | will assume they contain their original values. */ | ||
87 | : "=r" (sum), "=r" (iph), "=r" (ihl) | ||
88 | : "1" (iph), "2" (ihl) | ||
89 | : "memory"); | ||
90 | return(sum); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * Fold a partial checksum | ||
95 | */ | ||
96 | |||
97 | static inline unsigned int csum_fold(unsigned int sum) | ||
98 | { | ||
99 | __asm__( | ||
100 | "addl %1, %0 ;\n" | ||
101 | "adcl $0xffff, %0 ;\n" | ||
102 | : "=r" (sum) | ||
103 | : "r" (sum << 16), "0" (sum & 0xffff0000) | ||
104 | ); | ||
105 | return (~sum) >> 16; | ||
106 | } | ||
107 | |||
108 | static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, | ||
109 | unsigned long daddr, | ||
110 | unsigned short len, | ||
111 | unsigned short proto, | ||
112 | unsigned int sum) | ||
113 | { | ||
114 | __asm__( | ||
115 | "addl %1, %0 ;\n" | ||
116 | "adcl %2, %0 ;\n" | ||
117 | "adcl %3, %0 ;\n" | ||
118 | "adcl $0, %0 ;\n" | ||
119 | : "=r" (sum) | ||
120 | : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum)); | ||
121 | return sum; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * computes the checksum of the TCP/UDP pseudo-header | ||
126 | * returns a 16-bit checksum, already complemented | ||
127 | */ | ||
128 | static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, | ||
129 | unsigned long daddr, | ||
130 | unsigned short len, | ||
131 | unsigned short proto, | ||
132 | unsigned int sum) | ||
133 | { | ||
134 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
139 | * in icmp.c | ||
140 | */ | ||
141 | |||
142 | static inline unsigned short ip_compute_csum(unsigned char * buff, int len) | ||
143 | { | ||
144 | return csum_fold (csum_partial(buff, len, 0)); | ||
145 | } | ||
146 | |||
147 | #define _HAVE_ARCH_IPV6_CSUM | ||
148 | static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, | ||
149 | struct in6_addr *daddr, | ||
150 | __u32 len, | ||
151 | unsigned short proto, | ||
152 | unsigned int sum) | ||
153 | { | ||
154 | __asm__( | ||
155 | "addl 0(%1), %0 ;\n" | ||
156 | "adcl 4(%1), %0 ;\n" | ||
157 | "adcl 8(%1), %0 ;\n" | ||
158 | "adcl 12(%1), %0 ;\n" | ||
159 | "adcl 0(%2), %0 ;\n" | ||
160 | "adcl 4(%2), %0 ;\n" | ||
161 | "adcl 8(%2), %0 ;\n" | ||
162 | "adcl 12(%2), %0 ;\n" | ||
163 | "adcl %3, %0 ;\n" | ||
164 | "adcl %4, %0 ;\n" | ||
165 | "adcl $0, %0 ;\n" | ||
166 | : "=&r" (sum) | ||
167 | : "r" (saddr), "r" (daddr), | ||
168 | "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); | ||
169 | |||
170 | return csum_fold(sum); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * Copy and checksum to user | ||
175 | */ | ||
176 | #define HAVE_CSUM_COPY_USER | ||
177 | static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src, | ||
178 | unsigned char __user *dst, | ||
179 | int len, int sum, | ||
180 | int *err_ptr) | ||
181 | { | ||
182 | might_sleep(); | ||
183 | if (access_ok(VERIFY_WRITE, dst, len)) | ||
184 | return csum_partial_copy_generic(src, (__force unsigned char *)dst, len, sum, NULL, err_ptr); | ||
185 | |||
186 | if (len) | ||
187 | *err_ptr = -EFAULT; | ||
188 | |||
189 | return -1; /* invalid checksum */ | ||
190 | } | ||
191 | |||
192 | #endif | ||
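[Editor's note] As a cross-check on the asm above, here is a plain-C reference for what csum_fold() and ip_fast_csum() compute: a 16-bit one's-complement fold of a 32-bit running sum, and that fold applied to ihl 32-bit words of an IP header. The ref_* names are illustrative and not part of the header.

    static unsigned short ref_csum_fold(unsigned int sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);     /* fold the high half in    */
            sum = (sum & 0xffff) + (sum >> 16);     /* ...and any carry it made */
            return (unsigned short)~sum;
    }

    static unsigned short ref_ip_csum(const unsigned short *iph, unsigned int ihl)
    {
            unsigned int i, sum = 0;

            for (i = 0; i < ihl * 2; i++)           /* ihl counts 32-bit words */
                    sum += iph[i];
            return ref_csum_fold(sum);
    }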
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h new file mode 100644 index 000000000000..002740b21951 --- /dev/null +++ b/include/asm-i386/cpu.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _ASM_I386_CPU_H_ | ||
2 | #define _ASM_I386_CPU_H_ | ||
3 | |||
4 | #include <linux/device.h> | ||
5 | #include <linux/cpu.h> | ||
6 | #include <linux/topology.h> | ||
7 | #include <linux/nodemask.h> | ||
8 | |||
9 | #include <asm/node.h> | ||
10 | |||
11 | struct i386_cpu { | ||
12 | struct cpu cpu; | ||
13 | }; | ||
14 | extern int arch_register_cpu(int num); | ||
15 | #ifdef CONFIG_HOTPLUG_CPU | ||
16 | extern void arch_unregister_cpu(int); | ||
17 | #endif | ||
18 | |||
19 | #endif /* _ASM_I386_CPU_H_ */ | ||
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h new file mode 100644 index 000000000000..e147cabd3bfe --- /dev/null +++ b/include/asm-i386/cpufeature.h | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * cpufeature.h | ||
3 | * | ||
4 | * Defines x86 CPU feature bits | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_I386_CPUFEATURE_H | ||
8 | #define __ASM_I386_CPUFEATURE_H | ||
9 | |||
10 | #include <linux/bitops.h> | ||
11 | |||
12 | #define NCAPINTS 7 /* N 32-bit words worth of info */ | ||
13 | |||
14 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | ||
15 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | ||
16 | #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ | ||
17 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | ||
18 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | ||
19 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | ||
20 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ | ||
21 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | ||
22 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ | ||
23 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | ||
24 | #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ | ||
25 | #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ | ||
26 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | ||
27 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | ||
28 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | ||
29 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ | ||
30 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | ||
31 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | ||
32 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | ||
33 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | ||
34 | #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ | ||
35 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | ||
36 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | ||
37 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | ||
38 | /* of FPU context), and CR4.OSFXSR available */ | ||
39 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | ||
40 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | ||
41 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ | ||
42 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | ||
43 | #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ | ||
44 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | ||
45 | |||
46 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | ||
47 | /* Don't duplicate feature flags which are redundant with Intel! */ | ||
48 | #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ | ||
49 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | ||
50 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | ||
51 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | ||
52 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | ||
53 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | ||
54 | #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ | ||
55 | |||
56 | /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ | ||
57 | #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ | ||
58 | #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ | ||
59 | #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ | ||
60 | |||
61 | /* Other features, Linux-defined mapping, word 3 */ | ||
62 | /* This range is used for feature bits which conflict or are synthesized */ | ||
63 | #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ | ||
64 | #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ | ||
65 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | ||
66 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | ||
67 | /* cpu types for specific tunings: */ | ||
68 | #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ | ||
69 | #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ | ||
70 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | ||
71 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | ||
72 | |||
73 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | ||
74 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | ||
75 | #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ | ||
76 | #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ | ||
77 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | ||
78 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | ||
79 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | ||
80 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | ||
81 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | ||
82 | |||
83 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | ||
84 | #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ | ||
85 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ | ||
86 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ | ||
87 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ | ||
88 | |||
89 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | ||
90 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | ||
91 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ | ||
92 | |||
93 | #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) | ||
94 | #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) | ||
95 | |||
96 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | ||
97 | #define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) | ||
98 | #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) | ||
99 | #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) | ||
100 | #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) | ||
101 | #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) | ||
102 | #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) | ||
103 | #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) | ||
104 | #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) | ||
105 | #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) | ||
106 | #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) | ||
107 | #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) | ||
108 | #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) | ||
109 | #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) | ||
110 | #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) | ||
111 | #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) | ||
112 | #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) | ||
113 | #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) | ||
114 | #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) | ||
115 | #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) | ||
116 | #define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) | ||
117 | #define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) | ||
118 | #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) | ||
119 | #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) | ||
120 | #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) | ||
121 | |||
122 | #endif /* __ASM_I386_CPUFEATURE_H */ | ||
123 | |||
124 | /* | ||
125 | * Local Variables: | ||
126 | * mode:c | ||
127 | * comment-column:42 | ||
128 | * End: | ||
129 | */ | ||
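[Editor's note] Typical use of the macros above is a runtime feature test before touching optional hardware. A hedged sketch, assuming boot_cpu_data comes in via <asm/processor.h> and rdtscll() via <asm/msr.h>; the function name is illustrative.

    #include <asm/processor.h>      /* boot_cpu_data, pulls in cpufeature.h */
    #include <asm/msr.h>            /* rdtscll() */

    static unsigned long long read_cycles_if_possible(void)
    {
            unsigned long long t = 0;

            if (cpu_has_tsc)        /* boot_cpu_has(X86_FEATURE_TSC) */
                    rdtscll(t);
            return t;
    }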
diff --git a/include/asm-i386/cputime.h b/include/asm-i386/cputime.h new file mode 100644 index 000000000000..398ed7cd171d --- /dev/null +++ b/include/asm-i386/cputime.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __I386_CPUTIME_H | ||
2 | #define __I386_CPUTIME_H | ||
3 | |||
4 | #include <asm-generic/cputime.h> | ||
5 | |||
6 | #endif /* __I386_CPUTIME_H */ | ||
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h new file mode 100644 index 000000000000..d97328951f5f --- /dev/null +++ b/include/asm-i386/current.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _I386_CURRENT_H | ||
2 | #define _I386_CURRENT_H | ||
3 | |||
4 | #include <linux/thread_info.h> | ||
5 | |||
6 | struct task_struct; | ||
7 | |||
8 | static inline struct task_struct * get_current(void) | ||
9 | { | ||
10 | return current_thread_info()->task; | ||
11 | } | ||
12 | |||
13 | #define current get_current() | ||
14 | |||
15 | #endif /* !(_I386_CURRENT_H) */ | ||
diff --git a/include/asm-i386/debugreg.h b/include/asm-i386/debugreg.h new file mode 100644 index 000000000000..f0b2b06ae0f7 --- /dev/null +++ b/include/asm-i386/debugreg.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef _I386_DEBUGREG_H | ||
2 | #define _I386_DEBUGREG_H | ||
3 | |||
4 | |||
5 | /* Register numbers for the specific debug registers. | ||
6 | Registers 0-3 contain the addresses we wish to trap on. */ | ||
7 | #define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */ | ||
8 | #define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */ | ||
9 | |||
10 | #define DR_STATUS 6 /* u_debugreg[DR_STATUS] */ | ||
11 | #define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */ | ||
12 | |||
13 | /* Define a few things for the status register. We can use this to determine | ||
14 | which debugging register was responsible for the trap. The other bits | ||
15 | are either reserved or not of interest to us. */ | ||
16 | |||
17 | #define DR_TRAP0 (0x1) /* db0 */ | ||
18 | #define DR_TRAP1 (0x2) /* db1 */ | ||
19 | #define DR_TRAP2 (0x4) /* db2 */ | ||
20 | #define DR_TRAP3 (0x8) /* db3 */ | ||
21 | |||
22 | #define DR_STEP (0x4000) /* single-step */ | ||
23 | #define DR_SWITCH (0x8000) /* task switch */ | ||
24 | |||
25 | /* Now define a bunch of things for manipulating the control register. | ||
26 | The top two bytes of the control register consist of 4 fields of 4 | ||
27 | bits - each field corresponds to one of the four debug registers, | ||
28 | and indicates what types of access we trap on, and how large the data | ||
29 | field is that we are looking at */ | ||
30 | |||
31 | #define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */ | ||
32 | #define DR_CONTROL_SIZE 4 /* 4 control bits per register */ | ||
33 | |||
34 | #define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */ | ||
35 | #define DR_RW_WRITE (0x1) | ||
36 | #define DR_RW_READ (0x3) | ||
37 | |||
38 | #define DR_LEN_1 (0x0) /* Settings for data length to trap on */ | ||
39 | #define DR_LEN_2 (0x4) | ||
40 | #define DR_LEN_4 (0xC) | ||
41 | |||
42 | /* The low byte of the control register determines which registers are | ||
43 | enabled. There are 4 fields of two bits each. One bit is "local", meaning | ||
44 | that the processor will reset it after a task switch, and the other is | ||
45 | "global", meaning that we have to explicitly reset the bit ourselves. With | ||
46 | Linux, you can use either one, since we explicitly zero the register when | ||
47 | we enter kernel mode. */ | ||
48 | |||
49 | #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ | ||
50 | #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ | ||
51 | #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ | ||
52 | |||
53 | #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ | ||
54 | #define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */ | ||
55 | |||
56 | /* The second byte of the control register has a few special things. | ||
57 | We can slow the instruction pipeline for instructions coming via the | ||
58 | gdt or the ldt if we want to. I am not sure why this is an advantage. */ | ||
59 | |||
60 | #define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */ | ||
61 | #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ | ||
62 | #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ | ||
63 | |||
64 | #endif | ||
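[Editor's note] Putting the DR7 field definitions above together, a hedged sketch of composing a control-register value that arms one 4-byte write watchpoint; the helper name is illustrative, and actually installing the value (for example through ptrace's u_debugreg slots) is a separate step.

    static unsigned long dr7_for_write_watch(int regnum)   /* regnum: 0..3 */
    {
            unsigned long dr7 = 0;

            /* R/W type and length field for this register, in the top half */
            dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
                    << (DR_CONTROL_SHIFT + regnum * DR_CONTROL_SIZE);
            /* local-enable bit for this register, in the low byte */
            dr7 |= 1UL << (DR_LOCAL_ENABLE_SHIFT + regnum * DR_ENABLE_SIZE);
            return dr7;
    }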
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h new file mode 100644 index 000000000000..456db8501c09 --- /dev/null +++ b/include/asm-i386/delay.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef _I386_DELAY_H | ||
2 | #define _I386_DELAY_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 1993 Linus Torvalds | ||
6 | * | ||
7 | * Delay routines calling functions in arch/i386/lib/delay.c | ||
8 | */ | ||
9 | |||
10 | extern void __bad_udelay(void); | ||
11 | extern void __bad_ndelay(void); | ||
12 | |||
13 | extern void __udelay(unsigned long usecs); | ||
14 | extern void __ndelay(unsigned long nsecs); | ||
15 | extern void __const_udelay(unsigned long usecs); | ||
16 | extern void __delay(unsigned long loops); | ||
17 | |||
18 | #define udelay(n) (__builtin_constant_p(n) ? \ | ||
19 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ | ||
20 | __udelay(n)) | ||
21 | |||
22 | #define ndelay(n) (__builtin_constant_p(n) ? \ | ||
23 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | ||
24 | __ndelay(n)) | ||
25 | |||
26 | #endif /* defined(_I386_DELAY_H) */ | ||
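[Editor's note] The magic multipliers in udelay()/ndelay() above are fixed-point scale factors. __const_udelay() (in arch/i386/lib/delay.c, per the comment above) multiplies its argument by the calibrated loops-per-second value and keeps only the high 32 bits of the 64-bit product, so the argument must be the requested delay expressed in units of 2^-32 seconds:

    0x10c7 = 4295 ≈ 2^32 / 10^6   (one microsecond in 2^-32-second units)
    5             ≈ 2^32 / 10^9   (one nanosecond  in 2^-32-second units)

The 20000 cap routes over-long constant delays to __bad_udelay()/__bad_ndelay(), which are deliberately left undefined so the build fails at link time and the caller is pushed towards mdelay() or a sleeping interface.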
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h new file mode 100644 index 000000000000..11e67811a990 --- /dev/null +++ b/include/asm-i386/desc.h | |||
@@ -0,0 +1,144 @@ | |||
1 | #ifndef __ARCH_DESC_H | ||
2 | #define __ARCH_DESC_H | ||
3 | |||
4 | #include <asm/ldt.h> | ||
5 | #include <asm/segment.h> | ||
6 | |||
7 | #define CPU_16BIT_STACK_SIZE 1024 | ||
8 | |||
9 | #ifndef __ASSEMBLY__ | ||
10 | |||
11 | #include <linux/preempt.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/percpu.h> | ||
14 | |||
15 | #include <asm/mmu.h> | ||
16 | |||
17 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; | ||
18 | DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]); | ||
19 | |||
20 | DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); | ||
21 | |||
22 | struct Xgt_desc_struct { | ||
23 | unsigned short size; | ||
24 | unsigned long address __attribute__((packed)); | ||
25 | unsigned short pad; | ||
26 | } __attribute__ ((packed)); | ||
27 | |||
28 | extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS]; | ||
29 | |||
30 | #define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8)) | ||
31 | #define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8)) | ||
32 | |||
33 | /* | ||
34 | * This is the ldt that every process will get unless we need | ||
35 | * something other than this. | ||
36 | */ | ||
37 | extern struct desc_struct default_ldt[]; | ||
38 | extern void set_intr_gate(unsigned int irq, void * addr); | ||
39 | |||
40 | #define _set_tssldt_desc(n,addr,limit,type) \ | ||
41 | __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \ | ||
42 | "movw %%ax,2(%2)\n\t" \ | ||
43 | "rorl $16,%%eax\n\t" \ | ||
44 | "movb %%al,4(%2)\n\t" \ | ||
45 | "movb %4,5(%2)\n\t" \ | ||
46 | "movb $0,6(%2)\n\t" \ | ||
47 | "movb %%ah,7(%2)\n\t" \ | ||
48 | "rorl $16,%%eax" \ | ||
49 | : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type)) | ||
50 | |||
51 | static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr) | ||
52 | { | ||
53 | _set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr, | ||
54 | offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89); | ||
55 | } | ||
56 | |||
57 | #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) | ||
58 | |||
59 | static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size) | ||
60 | { | ||
61 | _set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82); | ||
62 | } | ||
63 | |||
64 | #define LDT_entry_a(info) \ | ||
65 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | ||
66 | |||
67 | #define LDT_entry_b(info) \ | ||
68 | (((info)->base_addr & 0xff000000) | \ | ||
69 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | ||
70 | ((info)->limit & 0xf0000) | \ | ||
71 | (((info)->read_exec_only ^ 1) << 9) | \ | ||
72 | ((info)->contents << 10) | \ | ||
73 | (((info)->seg_not_present ^ 1) << 15) | \ | ||
74 | ((info)->seg_32bit << 22) | \ | ||
75 | ((info)->limit_in_pages << 23) | \ | ||
76 | ((info)->useable << 20) | \ | ||
77 | 0x7000) | ||
78 | |||
79 | #define LDT_empty(info) (\ | ||
80 | (info)->base_addr == 0 && \ | ||
81 | (info)->limit == 0 && \ | ||
82 | (info)->contents == 0 && \ | ||
83 | (info)->read_exec_only == 1 && \ | ||
84 | (info)->seg_32bit == 0 && \ | ||
85 | (info)->limit_in_pages == 0 && \ | ||
86 | (info)->seg_not_present == 1 && \ | ||
87 | (info)->useable == 0 ) | ||
88 | |||
89 | #if TLS_SIZE != 24 | ||
90 | # error update this code. | ||
91 | #endif | ||
92 | |||
93 | static inline void load_TLS(struct thread_struct *t, unsigned int cpu) | ||
94 | { | ||
95 | #define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i] | ||
96 | C(0); C(1); C(2); | ||
97 | #undef C | ||
98 | } | ||
99 | |||
100 | static inline void clear_LDT(void) | ||
101 | { | ||
102 | int cpu = get_cpu(); | ||
103 | |||
104 | set_ldt_desc(cpu, &default_ldt[0], 5); | ||
105 | load_LDT_desc(); | ||
106 | put_cpu(); | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * load one particular LDT into the current CPU | ||
111 | */ | ||
112 | static inline void load_LDT_nolock(mm_context_t *pc, int cpu) | ||
113 | { | ||
114 | void *segments = pc->ldt; | ||
115 | int count = pc->size; | ||
116 | |||
117 | if (likely(!count)) { | ||
118 | segments = &default_ldt[0]; | ||
119 | count = 5; | ||
120 | } | ||
121 | |||
122 | set_ldt_desc(cpu, segments, count); | ||
123 | load_LDT_desc(); | ||
124 | } | ||
125 | |||
126 | static inline void load_LDT(mm_context_t *pc) | ||
127 | { | ||
128 | int cpu = get_cpu(); | ||
129 | load_LDT_nolock(pc, cpu); | ||
130 | put_cpu(); | ||
131 | } | ||
132 | |||
133 | static inline unsigned long get_desc_base(unsigned long *desc) | ||
134 | { | ||
135 | unsigned long base; | ||
136 | base = ((desc[0] >> 16) & 0x0000ffff) | | ||
137 | ((desc[1] << 16) & 0x00ff0000) | | ||
138 | (desc[1] & 0xff000000); | ||
139 | return base; | ||
140 | } | ||
141 | |||
142 | #endif /* !__ASSEMBLY__ */ | ||
143 | |||
144 | #endif | ||
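[Editor's note] get_desc_base() above reassembles the scattered base field of a segment descriptor; for symmetry, a hedged sketch of pulling the 20-bit limit out of the same two words, honouring the granularity bit. The helper is illustrative and not part of this header.

    static inline unsigned long get_desc_limit(unsigned long *desc)
    {
            unsigned long limit;

            limit = (desc[0] & 0x0000ffff) |        /* limit bits 15..0      */
                    (desc[1] & 0x000f0000);         /* limit bits 19..16     */
            if (desc[1] & 0x00800000)               /* G bit: 4K granularity */
                    limit = (limit << 12) | 0xfff;
            return limit;
    }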
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h new file mode 100644 index 000000000000..28ed8b296afc --- /dev/null +++ b/include/asm-i386/div64.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef __I386_DIV64 | ||
2 | #define __I386_DIV64 | ||
3 | |||
4 | /* | ||
5 | * do_div() is NOT a C function. It wants to return | ||
6 | * two values (the quotient and the remainder), but | ||
7 | * since that doesn't work very well in C, what it | ||
8 | * does is: | ||
9 | * | ||
10 | * - modifies the 64-bit dividend _in_place_ | ||
11 | * - returns the 32-bit remainder | ||
12 | * | ||
13 | * This ends up being the most efficient "calling | ||
14 | * convention" on x86. | ||
15 | */ | ||
16 | #define do_div(n,base) ({ \ | ||
17 | unsigned long __upper, __low, __high, __mod, __base; \ | ||
18 | __base = (base); \ | ||
19 | asm("":"=a" (__low), "=d" (__high):"A" (n)); \ | ||
20 | __upper = __high; \ | ||
21 | if (__high) { \ | ||
22 | __upper = __high % (__base); \ | ||
23 | __high = __high / (__base); \ | ||
24 | } \ | ||
25 | asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ | ||
26 | asm("":"=A" (n):"a" (__low),"d" (__high)); \ | ||
27 | __mod; \ | ||
28 | }) | ||
29 | |||
30 | /* | ||
31 | * (long)X = ((long long)divs) / (long)div | ||
32 | * (long)rem = ((long long)divs) % (long)div | ||
33 | * | ||
34 | * Warning, this will raise a divide exception if X overflows. | ||
35 | */ | ||
36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) | ||
37 | |||
38 | extern inline long | ||
39 | div_ll_X_l_rem(long long divs, long div, long *rem) | ||
40 | { | ||
41 | long dum2; | ||
42 | __asm__("divl %2":"=a"(dum2), "=d"(*rem) | ||
43 | : "rm"(div), "A"(divs)); | ||
44 | |||
45 | return dum2; | ||
46 | |||
47 | } | ||
48 | #endif | ||
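[Editor's note] Because do_div() modifies its 64-bit dividend in place and returns the remainder as the macro's value, the usual pattern looks like the hedged sketch below (names and values illustrative); the first argument must be an lvalue, so a local copy is divided rather than the caller's original.

    #include <linux/types.h>
    #include <asm/div64.h>

    static void split_ns(u64 ns, u64 *secs, u32 *nsec)
    {
            *nsec = do_div(ns, 1000000000);         /* ns now holds whole seconds */
            *secs = ns;
    }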
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h new file mode 100644 index 000000000000..563964b2995b --- /dev/null +++ b/include/asm-i386/dma-mapping.h | |||
@@ -0,0 +1,177 @@ | |||
1 | #ifndef _ASM_I386_DMA_MAPPING_H | ||
2 | #define _ASM_I386_DMA_MAPPING_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/cache.h> | ||
7 | #include <asm/io.h> | ||
8 | #include <asm/scatterlist.h> | ||
9 | |||
10 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
11 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
12 | |||
13 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
14 | dma_addr_t *dma_handle, unsigned int __nocast flag); | ||
15 | |||
16 | void dma_free_coherent(struct device *dev, size_t size, | ||
17 | void *vaddr, dma_addr_t dma_handle); | ||
18 | |||
19 | static inline dma_addr_t | ||
20 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
21 | enum dma_data_direction direction) | ||
22 | { | ||
23 | BUG_ON(direction == DMA_NONE); | ||
24 | flush_write_buffers(); | ||
25 | return virt_to_phys(ptr); | ||
26 | } | ||
27 | |||
28 | static inline void | ||
29 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
30 | enum dma_data_direction direction) | ||
31 | { | ||
32 | BUG_ON(direction == DMA_NONE); | ||
33 | } | ||
34 | |||
35 | static inline int | ||
36 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
37 | enum dma_data_direction direction) | ||
38 | { | ||
39 | int i; | ||
40 | |||
41 | BUG_ON(direction == DMA_NONE); | ||
42 | |||
43 | for (i = 0; i < nents; i++ ) { | ||
44 | BUG_ON(!sg[i].page); | ||
45 | |||
46 | sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; | ||
47 | } | ||
48 | |||
49 | flush_write_buffers(); | ||
50 | return nents; | ||
51 | } | ||
52 | |||
53 | static inline dma_addr_t | ||
54 | dma_map_page(struct device *dev, struct page *page, unsigned long offset, | ||
55 | size_t size, enum dma_data_direction direction) | ||
56 | { | ||
57 | BUG_ON(direction == DMA_NONE); | ||
58 | return page_to_phys(page) + offset; | ||
59 | } | ||
60 | |||
61 | static inline void | ||
62 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
63 | enum dma_data_direction direction) | ||
64 | { | ||
65 | BUG_ON(direction == DMA_NONE); | ||
66 | } | ||
67 | |||
68 | |||
69 | static inline void | ||
70 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
71 | enum dma_data_direction direction) | ||
72 | { | ||
73 | BUG_ON(direction == DMA_NONE); | ||
74 | } | ||
75 | |||
76 | static inline void | ||
77 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
78 | enum dma_data_direction direction) | ||
79 | { | ||
80 | } | ||
81 | |||
82 | static inline void | ||
83 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | flush_write_buffers(); | ||
87 | } | ||
88 | |||
89 | static inline void | ||
90 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
91 | unsigned long offset, size_t size, | ||
92 | enum dma_data_direction direction) | ||
93 | { | ||
94 | } | ||
95 | |||
96 | static inline void | ||
97 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
98 | unsigned long offset, size_t size, | ||
99 | enum dma_data_direction direction) | ||
100 | { | ||
101 | flush_write_buffers(); | ||
102 | } | ||
103 | |||
104 | static inline void | ||
105 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
106 | enum dma_data_direction direction) | ||
107 | { | ||
108 | } | ||
109 | |||
110 | static inline void | ||
111 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
112 | enum dma_data_direction direction) | ||
113 | { | ||
114 | flush_write_buffers(); | ||
115 | } | ||
116 | |||
117 | static inline int | ||
118 | dma_mapping_error(dma_addr_t dma_addr) | ||
119 | { | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static inline int | ||
124 | dma_supported(struct device *dev, u64 mask) | ||
125 | { | ||
126 | /* | ||
127 | * we fall back to GFP_DMA when the mask isn't all 1s, | ||
128 | * so we can't guarantee allocations that must be | ||
129 | * within a tighter range than GFP_DMA.. | ||
130 | */ | ||
131 | if(mask < 0x00ffffff) | ||
132 | return 0; | ||
133 | |||
134 | return 1; | ||
135 | } | ||
136 | |||
137 | static inline int | ||
138 | dma_set_mask(struct device *dev, u64 mask) | ||
139 | { | ||
140 | if(!dev->dma_mask || !dma_supported(dev, mask)) | ||
141 | return -EIO; | ||
142 | |||
143 | *dev->dma_mask = mask; | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static inline int | ||
149 | dma_get_cache_alignment(void) | ||
150 | { | ||
151 | /* no easy way to get cache size on all x86, so return the | ||
152 | * maximum possible, to be safe */ | ||
153 | return (1 << L1_CACHE_SHIFT_MAX); | ||
154 | } | ||
155 | |||
156 | #define dma_is_consistent(d) (1) | ||
157 | |||
158 | static inline void | ||
159 | dma_cache_sync(void *vaddr, size_t size, | ||
160 | enum dma_data_direction direction) | ||
161 | { | ||
162 | flush_write_buffers(); | ||
163 | } | ||
164 | |||
165 | #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | ||
166 | extern int | ||
167 | dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
168 | dma_addr_t device_addr, size_t size, int flags); | ||
169 | |||
170 | extern void | ||
171 | dma_release_declared_memory(struct device *dev); | ||
172 | |||
173 | extern void * | ||
174 | dma_mark_declared_memory_occupied(struct device *dev, | ||
175 | dma_addr_t device_addr, size_t size); | ||
176 | |||
177 | #endif | ||
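[Editor's note] A hedged sketch of the usual streaming-mapping round trip with the functions above, for a buffer headed to a device; 'dev', 'buf', 'len' and the helper name are illustrative, and the direction constants come from <linux/dma-mapping.h>.

    static int send_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t bus;

            bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(bus))
                    return -EIO;

            /* ... hand 'bus' to the device and wait for the transfer ... */

            dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
            return 0;
    }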
diff --git a/include/asm-i386/dma.h b/include/asm-i386/dma.h new file mode 100644 index 000000000000..f24b2bba2831 --- /dev/null +++ b/include/asm-i386/dma.h | |||
@@ -0,0 +1,298 @@ | |||
1 | /* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ | ||
2 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
3 | * Written by Hennus Bergman, 1992. | ||
4 | * High DMA channel support & info by Hannu Savolainen | ||
5 | * and John Boyd, Nov. 1992. | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_DMA_H | ||
9 | #define _ASM_DMA_H | ||
10 | |||
11 | #include <linux/config.h> | ||
12 | #include <linux/spinlock.h> /* And spinlocks */ | ||
13 | #include <asm/io.h> /* need byte IO */ | ||
14 | #include <linux/delay.h> | ||
15 | |||
16 | |||
17 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | ||
18 | #define dma_outb outb_p | ||
19 | #else | ||
20 | #define dma_outb outb | ||
21 | #endif | ||
22 | |||
23 | #define dma_inb inb | ||
24 | |||
25 | /* | ||
26 | * NOTES about DMA transfers: | ||
27 | * | ||
28 | * controller 1: channels 0-3, byte operations, ports 00-1F | ||
29 | * controller 2: channels 4-7, word operations, ports C0-DF | ||
30 | * | ||
31 | * - ALL registers are 8 bits only, regardless of transfer size | ||
32 | * - channel 4 is not used - cascades 1 into 2. | ||
33 | * - channels 0-3 are byte - addresses/counts are for physical bytes | ||
34 | * - channels 5-7 are word - addresses/counts are for physical words | ||
35 | * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries | ||
36 | * - transfer count loaded to registers is 1 less than actual count | ||
37 | * - controller 2 offsets are all even (2x offsets for controller 1) | ||
38 | * - page registers for 5-7 don't use data bit 0, represent 128K pages | ||
39 | * - page registers for 0-3 use bit 0, represent 64K pages | ||
40 | * | ||
41 | * DMA transfers are limited to the lower 16MB of _physical_ memory. | ||
42 | * Note that addresses loaded into registers must be _physical_ addresses, | ||
43 | * not logical addresses (which may differ if paging is active). | ||
44 | * | ||
45 | * Address mapping for channels 0-3: | ||
46 | * | ||
47 | * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) | ||
48 | * | ... | | ... | | ... | | ||
49 | * | ... | | ... | | ... | | ||
50 | * | ... | | ... | | ... | | ||
51 | * P7 ... P0 A7 ... A0 A7 ... A0 | ||
52 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
53 | * | ||
54 | * Address mapping for channels 5-7: | ||
55 | * | ||
56 | * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) | ||
57 | * | ... | \ \ ... \ \ \ ... \ \ | ||
58 | * | ... | \ \ ... \ \ \ ... \ (not used) | ||
59 | * | ... | \ \ ... \ \ \ ... \ | ||
60 | * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 | ||
61 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
62 | * | ||
63 | * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses | ||
64 | * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at | ||
65 | * the hardware level, so odd-byte transfers aren't possible). | ||
66 | * | ||
67 | * Transfer count (_not # bytes_) is limited to 64K, represented as actual | ||
68 | * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, | ||
69 | * and up to 128K bytes may be transferred on channels 5-7 in one operation. | ||
70 | * | ||
71 | */ | ||
72 | |||
73 | #define MAX_DMA_CHANNELS 8 | ||
74 | |||
75 | /* The maximum address that we can perform a DMA transfer to on this platform */ | ||
76 | #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) | ||
77 | |||
78 | /* 8237 DMA controllers */ | ||
79 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
80 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
81 | |||
82 | /* DMA controller registers */ | ||
83 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
84 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
85 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
86 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
87 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
88 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
89 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
90 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
91 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
92 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
93 | |||
94 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
95 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
96 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
97 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
98 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
99 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
100 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
101 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
102 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
103 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
104 | |||
105 | #define DMA_ADDR_0 0x00 /* DMA address registers */ | ||
106 | #define DMA_ADDR_1 0x02 | ||
107 | #define DMA_ADDR_2 0x04 | ||
108 | #define DMA_ADDR_3 0x06 | ||
109 | #define DMA_ADDR_4 0xC0 | ||
110 | #define DMA_ADDR_5 0xC4 | ||
111 | #define DMA_ADDR_6 0xC8 | ||
112 | #define DMA_ADDR_7 0xCC | ||
113 | |||
114 | #define DMA_CNT_0 0x01 /* DMA count registers */ | ||
115 | #define DMA_CNT_1 0x03 | ||
116 | #define DMA_CNT_2 0x05 | ||
117 | #define DMA_CNT_3 0x07 | ||
118 | #define DMA_CNT_4 0xC2 | ||
119 | #define DMA_CNT_5 0xC6 | ||
120 | #define DMA_CNT_6 0xCA | ||
121 | #define DMA_CNT_7 0xCE | ||
122 | |||
123 | #define DMA_PAGE_0 0x87 /* DMA page registers */ | ||
124 | #define DMA_PAGE_1 0x83 | ||
125 | #define DMA_PAGE_2 0x81 | ||
126 | #define DMA_PAGE_3 0x82 | ||
127 | #define DMA_PAGE_5 0x8B | ||
128 | #define DMA_PAGE_6 0x89 | ||
129 | #define DMA_PAGE_7 0x8A | ||
130 | |||
131 | #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ | ||
132 | #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ | ||
133 | #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
134 | |||
135 | #define DMA_AUTOINIT 0x10 | ||
136 | |||
137 | |||
138 | extern spinlock_t dma_spin_lock; | ||
139 | |||
140 | static __inline__ unsigned long claim_dma_lock(void) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | spin_lock_irqsave(&dma_spin_lock, flags); | ||
144 | return flags; | ||
145 | } | ||
146 | |||
147 | static __inline__ void release_dma_lock(unsigned long flags) | ||
148 | { | ||
149 | spin_unlock_irqrestore(&dma_spin_lock, flags); | ||
150 | } | ||
151 | |||
152 | /* enable/disable a specific DMA channel */ | ||
153 | static __inline__ void enable_dma(unsigned int dmanr) | ||
154 | { | ||
155 | if (dmanr<=3) | ||
156 | dma_outb(dmanr, DMA1_MASK_REG); | ||
157 | else | ||
158 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
159 | } | ||
160 | |||
161 | static __inline__ void disable_dma(unsigned int dmanr) | ||
162 | { | ||
163 | if (dmanr<=3) | ||
164 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
165 | else | ||
166 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
167 | } | ||
168 | |||
169 | /* Clear the 'DMA Pointer Flip Flop'. | ||
170 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
171 | * Use this once to initialize the FF to a known state. | ||
172 | * After that, keep track of it. :-) | ||
173 | * --- In order to do that, the DMA routines below should --- | ||
174 | * --- only be used while holding the DMA lock ! --- | ||
175 | */ | ||
176 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
177 | { | ||
178 | if (dmanr<=3) | ||
179 | dma_outb(0, DMA1_CLEAR_FF_REG); | ||
180 | else | ||
181 | dma_outb(0, DMA2_CLEAR_FF_REG); | ||
182 | } | ||
183 | |||
184 | /* set mode (above) for a specific DMA channel */ | ||
185 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
186 | { | ||
187 | if (dmanr<=3) | ||
188 | dma_outb(mode | dmanr, DMA1_MODE_REG); | ||
189 | else | ||
190 | dma_outb(mode | (dmanr&3), DMA2_MODE_REG); | ||
191 | } | ||
192 | |||
193 | /* Set only the page register bits of the transfer address. | ||
194 | * This is used for successive transfers when we know the contents of | ||
195 | * the lower 16 bits of the DMA current address register, but a 64k boundary | ||
196 | * may have been crossed. | ||
197 | */ | ||
198 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | ||
199 | { | ||
200 | switch(dmanr) { | ||
201 | case 0: | ||
202 | dma_outb(pagenr, DMA_PAGE_0); | ||
203 | break; | ||
204 | case 1: | ||
205 | dma_outb(pagenr, DMA_PAGE_1); | ||
206 | break; | ||
207 | case 2: | ||
208 | dma_outb(pagenr, DMA_PAGE_2); | ||
209 | break; | ||
210 | case 3: | ||
211 | dma_outb(pagenr, DMA_PAGE_3); | ||
212 | break; | ||
213 | case 5: | ||
214 | dma_outb(pagenr & 0xfe, DMA_PAGE_5); | ||
215 | break; | ||
216 | case 6: | ||
217 | dma_outb(pagenr & 0xfe, DMA_PAGE_6); | ||
218 | break; | ||
219 | case 7: | ||
220 | dma_outb(pagenr & 0xfe, DMA_PAGE_7); | ||
221 | break; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | |||
226 | /* Set transfer address & page bits for specific DMA channel. | ||
227 | * Assumes dma flipflop is clear. | ||
228 | */ | ||
229 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | ||
230 | { | ||
231 | set_dma_page(dmanr, a>>16); | ||
232 | if (dmanr <= 3) { | ||
233 | dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
234 | dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
235 | } else { | ||
236 | dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
237 | dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | |||
242 | /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for | ||
243 | * a specific DMA channel. | ||
244 | * You must ensure the parameters are valid. | ||
245 | * NOTE: from a manual: "the number of transfers is one more | ||
246 | * than the initial word count"! This is taken into account. | ||
247 | * Assumes dma flip-flop is clear. | ||
248 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
249 | */ | ||
250 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
251 | { | ||
252 | count--; | ||
253 | if (dmanr <= 3) { | ||
254 | dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
255 | dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
256 | } else { | ||
257 | dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
258 | dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | |||
263 | /* Get DMA residue count. After a DMA transfer, this | ||
264 | * should return zero. Reading this while a DMA transfer is | ||
265 | * still in progress will return unpredictable results. | ||
266 | * If called before the channel has been used, it may return 1. | ||
267 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
268 | * | ||
269 | * Assumes DMA flip-flop is clear. | ||
270 | */ | ||
271 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
272 | { | ||
273 | unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE | ||
274 | : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; | ||
275 | |||
276 | /* using short to get 16-bit wrap around */ | ||
277 | unsigned short count; | ||
278 | |||
279 | count = 1 + dma_inb(io_port); | ||
280 | count += dma_inb(io_port) << 8; | ||
281 | |||
282 | return (dmanr<=3)? count : (count<<1); | ||
283 | } | ||
284 | |||
285 | |||
286 | /* These are in kernel/dma.c: */ | ||
287 | extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ | ||
288 | extern void free_dma(unsigned int dmanr); /* release it again */ | ||
289 | |||
290 | /* From PCI */ | ||
291 | |||
292 | #ifdef CONFIG_PCI | ||
293 | extern int isa_dma_bridge_buggy; | ||
294 | #else | ||
295 | #define isa_dma_bridge_buggy (0) | ||
296 | #endif | ||
297 | |||
298 | #endif /* _ASM_DMA_H */ | ||
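[Editor's note] Tying the helpers above together, a hedged sketch of programming one 8237 channel for a memory-to-device transfer. The channel is assumed to have been reserved with request_dma() beforehand; names are illustrative, and the buffer must sit below MAX_DMA_ADDRESS and must not cross the 64K/128K physical boundary described in the NOTES.

    static void start_isa_dma(unsigned int channel, void *buf, unsigned int count)
    {
            unsigned long flags = claim_dma_lock();

            disable_dma(channel);
            clear_dma_ff(channel);                  /* put the flip-flop in a known state */
            set_dma_mode(channel, DMA_MODE_WRITE);  /* memory -> I/O */
            set_dma_addr(channel, virt_to_phys(buf));
            set_dma_count(channel, count);          /* bytes; must be even for ch 5-7 */
            enable_dma(channel);

            release_dma_lock(flags);
    }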
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h new file mode 100644 index 000000000000..5c285aee7294 --- /dev/null +++ b/include/asm-i386/e820.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * structures and definitions for the int 15, ax=e820 memory map | ||
3 | * scheme. | ||
4 | * | ||
5 | * In a nutshell, arch/i386/boot/setup.S populates a scratch table | ||
6 | * in the empty_zero_block that contains a list of usable address/size | ||
7 | * duples. In arch/i386/kernel/setup.c, this information is | ||
8 | * transferred into the e820map, and in arch/i386/mm/init.c, that | ||
9 | * new information is used to mark pages reserved or not. | ||
10 | * | ||
11 | */ | ||
12 | #ifndef __E820_HEADER | ||
13 | #define __E820_HEADER | ||
14 | |||
15 | #define E820MAP 0x2d0 /* our map */ | ||
16 | #define E820MAX 32 /* number of entries in E820MAP */ | ||
17 | #define E820NR 0x1e8 /* # entries in E820MAP */ | ||
18 | |||
19 | #define E820_RAM 1 | ||
20 | #define E820_RESERVED 2 | ||
21 | #define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */ | ||
22 | #define E820_NVS 4 | ||
23 | |||
24 | #define HIGH_MEMORY (1024*1024) | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | struct e820map { | ||
29 | int nr_map; | ||
30 | struct e820entry { | ||
31 | unsigned long long addr; /* start of memory segment */ | ||
32 | unsigned long long size; /* size of memory segment */ | ||
33 | unsigned long type; /* type of memory segment */ | ||
34 | } map[E820MAX]; | ||
35 | }; | ||
36 | |||
37 | extern struct e820map e820; | ||
38 | #endif/*!__ASSEMBLY__*/ | ||
39 | |||
40 | #endif/*__E820_HEADER*/ | ||
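[Editor's note] A hedged sketch of walking the map declared above to total the usable RAM; setup.c does essentially this when it sizes memory, but the helper name here is illustrative.

    static unsigned long long e820_usable_ram(void)
    {
            unsigned long long ram = 0;
            int i;

            for (i = 0; i < e820.nr_map; i++)
                    if (e820.map[i].type == E820_RAM)
                            ram += e820.map[i].size;
            return ram;
    }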
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h new file mode 100644 index 000000000000..130bdc8c68cf --- /dev/null +++ b/include/asm-i386/elf.h | |||
@@ -0,0 +1,193 @@ | |||
1 | #ifndef __ASMi386_ELF_H | ||
2 | #define __ASMi386_ELF_H | ||
3 | |||
4 | /* | ||
5 | * ELF register definitions.. | ||
6 | */ | ||
7 | |||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm/user.h> | ||
10 | #include <asm/processor.h> | ||
11 | #include <asm/system.h> /* for savesegment */ | ||
12 | |||
13 | #include <linux/utsname.h> | ||
14 | |||
15 | #define R_386_NONE 0 | ||
16 | #define R_386_32 1 | ||
17 | #define R_386_PC32 2 | ||
18 | #define R_386_GOT32 3 | ||
19 | #define R_386_PLT32 4 | ||
20 | #define R_386_COPY 5 | ||
21 | #define R_386_GLOB_DAT 6 | ||
22 | #define R_386_JMP_SLOT 7 | ||
23 | #define R_386_RELATIVE 8 | ||
24 | #define R_386_GOTOFF 9 | ||
25 | #define R_386_GOTPC 10 | ||
26 | #define R_386_NUM 11 | ||
27 | |||
28 | typedef unsigned long elf_greg_t; | ||
29 | |||
30 | #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) | ||
31 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
32 | |||
33 | typedef struct user_i387_struct elf_fpregset_t; | ||
34 | typedef struct user_fxsr_struct elf_fpxregset_t; | ||
35 | |||
36 | /* | ||
37 | * This is used to ensure we don't load something for the wrong architecture. | ||
38 | */ | ||
39 | #define elf_check_arch(x) \ | ||
40 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) | ||
41 | |||
42 | /* | ||
43 | * These are used to set parameters in the core dumps. | ||
44 | */ | ||
45 | #define ELF_CLASS ELFCLASS32 | ||
46 | #define ELF_DATA ELFDATA2LSB | ||
47 | #define ELF_ARCH EM_386 | ||
48 | |||
49 | /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx | ||
50 | contains a pointer to a function which might be registered using `atexit'. | ||
51 | This provides a means for the dynamic linker to call DT_FINI functions for | ||
52 | shared libraries that have been loaded before the code runs. | ||
53 | |||
54 | A value of 0 tells us we have no such handler. | ||
55 | |||
56 | We might as well make sure everything else is cleared too (except for %esp), | ||
57 | just to make things more deterministic. | ||
58 | */ | ||
59 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | ||
60 | _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ | ||
61 | _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ | ||
62 | _r->eax = 0; \ | ||
63 | } while (0) | ||
64 | |||
65 | #define USE_ELF_CORE_DUMP | ||
66 | #define ELF_EXEC_PAGESIZE 4096 | ||
67 | |||
68 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
69 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
70 | the loader. We need to make sure that it is out of the way of the program | ||
71 | that it will "exec", and that there is sufficient room for the brk. */ | ||
72 | |||
73 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
74 | |||
75 | /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is | ||
76 | now struct user_regs_struct; they are different) */ | ||
77 | |||
78 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ | ||
79 | pr_reg[0] = regs->ebx; \ | ||
80 | pr_reg[1] = regs->ecx; \ | ||
81 | pr_reg[2] = regs->edx; \ | ||
82 | pr_reg[3] = regs->esi; \ | ||
83 | pr_reg[4] = regs->edi; \ | ||
84 | pr_reg[5] = regs->ebp; \ | ||
85 | pr_reg[6] = regs->eax; \ | ||
86 | pr_reg[7] = regs->xds; \ | ||
87 | pr_reg[8] = regs->xes; \ | ||
88 | savesegment(fs,pr_reg[9]); \ | ||
89 | savesegment(gs,pr_reg[10]); \ | ||
90 | pr_reg[11] = regs->orig_eax; \ | ||
91 | pr_reg[12] = regs->eip; \ | ||
92 | pr_reg[13] = regs->xcs; \ | ||
93 | pr_reg[14] = regs->eflags; \ | ||
94 | pr_reg[15] = regs->esp; \ | ||
95 | pr_reg[16] = regs->xss; | ||
96 | |||
97 | /* This yields a mask that user programs can use to figure out what | ||
98 | instruction set this CPU supports. This could be done in user space, | ||
99 | but it's not easy, and we've already done it here. */ | ||
100 | |||
101 | #define ELF_HWCAP (boot_cpu_data.x86_capability[0]) | ||
102 | |||
103 | /* This yields a string that ld.so will use to load implementation | ||
104 | specific libraries for optimization. This is more specific in | ||
105 | intent than poking at uname or /proc/cpuinfo. | ||
106 | |||
107 | For the moment, we have only optimizations for the Intel generations, | ||
108 | but that could change... */ | ||
109 | |||
110 | #define ELF_PLATFORM (system_utsname.machine) | ||
111 | |||
112 | /* | ||
113 | * Architecture-neutral AT_ values in 0-17, leave some room | ||
114 | * for more of them, start the x86-specific ones at 32. | ||
115 | */ | ||
116 | #define AT_SYSINFO 32 | ||
117 | #define AT_SYSINFO_EHDR 33 | ||
118 | |||
119 | #ifdef __KERNEL__ | ||
120 | #define SET_PERSONALITY(ex, ibcs2) do { } while (0) | ||
121 | |||
122 | /* | ||
123 | * An executable for which elf_read_implies_exec() returns TRUE will | ||
124 | * have the READ_IMPLIES_EXEC personality flag set automatically. | ||
125 | */ | ||
126 | #define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) | ||
127 | |||
128 | extern int dump_task_regs (struct task_struct *, elf_gregset_t *); | ||
129 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); | ||
130 | extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *); | ||
131 | |||
132 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | ||
133 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | ||
134 | #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) | ||
135 | |||
136 | #define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL)) | ||
137 | #define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE) | ||
138 | #define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall) | ||
139 | extern void __kernel_vsyscall; | ||
140 | |||
141 | #define ARCH_DLINFO \ | ||
142 | do { \ | ||
143 | NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \ | ||
144 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \ | ||
145 | } while (0) | ||
146 | |||
147 | /* | ||
148 | * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out | ||
149 | * extra segments containing the vsyscall DSO contents. Dumping its | ||
150 | * contents makes post-mortem fully interpretable later without matching up | ||
151 | * the same kernel and hardware config to see what PC values meant. | ||
152 | * Dumping its extra ELF program headers includes all the other information | ||
153 | * a debugger needs to easily find how the vsyscall DSO was being used. | ||
154 | */ | ||
155 | #define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum) | ||
156 | #define ELF_CORE_WRITE_EXTRA_PHDRS \ | ||
157 | do { \ | ||
158 | const struct elf_phdr *const vsyscall_phdrs = \ | ||
159 | (const struct elf_phdr *) (VSYSCALL_BASE \ | ||
160 | + VSYSCALL_EHDR->e_phoff); \ | ||
161 | int i; \ | ||
162 | Elf32_Off ofs = 0; \ | ||
163 | for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ | ||
164 | struct elf_phdr phdr = vsyscall_phdrs[i]; \ | ||
165 | if (phdr.p_type == PT_LOAD) { \ | ||
166 | BUG_ON(ofs != 0); \ | ||
167 | ofs = phdr.p_offset = offset; \ | ||
168 | phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \ | ||
169 | phdr.p_filesz = phdr.p_memsz; \ | ||
170 | offset += phdr.p_filesz; \ | ||
171 | } \ | ||
172 | else \ | ||
173 | phdr.p_offset += ofs; \ | ||
174 | phdr.p_paddr = 0; /* match other core phdrs */ \ | ||
175 | DUMP_WRITE(&phdr, sizeof(phdr)); \ | ||
176 | } \ | ||
177 | } while (0) | ||
178 | #define ELF_CORE_WRITE_EXTRA_DATA \ | ||
179 | do { \ | ||
180 | const struct elf_phdr *const vsyscall_phdrs = \ | ||
181 | (const struct elf_phdr *) (VSYSCALL_BASE \ | ||
182 | + VSYSCALL_EHDR->e_phoff); \ | ||
183 | int i; \ | ||
184 | for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \ | ||
185 | if (vsyscall_phdrs[i].p_type == PT_LOAD) \ | ||
186 | DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \ | ||
187 | PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \ | ||
188 | } \ | ||
189 | } while (0) | ||
190 | |||
191 | #endif | ||
192 | |||
193 | #endif | ||
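The two ARCH_DLINFO entries end up in each process's ELF auxiliary vector, so userspace can locate the vsyscall DSO without any syscall. An illustrative userspace sketch, assuming glibc (getauxval needs glibc 2.16+; AT_SYSINFO and AT_SYSINFO_EHDR come from <elf.h>):

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
        /* AT_SYSINFO_EHDR: ELF header of the kernel-supplied DSO.
         * AT_SYSINFO: the __kernel_vsyscall entry point (x86-specific). */
        unsigned long ehdr  = getauxval(AT_SYSINFO_EHDR);
        unsigned long entry = getauxval(AT_SYSINFO);

        printf("vsyscall DSO at %#lx, entry at %#lx\n", ehdr, entry);
        return 0;
}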
diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h new file mode 100644 index 000000000000..969b34374728 --- /dev/null +++ b/include/asm-i386/errno.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _I386_ERRNO_H | ||
2 | #define _I386_ERRNO_H | ||
3 | |||
4 | #include <asm-generic/errno.h> | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-i386/fcntl.h b/include/asm-i386/fcntl.h new file mode 100644 index 000000000000..511cde94a3ed --- /dev/null +++ b/include/asm-i386/fcntl.h | |||
@@ -0,0 +1,88 @@ | |||
1 | #ifndef _I386_FCNTL_H | ||
2 | #define _I386_FCNTL_H | ||
3 | |||
4 | /* open/fcntl - O_SYNC is only implemented on block devices and on files | ||
5 | located on an ext2 file system */ | ||
6 | #define O_ACCMODE 0003 | ||
7 | #define O_RDONLY 00 | ||
8 | #define O_WRONLY 01 | ||
9 | #define O_RDWR 02 | ||
10 | #define O_CREAT 0100 /* not fcntl */ | ||
11 | #define O_EXCL 0200 /* not fcntl */ | ||
12 | #define O_NOCTTY 0400 /* not fcntl */ | ||
13 | #define O_TRUNC 01000 /* not fcntl */ | ||
14 | #define O_APPEND 02000 | ||
15 | #define O_NONBLOCK 04000 | ||
16 | #define O_NDELAY O_NONBLOCK | ||
17 | #define O_SYNC 010000 | ||
18 | #define FASYNC 020000 /* fcntl, for BSD compatibility */ | ||
19 | #define O_DIRECT 040000 /* direct disk access hint */ | ||
20 | #define O_LARGEFILE 0100000 | ||
21 | #define O_DIRECTORY 0200000 /* must be a directory */ | ||
22 | #define O_NOFOLLOW 0400000 /* don't follow links */ | ||
23 | #define O_NOATIME 01000000 | ||
24 | |||
25 | #define F_DUPFD 0 /* dup */ | ||
26 | #define F_GETFD 1 /* get close_on_exec */ | ||
27 | #define F_SETFD 2 /* set/clear close_on_exec */ | ||
28 | #define F_GETFL 3 /* get file->f_flags */ | ||
29 | #define F_SETFL 4 /* set file->f_flags */ | ||
30 | #define F_GETLK 5 | ||
31 | #define F_SETLK 6 | ||
32 | #define F_SETLKW 7 | ||
33 | |||
34 | #define F_SETOWN 8 /* for sockets. */ | ||
35 | #define F_GETOWN 9 /* for sockets. */ | ||
36 | #define F_SETSIG 10 /* for sockets. */ | ||
37 | #define F_GETSIG 11 /* for sockets. */ | ||
38 | |||
39 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
40 | #define F_SETLK64 13 | ||
41 | #define F_SETLKW64 14 | ||
42 | |||
43 | /* for F_[GET|SET]FL */ | ||
44 | #define FD_CLOEXEC 1 /* actually anything with low bit set goes */ | ||
45 | |||
46 | /* for posix fcntl() and lockf() */ | ||
47 | #define F_RDLCK 0 | ||
48 | #define F_WRLCK 1 | ||
49 | #define F_UNLCK 2 | ||
50 | |||
51 | /* for old implementation of bsd flock () */ | ||
52 | #define F_EXLCK 4 /* or 3 */ | ||
53 | #define F_SHLCK 8 /* or 4 */ | ||
54 | |||
55 | /* for leases */ | ||
56 | #define F_INPROGRESS 16 | ||
57 | |||
58 | /* operations for bsd flock(), also used by the kernel implementation */ | ||
59 | #define LOCK_SH 1 /* shared lock */ | ||
60 | #define LOCK_EX 2 /* exclusive lock */ | ||
61 | #define LOCK_NB 4 /* or'd with one of the above to prevent | ||
62 | blocking */ | ||
63 | #define LOCK_UN 8 /* remove lock */ | ||
64 | |||
65 | #define LOCK_MAND 32 /* This is a mandatory flock */ | ||
66 | #define LOCK_READ 64 /* ... Which allows concurrent read operations */ | ||
67 | #define LOCK_WRITE 128 /* ... Which allows concurrent write operations */ | ||
68 | #define LOCK_RW 192 /* ... Which allows concurrent read & write ops */ | ||
69 | |||
70 | struct flock { | ||
71 | short l_type; | ||
72 | short l_whence; | ||
73 | off_t l_start; | ||
74 | off_t l_len; | ||
75 | pid_t l_pid; | ||
76 | }; | ||
77 | |||
78 | struct flock64 { | ||
79 | short l_type; | ||
80 | short l_whence; | ||
81 | loff_t l_start; | ||
82 | loff_t l_len; | ||
83 | pid_t l_pid; | ||
84 | }; | ||
85 | |||
86 | #define F_LINUX_SPECIFIC_BASE 1024 | ||
87 | |||
88 | #endif | ||
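The F_SETLK commands and struct flock above back the usual POSIX advisory-locking sequence; a minimal userspace sketch (the file name example.lock is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct flock fl = {
                .l_type   = F_WRLCK,    /* exclusive write lock */
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* 0 = lock to end of file */
        };
        int fd = open("example.lock", O_RDWR | O_CREAT, 0644);

        if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
                perror("lock");
                return 1;
        }
        /* ... do work while holding the lock ... */
        fl.l_type = F_UNLCK;
        fcntl(fd, F_SETLK, &fl);
        close(fd);
        return 0;
}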
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h new file mode 100644 index 000000000000..c94cac958389 --- /dev/null +++ b/include/asm-i386/fixmap.h | |||
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * fixmap.h: compile-time virtual memory allocation | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1998 Ingo Molnar | ||
9 | * | ||
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_FIXMAP_H | ||
14 | #define _ASM_FIXMAP_H | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | |||
18 | /* used by vmalloc.c, vsyscall.lds.S. | ||
19 | * | ||
20 | * Leave one empty page between vmalloc'ed areas and | ||
21 | * the start of the fixmap. | ||
22 | */ | ||
23 | #define __FIXADDR_TOP 0xfffff000 | ||
24 | |||
25 | #ifndef __ASSEMBLY__ | ||
26 | #include <linux/kernel.h> | ||
27 | #include <asm/acpi.h> | ||
28 | #include <asm/apicdef.h> | ||
29 | #include <asm/page.h> | ||
30 | #ifdef CONFIG_HIGHMEM | ||
31 | #include <linux/threads.h> | ||
32 | #include <asm/kmap_types.h> | ||
33 | #endif | ||
34 | |||
35 | /* | ||
36 | * Here we define all the compile-time 'special' virtual | ||
37 | * addresses. The point is to have a constant address at | ||
38 | * compile time, but to set the physical address only | ||
39 | * in the boot process. We allocate these special addresses | ||
40 | * from the end of virtual memory (0xfffff000) backwards. | ||
41 | * Also this lets us do fail-safe vmalloc(), we | ||
42 | * can guarantee that these special addresses and | ||
43 | * vmalloc()-ed addresses never overlap. | ||
44 | * | ||
45 | * these 'compile-time allocated' memory buffers are | ||
46 | * fixed-size 4k pages (or larger if used with an increment | ||
47 | * higher than 1). Use set_fixmap(idx, phys) to associate | ||
48 | * physical memory with fixmap indices. | ||
49 | * | ||
50 | * TLB entries of such buffers will not be flushed across | ||
51 | * task switches. | ||
52 | */ | ||
53 | enum fixed_addresses { | ||
54 | FIX_HOLE, | ||
55 | FIX_VSYSCALL, | ||
56 | #ifdef CONFIG_X86_LOCAL_APIC | ||
57 | FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */ | ||
58 | #endif | ||
59 | #ifdef CONFIG_X86_IO_APIC | ||
60 | FIX_IO_APIC_BASE_0, | ||
61 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, | ||
62 | #endif | ||
63 | #ifdef CONFIG_X86_VISWS_APIC | ||
64 | FIX_CO_CPU, /* Cobalt timer */ | ||
65 | FIX_CO_APIC, /* Cobalt APIC Redirection Table */ | ||
66 | FIX_LI_PCIA, /* Lithium PCI Bridge A */ | ||
67 | FIX_LI_PCIB, /* Lithium PCI Bridge B */ | ||
68 | #endif | ||
69 | #ifdef CONFIG_X86_F00F_BUG | ||
70 | FIX_F00F_IDT, /* Virtual mapping for IDT */ | ||
71 | #endif | ||
72 | #ifdef CONFIG_X86_CYCLONE_TIMER | ||
73 | FIX_CYCLONE_TIMER, /*cyclone timer register*/ | ||
74 | #endif | ||
75 | #ifdef CONFIG_HIGHMEM | ||
76 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
77 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
78 | #endif | ||
79 | #ifdef CONFIG_ACPI_BOOT | ||
80 | FIX_ACPI_BEGIN, | ||
81 | FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, | ||
82 | #endif | ||
83 | #ifdef CONFIG_PCI_MMCONFIG | ||
84 | FIX_PCIE_MCFG, | ||
85 | #endif | ||
86 | __end_of_permanent_fixed_addresses, | ||
87 | /* temporary boot-time mappings, used before ioremap() is functional */ | ||
88 | #define NR_FIX_BTMAPS 16 | ||
89 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses, | ||
90 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, | ||
91 | FIX_WP_TEST, | ||
92 | __end_of_fixed_addresses | ||
93 | }; | ||
94 | |||
95 | extern void __set_fixmap (enum fixed_addresses idx, | ||
96 | unsigned long phys, pgprot_t flags); | ||
97 | |||
98 | #define set_fixmap(idx, phys) \ | ||
99 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
100 | /* | ||
101 | * Some hardware wants to get fixmapped without caching. | ||
102 | */ | ||
103 | #define set_fixmap_nocache(idx, phys) \ | ||
104 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
105 | |||
106 | #define clear_fixmap(idx) \ | ||
107 | __set_fixmap(idx, 0, __pgprot(0)) | ||
108 | |||
109 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) | ||
110 | |||
111 | #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
112 | #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
113 | #define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) | ||
114 | #define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) | ||
115 | |||
116 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
117 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
118 | |||
119 | /* | ||
120 | * This is the range that is readable by user mode, and things | ||
121 | * acting like user mode such as get_user_pages. | ||
122 | */ | ||
123 | #define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL)) | ||
124 | #define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) | ||
125 | |||
126 | |||
127 | extern void __this_fixmap_does_not_exist(void); | ||
128 | |||
129 | /* | ||
130 | * 'index to address' translation. If anyone tries to use the idx | ||
131 | * directly without translation, we catch the bug with a NULL-dereference | ||
132 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
133 | */ | ||
134 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
135 | { | ||
136 | /* | ||
137 | * this branch gets completely eliminated after inlining, | ||
138 | * except when someone tries to use fixaddr indices in an | ||
139 | * illegal way (such as mixing up address types or using | ||
140 | * out-of-range indices). | ||
141 | * | ||
142 | * If it doesn't get removed, the linker will complain | ||
143 | * loudly with a reasonably clear error message.. | ||
144 | */ | ||
145 | if (idx >= __end_of_fixed_addresses) | ||
146 | __this_fixmap_does_not_exist(); | ||
147 | |||
148 | return __fix_to_virt(idx); | ||
149 | } | ||
150 | |||
151 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
152 | { | ||
153 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
154 | return __virt_to_fix(vaddr); | ||
155 | } | ||
156 | |||
157 | #endif /* !__ASSEMBLY__ */ | ||
158 | #endif | ||
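__fix_to_virt() hands out one page per index, growing downwards from FIXADDR_TOP, so with __FIXADDR_TOP = 0xfffff000 and 4 KB pages index 0 is 0xfffff000, index 1 is 0xffffe000, and so on. A standalone sketch of the same arithmetic (constants copied from the header for illustration):

#include <stdio.h>

#define PAGE_SHIFT      12
#define FIXADDR_TOP     0xfffff000UL
#define FIX_TO_VIRT(x)  (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

int main(void)
{
        unsigned int idx;

        /* Print the virtual address backing the first few fixmap slots. */
        for (idx = 0; idx < 4; idx++)
                printf("fixmap index %u -> %#lx\n", idx, FIX_TO_VIRT(idx));
        return 0;
}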
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h new file mode 100644 index 000000000000..f4782284807a --- /dev/null +++ b/include/asm-i386/floppy.h | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * Architecture specific parts of the Floppy driver | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1995 | ||
9 | */ | ||
10 | #ifndef __ASM_I386_FLOPPY_H | ||
11 | #define __ASM_I386_FLOPPY_H | ||
12 | |||
13 | #include <linux/vmalloc.h> | ||
14 | |||
15 | |||
16 | /* | ||
17 | * The DMA channel used by the floppy controller cannot access data at | ||
18 | * addresses >= 16MB | ||
19 | * | ||
20 | * Went back to the 1MB limit, as some people had problems with the floppy | ||
21 | * driver otherwise. It doesn't matter much for performance anyway, as most | ||
22 | * floppy accesses go through the track buffer. | ||
23 | */ | ||
24 | #define _CROSS_64KB(a,s,vdma) \ | ||
25 | (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) | ||
26 | |||
27 | #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) | ||
28 | |||
29 | |||
30 | #define SW fd_routine[use_virtual_dma&1] | ||
31 | #define CSW fd_routine[can_use_virtual_dma & 1] | ||
32 | |||
33 | |||
34 | #define fd_inb(port) inb_p(port) | ||
35 | #define fd_outb(value,port) outb_p(value,port) | ||
36 | |||
37 | #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") | ||
38 | #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) | ||
39 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) | ||
40 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) | ||
41 | #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) | ||
42 | #define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) | ||
43 | #define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) | ||
44 | #define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) | ||
45 | |||
46 | #define FLOPPY_CAN_FALLBACK_ON_NODMA | ||
47 | |||
48 | static int virtual_dma_count; | ||
49 | static int virtual_dma_residue; | ||
50 | static char *virtual_dma_addr; | ||
51 | static int virtual_dma_mode; | ||
52 | static int doing_pdma; | ||
53 | |||
54 | static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs) | ||
55 | { | ||
56 | register unsigned char st; | ||
57 | |||
58 | #undef TRACE_FLPY_INT | ||
59 | #define NO_FLOPPY_ASSEMBLER | ||
60 | |||
61 | #ifdef TRACE_FLPY_INT | ||
62 | static int calls=0; | ||
63 | static int bytes=0; | ||
64 | static int dma_wait=0; | ||
65 | #endif | ||
66 | if (!doing_pdma) | ||
67 | return floppy_interrupt(irq, dev_id, regs); | ||
68 | |||
69 | #ifdef TRACE_FLPY_INT | ||
70 | if(!calls) | ||
71 | bytes = virtual_dma_count; | ||
72 | #endif | ||
73 | |||
74 | #ifndef NO_FLOPPY_ASSEMBLER | ||
75 | __asm__ ( | ||
76 | "testl %1,%1" | ||
77 | "je 3f" | ||
78 | "1: inb %w4,%b0" | ||
79 | "andb $160,%b0" | ||
80 | "cmpb $160,%b0" | ||
81 | "jne 2f" | ||
82 | "incw %w4" | ||
83 | "testl %3,%3" | ||
84 | "jne 4f" | ||
85 | "inb %w4,%b0" | ||
86 | "movb %0,(%2)" | ||
87 | "jmp 5f" | ||
88 | "4: movb (%2),%0" | ||
89 | "outb %b0,%w4" | ||
90 | "5: decw %w4" | ||
91 | "outb %0,$0x80" | ||
92 | "decl %1" | ||
93 | "incl %2" | ||
94 | "testl %1,%1" | ||
95 | "jne 1b" | ||
96 | "3: inb %w4,%b0" | ||
97 | "2: " | ||
98 | : "=a" ((char) st), | ||
99 | "=c" ((long) virtual_dma_count), | ||
100 | "=S" ((long) virtual_dma_addr) | ||
101 | : "b" ((long) virtual_dma_mode), | ||
102 | "d" ((short) virtual_dma_port+4), | ||
103 | "1" ((long) virtual_dma_count), | ||
104 | "2" ((long) virtual_dma_addr)); | ||
105 | #else | ||
106 | { | ||
107 | register int lcount; | ||
108 | register char *lptr; | ||
109 | |||
110 | st = 1; | ||
111 | for(lcount=virtual_dma_count, lptr=virtual_dma_addr; | ||
112 | lcount; lcount--, lptr++) { | ||
113 | st=inb(virtual_dma_port+4) & 0xa0 ; | ||
114 | if(st != 0xa0) | ||
115 | break; | ||
116 | if(virtual_dma_mode) | ||
117 | outb_p(*lptr, virtual_dma_port+5); | ||
118 | else | ||
119 | *lptr = inb_p(virtual_dma_port+5); | ||
120 | } | ||
121 | virtual_dma_count = lcount; | ||
122 | virtual_dma_addr = lptr; | ||
123 | st = inb(virtual_dma_port+4); | ||
124 | } | ||
125 | #endif | ||
126 | |||
127 | #ifdef TRACE_FLPY_INT | ||
128 | calls++; | ||
129 | #endif | ||
130 | if(st == 0x20) | ||
131 | return IRQ_HANDLED; | ||
132 | if(!(st & 0x20)) { | ||
133 | virtual_dma_residue += virtual_dma_count; | ||
134 | virtual_dma_count=0; | ||
135 | #ifdef TRACE_FLPY_INT | ||
136 | printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", | ||
137 | virtual_dma_count, virtual_dma_residue, calls, bytes, | ||
138 | dma_wait); | ||
139 | calls = 0; | ||
140 | dma_wait=0; | ||
141 | #endif | ||
142 | doing_pdma = 0; | ||
143 | floppy_interrupt(irq, dev_id, regs); | ||
144 | return IRQ_HANDLED; | ||
145 | } | ||
146 | #ifdef TRACE_FLPY_INT | ||
147 | if(!virtual_dma_count) | ||
148 | dma_wait++; | ||
149 | #endif | ||
150 | return IRQ_HANDLED; | ||
151 | } | ||
152 | |||
153 | static void fd_disable_dma(void) | ||
154 | { | ||
155 | if(! (can_use_virtual_dma & 1)) | ||
156 | disable_dma(FLOPPY_DMA); | ||
157 | doing_pdma = 0; | ||
158 | virtual_dma_residue += virtual_dma_count; | ||
159 | virtual_dma_count=0; | ||
160 | } | ||
161 | |||
162 | static int vdma_request_dma(unsigned int dmanr, const char * device_id) | ||
163 | { | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static void vdma_nop(unsigned int dummy) | ||
168 | { | ||
169 | } | ||
170 | |||
171 | |||
172 | static int vdma_get_dma_residue(unsigned int dummy) | ||
173 | { | ||
174 | return virtual_dma_count + virtual_dma_residue; | ||
175 | } | ||
176 | |||
177 | |||
178 | static int fd_request_irq(void) | ||
179 | { | ||
180 | if(can_use_virtual_dma) | ||
181 | return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, | ||
182 | "floppy", NULL); | ||
183 | else | ||
184 | return request_irq(FLOPPY_IRQ, floppy_interrupt, | ||
185 | SA_INTERRUPT|SA_SAMPLE_RANDOM, | ||
186 | "floppy", NULL); | ||
187 | |||
188 | } | ||
189 | |||
190 | static unsigned long dma_mem_alloc(unsigned long size) | ||
191 | { | ||
192 | return __get_dma_pages(GFP_KERNEL,get_order(size)); | ||
193 | } | ||
194 | |||
195 | |||
196 | static unsigned long vdma_mem_alloc(unsigned long size) | ||
197 | { | ||
198 | return (unsigned long) vmalloc(size); | ||
199 | |||
200 | } | ||
201 | |||
202 | #define nodma_mem_alloc(size) vdma_mem_alloc(size) | ||
203 | |||
204 | static void _fd_dma_mem_free(unsigned long addr, unsigned long size) | ||
205 | { | ||
206 | if((unsigned int) addr >= (unsigned int) high_memory) | ||
207 | vfree((void *)addr); | ||
208 | else | ||
209 | free_pages(addr, get_order(size)); | ||
210 | } | ||
211 | |||
212 | #define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) | ||
213 | |||
214 | static void _fd_chose_dma_mode(char *addr, unsigned long size) | ||
215 | { | ||
216 | if(can_use_virtual_dma == 2) { | ||
217 | if((unsigned int) addr >= (unsigned int) high_memory || | ||
218 | isa_virt_to_bus(addr) >= 0x1000000 || | ||
219 | _CROSS_64KB(addr, size, 0)) | ||
220 | use_virtual_dma = 1; | ||
221 | else | ||
222 | use_virtual_dma = 0; | ||
223 | } else { | ||
224 | use_virtual_dma = can_use_virtual_dma & 1; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | #define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) | ||
229 | |||
230 | |||
231 | static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) | ||
232 | { | ||
233 | doing_pdma = 1; | ||
234 | virtual_dma_port = io; | ||
235 | virtual_dma_mode = (mode == DMA_MODE_WRITE); | ||
236 | virtual_dma_addr = addr; | ||
237 | virtual_dma_count = size; | ||
238 | virtual_dma_residue = 0; | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) | ||
243 | { | ||
244 | #ifdef FLOPPY_SANITY_CHECK | ||
245 | if (CROSS_64KB(addr, size)) { | ||
246 | printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); | ||
247 | return -1; | ||
248 | } | ||
249 | #endif | ||
250 | /* actual, physical DMA */ | ||
251 | doing_pdma = 0; | ||
252 | clear_dma_ff(FLOPPY_DMA); | ||
253 | set_dma_mode(FLOPPY_DMA,mode); | ||
254 | set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); | ||
255 | set_dma_count(FLOPPY_DMA,size); | ||
256 | enable_dma(FLOPPY_DMA); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | struct fd_routine_l { | ||
261 | int (*_request_dma)(unsigned int dmanr, const char * device_id); | ||
262 | void (*_free_dma)(unsigned int dmanr); | ||
263 | int (*_get_dma_residue)(unsigned int dummy); | ||
264 | unsigned long (*_dma_mem_alloc) (unsigned long size); | ||
265 | int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); | ||
266 | } fd_routine[] = { | ||
267 | { | ||
268 | request_dma, | ||
269 | free_dma, | ||
270 | get_dma_residue, | ||
271 | dma_mem_alloc, | ||
272 | hard_dma_setup | ||
273 | }, | ||
274 | { | ||
275 | vdma_request_dma, | ||
276 | vdma_nop, | ||
277 | vdma_get_dma_residue, | ||
278 | vdma_mem_alloc, | ||
279 | vdma_dma_setup | ||
280 | } | ||
281 | }; | ||
282 | |||
283 | |||
284 | static int FDC1 = 0x3f0; | ||
285 | static int FDC2 = -1; | ||
286 | |||
287 | /* | ||
288 | * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock | ||
289 | * is needed to prevent corrupted CMOS RAM in case "insmod floppy" | ||
290 | * coincides with another rtc CMOS user. Paul G. | ||
291 | */ | ||
292 | #define FLOPPY0_TYPE ({ \ | ||
293 | unsigned long flags; \ | ||
294 | unsigned char val; \ | ||
295 | spin_lock_irqsave(&rtc_lock, flags); \ | ||
296 | val = (CMOS_READ(0x10) >> 4) & 15; \ | ||
297 | spin_unlock_irqrestore(&rtc_lock, flags); \ | ||
298 | val; \ | ||
299 | }) | ||
300 | |||
301 | #define FLOPPY1_TYPE ({ \ | ||
302 | unsigned long flags; \ | ||
303 | unsigned char val; \ | ||
304 | spin_lock_irqsave(&rtc_lock, flags); \ | ||
305 | val = CMOS_READ(0x10) & 15; \ | ||
306 | spin_unlock_irqrestore(&rtc_lock, flags); \ | ||
307 | val; \ | ||
308 | }) | ||
309 | |||
310 | #define N_FDC 2 | ||
311 | #define N_DRIVE 8 | ||
312 | |||
313 | #define FLOPPY_MOTOR_MASK 0xf0 | ||
314 | |||
315 | #define AUTO_DMA | ||
316 | |||
317 | #define EXTRA_FLOPPY_PARAMS | ||
318 | |||
319 | #endif /* __ASM_I386_FLOPPY_H */ | ||
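CROSS_64KB() exists because the 8237 ISA DMA controller cannot carry a single transfer across a 64 KB physical boundary; when a buffer would cross one, the floppy code falls back to virtual DMA. A standalone sketch of the same test (K_64 is assumed to be 0x10000, as in the floppy driver):

#include <stdio.h>

#define K_64    0x10000UL       /* 64 KB, the 8237 page size */

/* Nonzero if [addr, addr + size) straddles a 64 KB boundary. */
static int crosses_64kb(unsigned long addr, unsigned long size)
{
        return addr / K_64 != (addr + size - 1) / K_64;
}

int main(void)
{
        printf("%d\n", crosses_64kb(0xf000, 0x2000));   /* 1: crosses at 0x10000 */
        printf("%d\n", crosses_64kb(0x1000, 0x2000));   /* 0: fits in one 64 KB page */
        return 0;
}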
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h new file mode 100644 index 000000000000..fc813b2e8274 --- /dev/null +++ b/include/asm-i386/genapic.h | |||
@@ -0,0 +1,115 @@ | |||
1 | #ifndef _ASM_GENAPIC_H | ||
2 | #define _ASM_GENAPIC_H 1 | ||
3 | |||
4 | /* | ||
5 | * Generic APIC driver interface. | ||
6 | * | ||
7 | * A straightforward mapping of the APIC-related parts of the | ||
8 | * x86 subarchitecture interface to a dynamic object. | ||
9 | * | ||
10 | * This is used by the "generic" x86 subarchitecture. | ||
11 | * | ||
12 | * Copyright 2003 Andi Kleen, SuSE Labs. | ||
13 | */ | ||
14 | |||
15 | struct mpc_config_translation; | ||
16 | struct mpc_config_bus; | ||
17 | struct mp_config_table; | ||
18 | struct mpc_config_processor; | ||
19 | |||
20 | struct genapic { | ||
21 | char *name; | ||
22 | int (*probe)(void); | ||
23 | |||
24 | int (*apic_id_registered)(void); | ||
25 | cpumask_t (*target_cpus)(void); | ||
26 | int int_delivery_mode; | ||
27 | int int_dest_mode; | ||
28 | int ESR_DISABLE; | ||
29 | int apic_destination_logical; | ||
30 | unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); | ||
31 | unsigned long (*check_apicid_present)(int apicid); | ||
32 | int no_balance_irq; | ||
33 | int no_ioapic_check; | ||
34 | void (*init_apic_ldr)(void); | ||
35 | physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); | ||
36 | |||
37 | void (*clustered_apic_check)(void); | ||
38 | int (*multi_timer_check)(int apic, int irq); | ||
39 | int (*apicid_to_node)(int logical_apicid); | ||
40 | int (*cpu_to_logical_apicid)(int cpu); | ||
41 | int (*cpu_present_to_apicid)(int mps_cpu); | ||
42 | physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); | ||
43 | int (*mpc_apic_id)(struct mpc_config_processor *m, | ||
44 | struct mpc_config_translation *t); | ||
45 | void (*setup_portio_remap)(void); | ||
46 | int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); | ||
47 | void (*enable_apic_mode)(void); | ||
48 | u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); | ||
49 | |||
50 | /* mpparse */ | ||
51 | void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, | ||
52 | struct mpc_config_translation *); | ||
53 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *, | ||
54 | struct mpc_config_translation *); | ||
55 | |||
56 | /* When one of the next two hooks returns 1 the genapic | ||
57 | is switched to this. Essentially they are additional probe | ||
58 | functions. */ | ||
59 | int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, | ||
60 | char *productid); | ||
61 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); | ||
62 | |||
63 | unsigned (*get_apic_id)(unsigned long x); | ||
64 | unsigned long apic_id_mask; | ||
65 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); | ||
66 | |||
67 | /* ipi */ | ||
68 | void (*send_IPI_mask)(cpumask_t mask, int vector); | ||
69 | void (*send_IPI_allbutself)(int vector); | ||
70 | void (*send_IPI_all)(int vector); | ||
71 | }; | ||
72 | |||
73 | #define APICFUNC(x) .x = x | ||
74 | |||
75 | #define APIC_INIT(aname, aprobe) { \ | ||
76 | .name = aname, \ | ||
77 | .probe = aprobe, \ | ||
78 | .int_delivery_mode = INT_DELIVERY_MODE, \ | ||
79 | .int_dest_mode = INT_DEST_MODE, \ | ||
80 | .no_balance_irq = NO_BALANCE_IRQ, \ | ||
81 | .no_ioapic_check = NO_IOAPIC_CHECK, \ | ||
82 | .ESR_DISABLE = esr_disable, \ | ||
83 | .apic_destination_logical = APIC_DEST_LOGICAL, \ | ||
84 | APICFUNC(apic_id_registered), \ | ||
85 | APICFUNC(target_cpus), \ | ||
86 | APICFUNC(check_apicid_used), \ | ||
87 | APICFUNC(check_apicid_present), \ | ||
88 | APICFUNC(init_apic_ldr), \ | ||
89 | APICFUNC(ioapic_phys_id_map), \ | ||
90 | APICFUNC(clustered_apic_check), \ | ||
91 | APICFUNC(multi_timer_check), \ | ||
92 | APICFUNC(apicid_to_node), \ | ||
93 | APICFUNC(cpu_to_logical_apicid), \ | ||
94 | APICFUNC(cpu_present_to_apicid), \ | ||
95 | APICFUNC(apicid_to_cpu_present), \ | ||
96 | APICFUNC(mpc_apic_id), \ | ||
97 | APICFUNC(setup_portio_remap), \ | ||
98 | APICFUNC(check_phys_apicid_present), \ | ||
99 | APICFUNC(mpc_oem_bus_info), \ | ||
100 | APICFUNC(mpc_oem_pci_bus), \ | ||
101 | APICFUNC(mps_oem_check), \ | ||
102 | APICFUNC(get_apic_id), \ | ||
103 | .apic_id_mask = APIC_ID_MASK, \ | ||
104 | APICFUNC(cpu_mask_to_apicid), \ | ||
105 | APICFUNC(acpi_madt_oem_check), \ | ||
106 | APICFUNC(send_IPI_mask), \ | ||
107 | APICFUNC(send_IPI_allbutself), \ | ||
108 | APICFUNC(send_IPI_all), \ | ||
109 | APICFUNC(enable_apic_mode), \ | ||
110 | APICFUNC(phys_pkg_id), \ | ||
111 | } | ||
112 | |||
113 | extern struct genapic *genapic; | ||
114 | |||
115 | #endif | ||
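APICFUNC(x) relies on a C99 designated initializer whose field name matches the symbol name, which is what lets APIC_INIT() populate the large struct genapic from a subarchitecture's identically named functions. A small standalone illustration of that pattern (the ops struct and probe function are invented for the example):

#include <stdio.h>

struct ops {
        const char *name;
        int (*probe)(void);
};

#define OPSFUNC(x)      .x = x          /* same trick as APICFUNC() */

static int probe(void) { return 1; }

static struct ops example = {
        .name = "example",
        OPSFUNC(probe),                 /* expands to .probe = probe */
};

int main(void)
{
        printf("%s probe() = %d\n", example.name, example.probe());
        return 0;
}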
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h new file mode 100644 index 000000000000..ee754d359734 --- /dev/null +++ b/include/asm-i386/hardirq.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #ifndef __ASM_HARDIRQ_H | ||
2 | #define __ASM_HARDIRQ_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/threads.h> | ||
6 | #include <linux/irq.h> | ||
7 | |||
8 | typedef struct { | ||
9 | unsigned int __softirq_pending; | ||
10 | unsigned long idle_timestamp; | ||
11 | unsigned int __nmi_count; /* arch dependent */ | ||
12 | unsigned int apic_timer_irqs; /* arch dependent */ | ||
13 | } ____cacheline_aligned irq_cpustat_t; | ||
14 | |||
15 | DECLARE_PER_CPU(irq_cpustat_t, irq_stat); | ||
16 | extern irq_cpustat_t irq_stat[]; | ||
17 | |||
18 | #define __ARCH_IRQ_STAT | ||
19 | #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) | ||
20 | |||
21 | void ack_bad_irq(unsigned int irq); | ||
22 | #include <linux/irq_cpustat.h> | ||
23 | |||
24 | #endif /* __ASM_HARDIRQ_H */ | ||
diff --git a/include/asm-i386/hdreg.h b/include/asm-i386/hdreg.h new file mode 100644 index 000000000000..5989bbc97cbf --- /dev/null +++ b/include/asm-i386/hdreg.h | |||
@@ -0,0 +1 @@ | |||
#warning this file is obsolete, please do not use it | |||
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h new file mode 100644 index 000000000000..1df42bf347df --- /dev/null +++ b/include/asm-i386/highmem.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * highmem.h: virtual kernel memory mappings for high memory | ||
3 | * | ||
4 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
5 | * are not addressable by direct kernel virtual addresses. | ||
6 | * | ||
7 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
8 | * Gerhard.Wichert@pdb.siemens.de | ||
9 | * | ||
10 | * | ||
11 | * Redesigned the x86 32-bit VM architecture to deal with | ||
12 | * up to 16 Terabyte physical memory. With current x86 CPUs | ||
13 | * we now support up to 64 Gigabytes physical RAM. | ||
14 | * | ||
15 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_HIGHMEM_H | ||
19 | #define _ASM_HIGHMEM_H | ||
20 | |||
21 | #ifdef __KERNEL__ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/kmap_types.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | |||
29 | /* declarations for highmem.c */ | ||
30 | extern unsigned long highstart_pfn, highend_pfn; | ||
31 | |||
32 | extern pte_t *kmap_pte; | ||
33 | extern pgprot_t kmap_prot; | ||
34 | extern pte_t *pkmap_page_table; | ||
35 | |||
36 | /* | ||
37 | * Right now we initialize only a single pte table. It can be extended | ||
38 | * easily; subsequent pte tables have to be allocated in one physical | ||
39 | * chunk of RAM. | ||
40 | */ | ||
41 | #ifdef CONFIG_X86_PAE | ||
42 | #define LAST_PKMAP 512 | ||
43 | #else | ||
44 | #define LAST_PKMAP 1024 | ||
45 | #endif | ||
46 | /* | ||
47 | * Ordering is: | ||
48 | * | ||
49 | * FIXADDR_TOP | ||
50 | * fixed_addresses | ||
51 | * FIXADDR_START | ||
52 | * temp fixed addresses | ||
53 | * FIXADDR_BOOT_START | ||
54 | * Persistent kmap area | ||
55 | * PKMAP_BASE | ||
56 | * VMALLOC_END | ||
57 | * Vmalloc area | ||
58 | * VMALLOC_START | ||
59 | * high_memory | ||
60 | */ | ||
61 | #define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) | ||
62 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | ||
63 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | ||
64 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
65 | |||
66 | extern void * FASTCALL(kmap_high(struct page *page)); | ||
67 | extern void FASTCALL(kunmap_high(struct page *page)); | ||
68 | |||
69 | void *kmap(struct page *page); | ||
70 | void kunmap(struct page *page); | ||
71 | void *kmap_atomic(struct page *page, enum km_type type); | ||
72 | void kunmap_atomic(void *kvaddr, enum km_type type); | ||
73 | struct page *kmap_atomic_to_page(void *ptr); | ||
74 | |||
75 | #define flush_cache_kmaps() do { } while (0) | ||
76 | |||
77 | #endif /* __KERNEL__ */ | ||
78 | |||
79 | #endif /* _ASM_HIGHMEM_H */ | ||
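PKMAP_NR() and PKMAP_ADDR() convert between a persistent-kmap slot and its virtual address inside the LAST_PKMAP-page window that starts at PKMAP_BASE. A standalone sketch of the round trip (4 KB pages and a made-up PKMAP_BASE; the real value depends on FIXADDR_BOOT_START):

#include <stdio.h>

#define PAGE_SHIFT      12
#define LAST_PKMAP      1024                    /* non-PAE value from the header */
#define PKMAP_BASE      0xff800000UL            /* example value only */
#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
        unsigned long addr = PKMAP_ADDR(5);

        /* Round-trip: slot 5 -> address -> slot 5 again. */
        printf("slot 5 maps to %#lx, back to slot %lu\n", addr, PKMAP_NR(addr));
        return 0;
}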
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h new file mode 100644 index 000000000000..6e20b079f1d3 --- /dev/null +++ b/include/asm-i386/hpet.h | |||
@@ -0,0 +1,113 @@ | |||
1 | |||
2 | #ifndef _I386_HPET_H | ||
3 | #define _I386_HPET_H | ||
4 | |||
5 | #ifdef CONFIG_HPET_TIMER | ||
6 | |||
7 | #include <linux/errno.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/param.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/time.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/smp.h> | ||
19 | |||
20 | #include <asm/io.h> | ||
21 | #include <asm/smp.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/msr.h> | ||
24 | #include <asm/delay.h> | ||
25 | #include <asm/mpspec.h> | ||
26 | #include <asm/uaccess.h> | ||
27 | #include <asm/processor.h> | ||
28 | |||
29 | #include <linux/timex.h> | ||
30 | #include <linux/config.h> | ||
31 | |||
32 | #include <asm/fixmap.h> | ||
33 | |||
34 | /* | ||
35 | * Documentation on HPET can be found at: | ||
36 | * http://www.intel.com/ial/home/sp/pcmmspec.htm | ||
37 | * ftp://download.intel.com/ial/home/sp/mmts098.pdf | ||
38 | */ | ||
39 | |||
40 | #define HPET_MMAP_SIZE 1024 | ||
41 | |||
42 | #define HPET_ID 0x000 | ||
43 | #define HPET_PERIOD 0x004 | ||
44 | #define HPET_CFG 0x010 | ||
45 | #define HPET_STATUS 0x020 | ||
46 | #define HPET_COUNTER 0x0f0 | ||
47 | #define HPET_T0_CFG 0x100 | ||
48 | #define HPET_T0_CMP 0x108 | ||
49 | #define HPET_T0_ROUTE 0x110 | ||
50 | #define HPET_T1_CFG 0x120 | ||
51 | #define HPET_T1_CMP 0x128 | ||
52 | #define HPET_T1_ROUTE 0x130 | ||
53 | #define HPET_T2_CFG 0x140 | ||
54 | #define HPET_T2_CMP 0x148 | ||
55 | #define HPET_T2_ROUTE 0x150 | ||
56 | |||
57 | #define HPET_ID_LEGSUP 0x00008000 | ||
58 | #define HPET_ID_NUMBER 0x00001f00 | ||
59 | #define HPET_ID_REV 0x000000ff | ||
60 | #define HPET_ID_NUMBER_SHIFT 8 | ||
61 | |||
62 | #define HPET_CFG_ENABLE 0x001 | ||
63 | #define HPET_CFG_LEGACY 0x002 | ||
64 | #define HPET_LEGACY_8254 2 | ||
65 | #define HPET_LEGACY_RTC 8 | ||
66 | |||
67 | #define HPET_TN_ENABLE 0x004 | ||
68 | #define HPET_TN_PERIODIC 0x008 | ||
69 | #define HPET_TN_PERIODIC_CAP 0x010 | ||
70 | #define HPET_TN_SETVAL 0x040 | ||
71 | #define HPET_TN_32BIT 0x100 | ||
72 | |||
73 | /* Use our own asm for 64 bit multiply/divide */ | ||
74 | #define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) \ | ||
75 | __asm__ __volatile__("mull %2" \ | ||
76 | :"=a" (eax_out), "=d" (edx_out) \ | ||
77 | :"r" (reg_in), "0" (eax_in)) | ||
78 | |||
79 | #define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) \ | ||
80 | __asm__ __volatile__("divl %2" \ | ||
81 | :"=a" (eax_out), "=d" (edx_out) \ | ||
82 | :"r" (reg_in), "0" (eax_in), "1" (edx_in)) | ||
83 | |||
84 | #define KERNEL_TICK_USEC (1000000UL/HZ) /* tick value in microsec */ | ||
85 | /* Max HPET Period is 10^8 femto sec as in HPET spec */ | ||
86 | #define HPET_MAX_PERIOD (100000000UL) | ||
87 | /* | ||
88 | * Min HPET period is 10^5 femto sec just for safety. If it is less than this, | ||
89 | * then the 32-bit HPET counter wraps around in less than 0.5 sec. | ||
90 | */ | ||
91 | #define HPET_MIN_PERIOD (100000UL) | ||
92 | |||
93 | extern unsigned long hpet_tick; /* hpet clks count per tick */ | ||
94 | extern unsigned long hpet_address; /* hpet memory map physical address */ | ||
95 | |||
96 | extern int hpet_rtc_timer_init(void); | ||
97 | extern int hpet_enable(void); | ||
98 | extern int hpet_reenable(void); | ||
99 | extern int is_hpet_enabled(void); | ||
100 | extern int is_hpet_capable(void); | ||
101 | extern int hpet_readl(unsigned long a); | ||
102 | |||
103 | #ifdef CONFIG_HPET_EMULATE_RTC | ||
104 | extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); | ||
105 | extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); | ||
106 | extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec); | ||
107 | extern int hpet_set_periodic_freq(unsigned long freq); | ||
108 | extern int hpet_rtc_dropped_irq(void); | ||
109 | extern int hpet_rtc_timer_init(void); | ||
110 | extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs); | ||
111 | #endif /* CONFIG_HPET_EMULATE_RTC */ | ||
112 | #endif /* CONFIG_HPET_TIMER */ | ||
113 | #endif /* _I386_HPET_H */ | ||
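HPET_PERIOD is expressed in femtoseconds, so the number of HPET counts per kernel tick is KERNEL_TICK_USEC * 10^9 fs divided by that period, with HPET_MIN_PERIOD/HPET_MAX_PERIOD used to reject implausible hardware values. A plain-C sketch of the calculation (HZ = 1000 is an assumption, and ordinary 64-bit arithmetic stands in for the ASM_MUL64/DIV64 macros):

#include <stdio.h>

#define HZ                      1000
#define KERNEL_TICK_USEC        (1000000UL / HZ)        /* microseconds per tick */
#define HPET_MIN_PERIOD         100000UL                /* femtoseconds */
#define HPET_MAX_PERIOD         100000000UL             /* femtoseconds */

/* HPET counts per kernel tick, or 0 if the reported period looks bogus. */
static unsigned long hpet_counts_per_tick(unsigned long period_fs)
{
        if (period_fs < HPET_MIN_PERIOD || period_fs > HPET_MAX_PERIOD)
                return 0;
        return (unsigned long)(KERNEL_TICK_USEC * 1000000000ULL / period_fs);
}

int main(void)
{
        /* ~69841279 fs is a common HPET period (14.318 MHz). */
        printf("%lu counts per tick\n", hpet_counts_per_tick(69841279UL));
        return 0;
}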
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h new file mode 100644 index 000000000000..4ac84cc6f01a --- /dev/null +++ b/include/asm-i386/hw_irq.h | |||
@@ -0,0 +1,79 @@ | |||
1 | #ifndef _ASM_HW_IRQ_H | ||
2 | #define _ASM_HW_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/hw_irq.h | ||
6 | * | ||
7 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
8 | * | ||
9 | * moved some of the old arch/i386/kernel/irq.h to here. VY | ||
10 | * | ||
11 | * IRQ/IPI changes taken from work by Thomas Radke | ||
12 | * <tomsoft@informatik.tu-chemnitz.de> | ||
13 | */ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <linux/profile.h> | ||
17 | #include <asm/atomic.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include <asm/sections.h> | ||
20 | |||
21 | /* | ||
22 | * Various low-level irq details needed by irq.c, process.c, | ||
23 | * time.c, io_apic.c and smp.c | ||
24 | * | ||
25 | * Interrupt entry/exit code at both C and assembly level | ||
26 | */ | ||
27 | |||
28 | extern u8 irq_vector[NR_IRQ_VECTORS]; | ||
29 | #define IO_APIC_VECTOR(irq) (irq_vector[irq]) | ||
30 | #define AUTO_ASSIGN -1 | ||
31 | |||
32 | extern void (*interrupt[NR_IRQS])(void); | ||
33 | |||
34 | #ifdef CONFIG_SMP | ||
35 | fastcall void reschedule_interrupt(void); | ||
36 | fastcall void invalidate_interrupt(void); | ||
37 | fastcall void call_function_interrupt(void); | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_X86_LOCAL_APIC | ||
41 | fastcall void apic_timer_interrupt(void); | ||
42 | fastcall void error_interrupt(void); | ||
43 | fastcall void spurious_interrupt(void); | ||
44 | fastcall void thermal_interrupt(struct pt_regs *); | ||
45 | #define platform_legacy_irq(irq) ((irq) < 16) | ||
46 | #endif | ||
47 | |||
48 | void disable_8259A_irq(unsigned int irq); | ||
49 | void enable_8259A_irq(unsigned int irq); | ||
50 | int i8259A_irq_pending(unsigned int irq); | ||
51 | void make_8259A_irq(unsigned int irq); | ||
52 | void init_8259A(int aeoi); | ||
53 | void FASTCALL(send_IPI_self(int vector)); | ||
54 | void init_VISWS_APIC_irqs(void); | ||
55 | void setup_IO_APIC(void); | ||
56 | void disable_IO_APIC(void); | ||
57 | void print_IO_APIC(void); | ||
58 | int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | ||
59 | void send_IPI(int dest, int vector); | ||
60 | void setup_ioapic_dest(void); | ||
61 | |||
62 | extern unsigned long io_apic_irqs; | ||
63 | |||
64 | extern atomic_t irq_err_count; | ||
65 | extern atomic_t irq_mis_count; | ||
66 | |||
67 | #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) | ||
68 | |||
69 | #if defined(CONFIG_X86_IO_APIC) | ||
70 | static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) | ||
71 | { | ||
72 | if (IO_APIC_IRQ(i)) | ||
73 | send_IPI_self(IO_APIC_VECTOR(i)); | ||
74 | } | ||
75 | #else | ||
76 | static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} | ||
77 | #endif | ||
78 | |||
79 | #endif /* _ASM_HW_IRQ_H */ | ||
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h new file mode 100644 index 000000000000..f6feb98a9397 --- /dev/null +++ b/include/asm-i386/i387.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * include/asm-i386/i387.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | * | ||
6 | * Pentium III FXSR, SSE support | ||
7 | * General FPU state handling cleanups | ||
8 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_I386_I387_H | ||
12 | #define __ASM_I386_I387_H | ||
13 | |||
14 | #include <linux/sched.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/sigcontext.h> | ||
18 | #include <asm/user.h> | ||
19 | |||
20 | extern void mxcsr_feature_mask_init(void); | ||
21 | extern void init_fpu(struct task_struct *); | ||
22 | /* | ||
23 | * FPU lazy state save handling... | ||
24 | */ | ||
25 | extern void restore_fpu( struct task_struct *tsk ); | ||
26 | |||
27 | extern void kernel_fpu_begin(void); | ||
28 | #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) | ||
29 | |||
30 | /* | ||
31 | * These must be called with preempt disabled | ||
32 | */ | ||
33 | static inline void __save_init_fpu( struct task_struct *tsk ) | ||
34 | { | ||
35 | if ( cpu_has_fxsr ) { | ||
36 | asm volatile( "fxsave %0 ; fnclex" | ||
37 | : "=m" (tsk->thread.i387.fxsave) ); | ||
38 | } else { | ||
39 | asm volatile( "fnsave %0 ; fwait" | ||
40 | : "=m" (tsk->thread.i387.fsave) ); | ||
41 | } | ||
42 | tsk->thread_info->status &= ~TS_USEDFPU; | ||
43 | } | ||
44 | |||
45 | #define __unlazy_fpu( tsk ) do { \ | ||
46 | if ((tsk)->thread_info->status & TS_USEDFPU) \ | ||
47 | save_init_fpu( tsk ); \ | ||
48 | } while (0) | ||
49 | |||
50 | #define __clear_fpu( tsk ) \ | ||
51 | do { \ | ||
52 | if ((tsk)->thread_info->status & TS_USEDFPU) { \ | ||
53 | asm volatile("fnclex ; fwait"); \ | ||
54 | (tsk)->thread_info->status &= ~TS_USEDFPU; \ | ||
55 | stts(); \ | ||
56 | } \ | ||
57 | } while (0) | ||
58 | |||
59 | |||
60 | /* | ||
61 | * These disable preemption on their own and are safe | ||
62 | */ | ||
63 | static inline void save_init_fpu( struct task_struct *tsk ) | ||
64 | { | ||
65 | preempt_disable(); | ||
66 | __save_init_fpu(tsk); | ||
67 | stts(); | ||
68 | preempt_enable(); | ||
69 | } | ||
70 | |||
71 | #define unlazy_fpu( tsk ) do { \ | ||
72 | preempt_disable(); \ | ||
73 | __unlazy_fpu(tsk); \ | ||
74 | preempt_enable(); \ | ||
75 | } while (0) | ||
76 | |||
77 | #define clear_fpu( tsk ) do { \ | ||
78 | preempt_disable(); \ | ||
79 | __clear_fpu( tsk ); \ | ||
80 | preempt_enable(); \ | ||
81 | } while (0) | ||
82 | \ | ||
83 | /* | ||
84 | * FPU state interaction... | ||
85 | */ | ||
86 | extern unsigned short get_fpu_cwd( struct task_struct *tsk ); | ||
87 | extern unsigned short get_fpu_swd( struct task_struct *tsk ); | ||
88 | extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); | ||
89 | |||
90 | /* | ||
91 | * Signal frame handlers... | ||
92 | */ | ||
93 | extern int save_i387( struct _fpstate __user *buf ); | ||
94 | extern int restore_i387( struct _fpstate __user *buf ); | ||
95 | |||
96 | /* | ||
97 | * ptrace request handers... | ||
98 | */ | ||
99 | extern int get_fpregs( struct user_i387_struct __user *buf, | ||
100 | struct task_struct *tsk ); | ||
101 | extern int set_fpregs( struct task_struct *tsk, | ||
102 | struct user_i387_struct __user *buf ); | ||
103 | |||
104 | extern int get_fpxregs( struct user_fxsr_struct __user *buf, | ||
105 | struct task_struct *tsk ); | ||
106 | extern int set_fpxregs( struct task_struct *tsk, | ||
107 | struct user_fxsr_struct __user *buf ); | ||
108 | |||
109 | /* | ||
110 | * FPU state for core dumps... | ||
111 | */ | ||
112 | extern int dump_fpu( struct pt_regs *regs, | ||
113 | struct user_i387_struct *fpu ); | ||
114 | |||
115 | #endif /* __ASM_I386_I387_H */ | ||
diff --git a/include/asm-i386/i8259.h b/include/asm-i386/i8259.h new file mode 100644 index 000000000000..29d8f9a6b3fc --- /dev/null +++ b/include/asm-i386/i8259.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __ASM_I8259_H__ | ||
2 | #define __ASM_I8259_H__ | ||
3 | |||
4 | extern unsigned int cached_irq_mask; | ||
5 | |||
6 | #define __byte(x,y) (((unsigned char *) &(y))[x]) | ||
7 | #define cached_master_mask (__byte(0, cached_irq_mask)) | ||
8 | #define cached_slave_mask (__byte(1, cached_irq_mask)) | ||
9 | |||
10 | extern spinlock_t i8259A_lock; | ||
11 | |||
12 | extern void init_8259A(int auto_eoi); | ||
13 | extern void enable_8259A_irq(unsigned int irq); | ||
14 | extern void disable_8259A_irq(unsigned int irq); | ||
15 | extern unsigned int startup_8259A_irq(unsigned int irq); | ||
16 | |||
17 | #endif /* __ASM_I8259_H__ */ | ||
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h new file mode 100644 index 000000000000..859ebf4da632 --- /dev/null +++ b/include/asm-i386/ide.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/ide.h | ||
3 | * | ||
4 | * Copyright (C) 1994-1996 Linus Torvalds & authors | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This file contains the i386 architecture specific IDE code. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASMi386_IDE_H | ||
12 | #define __ASMi386_IDE_H | ||
13 | |||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | |||
18 | #ifndef MAX_HWIFS | ||
19 | # ifdef CONFIG_BLK_DEV_IDEPCI | ||
20 | #define MAX_HWIFS 10 | ||
21 | # else | ||
22 | #define MAX_HWIFS 6 | ||
23 | # endif | ||
24 | #endif | ||
25 | |||
26 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
27 | |||
28 | static __inline__ int ide_default_irq(unsigned long base) | ||
29 | { | ||
30 | switch (base) { | ||
31 | case 0x1f0: return 14; | ||
32 | case 0x170: return 15; | ||
33 | case 0x1e8: return 11; | ||
34 | case 0x168: return 10; | ||
35 | case 0x1e0: return 8; | ||
36 | case 0x160: return 12; | ||
37 | default: | ||
38 | return 0; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | static __inline__ unsigned long ide_default_io_base(int index) | ||
43 | { | ||
44 | switch (index) { | ||
45 | case 0: return 0x1f0; | ||
46 | case 1: return 0x170; | ||
47 | case 2: return 0x1e8; | ||
48 | case 3: return 0x168; | ||
49 | case 4: return 0x1e0; | ||
50 | case 5: return 0x160; | ||
51 | default: | ||
52 | return 0; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | #define IDE_ARCH_OBSOLETE_INIT | ||
57 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
58 | |||
59 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
60 | #define ide_init_default_irq(base) (0) | ||
61 | #else | ||
62 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
63 | #endif | ||
64 | |||
65 | #include <asm-generic/ide_iops.h> | ||
66 | |||
67 | #endif /* __KERNEL__ */ | ||
68 | |||
69 | #endif /* __ASMi386_IDE_H */ | ||
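ide_default_io_base() and ide_default_irq() together encode the legacy ISA probe table (0x1f0/IRQ 14, 0x170/IRQ 15, and so on). A standalone sketch that prints that table using the same mapping:

#include <stdio.h>

/* Same legacy port/IRQ mapping as the two helpers above, for illustration. */
static unsigned long default_io_base(int index)
{
        static const unsigned long bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
        return (index >= 0 && index < 6) ? bases[index] : 0;
}

static int default_irq(unsigned long base)
{
        switch (base) {
        case 0x1f0: return 14;
        case 0x170: return 15;
        case 0x1e8: return 11;
        case 0x168: return 10;
        case 0x1e0: return 8;
        case 0x160: return 12;
        default:    return 0;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 6; i++)
                printf("hwif %d: base %#lx, irq %d\n",
                       i, default_io_base(i), default_irq(default_io_base(i)));
        return 0;
}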
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h new file mode 100644 index 000000000000..7babb97a02eb --- /dev/null +++ b/include/asm-i386/io.h | |||
@@ -0,0 +1,381 @@ | |||
1 | #ifndef _ASM_IO_H | ||
2 | #define _ASM_IO_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/string.h> | ||
6 | #include <linux/compiler.h> | ||
7 | |||
8 | /* | ||
9 | * This file contains the definitions for the x86 IO instructions | ||
10 | * inb/inw/inl/outb/outw/outl and the "string versions" of the same | ||
11 | * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" | ||
12 | * versions of the single-IO instructions (inb_p/inw_p/..). | ||
13 | * | ||
14 | * This file is not meant to be obfuscating: it's just complicated | ||
15 | * to (a) handle it all in a way that makes gcc able to optimize it | ||
16 | * as well as possible and (b) avoid writing the same thing | ||
17 | * over and over again with slight variations and possibly making a | ||
18 | * mistake somewhere. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | * Thanks to James van Artsdalen for a better timing-fix than | ||
23 | * the two short jumps: using outb's to a nonexistent port seems | ||
24 | * to guarantee better timings even on fast machines. | ||
25 | * | ||
26 | * On the other hand, I'd like to be sure of a non-existent port: | ||
27 | * I feel a bit unsafe about using 0x80 (should be safe, though) | ||
28 | * | ||
29 | * Linus | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * Bit simplified and optimized by Jan Hubicka | ||
34 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. | ||
35 | * | ||
36 | * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, | ||
37 | * isa_read[wl] and isa_write[wl] fixed | ||
38 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | ||
39 | */ | ||
40 | |||
41 | #define IO_SPACE_LIMIT 0xffff | ||
42 | |||
43 | #define XQUAD_PORTIO_BASE 0xfe400000 | ||
44 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | ||
45 | |||
46 | #ifdef __KERNEL__ | ||
47 | |||
48 | #include <asm-generic/iomap.h> | ||
49 | |||
50 | #include <linux/vmalloc.h> | ||
51 | |||
52 | /* | ||
53 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
54 | * access | ||
55 | */ | ||
56 | #define xlate_dev_mem_ptr(p) __va(p) | ||
57 | |||
58 | /* | ||
59 | * Convert a virtual cached pointer to an uncached pointer | ||
60 | */ | ||
61 | #define xlate_dev_kmem_ptr(p) p | ||
62 | |||
63 | /** | ||
64 | * virt_to_phys - map virtual addresses to physical | ||
65 | * @address: address to remap | ||
66 | * | ||
67 | * The returned physical address is the physical (CPU) mapping for | ||
68 | * the memory address given. It is only valid to use this function on | ||
69 | * addresses directly mapped or allocated via kmalloc. | ||
70 | * | ||
71 | * This function does not give bus mappings for DMA transfers. In | ||
72 | * almost all conceivable cases a device driver should not be using | ||
73 | * this function | ||
74 | */ | ||
75 | |||
76 | static inline unsigned long virt_to_phys(volatile void * address) | ||
77 | { | ||
78 | return __pa(address); | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * phys_to_virt - map physical address to virtual | ||
83 | * @address: address to remap | ||
84 | * | ||
85 | * The returned virtual address is a current CPU mapping for | ||
86 | * the memory address given. It is only valid to use this function on | ||
87 | * addresses that have a kernel mapping | ||
88 | * | ||
89 | * This function does not handle bus mappings for DMA transfers. In | ||
90 | * almost all conceivable cases a device driver should not be using | ||
91 | * this function | ||
92 | */ | ||
93 | |||
94 | static inline void * phys_to_virt(unsigned long address) | ||
95 | { | ||
96 | return __va(address); | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Change "struct page" to physical address. | ||
101 | */ | ||
102 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | ||
103 | |||
104 | extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); | ||
105 | |||
106 | /** | ||
107 | * ioremap - map bus memory into CPU space | ||
108 | * @offset: bus address of the memory | ||
109 | * @size: size of the resource to map | ||
110 | * | ||
111 | * ioremap performs a platform specific sequence of operations to | ||
112 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ | ||
113 | * writew/writel functions and the other mmio helpers. The returned | ||
114 | * address is not guaranteed to be usable directly as a virtual | ||
115 | * address. | ||
116 | */ | ||
117 | |||
118 | static inline void __iomem * ioremap(unsigned long offset, unsigned long size) | ||
119 | { | ||
120 | return __ioremap(offset, size, 0); | ||
121 | } | ||
122 | |||
123 | extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); | ||
124 | extern void iounmap(volatile void __iomem *addr); | ||
125 | |||
126 | /* | ||
127 | * bt_ioremap() and bt_iounmap() are for temporary early boot-time | ||
128 | * mappings, before the real ioremap() is functional. | ||
129 | * A boot-time mapping is currently limited to at most 16 pages. | ||
130 | */ | ||
131 | extern void *bt_ioremap(unsigned long offset, unsigned long size); | ||
132 | extern void bt_iounmap(void *addr, unsigned long size); | ||
133 | |||
134 | /* | ||
135 | * ISA I/O bus memory addresses are 1:1 with the physical address. | ||
136 | */ | ||
137 | #define isa_virt_to_bus virt_to_phys | ||
138 | #define isa_page_to_bus page_to_phys | ||
139 | #define isa_bus_to_virt phys_to_virt | ||
140 | |||
141 | /* | ||
142 | * However PCI ones are not necessarily 1:1 and therefore these interfaces | ||
143 | * are forbidden in portable PCI drivers. | ||
144 | * | ||
145 | * Allow them on x86 for legacy drivers, though. | ||
146 | */ | ||
147 | #define virt_to_bus virt_to_phys | ||
148 | #define bus_to_virt phys_to_virt | ||
149 | |||
150 | /* | ||
151 | * readX/writeX() are used to access memory mapped devices. On some | ||
152 | * architectures the memory mapped IO stuff needs to be accessed | ||
153 | * differently. On the x86 architecture, we just read/write the | ||
154 | * memory location directly. | ||
155 | */ | ||
156 | |||
157 | static inline unsigned char readb(const volatile void __iomem *addr) | ||
158 | { | ||
159 | return *(volatile unsigned char __force *) addr; | ||
160 | } | ||
161 | static inline unsigned short readw(const volatile void __iomem *addr) | ||
162 | { | ||
163 | return *(volatile unsigned short __force *) addr; | ||
164 | } | ||
165 | static inline unsigned int readl(const volatile void __iomem *addr) | ||
166 | { | ||
167 | return *(volatile unsigned int __force *) addr; | ||
168 | } | ||
169 | #define readb_relaxed(addr) readb(addr) | ||
170 | #define readw_relaxed(addr) readw(addr) | ||
171 | #define readl_relaxed(addr) readl(addr) | ||
172 | #define __raw_readb readb | ||
173 | #define __raw_readw readw | ||
174 | #define __raw_readl readl | ||
175 | |||
176 | static inline void writeb(unsigned char b, volatile void __iomem *addr) | ||
177 | { | ||
178 | *(volatile unsigned char __force *) addr = b; | ||
179 | } | ||
180 | static inline void writew(unsigned short b, volatile void __iomem *addr) | ||
181 | { | ||
182 | *(volatile unsigned short __force *) addr = b; | ||
183 | } | ||
184 | static inline void writel(unsigned int b, volatile void __iomem *addr) | ||
185 | { | ||
186 | *(volatile unsigned int __force *) addr = b; | ||
187 | } | ||
188 | #define __raw_writeb writeb | ||
189 | #define __raw_writew writew | ||
190 | #define __raw_writel writel | ||
191 | |||
192 | #define mmiowb() | ||
193 | |||
194 | static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) | ||
195 | { | ||
196 | memset((void __force *) addr, val, count); | ||
197 | } | ||
198 | static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) | ||
199 | { | ||
200 | __memcpy(dst, (void __force *) src, count); | ||
201 | } | ||
202 | static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) | ||
203 | { | ||
204 | __memcpy((void __force *) dst, src, count); | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * ISA space is 'always mapped' on a typical x86 system; there is no need | ||
209 | * to explicitly ioremap() it. The fact that the ISA IO space is mapped | ||
210 | * to PAGE_OFFSET is pure coincidence - it does not mean ISA values | ||
211 | * are physical addresses. The following constant pointer can be | ||
212 | * used as the IO-area pointer (it can be iounmapped as well, so the | ||
213 | * analogy with PCI mappings is quite close): | ||
214 | */ | ||
215 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | ||
216 | |||
217 | #define isa_readb(a) readb(__ISA_IO_base + (a)) | ||
218 | #define isa_readw(a) readw(__ISA_IO_base + (a)) | ||
219 | #define isa_readl(a) readl(__ISA_IO_base + (a)) | ||
220 | #define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) | ||
221 | #define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) | ||
222 | #define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) | ||
223 | #define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) | ||
224 | #define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) | ||
225 | #define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) | ||
226 | |||
227 | |||
228 | /* | ||
229 | * Again, i386 does not require memory-IO-specific functions. | ||
230 | */ | ||
231 | |||
232 | #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) | ||
233 | #define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d)) | ||
234 | |||
235 | /** | ||
236 | * check_signature - find BIOS signatures | ||
237 | * @io_addr: mmio address to check | ||
238 | * @signature: signature block | ||
239 | * @length: length of signature | ||
240 | * | ||
241 | * Perform a signature comparison with the mmio address io_addr. This | ||
242 | * address should have been obtained by ioremap. | ||
243 | * Returns 1 on a match. | ||
244 | */ | ||
245 | |||
246 | static inline int check_signature(volatile void __iomem * io_addr, | ||
247 | const unsigned char *signature, int length) | ||
248 | { | ||
249 | int retval = 0; | ||
250 | do { | ||
251 | if (readb(io_addr) != *signature) | ||
252 | goto out; | ||
253 | io_addr++; | ||
254 | signature++; | ||
255 | length--; | ||
256 | } while (length); | ||
257 | retval = 1; | ||
258 | out: | ||
259 | return retval; | ||
260 | } | ||
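A small, hedged example of how check_signature() is typically called against an ioremapped legacy BIOS window; the address, offset and signature string are purely illustrative:

    #include <asm/io.h>

    static int find_vendor_rom(void)
    {
            void __iomem *p = ioremap(0x000c0000, 0x10000);  /* option-ROM area */
            int found = 0;

            if (p) {
                    /* "ACME" is a made-up signature for the example */
                    found = check_signature(p + 0x1e, "ACME", 4);
                    iounmap(p);
            }
            return found;
    }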
261 | |||
262 | /* | ||
263 | * Cache management | ||
264 | * | ||
265 | * This is needed in two cases: | ||
266 | * 1. Out of order aware processors | ||
267 | * 2. Accidentally out of order processors (PPro errata #51) | ||
268 | */ | ||
269 | |||
270 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
271 | |||
272 | static inline void flush_write_buffers(void) | ||
273 | { | ||
274 | __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); | ||
275 | } | ||
276 | |||
277 | #define dma_cache_inv(_start,_size) flush_write_buffers() | ||
278 | #define dma_cache_wback(_start,_size) flush_write_buffers() | ||
279 | #define dma_cache_wback_inv(_start,_size) flush_write_buffers() | ||
280 | |||
281 | #else | ||
282 | |||
283 | /* Nothing to do */ | ||
284 | |||
285 | #define dma_cache_inv(_start,_size) do { } while (0) | ||
286 | #define dma_cache_wback(_start,_size) do { } while (0) | ||
287 | #define dma_cache_wback_inv(_start,_size) do { } while (0) | ||
288 | #define flush_write_buffers() | ||
289 | |||
290 | #endif | ||
291 | |||
292 | #endif /* __KERNEL__ */ | ||
293 | |||
294 | #ifdef SLOW_IO_BY_JUMPING | ||
295 | #define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" | ||
296 | #else | ||
297 | #define __SLOW_DOWN_IO "outb %%al,$0x80;" | ||
298 | #endif | ||
299 | |||
300 | static inline void slow_down_io(void) { | ||
301 | __asm__ __volatile__( | ||
302 | __SLOW_DOWN_IO | ||
303 | #ifdef REALLY_SLOW_IO | ||
304 | __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO | ||
305 | #endif | ||
306 | : : ); | ||
307 | } | ||
308 | |||
309 | #ifdef CONFIG_X86_NUMAQ | ||
310 | extern void *xquad_portio; /* Where the IO area was mapped */ | ||
311 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | ||
312 | #define __BUILDIO(bwl,bw,type) \ | ||
313 | static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ | ||
314 | if (xquad_portio) \ | ||
315 | write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ | ||
316 | else \ | ||
317 | out##bwl##_local(value, port); \ | ||
318 | } \ | ||
319 | static inline void out##bwl(unsigned type value, int port) { \ | ||
320 | out##bwl##_quad(value, port, 0); \ | ||
321 | } \ | ||
322 | static inline unsigned type in##bwl##_quad(int port, int quad) { \ | ||
323 | if (xquad_portio) \ | ||
324 | return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ | ||
325 | else \ | ||
326 | return in##bwl##_local(port); \ | ||
327 | } \ | ||
328 | static inline unsigned type in##bwl(int port) { \ | ||
329 | return in##bwl##_quad(port, 0); \ | ||
330 | } | ||
331 | #else | ||
332 | #define __BUILDIO(bwl,bw,type) \ | ||
333 | static inline void out##bwl(unsigned type value, int port) { \ | ||
334 | out##bwl##_local(value, port); \ | ||
335 | } \ | ||
336 | static inline unsigned type in##bwl(int port) { \ | ||
337 | return in##bwl##_local(port); \ | ||
338 | } | ||
339 | #endif | ||
340 | |||
341 | |||
342 | #define BUILDIO(bwl,bw,type) \ | ||
343 | static inline void out##bwl##_local(unsigned type value, int port) { \ | ||
344 | __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ | ||
345 | } \ | ||
346 | static inline unsigned type in##bwl##_local(int port) { \ | ||
347 | unsigned type value; \ | ||
348 | __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ | ||
349 | return value; \ | ||
350 | } \ | ||
351 | static inline void out##bwl##_local_p(unsigned type value, int port) { \ | ||
352 | out##bwl##_local(value, port); \ | ||
353 | slow_down_io(); \ | ||
354 | } \ | ||
355 | static inline unsigned type in##bwl##_local_p(int port) { \ | ||
356 | unsigned type value = in##bwl##_local(port); \ | ||
357 | slow_down_io(); \ | ||
358 | return value; \ | ||
359 | } \ | ||
360 | __BUILDIO(bwl,bw,type) \ | ||
361 | static inline void out##bwl##_p(unsigned type value, int port) { \ | ||
362 | out##bwl(value, port); \ | ||
363 | slow_down_io(); \ | ||
364 | } \ | ||
365 | static inline unsigned type in##bwl##_p(int port) { \ | ||
366 | unsigned type value = in##bwl(port); \ | ||
367 | slow_down_io(); \ | ||
368 | return value; \ | ||
369 | } \ | ||
370 | static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ | ||
371 | __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
372 | } \ | ||
373 | static inline void ins##bwl(int port, void *addr, unsigned long count) { \ | ||
374 | __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
375 | } | ||
376 | |||
377 | BUILDIO(b,b,char) | ||
378 | BUILDIO(w,w,short) | ||
379 | BUILDIO(l,,int) | ||
380 | |||
381 | #endif | ||
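The BUILDIO/__BUILDIO macros above expand into the usual port-IO family (inb/outb, inw/outw, inl/outl, their _p delay variants, and the insb/outsb string forms). A brief, hedged sketch of how the generated functions are used; the port numbers are the well-known legacy keyboard-controller ports, shown only for illustration:

    #include <asm/io.h>

    static unsigned char kbd_demo(void)
    {
            unsigned char status = inb(0x64);   /* generated by BUILDIO(b,b,char) */

            outb_p(0xae, 0x64);                 /* _p variant adds slow_down_io() */
            return status;
    }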
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h new file mode 100644 index 000000000000..002c203ccd6a --- /dev/null +++ b/include/asm-i386/io_apic.h | |||
@@ -0,0 +1,213 @@ | |||
1 | #ifndef __ASM_IO_APIC_H | ||
2 | #define __ASM_IO_APIC_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <asm/types.h> | ||
6 | #include <asm/mpspec.h> | ||
7 | |||
8 | /* | ||
9 | * Intel IO-APIC support for SMP and UP systems. | ||
10 | * | ||
11 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar | ||
12 | */ | ||
13 | |||
14 | #ifdef CONFIG_X86_IO_APIC | ||
15 | |||
16 | #ifdef CONFIG_PCI_MSI | ||
17 | static inline int use_pci_vector(void) {return 1;} | ||
18 | static inline void disable_edge_ioapic_vector(unsigned int vector) { } | ||
19 | static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { } | ||
20 | static inline void end_edge_ioapic_vector (unsigned int vector) { } | ||
21 | #define startup_level_ioapic startup_level_ioapic_vector | ||
22 | #define shutdown_level_ioapic mask_IO_APIC_vector | ||
23 | #define enable_level_ioapic unmask_IO_APIC_vector | ||
24 | #define disable_level_ioapic mask_IO_APIC_vector | ||
25 | #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector | ||
26 | #define end_level_ioapic end_level_ioapic_vector | ||
27 | #define set_ioapic_affinity set_ioapic_affinity_vector | ||
28 | |||
29 | #define startup_edge_ioapic startup_edge_ioapic_vector | ||
30 | #define shutdown_edge_ioapic disable_edge_ioapic_vector | ||
31 | #define enable_edge_ioapic unmask_IO_APIC_vector | ||
32 | #define disable_edge_ioapic disable_edge_ioapic_vector | ||
33 | #define ack_edge_ioapic ack_edge_ioapic_vector | ||
34 | #define end_edge_ioapic end_edge_ioapic_vector | ||
35 | #else | ||
36 | static inline int use_pci_vector(void) {return 0;} | ||
37 | static inline void disable_edge_ioapic_irq(unsigned int irq) { } | ||
38 | static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { } | ||
39 | static inline void end_edge_ioapic_irq (unsigned int irq) { } | ||
40 | #define startup_level_ioapic startup_level_ioapic_irq | ||
41 | #define shutdown_level_ioapic mask_IO_APIC_irq | ||
42 | #define enable_level_ioapic unmask_IO_APIC_irq | ||
43 | #define disable_level_ioapic mask_IO_APIC_irq | ||
44 | #define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq | ||
45 | #define end_level_ioapic end_level_ioapic_irq | ||
46 | #define set_ioapic_affinity set_ioapic_affinity_irq | ||
47 | |||
48 | #define startup_edge_ioapic startup_edge_ioapic_irq | ||
49 | #define shutdown_edge_ioapic disable_edge_ioapic_irq | ||
50 | #define enable_edge_ioapic unmask_IO_APIC_irq | ||
51 | #define disable_edge_ioapic disable_edge_ioapic_irq | ||
52 | #define ack_edge_ioapic ack_edge_ioapic_irq | ||
53 | #define end_edge_ioapic end_edge_ioapic_irq | ||
54 | #endif | ||
55 | |||
56 | #define IO_APIC_BASE(idx) \ | ||
57 | ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ | ||
58 | + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) | ||
59 | |||
60 | /* | ||
61 | * The structure of the IO-APIC: | ||
62 | */ | ||
63 | union IO_APIC_reg_00 { | ||
64 | u32 raw; | ||
65 | struct { | ||
66 | u32 __reserved_2 : 14, | ||
67 | LTS : 1, | ||
68 | delivery_type : 1, | ||
69 | __reserved_1 : 8, | ||
70 | ID : 8; | ||
71 | } __attribute__ ((packed)) bits; | ||
72 | }; | ||
73 | |||
74 | union IO_APIC_reg_01 { | ||
75 | u32 raw; | ||
76 | struct { | ||
77 | u32 version : 8, | ||
78 | __reserved_2 : 7, | ||
79 | PRQ : 1, | ||
80 | entries : 8, | ||
81 | __reserved_1 : 8; | ||
82 | } __attribute__ ((packed)) bits; | ||
83 | }; | ||
84 | |||
85 | union IO_APIC_reg_02 { | ||
86 | u32 raw; | ||
87 | struct { | ||
88 | u32 __reserved_2 : 24, | ||
89 | arbitration : 4, | ||
90 | __reserved_1 : 4; | ||
91 | } __attribute__ ((packed)) bits; | ||
92 | }; | ||
93 | |||
94 | union IO_APIC_reg_03 { | ||
95 | u32 raw; | ||
96 | struct { | ||
97 | u32 boot_DT : 1, | ||
98 | __reserved_1 : 31; | ||
99 | } __attribute__ ((packed)) bits; | ||
100 | }; | ||
101 | |||
102 | /* | ||
103 | * # of IO-APICs and # of IRQ routing registers | ||
104 | */ | ||
105 | extern int nr_ioapics; | ||
106 | extern int nr_ioapic_registers[MAX_IO_APICS]; | ||
107 | |||
108 | enum ioapic_irq_destination_types { | ||
109 | dest_Fixed = 0, | ||
110 | dest_LowestPrio = 1, | ||
111 | dest_SMI = 2, | ||
112 | dest__reserved_1 = 3, | ||
113 | dest_NMI = 4, | ||
114 | dest_INIT = 5, | ||
115 | dest__reserved_2 = 6, | ||
116 | dest_ExtINT = 7 | ||
117 | }; | ||
118 | |||
119 | struct IO_APIC_route_entry { | ||
120 | __u32 vector : 8, | ||
121 | delivery_mode : 3, /* 000: FIXED | ||
122 | * 001: lowest prio | ||
123 | * 111: ExtINT | ||
124 | */ | ||
125 | dest_mode : 1, /* 0: physical, 1: logical */ | ||
126 | delivery_status : 1, | ||
127 | polarity : 1, | ||
128 | irr : 1, | ||
129 | trigger : 1, /* 0: edge, 1: level */ | ||
130 | mask : 1, /* 0: enabled, 1: disabled */ | ||
131 | __reserved_2 : 15; | ||
132 | |||
133 | union { struct { __u32 | ||
134 | __reserved_1 : 24, | ||
135 | physical_dest : 4, | ||
136 | __reserved_2 : 4; | ||
137 | } physical; | ||
138 | |||
139 | struct { __u32 | ||
140 | __reserved_1 : 24, | ||
141 | logical_dest : 8; | ||
142 | } logical; | ||
143 | } dest; | ||
144 | |||
145 | } __attribute__ ((packed)); | ||
146 | |||
147 | /* | ||
148 | * MP-BIOS irq configuration table structures: | ||
149 | */ | ||
150 | |||
151 | /* I/O APIC entries */ | ||
152 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
153 | |||
154 | /* # of MP IRQ source entries */ | ||
155 | extern int mp_irq_entries; | ||
156 | |||
157 | /* MP IRQ source entries */ | ||
158 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
159 | |||
160 | /* non-0 if default (table-less) MP configuration */ | ||
161 | extern int mpc_default_type; | ||
162 | |||
163 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) | ||
164 | { | ||
165 | *IO_APIC_BASE(apic) = reg; | ||
166 | return *(IO_APIC_BASE(apic)+4); | ||
167 | } | ||
168 | |||
169 | static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) | ||
170 | { | ||
171 | *IO_APIC_BASE(apic) = reg; | ||
172 | *(IO_APIC_BASE(apic)+4) = value; | ||
173 | } | ||
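A hedged sketch of the read path, roughly what io_apic_get_redir_entries() does internally: select register 1 (the version register) and decode it through union IO_APIC_reg_01. Locking against concurrent accessors (the ioapic_lock spinlock in the real code) is omitted here:

    static int redir_entries_demo(int apic)
    {
            union IO_APIC_reg_01 reg_01;

            reg_01.raw = io_apic_read(apic, 1);  /* register 1: version/entries */
            return reg_01.bits.entries + 1;      /* field holds the count minus one */
    }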
174 | |||
175 | /* | ||
176 | * Re-write a value: to be used for read-modify-write | ||
177 | * cycles where the read already set up the index register. | ||
178 | * | ||
179 | * Older SiS APICs require that we rewrite the index register. | ||
180 | */ | ||
181 | extern int sis_apic_bug; | ||
182 | static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) | ||
183 | { | ||
184 | if (sis_apic_bug) | ||
185 | *IO_APIC_BASE(apic) = reg; | ||
186 | *(IO_APIC_BASE(apic)+4) = value; | ||
187 | } | ||
188 | |||
189 | /* 1 if "noapic" boot option passed */ | ||
190 | extern int skip_ioapic_setup; | ||
191 | |||
192 | /* | ||
193 | * If we use the IO-APIC for IRQ routing, disable automatic | ||
194 | * assignment of PCI IRQ's. | ||
195 | */ | ||
196 | #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | ||
197 | |||
198 | #ifdef CONFIG_ACPI_BOOT | ||
199 | extern int io_apic_get_unique_id (int ioapic, int apic_id); | ||
200 | extern int io_apic_get_version (int ioapic); | ||
201 | extern int io_apic_get_redir_entries (int ioapic); | ||
202 | extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); | ||
203 | #endif /*CONFIG_ACPI_BOOT*/ | ||
204 | |||
205 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | ||
206 | |||
207 | #else /* !CONFIG_X86_IO_APIC */ | ||
208 | #define io_apic_assign_pci_irqs 0 | ||
209 | #endif | ||
210 | |||
211 | extern int assign_irq_vector(int irq); | ||
212 | |||
213 | #endif | ||
diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h new file mode 100644 index 000000000000..543f7843d553 --- /dev/null +++ b/include/asm-i386/ioctl.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* $Id: ioctl.h,v 1.5 1993/07/19 21:53:50 root Exp root $ | ||
2 | * | ||
3 | * linux/ioctl.h for Linux by H.H. Bergman. | ||
4 | */ | ||
5 | |||
6 | #ifndef _ASMI386_IOCTL_H | ||
7 | #define _ASMI386_IOCTL_H | ||
8 | |||
9 | /* ioctl command encoding: 32 bits total, command in lower 16 bits, | ||
10 | * size of the parameter structure in the lower 14 bits of the | ||
11 | * upper 16 bits. | ||
12 | * Encoding the size of the parameter structure in the ioctl request | ||
13 | * is useful for catching programs compiled with old versions | ||
14 | * and to avoid overwriting user space outside the user buffer area. | ||
15 | * The highest 2 bits are reserved for indicating the ``access mode''. | ||
16 | * NOTE: This limits the max parameter size to 16kB - 1! | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * The following is for compatibility across the various Linux | ||
21 | * platforms. The i386 ioctl numbering scheme doesn't really enforce | ||
22 | * a type field. De facto, however, the top 8 bits of the lower 16 | ||
23 | * bits are indeed used as a type field, so we might just as well make | ||
24 | * this explicit here. Please be sure to use the decoding macros | ||
25 | * below from now on. | ||
26 | */ | ||
27 | #define _IOC_NRBITS 8 | ||
28 | #define _IOC_TYPEBITS 8 | ||
29 | #define _IOC_SIZEBITS 14 | ||
30 | #define _IOC_DIRBITS 2 | ||
31 | |||
32 | #define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) | ||
33 | #define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) | ||
34 | #define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) | ||
35 | #define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) | ||
36 | |||
37 | #define _IOC_NRSHIFT 0 | ||
38 | #define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) | ||
39 | #define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) | ||
40 | #define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) | ||
41 | |||
42 | /* | ||
43 | * Direction bits. | ||
44 | */ | ||
45 | #define _IOC_NONE 0U | ||
46 | #define _IOC_WRITE 1U | ||
47 | #define _IOC_READ 2U | ||
48 | |||
49 | #define _IOC(dir,type,nr,size) \ | ||
50 | (((dir) << _IOC_DIRSHIFT) | \ | ||
51 | ((type) << _IOC_TYPESHIFT) | \ | ||
52 | ((nr) << _IOC_NRSHIFT) | \ | ||
53 | ((size) << _IOC_SIZESHIFT)) | ||
54 | |||
55 | /* provoke compile error for invalid uses of size argument */ | ||
56 | extern unsigned int __invalid_size_argument_for_IOC; | ||
57 | #define _IOC_TYPECHECK(t) \ | ||
58 | ((sizeof(t) == sizeof(t[1]) && \ | ||
59 | sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ | ||
60 | sizeof(t) : __invalid_size_argument_for_IOC) | ||
61 | |||
62 | /* used to create numbers */ | ||
63 | #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) | ||
64 | #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) | ||
65 | #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) | ||
66 | #define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) | ||
67 | #define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) | ||
68 | #define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) | ||
69 | #define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) | ||
70 | |||
71 | /* used to decode ioctl numbers.. */ | ||
72 | #define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) | ||
73 | #define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) | ||
74 | #define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) | ||
75 | #define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) | ||
76 | |||
77 | /* ...and for the drivers/sound files... */ | ||
78 | |||
79 | #define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) | ||
80 | #define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) | ||
81 | #define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) | ||
82 | #define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) | ||
83 | #define IOCSIZE_SHIFT (_IOC_SIZESHIFT) | ||
84 | |||
85 | #endif /* _ASMI386_IOCTL_H */ | ||
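To make the bit layout above concrete, here is a worked example of the encoding for _IOR('T', 0x30, unsigned int) (the TIOCGPTN number defined in ioctls.h below); this is just arithmetic on the macros above, not new API:

    /*   dir  = _IOC_READ (2)          -> 2    << 30 = 0x80000000
     *   size = sizeof(unsigned int)   -> 4    << 16 = 0x00040000
     *   type = 'T' (0x54)             -> 0x54 <<  8 = 0x00005400
     *   nr   = 0x30                   -> 0x30 <<  0 = 0x00000030
     *   ----------------------------------------------------------
     *   _IOR('T', 0x30, unsigned int)                = 0x80045430
     *
     * and the decode macros invert it:
     *   _IOC_DIR(0x80045430)  == _IOC_READ
     *   _IOC_TYPE(0x80045430) == 'T'
     *   _IOC_NR(0x80045430)   == 0x30
     *   _IOC_SIZE(0x80045430) == 4
     */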
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h new file mode 100644 index 000000000000..f962fadab0fa --- /dev/null +++ b/include/asm-i386/ioctls.h | |||
@@ -0,0 +1,83 @@ | |||
1 | #ifndef __ARCH_I386_IOCTLS_H__ | ||
2 | #define __ARCH_I386_IOCTLS_H__ | ||
3 | |||
4 | #include <asm/ioctl.h> | ||
5 | |||
6 | /* 0x54 is just a magic number to make these relatively unique ('T') */ | ||
7 | |||
8 | #define TCGETS 0x5401 | ||
9 | #define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ | ||
10 | #define TCSETSW 0x5403 | ||
11 | #define TCSETSF 0x5404 | ||
12 | #define TCGETA 0x5405 | ||
13 | #define TCSETA 0x5406 | ||
14 | #define TCSETAW 0x5407 | ||
15 | #define TCSETAF 0x5408 | ||
16 | #define TCSBRK 0x5409 | ||
17 | #define TCXONC 0x540A | ||
18 | #define TCFLSH 0x540B | ||
19 | #define TIOCEXCL 0x540C | ||
20 | #define TIOCNXCL 0x540D | ||
21 | #define TIOCSCTTY 0x540E | ||
22 | #define TIOCGPGRP 0x540F | ||
23 | #define TIOCSPGRP 0x5410 | ||
24 | #define TIOCOUTQ 0x5411 | ||
25 | #define TIOCSTI 0x5412 | ||
26 | #define TIOCGWINSZ 0x5413 | ||
27 | #define TIOCSWINSZ 0x5414 | ||
28 | #define TIOCMGET 0x5415 | ||
29 | #define TIOCMBIS 0x5416 | ||
30 | #define TIOCMBIC 0x5417 | ||
31 | #define TIOCMSET 0x5418 | ||
32 | #define TIOCGSOFTCAR 0x5419 | ||
33 | #define TIOCSSOFTCAR 0x541A | ||
34 | #define FIONREAD 0x541B | ||
35 | #define TIOCINQ FIONREAD | ||
36 | #define TIOCLINUX 0x541C | ||
37 | #define TIOCCONS 0x541D | ||
38 | #define TIOCGSERIAL 0x541E | ||
39 | #define TIOCSSERIAL 0x541F | ||
40 | #define TIOCPKT 0x5420 | ||
41 | #define FIONBIO 0x5421 | ||
42 | #define TIOCNOTTY 0x5422 | ||
43 | #define TIOCSETD 0x5423 | ||
44 | #define TIOCGETD 0x5424 | ||
45 | #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ | ||
46 | /* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ | ||
47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | ||
48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | ||
49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | ||
50 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | ||
51 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | ||
52 | |||
53 | #define FIONCLEX 0x5450 | ||
54 | #define FIOCLEX 0x5451 | ||
55 | #define FIOASYNC 0x5452 | ||
56 | #define TIOCSERCONFIG 0x5453 | ||
57 | #define TIOCSERGWILD 0x5454 | ||
58 | #define TIOCSERSWILD 0x5455 | ||
59 | #define TIOCGLCKTRMIOS 0x5456 | ||
60 | #define TIOCSLCKTRMIOS 0x5457 | ||
61 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
62 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
63 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
64 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
65 | |||
66 | #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ | ||
67 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | ||
68 | #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
69 | #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
70 | #define FIOQSIZE 0x5460 | ||
71 | |||
72 | /* Used for packet mode */ | ||
73 | #define TIOCPKT_DATA 0 | ||
74 | #define TIOCPKT_FLUSHREAD 1 | ||
75 | #define TIOCPKT_FLUSHWRITE 2 | ||
76 | #define TIOCPKT_STOP 4 | ||
77 | #define TIOCPKT_START 8 | ||
78 | #define TIOCPKT_NOSTOP 16 | ||
79 | #define TIOCPKT_DOSTOP 32 | ||
80 | |||
81 | #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
82 | |||
83 | #endif | ||
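These numbers are consumed from user space through the standard ioctl(2) path; a minimal, self-contained example using TIOCGWINSZ (struct winsize comes from the termios headers pulled in by <sys/ioctl.h>):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct winsize ws;

            if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
                    printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
            return 0;
    }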
diff --git a/include/asm-i386/ipc.h b/include/asm-i386/ipc.h new file mode 100644 index 000000000000..a46e3d9c2a3f --- /dev/null +++ b/include/asm-i386/ipc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipc.h> | |||
diff --git a/include/asm-i386/ipcbuf.h b/include/asm-i386/ipcbuf.h new file mode 100644 index 000000000000..0dcad4f84c2a --- /dev/null +++ b/include/asm-i386/ipcbuf.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef __i386_IPCBUF_H__ | ||
2 | #define __i386_IPCBUF_H__ | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for i386 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 32-bit mode_t and seq | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct ipc64_perm | ||
15 | { | ||
16 | __kernel_key_t key; | ||
17 | __kernel_uid32_t uid; | ||
18 | __kernel_gid32_t gid; | ||
19 | __kernel_uid32_t cuid; | ||
20 | __kernel_gid32_t cgid; | ||
21 | __kernel_mode_t mode; | ||
22 | unsigned short __pad1; | ||
23 | unsigned short seq; | ||
24 | unsigned short __pad2; | ||
25 | unsigned long __unused1; | ||
26 | unsigned long __unused2; | ||
27 | }; | ||
28 | |||
29 | #endif /* __i386_IPCBUF_H__ */ | ||
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h new file mode 100644 index 000000000000..05b9e61b0a72 --- /dev/null +++ b/include/asm-i386/irq.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef _ASM_IRQ_H | ||
2 | #define _ASM_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm/irq.h | ||
6 | * | ||
7 | * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar | ||
8 | * | ||
9 | * IRQ/IPI changes taken from work by Thomas Radke | ||
10 | * <tomsoft@informatik.tu-chemnitz.de> | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/sched.h> | ||
15 | /* include comes from machine specific directory */ | ||
16 | #include "irq_vectors.h" | ||
17 | #include <asm/thread_info.h> | ||
18 | |||
19 | static __inline__ int irq_canonicalize(int irq) | ||
20 | { | ||
21 | return ((irq == 2) ? 9 : irq); | ||
22 | } | ||
23 | |||
24 | extern void release_vm86_irqs(struct task_struct *); | ||
25 | |||
26 | #ifdef CONFIG_X86_LOCAL_APIC | ||
27 | # define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ | ||
28 | #endif | ||
29 | |||
30 | #ifdef CONFIG_4KSTACKS | ||
31 | extern void irq_ctx_init(int cpu); | ||
32 | # define __ARCH_HAS_DO_SOFTIRQ | ||
33 | #else | ||
34 | # define irq_ctx_init(cpu) do { } while (0) | ||
35 | #endif | ||
36 | |||
37 | #ifdef CONFIG_IRQBALANCE | ||
38 | extern int irqbalance_disable(char *str); | ||
39 | #endif | ||
40 | |||
41 | #endif /* _ASM_IRQ_H */ | ||
diff --git a/include/asm-i386/ist.h b/include/asm-i386/ist.h new file mode 100644 index 000000000000..d13d1e68afa9 --- /dev/null +++ b/include/asm-i386/ist.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _ASM_IST_H | ||
2 | #define _ASM_IST_H | ||
3 | |||
4 | /* | ||
5 | * Include file for the interface to IST BIOS | ||
6 | * Copyright 2002 Andy Grover <andrew.grover@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2, or (at your option) any | ||
11 | * later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | |||
20 | #ifdef __KERNEL__ | ||
21 | |||
22 | struct ist_info { | ||
23 | unsigned long signature; | ||
24 | unsigned long command; | ||
25 | unsigned long event; | ||
26 | unsigned long perf_level; | ||
27 | }; | ||
28 | |||
29 | extern struct ist_info ist_info; | ||
30 | |||
31 | #endif /* __KERNEL__ */ | ||
32 | #endif /* _ASM_IST_H */ | ||
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h new file mode 100644 index 000000000000..de6498b0d493 --- /dev/null +++ b/include/asm-i386/kdebug.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #ifndef _I386_KDEBUG_H | ||
2 | #define _I386_KDEBUG_H 1 | ||
3 | |||
4 | /* | ||
5 | * Aug-05 2004 Ported by Prasanna S Panchamukhi <prasanna@in.ibm.com> | ||
6 | * from x86_64 architecture. | ||
7 | */ | ||
8 | #include <linux/notifier.h> | ||
9 | |||
10 | struct pt_regs; | ||
11 | |||
12 | struct die_args { | ||
13 | struct pt_regs *regs; | ||
14 | const char *str; | ||
15 | long err; | ||
16 | int trapnr; | ||
17 | int signr; | ||
18 | }; | ||
19 | |||
20 | /* Note - you should never unregister because that can race with NMIs. | ||
21 | If you really want to do it: first unregister, then synchronize_kernel(), then free. | ||
22 | */ | ||
23 | int register_die_notifier(struct notifier_block *nb); | ||
24 | extern struct notifier_block *i386die_chain; | ||
25 | |||
26 | |||
27 | /* Grossly misnamed. */ | ||
28 | enum die_val { | ||
29 | DIE_OOPS = 1, | ||
30 | DIE_INT3, | ||
31 | DIE_DEBUG, | ||
32 | DIE_PANIC, | ||
33 | DIE_NMI, | ||
34 | DIE_DIE, | ||
35 | DIE_NMIWATCHDOG, | ||
36 | DIE_KERNELDEBUG, | ||
37 | DIE_TRAP, | ||
38 | DIE_GPF, | ||
39 | DIE_CALL, | ||
40 | DIE_NMI_IPI, | ||
41 | DIE_PAGE_FAULT, | ||
42 | }; | ||
43 | |||
44 | static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) | ||
45 | { | ||
46 | struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; | ||
47 | return notifier_call_chain(&i386die_chain, val, &args); | ||
48 | } | ||
49 | |||
50 | #endif | ||
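A hedged sketch of a die-chain consumer against this interface: a notifier_block whose callback inspects the die_args it is handed. The handler name and message are invented for the example, and, per the comment above, the block is never unregistered:

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/kdebug.h>

    static int demo_die_event(struct notifier_block *nb,
                              unsigned long val, void *data)
    {
            struct die_args *args = data;

            if (val == DIE_OOPS)
                    printk(KERN_ERR "oops: %s (err %ld, trap %d)\n",
                           args->str, args->err, args->trapnr);
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_die_nb = {
            .notifier_call = demo_die_event,
    };

    /* in module init:  register_die_notifier(&demo_die_nb); */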
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h new file mode 100644 index 000000000000..6886a0c3fedf --- /dev/null +++ b/include/asm-i386/kmap_types.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef _ASM_KMAP_TYPES_H | ||
2 | #define _ASM_KMAP_TYPES_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
7 | # define D(n) __KM_FENCE_##n , | ||
8 | #else | ||
9 | # define D(n) | ||
10 | #endif | ||
11 | |||
12 | enum km_type { | ||
13 | D(0) KM_BOUNCE_READ, | ||
14 | D(1) KM_SKB_SUNRPC_DATA, | ||
15 | D(2) KM_SKB_DATA_SOFTIRQ, | ||
16 | D(3) KM_USER0, | ||
17 | D(4) KM_USER1, | ||
18 | D(5) KM_BIO_SRC_IRQ, | ||
19 | D(6) KM_BIO_DST_IRQ, | ||
20 | D(7) KM_PTE0, | ||
21 | D(8) KM_PTE1, | ||
22 | D(9) KM_IRQ0, | ||
23 | D(10) KM_IRQ1, | ||
24 | D(11) KM_SOFTIRQ0, | ||
25 | D(12) KM_SOFTIRQ1, | ||
26 | D(13) KM_TYPE_NR | ||
27 | }; | ||
28 | |||
29 | #undef D | ||
30 | |||
31 | #endif | ||
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h new file mode 100644 index 000000000000..4092f68d123a --- /dev/null +++ b/include/asm-i386/kprobes.h | |||
@@ -0,0 +1,69 @@ | |||
1 | #ifndef _ASM_KPROBES_H | ||
2 | #define _ASM_KPROBES_H | ||
3 | /* | ||
4 | * Kernel Probes (KProbes) | ||
5 | * include/asm-i386/kprobes.h | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
22 | * | ||
23 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel | ||
24 | * Probes initial implementation ( includes suggestions from | ||
25 | * Rusty Russell). | ||
26 | */ | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | |||
30 | struct pt_regs; | ||
31 | |||
32 | typedef u8 kprobe_opcode_t; | ||
33 | #define BREAKPOINT_INSTRUCTION 0xcc | ||
34 | #define MAX_INSN_SIZE 16 | ||
35 | #define MAX_STACK_SIZE 64 | ||
36 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | ||
37 | (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ | ||
38 | ? (MAX_STACK_SIZE) \ | ||
39 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) | ||
40 | |||
41 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry | ||
42 | |||
43 | /* Architecture specific copy of original instruction*/ | ||
44 | struct arch_specific_insn { | ||
45 | /* copy of the original instruction */ | ||
46 | kprobe_opcode_t insn[MAX_INSN_SIZE]; | ||
47 | }; | ||
48 | |||
49 | |||
50 | /* Traps 3 and 1 use interrupt gates for kprobes, so restore the state of IF, | ||
51 | * if necessary, before executing the original int3/debug (trap) handler. | ||
52 | */ | ||
53 | static inline void restore_interrupts(struct pt_regs *regs) | ||
54 | { | ||
55 | if (regs->eflags & IF_MASK) | ||
56 | local_irq_enable(); | ||
57 | } | ||
58 | |||
59 | #ifdef CONFIG_KPROBES | ||
60 | extern int kprobe_exceptions_notify(struct notifier_block *self, | ||
61 | unsigned long val, void *data); | ||
62 | #else /* !CONFIG_KPROBES */ | ||
63 | static inline int kprobe_exceptions_notify(struct notifier_block *self, | ||
64 | unsigned long val, void *data) | ||
65 | { | ||
66 | return 0; | ||
67 | } | ||
68 | #endif | ||
69 | #endif /* _ASM_KPROBES_H */ | ||
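For context, a minimal era-appropriate sketch of registering a probe against this arch support; the probed address and handler body are placeholders, and real code would resolve a kernel text address first:

    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    static int demo_pre(struct kprobe *p, struct pt_regs *regs)
    {
            printk(KERN_DEBUG "kprobe hit at %p, eip=%08lx\n", p->addr, regs->eip);
            return 0;     /* let the probed instruction run normally */
    }

    static struct kprobe demo_kp = {
            .pre_handler = demo_pre,
    };

    /* in module init, after filling in the address to probe:
     *     demo_kp.addr = (kprobe_opcode_t *)some_kernel_text_address;
     *     register_kprobe(&demo_kp);
     */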
diff --git a/include/asm-i386/ldt.h b/include/asm-i386/ldt.h new file mode 100644 index 000000000000..e9d3de1dee6c --- /dev/null +++ b/include/asm-i386/ldt.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * ldt.h | ||
3 | * | ||
4 | * Definitions of structures used with the modify_ldt system call. | ||
5 | */ | ||
6 | #ifndef _LINUX_LDT_H | ||
7 | #define _LINUX_LDT_H | ||
8 | |||
9 | /* Maximum number of LDT entries supported. */ | ||
10 | #define LDT_ENTRIES 8192 | ||
11 | /* The size of each LDT entry. */ | ||
12 | #define LDT_ENTRY_SIZE 8 | ||
13 | |||
14 | #ifndef __ASSEMBLY__ | ||
15 | struct user_desc { | ||
16 | unsigned int entry_number; | ||
17 | unsigned long base_addr; | ||
18 | unsigned int limit; | ||
19 | unsigned int seg_32bit:1; | ||
20 | unsigned int contents:2; | ||
21 | unsigned int read_exec_only:1; | ||
22 | unsigned int limit_in_pages:1; | ||
23 | unsigned int seg_not_present:1; | ||
24 | unsigned int useable:1; | ||
25 | }; | ||
26 | |||
27 | #define MODIFY_LDT_CONTENTS_DATA 0 | ||
28 | #define MODIFY_LDT_CONTENTS_STACK 1 | ||
29 | #define MODIFY_LDT_CONTENTS_CODE 2 | ||
30 | |||
31 | #endif /* !__ASSEMBLY__ */ | ||
32 | #endif | ||
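struct user_desc is what user space hands to modify_ldt(2) (and set_thread_area); a hedged user-space sketch that installs one flat 32-bit data entry. The slot number and limits are illustrative only:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>            /* struct user_desc, MODIFY_LDT_* */

    static int install_ldt_data_entry(void)
    {
            struct user_desc d;

            memset(&d, 0, sizeof(d));
            d.entry_number   = 0;                        /* first LDT slot */
            d.limit          = 0xfffff;
            d.seg_32bit      = 1;
            d.contents       = MODIFY_LDT_CONTENTS_DATA;
            d.limit_in_pages = 1;
            d.useable        = 1;

            /* func 1 = write an LDT entry */
            return syscall(SYS_modify_ldt, 1, &d, sizeof(d));
    }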
diff --git a/include/asm-i386/linkage.h b/include/asm-i386/linkage.h new file mode 100644 index 000000000000..af3d8571c5c7 --- /dev/null +++ b/include/asm-i386/linkage.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) | ||
5 | #define FASTCALL(x) x __attribute__((regparm(3))) | ||
6 | #define fastcall __attribute__((regparm(3))) | ||
7 | |||
8 | #ifdef CONFIG_REGPARM | ||
9 | # define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) | ||
10 | #endif | ||
11 | |||
12 | #ifdef CONFIG_X86_ALIGNMENT_16 | ||
13 | #define __ALIGN .align 16,0x90 | ||
14 | #define __ALIGN_STR ".align 16,0x90" | ||
15 | #endif | ||
16 | |||
17 | #endif | ||
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h new file mode 100644 index 000000000000..0177da80dde3 --- /dev/null +++ b/include/asm-i386/local.h | |||
@@ -0,0 +1,70 @@ | |||
1 | #ifndef _ARCH_I386_LOCAL_H | ||
2 | #define _ARCH_I386_LOCAL_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | |||
6 | typedef struct | ||
7 | { | ||
8 | volatile unsigned long counter; | ||
9 | } local_t; | ||
10 | |||
11 | #define LOCAL_INIT(i) { (i) } | ||
12 | |||
13 | #define local_read(v) ((v)->counter) | ||
14 | #define local_set(v,i) (((v)->counter) = (i)) | ||
15 | |||
16 | static __inline__ void local_inc(local_t *v) | ||
17 | { | ||
18 | __asm__ __volatile__( | ||
19 | "incl %0" | ||
20 | :"=m" (v->counter) | ||
21 | :"m" (v->counter)); | ||
22 | } | ||
23 | |||
24 | static __inline__ void local_dec(local_t *v) | ||
25 | { | ||
26 | __asm__ __volatile__( | ||
27 | "decl %0" | ||
28 | :"=m" (v->counter) | ||
29 | :"m" (v->counter)); | ||
30 | } | ||
31 | |||
32 | static __inline__ void local_add(unsigned long i, local_t *v) | ||
33 | { | ||
34 | __asm__ __volatile__( | ||
35 | "addl %1,%0" | ||
36 | :"=m" (v->counter) | ||
37 | :"ir" (i), "m" (v->counter)); | ||
38 | } | ||
39 | |||
40 | static __inline__ void local_sub(unsigned long i, local_t *v) | ||
41 | { | ||
42 | __asm__ __volatile__( | ||
43 | "subl %1,%0" | ||
44 | :"=m" (v->counter) | ||
45 | :"ir" (i), "m" (v->counter)); | ||
46 | } | ||
47 | |||
48 | /* On x86, these are no better than the atomic variants. */ | ||
49 | #define __local_inc(l) local_inc(l) | ||
50 | #define __local_dec(l) local_dec(l) | ||
51 | #define __local_add(i,l) local_add((i),(l)) | ||
52 | #define __local_sub(i,l) local_sub((i),(l)) | ||
53 | |||
54 | /* Use these for per-cpu local_t variables: on some archs they are | ||
55 | * much more efficient than these naive implementations. Note they take | ||
56 | * a variable, not an address. | ||
57 | */ | ||
58 | #define cpu_local_read(v) local_read(&__get_cpu_var(v)) | ||
59 | #define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) | ||
60 | #define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) | ||
61 | #define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) | ||
62 | #define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) | ||
63 | #define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) | ||
64 | |||
65 | #define __cpu_local_inc(v) cpu_local_inc(v) | ||
66 | #define __cpu_local_dec(v) cpu_local_dec(v) | ||
67 | #define __cpu_local_add(i, v) cpu_local_add((i), (v)) | ||
68 | #define __cpu_local_sub(i, v) cpu_local_sub((i), (v)) | ||
69 | |||
70 | #endif /* _ARCH_I386_LOCAL_H */ | ||
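A short sketch of the intended per-CPU usage pattern (the counter name is made up); the point is that local_inc compiles down to a single incl on this CPU's copy, which is safe against interrupts on that CPU without a lock prefix:

    #include <linux/percpu.h>
    #include <asm/local.h>

    static DEFINE_PER_CPU(local_t, demo_events);

    static void note_event(void)
    {
            cpu_local_inc(demo_events);          /* this CPU's counter only */
    }

    static unsigned long events_on_this_cpu(void)
    {
            return cpu_local_read(demo_events);
    }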
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h new file mode 100644 index 000000000000..2339868270ef --- /dev/null +++ b/include/asm-i386/mach-bigsmp/mach_apic.h | |||
@@ -0,0 +1,167 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | #include <asm/smp.h> | ||
4 | |||
5 | #define SEQUENTIAL_APICID | ||
6 | #ifdef SEQUENTIAL_APICID | ||
7 | #define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\ | ||
8 | ((phys_apic<<2) & (~0xf)) ) | ||
9 | #elif CLUSTERED_APICID | ||
10 | #define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\ | ||
11 | ((phys_apic) & (~0xf)) ) | ||
12 | #endif | ||
13 | |||
14 | #define NO_BALANCE_IRQ (1) | ||
15 | #define esr_disable (1) | ||
16 | |||
17 | #define NO_IOAPIC_CHECK (0) | ||
18 | |||
19 | static inline int apic_id_registered(void) | ||
20 | { | ||
21 | return (1); | ||
22 | } | ||
23 | |||
24 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | ||
25 | /* Round-robin the irqs among the online cpus */ | ||
26 | static inline cpumask_t target_cpus(void) | ||
27 | { | ||
28 | static unsigned long cpu = NR_CPUS; | ||
29 | do { | ||
30 | if (cpu >= NR_CPUS) | ||
31 | cpu = first_cpu(cpu_online_map); | ||
32 | else | ||
33 | cpu = next_cpu(cpu, cpu_online_map); | ||
34 | } while (cpu >= NR_CPUS); | ||
35 | return cpumask_of_cpu(cpu); | ||
36 | } | ||
37 | #define TARGET_CPUS (target_cpus()) | ||
38 | |||
39 | #define INT_DELIVERY_MODE dest_Fixed | ||
40 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ | ||
41 | |||
42 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | ||
43 | { | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ | ||
48 | static inline unsigned long check_apicid_present(int bit) | ||
49 | { | ||
50 | return 1; | ||
51 | } | ||
52 | |||
53 | #define apicid_cluster(apicid) (apicid & 0xF0) | ||
54 | |||
55 | static inline unsigned long calculate_ldr(unsigned long old) | ||
56 | { | ||
57 | unsigned long id; | ||
58 | id = xapic_phys_to_log_apicid(hard_smp_processor_id()); | ||
59 | return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id)); | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Set up the logical destination ID. | ||
64 | * | ||
65 | * Intel recommends to set DFR, LDR and TPR before enabling | ||
66 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel | ||
67 | * document number 292116). So here it goes... | ||
68 | */ | ||
69 | static inline void init_apic_ldr(void) | ||
70 | { | ||
71 | unsigned long val; | ||
72 | |||
73 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | ||
74 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | ||
75 | val = calculate_ldr(val); | ||
76 | apic_write_around(APIC_LDR, val); | ||
77 | } | ||
78 | |||
79 | static inline void clustered_apic_check(void) | ||
80 | { | ||
81 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", | ||
82 | "Cluster", nr_ioapics); | ||
83 | } | ||
84 | |||
85 | static inline int multi_timer_check(int apic, int irq) | ||
86 | { | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static inline int apicid_to_node(int logical_apicid) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | extern u8 bios_cpu_apicid[]; | ||
96 | |||
97 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
98 | { | ||
99 | if (mps_cpu < NR_CPUS) | ||
100 | return (int)bios_cpu_apicid[mps_cpu]; | ||
101 | else | ||
102 | return BAD_APICID; | ||
103 | } | ||
104 | |||
105 | static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) | ||
106 | { | ||
107 | return physid_mask_of_physid(phys_apicid); | ||
108 | } | ||
109 | |||
110 | extern u8 cpu_2_logical_apicid[]; | ||
111 | /* Mapping from cpu number to logical apicid */ | ||
112 | static inline int cpu_to_logical_apicid(int cpu) | ||
113 | { | ||
114 | if (cpu >= NR_CPUS) | ||
115 | return BAD_APICID; | ||
116 | return (int)cpu_2_logical_apicid[cpu]; | ||
117 | } | ||
118 | |||
119 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
120 | struct mpc_config_translation *translation_record) | ||
121 | { | ||
122 | printk("Processor #%d %ld:%ld APIC version %d\n", | ||
123 | m->mpc_apicid, | ||
124 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
125 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
126 | m->mpc_apicver); | ||
127 | return m->mpc_apicid; | ||
128 | } | ||
129 | |||
130 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | ||
131 | { | ||
132 | /* For clustered we don't have a good way to do this yet - hack */ | ||
133 | return physids_promote(0xFUL); | ||
134 | } | ||
135 | |||
136 | #define WAKE_SECONDARY_VIA_INIT | ||
137 | |||
138 | static inline void setup_portio_remap(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | static inline void enable_apic_mode(void) | ||
143 | { | ||
144 | } | ||
145 | |||
146 | static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
147 | { | ||
148 | return (1); | ||
149 | } | ||
150 | |||
151 | /* As we are using a single CPU as the destination, pick only one CPU here */ | ||
152 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
153 | { | ||
154 | int cpu; | ||
155 | int apicid; | ||
156 | |||
157 | cpu = first_cpu(cpumask); | ||
158 | apicid = cpu_to_logical_apicid(cpu); | ||
159 | return apicid; | ||
160 | } | ||
161 | |||
162 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
163 | { | ||
164 | return cpuid_apic >> index_msb; | ||
165 | } | ||
166 | |||
167 | #endif /* __ASM_MACH_APIC_H */ | ||
diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h new file mode 100644 index 000000000000..23e58b317c79 --- /dev/null +++ b/include/asm-i386/mach-bigsmp/mach_apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0x0F<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0x0F); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-i386/mach-bigsmp/mach_ipi.h b/include/asm-i386/mach-bigsmp/mach_ipi.h new file mode 100644 index 000000000000..9404c535b7ec --- /dev/null +++ b/include/asm-i386/mach-bigsmp/mach_ipi.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | ||
2 | #define __ASM_MACH_IPI_H | ||
3 | |||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | ||
5 | |||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | ||
7 | { | ||
8 | send_IPI_mask_sequence(mask, vector); | ||
9 | } | ||
10 | |||
11 | static inline void send_IPI_allbutself(int vector) | ||
12 | { | ||
13 | cpumask_t mask = cpu_online_map; | ||
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | |||
16 | if (!cpus_empty(mask)) | ||
17 | send_IPI_mask(mask, vector); | ||
18 | } | ||
19 | |||
20 | static inline void send_IPI_all(int vector) | ||
21 | { | ||
22 | send_IPI_mask(cpu_online_map, vector); | ||
23 | } | ||
24 | |||
25 | #endif /* __ASM_MACH_IPI_H */ | ||
diff --git a/include/asm-i386/mach-bigsmp/mach_mpspec.h b/include/asm-i386/mach-bigsmp/mach_mpspec.h new file mode 100644 index 000000000000..6b5dadcf1d0e --- /dev/null +++ b/include/asm-i386/mach-bigsmp/mach_mpspec.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | ||
2 | #define __ASM_MACH_MPSPEC_H | ||
3 | |||
4 | #define MAX_IRQ_SOURCES 256 | ||
5 | |||
6 | #define MAX_MP_BUSSES 32 | ||
7 | |||
8 | #endif /* __ASM_MACH_MPSPEC_H */ | ||
diff --git a/include/asm-i386/mach-default/apm.h b/include/asm-i386/mach-default/apm.h new file mode 100644 index 000000000000..1f730b8bd1fd --- /dev/null +++ b/include/asm-i386/mach-default/apm.h | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * include/asm-i386/mach-default/apm.h | ||
3 | * | ||
4 | * Machine specific APM BIOS functions for generic. | ||
5 | * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_APM_H | ||
9 | #define _ASM_APM_H | ||
10 | |||
11 | #ifdef APM_ZERO_SEGS | ||
12 | # define APM_DO_ZERO_SEGS \ | ||
13 | "pushl %%ds\n\t" \ | ||
14 | "pushl %%es\n\t" \ | ||
15 | "xorl %%edx, %%edx\n\t" \ | ||
16 | "mov %%dx, %%ds\n\t" \ | ||
17 | "mov %%dx, %%es\n\t" \ | ||
18 | "mov %%dx, %%fs\n\t" \ | ||
19 | "mov %%dx, %%gs\n\t" | ||
20 | # define APM_DO_POP_SEGS \ | ||
21 | "popl %%es\n\t" \ | ||
22 | "popl %%ds\n\t" | ||
23 | #else | ||
24 | # define APM_DO_ZERO_SEGS | ||
25 | # define APM_DO_POP_SEGS | ||
26 | #endif | ||
27 | |||
28 | static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, | ||
29 | u32 *eax, u32 *ebx, u32 *ecx, | ||
30 | u32 *edx, u32 *esi) | ||
31 | { | ||
32 | /* | ||
33 | * N.B. We do NOT need a cld after the BIOS call | ||
34 | * because we always save and restore the flags. | ||
35 | */ | ||
36 | __asm__ __volatile__(APM_DO_ZERO_SEGS | ||
37 | "pushl %%edi\n\t" | ||
38 | "pushl %%ebp\n\t" | ||
39 | "lcall *%%cs:apm_bios_entry\n\t" | ||
40 | "setc %%al\n\t" | ||
41 | "popl %%ebp\n\t" | ||
42 | "popl %%edi\n\t" | ||
43 | APM_DO_POP_SEGS | ||
44 | : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), | ||
45 | "=S" (*esi) | ||
46 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | ||
47 | : "memory", "cc"); | ||
48 | } | ||
49 | |||
50 | static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, | ||
51 | u32 ecx_in, u32 *eax) | ||
52 | { | ||
53 | int cx, dx, si; | ||
54 | u8 error; | ||
55 | |||
56 | /* | ||
57 | * N.B. We do NOT need a cld after the BIOS call | ||
58 | * because we always save and restore the flags. | ||
59 | */ | ||
60 | __asm__ __volatile__(APM_DO_ZERO_SEGS | ||
61 | "pushl %%edi\n\t" | ||
62 | "pushl %%ebp\n\t" | ||
63 | "lcall *%%cs:apm_bios_entry\n\t" | ||
64 | "setc %%bl\n\t" | ||
65 | "popl %%ebp\n\t" | ||
66 | "popl %%edi\n\t" | ||
67 | APM_DO_POP_SEGS | ||
68 | : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), | ||
69 | "=S" (si) | ||
70 | : "a" (func), "b" (ebx_in), "c" (ecx_in) | ||
71 | : "memory", "cc"); | ||
72 | return error; | ||
73 | } | ||
74 | |||
75 | #endif /* _ASM_APM_H */ | ||
diff --git a/include/asm-i386/mach-default/bios_ebda.h b/include/asm-i386/mach-default/bios_ebda.h new file mode 100644 index 000000000000..9cbd9a668af8 --- /dev/null +++ b/include/asm-i386/mach-default/bios_ebda.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _MACH_BIOS_EBDA_H | ||
2 | #define _MACH_BIOS_EBDA_H | ||
3 | |||
4 | /* | ||
5 | * there is a real-mode segmented pointer pointing to the | ||
6 | * 4K EBDA area at 0x40E. | ||
7 | */ | ||
8 | static inline unsigned int get_bios_ebda(void) | ||
9 | { | ||
10 | unsigned int address = *(unsigned short *)phys_to_virt(0x40E); | ||
11 | address <<= 4; | ||
12 | return address; /* 0 means none */ | ||
13 | } | ||
14 | |||
15 | #endif /* _MACH_BIOS_EBDA_H */ | ||
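A hedged sketch of how the helper is typically consumed (roughly the shape of the MP-table scan: take the physical EBDA address, bounds-check it, and inspect directly-mapped low memory through phys_to_virt). The header include path is left implicit here and the dump itself is only illustrative:

    #include <linux/kernel.h>
    #include <asm/io.h>

    static void ebda_peek(void)
    {
            unsigned int ebda = get_bios_ebda();   /* physical address, 0 if none */
            unsigned char *p;

            if (!ebda || ebda >= 0xa0000)          /* must live below 640K */
                    return;
            p = phys_to_virt(ebda);                /* low memory is direct-mapped */
            printk(KERN_DEBUG "EBDA at %#x: %02x %02x %02x %02x\n",
                   ebda, p[0], p[1], p[2], p[3]);
    }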
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h new file mode 100644 index 000000000000..03dd13a48a8c --- /dev/null +++ b/include/asm-i386/mach-default/do_timer.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* defines for inline arch setup functions */ | ||
2 | |||
3 | #include <asm/apic.h> | ||
4 | |||
5 | /** | ||
6 | * do_timer_interrupt_hook - hook into timer tick | ||
7 | * @regs: standard registers from interrupt | ||
8 | * | ||
9 | * Description: | ||
10 | * This hook is called immediately after the timer interrupt is ack'd. | ||
11 | * It's primary purpose is to allow architectures that don't possess | ||
12 | * individual per CPU clocks (like the CPU APICs supply) to broadcast the | ||
13 | * timer interrupt as a means of triggering reschedules etc. | ||
14 | **/ | ||
15 | |||
16 | static inline void do_timer_interrupt_hook(struct pt_regs *regs) | ||
17 | { | ||
18 | do_timer(regs); | ||
19 | #ifndef CONFIG_SMP | ||
20 | update_process_times(user_mode(regs)); | ||
21 | #endif | ||
22 | /* | ||
23 | * In the SMP case we use the local APIC timer interrupt to do the | ||
24 | * profiling, except when we simulate SMP mode on a uniprocessor | ||
25 | * system; in that case we have to call the local interrupt handler. | ||
26 | */ | ||
27 | #ifndef CONFIG_X86_LOCAL_APIC | ||
28 | profile_tick(CPU_PROFILING, regs); | ||
29 | #else | ||
30 | if (!using_apic_timer) | ||
31 | smp_local_timer_interrupt(regs); | ||
32 | #endif | ||
33 | } | ||
34 | |||
35 | |||
36 | /* you can safely undefine this if you don't have the Neptune chipset */ | ||
37 | |||
38 | #define BUGGY_NEPTUN_TIMER | ||
39 | |||
40 | /** | ||
41 | * do_timer_overflow - process a detected timer overflow condition | ||
42 | * @count: hardware timer interrupt count on overflow | ||
43 | * | ||
44 | * Description: | ||
45 | * This call is invoked when the jiffies count has not incremented but | ||
46 | * the hardware timer interrupt has. It means that a timer tick interrupt | ||
47 | * came along while the previous one was pending, thus a tick was missed | ||
48 | **/ | ||
49 | static inline int do_timer_overflow(int count) | ||
50 | { | ||
51 | int i; | ||
52 | |||
53 | spin_lock(&i8259A_lock); | ||
54 | /* | ||
55 | * This is tricky when I/O APICs are used; | ||
56 | * see do_timer_interrupt(). | ||
57 | */ | ||
58 | i = inb(0x20); | ||
59 | spin_unlock(&i8259A_lock); | ||
60 | |||
61 | /* assumption about timer being IRQ0 */ | ||
62 | if (i & 0x01) { | ||
63 | /* | ||
64 | * We cannot detect lost timer interrupts ... | ||
65 | * well, that's why we call them lost, don't we? :) | ||
66 | * [hmm, on the Pentium and Alpha we can ... sort of] | ||
67 | */ | ||
68 | count -= LATCH; | ||
69 | } else { | ||
70 | #ifdef BUGGY_NEPTUN_TIMER | ||
71 | /* | ||
72 | * for the Neptune bug we know that the 'latch' | ||
73 | * command doesn't latch the high and low values | ||
74 | * of the counter atomically. Thus we have to | ||
75 | * subtract 256 from the counter | ||
76 | * ... funny, isn't it? :) | ||
77 | */ | ||
78 | |||
79 | count -= 256; | ||
80 | #else | ||
81 | printk("do_slow_gettimeoffset(): hardware timer problem?\n"); | ||
82 | #endif | ||
83 | } | ||
84 | return count; | ||
85 | } | ||
diff --git a/include/asm-i386/mach-default/entry_arch.h b/include/asm-i386/mach-default/entry_arch.h new file mode 100644 index 000000000000..bc861469bdba --- /dev/null +++ b/include/asm-i386/mach-default/entry_arch.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * This file is designed to contain the BUILD_INTERRUPT specifications for | ||
3 | * all of the extra named interrupt vectors used by the architecture. | ||
4 | * Usually these are the Inter-Processor Interrupts (IPIs). | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * The following vectors are part of the Linux architecture; there | ||
9 | * is no hardware IRQ pin equivalent for them, as they are triggered | ||
10 | * through the ICC by us (IPIs) | ||
11 | */ | ||
12 | #ifdef CONFIG_X86_SMP | ||
13 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | ||
14 | BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) | ||
15 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | ||
16 | #endif | ||
17 | |||
18 | /* | ||
19 | * every Pentium local APIC has two 'local interrupts', each with a | ||
20 | * soft-definable vector attached: one of | ||
21 | * them is a timer interrupt, the other is error-counter | ||
22 | * overflow. Linux uses the local APIC timer interrupt to get | ||
23 | * a much simpler SMP time architecture: | ||
24 | */ | ||
25 | #ifdef CONFIG_X86_LOCAL_APIC | ||
26 | BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) | ||
27 | BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) | ||
28 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) | ||
29 | |||
30 | #ifdef CONFIG_X86_MCE_P4THERMAL | ||
31 | BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) | ||
32 | #endif | ||
33 | |||
34 | #endif | ||
diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h new file mode 100644 index 000000000000..a96d9f6604ee --- /dev/null +++ b/include/asm-i386/mach-default/io_ports.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * arch/i386/mach-generic/io_ports.h | ||
3 | * | ||
4 | * Machine specific IO port address definition for generic. | ||
5 | * Written by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_IO_PORTS_H | ||
8 | #define _MACH_IO_PORTS_H | ||
9 | |||
10 | /* i8253A PIT registers */ | ||
11 | #define PIT_MODE 0x43 | ||
12 | #define PIT_CH0 0x40 | ||
13 | #define PIT_CH2 0x42 | ||
14 | |||
15 | /* i8259A PIC registers */ | ||
16 | #define PIC_MASTER_CMD 0x20 | ||
17 | #define PIC_MASTER_IMR 0x21 | ||
18 | #define PIC_MASTER_ISR PIC_MASTER_CMD | ||
19 | #define PIC_MASTER_POLL PIC_MASTER_ISR | ||
20 | #define PIC_MASTER_OCW3 PIC_MASTER_ISR | ||
21 | #define PIC_SLAVE_CMD 0xa0 | ||
22 | #define PIC_SLAVE_IMR 0xa1 | ||
23 | |||
24 | /* i8259A PIC related value */ | ||
25 | #define PIC_CASCADE_IR 2 | ||
26 | #define MASTER_ICW4_DEFAULT 0x01 | ||
27 | #define SLAVE_ICW4_DEFAULT 0x01 | ||
28 | #define PIC_ICW4_AEOI 2 | ||
29 | |||
30 | #endif /* !_MACH_IO_PORTS_H */ | ||
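To illustrate how the PIT ports defined above are used, here is a read-only, user-space sketch that latches and reads back the current channel 0 count (the same outb/inb sequence do_slow_gettimeoffset() performs). The port numbers come from the header; the latch command value 0x00 and the ioperm() setup are standard 8254 / Linux x86 user-space conventions, not taken from this tree, and the program needs root:

/* User-space sketch (x86 Linux, run as root): latch and read the PIT
 * channel 0 count through the ports defined above. */
#include <stdio.h>
#include <sys/io.h>     /* ioperm(), outb(), inb() -- glibc, x86 only */

#define PIT_MODE 0x43
#define PIT_CH0  0x40

int main(void)
{
	if (ioperm(0x40, 4, 1) != 0) {
		perror("ioperm");
		return 1;
	}
	outb(0x00, PIT_MODE);               /* latch command for channel 0 */
	unsigned int lo = inb(PIT_CH0);     /* low byte of latched count   */
	unsigned int hi = inb(PIT_CH0);     /* high byte of latched count  */
	printf("PIT channel 0 count: %u\n", (hi << 8) | lo);
	return 0;
}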
diff --git a/include/asm-i386/mach-default/irq_vectors.h b/include/asm-i386/mach-default/irq_vectors.h new file mode 100644 index 000000000000..881c63ca61ad --- /dev/null +++ b/include/asm-i386/mach-default/irq_vectors.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * This file should contain #defines for all of the interrupt vector | ||
3 | * numbers used by this architecture. | ||
4 | * | ||
5 | * In addition, there are some standard defines: | ||
6 | * | ||
7 | * FIRST_EXTERNAL_VECTOR: | ||
8 | * The first free place for external interrupts | ||
9 | * | ||
10 | * SYSCALL_VECTOR: | ||
11 | * The vector through which a syscall makes the user-to-kernel | ||
12 | * transition. | ||
13 | * | ||
14 | * TIMER_IRQ: | ||
15 | * The IRQ number the timer interrupt comes in at. | ||
16 | * | ||
17 | * NR_IRQS: | ||
18 | * The total number of interrupt vectors (including all the | ||
19 | * architecture specific interrupts) needed. | ||
20 | * | ||
21 | */ | ||
22 | #ifndef _ASM_IRQ_VECTORS_H | ||
23 | #define _ASM_IRQ_VECTORS_H | ||
24 | |||
25 | /* | ||
26 | * IDT vectors usable for external interrupt sources start | ||
27 | * at 0x20: | ||
28 | */ | ||
29 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
30 | |||
31 | #define SYSCALL_VECTOR 0x80 | ||
32 | |||
33 | /* | ||
34 | * Vectors 0x20-0x2f are used for ISA interrupts. | ||
35 | */ | ||
36 | |||
37 | /* | ||
38 | * Special IRQ vectors used by the SMP architecture, 0xf0-0xff | ||
39 | * | ||
40 | * Some of the following vectors are 'rare'; they are merged | ||
41 | * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. | ||
42 | * TLB, reschedule and local APIC vectors are performance-critical. | ||
43 | * | ||
44 | * Vectors 0xf0-0xfa are free (reserved for future Linux use). | ||
45 | */ | ||
46 | #define SPURIOUS_APIC_VECTOR 0xff | ||
47 | #define ERROR_APIC_VECTOR 0xfe | ||
48 | #define INVALIDATE_TLB_VECTOR 0xfd | ||
49 | #define RESCHEDULE_VECTOR 0xfc | ||
50 | #define CALL_FUNCTION_VECTOR 0xfb | ||
51 | |||
52 | #define THERMAL_APIC_VECTOR 0xf0 | ||
53 | /* | ||
54 | * Local APIC timer IRQ vector is on a different priority level, | ||
55 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
56 | * sources per level' erratum. | ||
57 | */ | ||
58 | #define LOCAL_TIMER_VECTOR 0xef | ||
59 | |||
60 | /* | ||
61 | * First APIC vector available to drivers (vectors 0x30-0xee). | ||
62 | * We start at 0x31 to spread vectors out evenly between priority | ||
63 | * levels (0x80 is the syscall vector). | ||
64 | */ | ||
65 | #define FIRST_DEVICE_VECTOR 0x31 | ||
66 | #define FIRST_SYSTEM_VECTOR 0xef | ||
67 | |||
68 | #define TIMER_IRQ 0 | ||
69 | |||
70 | /* | ||
71 | * 16 8259A IRQs, 208 potential APIC interrupt sources. | ||
72 | * Right now the APIC is mostly only used for SMP. | ||
73 | * 256 vectors is an architectural limit. (We can have | ||
74 | * more than 256 devices theoretically, but they will | ||
75 | * have to use shared interrupts.) | ||
76 | * Since vectors 0x00-0x1f are used/reserved for the CPU, | ||
77 | * the usable vector space is 0x20-0xff (224 vectors). | ||
78 | */ | ||
79 | |||
80 | /* | ||
81 | * The maximum number of vectors supported by i386 processors | ||
82 | * is limited to 256. For processors other than i386, NR_VECTORS | ||
83 | * should be changed accordingly. | ||
84 | */ | ||
85 | #define NR_VECTORS 256 | ||
86 | |||
87 | #include "irq_vectors_limits.h" | ||
88 | |||
89 | #define FPU_IRQ 13 | ||
90 | |||
91 | #define FIRST_VM86_IRQ 3 | ||
92 | #define LAST_VM86_IRQ 15 | ||
93 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
94 | |||
95 | |||
96 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
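The comment about starting device vectors at 0x31 to spread them between priority levels corresponds to an allocator that walks the 0x31-0xee range in steps of 8 (one IDT priority level covers 16 vectors), skips the syscall vector, and wraps with a growing offset. The sketch below is modeled on the vector-assignment logic of this kernel generation but is simplified and self-contained; treat the details as illustrative rather than as the kernel's code:

/* Illustrative device-vector allocator that spreads vectors across
 * priority levels, as the comments above describe. */
#include <stdio.h>

#define FIRST_DEVICE_VECTOR  0x31
#define FIRST_SYSTEM_VECTOR  0xef
#define SYSCALL_VECTOR       0x80

static int next_vector(void)
{
	static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;

	for (;;) {
		current_vector += 8;
		if (current_vector == SYSCALL_VECTOR)     /* never hand out 0x80 */
			continue;
		if (current_vector >= FIRST_SYSTEM_VECTOR) {
			/* wrapped: restart one vector higher inside each level */
			offset++;
			current_vector = FIRST_DEVICE_VECTOR + offset;
		}
		return current_vector;
	}
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		printf("device irq %2d -> vector 0x%02x\n", i, next_vector());
	return 0;
}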
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h new file mode 100644 index 000000000000..b330026e6f7f --- /dev/null +++ b/include/asm-i386/mach-default/irq_vectors_limits.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_LIMITS_H | ||
2 | #define _ASM_IRQ_VECTORS_LIMITS_H | ||
3 | |||
4 | #ifdef CONFIG_PCI_MSI | ||
5 | #define NR_IRQS FIRST_SYSTEM_VECTOR | ||
6 | #define NR_IRQ_VECTORS NR_IRQS | ||
7 | #else | ||
8 | #ifdef CONFIG_X86_IO_APIC | ||
9 | #define NR_IRQS 224 | ||
10 | # if (224 >= 32 * NR_CPUS) | ||
11 | # define NR_IRQ_VECTORS NR_IRQS | ||
12 | # else | ||
13 | # define NR_IRQ_VECTORS (32 * NR_CPUS) | ||
14 | # endif | ||
15 | #else | ||
16 | #define NR_IRQS 16 | ||
17 | #define NR_IRQ_VECTORS NR_IRQS | ||
18 | #endif | ||
19 | #endif | ||
20 | |||
21 | #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ | ||
diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h new file mode 100644 index 000000000000..627f1cd084ba --- /dev/null +++ b/include/asm-i386/mach-default/mach_apic.h | |||
@@ -0,0 +1,133 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | #include <mach_apicdef.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | ||
8 | |||
9 | static inline cpumask_t target_cpus(void) | ||
10 | { | ||
11 | #ifdef CONFIG_SMP | ||
12 | return cpu_online_map; | ||
13 | #else | ||
14 | return cpumask_of_cpu(0); | ||
15 | #endif | ||
16 | } | ||
17 | #define TARGET_CPUS (target_cpus()) | ||
18 | |||
19 | #define NO_BALANCE_IRQ (0) | ||
20 | #define esr_disable (0) | ||
21 | |||
22 | #define NO_IOAPIC_CHECK (0) | ||
23 | |||
24 | #define INT_DELIVERY_MODE dest_LowestPrio | ||
25 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ | ||
26 | |||
27 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | ||
28 | { | ||
29 | return physid_isset(apicid, bitmap); | ||
30 | } | ||
31 | |||
32 | static inline unsigned long check_apicid_present(int bit) | ||
33 | { | ||
34 | return physid_isset(bit, phys_cpu_present_map); | ||
35 | } | ||
36 | |||
37 | /* | ||
38 | * Set up the logical destination ID. | ||
39 | * | ||
40 | * Intel recommends setting DFR, LDR and TPR before enabling | ||
41 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel | ||
42 | * document number 292116). So here it goes... | ||
43 | */ | ||
44 | static inline void init_apic_ldr(void) | ||
45 | { | ||
46 | unsigned long val; | ||
47 | |||
48 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | ||
49 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | ||
50 | val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); | ||
51 | apic_write_around(APIC_LDR, val); | ||
52 | } | ||
53 | |||
54 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | ||
55 | { | ||
56 | return phys_map; | ||
57 | } | ||
58 | |||
59 | static inline void clustered_apic_check(void) | ||
60 | { | ||
61 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", | ||
62 | "Flat", nr_ioapics); | ||
63 | } | ||
64 | |||
65 | static inline int multi_timer_check(int apic, int irq) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static inline int apicid_to_node(int logical_apicid) | ||
71 | { | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | /* Mapping from cpu number to logical apicid */ | ||
76 | static inline int cpu_to_logical_apicid(int cpu) | ||
77 | { | ||
78 | return 1 << cpu; | ||
79 | } | ||
80 | |||
81 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
82 | { | ||
83 | if (mps_cpu < get_physical_broadcast()) | ||
84 | return mps_cpu; | ||
85 | else | ||
86 | return BAD_APICID; | ||
87 | } | ||
88 | |||
89 | static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) | ||
90 | { | ||
91 | return physid_mask_of_physid(phys_apicid); | ||
92 | } | ||
93 | |||
94 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
95 | struct mpc_config_translation *translation_record) | ||
96 | { | ||
97 | printk("Processor #%d %ld:%ld APIC version %d\n", | ||
98 | m->mpc_apicid, | ||
99 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
100 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
101 | m->mpc_apicver); | ||
102 | return (m->mpc_apicid); | ||
103 | } | ||
104 | |||
105 | static inline void setup_portio_remap(void) | ||
106 | { | ||
107 | } | ||
108 | |||
109 | static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
110 | { | ||
111 | return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); | ||
112 | } | ||
113 | |||
114 | static inline int apic_id_registered(void) | ||
115 | { | ||
116 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | ||
117 | } | ||
118 | |||
119 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
120 | { | ||
121 | return cpus_addr(cpumask)[0]; | ||
122 | } | ||
123 | |||
124 | static inline void enable_apic_mode(void) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
129 | { | ||
130 | return cpuid_apic >> index_msb; | ||
131 | } | ||
132 | |||
133 | #endif /* __ASM_MACH_APIC_H */ | ||
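In this default (flat) subarchitecture, init_apic_ldr() and cpu_to_logical_apicid() give each CPU one bit of the 8-bit logical APIC ID, so a cpumask maps directly onto an IPI destination bitmask and at most 8 CPUs can be addressed this way. A small self-contained illustration of that scheme (sample values; not kernel code):

#include <stdio.h>

static unsigned int cpu_to_logical_apicid(int cpu)
{
	return 1u << cpu;                     /* one bit per CPU, as above */
}

int main(void)
{
	unsigned int dest = 0;

	for (int cpu = 0; cpu < 4; cpu++) {
		printf("cpu %d -> logical apicid 0x%02x\n",
		       cpu, cpu_to_logical_apicid(cpu));
		dest |= cpu_to_logical_apicid(cpu);   /* cpu_mask_to_apicid() idea */
	}
	printf("IPI destination for cpus 0-3: 0x%02x\n", dest);
	return 0;
}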
diff --git a/include/asm-i386/mach-default/mach_apicdef.h b/include/asm-i386/mach-default/mach_apicdef.h new file mode 100644 index 000000000000..7bcb350c3ee8 --- /dev/null +++ b/include/asm-i386/mach-default/mach_apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h new file mode 100644 index 000000000000..6f2b17a20089 --- /dev/null +++ b/include/asm-i386/mach-default/mach_ipi.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | ||
2 | #define __ASM_MACH_IPI_H | ||
3 | |||
4 | void send_IPI_mask_bitmask(cpumask_t mask, int vector); | ||
5 | void __send_IPI_shortcut(unsigned int shortcut, int vector); | ||
6 | |||
7 | static inline void send_IPI_mask(cpumask_t mask, int vector) | ||
8 | { | ||
9 | send_IPI_mask_bitmask(mask, vector); | ||
10 | } | ||
11 | |||
12 | static inline void send_IPI_allbutself(int vector) | ||
13 | { | ||
14 | /* | ||
15 | * If there are no other CPUs in the system then we get an APIC send | ||
16 | * error if we try to broadcast, so avoid sending IPIs in this case. | ||
17 | */ | ||
18 | if (!(num_online_cpus() > 1)) | ||
19 | return; | ||
20 | |||
21 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); | ||
22 | return; | ||
23 | } | ||
24 | |||
25 | static inline void send_IPI_all(int vector) | ||
26 | { | ||
27 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); | ||
28 | } | ||
29 | |||
30 | #endif /* __ASM_MACH_IPI_H */ | ||
diff --git a/include/asm-i386/mach-default/mach_mpparse.h b/include/asm-i386/mach-default/mach_mpparse.h new file mode 100644 index 000000000000..1d3832482580 --- /dev/null +++ b/include/asm-i386/mach-default/mach_mpparse.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | ||
2 | #define __ASM_MACH_MPPARSE_H | ||
3 | |||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
5 | struct mpc_config_translation *translation) | ||
6 | { | ||
7 | // Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
8 | } | ||
9 | |||
10 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
11 | struct mpc_config_translation *translation) | ||
12 | { | ||
13 | } | ||
14 | |||
15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | ||
16 | char *productid) | ||
17 | { | ||
18 | return 0; | ||
19 | } | ||
20 | |||
21 | /* Hook from generic ACPI tables.c */ | ||
22 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
23 | { | ||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | |||
28 | #endif /* __ASM_MACH_MPPARSE_H */ | ||
diff --git a/include/asm-i386/mach-default/mach_mpspec.h b/include/asm-i386/mach-default/mach_mpspec.h new file mode 100644 index 000000000000..6b5dadcf1d0e --- /dev/null +++ b/include/asm-i386/mach-default/mach_mpspec.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | ||
2 | #define __ASM_MACH_MPSPEC_H | ||
3 | |||
4 | #define MAX_IRQ_SOURCES 256 | ||
5 | |||
6 | #define MAX_MP_BUSSES 32 | ||
7 | |||
8 | #endif /* __ASM_MACH_MPSPEC_H */ | ||
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h new file mode 100644 index 000000000000..521e227db679 --- /dev/null +++ b/include/asm-i386/mach-default/mach_reboot.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * arch/i386/mach-generic/mach_reboot.h | ||
3 | * | ||
4 | * Machine specific reboot functions for generic. | ||
5 | * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_REBOOT_H | ||
8 | #define _MACH_REBOOT_H | ||
9 | |||
10 | static inline void kb_wait(void) | ||
11 | { | ||
12 | int i; | ||
13 | |||
14 | for (i = 0; i < 0x10000; i++) | ||
15 | if ((inb_p(0x64) & 0x02) == 0) | ||
16 | break; | ||
17 | } | ||
18 | |||
19 | static inline void mach_reboot(void) | ||
20 | { | ||
21 | int i; | ||
22 | for (i = 0; i < 100; i++) { | ||
23 | kb_wait(); | ||
24 | udelay(50); | ||
25 | outb(0xfe, 0x64); /* pulse reset low */ | ||
26 | udelay(50); | ||
27 | } | ||
28 | } | ||
29 | |||
30 | #endif /* !_MACH_REBOOT_H */ | ||
diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h new file mode 100644 index 000000000000..b749aa44a86f --- /dev/null +++ b/include/asm-i386/mach-default/mach_time.h | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * include/asm-i386/mach-default/mach_time.h | ||
3 | * | ||
4 | * Machine specific set RTC function for generic. | ||
5 | * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_TIME_H | ||
8 | #define _MACH_TIME_H | ||
9 | |||
10 | #include <linux/mc146818rtc.h> | ||
11 | |||
12 | /* timing window for calling set_rtc_mmss(): about 500 ms into the second */ | ||
13 | /* used in arch/i386/time.c::do_timer_interrupt() */ | ||
14 | #define USEC_AFTER 500000 | ||
15 | #define USEC_BEFORE 500000 | ||
16 | |||
17 | /* | ||
18 | * In order to set the CMOS clock precisely, set_rtc_mmss has to be | ||
19 | * called 500 ms after the second nowtime has started, because when | ||
20 | * nowtime is written into the registers of the CMOS clock, it will | ||
21 | * jump to the next second precisely 500 ms later. Check the Motorola | ||
22 | * MC146818A or Dallas DS12887 data sheet for details. | ||
23 | * | ||
24 | * BUG: This routine does not handle hour overflow properly; it just | ||
25 | * sets the minutes. Usually you'll only notice that after reboot! | ||
26 | */ | ||
27 | static inline int mach_set_rtc_mmss(unsigned long nowtime) | ||
28 | { | ||
29 | int retval = 0; | ||
30 | int real_seconds, real_minutes, cmos_minutes; | ||
31 | unsigned char save_control, save_freq_select; | ||
32 | |||
33 | save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ | ||
34 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); | ||
35 | |||
36 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ | ||
37 | CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); | ||
38 | |||
39 | cmos_minutes = CMOS_READ(RTC_MINUTES); | ||
40 | if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
41 | BCD_TO_BIN(cmos_minutes); | ||
42 | |||
43 | /* | ||
44 | * Since we're only adjusting minutes and seconds, | ||
45 | * don't interfere with hour overflow. This avoids | ||
46 | * messing with unknown time zones but requires your | ||
47 | * RTC not to be off by more than 15 minutes. | ||
48 | */ | ||
49 | real_seconds = nowtime % 60; | ||
50 | real_minutes = nowtime / 60; | ||
51 | if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) | ||
52 | real_minutes += 30; /* correct for half hour time zone */ | ||
53 | real_minutes %= 60; | ||
54 | |||
55 | if (abs(real_minutes - cmos_minutes) < 30) { | ||
56 | if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
57 | BIN_TO_BCD(real_seconds); | ||
58 | BIN_TO_BCD(real_minutes); | ||
59 | } | ||
60 | CMOS_WRITE(real_seconds,RTC_SECONDS); | ||
61 | CMOS_WRITE(real_minutes,RTC_MINUTES); | ||
62 | } else { | ||
63 | printk(KERN_WARNING | ||
64 | "set_rtc_mmss: can't update from %d to %d\n", | ||
65 | cmos_minutes, real_minutes); | ||
66 | retval = -1; | ||
67 | } | ||
68 | |||
69 | /* The following flags have to be released exactly in this order, | ||
70 | * otherwise the DS12887 (popular MC146818A clone with integrated | ||
71 | * battery and quartz) will not reset the oscillator and will not | ||
72 | * update precisely 500 ms later. You won't find this mentioned in | ||
73 | * the Dallas Semiconductor data sheets, but who believes data | ||
74 | * sheets anyway ... -- Markus Kuhn | ||
75 | */ | ||
76 | CMOS_WRITE(save_control, RTC_CONTROL); | ||
77 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | ||
78 | |||
79 | return retval; | ||
80 | } | ||
81 | |||
82 | static inline unsigned long mach_get_cmos_time(void) | ||
83 | { | ||
84 | unsigned int year, mon, day, hour, min, sec; | ||
85 | int i; | ||
86 | |||
87 | /* The Linux interpretation of the CMOS clock register contents: | ||
88 | * When the Update-In-Progress (UIP) flag goes from 1 to 0, the | ||
89 | * RTC registers show the second which has precisely just started. | ||
90 | * Let's hope other operating systems interpret the RTC the same way. | ||
91 | */ | ||
92 | /* read RTC exactly on falling edge of update flag */ | ||
93 | for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ | ||
94 | if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) | ||
95 | break; | ||
96 | for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ | ||
97 | if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) | ||
98 | break; | ||
99 | do { /* Isn't this overkill? UIP above should guarantee consistency */ | ||
100 | sec = CMOS_READ(RTC_SECONDS); | ||
101 | min = CMOS_READ(RTC_MINUTES); | ||
102 | hour = CMOS_READ(RTC_HOURS); | ||
103 | day = CMOS_READ(RTC_DAY_OF_MONTH); | ||
104 | mon = CMOS_READ(RTC_MONTH); | ||
105 | year = CMOS_READ(RTC_YEAR); | ||
106 | } while (sec != CMOS_READ(RTC_SECONDS)); | ||
107 | if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
108 | { | ||
109 | BCD_TO_BIN(sec); | ||
110 | BCD_TO_BIN(min); | ||
111 | BCD_TO_BIN(hour); | ||
112 | BCD_TO_BIN(day); | ||
113 | BCD_TO_BIN(mon); | ||
114 | BCD_TO_BIN(year); | ||
115 | } | ||
116 | if ((year += 1900) < 1970) | ||
117 | year += 100; | ||
118 | |||
119 | return mktime(year, mon, day, hour, min, sec); | ||
120 | } | ||
121 | |||
122 | #endif /* !_MACH_TIME_H */ | ||
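Two pieces of arithmetic in mach_set_rtc_mmss() above are easy to check in isolation: the BCD-to-binary conversion of the RTC minutes register and the "correct for half hour time zone" adjustment. A self-contained demo with sample values (the BCD macro mirrors the kernel's, but this is only an illustration):

#include <stdio.h>
#include <stdlib.h>

#define BCD_TO_BIN(x) ((x) = ((x) & 0x0f) + ((x) >> 4) * 10)

int main(void)
{
	unsigned long nowtime = 12345678;   /* arbitrary seconds count          */
	int cmos_minutes = 0x47;            /* raw BCD value read from the RTC  */
	int real_minutes, real_seconds;

	BCD_TO_BIN(cmos_minutes);           /* 0x47 -> 47 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;         /* half-hour time zone correction */
	real_minutes %= 60;

	printf("cmos=%d target=%d:%02d -> %s\n", cmos_minutes, real_minutes,
	       real_seconds,
	       abs(real_minutes - cmos_minutes) < 30 ? "update" : "refuse");
	return 0;
}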
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h new file mode 100644 index 000000000000..4b9703bb0288 --- /dev/null +++ b/include/asm-i386/mach-default/mach_timer.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * include/asm-i386/mach-default/mach_timer.h | ||
3 | * | ||
4 | * Machine specific calibrate_tsc() for generic. | ||
5 | * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | /* ------ Calibrate the TSC ------- | ||
8 | * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). | ||
9 | * Too much 64-bit arithmetic here to do this cleanly in C, and for | ||
10 | * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) | ||
11 | * output busy loop as low as possible. We avoid reading the CTC registers | ||
12 | * directly because of the awkward 8-bit access mechanism of the 82C54 | ||
13 | * device. | ||
14 | */ | ||
15 | #ifndef _MACH_TIMER_H | ||
16 | #define _MACH_TIMER_H | ||
17 | |||
18 | #define CALIBRATE_LATCH (5 * LATCH) | ||
19 | |||
20 | static inline void mach_prepare_counter(void) | ||
21 | { | ||
22 | /* Set the Gate high, disable speaker */ | ||
23 | outb((inb(0x61) & ~0x02) | 0x01, 0x61); | ||
24 | |||
25 | /* | ||
26 | * Now let's take care of CTC channel 2 | ||
27 | * | ||
28 | * Set the Gate high, program CTC channel 2 for mode 0, | ||
29 | * (interrupt on terminal count mode), binary count, | ||
30 | * load 5 * LATCH count, (LSB and MSB) to begin countdown. | ||
31 | * | ||
32 | * Some devices need a delay here. | ||
33 | */ | ||
34 | outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ | ||
35 | outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ | ||
36 | outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ | ||
37 | } | ||
38 | |||
39 | static inline void mach_countup(unsigned long *count_p) | ||
40 | { | ||
41 | unsigned long count = 0; | ||
42 | do { | ||
43 | count++; | ||
44 | } while ((inb_p(0x61) & 0x20) == 0); | ||
45 | *count_p = count; | ||
46 | } | ||
47 | |||
48 | #endif /* !_MACH_TIMER_H */ | ||
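The comment above says calibrate_tsc() returns 2^32 * (1 / (TSC clocks per usec)). A self-contained sketch of the arithmetic behind that scale factor, assuming the standard CLOCK_TICK_RATE, a CALIBRATE_TIME derived from CALIBRATE_LATCH, and a made-up measured TSC delta; the real code does the division with inline assembly, so this only shows the math, not the implementation:

#include <stdio.h>
#include <stdint.h>

#define CLOCK_TICK_RATE 1193182
#define HZ              100
#define LATCH           ((CLOCK_TICK_RATE + HZ/2) / HZ)
#define CALIBRATE_LATCH (5 * LATCH)                 /* as in the header        */
#define CALIBRATE_TIME  (5 * 1000000 / HZ)          /* ~usec for that count    */

int main(void)
{
	uint64_t tsc_delta = 49987123;  /* made-up TSC count over the window (~1 GHz) */

	/* 2^32 * (1 / (TSC clocks per usec)), as described above */
	uint64_t tsc_quotient = ((uint64_t)CALIBRATE_TIME << 32) / tsc_delta;

	/* a fast gettimeoffset can then turn any TSC delta into usec with
	 * one multiply and a shift: */
	uint64_t delta = 123456;
	uint64_t usec  = (delta * tsc_quotient) >> 32;

	printf("%d PIT ticks ~ %d usec, tsc_quotient = %llu\n",
	       CALIBRATE_LATCH, CALIBRATE_TIME, (unsigned long long)tsc_quotient);
	printf("%llu TSC clocks ~ %llu usec\n",
	       (unsigned long long)delta, (unsigned long long)usec);
	return 0;
}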
diff --git a/include/asm-i386/mach-default/mach_traps.h b/include/asm-i386/mach-default/mach_traps.h new file mode 100644 index 000000000000..625438b8a6eb --- /dev/null +++ b/include/asm-i386/mach-default/mach_traps.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * include/asm-i386/mach-default/mach_traps.h | ||
3 | * | ||
4 | * Machine specific NMI handling for generic. | ||
5 | * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_TRAPS_H | ||
8 | #define _MACH_TRAPS_H | ||
9 | |||
10 | #include <asm/mc146818rtc.h> | ||
11 | |||
12 | static inline void clear_mem_error(unsigned char reason) | ||
13 | { | ||
14 | reason = (reason & 0xf) | 4; | ||
15 | outb(reason, 0x61); | ||
16 | } | ||
17 | |||
18 | static inline unsigned char get_nmi_reason(void) | ||
19 | { | ||
20 | return inb(0x61); | ||
21 | } | ||
22 | |||
23 | static inline void reassert_nmi(void) | ||
24 | { | ||
25 | int old_reg = -1; | ||
26 | |||
27 | if (do_i_have_lock_cmos()) | ||
28 | old_reg = current_lock_cmos_reg(); | ||
29 | else | ||
30 | lock_cmos(0); /* register doesn't matter here */ | ||
31 | outb(0x8f, 0x70); | ||
32 | inb(0x71); /* dummy */ | ||
33 | outb(0x0f, 0x70); | ||
34 | inb(0x71); /* dummy */ | ||
35 | if (old_reg >= 0) | ||
36 | outb(old_reg, 0x70); | ||
37 | else | ||
38 | unlock_cmos(); | ||
39 | } | ||
40 | |||
41 | #endif /* !_MACH_TRAPS_H */ | ||
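The byte returned by get_nmi_reason() packs the NMI sources into individual bits of port 0x61. A small self-contained decoder using the conventional PC bit meanings (bit 7 = memory parity/SERR#, bit 6 = I/O channel check); these assignments are standard PC architecture, assumed here rather than taken from this header:

#include <stdio.h>

/* Conventional meanings of the NMI status bits read from port 0x61. */
static void decode_nmi_reason(unsigned char reason)
{
	if (reason & 0x80)
		printf("memory parity error (SERR#)\n");
	if (reason & 0x40)
		printf("I/O channel check (IOCHK#)\n");
	if (!(reason & 0xc0))
		printf("unknown NMI source\n");
}

int main(void)
{
	decode_nmi_reason(0x80);
	decode_nmi_reason(0x40);
	decode_nmi_reason(0x00);
	return 0;
}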
diff --git a/include/asm-i386/mach-default/mach_wakecpu.h b/include/asm-i386/mach-default/mach_wakecpu.h new file mode 100644 index 000000000000..673b85c9b273 --- /dev/null +++ b/include/asm-i386/mach-default/mach_wakecpu.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef __ASM_MACH_WAKECPU_H | ||
2 | #define __ASM_MACH_WAKECPU_H | ||
3 | |||
4 | /* | ||
5 | * This file copes with machines that wake up secondary CPUs by the | ||
6 | * INIT, INIT, STARTUP sequence. | ||
7 | */ | ||
8 | |||
9 | #define WAKE_SECONDARY_VIA_INIT | ||
10 | |||
11 | #define TRAMPOLINE_LOW phys_to_virt(0x467) | ||
12 | #define TRAMPOLINE_HIGH phys_to_virt(0x469) | ||
13 | |||
14 | #define boot_cpu_apicid boot_cpu_physical_apicid | ||
15 | |||
16 | static inline void wait_for_init_deassert(atomic_t *deassert) | ||
17 | { | ||
18 | while (!atomic_read(deassert)); | ||
19 | return; | ||
20 | } | ||
21 | |||
22 | /* Nothing to do for most platforms, since cleared by the INIT cycle */ | ||
23 | static inline void smp_callin_clear_local_apic(void) | ||
24 | { | ||
25 | } | ||
26 | |||
27 | static inline void store_NMI_vector(unsigned short *high, unsigned short *low) | ||
28 | { | ||
29 | } | ||
30 | |||
31 | static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | ||
32 | { | ||
33 | } | ||
34 | |||
35 | #if APIC_DEBUG | ||
36 | #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) | ||
37 | #else | ||
38 | #define inquire_remote_apic(apicid) {} | ||
39 | #endif | ||
40 | |||
41 | #endif /* __ASM_MACH_WAKECPU_H */ | ||
diff --git a/include/asm-i386/mach-default/pci-functions.h b/include/asm-i386/mach-default/pci-functions.h new file mode 100644 index 000000000000..ed0bab427354 --- /dev/null +++ b/include/asm-i386/mach-default/pci-functions.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * PCI BIOS function numbering for conventional PCI BIOS | ||
3 | * systems | ||
4 | */ | ||
5 | |||
6 | #define PCIBIOS_PCI_FUNCTION_ID 0xb1XX | ||
7 | #define PCIBIOS_PCI_BIOS_PRESENT 0xb101 | ||
8 | #define PCIBIOS_FIND_PCI_DEVICE 0xb102 | ||
9 | #define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 | ||
10 | #define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 | ||
11 | #define PCIBIOS_READ_CONFIG_BYTE 0xb108 | ||
12 | #define PCIBIOS_READ_CONFIG_WORD 0xb109 | ||
13 | #define PCIBIOS_READ_CONFIG_DWORD 0xb10a | ||
14 | #define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b | ||
15 | #define PCIBIOS_WRITE_CONFIG_WORD 0xb10c | ||
16 | #define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d | ||
17 | #define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e | ||
18 | #define PCIBIOS_SET_PCI_HW_INT 0xb10f | ||
19 | |||
diff --git a/include/asm-i386/mach-default/setup_arch_post.h b/include/asm-i386/mach-default/setup_arch_post.h new file mode 100644 index 000000000000..2fc4888721f6 --- /dev/null +++ b/include/asm-i386/mach-default/setup_arch_post.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /** | ||
2 | * machine_specific_memory_setup - Hook for machine specific memory setup. | ||
3 | * | ||
4 | * Description: | ||
5 | * This is included late in kernel/setup.c so that it can make | ||
6 | * use of all of the static functions. | ||
7 | **/ | ||
8 | |||
9 | static char * __init machine_specific_memory_setup(void) | ||
10 | { | ||
11 | char *who; | ||
12 | |||
13 | |||
14 | who = "BIOS-e820"; | ||
15 | |||
16 | /* | ||
17 | * Try to copy the BIOS-supplied E820-map. | ||
18 | * | ||
19 | * Otherwise fake a memory map; one section from 0k->640k, | ||
20 | * the next section from 1mb->appropriate_mem_k | ||
21 | */ | ||
22 | sanitize_e820_map(E820_MAP, &E820_MAP_NR); | ||
23 | if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) { | ||
24 | unsigned long mem_size; | ||
25 | |||
26 | /* compare results from other methods and take the greater */ | ||
27 | if (ALT_MEM_K < EXT_MEM_K) { | ||
28 | mem_size = EXT_MEM_K; | ||
29 | who = "BIOS-88"; | ||
30 | } else { | ||
31 | mem_size = ALT_MEM_K; | ||
32 | who = "BIOS-e801"; | ||
33 | } | ||
34 | |||
35 | e820.nr_map = 0; | ||
36 | add_memory_region(0, LOWMEMSIZE(), E820_RAM); | ||
37 | add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); | ||
38 | } | ||
39 | return who; | ||
40 | } | ||
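When the BIOS e820 map can't be copied, the fallback path above builds a two-region map: conventional memory from 0 to LOWMEMSIZE(), and mem_size KB of RAM starting at HIGH_MEMORY. A self-contained sketch of those two add_memory_region() calls, using 640 KB and 1 MB as assumed values for LOWMEMSIZE() and HIGH_MEMORY (both are defined elsewhere in the setup code and may differ slightly):

#include <stdio.h>

#define E820_RAM     1
#define LOWMEMSIZE() (640 * 1024)       /* assumed: conventional memory   */
#define HIGH_MEMORY  (1024 * 1024)      /* assumed: start of extended RAM */

struct region { unsigned long long start, size; int type; };

static struct region map[2];
static int nr_map;

static void add_memory_region(unsigned long long start,
			      unsigned long long size, int type)
{
	map[nr_map].start = start;
	map[nr_map].size  = size;
	map[nr_map].type  = type;
	nr_map++;
}

int main(void)
{
	unsigned long mem_size = 65536;   /* e.g. 64 MB of extended memory, in KB */

	/* the same two calls the fallback path above makes */
	add_memory_region(0, LOWMEMSIZE(), E820_RAM);
	add_memory_region(HIGH_MEMORY, (unsigned long long)mem_size << 10, E820_RAM);

	for (int i = 0; i < nr_map; i++)
		printf("region %d: %#llx - %#llx RAM\n", i, map[i].start,
		       map[i].start + map[i].size);
	return 0;
}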
diff --git a/include/asm-i386/mach-default/setup_arch_pre.h b/include/asm-i386/mach-default/setup_arch_pre.h new file mode 100644 index 000000000000..fb42099e7bd4 --- /dev/null +++ b/include/asm-i386/mach-default/setup_arch_pre.h | |||
@@ -0,0 +1,5 @@ | |||
1 | /* Hook to call BIOS initialisation function */ | ||
2 | |||
3 | /* no action for generic */ | ||
4 | |||
5 | #define ARCH_SETUP | ||
diff --git a/include/asm-i386/mach-default/smpboot_hooks.h b/include/asm-i386/mach-default/smpboot_hooks.h new file mode 100644 index 000000000000..7f45f6311059 --- /dev/null +++ b/include/asm-i386/mach-default/smpboot_hooks.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* two abstractions specific to kernel/smpboot.c, mainly to cater to visws | ||
2 | * which needs to alter them. */ | ||
3 | |||
4 | static inline void smpboot_clear_io_apic_irqs(void) | ||
5 | { | ||
6 | io_apic_irqs = 0; | ||
7 | } | ||
8 | |||
9 | static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) | ||
10 | { | ||
11 | CMOS_WRITE(0xa, 0xf); | ||
12 | local_flush_tlb(); | ||
13 | Dprintk("1.\n"); | ||
14 | *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; | ||
15 | Dprintk("2.\n"); | ||
16 | *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; | ||
17 | Dprintk("3.\n"); | ||
18 | } | ||
19 | |||
20 | static inline void smpboot_restore_warm_reset_vector(void) | ||
21 | { | ||
22 | /* | ||
23 | * Install writable page 0 entry to set BIOS data area. | ||
24 | */ | ||
25 | local_flush_tlb(); | ||
26 | |||
27 | /* | ||
28 | * Paranoid: Set warm reset code and vector here back | ||
29 | * to default values. | ||
30 | */ | ||
31 | CMOS_WRITE(0, 0xf); | ||
32 | |||
33 | *((volatile long *) phys_to_virt(0x467)) = 0; | ||
34 | } | ||
35 | |||
36 | static inline void smpboot_setup_io_apic(void) | ||
37 | { | ||
38 | /* | ||
39 | * Here we can be sure that there is an IO-APIC in the system. Let's | ||
40 | * go and set it up: | ||
41 | */ | ||
42 | if (!skip_ioapic_setup && nr_ioapics) | ||
43 | setup_IO_APIC(); | ||
44 | } | ||
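smpboot_setup_warm_reset_vector() above stores the trampoline's physical start address in the BIOS warm-reset vector as a real-mode segment (written to TRAMPOLINE_HIGH, 0x469) and offset (written to TRAMPOLINE_LOW, 0x467); the woken CPU recombines them as segment*16 + offset. A self-contained illustration of that split with an example address:

#include <stdio.h>

int main(void)
{
	unsigned long start_eip = 0x9a000;          /* example trampoline address */

	unsigned short segment = start_eip >> 4;    /* written to TRAMPOLINE_HIGH */
	unsigned short offset  = start_eip & 0xf;   /* written to TRAMPOLINE_LOW  */

	printf("start_eip %#lx -> %04x:%04x -> %#lx\n",
	       start_eip, segment, offset,
	       (unsigned long)segment * 16 + offset);
	return 0;
}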
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h new file mode 100644 index 000000000000..ceab2c464b13 --- /dev/null +++ b/include/asm-i386/mach-es7000/mach_apic.h | |||
@@ -0,0 +1,207 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | extern u8 bios_cpu_apicid[]; | ||
5 | |||
6 | #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) | ||
7 | #define esr_disable (1) | ||
8 | |||
9 | static inline int apic_id_registered(void) | ||
10 | { | ||
11 | return (1); | ||
12 | } | ||
13 | |||
14 | static inline cpumask_t target_cpus(void) | ||
15 | { | ||
16 | #if defined CONFIG_ES7000_CLUSTERED_APIC | ||
17 | return CPU_MASK_ALL; | ||
18 | #else | ||
19 | return cpumask_of_cpu(smp_processor_id()); | ||
20 | #endif | ||
21 | } | ||
22 | #define TARGET_CPUS (target_cpus()) | ||
23 | |||
24 | #if defined CONFIG_ES7000_CLUSTERED_APIC | ||
25 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | ||
26 | #define INT_DELIVERY_MODE (dest_LowestPrio) | ||
27 | #define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ | ||
28 | #define NO_BALANCE_IRQ (1) | ||
29 | #undef WAKE_SECONDARY_VIA_INIT | ||
30 | #define WAKE_SECONDARY_VIA_MIP | ||
31 | #else | ||
32 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | ||
33 | #define INT_DELIVERY_MODE (dest_Fixed) | ||
34 | #define INT_DEST_MODE (0) /* phys delivery to target procs */ | ||
35 | #define NO_BALANCE_IRQ (0) | ||
36 | #undef APIC_DEST_LOGICAL | ||
37 | #define APIC_DEST_LOGICAL 0x0 | ||
38 | #define WAKE_SECONDARY_VIA_INIT | ||
39 | #endif | ||
40 | |||
41 | #define NO_IOAPIC_CHECK (1) | ||
42 | |||
43 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | ||
44 | { | ||
45 | return 0; | ||
46 | } | ||
47 | static inline unsigned long check_apicid_present(int bit) | ||
48 | { | ||
49 | return physid_isset(bit, phys_cpu_present_map); | ||
50 | } | ||
51 | |||
52 | #define apicid_cluster(apicid) (apicid & 0xF0) | ||
53 | |||
54 | static inline unsigned long calculate_ldr(int cpu) | ||
55 | { | ||
56 | unsigned long id; | ||
57 | id = xapic_phys_to_log_apicid(cpu); | ||
58 | return (SET_APIC_LOGICAL_ID(id)); | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * Set up the logical destination ID. | ||
63 | * | ||
64 | * Intel recommends setting DFR, LDR and TPR before enabling | ||
65 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel | ||
66 | * document number 292116). So here it goes... | ||
67 | */ | ||
68 | static inline void init_apic_ldr(void) | ||
69 | { | ||
70 | unsigned long val; | ||
71 | int cpu = smp_processor_id(); | ||
72 | |||
73 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | ||
74 | val = calculate_ldr(cpu); | ||
75 | apic_write_around(APIC_LDR, val); | ||
76 | } | ||
77 | |||
78 | extern void es7000_sw_apic(void); | ||
79 | static inline void enable_apic_mode(void) | ||
80 | { | ||
81 | es7000_sw_apic(); | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | extern int apic_version [MAX_APICS]; | ||
86 | static inline void clustered_apic_check(void) | ||
87 | { | ||
88 | int apic = bios_cpu_apicid[smp_processor_id()]; | ||
89 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | ||
90 | (apic_version[apic] == 0x14) ? | ||
91 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); | ||
92 | } | ||
93 | |||
94 | static inline int multi_timer_check(int apic, int irq) | ||
95 | { | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static inline int apicid_to_node(int logical_apicid) | ||
100 | { | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | |||
105 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
106 | { | ||
107 | if (!mps_cpu) | ||
108 | return boot_cpu_physical_apicid; | ||
109 | else if (mps_cpu < NR_CPUS) | ||
110 | return (int) bios_cpu_apicid[mps_cpu]; | ||
111 | else | ||
112 | return BAD_APICID; | ||
113 | } | ||
114 | |||
115 | static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) | ||
116 | { | ||
117 | static int id = 0; | ||
118 | physid_mask_t mask; | ||
119 | mask = physid_mask_of_physid(id); | ||
120 | ++id; | ||
121 | return mask; | ||
122 | } | ||
123 | |||
124 | extern u8 cpu_2_logical_apicid[]; | ||
125 | /* Mapping from cpu number to logical apicid */ | ||
126 | static inline int cpu_to_logical_apicid(int cpu) | ||
127 | { | ||
128 | if (cpu >= NR_CPUS) | ||
129 | return BAD_APICID; | ||
130 | return (int)cpu_2_logical_apicid[cpu]; | ||
131 | } | ||
132 | |||
133 | static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) | ||
134 | { | ||
135 | printk("Processor #%d %ld:%ld APIC version %d\n", | ||
136 | m->mpc_apicid, | ||
137 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
138 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
139 | m->mpc_apicver); | ||
140 | return (m->mpc_apicid); | ||
141 | } | ||
142 | |||
143 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | ||
144 | { | ||
145 | /* For clustered we don't have a good way to do this yet - hack */ | ||
146 | return physids_promote(0xff); | ||
147 | } | ||
148 | |||
149 | |||
150 | static inline void setup_portio_remap(void) | ||
151 | { | ||
152 | } | ||
153 | |||
154 | extern unsigned int boot_cpu_physical_apicid; | ||
155 | static inline int check_phys_apicid_present(int cpu_physical_apicid) | ||
156 | { | ||
157 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | ||
158 | return (1); | ||
159 | } | ||
160 | |||
161 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
162 | { | ||
163 | int num_bits_set; | ||
164 | int cpus_found = 0; | ||
165 | int cpu; | ||
166 | int apicid; | ||
167 | |||
168 | num_bits_set = cpus_weight(cpumask); | ||
169 | /* Return id to all */ | ||
170 | if (num_bits_set == NR_CPUS) | ||
171 | #if defined CONFIG_ES7000_CLUSTERED_APIC | ||
172 | return 0xFF; | ||
173 | #else | ||
174 | return cpu_to_logical_apicid(0); | ||
175 | #endif | ||
176 | /* | ||
177 | * The cpus in the mask must all be in the same APIC cluster. If they | ||
178 | * are not, return the default value of TARGET_CPUS. | ||
179 | */ | ||
180 | cpu = first_cpu(cpumask); | ||
181 | apicid = cpu_to_logical_apicid(cpu); | ||
182 | while (cpus_found < num_bits_set) { | ||
183 | if (cpu_isset(cpu, cpumask)) { | ||
184 | int new_apicid = cpu_to_logical_apicid(cpu); | ||
185 | if (apicid_cluster(apicid) != | ||
186 | apicid_cluster(new_apicid)){ | ||
187 | printk ("%s: Not a valid mask!\n",__FUNCTION__); | ||
188 | #if defined CONFIG_ES7000_CLUSTERED_APIC | ||
189 | return 0xFF; | ||
190 | #else | ||
191 | return cpu_to_logical_apicid(0); | ||
192 | #endif | ||
193 | } | ||
194 | apicid = new_apicid; | ||
195 | cpus_found++; | ||
196 | } | ||
197 | cpu++; | ||
198 | } | ||
199 | return apicid; | ||
200 | } | ||
201 | |||
202 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
203 | { | ||
204 | return cpuid_apic >> index_msb; | ||
205 | } | ||
206 | |||
207 | #endif /* __ASM_MACH_APIC_H */ | ||
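On ES7000 the apicid_cluster() macro above takes the high nibble of a logical APIC ID, and cpu_mask_to_apicid() rejects any mask whose CPUs do not all share that cluster. A self-contained illustration of the same check with example logical IDs:

#include <stdio.h>

#define apicid_cluster(apicid) ((apicid) & 0xF0)

static int same_cluster(const unsigned char *apicid, int n)
{
	for (int i = 1; i < n; i++)
		if (apicid_cluster(apicid[i]) != apicid_cluster(apicid[0]))
			return 0;
	return 1;
}

int main(void)
{
	unsigned char ok[]  = { 0x51, 0x52, 0x54 };   /* all in cluster 0x50    */
	unsigned char bad[] = { 0x51, 0x61 };         /* clusters 0x50 and 0x60 */

	printf("ok mask:  %s\n", same_cluster(ok, 3)  ? "valid" : "not a valid mask");
	printf("bad mask: %s\n", same_cluster(bad, 2) ? "valid" : "not a valid mask");
	return 0;
}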
diff --git a/include/asm-i386/mach-es7000/mach_apicdef.h b/include/asm-i386/mach-es7000/mach_apicdef.h new file mode 100644 index 000000000000..a58ab5a75c8c --- /dev/null +++ b/include/asm-i386/mach-es7000/mach_apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-i386/mach-es7000/mach_ipi.h b/include/asm-i386/mach-es7000/mach_ipi.h new file mode 100644 index 000000000000..5e61bd220b06 --- /dev/null +++ b/include/asm-i386/mach-es7000/mach_ipi.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | ||
2 | #define __ASM_MACH_IPI_H | ||
3 | |||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | ||
5 | |||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | ||
7 | { | ||
8 | send_IPI_mask_sequence(mask, vector); | ||
9 | } | ||
10 | |||
11 | static inline void send_IPI_allbutself(int vector) | ||
12 | { | ||
13 | cpumask_t mask = cpu_online_map; | ||
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | if (!cpus_empty(mask)) | ||
16 | send_IPI_mask(mask, vector); | ||
17 | } | ||
18 | |||
19 | static inline void send_IPI_all(int vector) | ||
20 | { | ||
21 | send_IPI_mask(cpu_online_map, vector); | ||
22 | } | ||
23 | |||
24 | #endif /* __ASM_MACH_IPI_H */ | ||
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h new file mode 100644 index 000000000000..85809e0898d7 --- /dev/null +++ b/include/asm-i386/mach-es7000/mach_mpparse.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | ||
2 | #define __ASM_MACH_MPPARSE_H | ||
3 | |||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
5 | struct mpc_config_translation *translation) | ||
6 | { | ||
7 | Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
8 | } | ||
9 | |||
10 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
11 | struct mpc_config_translation *translation) | ||
12 | { | ||
13 | } | ||
14 | |||
15 | extern int parse_unisys_oem (char *oemptr, int oem_entries); | ||
16 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr, int *length); | ||
17 | |||
18 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | ||
19 | char *productid) | ||
20 | { | ||
21 | if (mpc->mpc_oemptr) { | ||
22 | struct mp_config_oemtable *oem_table = | ||
23 | (struct mp_config_oemtable *)mpc->mpc_oemptr; | ||
24 | if (!strncmp(oem, "UNISYS", 6)) | ||
25 | return parse_unisys_oem((char *)oem_table, oem_table->oem_length); | ||
26 | } | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | /* Hook from generic ACPI tables.c */ | ||
31 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
32 | { | ||
33 | unsigned long oem_addr; | ||
34 | int oem_entries; | ||
35 | if (!find_unisys_acpi_oem_table(&oem_addr, &oem_entries)) | ||
36 | return parse_unisys_oem((char *)oem_addr, oem_entries); | ||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | |||
41 | #endif /* __ASM_MACH_MPPARSE_H */ | ||
diff --git a/include/asm-i386/mach-es7000/mach_mpspec.h b/include/asm-i386/mach-es7000/mach_mpspec.h new file mode 100644 index 000000000000..b1f5039d4506 --- /dev/null +++ b/include/asm-i386/mach-es7000/mach_mpspec.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | ||
2 | #define __ASM_MACH_MPSPEC_H | ||
3 | |||
4 | #define MAX_IRQ_SOURCES 256 | ||
5 | |||
6 | #define MAX_MP_BUSSES 256 | ||
7 | |||
8 | #endif /* __ASM_MACH_MPSPEC_H */ | ||
diff --git a/include/asm-i386/mach-es7000/mach_wakecpu.h b/include/asm-i386/mach-es7000/mach_wakecpu.h new file mode 100644 index 000000000000..efc903b73486 --- /dev/null +++ b/include/asm-i386/mach-es7000/mach_wakecpu.h | |||
@@ -0,0 +1,58 @@ | |||
1 | #ifndef __ASM_MACH_WAKECPU_H | ||
2 | #define __ASM_MACH_WAKECPU_H | ||
3 | |||
4 | /* | ||
5 | * This file copes with machines that wake up secondary CPUs by the | ||
6 | * INIT, INIT, STARTUP sequence. | ||
7 | */ | ||
8 | |||
9 | #ifdef CONFIG_ES7000_CLUSTERED_APIC | ||
10 | #define WAKE_SECONDARY_VIA_MIP | ||
11 | #else | ||
12 | #define WAKE_SECONDARY_VIA_INIT | ||
13 | #endif | ||
14 | |||
15 | #ifdef WAKE_SECONDARY_VIA_MIP | ||
16 | extern int es7000_start_cpu(int cpu, unsigned long eip); | ||
17 | static inline int | ||
18 | wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) | ||
19 | { | ||
20 | int boot_error = 0; | ||
21 | boot_error = es7000_start_cpu(phys_apicid, start_eip); | ||
22 | return boot_error; | ||
23 | } | ||
24 | #endif | ||
25 | |||
26 | #define TRAMPOLINE_LOW phys_to_virt(0x467) | ||
27 | #define TRAMPOLINE_HIGH phys_to_virt(0x469) | ||
28 | |||
29 | #define boot_cpu_apicid boot_cpu_physical_apicid | ||
30 | |||
31 | static inline void wait_for_init_deassert(atomic_t *deassert) | ||
32 | { | ||
33 | #ifdef WAKE_SECONDARY_VIA_INIT | ||
34 | while (!atomic_read(deassert)); | ||
35 | #endif | ||
36 | return; | ||
37 | } | ||
38 | |||
39 | /* Nothing to do for most platforms, since cleared by the INIT cycle */ | ||
40 | static inline void smp_callin_clear_local_apic(void) | ||
41 | { | ||
42 | } | ||
43 | |||
44 | static inline void store_NMI_vector(unsigned short *high, unsigned short *low) | ||
45 | { | ||
46 | } | ||
47 | |||
48 | static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | ||
49 | { | ||
50 | } | ||
51 | |||
52 | #if APIC_DEBUG | ||
53 | #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) | ||
54 | #else | ||
55 | #define inquire_remote_apic(apicid) {} | ||
56 | #endif | ||
57 | |||
58 | #endif /* __ASM_MACH_WAKECPU_H */ | ||
diff --git a/include/asm-i386/mach-generic/irq_vectors_limits.h b/include/asm-i386/mach-generic/irq_vectors_limits.h new file mode 100644 index 000000000000..890ce3f5e09a --- /dev/null +++ b/include/asm-i386/mach-generic/irq_vectors_limits.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_LIMITS_H | ||
2 | #define _ASM_IRQ_VECTORS_LIMITS_H | ||
3 | |||
4 | /* | ||
5 | * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, | ||
6 | * even with uni-proc kernels, so use a big array. | ||
7 | * | ||
8 | * This value should be the same in both the generic and summit subarches. | ||
9 | * Change one, change 'em both. | ||
10 | */ | ||
11 | #define NR_IRQS 224 | ||
12 | #define NR_IRQ_VECTORS 1024 | ||
13 | |||
14 | #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ | ||
diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h new file mode 100644 index 000000000000..ab36d02ebede --- /dev/null +++ b/include/asm-i386/mach-generic/mach_apic.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | #include <asm/genapic.h> | ||
5 | |||
6 | #define esr_disable (genapic->ESR_DISABLE) | ||
7 | #define NO_BALANCE_IRQ (genapic->no_balance_irq) | ||
8 | #define NO_IOAPIC_CHECK (genapic->no_ioapic_check) | ||
9 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) | ||
10 | #define INT_DEST_MODE (genapic->int_dest_mode) | ||
11 | #undef APIC_DEST_LOGICAL | ||
12 | #define APIC_DEST_LOGICAL (genapic->apic_destination_logical) | ||
13 | #define TARGET_CPUS (genapic->target_cpus()) | ||
14 | #define apic_id_registered (genapic->apic_id_registered) | ||
15 | #define init_apic_ldr (genapic->init_apic_ldr) | ||
16 | #define ioapic_phys_id_map (genapic->ioapic_phys_id_map) | ||
17 | #define clustered_apic_check (genapic->clustered_apic_check) | ||
18 | #define multi_timer_check (genapic->multi_timer_check) | ||
19 | #define apicid_to_node (genapic->apicid_to_node) | ||
20 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) | ||
21 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) | ||
22 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) | ||
23 | #define mpc_apic_id (genapic->mpc_apic_id) | ||
24 | #define setup_portio_remap (genapic->setup_portio_remap) | ||
25 | #define check_apicid_present (genapic->check_apicid_present) | ||
26 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) | ||
27 | #define check_apicid_used (genapic->check_apicid_used) | ||
28 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | ||
29 | #define enable_apic_mode (genapic->enable_apic_mode) | ||
30 | #define phys_pkg_id (genapic->phys_pkg_id) | ||
31 | |||
32 | #endif /* __ASM_MACH_APIC_H */ | ||
diff --git a/include/asm-i386/mach-generic/mach_apicdef.h b/include/asm-i386/mach-generic/mach_apicdef.h new file mode 100644 index 000000000000..28ed98972ca8 --- /dev/null +++ b/include/asm-i386/mach-generic/mach_apicdef.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef _GENAPIC_MACH_APICDEF_H | ||
2 | #define _GENAPIC_MACH_APICDEF_H 1 | ||
3 | |||
4 | #ifndef APIC_DEFINITION | ||
5 | #include <asm/genapic.h> | ||
6 | |||
7 | #define GET_APIC_ID (genapic->get_apic_id) | ||
8 | #define APIC_ID_MASK (genapic->apic_id_mask) | ||
9 | #endif | ||
10 | |||
11 | #endif | ||
diff --git a/include/asm-i386/mach-generic/mach_ipi.h b/include/asm-i386/mach-generic/mach_ipi.h new file mode 100644 index 000000000000..441b0fe3ed1d --- /dev/null +++ b/include/asm-i386/mach-generic/mach_ipi.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _MACH_IPI_H | ||
2 | #define _MACH_IPI_H 1 | ||
3 | |||
4 | #include <asm/genapic.h> | ||
5 | |||
6 | #define send_IPI_mask (genapic->send_IPI_mask) | ||
7 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) | ||
8 | #define send_IPI_all (genapic->send_IPI_all) | ||
9 | |||
10 | #endif | ||
diff --git a/include/asm-i386/mach-generic/mach_mpparse.h b/include/asm-i386/mach-generic/mach_mpparse.h new file mode 100644 index 000000000000..dbd9fce54f4d --- /dev/null +++ b/include/asm-i386/mach-generic/mach_mpparse.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _MACH_MPPARSE_H | ||
2 | #define _MACH_MPPARSE_H 1 | ||
3 | |||
4 | #include <asm/genapic.h> | ||
5 | |||
6 | #define mpc_oem_bus_info (genapic->mpc_oem_bus_info) | ||
7 | #define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) | ||
8 | |||
9 | int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); | ||
10 | int acpi_madt_oem_check(char *oem_id, char *oem_table_id); | ||
11 | |||
12 | #endif | ||
diff --git a/include/asm-i386/mach-generic/mach_mpspec.h b/include/asm-i386/mach-generic/mach_mpspec.h new file mode 100644 index 000000000000..9ef0b941bb22 --- /dev/null +++ b/include/asm-i386/mach-generic/mach_mpspec.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | ||
2 | #define __ASM_MACH_MPSPEC_H | ||
3 | |||
4 | #define MAX_IRQ_SOURCES 256 | ||
5 | |||
6 | /* Summit or generic (i.e. installer) kernels need lots of bus entries. */ | ||
7 | /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ | ||
8 | #define MAX_MP_BUSSES 260 | ||
9 | |||
10 | #endif /* __ASM_MACH_MPSPEC_H */ | ||
diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h new file mode 100644 index 000000000000..e1a04494764a --- /dev/null +++ b/include/asm-i386/mach-numaq/mach_apic.h | |||
@@ -0,0 +1,151 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | #include <asm/io.h> | ||
5 | #include <linux/mmzone.h> | ||
6 | #include <linux/nodemask.h> | ||
7 | |||
8 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | ||
9 | |||
10 | static inline cpumask_t target_cpus(void) | ||
11 | { | ||
12 | return CPU_MASK_ALL; | ||
13 | } | ||
14 | |||
15 | #define TARGET_CPUS (target_cpus()) | ||
16 | |||
17 | #define NO_BALANCE_IRQ (1) | ||
18 | #define esr_disable (1) | ||
19 | |||
20 | #define NO_IOAPIC_CHECK (0) | ||
21 | |||
22 | #define INT_DELIVERY_MODE dest_LowestPrio | ||
23 | #define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ | ||
24 | |||
25 | #define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) | ||
26 | #define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) | ||
27 | #define apicid_cluster(apicid) (apicid & 0xF0) | ||
28 | |||
29 | static inline int apic_id_registered(void) | ||
30 | { | ||
31 | return 1; | ||
32 | } | ||
33 | |||
34 | static inline void init_apic_ldr(void) | ||
35 | { | ||
36 | /* Already done in NUMA-Q firmware */ | ||
37 | } | ||
38 | |||
39 | static inline void clustered_apic_check(void) | ||
40 | { | ||
41 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", | ||
42 | "NUMA-Q", nr_ioapics); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Skip adding the timer int on secondary nodes, which causes | ||
47 | * a small but painful rift in the time-space continuum. | ||
48 | */ | ||
49 | static inline int multi_timer_check(int apic, int irq) | ||
50 | { | ||
51 | return apic != 0 && irq == 0; | ||
52 | } | ||
53 | |||
54 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | ||
55 | { | ||
56 | /* We don't have a good way to do this yet - hack */ | ||
57 | return physids_promote(0xFUL); | ||
58 | } | ||
59 | |||
60 | /* Mapping from cpu number to logical apicid */ | ||
61 | extern u8 cpu_2_logical_apicid[]; | ||
62 | static inline int cpu_to_logical_apicid(int cpu) | ||
63 | { | ||
64 | if (cpu >= NR_CPUS) | ||
65 | return BAD_APICID; | ||
66 | return (int)cpu_2_logical_apicid[cpu]; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Supporting over 60 cpus on NUMA-Q requires a locality-dependent | ||
71 | * cpu to APIC ID relation to properly interact with the intelligent | ||
72 | * mode of the cluster controller. | ||
73 | */ | ||
74 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
75 | { | ||
76 | if (mps_cpu < 60) | ||
77 | return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); | ||
78 | else | ||
79 | return BAD_APICID; | ||
80 | } | ||
81 | |||
82 | static inline int generate_logical_apicid(int quad, int phys_apicid) | ||
83 | { | ||
84 | return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1); | ||
85 | } | ||
86 | |||
87 | static inline int apicid_to_node(int logical_apicid) | ||
88 | { | ||
89 | return logical_apicid >> 4; | ||
90 | } | ||
91 | |||
92 | static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) | ||
93 | { | ||
94 | int node = apicid_to_node(logical_apicid); | ||
95 | int cpu = __ffs(logical_apicid & 0xf); | ||
96 | |||
97 | return physid_mask_of_physid(cpu + 4*node); | ||
98 | } | ||
99 | |||
100 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
101 | struct mpc_config_translation *translation_record) | ||
102 | { | ||
103 | int quad = translation_record->trans_quad; | ||
104 | int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); | ||
105 | |||
106 | printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n", | ||
107 | m->mpc_apicid, | ||
108 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
109 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
110 | m->mpc_apicver, quad, logical_apicid); | ||
111 | return logical_apicid; | ||
112 | } | ||
113 | |||
114 | static inline void setup_portio_remap(void) | ||
115 | { | ||
116 | int num_quads = num_online_nodes(); | ||
117 | |||
118 | if (num_quads <= 1) | ||
119 | return; | ||
120 | |||
121 | printk("Remapping cross-quad port I/O for %d quads\n", num_quads); | ||
122 | xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); | ||
123 | printk("xquad_portio vaddr 0x%08lx, len %08lx\n", | ||
124 | (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); | ||
125 | } | ||
126 | |||
127 | static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
128 | { | ||
129 | return (1); | ||
130 | } | ||
131 | |||
132 | static inline void enable_apic_mode(void) | ||
133 | { | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * We use physical apicids here, not logical, so just return the default | ||
138 | * physical broadcast to stop people from breaking us. | ||
139 | */ | ||
140 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
141 | { | ||
142 | return (int) 0xF; | ||
143 | } | ||
144 | |||
145 | /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ | ||
146 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
147 | { | ||
148 | return cpuid_apic >> index_msb; | ||
149 | } | ||
150 | |||
151 | #endif /* __ASM_MACH_APIC_H */ | ||
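The NUMA-Q mapping above puts the quad number in the high nibble of the logical APIC ID and one bit per CPU in the low nibble. A worked, self-contained example of cpu_present_to_apicid() and apicid_to_node() printed for the first eight CPUs (illustration only):

#include <stdio.h>

static int cpu_present_to_apicid(int mps_cpu)
{
	return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
}

static int apicid_to_node(int logical_apicid)
{
	return logical_apicid >> 4;
}

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++) {
		int apicid = cpu_present_to_apicid(cpu);
		printf("mps_cpu %d -> apicid 0x%02x (quad %d, local bit %#x)\n",
		       cpu, apicid, apicid_to_node(apicid), apicid & 0xf);
	}
	return 0;
}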
diff --git a/include/asm-i386/mach-numaq/mach_apicdef.h b/include/asm-i386/mach-numaq/mach_apicdef.h new file mode 100644 index 000000000000..bf439d0690f5 --- /dev/null +++ b/include/asm-i386/mach-numaq/mach_apicdef.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | |||
5 | #define APIC_ID_MASK (0xF<<24) | ||
6 | |||
7 | static inline unsigned get_apic_id(unsigned long x) | ||
8 | { | ||
9 | return (((x)>>24)&0x0F); | ||
10 | } | ||
11 | |||
12 | #define GET_APIC_ID(x) get_apic_id(x) | ||
13 | |||
14 | #endif | ||
diff --git a/include/asm-i386/mach-numaq/mach_ipi.h b/include/asm-i386/mach-numaq/mach_ipi.h new file mode 100644 index 000000000000..1b46fd3f2ae3 --- /dev/null +++ b/include/asm-i386/mach-numaq/mach_ipi.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | ||
2 | #define __ASM_MACH_IPI_H | ||
3 | |||
4 | inline void send_IPI_mask_sequence(cpumask_t, int vector); | ||
5 | |||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | ||
7 | { | ||
8 | send_IPI_mask_sequence(mask, vector); | ||
9 | } | ||
10 | |||
11 | static inline void send_IPI_allbutself(int vector) | ||
12 | { | ||
13 | cpumask_t mask = cpu_online_map; | ||
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | |||
16 | if (!cpus_empty(mask)) | ||
17 | send_IPI_mask(mask, vector); | ||
18 | } | ||
19 | |||
20 | static inline void send_IPI_all(int vector) | ||
21 | { | ||
22 | send_IPI_mask(cpu_online_map, vector); | ||
23 | } | ||
24 | |||
25 | #endif /* __ASM_MACH_IPI_H */ | ||
diff --git a/include/asm-i386/mach-numaq/mach_mpparse.h b/include/asm-i386/mach-numaq/mach_mpparse.h new file mode 100644 index 000000000000..51bbac8fc0c2 --- /dev/null +++ b/include/asm-i386/mach-numaq/mach_mpparse.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | ||
2 | #define __ASM_MACH_MPPARSE_H | ||
3 | |||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
5 | struct mpc_config_translation *translation) | ||
6 | { | ||
7 | int quad = translation->trans_quad; | ||
8 | int local = translation->trans_local; | ||
9 | |||
10 | mp_bus_id_to_node[m->mpc_busid] = quad; | ||
11 | mp_bus_id_to_local[m->mpc_busid] = local; | ||
12 | printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); | ||
13 | } | ||
14 | |||
15 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
16 | struct mpc_config_translation *translation) | ||
17 | { | ||
18 | int quad = translation->trans_quad; | ||
19 | int local = translation->trans_local; | ||
20 | |||
21 | quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; | ||
22 | } | ||
23 | |||
24 | /* Hook from generic ACPI tables.c */ | ||
25 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
26 | { | ||
27 | } | ||
28 | |||
29 | #endif /* __ASM_MACH_MPPARSE_H */ | ||
diff --git a/include/asm-i386/mach-numaq/mach_mpspec.h b/include/asm-i386/mach-numaq/mach_mpspec.h new file mode 100644 index 000000000000..dffb09856f8f --- /dev/null +++ b/include/asm-i386/mach-numaq/mach_mpspec.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | ||
2 | #define __ASM_MACH_MPSPEC_H | ||
3 | |||
4 | #define MAX_IRQ_SOURCES 512 | ||
5 | |||
6 | #define MAX_MP_BUSSES 32 | ||
7 | |||
8 | #endif /* __ASM_MACH_MPSPEC_H */ | ||
diff --git a/include/asm-i386/mach-numaq/mach_wakecpu.h b/include/asm-i386/mach-numaq/mach_wakecpu.h new file mode 100644 index 000000000000..00530041a991 --- /dev/null +++ b/include/asm-i386/mach-numaq/mach_wakecpu.h | |||
@@ -0,0 +1,43 @@ | |||
1 | #ifndef __ASM_MACH_WAKECPU_H | ||
2 | #define __ASM_MACH_WAKECPU_H | ||
3 | |||
4 | /* This file copes with machines that wake up secondary CPUs by NMIs */ | ||
5 | |||
6 | #define WAKE_SECONDARY_VIA_NMI | ||
7 | |||
8 | #define TRAMPOLINE_LOW phys_to_virt(0x8) | ||
9 | #define TRAMPOLINE_HIGH phys_to_virt(0xa) | ||
10 | |||
11 | #define boot_cpu_apicid boot_cpu_logical_apicid | ||
12 | |||
13 | /* We don't do anything here because we use NMIs to boot instead */ | ||
14 | static inline void wait_for_init_deassert(atomic_t *deassert) | ||
15 | { | ||
16 | } | ||
17 | |||
18 | /* | ||
19 | * Because we use NMIs rather than the INIT-STARTUP sequence to | ||
20 | * bootstrap the CPUs, the APIC may be in a weird state. Kick it. | ||
21 | */ | ||
22 | static inline void smp_callin_clear_local_apic(void) | ||
23 | { | ||
24 | clear_local_APIC(); | ||
25 | } | ||
26 | |||
27 | static inline void store_NMI_vector(unsigned short *high, unsigned short *low) | ||
28 | { | ||
29 | printk("Storing NMI vector\n"); | ||
30 | *high = *((volatile unsigned short *) TRAMPOLINE_HIGH); | ||
31 | *low = *((volatile unsigned short *) TRAMPOLINE_LOW); | ||
32 | } | ||
33 | |||
34 | static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | ||
35 | { | ||
36 | printk("Restoring NMI vector\n"); | ||
37 | *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high; | ||
38 | *((volatile unsigned short *) TRAMPOLINE_LOW) = *low; | ||
39 | } | ||
40 | |||
41 | #define inquire_remote_apic(apicid) {} | ||
42 | |||
43 | #endif /* __ASM_MACH_WAKECPU_H */ | ||
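store_NMI_vector()/restore_NMI_vector() above simply save and put back the two 16-bit trampoline words; elsewhere in this patch (mach-visws/smpboot_hooks.h) the warm-reset vector is written into those words as a real-mode segment (start_eip >> 4) and offset (start_eip & 0xf). A small stand-alone sketch of that split and its inverse, with a made-up trampoline address:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start_eip = 0x9f000;      /* example trampoline address  */
            unsigned short high = start_eip >> 4;   /* real-mode segment word      */
            unsigned short low  = start_eip & 0xf;  /* real-mode offset word       */

            printf("vector %#lx -> high %#x, low %#x -> %#lx\n",
                   start_eip, (unsigned)high, (unsigned)low,
                   ((unsigned long)high << 4) + low);
            return 0;
    }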
diff --git a/include/asm-i386/mach-summit/irq_vectors_limits.h b/include/asm-i386/mach-summit/irq_vectors_limits.h new file mode 100644 index 000000000000..890ce3f5e09a --- /dev/null +++ b/include/asm-i386/mach-summit/irq_vectors_limits.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_LIMITS_H | ||
2 | #define _ASM_IRQ_VECTORS_LIMITS_H | ||
3 | |||
4 | /* | ||
5 | * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, | ||
6 | * even with uni-proc kernels, so use a big array. | ||
7 | * | ||
8 | * This value should be the same in both the generic and summit subarches. | ||
9 | * Change one, change 'em both. | ||
10 | */ | ||
11 | #define NR_IRQS 224 | ||
12 | #define NR_IRQ_VECTORS 1024 | ||
13 | |||
14 | #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ | ||
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h new file mode 100644 index 000000000000..74e9cbc8c01b --- /dev/null +++ b/include/asm-i386/mach-summit/mach_apic.h | |||
@@ -0,0 +1,189 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | #define esr_disable (1) | ||
8 | #define NO_BALANCE_IRQ (0) | ||
9 | |||
10 | #define NO_IOAPIC_CHECK (1) /* Don't check I/O APIC ID for xAPIC */ | ||
11 | |||
12 | /* In clustered mode, the high nibble of APIC ID is a cluster number. | ||
13 | * The low nibble is a 4-bit bitmap. */ | ||
14 | #define XAPIC_DEST_CPUS_SHIFT 4 | ||
15 | #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) | ||
16 | #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) | ||
17 | |||
18 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | ||
19 | |||
20 | static inline cpumask_t target_cpus(void) | ||
21 | { | ||
22 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | ||
23 | * dest_LowestPrio mode logical clustered apic interrupt routing. | ||
24 | * Just start on cpu 0; IRQ balancing will spread the load. | ||
25 | */ | ||
26 | return cpumask_of_cpu(0); | ||
27 | } | ||
28 | #define TARGET_CPUS (target_cpus()) | ||
29 | |||
30 | #define INT_DELIVERY_MODE (dest_LowestPrio) | ||
31 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ | ||
32 | |||
33 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | ||
34 | { | ||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ | ||
39 | static inline unsigned long check_apicid_present(int bit) | ||
40 | { | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) | ||
45 | |||
46 | extern u8 bios_cpu_apicid[]; | ||
47 | extern u8 cpu_2_logical_apicid[]; | ||
48 | |||
49 | static inline void init_apic_ldr(void) | ||
50 | { | ||
51 | unsigned long val, id; | ||
52 | int i, count; | ||
53 | u8 lid; | ||
54 | u8 my_id = (u8)hard_smp_processor_id(); | ||
55 | u8 my_cluster = (u8)apicid_cluster(my_id); | ||
56 | |||
57 | /* Create logical APIC IDs by counting CPUs already in cluster. */ | ||
58 | for (count = 0, i = NR_CPUS; --i >= 0; ) { | ||
59 | lid = cpu_2_logical_apicid[i]; | ||
60 | if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) | ||
61 | ++count; | ||
62 | } | ||
63 | /* We only have a 4-bit-wide bitmap in cluster mode. If a deranged | ||
64 | * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ | ||
65 | BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); | ||
66 | id = my_cluster | (1UL << count); | ||
67 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | ||
68 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | ||
69 | val |= SET_APIC_LOGICAL_ID(id); | ||
70 | apic_write_around(APIC_LDR, val); | ||
71 | } | ||
72 | |||
73 | static inline int multi_timer_check(int apic, int irq) | ||
74 | { | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | static inline int apic_id_registered(void) | ||
79 | { | ||
80 | return 1; | ||
81 | } | ||
82 | |||
83 | static inline void clustered_apic_check(void) | ||
84 | { | ||
85 | printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", | ||
86 | nr_ioapics); | ||
87 | } | ||
88 | |||
89 | static inline int apicid_to_node(int logical_apicid) | ||
90 | { | ||
91 | return logical_apicid >> 5; /* 2 clusterids per CEC */ | ||
92 | } | ||
93 | |||
94 | /* Mapping from cpu number to logical apicid */ | ||
95 | static inline int cpu_to_logical_apicid(int cpu) | ||
96 | { | ||
97 | if (cpu >= NR_CPUS) | ||
98 | return BAD_APICID; | ||
99 | return (int)cpu_2_logical_apicid[cpu]; | ||
100 | } | ||
101 | |||
102 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
103 | { | ||
104 | if (mps_cpu < NR_CPUS) | ||
105 | return (int)bios_cpu_apicid[mps_cpu]; | ||
106 | else | ||
107 | return BAD_APICID; | ||
108 | } | ||
109 | |||
110 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) | ||
111 | { | ||
112 | /* For clustered we don't have a good way to do this yet - hack */ | ||
113 | return physids_promote(0x0F); | ||
114 | } | ||
115 | |||
116 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | ||
117 | { | ||
118 | return physid_mask_of_physid(0); | ||
119 | } | ||
120 | |||
121 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
122 | struct mpc_config_translation *translation_record) | ||
123 | { | ||
124 | printk("Processor #%d %ld:%ld APIC version %d\n", | ||
125 | m->mpc_apicid, | ||
126 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
127 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
128 | m->mpc_apicver); | ||
129 | return (m->mpc_apicid); | ||
130 | } | ||
131 | |||
132 | static inline void setup_portio_remap(void) | ||
133 | { | ||
134 | } | ||
135 | |||
136 | static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
137 | { | ||
138 | return 1; | ||
139 | } | ||
140 | |||
141 | static inline void enable_apic_mode(void) | ||
142 | { | ||
143 | } | ||
144 | |||
145 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
146 | { | ||
147 | int num_bits_set; | ||
148 | int cpus_found = 0; | ||
149 | int cpu; | ||
150 | int apicid; | ||
151 | |||
152 | num_bits_set = cpus_weight(cpumask); | ||
153 | /* Return id to all */ | ||
154 | if (num_bits_set == NR_CPUS) | ||
155 | return (int) 0xFF; | ||
156 | /* | ||
157 | * The cpus in the mask must all be on the same APIC cluster. If they are | ||
158 | * not on the same apicid cluster, return the default value of TARGET_CPUS. | ||
159 | */ | ||
160 | cpu = first_cpu(cpumask); | ||
161 | apicid = cpu_to_logical_apicid(cpu); | ||
162 | while (cpus_found < num_bits_set) { | ||
163 | if (cpu_isset(cpu, cpumask)) { | ||
164 | int new_apicid = cpu_to_logical_apicid(cpu); | ||
165 | if (apicid_cluster(apicid) != | ||
166 | apicid_cluster(new_apicid)){ | ||
167 | printk ("%s: Not a valid mask!\n",__FUNCTION__); | ||
168 | return 0xFF; | ||
169 | } | ||
170 | apicid = apicid | new_apicid; | ||
171 | cpus_found++; | ||
172 | } | ||
173 | cpu++; | ||
174 | } | ||
175 | return apicid; | ||
176 | } | ||
177 | |||
178 | /* cpuid returns the value latched in the HW at reset, not the APIC ID | ||
179 | * register's value. For any box whose BIOS changes APIC IDs, like | ||
180 | * clustered APIC systems, we must use hard_smp_processor_id. | ||
181 | * | ||
182 | * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. | ||
183 | */ | ||
184 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
185 | { | ||
186 | return hard_smp_processor_id() >> index_msb; | ||
187 | } | ||
188 | |||
189 | #endif /* __ASM_MACH_APIC_H */ | ||
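A stand-alone sketch of the clustered-xAPIC numbering used above: the high nibble of a logical APIC ID is the cluster, the low nibble a one-hot CPU bitmap, so IDs in the same cluster can be OR-ed into a single multi-CPU destination (which is what cpu_mask_to_apicid() relies on). The two IDs below are invented for illustration.

    #include <stdio.h>

    #define XAPIC_DEST_CPUS_SHIFT   4
    #define XAPIC_DEST_CPUS_MASK    ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
    #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
    #define apicid_cluster(apicid)  ((apicid) & XAPIC_DEST_CLUSTER_MASK)

    int main(void)
    {
            unsigned cpu0 = 0x21;   /* cluster 2, bitmap bit 0 */
            unsigned cpu1 = 0x22;   /* cluster 2, bitmap bit 1 */

            if (apicid_cluster(cpu0) == apicid_cluster(cpu1))
                    printf("combined logical dest: %#x\n", cpu0 | cpu1); /* 0x23 */
            return 0;
    }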
diff --git a/include/asm-i386/mach-summit/mach_apicdef.h b/include/asm-i386/mach-summit/mach_apicdef.h new file mode 100644 index 000000000000..a58ab5a75c8c --- /dev/null +++ b/include/asm-i386/mach-summit/mach_apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-i386/mach-summit/mach_ipi.h b/include/asm-i386/mach-summit/mach_ipi.h new file mode 100644 index 000000000000..9404c535b7ec --- /dev/null +++ b/include/asm-i386/mach-summit/mach_ipi.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | ||
2 | #define __ASM_MACH_IPI_H | ||
3 | |||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | ||
5 | |||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | ||
7 | { | ||
8 | send_IPI_mask_sequence(mask, vector); | ||
9 | } | ||
10 | |||
11 | static inline void send_IPI_allbutself(int vector) | ||
12 | { | ||
13 | cpumask_t mask = cpu_online_map; | ||
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | |||
16 | if (!cpus_empty(mask)) | ||
17 | send_IPI_mask(mask, vector); | ||
18 | } | ||
19 | |||
20 | static inline void send_IPI_all(int vector) | ||
21 | { | ||
22 | send_IPI_mask(cpu_online_map, vector); | ||
23 | } | ||
24 | |||
25 | #endif /* __ASM_MACH_IPI_H */ | ||
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h new file mode 100644 index 000000000000..2b9e6d55bef1 --- /dev/null +++ b/include/asm-i386/mach-summit/mach_mpparse.h | |||
@@ -0,0 +1,121 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | ||
2 | #define __ASM_MACH_MPPARSE_H | ||
3 | |||
4 | #include <mach_apic.h> | ||
5 | |||
6 | extern int use_cyclone; | ||
7 | |||
8 | #ifdef CONFIG_X86_SUMMIT_NUMA | ||
9 | extern void setup_summit(void); | ||
10 | #else | ||
11 | #define setup_summit() {} | ||
12 | #endif | ||
13 | |||
14 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
15 | struct mpc_config_translation *translation) | ||
16 | { | ||
17 | Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
18 | } | ||
19 | |||
20 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
21 | struct mpc_config_translation *translation) | ||
22 | { | ||
23 | } | ||
24 | |||
25 | extern int usb_early_handoff; | ||
26 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | ||
27 | char *productid) | ||
28 | { | ||
29 | if (!strncmp(oem, "IBM ENSW", 8) && | ||
30 | (!strncmp(productid, "VIGIL SMP", 9) | ||
31 | || !strncmp(productid, "EXA", 3) | ||
32 | || !strncmp(productid, "RUTHLESS SMP", 12))){ | ||
33 | use_cyclone = 1; /*enable cyclone-timer*/ | ||
34 | setup_summit(); | ||
35 | usb_early_handoff = 1; | ||
36 | return 1; | ||
37 | } | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | /* Hook from generic ACPI tables.c */ | ||
42 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
43 | { | ||
44 | if (!strncmp(oem_id, "IBM", 3) && | ||
45 | (!strncmp(oem_table_id, "SERVIGIL", 8) | ||
46 | || !strncmp(oem_table_id, "EXA", 3))){ | ||
47 | use_cyclone = 1; /*enable cyclone-timer*/ | ||
48 | setup_summit(); | ||
49 | usb_early_handoff = 1; | ||
50 | return 1; | ||
51 | } | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | struct rio_table_hdr { | ||
56 | unsigned char version; /* Version number of this data structure */ | ||
57 | /* Version 3 adds chassis_num & WP_index */ | ||
58 | unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ | ||
59 | unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ | ||
60 | } __attribute__((packed)); | ||
61 | |||
62 | struct scal_detail { | ||
63 | unsigned char node_id; /* Scalability Node ID */ | ||
64 | unsigned long CBAR; /* Address of 1MB register space */ | ||
65 | unsigned char port0node; /* Node ID port connected to: 0xFF=None */ | ||
66 | unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ | ||
67 | unsigned char port1node; /* Node ID port connected to: 0xFF = None */ | ||
68 | unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ | ||
69 | unsigned char port2node; /* Node ID port connected to: 0xFF = None */ | ||
70 | unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ | ||
71 | unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ | ||
72 | } __attribute__((packed)); | ||
73 | |||
74 | struct rio_detail { | ||
75 | unsigned char node_id; /* RIO Node ID */ | ||
76 | unsigned long BBAR; /* Address of 1MB register space */ | ||
77 | unsigned char type; /* Type of device */ | ||
78 | unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ | ||
79 | /* For CYC: Node ID of Twister that owns this CYC */ | ||
80 | unsigned char port0node; /* Node ID port connected to: 0xFF=None */ | ||
81 | unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ | ||
82 | unsigned char port1node; /* Node ID port connected to: 0xFF=None */ | ||
83 | unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ | ||
84 | unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ | ||
85 | /* For CYC: 0 */ | ||
86 | unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ | ||
87 | /* = 0 : the XAPIC is not used, ie:*/ | ||
88 | /* ints fwded to another XAPIC */ | ||
89 | /* Bits1:7 Reserved */ | ||
90 | /* For CYC: Bits0:7 Reserved */ | ||
91 | unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ | ||
92 | /* lower slot numbers/PCI bus numbers */ | ||
93 | /* For CYC: No meaning */ | ||
94 | unsigned char chassis_num; /* 1 based Chassis number */ | ||
95 | /* For LookOut WPEGs this field indicates the */ | ||
96 | /* Expansion Chassis #, enumerated from Boot */ | ||
97 | /* Node WPEG external port, then Boot Node CYC */ | ||
98 | /* external port, then Next Vigil chassis WPEG */ | ||
99 | /* external port, etc. */ | ||
100 | /* Shared Lookouts have only 1 chassis number (the */ | ||
101 | /* first one assigned) */ | ||
102 | } __attribute__((packed)); | ||
103 | |||
104 | |||
105 | typedef enum { | ||
106 | CompatTwister = 0, /* Compatibility Twister */ | ||
107 | AltTwister = 1, /* Alternate Twister of internal 8-way */ | ||
108 | CompatCyclone = 2, /* Compatibility Cyclone */ | ||
109 | AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ | ||
110 | CompatWPEG = 4, /* Compatibility WPEG */ | ||
111 | AltWPEG = 5, /* Second Planar WPEG */ | ||
112 | LookOutAWPEG = 6, /* LookOut WPEG */ | ||
113 | LookOutBWPEG = 7, /* LookOut WPEG */ | ||
114 | } node_type; | ||
115 | |||
116 | static inline int is_WPEG(struct rio_detail *rio){ | ||
117 | return (rio->type == CompatWPEG || rio->type == AltWPEG || | ||
118 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); | ||
119 | } | ||
120 | |||
121 | #endif /* __ASM_MACH_MPPARSE_H */ | ||
diff --git a/include/asm-i386/mach-summit/mach_mpspec.h b/include/asm-i386/mach-summit/mach_mpspec.h new file mode 100644 index 000000000000..bd765523511a --- /dev/null +++ b/include/asm-i386/mach-summit/mach_mpspec.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef __ASM_MACH_MPSPEC_H | ||
2 | #define __ASM_MACH_MPSPEC_H | ||
3 | |||
4 | #define MAX_IRQ_SOURCES 256 | ||
5 | |||
6 | /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ | ||
7 | #define MAX_MP_BUSSES 260 | ||
8 | |||
9 | #endif /* __ASM_MACH_MPSPEC_H */ | ||
diff --git a/include/asm-i386/mach-visws/cobalt.h b/include/asm-i386/mach-visws/cobalt.h new file mode 100644 index 000000000000..33c36225a042 --- /dev/null +++ b/include/asm-i386/mach-visws/cobalt.h | |||
@@ -0,0 +1,125 @@ | |||
1 | #ifndef __I386_SGI_COBALT_H | ||
2 | #define __I386_SGI_COBALT_H | ||
3 | |||
4 | #include <asm/fixmap.h> | ||
5 | |||
6 | /* | ||
7 | * Cobalt SGI Visual Workstation system ASIC | ||
8 | */ | ||
9 | |||
10 | #define CO_CPU_NUM_PHYS 0x1e00 | ||
11 | #define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2) | ||
12 | |||
13 | #define CO_CPU_MAX 4 | ||
14 | |||
15 | #define CO_CPU_PHYS 0xc2000000 | ||
16 | #define CO_APIC_PHYS 0xc4000000 | ||
17 | |||
18 | /* see set_fixmap() and asm/fixmap.h */ | ||
19 | #define CO_CPU_VADDR (fix_to_virt(FIX_CO_CPU)) | ||
20 | #define CO_APIC_VADDR (fix_to_virt(FIX_CO_APIC)) | ||
21 | |||
22 | /* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */ | ||
23 | #define CO_CPU_REV 0x08 | ||
24 | #define CO_CPU_CTRL 0x10 | ||
25 | #define CO_CPU_STAT 0x20 | ||
26 | #define CO_CPU_TIMEVAL 0x30 | ||
27 | |||
28 | /* CO_CPU_CTRL bits */ | ||
29 | #define CO_CTRL_TIMERUN 0x04 /* 0 == disabled */ | ||
30 | #define CO_CTRL_TIMEMASK 0x08 /* 0 == unmasked */ | ||
31 | |||
32 | /* CO_CPU_STATUS bits */ | ||
33 | #define CO_STAT_TIMEINTR 0x02 /* (r) 1 == int pend, (w) 0 == clear */ | ||
34 | |||
35 | /* CO_CPU_TIMEVAL value */ | ||
36 | #define CO_TIME_HZ 100000000 /* Cobalt core rate */ | ||
37 | |||
38 | /* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */ | ||
39 | #define CO_APIC_HI(n) (((n) * 0x10) + 4) | ||
40 | #define CO_APIC_LO(n) ((n) * 0x10) | ||
41 | #define CO_APIC_ID 0x0ffc | ||
42 | |||
43 | /* CO_APIC_ID bits */ | ||
44 | #define CO_APIC_ENABLE 0x00000100 | ||
45 | |||
46 | /* CO_APIC_LO bits */ | ||
47 | #define CO_APIC_MASK 0x00010000 /* 0 = enabled */ | ||
48 | #define CO_APIC_LEVEL 0x00008000 /* 0 = edge */ | ||
49 | |||
50 | /* | ||
51 | * Where things are physically wired to Cobalt | ||
52 | * #defines with no board _<type>_<rev>_ are common to all (thus far) | ||
53 | */ | ||
54 | #define CO_APIC_IDE0 4 | ||
55 | #define CO_APIC_IDE1 2 /* Only on 320 */ | ||
56 | |||
57 | #define CO_APIC_8259 12 /* serial, floppy, par-l-l */ | ||
58 | |||
59 | /* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */ | ||
60 | #define CO_APIC_PCIA_BASE0 0 /* and 1 */ /* slot 0, line 0 */ | ||
61 | #define CO_APIC_PCIA_BASE123 5 /* and 6 */ /* slot 0, line 1 */ | ||
62 | |||
63 | #define CO_APIC_PIIX4_USB 7 /* this one is weird */ | ||
64 | |||
65 | /* Lithium PCI Bridge B -- "the one with PIIX4" */ | ||
66 | #define CO_APIC_PCIB_BASE0 8 /* and 9-12 *//* slot 0, line 0 */ | ||
67 | #define CO_APIC_PCIB_BASE123 13 /* 14.15 */ /* slot 0, line 1 */ | ||
68 | |||
69 | #define CO_APIC_VIDOUT0 16 | ||
70 | #define CO_APIC_VIDOUT1 17 | ||
71 | #define CO_APIC_VIDIN0 18 | ||
72 | #define CO_APIC_VIDIN1 19 | ||
73 | |||
74 | #define CO_APIC_LI_AUDIO 22 | ||
75 | |||
76 | #define CO_APIC_AS 24 | ||
77 | #define CO_APIC_RE 25 | ||
78 | |||
79 | #define CO_APIC_CPU 28 /* Timer and Cache interrupt */ | ||
80 | #define CO_APIC_NMI 29 | ||
81 | #define CO_APIC_LAST CO_APIC_NMI | ||
82 | |||
83 | /* | ||
84 | * This is how irqs are assigned on the Visual Workstation. | ||
85 | * Legacy devices get IRQs 1-15 (system clock is 0 and is CO_APIC_CPU). | ||
86 | * All other devices (including PCI) go to Cobalt and are IRQs 16 on up. | ||
87 | */ | ||
88 | #define CO_IRQ_APIC0 16 /* irq of apic entry 0 */ | ||
89 | #define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0) | ||
90 | #define CO_IRQ(apic) (CO_IRQ_APIC0 + (apic)) /* apic ent to irq */ | ||
91 | #define CO_APIC(irq) ((irq) - CO_IRQ_APIC0) /* irq to apic ent */ | ||
92 | #define CO_IRQ_IDE0 14 /* knowledge of... */ | ||
93 | #define CO_IRQ_IDE1 15 /* ... ide driver defaults! */ | ||
94 | #define CO_IRQ_8259 CO_IRQ(CO_APIC_8259) | ||
95 | |||
96 | #ifdef CONFIG_X86_VISWS_APIC | ||
97 | extern __inline void co_cpu_write(unsigned long reg, unsigned long v) | ||
98 | { | ||
99 | *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v; | ||
100 | } | ||
101 | |||
102 | extern __inline unsigned long co_cpu_read(unsigned long reg) | ||
103 | { | ||
104 | return *((volatile unsigned long *)(CO_CPU_VADDR+reg)); | ||
105 | } | ||
106 | |||
107 | extern __inline void co_apic_write(unsigned long reg, unsigned long v) | ||
108 | { | ||
109 | *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v; | ||
110 | } | ||
111 | |||
112 | extern __inline unsigned long co_apic_read(unsigned long reg) | ||
113 | { | ||
114 | return *((volatile unsigned long *)(CO_APIC_VADDR+reg)); | ||
115 | } | ||
116 | #endif | ||
117 | |||
118 | extern char visws_board_type; | ||
119 | |||
120 | #define VISWS_320 0 | ||
121 | #define VISWS_540 1 | ||
122 | |||
123 | extern char visws_board_rev; | ||
124 | |||
125 | #endif /* __I386_SGI_COBALT_H */ | ||
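The CO_IRQ()/CO_APIC() macros above are just a fixed offset between Cobalt APIC entries and Linux IRQ numbers; a tiny stand-alone check of the round trip, using the 8259 cascade entry as the example:

    #include <stdio.h>

    #define CO_IRQ_APIC0    16                        /* irq of apic entry 0   */
    #define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0)
    #define CO_IRQ(apic)    (CO_IRQ_APIC0 + (apic))   /* apic entry to irq     */
    #define CO_APIC(irq)    ((irq) - CO_IRQ_APIC0)    /* irq to apic entry     */
    #define CO_APIC_8259    12

    int main(void)
    {
            int irq = CO_IRQ(CO_APIC_8259);           /* 28: cascade from 8259 */
            printf("8259 cascade: apic entry %d -> irq %d (Cobalt? %s)\n",
                   CO_APIC(irq), irq, IS_CO_APIC(irq) ? "yes" : "no");
            return 0;
    }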
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h new file mode 100644 index 000000000000..33acd50fd9a8 --- /dev/null +++ b/include/asm-i386/mach-visws/do_timer.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* defines for inline arch setup functions */ | ||
2 | |||
3 | #include <asm/fixmap.h> | ||
4 | #include "cobalt.h" | ||
5 | |||
6 | static inline void do_timer_interrupt_hook(struct pt_regs *regs) | ||
7 | { | ||
8 | /* Clear the interrupt */ | ||
9 | co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR); | ||
10 | |||
11 | do_timer(regs); | ||
12 | #ifndef CONFIG_SMP | ||
13 | update_process_times(user_mode(regs)); | ||
14 | #endif | ||
15 | /* | ||
16 | * In the SMP case we use the local APIC timer interrupt to do the | ||
17 | * profiling, except when we simulate SMP mode on a uniprocessor | ||
18 | * system, in that case we have to call the local interrupt handler. | ||
19 | */ | ||
20 | #ifndef CONFIG_X86_LOCAL_APIC | ||
21 | profile_tick(CPU_PROFILING, regs); | ||
22 | #else | ||
23 | if (!using_apic_timer) | ||
24 | smp_local_timer_interrupt(regs); | ||
25 | #endif | ||
26 | } | ||
27 | |||
28 | static inline int do_timer_overflow(int count) | ||
29 | { | ||
30 | int i; | ||
31 | |||
32 | spin_lock(&i8259A_lock); | ||
33 | /* | ||
34 | * This is tricky when I/O APICs are used; | ||
35 | * see do_timer_interrupt(). | ||
36 | */ | ||
37 | i = inb(0x20); | ||
38 | spin_unlock(&i8259A_lock); | ||
39 | |||
40 | /* assumption about timer being IRQ0 */ | ||
41 | if (i & 0x01) { | ||
42 | /* | ||
43 | * We cannot detect lost timer interrupts ... | ||
44 | * well, that's why we call them lost, don't we? :) | ||
45 | * [hmm, on the Pentium and Alpha we can ... sort of] | ||
46 | */ | ||
47 | count -= LATCH; | ||
48 | } else { | ||
49 | printk("do_slow_gettimeoffset(): hardware timer problem?\n"); | ||
50 | } | ||
51 | return count; | ||
52 | } | ||
diff --git a/include/asm-i386/mach-visws/entry_arch.h b/include/asm-i386/mach-visws/entry_arch.h new file mode 100644 index 000000000000..b183fa6d83d9 --- /dev/null +++ b/include/asm-i386/mach-visws/entry_arch.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * The following vectors are part of the Linux architecture, there | ||
3 | * is no hardware IRQ pin equivalent for them, they are triggered | ||
4 | * through the ICC by us (IPIs) | ||
5 | */ | ||
6 | #ifdef CONFIG_X86_SMP | ||
7 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | ||
8 | BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) | ||
9 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | ||
10 | #endif | ||
11 | |||
12 | /* | ||
13 | * every pentium local APIC has two 'local interrupts', with a | ||
14 | * soft-definable vector attached to both interrupts, one of | ||
15 | * which is a timer interrupt, the other one is error counter | ||
16 | * overflow. Linux uses the local APIC timer interrupt to get | ||
17 | * a much simpler SMP time architecture: | ||
18 | */ | ||
19 | #ifdef CONFIG_X86_LOCAL_APIC | ||
20 | BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) | ||
21 | BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) | ||
22 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) | ||
23 | #endif | ||
diff --git a/include/asm-i386/mach-visws/irq_vectors.h b/include/asm-i386/mach-visws/irq_vectors.h new file mode 100644 index 000000000000..cb572d8db505 --- /dev/null +++ b/include/asm-i386/mach-visws/irq_vectors.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef _ASM_IRQ_VECTORS_H | ||
2 | #define _ASM_IRQ_VECTORS_H | ||
3 | |||
4 | /* | ||
5 | * IDT vectors usable for external interrupt sources start | ||
6 | * at 0x20: | ||
7 | */ | ||
8 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
9 | |||
10 | #define SYSCALL_VECTOR 0x80 | ||
11 | |||
12 | /* | ||
13 | * Vectors 0x20-0x2f are used for ISA interrupts. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * Special IRQ vectors used by the SMP architecture, 0xf0-0xff | ||
18 | * | ||
19 | * some of the following vectors are 'rare'; they are merged | ||
20 | * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. | ||
21 | * TLB, reschedule and local APIC vectors are performance-critical. | ||
22 | * | ||
23 | * Vectors 0xf0-0xfa are free (reserved for future Linux use). | ||
24 | */ | ||
25 | #define SPURIOUS_APIC_VECTOR 0xff | ||
26 | #define ERROR_APIC_VECTOR 0xfe | ||
27 | #define INVALIDATE_TLB_VECTOR 0xfd | ||
28 | #define RESCHEDULE_VECTOR 0xfc | ||
29 | #define CALL_FUNCTION_VECTOR 0xfb | ||
30 | |||
31 | #define THERMAL_APIC_VECTOR 0xf0 | ||
32 | /* | ||
33 | * Local APIC timer IRQ vector is on a different priority level, | ||
34 | * to work around the 'lost local interrupt if more than 2 IRQ | ||
35 | * sources per level' errata. | ||
36 | */ | ||
37 | #define LOCAL_TIMER_VECTOR 0xef | ||
38 | |||
39 | /* | ||
40 | * First APIC vector available to drivers: (vectors 0x30-0xee) | ||
41 | * we start at 0x31 to spread out vectors evenly between priority | ||
42 | * levels. (0x80 is the syscall vector) | ||
43 | */ | ||
44 | #define FIRST_DEVICE_VECTOR 0x31 | ||
45 | #define FIRST_SYSTEM_VECTOR 0xef | ||
46 | |||
47 | #define TIMER_IRQ 0 | ||
48 | |||
49 | /* | ||
50 | * IRQ definitions | ||
51 | */ | ||
52 | #define NR_VECTORS 256 | ||
53 | #define NR_IRQS 224 | ||
54 | #define NR_IRQ_VECTORS NR_IRQS | ||
55 | |||
56 | #define FPU_IRQ 13 | ||
57 | |||
58 | #define FIRST_VM86_IRQ 3 | ||
59 | #define LAST_VM86_IRQ 15 | ||
60 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
61 | |||
62 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
diff --git a/include/asm-i386/mach-visws/lithium.h b/include/asm-i386/mach-visws/lithium.h new file mode 100644 index 000000000000..d443e68d0066 --- /dev/null +++ b/include/asm-i386/mach-visws/lithium.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef __I386_SGI_LITHIUM_H | ||
2 | #define __I386_SGI_LITHIUM_H | ||
3 | |||
4 | #include <asm/fixmap.h> | ||
5 | |||
6 | /* | ||
7 | * Lithium is the SGI Visual Workstation I/O ASIC | ||
8 | */ | ||
9 | |||
10 | #define LI_PCI_A_PHYS 0xfc000000 /* Enet is dev 3 */ | ||
11 | #define LI_PCI_B_PHYS 0xfd000000 /* PIIX4 is here */ | ||
12 | |||
13 | /* see set_fixmap() and asm/fixmap.h */ | ||
14 | #define LI_PCIA_VADDR (fix_to_virt(FIX_LI_PCIA)) | ||
15 | #define LI_PCIB_VADDR (fix_to_virt(FIX_LI_PCIB)) | ||
16 | |||
17 | /* Not a standard PCI? (not in linux/pci.h) */ | ||
18 | #define LI_PCI_BUSNUM 0x44 /* lo8: primary, hi8: sub */ | ||
19 | #define LI_PCI_INTEN 0x46 | ||
20 | |||
21 | /* LI_PCI_INTEN bits */ | ||
22 | #define LI_INTA_0 0x0001 | ||
23 | #define LI_INTA_1 0x0002 | ||
24 | #define LI_INTA_2 0x0004 | ||
25 | #define LI_INTA_3 0x0008 | ||
26 | #define LI_INTA_4 0x0010 | ||
27 | #define LI_INTB 0x0020 | ||
28 | #define LI_INTC 0x0040 | ||
29 | #define LI_INTD 0x0080 | ||
30 | |||
31 | /* More special purpose macros... */ | ||
32 | extern __inline void li_pcia_write16(unsigned long reg, unsigned short v) | ||
33 | { | ||
34 | *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v; | ||
35 | } | ||
36 | |||
37 | extern __inline unsigned short li_pcia_read16(unsigned long reg) | ||
38 | { | ||
39 | return *((volatile unsigned short *)(LI_PCIA_VADDR+reg)); | ||
40 | } | ||
41 | |||
42 | extern __inline void li_pcib_write16(unsigned long reg, unsigned short v) | ||
43 | { | ||
44 | *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v; | ||
45 | } | ||
46 | |||
47 | extern __inline unsigned short li_pcib_read16(unsigned long reg) | ||
48 | { | ||
49 | return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); | ||
50 | } | ||
51 | |||
52 | #endif | ||
53 | |||
diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h new file mode 100644 index 000000000000..4e6cdfb8b091 --- /dev/null +++ b/include/asm-i386/mach-visws/mach_apic.h | |||
@@ -0,0 +1,100 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | #include <mach_apicdef.h> | ||
5 | #include <asm/smp.h> | ||
6 | |||
7 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | ||
8 | |||
9 | #define no_balance_irq (0) | ||
10 | #define esr_disable (0) | ||
11 | |||
12 | #define NO_IOAPIC_CHECK (0) | ||
13 | |||
14 | #define INT_DELIVERY_MODE dest_LowestPrio | ||
15 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ | ||
16 | |||
17 | #ifdef CONFIG_SMP | ||
18 | #define TARGET_CPUS cpu_online_map | ||
19 | #else | ||
20 | #define TARGET_CPUS cpumask_of_cpu(0) | ||
21 | #endif | ||
22 | |||
23 | #define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) | ||
24 | #define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) | ||
25 | |||
26 | static inline int apic_id_registered(void) | ||
27 | { | ||
28 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | ||
29 | } | ||
30 | |||
31 | /* | ||
32 | * Set up the logical destination ID. | ||
33 | * | ||
34 | * Intel recommends setting DFR, LDR and TPR before enabling | ||
35 | * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel | ||
36 | * document number 292116). So here it goes... | ||
37 | */ | ||
38 | static inline void init_apic_ldr(void) | ||
39 | { | ||
40 | unsigned long val; | ||
41 | |||
42 | apic_write_around(APIC_DFR, APIC_DFR_VALUE); | ||
43 | val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; | ||
44 | val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); | ||
45 | apic_write_around(APIC_LDR, val); | ||
46 | } | ||
47 | |||
48 | static inline void summit_check(char *oem, char *productid) | ||
49 | { | ||
50 | } | ||
51 | |||
52 | static inline void clustered_apic_check(void) | ||
53 | { | ||
54 | } | ||
55 | |||
56 | /* Mapping from cpu number to logical apicid */ | ||
57 | static inline int cpu_to_logical_apicid(int cpu) | ||
58 | { | ||
59 | return 1 << cpu; | ||
60 | } | ||
61 | |||
62 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
63 | { | ||
64 | if (mps_cpu < get_physical_broadcast()) | ||
65 | return mps_cpu; | ||
66 | else | ||
67 | return BAD_APICID; | ||
68 | } | ||
69 | |||
70 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | ||
71 | { | ||
72 | return physid_mask_of_physid(apicid); | ||
73 | } | ||
74 | |||
75 | #define WAKE_SECONDARY_VIA_INIT | ||
76 | |||
77 | static inline void setup_portio_remap(void) | ||
78 | { | ||
79 | } | ||
80 | |||
81 | static inline void enable_apic_mode(void) | ||
82 | { | ||
83 | } | ||
84 | |||
85 | static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | ||
86 | { | ||
87 | return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); | ||
88 | } | ||
89 | |||
90 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
91 | { | ||
92 | return cpus_addr(cpumask)[0]; | ||
93 | } | ||
94 | |||
95 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
96 | { | ||
97 | return cpuid_apic >> index_msb; | ||
98 | } | ||
99 | |||
100 | #endif /* __ASM_MACH_APIC_H */ | ||
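A stand-alone sketch of the flat logical addressing this header sets up: each CPU owns one bit of the 8-bit logical destination (init_apic_ldr() writes 1UL << smp_processor_id()), so a set of CPUs is addressed by OR-ing their bits, which is why cpu_mask_to_apicid() can return the raw mask word. CPU numbers below are illustrative.

    #include <stdio.h>

    static unsigned int cpu_to_logical_apicid(int cpu)
    {
            return 1u << cpu;                 /* one bit per CPU, flat DFR */
    }

    int main(void)
    {
            unsigned int dest = cpu_to_logical_apicid(0) | cpu_to_logical_apicid(2);
            printf("logical destination for CPUs {0,2}: %#x\n", dest);   /* 0x5 */
            return 0;
    }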
diff --git a/include/asm-i386/mach-visws/mach_apicdef.h b/include/asm-i386/mach-visws/mach_apicdef.h new file mode 100644 index 000000000000..826cfa97d778 --- /dev/null +++ b/include/asm-i386/mach-visws/mach_apicdef.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xF); | ||
9 | } | ||
10 | #define GET_APIC_ID(x) get_apic_id(x) | ||
11 | |||
12 | #endif | ||
diff --git a/include/asm-i386/mach-visws/piix4.h b/include/asm-i386/mach-visws/piix4.h new file mode 100644 index 000000000000..83ea4f46e419 --- /dev/null +++ b/include/asm-i386/mach-visws/piix4.h | |||
@@ -0,0 +1,107 @@ | |||
1 | #ifndef __I386_SGI_PIIX_H | ||
2 | #define __I386_SGI_PIIX_H | ||
3 | |||
4 | /* | ||
5 | * PIIX4 as used on SGI Visual Workstations | ||
6 | */ | ||
7 | |||
8 | #define PIIX_PM_START 0x0F80 | ||
9 | |||
10 | #define SIO_GPIO_START 0x0FC0 | ||
11 | |||
12 | #define SIO_PM_START 0x0FC8 | ||
13 | |||
14 | #define PMBASE PIIX_PM_START | ||
15 | #define GPIREG0 (PMBASE+0x30) | ||
16 | #define GPIREG(x) (GPIREG0+((x)/8)) | ||
17 | #define GPIBIT(x) (1 << ((x)%8)) | ||
18 | |||
19 | #define PIIX_GPI_BD_ID1 18 | ||
20 | #define PIIX_GPI_BD_ID2 19 | ||
21 | #define PIIX_GPI_BD_ID3 20 | ||
22 | #define PIIX_GPI_BD_ID4 21 | ||
23 | #define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1) | ||
24 | #define PIIX_GPI_BD_MASK (GPIBIT(PIIX_GPI_BD_ID1) | \ | ||
25 | GPIBIT(PIIX_GPI_BD_ID2) | \ | ||
26 | GPIBIT(PIIX_GPI_BD_ID3) | \ | ||
27 | GPIBIT(PIIX_GPI_BD_ID4) ) | ||
28 | |||
29 | #define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8) | ||
30 | |||
31 | #define SIO_INDEX 0x2e | ||
32 | #define SIO_DATA 0x2f | ||
33 | |||
34 | #define SIO_DEV_SEL 0x7 | ||
35 | #define SIO_DEV_ENB 0x30 | ||
36 | #define SIO_DEV_MSB 0x60 | ||
37 | #define SIO_DEV_LSB 0x61 | ||
38 | |||
39 | #define SIO_GP_DEV 0x7 | ||
40 | |||
41 | #define SIO_GP_BASE SIO_GPIO_START | ||
42 | #define SIO_GP_MSB (SIO_GP_BASE>>8) | ||
43 | #define SIO_GP_LSB (SIO_GP_BASE&0xff) | ||
44 | |||
45 | #define SIO_GP_DATA1 (SIO_GP_BASE+0) | ||
46 | |||
47 | #define SIO_PM_DEV 0x8 | ||
48 | |||
49 | #define SIO_PM_BASE SIO_PM_START | ||
50 | #define SIO_PM_MSB (SIO_PM_BASE>>8) | ||
51 | #define SIO_PM_LSB (SIO_PM_BASE&0xff) | ||
52 | #define SIO_PM_INDEX (SIO_PM_BASE+0) | ||
53 | #define SIO_PM_DATA (SIO_PM_BASE+1) | ||
54 | |||
55 | #define SIO_PM_FER2 0x1 | ||
56 | |||
57 | #define SIO_PM_GP_EN 0x80 | ||
58 | |||
59 | |||
60 | |||
61 | /* | ||
62 | * This is the dev/reg where generating a config cycle will | ||
63 | * result in a PCI special cycle. | ||
64 | */ | ||
65 | #define SPECIAL_DEV 0xff | ||
66 | #define SPECIAL_REG 0x00 | ||
67 | |||
68 | /* | ||
69 | * PIIX4 needs to see a special cycle with the following data | ||
70 | * to be convinced the processor has gone into the stop grant | ||
71 | * state. PIIX4 insists on seeing this before it will power | ||
72 | * down a system. | ||
73 | */ | ||
74 | #define PIIX_SPECIAL_STOP 0x00120002 | ||
75 | |||
76 | #define PIIX4_RESET_PORT 0xcf9 | ||
77 | #define PIIX4_RESET_VAL 0x6 | ||
78 | |||
79 | #define PMSTS_PORT 0xf80 // 2 bytes PM Status | ||
80 | #define PMEN_PORT 0xf82 // 2 bytes PM Enable | ||
81 | #define PMCNTRL_PORT 0xf84 // 2 bytes PM Control | ||
82 | |||
83 | #define PM_SUSPEND_ENABLE 0x2000 // start sequence to suspend state | ||
84 | |||
85 | /* | ||
86 | * PMSTS and PMEN I/O bit definitions. | ||
87 | * (Bits are the same in both registers) | ||
88 | */ | ||
89 | #define PM_STS_RSM (1<<15) // Resume Status | ||
90 | #define PM_STS_PWRBTNOR (1<<11) // Power Button Override | ||
91 | #define PM_STS_RTC (1<<10) // RTC status | ||
92 | #define PM_STS_PWRBTN (1<<8) // Power Button Pressed? | ||
93 | #define PM_STS_GBL (1<<5) // Global Status | ||
94 | #define PM_STS_BM (1<<4) // Bus Master Status | ||
95 | #define PM_STS_TMROF (1<<0) // Timer Overflow Status. | ||
96 | |||
97 | /* | ||
98 | * Stop clock GPI register | ||
99 | */ | ||
100 | #define PIIX_GPIREG0 (0xf80 + 0x30) | ||
101 | |||
102 | /* | ||
103 | * Stop clock GPI bit in GPIREG0 | ||
104 | */ | ||
105 | #define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in | ||
106 | |||
107 | #endif | ||
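A stand-alone sketch of how the GPIREG()/GPIBIT() macros above locate the four board-ID general-purpose-input bits (GPIs 18-21): they land in the third GPI byte at bit positions 2-5. The sample port value is made up; the code that actually reads and interprets the ID lives elsewhere in the VisWS setup.

    #include <stdio.h>

    #define PMBASE            0x0F80
    #define GPIREG0           (PMBASE + 0x30)
    #define GPIREG(x)         (GPIREG0 + ((x) / 8))
    #define GPIBIT(x)         (1 << ((x) % 8))

    #define PIIX_GPI_BD_ID1   18
    #define PIIX_GPI_BD_REG   GPIREG(PIIX_GPI_BD_ID1)
    #define PIIX_GPI_BD_MASK  (GPIBIT(18) | GPIBIT(19) | GPIBIT(20) | GPIBIT(21))
    #define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8)

    int main(void)
    {
            unsigned char sample = 0x14;   /* made-up value read from the GPI port */

            printf("board-id register: %#x, mask %#x, id %d\n",
                   PIIX_GPI_BD_REG, PIIX_GPI_BD_MASK,
                   (sample & PIIX_GPI_BD_MASK) >> PIIX_GPI_BD_SHIFT);
            return 0;
    }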
diff --git a/include/asm-i386/mach-visws/setup_arch_post.h b/include/asm-i386/mach-visws/setup_arch_post.h new file mode 100644 index 000000000000..cdbd895a54b1 --- /dev/null +++ b/include/asm-i386/mach-visws/setup_arch_post.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* Hook for machine specific memory setup. | ||
2 | * | ||
3 | * This is included late in kernel/setup.c so that it can make use of all of | ||
4 | * the static functions. */ | ||
5 | |||
6 | #define MB (1024 * 1024) | ||
7 | |||
8 | unsigned long sgivwfb_mem_phys; | ||
9 | unsigned long sgivwfb_mem_size; | ||
10 | |||
11 | long long mem_size __initdata = 0; | ||
12 | |||
13 | static char * __init machine_specific_memory_setup(void) | ||
14 | { | ||
15 | long long gfx_mem_size = 8 * MB; | ||
16 | |||
17 | mem_size = ALT_MEM_K; | ||
18 | |||
19 | if (!mem_size) { | ||
20 | printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it !\n"); | ||
21 | mem_size = 128 * MB; | ||
22 | } | ||
23 | |||
24 | /* | ||
25 | * this hardcodes the graphics memory to 8 MB | ||
26 | * it really should be sized dynamically (or at least | ||
27 | * set as a boot param) | ||
28 | */ | ||
29 | if (!sgivwfb_mem_size) { | ||
30 | printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n"); | ||
31 | sgivwfb_mem_size = 8 * MB; | ||
32 | } | ||
33 | |||
34 | /* | ||
35 | * Trim to nearest MB | ||
36 | */ | ||
37 | sgivwfb_mem_size &= ~((1 << 20) - 1); | ||
38 | sgivwfb_mem_phys = mem_size - gfx_mem_size; | ||
39 | |||
40 | add_memory_region(0, LOWMEMSIZE(), E820_RAM); | ||
41 | add_memory_region(HIGH_MEMORY, mem_size - sgivwfb_mem_size - HIGH_MEMORY, E820_RAM); | ||
42 | add_memory_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED); | ||
43 | |||
44 | return "PROM"; | ||
45 | |||
46 | /* Remove gcc warnings */ | ||
47 | (void) sanitize_e820_map(NULL, NULL); | ||
48 | (void) copy_e820_map(NULL, 0); | ||
49 | } | ||
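A stand-alone rerun of the framebuffer-carving arithmetic above, with made-up sizes in bytes: the framebuffer size is trimmed down to a whole number of megabytes and the 8 MB graphics aperture is taken from the top of detected memory, mirroring the hook above.

    #include <stdio.h>

    #define MB (1024 * 1024)

    int main(void)
    {
            long long mem_size = 128LL * MB;          /* pretend detected memory     */
            long long gfx_mem_size = 8 * MB;          /* fixed graphics aperture     */
            unsigned long fb_size = 8 * MB + 12345;   /* un-rounded framebuffer size */
            unsigned long fb_phys;

            fb_size &= ~((1UL << 20) - 1);            /* trim to the nearest MB      */
            fb_phys = mem_size - gfx_mem_size;        /* carve off the top of RAM    */

            printf("framebuffer: %lu bytes at %#lx\n", fb_size, fb_phys);
            return 0;
    }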
diff --git a/include/asm-i386/mach-visws/setup_arch_pre.h b/include/asm-i386/mach-visws/setup_arch_pre.h new file mode 100644 index 000000000000..b92d6d9a4d3c --- /dev/null +++ b/include/asm-i386/mach-visws/setup_arch_pre.h | |||
@@ -0,0 +1,5 @@ | |||
1 | /* Hook to call BIOS initialisation function */ | ||
2 | |||
3 | /* no action for visws */ | ||
4 | |||
5 | #define ARCH_SETUP | ||
diff --git a/include/asm-i386/mach-visws/smpboot_hooks.h b/include/asm-i386/mach-visws/smpboot_hooks.h new file mode 100644 index 000000000000..d926471fa359 --- /dev/null +++ b/include/asm-i386/mach-visws/smpboot_hooks.h | |||
@@ -0,0 +1,24 @@ | |||
1 | static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) | ||
2 | { | ||
3 | CMOS_WRITE(0xa, 0xf); | ||
4 | local_flush_tlb(); | ||
5 | Dprintk("1.\n"); | ||
6 | *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; | ||
7 | Dprintk("2.\n"); | ||
8 | *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; | ||
9 | Dprintk("3.\n"); | ||
10 | } | ||
11 | |||
12 | /* for visws do nothing for any of these */ | ||
13 | |||
14 | static inline void smpboot_clear_io_apic_irqs(void) | ||
15 | { | ||
16 | } | ||
17 | |||
18 | static inline void smpboot_restore_warm_reset_vector(void) | ||
19 | { | ||
20 | } | ||
21 | |||
22 | static inline void smpboot_setup_io_apic(void) | ||
23 | { | ||
24 | } | ||
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h new file mode 100644 index 000000000000..ae510e5d0d78 --- /dev/null +++ b/include/asm-i386/mach-voyager/do_timer.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* defines for inline arch setup functions */ | ||
2 | #include <asm/voyager.h> | ||
3 | |||
4 | static inline void do_timer_interrupt_hook(struct pt_regs *regs) | ||
5 | { | ||
6 | do_timer(regs); | ||
7 | #ifndef CONFIG_SMP | ||
8 | update_process_times(user_mode(regs)); | ||
9 | #endif | ||
10 | |||
11 | voyager_timer_interrupt(regs); | ||
12 | } | ||
13 | |||
14 | static inline int do_timer_overflow(int count) | ||
15 | { | ||
16 | /* can't read the ISR, just assume 1 tick | ||
17 | overflow */ | ||
18 | if(count > LATCH || count < 0) { | ||
19 | printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH); | ||
20 | count = LATCH; | ||
21 | } | ||
22 | count -= LATCH; | ||
23 | |||
24 | return count; | ||
25 | } | ||
diff --git a/include/asm-i386/mach-voyager/entry_arch.h b/include/asm-i386/mach-voyager/entry_arch.h new file mode 100644 index 000000000000..4a1e1e8c10b6 --- /dev/null +++ b/include/asm-i386/mach-voyager/entry_arch.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* Copyright (C) 2002 | ||
4 | * | ||
5 | * Author: James.Bottomley@HansenPartnership.com | ||
6 | * | ||
7 | * linux/arch/i386/voyager/entry_arch.h | ||
8 | * | ||
9 | * This file builds the VIC and QIC CPI gates | ||
10 | */ | ||
11 | |||
12 | /* initialise the voyager interrupt gates | ||
13 | * | ||
14 | * This uses the macros in irq.h to set up assembly jump gates. The | ||
15 | * calls are then redirected to the same routine with smp_ prefixed */ | ||
16 | BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT) | ||
17 | BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT) | ||
18 | BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0); | ||
19 | |||
20 | /* do all the QIC interrupts */ | ||
21 | BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI); | ||
22 | BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI); | ||
23 | BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); | ||
24 | BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); | ||
25 | BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); | ||
26 | |||
diff --git a/include/asm-i386/mach-voyager/irq_vectors.h b/include/asm-i386/mach-voyager/irq_vectors.h new file mode 100644 index 000000000000..165421f5821c --- /dev/null +++ b/include/asm-i386/mach-voyager/irq_vectors.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* Copyright (C) 2002 | ||
4 | * | ||
5 | * Author: James.Bottomley@HansenPartnership.com | ||
6 | * | ||
7 | * linux/arch/i386/voyager/irq_vectors.h | ||
8 | * | ||
9 | * This file provides definitions for the VIC and QIC CPIs | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_IRQ_VECTORS_H | ||
13 | #define _ASM_IRQ_VECTORS_H | ||
14 | |||
15 | /* | ||
16 | * IDT vectors usable for external interrupt sources start | ||
17 | * at 0x20: | ||
18 | */ | ||
19 | #define FIRST_EXTERNAL_VECTOR 0x20 | ||
20 | |||
21 | #define SYSCALL_VECTOR 0x80 | ||
22 | |||
23 | /* | ||
24 | * Vectors 0x20-0x2f are used for ISA interrupts. | ||
25 | */ | ||
26 | |||
27 | /* These define the CPIs we use in linux */ | ||
28 | #define VIC_CPI_LEVEL0 0 | ||
29 | #define VIC_CPI_LEVEL1 1 | ||
30 | /* now the fake CPIs */ | ||
31 | #define VIC_TIMER_CPI 2 | ||
32 | #define VIC_INVALIDATE_CPI 3 | ||
33 | #define VIC_RESCHEDULE_CPI 4 | ||
34 | #define VIC_ENABLE_IRQ_CPI 5 | ||
35 | #define VIC_CALL_FUNCTION_CPI 6 | ||
36 | |||
37 | /* Now the QIC CPIs: Since we don't need the two initial levels, | ||
38 | * these are 2 less than the VIC CPIs */ | ||
39 | #define QIC_CPI_OFFSET 1 | ||
40 | #define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) | ||
41 | #define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) | ||
42 | #define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) | ||
43 | #define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) | ||
44 | #define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) | ||
45 | |||
46 | #define VIC_START_FAKE_CPI VIC_TIMER_CPI | ||
47 | #define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI | ||
48 | |||
49 | /* this is the SYS_INT CPI. */ | ||
50 | #define VIC_SYS_INT 8 | ||
51 | #define VIC_CMN_INT 15 | ||
52 | |||
53 | /* This is the boot CPI for alternate processors. It gets overwritten | ||
54 | * by the above once the system has activated all available processors */ | ||
55 | #define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 | ||
56 | #define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) | ||
57 | |||
58 | #define NR_VECTORS 256 | ||
59 | #define NR_IRQS 224 | ||
60 | #define NR_IRQ_VECTORS NR_IRQS | ||
61 | |||
62 | #define FPU_IRQ 13 | ||
63 | |||
64 | #define FIRST_VM86_IRQ 3 | ||
65 | #define LAST_VM86_IRQ 15 | ||
66 | #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) | ||
67 | |||
68 | #ifndef __ASSEMBLY__ | ||
69 | extern asmlinkage void vic_cpi_interrupt(void); | ||
70 | extern asmlinkage void vic_sys_interrupt(void); | ||
71 | extern asmlinkage void vic_cmn_interrupt(void); | ||
72 | extern asmlinkage void qic_timer_interrupt(void); | ||
73 | extern asmlinkage void qic_invalidate_interrupt(void); | ||
74 | extern asmlinkage void qic_reschedule_interrupt(void); | ||
75 | extern asmlinkage void qic_enable_irq_interrupt(void); | ||
76 | extern asmlinkage void qic_call_function_interrupt(void); | ||
77 | #endif /* !__ASSEMBLY__ */ | ||
78 | |||
79 | #endif /* _ASM_IRQ_VECTORS_H */ | ||
diff --git a/include/asm-i386/mach-voyager/setup_arch_post.h b/include/asm-i386/mach-voyager/setup_arch_post.h new file mode 100644 index 000000000000..f6f6c2cbc75c --- /dev/null +++ b/include/asm-i386/mach-voyager/setup_arch_post.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* Hook for machine specific memory setup. | ||
2 | * | ||
3 | * This is included late in kernel/setup.c so that it can make use of all of | ||
4 | * the static functions. */ | ||
5 | |||
6 | static char * __init machine_specific_memory_setup(void) | ||
7 | { | ||
8 | char *who; | ||
9 | |||
10 | who = "NOT VOYAGER"; | ||
11 | |||
12 | if(voyager_level == 5) { | ||
13 | __u32 addr, length; | ||
14 | int i; | ||
15 | |||
16 | who = "Voyager-SUS"; | ||
17 | |||
18 | e820.nr_map = 0; | ||
19 | for(i=0; voyager_memory_detect(i, &addr, &length); i++) { | ||
20 | add_memory_region(addr, length, E820_RAM); | ||
21 | } | ||
22 | return who; | ||
23 | } else if(voyager_level == 4) { | ||
24 | __u32 tom; | ||
25 | __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8; | ||
26 | /* select the DINO config space */ | ||
27 | outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT); | ||
28 | /* Read DINO top of memory register */ | ||
29 | tom = ((inb(catbase + 0x4) & 0xf0) << 16) | ||
30 | + ((inb(catbase + 0x5) & 0x7f) << 24); | ||
31 | |||
32 | if(inb(catbase) != VOYAGER_DINO) { | ||
33 | printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n"); | ||
34 | tom = (EXT_MEM_K)<<10; | ||
35 | } | ||
36 | who = "Voyager-TOM"; | ||
37 | add_memory_region(0, 0x9f000, E820_RAM); | ||
38 | /* map from 1M to top of memory */ | ||
39 | add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM); | ||
40 | /* FIXME: Should check the ASICs to see if I need to | ||
41 | * take out the 8M window. Just do it at the moment | ||
42 | * */ | ||
43 | add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED); | ||
44 | return who; | ||
45 | } | ||
46 | |||
47 | who = "BIOS-e820"; | ||
48 | |||
49 | /* | ||
50 | * Try to copy the BIOS-supplied E820-map. | ||
51 | * | ||
52 | * Otherwise fake a memory map; one section from 0k->640k, | ||
53 | * the next section from 1mb->appropriate_mem_k | ||
54 | */ | ||
55 | sanitize_e820_map(E820_MAP, &E820_MAP_NR); | ||
56 | if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) { | ||
57 | unsigned long mem_size; | ||
58 | |||
59 | /* compare results from other methods and take the greater */ | ||
60 | if (ALT_MEM_K < EXT_MEM_K) { | ||
61 | mem_size = EXT_MEM_K; | ||
62 | who = "BIOS-88"; | ||
63 | } else { | ||
64 | mem_size = ALT_MEM_K; | ||
65 | who = "BIOS-e801"; | ||
66 | } | ||
67 | |||
68 | e820.nr_map = 0; | ||
69 | add_memory_region(0, LOWMEMSIZE(), E820_RAM); | ||
70 | add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); | ||
71 | } | ||
72 | return who; | ||
73 | } | ||
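A stand-alone sketch that reassembles the DINO "top of memory" value from the two CAT config bytes, exactly as the voyager_level == 4 branch above does; the byte values are invented for illustration, the real code reads them with inb(catbase + 0x4/0x5).

    #include <stdio.h>

    int main(void)
    {
            unsigned char b4 = 0xa0;     /* would come from inb(catbase + 0x4) */
            unsigned char b5 = 0x01;     /* would come from inb(catbase + 0x5) */
            unsigned int tom = ((b4 & 0xf0) << 16) + ((b5 & 0x7f) << 24);

            printf("top of memory: %#x (%u MB)\n", tom, tom >> 20);
            return 0;
    }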
diff --git a/include/asm-i386/mach-voyager/setup_arch_pre.h b/include/asm-i386/mach-voyager/setup_arch_pre.h new file mode 100644 index 000000000000..48f7e6ff49a5 --- /dev/null +++ b/include/asm-i386/mach-voyager/setup_arch_pre.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #include <asm/voyager.h> | ||
2 | #define VOYAGER_BIOS_INFO ((struct voyager_bios_info *)(PARAM+0x40)) | ||
3 | |||
4 | /* Hook to call BIOS initialisation function */ | ||
5 | |||
6 | /* for voyager, pass the voyager BIOS/SUS info area to the detection | ||
7 | * routines */ | ||
8 | |||
9 | #define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO); | ||
10 | |||
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h new file mode 100644 index 000000000000..697673b555ce --- /dev/null +++ b/include/asm-i386/math_emu.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #ifndef _I386_MATH_EMU_H | ||
2 | #define _I386_MATH_EMU_H | ||
3 | |||
4 | #include <asm/sigcontext.h> | ||
5 | |||
6 | int restore_i387_soft(void *s387, struct _fpstate __user *buf); | ||
7 | int save_i387_soft(void *s387, struct _fpstate __user *buf); | ||
8 | |||
9 | /* This structure matches the layout of the data saved to the stack | ||
10 | following a device-not-present interrupt, part of it saved | ||
11 | automatically by the 80386/80486. | ||
12 | */ | ||
13 | struct info { | ||
14 | long ___orig_eip; | ||
15 | long ___ebx; | ||
16 | long ___ecx; | ||
17 | long ___edx; | ||
18 | long ___esi; | ||
19 | long ___edi; | ||
20 | long ___ebp; | ||
21 | long ___eax; | ||
22 | long ___ds; | ||
23 | long ___es; | ||
24 | long ___orig_eax; | ||
25 | long ___eip; | ||
26 | long ___cs; | ||
27 | long ___eflags; | ||
28 | long ___esp; | ||
29 | long ___ss; | ||
30 | long ___vm86_es; /* This and the following only in vm86 mode */ | ||
31 | long ___vm86_ds; | ||
32 | long ___vm86_fs; | ||
33 | long ___vm86_gs; | ||
34 | }; | ||
35 | #endif | ||
diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h new file mode 100644 index 000000000000..99a890047023 --- /dev/null +++ b/include/asm-i386/mc146818rtc.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Machine dependent access functions for RTC registers. | ||
3 | */ | ||
4 | #ifndef _ASM_MC146818RTC_H | ||
5 | #define _ASM_MC146818RTC_H | ||
6 | |||
7 | #include <asm/io.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <linux/mc146818rtc.h> | ||
10 | |||
11 | #ifndef RTC_PORT | ||
12 | #define RTC_PORT(x) (0x70 + (x)) | ||
13 | #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ | ||
14 | #endif | ||
15 | |||
16 | #ifdef __HAVE_ARCH_CMPXCHG | ||
17 | /* | ||
18 | * This lock provides NMI-safe access to the CMOS/RTC registers. It has some | ||
19 | * special properties. It is owned by a CPU and stores the index register | ||
20 | * currently being accessed (if owned). The idea here is that it works | ||
21 | * like a normal lock (normally). However, in an NMI, the NMI code will | ||
22 | * first check to see if its CPU owns the lock, meaning that the NMI | ||
23 | * interrupted during the read/write of the device. If it does, it goes ahead | ||
24 | * and performs the access and then restores the index register. If it does | ||
25 | * not, it locks normally. | ||
26 | * | ||
27 | * Note that since we are working with NMIs, we need this lock even in | ||
28 | * a non-SMP machine just to mark that the lock is owned. | ||
29 | * | ||
30 | * This only works with compare-and-swap. There is no other way to | ||
31 | * atomically claim the lock and set the owner. | ||
32 | */ | ||
33 | #include <linux/smp.h> | ||
34 | extern volatile unsigned long cmos_lock; | ||
35 | |||
36 | /* | ||
37 | * All of these below must be called with interrupts off, preempt | ||
38 | * disabled, etc. | ||
39 | */ | ||
40 | |||
41 | static inline void lock_cmos(unsigned char reg) | ||
42 | { | ||
43 | unsigned long new; | ||
44 | new = ((smp_processor_id()+1) << 8) | reg; | ||
45 | for (;;) { | ||
46 | if (cmos_lock) | ||
47 | continue; | ||
48 | if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) | ||
49 | return; | ||
50 | } | ||
51 | } | ||
52 | |||
53 | static inline void unlock_cmos(void) | ||
54 | { | ||
55 | cmos_lock = 0; | ||
56 | } | ||
57 | static inline int do_i_have_lock_cmos(void) | ||
58 | { | ||
59 | return (cmos_lock >> 8) == (smp_processor_id()+1); | ||
60 | } | ||
61 | static inline unsigned char current_lock_cmos_reg(void) | ||
62 | { | ||
63 | return cmos_lock & 0xff; | ||
64 | } | ||
65 | #define lock_cmos_prefix(reg) \ | ||
66 | do { \ | ||
67 | unsigned long cmos_flags; \ | ||
68 | local_irq_save(cmos_flags); \ | ||
69 | lock_cmos(reg) | ||
70 | #define lock_cmos_suffix(reg) \ | ||
71 | unlock_cmos(); \ | ||
72 | local_irq_restore(cmos_flags); \ | ||
73 | } while (0) | ||
74 | #else | ||
75 | #define lock_cmos_prefix(reg) do {} while (0) | ||
76 | #define lock_cmos_suffix(reg) do {} while (0) | ||
77 | #define lock_cmos(reg) | ||
78 | #define unlock_cmos() | ||
79 | #define do_i_have_lock_cmos() 0 | ||
80 | #define current_lock_cmos_reg() 0 | ||
81 | #endif | ||
82 | |||
83 | /* | ||
84 | * The machines supported so far all access the RTC index register via | ||
85 | * an ISA port access, but the way to access the data register differs ... | ||
86 | */ | ||
87 | #define CMOS_READ(addr) rtc_cmos_read(addr) | ||
88 | #define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) | ||
89 | unsigned char rtc_cmos_read(unsigned char addr); | ||
90 | void rtc_cmos_write(unsigned char val, unsigned char addr); | ||
91 | |||
92 | #define RTC_IRQ 8 | ||
93 | |||
94 | #endif /* _ASM_MC146818RTC_H */ | ||
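A user-space sketch of the cmos_lock encoding described above: the owner (cpu id + 1) sits in bits 8 and up, the RTC index register being touched in the low byte, and the lock is claimed with an atomic compare-and-swap so an NMI handler can tell whether its own CPU was interrupted mid-access. The GCC __sync builtin and the fake cpu id stand in for the kernel's __cmpxchg and smp_processor_id().

    #include <stdio.h>

    static volatile unsigned long cmos_lock;

    static void lock_cmos(int cpu, unsigned char reg)
    {
            unsigned long new = ((unsigned long)(cpu + 1) << 8) | reg;

            while (!__sync_bool_compare_and_swap(&cmos_lock, 0UL, new))
                    ;                               /* spin until we own it */
    }

    static int do_i_have_lock_cmos(int cpu)
    {
            return (cmos_lock >> 8) == (unsigned long)(cpu + 1);
    }

    int main(void)
    {
            lock_cmos(0, 0x0a);                     /* "CPU 0" grabs RTC reg 0x0A */
            printf("owned by us: %d, index reg %#lx\n",
                   do_i_have_lock_cmos(0), cmos_lock & 0xff);
            cmos_lock = 0;                          /* unlock_cmos()              */
            return 0;
    }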
diff --git a/include/asm-i386/mca.h b/include/asm-i386/mca.h new file mode 100644 index 000000000000..09adf2eac4dc --- /dev/null +++ b/include/asm-i386/mca.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* Platform specific MCA defines */ | ||
4 | #ifndef _ASM_MCA_H | ||
5 | #define _ASM_MCA_H | ||
6 | |||
7 | /* Maximal number of MCA slots - actually, some machines have less, but | ||
8 | * they all have sufficient number of POS registers to cover 8. | ||
9 | */ | ||
10 | #define MCA_MAX_SLOT_NR 8 | ||
11 | |||
12 | /* Most machines have only one MCA bus. The only multiple bus machines | ||
13 | * I know have at most two */ | ||
14 | #define MAX_MCA_BUSSES 2 | ||
15 | |||
16 | #define MCA_PRIMARY_BUS 0 | ||
17 | #define MCA_SECONDARY_BUS 1 | ||
18 | |||
19 | /* Dummy slot numbers on primary MCA for integrated functions */ | ||
20 | #define MCA_INTEGSCSI (MCA_MAX_SLOT_NR) | ||
21 | #define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1) | ||
22 | #define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2) | ||
23 | |||
24 | /* Dummy POS values for integrated functions */ | ||
25 | #define MCA_DUMMY_POS_START 0x10000 | ||
26 | #define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1) | ||
27 | #define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2) | ||
28 | #define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3) | ||
29 | |||
30 | /* MCA registers */ | ||
31 | |||
32 | #define MCA_MOTHERBOARD_SETUP_REG 0x94 | ||
33 | #define MCA_ADAPTER_SETUP_REG 0x96 | ||
34 | #define MCA_POS_REG(n) (0x100+(n)) | ||
35 | |||
36 | #define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */ | ||
37 | |||
38 | /* Max number of adapters, including both slots and various integrated | ||
39 | * things. | ||
40 | */ | ||
41 | #define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) | ||
42 | |||
43 | #endif | ||
diff --git a/include/asm-i386/mca_dma.h b/include/asm-i386/mca_dma.h new file mode 100644 index 000000000000..4b3b526c5a3f --- /dev/null +++ b/include/asm-i386/mca_dma.h | |||
@@ -0,0 +1,202 @@ | |||
1 | #ifndef MCA_DMA_H | ||
2 | #define MCA_DMA_H | ||
3 | |||
4 | #include <asm/io.h> | ||
5 | #include <linux/ioport.h> | ||
6 | |||
7 | /* | ||
8 | * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to | ||
9 | * standard PC dma, but it certainly has its quirks. DMA register addresses | ||
10 | * are in a different place and there are some added functions. Most of this | ||
11 | * should be pretty obvious on inspection. Note that the user must divide | ||
12 | * count by 2 when using 16-bit dma; that is not handled by these functions. | ||
13 | * | ||
14 | * Ramen Noodles are yummy. | ||
15 | * | ||
16 | * 1998 Tymm Twillman <tymm@computer.org> | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * Registers that are used by the DMA controller; FN is the function register | ||
21 | * (tell the controller what to do) and EXE is the execution register (how | ||
22 | * to do it) | ||
23 | */ | ||
24 | |||
25 | #define MCA_DMA_REG_FN 0x18 | ||
26 | #define MCA_DMA_REG_EXE 0x1A | ||
27 | |||
28 | /* | ||
29 | * Functions that the DMA controller can do | ||
30 | */ | ||
31 | |||
32 | #define MCA_DMA_FN_SET_IO 0x00 | ||
33 | #define MCA_DMA_FN_SET_ADDR 0x20 | ||
34 | #define MCA_DMA_FN_GET_ADDR 0x30 | ||
35 | #define MCA_DMA_FN_SET_COUNT 0x40 | ||
36 | #define MCA_DMA_FN_GET_COUNT 0x50 | ||
37 | #define MCA_DMA_FN_GET_STATUS 0x60 | ||
38 | #define MCA_DMA_FN_SET_MODE 0x70 | ||
39 | #define MCA_DMA_FN_SET_ARBUS 0x80 | ||
40 | #define MCA_DMA_FN_MASK 0x90 | ||
41 | #define MCA_DMA_FN_RESET_MASK 0xA0 | ||
42 | #define MCA_DMA_FN_MASTER_CLEAR 0xD0 | ||
43 | |||
44 | /* | ||
45 | * Modes (used by setting MCA_DMA_FN_MODE in the function register) | ||
46 | * | ||
47 | * Note that the MODE_READ is read from memory (write to device), and | ||
48 | * MODE_WRITE is vice-versa. | ||
49 | */ | ||
50 | |||
51 | #define MCA_DMA_MODE_XFER 0x04 /* read by default */ | ||
52 | #define MCA_DMA_MODE_READ 0x04 /* same as XFER */ | ||
53 | #define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */ | ||
54 | #define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */ | ||
55 | #define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */ | ||
56 | |||
57 | |||
58 | /** | ||
59 | * mca_enable_dma - enable DMA on a channel | ||
60 | * @dmanr: DMA channel | ||
61 | * | ||
62 | * Enable the MCA bus DMA on a channel. This can be called from | ||
63 | * IRQ context. | ||
64 | */ | ||
65 | |||
66 | static __inline__ void mca_enable_dma(unsigned int dmanr) | ||
67 | { | ||
68 | outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); | ||
69 | } | ||
70 | |||
71 | /** | ||
72 | * mca_disable_dma - disable DMA on a channel | ||
73 | * @dmanr: DMA channel | ||
74 | * | ||
75 | * Disable the MCA bus DMA on a channel. This can be called from | ||
76 | * IRQ context. | ||
77 | */ | ||
78 | |||
79 | static __inline__ void mca_disable_dma(unsigned int dmanr) | ||
80 | { | ||
81 | outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * mca_set_dma_addr - load a 24bit DMA address | ||
86 | * @dmanr: DMA channel | ||
87 | * @a: 24bit bus address | ||
88 | * | ||
89 | * Load the address register in the DMA controller. This has a 24bit | ||
90 | * limitation (16Mb). | ||
91 | */ | ||
92 | |||
93 | static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) | ||
94 | { | ||
95 | outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); | ||
96 | outb(a & 0xff, MCA_DMA_REG_EXE); | ||
97 | outb((a >> 8) & 0xff, MCA_DMA_REG_EXE); | ||
98 | outb((a >> 16) & 0xff, MCA_DMA_REG_EXE); | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * mca_get_dma_addr - read a 24bit DMA address | ||
103 | * @dmanr: DMA channel | ||
104 | * | ||
105 | * Read the address register in the DMA controller. This has a 24bit | ||
106 | * limitation (16Mb). The return is a bus address. | ||
107 | */ | ||
108 | |||
109 | static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) | ||
110 | { | ||
111 | unsigned int addr; | ||
112 | |||
113 | outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); | ||
114 | addr = inb(MCA_DMA_REG_EXE); | ||
115 | addr |= inb(MCA_DMA_REG_EXE) << 8; | ||
116 | addr |= inb(MCA_DMA_REG_EXE) << 16; | ||
117 | |||
118 | return addr; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * mca_set_dma_count - load a 16bit transfer count | ||
123 | * @dmanr: DMA channel | ||
124 | * @count: count | ||
125 | * | ||
126 | * Set the DMA count for this channel. This can be up to 64Kbytes. | ||
127 | * Setting a count of zero will not do what you expect. | ||
128 | */ | ||
129 | |||
130 | static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) | ||
131 | { | ||
132 | count--; /* transfers one more than count -- correct for this */ | ||
133 | |||
134 | outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN); | ||
135 | outb(count & 0xff, MCA_DMA_REG_EXE); | ||
136 | outb((count >> 8) & 0xff, MCA_DMA_REG_EXE); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * mca_get_dma_residue - get the remaining bytes to transfer | ||
141 | * @dmanr: DMA channel | ||
142 | * | ||
143 | * This function returns the number of bytes left to transfer | ||
144 | * on this DMA channel. | ||
145 | */ | ||
146 | |||
147 | static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) | ||
148 | { | ||
149 | unsigned short count; | ||
150 | |||
151 | outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN); | ||
152 | count = 1 + inb(MCA_DMA_REG_EXE); | ||
153 | count += inb(MCA_DMA_REG_EXE) << 8; | ||
154 | |||
155 | return count; | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * mca_set_dma_io - set the port for an I/O transfer | ||
160 | * @dmanr: DMA channel | ||
161 | * @io_addr: an I/O port number | ||
162 | * | ||
163 | * Unlike the ISA bus DMA controllers, DMA on the MCA bus can transfer | ||
164 | * to or from an I/O port target. | ||
165 | */ | ||
166 | |||
167 | static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) | ||
168 | { | ||
169 | /* | ||
170 | * DMA from a port address -- set the io address | ||
171 | */ | ||
172 | |||
173 | outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); | ||
174 | outb(io_addr & 0xff, MCA_DMA_REG_EXE); | ||
175 | outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * mca_set_dma_mode - set the DMA mode | ||
180 | * @dmanr: DMA channel | ||
181 | * @mode: mode to set | ||
182 | * | ||
183 | * The DMA controller supports several modes. The mode values you can | ||
184 | * set are: | ||
185 | * | ||
186 | * %MCA_DMA_MODE_READ when reading from the DMA device. | ||
187 | * | ||
188 | * %MCA_DMA_MODE_WRITE when writing to the DMA device. | ||
189 | * | ||
190 | * %MCA_DMA_MODE_IO to do DMA to or from an I/O port. | ||
191 | * | ||
192 | * %MCA_DMA_MODE_16 to do 16bit transfers. | ||
193 | * | ||
194 | */ | ||
195 | |||
196 | static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) | ||
197 | { | ||
198 | outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); | ||
199 | outb(mode, MCA_DMA_REG_EXE); | ||
200 | } | ||
201 | |||
202 | #endif /* MCA_DMA_H */ | ||
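Putting the helpers together, a driver typically masks the channel, programs address, count and mode, and then unmasks it. A rough sketch (the channel number and the isa_virt_to_bus() conversion are illustrative; the buffer must be DMA-able, i.e. below the 24-bit/16MB limit noted above):

	static void start_mca_dma(unsigned int dmanr, void *buf, unsigned int bytes)
	{
		mca_disable_dma(dmanr);                        /* mask the channel first */
		mca_set_dma_addr(dmanr, isa_virt_to_bus(buf)); /* 24-bit bus address */
		mca_set_dma_count(dmanr, bytes / 2);           /* 16-bit mode counts words, not bytes */
		mca_set_dma_mode(dmanr, MCA_DMA_MODE_READ | MCA_DMA_MODE_16);
		mca_enable_dma(dmanr);                         /* unmask and let it run */
	}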
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h new file mode 100644 index 000000000000..196619a83854 --- /dev/null +++ b/include/asm-i386/mman.h | |||
@@ -0,0 +1,43 @@ | |||
1 | #ifndef __I386_MMAN_H__ | ||
2 | #define __I386_MMAN_H__ | ||
3 | |||
4 | #define PROT_READ 0x1 /* page can be read */ | ||
5 | #define PROT_WRITE 0x2 /* page can be written */ | ||
6 | #define PROT_EXEC 0x4 /* page can be executed */ | ||
7 | #define PROT_SEM 0x8 /* page may be used for atomic ops */ | ||
8 | #define PROT_NONE 0x0 /* page can not be accessed */ | ||
9 | #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ | ||
10 | #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ | ||
11 | |||
12 | #define MAP_SHARED 0x01 /* Share changes */ | ||
13 | #define MAP_PRIVATE 0x02 /* Changes are private */ | ||
14 | #define MAP_TYPE 0x0f /* Mask for type of mapping */ | ||
15 | #define MAP_FIXED 0x10 /* Interpret addr exactly */ | ||
16 | #define MAP_ANONYMOUS 0x20 /* don't use a file */ | ||
17 | |||
18 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | ||
19 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
20 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
21 | #define MAP_LOCKED 0x2000 /* pages are locked */ | ||
22 | #define MAP_NORESERVE 0x4000 /* don't check for reservations */ | ||
23 | #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ | ||
24 | #define MAP_NONBLOCK 0x10000 /* do not block on IO */ | ||
25 | |||
26 | #define MS_ASYNC 1 /* sync memory asynchronously */ | ||
27 | #define MS_INVALIDATE 2 /* invalidate the caches */ | ||
28 | #define MS_SYNC 4 /* synchronous memory sync */ | ||
29 | |||
30 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
31 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
32 | |||
33 | #define MADV_NORMAL 0x0 /* default page-in behavior */ | ||
34 | #define MADV_RANDOM 0x1 /* page-in minimum required */ | ||
35 | #define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ | ||
36 | #define MADV_WILLNEED 0x3 /* pre-fault pages */ | ||
37 | #define MADV_DONTNEED 0x4 /* discard these pages */ | ||
38 | |||
39 | /* compatibility flags */ | ||
40 | #define MAP_ANON MAP_ANONYMOUS | ||
41 | #define MAP_FILE 0 | ||
42 | |||
43 | #endif /* __I386_MMAN_H__ */ | ||
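These are the values userspace ultimately passes to mmap(), msync(), mlockall() and madvise(). A small, standard C library example (nothing i386-specific about the usage itself):

	#include <sys/mman.h>

	/* anonymous, private, read-write mapping of one page */
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf != MAP_FAILED) {
		madvise(buf, 4096, MADV_SEQUENTIAL); /* hint: sequential access */
		munmap(buf, 4096);
	}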
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h new file mode 100644 index 000000000000..f431a0b86d4c --- /dev/null +++ b/include/asm-i386/mmu.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __i386_MMU_H | ||
2 | #define __i386_MMU_H | ||
3 | |||
4 | #include <asm/semaphore.h> | ||
5 | /* | ||
6 | * The i386 doesn't have an MMU context, but | ||
7 | * we put the segment information here. | ||
8 | * | ||
9 | * cpu_vm_mask is used to optimize ldt flushing. | ||
10 | */ | ||
11 | typedef struct { | ||
12 | int size; | ||
13 | struct semaphore sem; | ||
14 | void *ldt; | ||
15 | } mm_context_t; | ||
16 | |||
17 | #endif | ||
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h new file mode 100644 index 000000000000..bf08218357ea --- /dev/null +++ b/include/asm-i386/mmu_context.h | |||
@@ -0,0 +1,72 @@ | |||
1 | #ifndef __I386_SCHED_H | ||
2 | #define __I386_SCHED_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <asm/desc.h> | ||
6 | #include <asm/atomic.h> | ||
7 | #include <asm/pgalloc.h> | ||
8 | #include <asm/tlbflush.h> | ||
9 | |||
10 | /* | ||
11 | * Used for LDT copy/destruction. | ||
12 | */ | ||
13 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); | ||
14 | void destroy_context(struct mm_struct *mm); | ||
15 | |||
16 | |||
17 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
18 | { | ||
19 | #ifdef CONFIG_SMP | ||
20 | unsigned cpu = smp_processor_id(); | ||
21 | if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) | ||
22 | per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; | ||
23 | #endif | ||
24 | } | ||
25 | |||
26 | static inline void switch_mm(struct mm_struct *prev, | ||
27 | struct mm_struct *next, | ||
28 | struct task_struct *tsk) | ||
29 | { | ||
30 | int cpu = smp_processor_id(); | ||
31 | |||
32 | if (likely(prev != next)) { | ||
33 | /* stop flush ipis for the previous mm */ | ||
34 | cpu_clear(cpu, prev->cpu_vm_mask); | ||
35 | #ifdef CONFIG_SMP | ||
36 | per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; | ||
37 | per_cpu(cpu_tlbstate, cpu).active_mm = next; | ||
38 | #endif | ||
39 | cpu_set(cpu, next->cpu_vm_mask); | ||
40 | |||
41 | /* Re-load page tables */ | ||
42 | load_cr3(next->pgd); | ||
43 | |||
44 | /* | ||
45 | * load the LDT, if the LDT is different: | ||
46 | */ | ||
47 | if (unlikely(prev->context.ldt != next->context.ldt)) | ||
48 | load_LDT_nolock(&next->context, cpu); | ||
49 | } | ||
50 | #ifdef CONFIG_SMP | ||
51 | else { | ||
52 | per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; | ||
53 | BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); | ||
54 | |||
55 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | ||
56 | /* We were in lazy tlb mode and leave_mm disabled | ||
57 | * tlb flush IPI delivery. We must reload %cr3. | ||
58 | */ | ||
59 | load_cr3(next->pgd); | ||
60 | load_LDT_nolock(&next->context, cpu); | ||
61 | } | ||
62 | } | ||
63 | #endif | ||
64 | } | ||
65 | |||
66 | #define deactivate_mm(tsk, mm) \ | ||
67 | asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) | ||
68 | |||
69 | #define activate_mm(prev, next) \ | ||
70 | switch_mm((prev),(next),NULL) | ||
71 | |||
72 | #endif | ||
diff --git a/include/asm-i386/mmx.h b/include/asm-i386/mmx.h new file mode 100644 index 000000000000..46b71da99869 --- /dev/null +++ b/include/asm-i386/mmx.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _ASM_MMX_H | ||
2 | #define _ASM_MMX_H | ||
3 | |||
4 | /* | ||
5 | * MMX 3Dnow! helper operations | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | |||
10 | extern void *_mmx_memcpy(void *to, const void *from, size_t size); | ||
11 | extern void mmx_clear_page(void *page); | ||
12 | extern void mmx_copy_page(void *to, void *from); | ||
13 | |||
14 | #endif | ||
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h new file mode 100644 index 000000000000..13830ae67cac --- /dev/null +++ b/include/asm-i386/mmzone.h | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 | ||
3 | * | ||
4 | */ | ||
5 | |||
6 | #ifndef _ASM_MMZONE_H_ | ||
7 | #define _ASM_MMZONE_H_ | ||
8 | |||
9 | #include <asm/smp.h> | ||
10 | |||
11 | #ifdef CONFIG_DISCONTIGMEM | ||
12 | |||
13 | #ifdef CONFIG_NUMA | ||
14 | #ifdef CONFIG_X86_NUMAQ | ||
15 | #include <asm/numaq.h> | ||
16 | #else /* summit or generic arch */ | ||
17 | #include <asm/srat.h> | ||
18 | #endif | ||
19 | #else /* !CONFIG_NUMA */ | ||
20 | #define get_memcfg_numa get_memcfg_numa_flat | ||
21 | #define get_zholes_size(n) (0) | ||
22 | #endif /* CONFIG_NUMA */ | ||
23 | |||
24 | extern struct pglist_data *node_data[]; | ||
25 | #define NODE_DATA(nid) (node_data[nid]) | ||
26 | |||
27 | /* | ||
28 | * generic node memory support, the following assumptions apply: | ||
29 | * | ||
30 | * 1) memory comes in 256MB contiguous chunks which are either present or not | ||
31 | * 2) we will not have more than 64GB in total | ||
32 | * | ||
33 | * for now assume that 64GB is the maximum amount of RAM for the whole system | ||
34 | * 64GB / 4096 bytes/page = 16777216 pages | ||
35 | */ | ||
36 | #define MAX_NR_PAGES 16777216 | ||
37 | #define MAX_ELEMENTS 256 | ||
38 | #define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) | ||
39 | |||
40 | extern s8 physnode_map[]; | ||
41 | |||
42 | static inline int pfn_to_nid(unsigned long pfn) | ||
43 | { | ||
44 | #ifdef CONFIG_NUMA | ||
45 | return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]); | ||
46 | #else | ||
47 | return 0; | ||
48 | #endif | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * The following macros are specific to this NUMA platform. | ||
53 | */ | ||
54 | #define reserve_bootmem(addr, size) \ | ||
55 | reserve_bootmem_node(NODE_DATA(0), (addr), (size)) | ||
56 | #define alloc_bootmem(x) \ | ||
57 | __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | ||
58 | #define alloc_bootmem_low(x) \ | ||
59 | __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) | ||
60 | #define alloc_bootmem_pages(x) \ | ||
61 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | ||
62 | #define alloc_bootmem_low_pages(x) \ | ||
63 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) | ||
64 | #define alloc_bootmem_node(ignore, x) \ | ||
65 | __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | ||
66 | #define alloc_bootmem_pages_node(ignore, x) \ | ||
67 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | ||
68 | #define alloc_bootmem_low_pages_node(ignore, x) \ | ||
69 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) | ||
70 | |||
71 | #define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn) | ||
72 | |||
73 | /* | ||
74 | * The following macros must be defined by each NUMA implementation. | ||
75 | */ | ||
76 | |||
77 | /* | ||
78 | * Given a kernel address, find the home node of the underlying memory. | ||
79 | */ | ||
80 | #define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT) | ||
81 | |||
82 | #define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map) | ||
83 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
84 | #define node_end_pfn(nid) \ | ||
85 | ({ \ | ||
86 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
87 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
88 | }) | ||
89 | |||
90 | #define local_mapnr(kvaddr) \ | ||
91 | ({ \ | ||
92 | unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \ | ||
93 | (__pfn - node_start_pfn(pfn_to_nid(__pfn))); \ | ||
94 | }) | ||
95 | |||
96 | /* XXX: FIXME -- wli */ | ||
97 | #define kern_addr_valid(kaddr) (0) | ||
98 | |||
99 | #define pfn_to_page(pfn) \ | ||
100 | ({ \ | ||
101 | unsigned long __pfn = pfn; \ | ||
102 | int __node = pfn_to_nid(__pfn); \ | ||
103 | &node_mem_map(__node)[node_localnr(__pfn,__node)]; \ | ||
104 | }) | ||
105 | |||
106 | #define page_to_pfn(pg) \ | ||
107 | ({ \ | ||
108 | struct page *__page = pg; \ | ||
109 | struct zone *__zone = page_zone(__page); \ | ||
110 | (unsigned long)(__page - __zone->zone_mem_map) \ | ||
111 | + __zone->zone_start_pfn; \ | ||
112 | }) | ||
113 | |||
114 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ | ||
115 | #define pfn_valid(pfn) ((pfn) < num_physpages) | ||
116 | #else | ||
117 | static inline int pfn_valid(int pfn) | ||
118 | { | ||
119 | int nid = pfn_to_nid(pfn); | ||
120 | |||
121 | if (nid >= 0) | ||
122 | return (pfn < node_end_pfn(nid)); | ||
123 | return 0; | ||
124 | } | ||
125 | #endif | ||
126 | |||
127 | extern int get_memcfg_numa_flat(void ); | ||
128 | /* | ||
129 | * This allows any one NUMA architecture to be compiled | ||
130 | * for, and still fall back to the flat function if it | ||
131 | * fails. | ||
132 | */ | ||
133 | static inline void get_memcfg_numa(void) | ||
134 | { | ||
135 | #ifdef CONFIG_X86_NUMAQ | ||
136 | if (get_memcfg_numaq()) | ||
137 | return; | ||
138 | #elif defined(CONFIG_ACPI_SRAT) | ||
139 | if (get_memcfg_from_srat()) | ||
140 | return; | ||
141 | #endif | ||
142 | |||
143 | get_memcfg_numa_flat(); | ||
144 | } | ||
145 | |||
146 | #endif /* CONFIG_DISCONTIGMEM */ | ||
147 | #endif /* _ASM_MMZONE_H_ */ | ||
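To make the element arithmetic concrete: PAGES_PER_ELEMENT is 16777216/256 = 65536 pages, i.e. each physnode_map entry covers exactly 256MB. A worked example of the lookup pfn_to_nid() performs (the node assignment itself is hypothetical):

	/* pfn 0x50000 -> physical address 0x50000 << 12 = 0x50000000 (1.25GB) */
	/* element index = 0x50000 / 65536 = 5, i.e. the sixth 256MB chunk     */
	/* physnode_map[5] holds the node that owns that chunk, so:            */
	int nid = pfn_to_nid(0x50000);   /* == physnode_map[5] on a NUMA build */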
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h new file mode 100644 index 000000000000..508865e26308 --- /dev/null +++ b/include/asm-i386/module.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef _ASM_I386_MODULE_H | ||
2 | #define _ASM_I386_MODULE_H | ||
3 | |||
4 | /* x86 is simple */ | ||
5 | struct mod_arch_specific | ||
6 | { | ||
7 | }; | ||
8 | |||
9 | #define Elf_Shdr Elf32_Shdr | ||
10 | #define Elf_Sym Elf32_Sym | ||
11 | #define Elf_Ehdr Elf32_Ehdr | ||
12 | |||
13 | #ifdef CONFIG_M386 | ||
14 | #define MODULE_PROC_FAMILY "386 " | ||
15 | #elif defined CONFIG_M486 | ||
16 | #define MODULE_PROC_FAMILY "486 " | ||
17 | #elif defined CONFIG_M586 | ||
18 | #define MODULE_PROC_FAMILY "586 " | ||
19 | #elif defined CONFIG_M586TSC | ||
20 | #define MODULE_PROC_FAMILY "586TSC " | ||
21 | #elif defined CONFIG_M586MMX | ||
22 | #define MODULE_PROC_FAMILY "586MMX " | ||
23 | #elif defined CONFIG_M686 | ||
24 | #define MODULE_PROC_FAMILY "686 " | ||
25 | #elif defined CONFIG_MPENTIUMII | ||
26 | #define MODULE_PROC_FAMILY "PENTIUMII " | ||
27 | #elif defined CONFIG_MPENTIUMIII | ||
28 | #define MODULE_PROC_FAMILY "PENTIUMIII " | ||
29 | #elif defined CONFIG_MPENTIUMM | ||
30 | #define MODULE_PROC_FAMILY "PENTIUMM " | ||
31 | #elif defined CONFIG_MPENTIUM4 | ||
32 | #define MODULE_PROC_FAMILY "PENTIUM4 " | ||
33 | #elif defined CONFIG_MK6 | ||
34 | #define MODULE_PROC_FAMILY "K6 " | ||
35 | #elif defined CONFIG_MK7 | ||
36 | #define MODULE_PROC_FAMILY "K7 " | ||
37 | #elif defined CONFIG_MK8 | ||
38 | #define MODULE_PROC_FAMILY "K8 " | ||
39 | #elif defined CONFIG_X86_ELAN | ||
40 | #define MODULE_PROC_FAMILY "ELAN " | ||
41 | #elif defined CONFIG_MCRUSOE | ||
42 | #define MODULE_PROC_FAMILY "CRUSOE " | ||
43 | #elif defined CONFIG_MEFFICEON | ||
44 | #define MODULE_PROC_FAMILY "EFFICEON " | ||
45 | #elif defined CONFIG_MWINCHIPC6 | ||
46 | #define MODULE_PROC_FAMILY "WINCHIPC6 " | ||
47 | #elif defined CONFIG_MWINCHIP2 | ||
48 | #define MODULE_PROC_FAMILY "WINCHIP2 " | ||
49 | #elif defined CONFIG_MWINCHIP3D | ||
50 | #define MODULE_PROC_FAMILY "WINCHIP3D " | ||
51 | #elif defined CONFIG_MCYRIXIII | ||
52 | #define MODULE_PROC_FAMILY "CYRIXIII " | ||
53 | #elif defined CONFIG_MVIAC3_2 | ||
54 | #define MODULE_PROC_FAMILY "VIAC3-2 " | ||
55 | #elif defined CONFIG_MGEODE | ||
56 | #define MODULE_PROC_FAMILY "GEODE " | ||
57 | #else | ||
58 | #error unknown processor family | ||
59 | #endif | ||
60 | |||
61 | #ifdef CONFIG_REGPARM | ||
62 | #define MODULE_REGPARM "REGPARM " | ||
63 | #else | ||
64 | #define MODULE_REGPARM "" | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_4KSTACKS | ||
68 | #define MODULE_STACKSIZE "4KSTACKS " | ||
69 | #else | ||
70 | #define MODULE_STACKSIZE "" | ||
71 | #endif | ||
72 | |||
73 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE | ||
74 | |||
75 | #endif /* _ASM_I386_MODULE_H */ | ||
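The three MODULE_* strings are string-literal concatenations, so the architecture part of a module's vermagic is easy to predict. For a hypothetical configuration with CONFIG_M686, CONFIG_REGPARM and CONFIG_4KSTACKS all set:

	/* MODULE_PROC_FAMILY -> "686 "      */
	/* MODULE_REGPARM     -> "REGPARM "  */
	/* MODULE_STACKSIZE   -> "4KSTACKS " */
	/* MODULE_ARCH_VERMAGIC therefore expands to: */
	const char *example_vermagic_tail = "686 " "REGPARM " "4KSTACKS "; /* "686 REGPARM 4KSTACKS " */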
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h new file mode 100644 index 000000000000..d9fafba075bc --- /dev/null +++ b/include/asm-i386/mpspec.h | |||
@@ -0,0 +1,83 @@ | |||
1 | #ifndef __ASM_MPSPEC_H | ||
2 | #define __ASM_MPSPEC_H | ||
3 | |||
4 | #include <linux/cpumask.h> | ||
5 | #include <asm/mpspec_def.h> | ||
6 | #include <mach_mpspec.h> | ||
7 | |||
8 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; | ||
9 | extern int mp_bus_id_to_node [MAX_MP_BUSSES]; | ||
10 | extern int mp_bus_id_to_local [MAX_MP_BUSSES]; | ||
11 | extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | ||
12 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | ||
13 | |||
14 | extern unsigned int boot_cpu_physical_apicid; | ||
15 | extern int smp_found_config; | ||
16 | extern void find_smp_config (void); | ||
17 | extern void get_smp_config (void); | ||
18 | extern int nr_ioapics; | ||
19 | extern int apic_version [MAX_APICS]; | ||
20 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; | ||
21 | extern int mp_irq_entries; | ||
22 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; | ||
23 | extern int mpc_default_type; | ||
24 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | ||
25 | extern unsigned long mp_lapic_addr; | ||
26 | extern int pic_mode; | ||
27 | extern int using_apic_timer; | ||
28 | |||
29 | #ifdef CONFIG_ACPI_BOOT | ||
30 | extern void mp_register_lapic (u8 id, u8 enabled); | ||
31 | extern void mp_register_lapic_address (u64 address); | ||
32 | extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); | ||
33 | extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); | ||
34 | extern void mp_config_acpi_legacy_irqs (void); | ||
35 | extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); | ||
36 | #endif /*CONFIG_ACPI_BOOT*/ | ||
37 | |||
38 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | ||
39 | |||
40 | struct physid_mask | ||
41 | { | ||
42 | unsigned long mask[PHYSID_ARRAY_SIZE]; | ||
43 | }; | ||
44 | |||
45 | typedef struct physid_mask physid_mask_t; | ||
46 | |||
47 | #define physid_set(physid, map) set_bit(physid, (map).mask) | ||
48 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | ||
49 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | ||
50 | #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) | ||
51 | |||
52 | #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
53 | #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
54 | #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) | ||
55 | #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) | ||
56 | #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) | ||
57 | #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | ||
58 | #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) | ||
59 | #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | ||
60 | #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | ||
61 | #define physids_coerce(map) ((map).mask[0]) | ||
62 | |||
63 | #define physids_promote(physids) \ | ||
64 | ({ \ | ||
65 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
66 | __physid_mask.mask[0] = physids; \ | ||
67 | __physid_mask; \ | ||
68 | }) | ||
69 | |||
70 | #define physid_mask_of_physid(physid) \ | ||
71 | ({ \ | ||
72 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
73 | physid_set(physid, __physid_mask); \ | ||
74 | __physid_mask; \ | ||
75 | }) | ||
76 | |||
77 | #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } | ||
78 | #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } | ||
79 | |||
80 | extern physid_mask_t phys_cpu_present_map; | ||
81 | |||
82 | #endif | ||
83 | |||
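The physid_mask helpers mirror the generic bitmap/cpumask operations but are sized by MAX_APICS. A short sketch of typical use (the APIC ids are made up):

	physid_mask_t present = PHYSID_MASK_NONE;

	physid_set(3, present);                  /* mark physical APIC id 3 present */
	if (physid_isset(3, present))
		;                                /* ... it is ... */

	present = physid_mask_of_physid(5);      /* new mask with only id 5 set */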
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h new file mode 100644 index 000000000000..a961093dbf88 --- /dev/null +++ b/include/asm-i386/mpspec_def.h | |||
@@ -0,0 +1,188 @@ | |||
1 | #ifndef __ASM_MPSPEC_DEF_H | ||
2 | #define __ASM_MPSPEC_DEF_H | ||
3 | |||
4 | /* | ||
5 | * Structure definitions for SMP machines following the | ||
6 | * Intel MultiProcessor Specification, versions 1.1 and 1.4. | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This tag identifies where the SMP configuration | ||
11 | * information is. | ||
12 | */ | ||
13 | |||
14 | #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') | ||
15 | |||
16 | #define MAX_MPC_ENTRY 1024 | ||
17 | #define MAX_APICS 256 | ||
18 | |||
19 | struct intel_mp_floating | ||
20 | { | ||
21 | char mpf_signature[4]; /* "_MP_" */ | ||
22 | unsigned long mpf_physptr; /* Configuration table address */ | ||
23 | unsigned char mpf_length; /* Our length (paragraphs) */ | ||
24 | unsigned char mpf_specification;/* Specification version */ | ||
25 | unsigned char mpf_checksum; /* Checksum (makes sum 0) */ | ||
26 | unsigned char mpf_feature1; /* Standard or configuration ? */ | ||
27 | unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ | ||
28 | unsigned char mpf_feature3; /* Unused (0) */ | ||
29 | unsigned char mpf_feature4; /* Unused (0) */ | ||
30 | unsigned char mpf_feature5; /* Unused (0) */ | ||
31 | }; | ||
32 | |||
33 | struct mp_config_table | ||
34 | { | ||
35 | char mpc_signature[4]; | ||
36 | #define MPC_SIGNATURE "PCMP" | ||
37 | unsigned short mpc_length; /* Size of table */ | ||
38 | char mpc_spec; /* 0x01 */ | ||
39 | char mpc_checksum; | ||
40 | char mpc_oem[8]; | ||
41 | char mpc_productid[12]; | ||
42 | unsigned long mpc_oemptr; /* 0 if not present */ | ||
43 | unsigned short mpc_oemsize; /* 0 if not present */ | ||
44 | unsigned short mpc_oemcount; | ||
45 | unsigned long mpc_lapic; /* APIC address */ | ||
46 | unsigned long reserved; | ||
47 | }; | ||
48 | |||
49 | /* Followed by entries */ | ||
50 | |||
51 | #define MP_PROCESSOR 0 | ||
52 | #define MP_BUS 1 | ||
53 | #define MP_IOAPIC 2 | ||
54 | #define MP_INTSRC 3 | ||
55 | #define MP_LINTSRC 4 | ||
56 | #define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ | ||
57 | |||
58 | struct mpc_config_processor | ||
59 | { | ||
60 | unsigned char mpc_type; | ||
61 | unsigned char mpc_apicid; /* Local APIC number */ | ||
62 | unsigned char mpc_apicver; /* Its versions */ | ||
63 | unsigned char mpc_cpuflag; | ||
64 | #define CPU_ENABLED 1 /* Processor is available */ | ||
65 | #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ | ||
66 | unsigned long mpc_cpufeature; | ||
67 | #define CPU_STEPPING_MASK 0x0F | ||
68 | #define CPU_MODEL_MASK 0xF0 | ||
69 | #define CPU_FAMILY_MASK 0xF00 | ||
70 | unsigned long mpc_featureflag; /* CPUID feature value */ | ||
71 | unsigned long mpc_reserved[2]; | ||
72 | }; | ||
73 | |||
74 | struct mpc_config_bus | ||
75 | { | ||
76 | unsigned char mpc_type; | ||
77 | unsigned char mpc_busid; | ||
78 | unsigned char mpc_bustype[6] __attribute((packed)); | ||
79 | }; | ||
80 | |||
81 | /* List of Bus Type string values, Intel MP Spec. */ | ||
82 | #define BUSTYPE_EISA "EISA" | ||
83 | #define BUSTYPE_ISA "ISA" | ||
84 | #define BUSTYPE_INTERN "INTERN" /* Internal BUS */ | ||
85 | #define BUSTYPE_MCA "MCA" | ||
86 | #define BUSTYPE_VL "VL" /* Local bus */ | ||
87 | #define BUSTYPE_PCI "PCI" | ||
88 | #define BUSTYPE_PCMCIA "PCMCIA" | ||
89 | #define BUSTYPE_CBUS "CBUS" | ||
90 | #define BUSTYPE_CBUSII "CBUSII" | ||
91 | #define BUSTYPE_FUTURE "FUTURE" | ||
92 | #define BUSTYPE_MBI "MBI" | ||
93 | #define BUSTYPE_MBII "MBII" | ||
94 | #define BUSTYPE_MPI "MPI" | ||
95 | #define BUSTYPE_MPSA "MPSA" | ||
96 | #define BUSTYPE_NUBUS "NUBUS" | ||
97 | #define BUSTYPE_TC "TC" | ||
98 | #define BUSTYPE_VME "VME" | ||
99 | #define BUSTYPE_XPRESS "XPRESS" | ||
100 | #define BUSTYPE_NEC98 "NEC98" | ||
101 | |||
102 | struct mpc_config_ioapic | ||
103 | { | ||
104 | unsigned char mpc_type; | ||
105 | unsigned char mpc_apicid; | ||
106 | unsigned char mpc_apicver; | ||
107 | unsigned char mpc_flags; | ||
108 | #define MPC_APIC_USABLE 0x01 | ||
109 | unsigned long mpc_apicaddr; | ||
110 | }; | ||
111 | |||
112 | struct mpc_config_intsrc | ||
113 | { | ||
114 | unsigned char mpc_type; | ||
115 | unsigned char mpc_irqtype; | ||
116 | unsigned short mpc_irqflag; | ||
117 | unsigned char mpc_srcbus; | ||
118 | unsigned char mpc_srcbusirq; | ||
119 | unsigned char mpc_dstapic; | ||
120 | unsigned char mpc_dstirq; | ||
121 | }; | ||
122 | |||
123 | enum mp_irq_source_types { | ||
124 | mp_INT = 0, | ||
125 | mp_NMI = 1, | ||
126 | mp_SMI = 2, | ||
127 | mp_ExtINT = 3 | ||
128 | }; | ||
129 | |||
130 | #define MP_IRQDIR_DEFAULT 0 | ||
131 | #define MP_IRQDIR_HIGH 1 | ||
132 | #define MP_IRQDIR_LOW 3 | ||
133 | |||
134 | |||
135 | struct mpc_config_lintsrc | ||
136 | { | ||
137 | unsigned char mpc_type; | ||
138 | unsigned char mpc_irqtype; | ||
139 | unsigned short mpc_irqflag; | ||
140 | unsigned char mpc_srcbusid; | ||
141 | unsigned char mpc_srcbusirq; | ||
142 | unsigned char mpc_destapic; | ||
143 | #define MP_APIC_ALL 0xFF | ||
144 | unsigned char mpc_destapiclint; | ||
145 | }; | ||
146 | |||
147 | struct mp_config_oemtable | ||
148 | { | ||
149 | char oem_signature[4]; | ||
150 | #define MPC_OEM_SIGNATURE "_OEM" | ||
151 | unsigned short oem_length; /* Size of table */ | ||
152 | char oem_rev; /* 0x01 */ | ||
153 | char oem_checksum; | ||
154 | char mpc_oem[8]; | ||
155 | }; | ||
156 | |||
157 | struct mpc_config_translation | ||
158 | { | ||
159 | unsigned char mpc_type; | ||
160 | unsigned char trans_len; | ||
161 | unsigned char trans_type; | ||
162 | unsigned char trans_quad; | ||
163 | unsigned char trans_global; | ||
164 | unsigned char trans_local; | ||
165 | unsigned short trans_reserved; | ||
166 | }; | ||
167 | |||
168 | /* | ||
169 | * Default configurations | ||
170 | * | ||
171 | * 1 2 CPU ISA 82489DX | ||
172 | * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining | ||
173 | * 3 2 CPU EISA 82489DX | ||
174 | * 4 2 CPU MCA 82489DX | ||
175 | * 5 2 CPU ISA+PCI | ||
176 | * 6 2 CPU EISA+PCI | ||
177 | * 7 2 CPU MCA+PCI | ||
178 | */ | ||
179 | |||
180 | enum mp_bustype { | ||
181 | MP_BUS_ISA = 1, | ||
182 | MP_BUS_EISA, | ||
183 | MP_BUS_PCI, | ||
184 | MP_BUS_MCA, | ||
185 | MP_BUS_NEC98 | ||
186 | }; | ||
187 | #endif | ||
188 | |||
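SMP_MAGIC_IDENT packs the "_MP_" signature into one 32-bit compare, which is how the floating pointer structure is located in the BIOS areas. A simplified sketch of such a scan (the range handling is illustrative, and the checksum/length verification that real code performs is omitted):

	static struct intel_mp_floating *scan_for_mpf(void *base, unsigned long length)
	{
		unsigned int *p = base;

		/* the structure is 16-byte aligned and begins with "_MP_" */
		for (; length >= 16; length -= 16, p += 4) {
			if (*p == SMP_MAGIC_IDENT)
				return (struct intel_mp_floating *)p;
		}
		return NULL;
	}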
diff --git a/include/asm-i386/msgbuf.h b/include/asm-i386/msgbuf.h new file mode 100644 index 000000000000..b8d659c157ae --- /dev/null +++ b/include/asm-i386/msgbuf.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef _I386_MSGBUF_H | ||
2 | #define _I386_MSGBUF_H | ||
3 | |||
4 | /* | ||
5 | * The msqid64_ds structure for i386 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct msqid64_ds { | ||
15 | struct ipc64_perm msg_perm; | ||
16 | __kernel_time_t msg_stime; /* last msgsnd time */ | ||
17 | unsigned long __unused1; | ||
18 | __kernel_time_t msg_rtime; /* last msgrcv time */ | ||
19 | unsigned long __unused2; | ||
20 | __kernel_time_t msg_ctime; /* last change time */ | ||
21 | unsigned long __unused3; | ||
22 | unsigned long msg_cbytes; /* current number of bytes on queue */ | ||
23 | unsigned long msg_qnum; /* number of messages in queue */ | ||
24 | unsigned long msg_qbytes; /* max number of bytes on queue */ | ||
25 | __kernel_pid_t msg_lspid; /* pid of last msgsnd */ | ||
26 | __kernel_pid_t msg_lrpid; /* last receive pid */ | ||
27 | unsigned long __unused4; | ||
28 | unsigned long __unused5; | ||
29 | }; | ||
30 | |||
31 | #endif /* _I386_MSGBUF_H */ | ||
diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h new file mode 100644 index 000000000000..b85393094c83 --- /dev/null +++ b/include/asm-i386/msi.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003-2004 Intel | ||
3 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) | ||
4 | */ | ||
5 | |||
6 | #ifndef ASM_MSI_H | ||
7 | #define ASM_MSI_H | ||
8 | |||
9 | #include <asm/desc.h> | ||
10 | #include <mach_apic.h> | ||
11 | |||
12 | #define LAST_DEVICE_VECTOR 232 | ||
13 | #define MSI_DEST_MODE MSI_LOGICAL_MODE | ||
14 | #define MSI_TARGET_CPU_SHIFT 12 | ||
15 | |||
16 | #ifdef CONFIG_SMP | ||
17 | #define MSI_TARGET_CPU logical_smp_processor_id() | ||
18 | #else | ||
19 | #define MSI_TARGET_CPU cpu_to_logical_apicid(first_cpu(cpu_online_map)) | ||
20 | #endif | ||
21 | |||
22 | #endif /* ASM_MSI_H */ | ||
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h new file mode 100644 index 000000000000..c76fce8badbb --- /dev/null +++ b/include/asm-i386/msr.h | |||
@@ -0,0 +1,272 @@ | |||
1 | #ifndef __ASM_MSR_H | ||
2 | #define __ASM_MSR_H | ||
3 | |||
4 | /* | ||
5 | * Access to model-specific registers (MSRs, available on 586 and better only). | ||
6 | * Note: the rd* operations modify the parameters directly (without using | ||
7 | * pointer indirection); this allows gcc to optimize better. | ||
8 | */ | ||
9 | |||
10 | #define rdmsr(msr,val1,val2) \ | ||
11 | __asm__ __volatile__("rdmsr" \ | ||
12 | : "=a" (val1), "=d" (val2) \ | ||
13 | : "c" (msr)) | ||
14 | |||
15 | #define wrmsr(msr,val1,val2) \ | ||
16 | __asm__ __volatile__("wrmsr" \ | ||
17 | : /* no outputs */ \ | ||
18 | : "c" (msr), "a" (val1), "d" (val2)) | ||
19 | |||
20 | #define rdmsrl(msr,val) do { \ | ||
21 | unsigned long l__,h__; \ | ||
22 | rdmsr (msr, l__, h__); \ | ||
23 | val = l__; \ | ||
24 | val |= ((u64)h__<<32); \ | ||
25 | } while(0) | ||
26 | |||
27 | static inline void wrmsrl (unsigned long msr, unsigned long long val) | ||
28 | { | ||
29 | unsigned long lo, hi; | ||
30 | lo = (unsigned long) val; | ||
31 | hi = val >> 32; | ||
32 | wrmsr (msr, lo, hi); | ||
33 | } | ||
34 | |||
35 | /* wrmsr with exception handling */ | ||
36 | #define wrmsr_safe(msr,a,b) ({ int ret__; \ | ||
37 | asm volatile("2: wrmsr ; xorl %0,%0\n" \ | ||
38 | "1:\n\t" \ | ||
39 | ".section .fixup,\"ax\"\n\t" \ | ||
40 | "3: movl %4,%0 ; jmp 1b\n\t" \ | ||
41 | ".previous\n\t" \ | ||
42 | ".section __ex_table,\"a\"\n" \ | ||
43 | " .align 4\n\t" \ | ||
44 | " .long 2b,3b\n\t" \ | ||
45 | ".previous" \ | ||
46 | : "=a" (ret__) \ | ||
47 | : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\ | ||
48 | ret__; }) | ||
49 | |||
50 | #define rdtsc(low,high) \ | ||
51 | __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) | ||
52 | |||
53 | #define rdtscl(low) \ | ||
54 | __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx") | ||
55 | |||
56 | #define rdtscll(val) \ | ||
57 | __asm__ __volatile__("rdtsc" : "=A" (val)) | ||
58 | |||
59 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | ||
60 | |||
61 | #define rdpmc(counter,low,high) \ | ||
62 | __asm__ __volatile__("rdpmc" \ | ||
63 | : "=a" (low), "=d" (high) \ | ||
64 | : "c" (counter)) | ||
65 | |||
66 | /* symbolic names for some interesting MSRs */ | ||
67 | /* Intel defined MSRs. */ | ||
68 | #define MSR_IA32_P5_MC_ADDR 0 | ||
69 | #define MSR_IA32_P5_MC_TYPE 1 | ||
70 | #define MSR_IA32_PLATFORM_ID 0x17 | ||
71 | #define MSR_IA32_EBL_CR_POWERON 0x2a | ||
72 | |||
73 | #define MSR_IA32_APICBASE 0x1b | ||
74 | #define MSR_IA32_APICBASE_BSP (1<<8) | ||
75 | #define MSR_IA32_APICBASE_ENABLE (1<<11) | ||
76 | #define MSR_IA32_APICBASE_BASE (0xfffff<<12) | ||
77 | |||
78 | #define MSR_IA32_UCODE_WRITE 0x79 | ||
79 | #define MSR_IA32_UCODE_REV 0x8b | ||
80 | |||
81 | #define MSR_P6_PERFCTR0 0xc1 | ||
82 | #define MSR_P6_PERFCTR1 0xc2 | ||
83 | |||
84 | #define MSR_IA32_BBL_CR_CTL 0x119 | ||
85 | |||
86 | #define MSR_IA32_SYSENTER_CS 0x174 | ||
87 | #define MSR_IA32_SYSENTER_ESP 0x175 | ||
88 | #define MSR_IA32_SYSENTER_EIP 0x176 | ||
89 | |||
90 | #define MSR_IA32_MCG_CAP 0x179 | ||
91 | #define MSR_IA32_MCG_STATUS 0x17a | ||
92 | #define MSR_IA32_MCG_CTL 0x17b | ||
93 | |||
94 | /* P4/Xeon+ specific */ | ||
95 | #define MSR_IA32_MCG_EAX 0x180 | ||
96 | #define MSR_IA32_MCG_EBX 0x181 | ||
97 | #define MSR_IA32_MCG_ECX 0x182 | ||
98 | #define MSR_IA32_MCG_EDX 0x183 | ||
99 | #define MSR_IA32_MCG_ESI 0x184 | ||
100 | #define MSR_IA32_MCG_EDI 0x185 | ||
101 | #define MSR_IA32_MCG_EBP 0x186 | ||
102 | #define MSR_IA32_MCG_ESP 0x187 | ||
103 | #define MSR_IA32_MCG_EFLAGS 0x188 | ||
104 | #define MSR_IA32_MCG_EIP 0x189 | ||
105 | #define MSR_IA32_MCG_RESERVED 0x18A | ||
106 | |||
107 | #define MSR_P6_EVNTSEL0 0x186 | ||
108 | #define MSR_P6_EVNTSEL1 0x187 | ||
109 | |||
110 | #define MSR_IA32_PERF_STATUS 0x198 | ||
111 | #define MSR_IA32_PERF_CTL 0x199 | ||
112 | |||
113 | #define MSR_IA32_THERM_CONTROL 0x19a | ||
114 | #define MSR_IA32_THERM_INTERRUPT 0x19b | ||
115 | #define MSR_IA32_THERM_STATUS 0x19c | ||
116 | #define MSR_IA32_MISC_ENABLE 0x1a0 | ||
117 | |||
118 | #define MSR_IA32_DEBUGCTLMSR 0x1d9 | ||
119 | #define MSR_IA32_LASTBRANCHFROMIP 0x1db | ||
120 | #define MSR_IA32_LASTBRANCHTOIP 0x1dc | ||
121 | #define MSR_IA32_LASTINTFROMIP 0x1dd | ||
122 | #define MSR_IA32_LASTINTTOIP 0x1de | ||
123 | |||
124 | #define MSR_IA32_MC0_CTL 0x400 | ||
125 | #define MSR_IA32_MC0_STATUS 0x401 | ||
126 | #define MSR_IA32_MC0_ADDR 0x402 | ||
127 | #define MSR_IA32_MC0_MISC 0x403 | ||
128 | |||
129 | /* Pentium IV performance counter MSRs */ | ||
130 | #define MSR_P4_BPU_PERFCTR0 0x300 | ||
131 | #define MSR_P4_BPU_PERFCTR1 0x301 | ||
132 | #define MSR_P4_BPU_PERFCTR2 0x302 | ||
133 | #define MSR_P4_BPU_PERFCTR3 0x303 | ||
134 | #define MSR_P4_MS_PERFCTR0 0x304 | ||
135 | #define MSR_P4_MS_PERFCTR1 0x305 | ||
136 | #define MSR_P4_MS_PERFCTR2 0x306 | ||
137 | #define MSR_P4_MS_PERFCTR3 0x307 | ||
138 | #define MSR_P4_FLAME_PERFCTR0 0x308 | ||
139 | #define MSR_P4_FLAME_PERFCTR1 0x309 | ||
140 | #define MSR_P4_FLAME_PERFCTR2 0x30a | ||
141 | #define MSR_P4_FLAME_PERFCTR3 0x30b | ||
142 | #define MSR_P4_IQ_PERFCTR0 0x30c | ||
143 | #define MSR_P4_IQ_PERFCTR1 0x30d | ||
144 | #define MSR_P4_IQ_PERFCTR2 0x30e | ||
145 | #define MSR_P4_IQ_PERFCTR3 0x30f | ||
146 | #define MSR_P4_IQ_PERFCTR4 0x310 | ||
147 | #define MSR_P4_IQ_PERFCTR5 0x311 | ||
148 | #define MSR_P4_BPU_CCCR0 0x360 | ||
149 | #define MSR_P4_BPU_CCCR1 0x361 | ||
150 | #define MSR_P4_BPU_CCCR2 0x362 | ||
151 | #define MSR_P4_BPU_CCCR3 0x363 | ||
152 | #define MSR_P4_MS_CCCR0 0x364 | ||
153 | #define MSR_P4_MS_CCCR1 0x365 | ||
154 | #define MSR_P4_MS_CCCR2 0x366 | ||
155 | #define MSR_P4_MS_CCCR3 0x367 | ||
156 | #define MSR_P4_FLAME_CCCR0 0x368 | ||
157 | #define MSR_P4_FLAME_CCCR1 0x369 | ||
158 | #define MSR_P4_FLAME_CCCR2 0x36a | ||
159 | #define MSR_P4_FLAME_CCCR3 0x36b | ||
160 | #define MSR_P4_IQ_CCCR0 0x36c | ||
161 | #define MSR_P4_IQ_CCCR1 0x36d | ||
162 | #define MSR_P4_IQ_CCCR2 0x36e | ||
163 | #define MSR_P4_IQ_CCCR3 0x36f | ||
164 | #define MSR_P4_IQ_CCCR4 0x370 | ||
165 | #define MSR_P4_IQ_CCCR5 0x371 | ||
166 | #define MSR_P4_ALF_ESCR0 0x3ca | ||
167 | #define MSR_P4_ALF_ESCR1 0x3cb | ||
168 | #define MSR_P4_BPU_ESCR0 0x3b2 | ||
169 | #define MSR_P4_BPU_ESCR1 0x3b3 | ||
170 | #define MSR_P4_BSU_ESCR0 0x3a0 | ||
171 | #define MSR_P4_BSU_ESCR1 0x3a1 | ||
172 | #define MSR_P4_CRU_ESCR0 0x3b8 | ||
173 | #define MSR_P4_CRU_ESCR1 0x3b9 | ||
174 | #define MSR_P4_CRU_ESCR2 0x3cc | ||
175 | #define MSR_P4_CRU_ESCR3 0x3cd | ||
176 | #define MSR_P4_CRU_ESCR4 0x3e0 | ||
177 | #define MSR_P4_CRU_ESCR5 0x3e1 | ||
178 | #define MSR_P4_DAC_ESCR0 0x3a8 | ||
179 | #define MSR_P4_DAC_ESCR1 0x3a9 | ||
180 | #define MSR_P4_FIRM_ESCR0 0x3a4 | ||
181 | #define MSR_P4_FIRM_ESCR1 0x3a5 | ||
182 | #define MSR_P4_FLAME_ESCR0 0x3a6 | ||
183 | #define MSR_P4_FLAME_ESCR1 0x3a7 | ||
184 | #define MSR_P4_FSB_ESCR0 0x3a2 | ||
185 | #define MSR_P4_FSB_ESCR1 0x3a3 | ||
186 | #define MSR_P4_IQ_ESCR0 0x3ba | ||
187 | #define MSR_P4_IQ_ESCR1 0x3bb | ||
188 | #define MSR_P4_IS_ESCR0 0x3b4 | ||
189 | #define MSR_P4_IS_ESCR1 0x3b5 | ||
190 | #define MSR_P4_ITLB_ESCR0 0x3b6 | ||
191 | #define MSR_P4_ITLB_ESCR1 0x3b7 | ||
192 | #define MSR_P4_IX_ESCR0 0x3c8 | ||
193 | #define MSR_P4_IX_ESCR1 0x3c9 | ||
194 | #define MSR_P4_MOB_ESCR0 0x3aa | ||
195 | #define MSR_P4_MOB_ESCR1 0x3ab | ||
196 | #define MSR_P4_MS_ESCR0 0x3c0 | ||
197 | #define MSR_P4_MS_ESCR1 0x3c1 | ||
198 | #define MSR_P4_PMH_ESCR0 0x3ac | ||
199 | #define MSR_P4_PMH_ESCR1 0x3ad | ||
200 | #define MSR_P4_RAT_ESCR0 0x3bc | ||
201 | #define MSR_P4_RAT_ESCR1 0x3bd | ||
202 | #define MSR_P4_SAAT_ESCR0 0x3ae | ||
203 | #define MSR_P4_SAAT_ESCR1 0x3af | ||
204 | #define MSR_P4_SSU_ESCR0 0x3be | ||
205 | #define MSR_P4_SSU_ESCR1 0x3bf /* guess: not defined in manual */ | ||
206 | #define MSR_P4_TBPU_ESCR0 0x3c2 | ||
207 | #define MSR_P4_TBPU_ESCR1 0x3c3 | ||
208 | #define MSR_P4_TC_ESCR0 0x3c4 | ||
209 | #define MSR_P4_TC_ESCR1 0x3c5 | ||
210 | #define MSR_P4_U2L_ESCR0 0x3b0 | ||
211 | #define MSR_P4_U2L_ESCR1 0x3b1 | ||
212 | |||
213 | /* AMD Defined MSRs */ | ||
214 | #define MSR_K6_EFER 0xC0000080 | ||
215 | #define MSR_K6_STAR 0xC0000081 | ||
216 | #define MSR_K6_WHCR 0xC0000082 | ||
217 | #define MSR_K6_UWCCR 0xC0000085 | ||
218 | #define MSR_K6_EPMR 0xC0000086 | ||
219 | #define MSR_K6_PSOR 0xC0000087 | ||
220 | #define MSR_K6_PFIR 0xC0000088 | ||
221 | |||
222 | #define MSR_K7_EVNTSEL0 0xC0010000 | ||
223 | #define MSR_K7_EVNTSEL1 0xC0010001 | ||
224 | #define MSR_K7_EVNTSEL2 0xC0010002 | ||
225 | #define MSR_K7_EVNTSEL3 0xC0010003 | ||
226 | #define MSR_K7_PERFCTR0 0xC0010004 | ||
227 | #define MSR_K7_PERFCTR1 0xC0010005 | ||
228 | #define MSR_K7_PERFCTR2 0xC0010006 | ||
229 | #define MSR_K7_PERFCTR3 0xC0010007 | ||
230 | #define MSR_K7_HWCR 0xC0010015 | ||
231 | #define MSR_K7_CLK_CTL 0xC001001b | ||
232 | #define MSR_K7_FID_VID_CTL 0xC0010041 | ||
233 | #define MSR_K7_FID_VID_STATUS 0xC0010042 | ||
234 | |||
235 | /* extended feature register */ | ||
236 | #define MSR_EFER 0xc0000080 | ||
237 | |||
238 | /* EFER bits: */ | ||
239 | |||
240 | /* Execute Disable enable */ | ||
241 | #define _EFER_NX 11 | ||
242 | #define EFER_NX (1<<_EFER_NX) | ||
243 | |||
244 | /* Centaur-Hauls/IDT defined MSRs. */ | ||
245 | #define MSR_IDT_FCR1 0x107 | ||
246 | #define MSR_IDT_FCR2 0x108 | ||
247 | #define MSR_IDT_FCR3 0x109 | ||
248 | #define MSR_IDT_FCR4 0x10a | ||
249 | |||
250 | #define MSR_IDT_MCR0 0x110 | ||
251 | #define MSR_IDT_MCR1 0x111 | ||
252 | #define MSR_IDT_MCR2 0x112 | ||
253 | #define MSR_IDT_MCR3 0x113 | ||
254 | #define MSR_IDT_MCR4 0x114 | ||
255 | #define MSR_IDT_MCR5 0x115 | ||
256 | #define MSR_IDT_MCR6 0x116 | ||
257 | #define MSR_IDT_MCR7 0x117 | ||
258 | #define MSR_IDT_MCR_CTRL 0x120 | ||
259 | |||
260 | /* VIA Cyrix defined MSRs*/ | ||
261 | #define MSR_VIA_FCR 0x1107 | ||
262 | #define MSR_VIA_LONGHAUL 0x110a | ||
263 | #define MSR_VIA_RNG 0x110b | ||
264 | #define MSR_VIA_BCR2 0x1147 | ||
265 | |||
266 | /* Transmeta defined MSRs */ | ||
267 | #define MSR_TMTA_LONGRUN_CTRL 0x80868010 | ||
268 | #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 | ||
269 | #define MSR_TMTA_LRTI_READOUT 0x80868018 | ||
270 | #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a | ||
271 | |||
272 | #endif /* __ASM_MSR_H */ | ||
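A short sketch of the accessors in use; MSR_IA32_APICBASE is just a convenient example, and real code would check CPU capabilities first since rdmsr/rdtsc fault on pre-586 parts:

	unsigned int lo, hi;
	unsigned long long apicbase, tsc;

	rdmsr(MSR_IA32_APICBASE, lo, hi);       /* 64-bit MSR as two 32-bit halves */
	rdmsrl(MSR_IA32_APICBASE, apicbase);    /* same MSR as one 64-bit value */
	if (apicbase & MSR_IA32_APICBASE_ENABLE)
		;                               /* local APIC is enabled */

	rdtscll(tsc);                           /* full 64-bit time-stamp counter */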
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h new file mode 100644 index 000000000000..5b6ceda68c5f --- /dev/null +++ b/include/asm-i386/mtrr.h | |||
@@ -0,0 +1,107 @@ | |||
1 | /* Generic MTRR (Memory Type Range Register) ioctls. | ||
2 | |||
3 | Copyright (C) 1997-1999 Richard Gooch | ||
4 | |||
5 | This library is free software; you can redistribute it and/or | ||
6 | modify it under the terms of the GNU Library General Public | ||
7 | License as published by the Free Software Foundation; either | ||
8 | version 2 of the License, or (at your option) any later version. | ||
9 | |||
10 | This library is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | Library General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU Library General Public | ||
16 | License along with this library; if not, write to the Free | ||
17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | |||
19 | Richard Gooch may be reached by email at rgooch@atnf.csiro.au | ||
20 | The postal address is: | ||
21 | Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. | ||
22 | */ | ||
23 | #ifndef _LINUX_MTRR_H | ||
24 | #define _LINUX_MTRR_H | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/ioctl.h> | ||
28 | |||
29 | #define MTRR_IOCTL_BASE 'M' | ||
30 | |||
31 | struct mtrr_sentry | ||
32 | { | ||
33 | unsigned long base; /* Base address */ | ||
34 | unsigned int size; /* Size of region */ | ||
35 | unsigned int type; /* Type of region */ | ||
36 | }; | ||
37 | |||
38 | struct mtrr_gentry | ||
39 | { | ||
40 | unsigned int regnum; /* Register number */ | ||
41 | unsigned long base; /* Base address */ | ||
42 | unsigned int size; /* Size of region */ | ||
43 | unsigned int type; /* Type of region */ | ||
44 | }; | ||
45 | |||
46 | /* These are the various ioctls */ | ||
47 | #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) | ||
48 | #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) | ||
49 | #define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) | ||
50 | #define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) | ||
51 | #define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) | ||
52 | #define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) | ||
53 | #define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) | ||
54 | #define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) | ||
55 | #define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) | ||
56 | #define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) | ||
57 | |||
58 | /* These are the region types */ | ||
59 | #define MTRR_TYPE_UNCACHABLE 0 | ||
60 | #define MTRR_TYPE_WRCOMB 1 | ||
61 | /*#define MTRR_TYPE_ 2*/ | ||
62 | /*#define MTRR_TYPE_ 3*/ | ||
63 | #define MTRR_TYPE_WRTHROUGH 4 | ||
64 | #define MTRR_TYPE_WRPROT 5 | ||
65 | #define MTRR_TYPE_WRBACK 6 | ||
66 | #define MTRR_NUM_TYPES 7 | ||
67 | |||
68 | #ifdef __KERNEL__ | ||
69 | |||
70 | /* The following functions are for use by other drivers */ | ||
71 | # ifdef CONFIG_MTRR | ||
72 | extern int mtrr_add (unsigned long base, unsigned long size, | ||
73 | unsigned int type, char increment); | ||
74 | extern int mtrr_add_page (unsigned long base, unsigned long size, | ||
75 | unsigned int type, char increment); | ||
76 | extern int mtrr_del (int reg, unsigned long base, unsigned long size); | ||
77 | extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); | ||
78 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); | ||
79 | # else | ||
80 | static __inline__ int mtrr_add (unsigned long base, unsigned long size, | ||
81 | unsigned int type, char increment) | ||
82 | { | ||
83 | return -ENODEV; | ||
84 | } | ||
85 | static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, | ||
86 | unsigned int type, char increment) | ||
87 | { | ||
88 | return -ENODEV; | ||
89 | } | ||
90 | static __inline__ int mtrr_del (int reg, unsigned long base, | ||
91 | unsigned long size) | ||
92 | { | ||
93 | return -ENODEV; | ||
94 | } | ||
95 | static __inline__ int mtrr_del_page (int reg, unsigned long base, | ||
96 | unsigned long size) | ||
97 | { | ||
98 | return -ENODEV; | ||
99 | } | ||
100 | |||
101 | static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} | ||
102 | |||
103 | # endif | ||
104 | |||
105 | #endif | ||
106 | |||
107 | #endif /* _LINUX_MTRR_H */ | ||
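For in-kernel users the usual pattern is to add a write-combining range covering a framebuffer-like aperture and remove it on teardown. The base and size here are hypothetical; mtrr_add() returns the register number (or a negative errno), which is handed back to mtrr_del():

	int reg;

	/* mark a hypothetical 8MB aperture at 0xf0000000 as write-combining */
	reg = mtrr_add(0xf0000000, 0x800000, MTRR_TYPE_WRCOMB, 1);
	if (reg < 0)
		;       /* not fatal -- fall back to uncached access */

	/* ... on teardown ... */
	if (reg >= 0)
		mtrr_del(reg, 0xf0000000, 0x800000);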
diff --git a/include/asm-i386/namei.h b/include/asm-i386/namei.h new file mode 100644 index 000000000000..814865088617 --- /dev/null +++ b/include/asm-i386/namei.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $ | ||
2 | * linux/include/asm-i386/namei.h | ||
3 | * | ||
4 | * Included from linux/fs/namei.c | ||
5 | */ | ||
6 | |||
7 | #ifndef __I386_NAMEI_H | ||
8 | #define __I386_NAMEI_H | ||
9 | |||
10 | /* This dummy routine may be changed to something useful | ||
11 | * for /usr/gnemul/ emulation stuff. | ||
12 | * Look at asm-sparc/namei.h for details. | ||
13 | */ | ||
14 | |||
15 | #define __emul_prefix() NULL | ||
16 | |||
17 | #endif /* __I386_NAMEI_H */ | ||
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h new file mode 100644 index 000000000000..21f16638fc61 --- /dev/null +++ b/include/asm-i386/nmi.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/nmi.h | ||
3 | */ | ||
4 | #ifndef ASM_NMI_H | ||
5 | #define ASM_NMI_H | ||
6 | |||
7 | #include <linux/pm.h> | ||
8 | |||
9 | struct pt_regs; | ||
10 | |||
11 | typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu); | ||
12 | |||
13 | /** | ||
14 | * set_nmi_callback | ||
15 | * | ||
16 | * Set a handler for NMIs. Only one handler may be set at a | ||
17 | * time. The handler should return 1 if it handled the NMI. | ||
18 | */ | ||
19 | void set_nmi_callback(nmi_callback_t callback); | ||
20 | |||
21 | /** | ||
22 | * unset_nmi_callback | ||
23 | * | ||
24 | * Remove the handler previously set. | ||
25 | */ | ||
26 | void unset_nmi_callback(void); | ||
27 | |||
28 | #endif /* ASM_NMI_H */ | ||
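A minimal sketch of the interface from a hypothetical watchdog-style user; the callback runs in NMI context, so it must not sleep or take ordinary spinlocks:

	static int my_nmi_callback(struct pt_regs *regs, int cpu)
	{
		/* inspect regs / per-cpu state here */
		return 1;                       /* 1 == this NMI was handled */
	}

	/* register / unregister from normal (non-NMI) context */
	set_nmi_callback(my_nmi_callback);
	/* ... */
	unset_nmi_callback();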
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h new file mode 100644 index 000000000000..e13c6ffa72ae --- /dev/null +++ b/include/asm-i386/node.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _ASM_I386_NODE_H_ | ||
2 | #define _ASM_I386_NODE_H_ | ||
3 | |||
4 | #include <linux/device.h> | ||
5 | #include <linux/mmzone.h> | ||
6 | #include <linux/node.h> | ||
7 | #include <linux/topology.h> | ||
8 | #include <linux/nodemask.h> | ||
9 | |||
10 | struct i386_node { | ||
11 | struct node node; | ||
12 | }; | ||
13 | extern struct i386_node node_devices[MAX_NUMNODES]; | ||
14 | |||
15 | static inline int arch_register_node(int num){ | ||
16 | int p_node; | ||
17 | struct node *parent = NULL; | ||
18 | |||
19 | if (!node_online(num)) | ||
20 | return 0; | ||
21 | p_node = parent_node(num); | ||
22 | |||
23 | if (p_node != num) | ||
24 | parent = &node_devices[p_node].node; | ||
25 | |||
26 | return register_node(&node_devices[num].node, num, parent); | ||
27 | } | ||
28 | |||
29 | #endif /* _ASM_I386_NODE_H_ */ | ||
diff --git a/include/asm-i386/numaq.h b/include/asm-i386/numaq.h new file mode 100644 index 000000000000..38f710dc37f2 --- /dev/null +++ b/include/asm-i386/numaq.h | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Written by: Patricia Gaughen, IBM Corporation | ||
3 | * | ||
4 | * Copyright (C) 2002, IBM Corp. | ||
5 | * | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
16 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
17 | * details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | * | ||
23 | * Send feedback to <gone@us.ibm.com> | ||
24 | */ | ||
25 | |||
26 | #ifndef NUMAQ_H | ||
27 | #define NUMAQ_H | ||
28 | |||
29 | #ifdef CONFIG_X86_NUMAQ | ||
30 | |||
31 | extern int get_memcfg_numaq(void); | ||
32 | |||
33 | /* | ||
34 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem and struct sys_cfg_data describe the NUMA-Q system configuration data area. | ||
35 | */ | ||
36 | #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ | ||
37 | |||
38 | /* | ||
39 | * Communication area for each processor on lynxer-processor tests. | ||
40 | * | ||
41 | * NOTE: If you change the size of this eachquadmem structure you need | ||
42 | * to change the definition for EACH_QUAD_SIZE. | ||
43 | */ | ||
44 | struct eachquadmem { | ||
45 | unsigned int priv_mem_start; /* Starting address of this */ | ||
46 | /* quad's private memory. */ | ||
47 | /* This is always 0. */ | ||
48 | /* In MB. */ | ||
49 | unsigned int priv_mem_size; /* Size of this quad's */ | ||
50 | /* private memory. */ | ||
51 | /* In MB. */ | ||
52 | unsigned int low_shrd_mem_strp_start;/* Starting address of this */ | ||
53 | /* quad's low shared block */ | ||
54 | /* (untranslated). */ | ||
55 | /* In MB. */ | ||
56 | unsigned int low_shrd_mem_start; /* Starting address of this */ | ||
57 | /* quad's low shared memory */ | ||
58 | /* (untranslated). */ | ||
59 | /* In MB. */ | ||
60 | unsigned int low_shrd_mem_size; /* Size of this quad's low */ | ||
61 | /* shared memory. */ | ||
62 | /* In MB. */ | ||
63 | unsigned int lmmio_copb_start; /* Starting address of this */ | ||
64 | /* quad's local memory */ | ||
65 | /* mapped I/O in the */ | ||
66 | /* compatibility OPB. */ | ||
67 | /* In MB. */ | ||
68 | unsigned int lmmio_copb_size; /* Size of this quad's local */ | ||
69 | /* memory mapped I/O in the */ | ||
70 | /* compatibility OPB. */ | ||
71 | /* In MB. */ | ||
72 | unsigned int lmmio_nopb_start; /* Starting address of this */ | ||
73 | /* quad's local memory */ | ||
74 | /* mapped I/O in the */ | ||
75 | /* non-compatibility OPB. */ | ||
76 | /* In MB. */ | ||
77 | unsigned int lmmio_nopb_size; /* Size of this quad's local */ | ||
78 | /* memory mapped I/O in the */ | ||
79 | /* non-compatibility OPB. */ | ||
80 | /* In MB. */ | ||
81 | unsigned int io_apic_0_start; /* Starting address of I/O */ | ||
82 | /* APIC 0. */ | ||
83 | unsigned int io_apic_0_sz; /* Size I/O APIC 0. */ | ||
84 | unsigned int io_apic_1_start; /* Starting address of I/O */ | ||
85 | /* APIC 1. */ | ||
86 | unsigned int io_apic_1_sz; /* Size I/O APIC 1. */ | ||
87 | unsigned int hi_shrd_mem_start; /* Starting address of this */ | ||
88 | /* quad's high shared memory.*/ | ||
89 | /* In MB. */ | ||
90 | unsigned int hi_shrd_mem_size; /* Size of this quad's high */ | ||
91 | /* shared memory. */ | ||
92 | /* In MB. */ | ||
93 | unsigned int mps_table_addr; /* Address of this quad's */ | ||
94 | /* MPS tables from BIOS, */ | ||
95 | /* in system space.*/ | ||
96 | unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */ | ||
97 | /* local access of MDC. */ | ||
98 | unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */ | ||
99 | /* remote access of MDC. */ | ||
100 | unsigned int mm_port_io_start; /* Starting address of this */ | ||
101 | /* quad's memory mapped Port */ | ||
102 | /* I/O space. */ | ||
103 | unsigned int mm_port_io_size; /* Size of this quad's memory*/ | ||
104 | /* mapped Port I/O space. */ | ||
105 | unsigned int mm_rmt_io_apic_start; /* Starting address of this */ | ||
106 | /* quad's memory mapped */ | ||
107 | /* remote I/O APIC space. */ | ||
108 | unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/ | ||
109 | /* mapped remote I/O APIC */ | ||
110 | /* space. */ | ||
111 | unsigned int mm_isa_start; /* Starting address of this */ | ||
112 | /* quad's memory mapped ISA */ | ||
113 | /* space (contains MDC */ | ||
114 | /* memory space). */ | ||
115 | unsigned int mm_isa_size; /* Size of this quad's memory*/ | ||
116 | /* mapped ISA space (contains*/ | ||
117 | /* MDC memory space). */ | ||
118 | unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/ | ||
119 | unsigned int lcl_qmi_addr; /* Local addr to access QMI. */ | ||
120 | }; | ||
121 | |||
122 | /* | ||
123 | * Note: This structure must NOT be changed unless the multiproc and | ||
124 | * OS are changed to reflect the new structure. | ||
125 | */ | ||
126 | struct sys_cfg_data { | ||
127 | unsigned int quad_id; | ||
128 | unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */ | ||
129 | unsigned int scd_version; /* Version number of this table. */ | ||
130 | unsigned int first_quad_id; | ||
131 | unsigned int quads_present31_0; /* 1 bit for each quad */ | ||
132 | unsigned int quads_present63_32; /* 1 bit for each quad */ | ||
133 | unsigned int config_flags; | ||
134 | unsigned int boot_flags; | ||
135 | unsigned int csr_start_addr; /* Absolute value (not in MB) */ | ||
136 | unsigned int csr_size; /* Absolute value (not in MB) */ | ||
137 | unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */ | ||
138 | unsigned int lcl_apic_size; /* Absolute value (not in MB) */ | ||
139 | unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ | ||
140 | unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ | ||
141 | /* may not be totally populated */ | ||
142 | unsigned int split_mem_enbl; /* 0 for no low shared memory */ | ||
143 | unsigned int mmio_sz; /* Size of total system memory mapped I/O */ | ||
144 | /* (in MB). */ | ||
145 | unsigned int quad_spin_lock; /* Spare location used for quad */ | ||
146 | /* bringup. */ | ||
147 | unsigned int nonzero55; /* For checksumming. */ | ||
148 | unsigned int nonzeroaa; /* For checksumming. */ | ||
149 | unsigned int scd_magic_number; | ||
150 | unsigned int system_type; | ||
151 | unsigned int checksum; | ||
152 | /* | ||
153 | * memory configuration area for each quad | ||
154 | */ | ||
155 | struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ | ||
156 | }; | ||
157 | |||
158 | static inline unsigned long *get_zholes_size(int nid) | ||
159 | { | ||
160 | return NULL; | ||
161 | } | ||
162 | #endif /* CONFIG_X86_NUMAQ */ | ||
163 | #endif /* NUMAQ_H */ | ||
164 | |||
diff --git a/include/asm-i386/numnodes.h b/include/asm-i386/numnodes.h new file mode 100644 index 000000000000..a61f38c8176f --- /dev/null +++ b/include/asm-i386/numnodes.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _ASM_MAX_NUMNODES_H | ||
2 | #define _ASM_MAX_NUMNODES_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | #ifdef CONFIG_X86_NUMAQ | ||
7 | |||
8 | /* Max 16 Nodes */ | ||
9 | #define NODES_SHIFT 4 | ||
10 | |||
11 | #elif defined(CONFIG_ACPI_SRAT) | ||
12 | |||
13 | /* Max 8 Nodes */ | ||
14 | #define NODES_SHIFT 3 | ||
15 | |||
16 | #endif /* CONFIG_X86_NUMAQ */ | ||
17 | |||
18 | #endif /* _ASM_MAX_NUMNODES_H */ | ||
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h new file mode 100644 index 000000000000..ed13969fa2d6 --- /dev/null +++ b/include/asm-i386/page.h | |||
@@ -0,0 +1,153 @@ | |||
1 | #ifndef _I386_PAGE_H | ||
2 | #define _I386_PAGE_H | ||
3 | |||
4 | /* PAGE_SHIFT determines the page size */ | ||
5 | #define PAGE_SHIFT 12 | ||
6 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | ||
7 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
8 | |||
9 | #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) | ||
10 | #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) | ||
11 | |||
12 | #ifdef __KERNEL__ | ||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | #include <linux/config.h> | ||
16 | |||
17 | #ifdef CONFIG_X86_USE_3DNOW | ||
18 | |||
19 | #include <asm/mmx.h> | ||
20 | |||
21 | #define clear_page(page) mmx_clear_page((void *)(page)) | ||
22 | #define copy_page(to,from) mmx_copy_page(to,from) | ||
23 | |||
24 | #else | ||
25 | |||
26 | /* | ||
27 | * On older X86 processors it's not a win to use MMX here, it seems. | ||
28 | * Maybe the K6-III ? | ||
29 | */ | ||
30 | |||
31 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | ||
32 | #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) | ||
33 | |||
34 | #endif | ||
35 | |||
36 | #define clear_user_page(page, vaddr, pg) clear_page(page) | ||
37 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | ||
38 | |||
39 | #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) | ||
40 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | ||
41 | |||
42 | /* | ||
43 | * These are used to make use of C type-checking.. | ||
44 | */ | ||
45 | extern int nx_enabled; | ||
46 | #ifdef CONFIG_X86_PAE | ||
47 | extern unsigned long long __supported_pte_mask; | ||
48 | typedef struct { unsigned long pte_low, pte_high; } pte_t; | ||
49 | typedef struct { unsigned long long pmd; } pmd_t; | ||
50 | typedef struct { unsigned long long pgd; } pgd_t; | ||
51 | typedef struct { unsigned long long pgprot; } pgprot_t; | ||
52 | #define pmd_val(x) ((x).pmd) | ||
53 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | ||
54 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
55 | #define HPAGE_SHIFT 21 | ||
56 | #else | ||
57 | typedef struct { unsigned long pte_low; } pte_t; | ||
58 | typedef struct { unsigned long pgd; } pgd_t; | ||
59 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
60 | #define boot_pte_t pte_t /* or would you rather have a typedef */ | ||
61 | #define pte_val(x) ((x).pte_low) | ||
62 | #define HPAGE_SHIFT 22 | ||
63 | #endif | ||
64 | #define PTE_MASK PAGE_MASK | ||
65 | |||
66 | #ifdef CONFIG_HUGETLB_PAGE | ||
67 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | ||
68 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
69 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
70 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
71 | #endif | ||
72 | |||
73 | #define pgd_val(x) ((x).pgd) | ||
74 | #define pgprot_val(x) ((x).pgprot) | ||
75 | |||
76 | #define __pte(x) ((pte_t) { (x) } ) | ||
77 | #define __pgd(x) ((pgd_t) { (x) } ) | ||
78 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
79 | |||
80 | #endif /* !__ASSEMBLY__ */ | ||
81 | |||
82 | /* to align the pointer to the (next) page boundary */ | ||
83 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) | ||
84 | |||
85 | /* | ||
86 | * This handles the memory map.. We could make this a config | ||
87 | * option, but too many people screw it up, and too few need | ||
88 | * it. | ||
89 | * | ||
90 | * A __PAGE_OFFSET of 0xC0000000 means that the kernel has | ||
91 | * a virtual address space of one gigabyte, which limits the | ||
92 | * amount of physical memory you can use to about 950MB. | ||
93 | * | ||
94 | * If you want more physical memory than this then see the CONFIG_HIGHMEM4G | ||
95 | * and CONFIG_HIGHMEM64G options in the kernel configuration. | ||
96 | */ | ||
97 | |||
98 | #ifndef __ASSEMBLY__ | ||
99 | |||
100 | /* | ||
101 | * This much address space is reserved for vmalloc() and iomap() | ||
102 | * as well as fixmap mappings. | ||
103 | */ | ||
104 | extern unsigned int __VMALLOC_RESERVE; | ||
105 | |||
106 | /* Pure 2^n version of get_order */ | ||
107 | static __inline__ int get_order(unsigned long size) | ||
108 | { | ||
109 | int order; | ||
110 | |||
111 | size = (size-1) >> (PAGE_SHIFT-1); | ||
112 | order = -1; | ||
113 | do { | ||
114 | size >>= 1; | ||
115 | order++; | ||
116 | } while (size); | ||
117 | return order; | ||
118 | } | ||
119 | |||
120 | extern int sysctl_legacy_va_layout; | ||
121 | |||
122 | #endif /* __ASSEMBLY__ */ | ||
123 | |||
124 | #ifdef __ASSEMBLY__ | ||
125 | #define __PAGE_OFFSET (0xC0000000) | ||
126 | #else | ||
127 | #define __PAGE_OFFSET (0xC0000000UL) | ||
128 | #endif | ||
129 | |||
130 | |||
131 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | ||
132 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) | ||
133 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) | ||
134 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) | ||
135 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | ||
136 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | ||
137 | #ifndef CONFIG_DISCONTIGMEM | ||
138 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
139 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
140 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | ||
141 | #endif /* !CONFIG_DISCONTIGMEM */ | ||
142 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
143 | |||
144 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
145 | |||
146 | #define VM_DATA_DEFAULT_FLAGS \ | ||
147 | (VM_READ | VM_WRITE | \ | ||
148 | ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
149 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
150 | |||
151 | #endif /* __KERNEL__ */ | ||
152 | |||
153 | #endif /* _I386_PAGE_H */ | ||
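The get_order() helper and the __pa()/__va() macros above are pure arithmetic, so their behaviour is easy to check outside the kernel. The following is a minimal user-space sketch (not kernel code) that reproduces that arithmetic under the default __PAGE_OFFSET of 0xC0000000; the example addresses and sizes are arbitrary.

/* Illustrative user-space sketch: reproduces the arithmetic behind
 * get_order(), __pa() and __va() from the header above, assuming the
 * default __PAGE_OFFSET of 0xC0000000. */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_OFFSET  0xC0000000UL

/* Same algorithm as the header's "pure 2^n" get_order(). */
static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 4 KB needs order 0 (one page), 8 KB order 1, 5 pages round up to order 3. */
	printf("get_order(4096) = %d\n", get_order(4096));
	printf("get_order(8192) = %d\n", get_order(8192));
	printf("get_order(5 * PAGE_SIZE) = %d\n", get_order(5 * PAGE_SIZE));

	/* __pa()/__va() are plain offsets against PAGE_OFFSET. */
	unsigned long kaddr = 0xC0100000UL;            /* a kernel virtual address */
	unsigned long paddr = kaddr - PAGE_OFFSET;     /* __pa(kaddr) -> 0x00100000 */
	printf("__pa(0x%08lx) = 0x%08lx\n", kaddr, paddr);
	printf("__va(0x%08lx) = 0x%08lx\n", paddr, paddr + PAGE_OFFSET);
	return 0;
}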
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h new file mode 100644 index 000000000000..b6440526e42a --- /dev/null +++ b/include/asm-i386/param.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ASMi386_PARAM_H | ||
2 | #define _ASMi386_PARAM_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | # define HZ 1000 /* Internal kernel timer frequency */ | ||
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | ||
7 | # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | ||
8 | #endif | ||
9 | |||
10 | #ifndef HZ | ||
11 | #define HZ 100 | ||
12 | #endif | ||
13 | |||
14 | #define EXEC_PAGESIZE 4096 | ||
15 | |||
16 | #ifndef NOGROUP | ||
17 | #define NOGROUP (-1) | ||
18 | #endif | ||
19 | |||
20 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
21 | #define COMMAND_LINE_SIZE 256 | ||
22 | |||
23 | #endif | ||
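HZ and USER_HZ differ on purpose: the kernel ticks 1000 times per second internally, while interfaces such as times() keep reporting in 100-per-second "ticks", so internal counts have to be rescaled on the way out. A small illustrative sketch of that rescaling follows; the helper name is made up for the example and is not the kernel's own conversion routine.

/* Illustrative sketch (user space): rescale internal jiffies to the
 * USER_HZ granularity that user space sees. */
#include <stdio.h>

#define HZ      1000   /* internal kernel timer frequency */
#define USER_HZ  100   /* granularity exposed to user space */

static unsigned long jiffies_to_user_ticks(unsigned long jiffies)
{
	return jiffies / (HZ / USER_HZ);   /* 1000/100 = 10 jiffies per user tick */
}

int main(void)
{
	/* 2500 internal ticks = 2.5 s = 250 user-visible ticks at USER_HZ=100. */
	printf("%lu jiffies -> %lu user ticks\n", 2500UL, jiffies_to_user_ticks(2500UL));
	return 0;
}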
diff --git a/include/asm-i386/parport.h b/include/asm-i386/parport.h new file mode 100644 index 000000000000..fa0e321e498e --- /dev/null +++ b/include/asm-i386/parport.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * parport.h: ia32-specific parport initialisation | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk> | ||
5 | * | ||
6 | * This file should only be included by drivers/parport/parport_pc.c. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_I386_PARPORT_H | ||
10 | #define _ASM_I386_PARPORT_H 1 | ||
11 | |||
12 | static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); | ||
13 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) | ||
14 | { | ||
15 | return parport_pc_find_isa_ports (autoirq, autodma); | ||
16 | } | ||
17 | |||
18 | #endif /* !(_ASM_I386_PARPORT_H) */ | ||
diff --git a/include/asm-i386/pci-direct.h b/include/asm-i386/pci-direct.h new file mode 100644 index 000000000000..4f6738b08206 --- /dev/null +++ b/include/asm-i386/pci-direct.h | |||
@@ -0,0 +1 @@ | |||
#include "asm-x86_64/pci-direct.h" | |||
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h new file mode 100644 index 000000000000..fb749b85a739 --- /dev/null +++ b/include/asm-i386/pci.h | |||
@@ -0,0 +1,110 @@ | |||
1 | #ifndef __i386_PCI_H | ||
2 | #define __i386_PCI_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | #include <linux/mm.h> /* for struct page */ | ||
8 | |||
9 | /* Can be used to override the logic in pci_scan_bus for skipping | ||
10 | already-configured bus numbers - to be used for buggy BIOSes | ||
11 | or architectures with incomplete PCI setup by the loader */ | ||
12 | |||
13 | #ifdef CONFIG_PCI | ||
14 | extern unsigned int pcibios_assign_all_busses(void); | ||
15 | #else | ||
16 | #define pcibios_assign_all_busses() 0 | ||
17 | #endif | ||
18 | #define pcibios_scan_all_fns(a, b) 0 | ||
19 | |||
20 | extern unsigned long pci_mem_start; | ||
21 | #define PCIBIOS_MIN_IO 0x1000 | ||
22 | #define PCIBIOS_MIN_MEM (pci_mem_start) | ||
23 | |||
24 | #define PCIBIOS_MIN_CARDBUS_IO 0x4000 | ||
25 | |||
26 | void pcibios_config_init(void); | ||
27 | struct pci_bus * pcibios_scan_root(int bus); | ||
28 | |||
29 | void pcibios_set_master(struct pci_dev *dev); | ||
30 | void pcibios_penalize_isa_irq(int irq); | ||
31 | struct irq_routing_table *pcibios_get_irq_routing_table(void); | ||
32 | int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); | ||
33 | |||
34 | /* Dynamic DMA mapping stuff. | ||
35 | * i386 has everything mapped statically. | ||
36 | */ | ||
37 | |||
38 | #include <linux/types.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <asm/scatterlist.h> | ||
41 | #include <linux/string.h> | ||
42 | #include <asm/io.h> | ||
43 | |||
44 | struct pci_dev; | ||
45 | |||
46 | /* The PCI address space does equal the physical memory | ||
47 | * address space. The networking and block device layers use | ||
48 | * this boolean for bounce buffer decisions. | ||
49 | */ | ||
50 | #define PCI_DMA_BUS_IS_PHYS (1) | ||
51 | |||
52 | /* pci_unmap_{page,single} is a nop so... */ | ||
53 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) | ||
54 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) | ||
55 | #define pci_unmap_addr(PTR, ADDR_NAME) (0) | ||
56 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) | ||
57 | #define pci_unmap_len(PTR, LEN_NAME) (0) | ||
58 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) | ||
59 | |||
60 | /* This is always fine. */ | ||
61 | #define pci_dac_dma_supported(pci_dev, mask) (1) | ||
62 | |||
63 | static inline dma64_addr_t | ||
64 | pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction) | ||
65 | { | ||
66 | return ((dma64_addr_t) page_to_phys(page) + | ||
67 | (dma64_addr_t) offset); | ||
68 | } | ||
69 | |||
70 | static inline struct page * | ||
71 | pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr) | ||
72 | { | ||
73 | return pfn_to_page(dma_addr >> PAGE_SHIFT); | ||
74 | } | ||
75 | |||
76 | static inline unsigned long | ||
77 | pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr) | ||
78 | { | ||
79 | return (dma_addr & ~PAGE_MASK); | ||
80 | } | ||
81 | |||
82 | static inline void | ||
83 | pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) | ||
84 | { | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction) | ||
89 | { | ||
90 | flush_write_buffers(); | ||
91 | } | ||
92 | |||
93 | #define HAVE_PCI_MMAP | ||
94 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
95 | enum pci_mmap_state mmap_state, int write_combine); | ||
96 | |||
97 | |||
98 | static inline void pcibios_add_platform_entries(struct pci_dev *dev) | ||
99 | { | ||
100 | } | ||
101 | |||
102 | #endif /* __KERNEL__ */ | ||
103 | |||
104 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | ||
105 | #include <asm-generic/pci-dma-compat.h> | ||
106 | |||
107 | /* generic pci stuff */ | ||
108 | #include <asm-generic/pci.h> | ||
109 | |||
110 | #endif /* __i386_PCI_H */ | ||
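The DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr() family above compiles to nothing on i386 because unmapping is a no-op here, yet portable drivers still go through the macros so that the same source grows a real field on IOMMU architectures. A hedged sketch of that usage pattern follows; the struct and field names are invented for illustration, and the macro bodies are copied from this header so the snippet builds stand-alone.

/* Illustrative sketch of the driver-side usage pattern.  On i386 the
 * macros expand to nothing, so the bookkeeping costs zero bytes. */
#include <stdio.h>

typedef unsigned int dma_addr_t;

/* i386 flavour of the macros, copied from the header above. */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)          (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)

struct rx_buffer {                    /* hypothetical driver bookkeeping */
	void *data;
	DECLARE_PCI_UNMAP_ADDR(mapping)   /* declares no storage at all on i386 */
};

int main(void)
{
	struct rx_buffer buf = { .data = 0 };

	/* A portable driver stores the dma address at map time...           */
	pci_unmap_addr_set(&buf, mapping, (dma_addr_t)0x1000);
	/* ...and reads it back at unmap time; on i386 both are no-ops.      */
	printf("stored mapping reads back as %u (always 0 on i386)\n",
	       (unsigned int)pci_unmap_addr(&buf, mapping));
	printf("sizeof(struct rx_buffer) = %zu bytes\n", sizeof(buf));
	return 0;
}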
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h new file mode 100644 index 000000000000..5764afa4b6a4 --- /dev/null +++ b/include/asm-i386/percpu.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ARCH_I386_PERCPU__ | ||
2 | #define __ARCH_I386_PERCPU__ | ||
3 | |||
4 | #include <asm-generic/percpu.h> | ||
5 | |||
6 | #endif /* __ARCH_I386_PERCPU__ */ | ||
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h new file mode 100644 index 000000000000..0380c3dc1f7e --- /dev/null +++ b/include/asm-i386/pgalloc.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #ifndef _I386_PGALLOC_H | ||
2 | #define _I386_PGALLOC_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <asm/fixmap.h> | ||
6 | #include <linux/threads.h> | ||
7 | #include <linux/mm.h> /* for struct page */ | ||
8 | |||
9 | #define pmd_populate_kernel(mm, pmd, pte) \ | ||
10 | set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) | ||
11 | |||
12 | #define pmd_populate(mm, pmd, pte) \ | ||
13 | set_pmd(pmd, __pmd(_PAGE_TABLE + \ | ||
14 | ((unsigned long long)page_to_pfn(pte) << \ | ||
15 | (unsigned long long) PAGE_SHIFT))) | ||
16 | /* | ||
17 | * Allocate and free page tables. | ||
18 | */ | ||
19 | extern pgd_t *pgd_alloc(struct mm_struct *); | ||
20 | extern void pgd_free(pgd_t *pgd); | ||
21 | |||
22 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); | ||
23 | extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); | ||
24 | |||
25 | static inline void pte_free_kernel(pte_t *pte) | ||
26 | { | ||
27 | free_page((unsigned long)pte); | ||
28 | } | ||
29 | |||
30 | static inline void pte_free(struct page *pte) | ||
31 | { | ||
32 | __free_page(pte); | ||
33 | } | ||
34 | |||
35 | |||
36 | #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) | ||
37 | |||
38 | #ifdef CONFIG_X86_PAE | ||
39 | /* | ||
40 | * In the PAE case we free the pmds as part of the pgd. | ||
41 | */ | ||
42 | #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) | ||
43 | #define pmd_free(x) do { } while (0) | ||
44 | #define __pmd_free_tlb(tlb,x) do { } while (0) | ||
45 | #define pud_populate(mm, pmd, pte) BUG() | ||
46 | #endif | ||
47 | |||
48 | #define check_pgt_cache() do { } while (0) | ||
49 | |||
50 | #endif /* _I386_PGALLOC_H */ | ||
diff --git a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h new file mode 100644 index 000000000000..02518079f816 --- /dev/null +++ b/include/asm-i386/pgtable-2level-defs.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _I386_PGTABLE_2LEVEL_DEFS_H | ||
2 | #define _I386_PGTABLE_2LEVEL_DEFS_H | ||
3 | |||
4 | /* | ||
5 | * traditional i386 two-level paging structure: | ||
6 | */ | ||
7 | |||
8 | #define PGDIR_SHIFT 22 | ||
9 | #define PTRS_PER_PGD 1024 | ||
10 | |||
11 | /* | ||
12 | * the i386 is two-level, so we don't really have any | ||
13 | * PMD directory physically. | ||
14 | */ | ||
15 | |||
16 | #define PTRS_PER_PTE 1024 | ||
17 | |||
18 | #endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ | ||
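As a quick sanity check of the constants above: with PGDIR_SHIFT 22 each top-level entry covers 4 MB, 1024 of them cover the full 4 GB, and each page table's 1024 4 KB ptes likewise cover 4 MB. A tiny illustrative calculation (plain user-space C, not kernel code):

/* Illustrative arithmetic only: reach of each level in the two-level
 * layout defined above (PGDIR_SHIFT 22, 1024 pgd and 1024 pte entries). */
#include <stdio.h>

int main(void)
{
	unsigned long long pgd_span = 1ULL << 22;          /* one pgd entry: 4 MB */
	unsigned long long total    = pgd_span * 1024;     /* whole pgd: 4 GB     */
	unsigned long long pte_span = 1ULL << 12;          /* one pte: 4 KB       */

	printf("one pgd entry maps %llu MB\n", pgd_span >> 20);
	printf("1024 pgd entries map %llu GB\n", total >> 30);
	printf("1024 pte entries map %llu MB\n", (pte_span * 1024) >> 20);
	return 0;
}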
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h new file mode 100644 index 000000000000..fa07bd6c7529 --- /dev/null +++ b/include/asm-i386/pgtable-2level.h | |||
@@ -0,0 +1,69 @@ | |||
1 | #ifndef _I386_PGTABLE_2LEVEL_H | ||
2 | #define _I386_PGTABLE_2LEVEL_H | ||
3 | |||
4 | #include <asm-generic/pgtable-nopmd.h> | ||
5 | |||
6 | #define pte_ERROR(e) \ | ||
7 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) | ||
8 | #define pgd_ERROR(e) \ | ||
9 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
10 | |||
11 | /* | ||
12 | * Certain architectures need to do special things when PTEs | ||
13 | * within a page table are directly modified. Thus, the following | ||
14 | * hook is made available. | ||
15 | */ | ||
16 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | ||
17 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
18 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) | ||
19 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) | ||
20 | |||
21 | #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0)) | ||
22 | #define pte_same(a, b) ((a).pte_low == (b).pte_low) | ||
23 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
24 | #define pte_none(x) (!(x).pte_low) | ||
25 | #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) | ||
26 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
27 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
28 | |||
29 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | ||
30 | |||
31 | #define pmd_page_kernel(pmd) \ | ||
32 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | ||
33 | |||
34 | /* | ||
35 | * All present user pages are user-executable: | ||
36 | */ | ||
37 | static inline int pte_exec(pte_t pte) | ||
38 | { | ||
39 | return pte_user(pte); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * All present pages are kernel-executable: | ||
44 | */ | ||
45 | static inline int pte_exec_kernel(pte_t pte) | ||
46 | { | ||
47 | return 1; | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Bits 0, 6 and 7 are taken, split up the 29 bits of offset | ||
52 | * into this range: | ||
53 | */ | ||
54 | #define PTE_FILE_MAX_BITS 29 | ||
55 | |||
56 | #define pte_to_pgoff(pte) \ | ||
57 | ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) | ||
58 | |||
59 | #define pgoff_to_pte(off) \ | ||
60 | ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) | ||
61 | |||
62 | /* Encode and de-code a swap entry */ | ||
63 | #define __swp_type(x) (((x).val >> 1) & 0x1f) | ||
64 | #define __swp_offset(x) ((x).val >> 8) | ||
65 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | ||
66 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) | ||
67 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | ||
68 | |||
69 | #endif /* _I386_PGTABLE_2LEVEL_H */ | ||
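The file-pte and swap-entry encodings above are just bit packing around the reserved bits 0, 6 and 7. The sketch below reproduces the same packing in user space so the round trip can be seen; the constants are copied from this diff and the example offsets are arbitrary.

/* Illustrative user-space sketch of the 2-level (non-PAE) packing used
 * by pgoff_to_pte()/pte_to_pgoff() and the swap-entry macros above. */
#include <stdio.h>

#define _PAGE_FILE 0x040UL

/* Same packing as pgoff_to_pte()/pte_to_pgoff() in the header. */
static unsigned long pgoff_to_pte_low(unsigned long off)
{
	return ((off & 0x1f) << 1) + ((off >> 5) << 8) + _PAGE_FILE;
}

static unsigned long pte_low_to_pgoff(unsigned long pte_low)
{
	return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
}

int main(void)
{
	unsigned long off = 0x1234;                 /* an arbitrary 29-bit file offset */
	unsigned long pte_low = pgoff_to_pte_low(off);

	/* Bits 0 (present), 6 (_PAGE_FILE) and 7 (PSE/PROTNONE) are avoided, so
	 * the offset is split into bits 1-5 and 8-31 and survives the round trip. */
	printf("pgoff 0x%lx -> pte_low 0x%lx -> pgoff 0x%lx\n",
	       off, pte_low, pte_low_to_pgoff(pte_low));

	/* Swap entries use the same idea: 5 bits of type, the rest is offset. */
	unsigned long type = 3, swp_off = 1000;
	unsigned long val = (type << 1) | (swp_off << 8);
	printf("swp type %lu offset %lu -> val 0x%lx -> type %lu offset %lu\n",
	       type, swp_off, val, (val >> 1) & 0x1f, val >> 8);
	return 0;
}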
diff --git a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h new file mode 100644 index 000000000000..eb3a1ea88671 --- /dev/null +++ b/include/asm-i386/pgtable-3level-defs.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _I386_PGTABLE_3LEVEL_DEFS_H | ||
2 | #define _I386_PGTABLE_3LEVEL_DEFS_H | ||
3 | |||
4 | /* | ||
5 | * PGDIR_SHIFT determines what a top-level page table entry can map | ||
6 | */ | ||
7 | #define PGDIR_SHIFT 30 | ||
8 | #define PTRS_PER_PGD 4 | ||
9 | |||
10 | /* | ||
11 | * PMD_SHIFT determines the size of the area a middle-level | ||
12 | * page table can map | ||
13 | */ | ||
14 | #define PMD_SHIFT 21 | ||
15 | #define PTRS_PER_PMD 512 | ||
16 | |||
17 | /* | ||
18 | * entries per page directory level | ||
19 | */ | ||
20 | #define PTRS_PER_PTE 512 | ||
21 | |||
22 | #endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ | ||
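For comparison with the two-level layout, the PAE constants above work out as follows: each of the 4 top-level entries covers 1 GB, each of the 512 pmd entries beneath it covers 2 MB, and each pmd page's 512 ptes cover those 2 MB in 4 KB pages. A small illustrative calculation (user-space C only):

/* Illustrative arithmetic only: reach of each PAE level as defined above
 * (PGDIR_SHIFT 30 with 4 entries, PMD_SHIFT 21 with 512 entries). */
#include <stdio.h>

int main(void)
{
	unsigned long long pgd_span = 1ULL << 30;   /* one top-level entry: 1 GB */
	unsigned long long pmd_span = 1ULL << 21;   /* one pmd entry: 2 MB       */

	printf("4 pgd entries * %llu GB = %llu GB total\n",
	       pgd_span >> 30, (4 * pgd_span) >> 30);
	printf("512 pmd entries * %llu MB = %llu GB per pgd entry\n",
	       pmd_span >> 20, (512 * pmd_span) >> 30);
	printf("512 pte entries * 4 KB = %llu MB per pmd entry\n",
	       (512ULL * 4096) >> 20);
	return 0;
}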
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h new file mode 100644 index 000000000000..d609f9c2c1f0 --- /dev/null +++ b/include/asm-i386/pgtable-3level.h | |||
@@ -0,0 +1,160 @@ | |||
1 | #ifndef _I386_PGTABLE_3LEVEL_H | ||
2 | #define _I386_PGTABLE_3LEVEL_H | ||
3 | |||
4 | #include <asm-generic/pgtable-nopud.h> | ||
5 | |||
6 | /* | ||
7 | * Intel Physical Address Extension (PAE) Mode - three-level page | ||
8 | * tables on PPro+ CPUs. | ||
9 | * | ||
10 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | ||
11 | */ | ||
12 | |||
13 | #define pte_ERROR(e) \ | ||
14 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) | ||
15 | #define pmd_ERROR(e) \ | ||
16 | printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) | ||
17 | #define pgd_ERROR(e) \ | ||
18 | printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | ||
19 | |||
20 | #define pud_none(pud) 0 | ||
21 | #define pud_bad(pud) 0 | ||
22 | #define pud_present(pud) 1 | ||
23 | |||
24 | /* | ||
25 | * Is the pte executable? | ||
26 | */ | ||
27 | static inline int pte_x(pte_t pte) | ||
28 | { | ||
29 | return !(pte_val(pte) & _PAGE_NX); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * All present user-pages with !NX bit are user-executable: | ||
34 | */ | ||
35 | static inline int pte_exec(pte_t pte) | ||
36 | { | ||
37 | return pte_user(pte) && pte_x(pte); | ||
38 | } | ||
39 | /* | ||
40 | * All present pages with !NX bit are kernel-executable: | ||
41 | */ | ||
42 | static inline int pte_exec_kernel(pte_t pte) | ||
43 | { | ||
44 | return pte_x(pte); | ||
45 | } | ||
46 | |||
47 | /* Rules for using set_pte: the pte being assigned *must* be | ||
48 | * either not present or in a state where the hardware will | ||
49 | * not attempt to update the pte. In places where this is | ||
50 | * not possible, use ptep_get_and_clear to obtain the old pte | ||
51 | * value and then use set_pte to update it. -ben | ||
52 | */ | ||
53 | static inline void set_pte(pte_t *ptep, pte_t pte) | ||
54 | { | ||
55 | ptep->pte_high = pte.pte_high; | ||
56 | smp_wmb(); | ||
57 | ptep->pte_low = pte.pte_low; | ||
58 | } | ||
59 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
60 | |||
61 | #define __HAVE_ARCH_SET_PTE_ATOMIC | ||
62 | #define set_pte_atomic(pteptr,pteval) \ | ||
63 | set_64bit((unsigned long long *)(pteptr),pte_val(pteval)) | ||
64 | #define set_pmd(pmdptr,pmdval) \ | ||
65 | set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval)) | ||
66 | #define set_pud(pudptr,pudval) \ | ||
67 | set_64bit((unsigned long long *)(pudptr),pud_val(pudval)) | ||
68 | |||
69 | /* | ||
70 | * Pentium-II erratum A13: in PAE mode we explicitly have to flush | ||
71 | * the TLB via cr3 if the top-level pgd is changed... | ||
72 | * We do not let the generic code free and clear pgd entries due to | ||
73 | * this erratum. | ||
74 | */ | ||
75 | static inline void pud_clear (pud_t * pud) { } | ||
76 | |||
77 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | ||
78 | |||
79 | #define pmd_page_kernel(pmd) \ | ||
80 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | ||
81 | |||
82 | #define pud_page(pud) \ | ||
83 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
84 | |||
85 | #define pud_page_kernel(pud) \ | ||
86 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
87 | |||
88 | |||
89 | /* Find an entry in the second-level page table.. */ | ||
90 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | ||
91 | pmd_index(address)) | ||
92 | |||
93 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
94 | { | ||
95 | pte_t res; | ||
96 | |||
97 | /* xchg acts as a barrier before the setting of the high bits */ | ||
98 | res.pte_low = xchg(&ptep->pte_low, 0); | ||
99 | res.pte_high = ptep->pte_high; | ||
100 | ptep->pte_high = 0; | ||
101 | |||
102 | return res; | ||
103 | } | ||
104 | |||
105 | static inline int pte_same(pte_t a, pte_t b) | ||
106 | { | ||
107 | return a.pte_low == b.pte_low && a.pte_high == b.pte_high; | ||
108 | } | ||
109 | |||
110 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
111 | |||
112 | static inline int pte_none(pte_t pte) | ||
113 | { | ||
114 | return !pte.pte_low && !pte.pte_high; | ||
115 | } | ||
116 | |||
117 | static inline unsigned long pte_pfn(pte_t pte) | ||
118 | { | ||
119 | return (pte.pte_low >> PAGE_SHIFT) | | ||
120 | (pte.pte_high << (32 - PAGE_SHIFT)); | ||
121 | } | ||
122 | |||
123 | extern unsigned long long __supported_pte_mask; | ||
124 | |||
125 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) | ||
126 | { | ||
127 | pte_t pte; | ||
128 | |||
129 | pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \ | ||
130 | (pgprot_val(pgprot) >> 32); | ||
131 | pte.pte_high &= (__supported_pte_mask >> 32); | ||
132 | pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \ | ||
133 | __supported_pte_mask; | ||
134 | return pte; | ||
135 | } | ||
136 | |||
137 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | ||
138 | { | ||
139 | return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \ | ||
140 | pgprot_val(pgprot)) & __supported_pte_mask); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Bits 0, 6 and 7 are taken in the low part of the pte, | ||
145 | * put the 32 bits of offset into the high part. | ||
146 | */ | ||
147 | #define pte_to_pgoff(pte) ((pte).pte_high) | ||
148 | #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) | ||
149 | #define PTE_FILE_MAX_BITS 32 | ||
150 | |||
151 | /* Encode and de-code a swap entry */ | ||
152 | #define __swp_type(x) (((x).val) & 0x1f) | ||
153 | #define __swp_offset(x) ((x).val >> 5) | ||
154 | #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) | ||
155 | #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) | ||
156 | #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) | ||
157 | |||
158 | #define __pmd_free_tlb(tlb, x) do { } while (0) | ||
159 | |||
160 | #endif /* _I386_PGTABLE_3LEVEL_H */ | ||
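pfn_pte() and pte_pfn() above split a page frame number across the 32-bit pte_low/pte_high halves, which is also why set_pte() writes the high word first and only then the low word carrying the present bit. The following user-space sketch reproduces just the split, leaving out the protection bits and __supported_pte_mask for clarity; the example pfn is arbitrary.

/* Illustrative user-space sketch of the PAE pfn <-> pte_low/pte_high split. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct pae_pte { uint32_t pte_low, pte_high; };

static struct pae_pte pfn_to_pte(uint64_t pfn)
{
	struct pae_pte pte;

	pte.pte_high = (uint32_t)(pfn >> (32 - PAGE_SHIFT));  /* top bits of the pfn    */
	pte.pte_low  = (uint32_t)(pfn << PAGE_SHIFT);         /* low bits, page-shifted */
	return pte;
}

static uint64_t pte_to_pfn(struct pae_pte pte)
{
	return ((uint64_t)pte.pte_low >> PAGE_SHIFT) |
	       ((uint64_t)pte.pte_high << (32 - PAGE_SHIFT));
}

int main(void)
{
	uint64_t pfn = 0x123456;   /* a frame whose address lies above 4 GB, PAE only */
	struct pae_pte pte = pfn_to_pte(pfn);

	printf("pfn 0x%llx -> pte_low 0x%08x pte_high 0x%08x -> pfn 0x%llx\n",
	       (unsigned long long)pfn, pte.pte_low, pte.pte_high,
	       (unsigned long long)pte_to_pfn(pte));
	return 0;
}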
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h new file mode 100644 index 000000000000..488c2b4befa5 --- /dev/null +++ b/include/asm-i386/pgtable.h | |||
@@ -0,0 +1,422 @@ | |||
1 | #ifndef _I386_PGTABLE_H | ||
2 | #define _I386_PGTABLE_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | |||
6 | /* | ||
7 | * The Linux memory management assumes a three-level page table setup. On | ||
8 | * the i386, we use that, but "fold" the mid level into the top-level page | ||
9 | * table, so that we physically have the same two-level page table as the | ||
10 | * i386 mmu expects. | ||
11 | * | ||
12 | * This file contains the functions and defines necessary to modify and use | ||
13 | * the i386 page table tree. | ||
14 | */ | ||
15 | #ifndef __ASSEMBLY__ | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/fixmap.h> | ||
18 | #include <linux/threads.h> | ||
19 | |||
20 | #ifndef _I386_BITOPS_H | ||
21 | #include <asm/bitops.h> | ||
22 | #endif | ||
23 | |||
24 | #include <linux/slab.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | |||
28 | /* | ||
29 | * ZERO_PAGE is a global shared page that is always zero: used | ||
30 | * for zero-mapped memory areas etc.. | ||
31 | */ | ||
32 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
33 | extern unsigned long empty_zero_page[1024]; | ||
34 | extern pgd_t swapper_pg_dir[1024]; | ||
35 | extern kmem_cache_t *pgd_cache; | ||
36 | extern kmem_cache_t *pmd_cache; | ||
37 | extern spinlock_t pgd_lock; | ||
38 | extern struct page *pgd_list; | ||
39 | |||
40 | void pmd_ctor(void *, kmem_cache_t *, unsigned long); | ||
41 | void pgd_ctor(void *, kmem_cache_t *, unsigned long); | ||
42 | void pgd_dtor(void *, kmem_cache_t *, unsigned long); | ||
43 | void pgtable_cache_init(void); | ||
44 | void paging_init(void); | ||
45 | |||
46 | /* | ||
47 | * The Linux x86 paging architecture is 'compile-time dual-mode', it | ||
48 | * implements both the traditional 2-level x86 page tables and the | ||
49 | * newer 3-level PAE-mode page tables. | ||
50 | */ | ||
51 | #ifdef CONFIG_X86_PAE | ||
52 | # include <asm/pgtable-3level-defs.h> | ||
53 | # define PMD_SIZE (1UL << PMD_SHIFT) | ||
54 | # define PMD_MASK (~(PMD_SIZE-1)) | ||
55 | #else | ||
56 | # include <asm/pgtable-2level-defs.h> | ||
57 | #endif | ||
58 | |||
59 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
60 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
61 | |||
62 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | ||
63 | #define FIRST_USER_PGD_NR 0 | ||
64 | |||
65 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | ||
66 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | ||
67 | |||
68 | #define TWOLEVEL_PGDIR_SHIFT 22 | ||
69 | #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) | ||
70 | #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) | ||
71 | |||
72 | /* Just any arbitrary offset to the start of the vmalloc VM area: the | ||
73 | * current 8MB value just means that there will be an 8MB "hole" after the | ||
74 | * physical memory until the kernel virtual memory starts. That means that | ||
75 | * any out-of-bounds memory accesses will hopefully be caught. | ||
76 | * The vmalloc() routines leave a hole of 4kB between each vmalloced | ||
77 | * area for the same reason. ;) | ||
78 | */ | ||
79 | #define VMALLOC_OFFSET (8*1024*1024) | ||
80 | #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \ | ||
81 | 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) | ||
82 | #ifdef CONFIG_HIGHMEM | ||
83 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | ||
84 | #else | ||
85 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | ||
86 | #endif | ||
87 | |||
88 | /* | ||
89 | * The 4MB page support is guesswork.. Detailed in the infamous "Chapter H" | ||
90 | * of the Pentium details, but assuming Intel did the straightforward | ||
91 | * thing, this bit set in the page directory entry just means that | ||
92 | * the page directory entry points directly to a 4MB-aligned block of | ||
93 | * memory. | ||
94 | */ | ||
95 | #define _PAGE_BIT_PRESENT 0 | ||
96 | #define _PAGE_BIT_RW 1 | ||
97 | #define _PAGE_BIT_USER 2 | ||
98 | #define _PAGE_BIT_PWT 3 | ||
99 | #define _PAGE_BIT_PCD 4 | ||
100 | #define _PAGE_BIT_ACCESSED 5 | ||
101 | #define _PAGE_BIT_DIRTY 6 | ||
102 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ | ||
103 | #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ | ||
104 | #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ | ||
105 | #define _PAGE_BIT_UNUSED2 10 | ||
106 | #define _PAGE_BIT_UNUSED3 11 | ||
107 | #define _PAGE_BIT_NX 63 | ||
108 | |||
109 | #define _PAGE_PRESENT 0x001 | ||
110 | #define _PAGE_RW 0x002 | ||
111 | #define _PAGE_USER 0x004 | ||
112 | #define _PAGE_PWT 0x008 | ||
113 | #define _PAGE_PCD 0x010 | ||
114 | #define _PAGE_ACCESSED 0x020 | ||
115 | #define _PAGE_DIRTY 0x040 | ||
116 | #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ | ||
117 | #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ | ||
118 | #define _PAGE_UNUSED1 0x200 /* available for programmer */ | ||
119 | #define _PAGE_UNUSED2 0x400 | ||
120 | #define _PAGE_UNUSED3 0x800 | ||
121 | |||
122 | #define _PAGE_FILE 0x040 /* set:pagecache unset:swap */ | ||
123 | #define _PAGE_PROTNONE 0x080 /* If not present */ | ||
124 | #ifdef CONFIG_X86_PAE | ||
125 | #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) | ||
126 | #else | ||
127 | #define _PAGE_NX 0 | ||
128 | #endif | ||
129 | |||
130 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
131 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
132 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
133 | |||
134 | #define PAGE_NONE \ | ||
135 | __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | ||
136 | #define PAGE_SHARED \ | ||
137 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | ||
138 | |||
139 | #define PAGE_SHARED_EXEC \ | ||
140 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | ||
141 | #define PAGE_COPY_NOEXEC \ | ||
142 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
143 | #define PAGE_COPY_EXEC \ | ||
144 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
145 | #define PAGE_COPY \ | ||
146 | PAGE_COPY_NOEXEC | ||
147 | #define PAGE_READONLY \ | ||
148 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
149 | #define PAGE_READONLY_EXEC \ | ||
150 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
151 | |||
152 | #define _PAGE_KERNEL \ | ||
153 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) | ||
154 | #define _PAGE_KERNEL_EXEC \ | ||
155 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
156 | |||
157 | extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | ||
158 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | ||
159 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) | ||
160 | #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) | ||
161 | #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) | ||
162 | |||
163 | #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) | ||
164 | #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) | ||
165 | #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) | ||
166 | #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) | ||
167 | #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) | ||
168 | #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) | ||
169 | |||
170 | /* | ||
171 | * The i386 can't do page protection for execute, and considers it | ||
172 | * the same as read. Also, write permissions imply read permissions. | ||
173 | * This is the closest we can get.. | ||
174 | */ | ||
175 | #define __P000 PAGE_NONE | ||
176 | #define __P001 PAGE_READONLY | ||
177 | #define __P010 PAGE_COPY | ||
178 | #define __P011 PAGE_COPY | ||
179 | #define __P100 PAGE_READONLY_EXEC | ||
180 | #define __P101 PAGE_READONLY_EXEC | ||
181 | #define __P110 PAGE_COPY_EXEC | ||
182 | #define __P111 PAGE_COPY_EXEC | ||
183 | |||
184 | #define __S000 PAGE_NONE | ||
185 | #define __S001 PAGE_READONLY | ||
186 | #define __S010 PAGE_SHARED | ||
187 | #define __S011 PAGE_SHARED | ||
188 | #define __S100 PAGE_READONLY_EXEC | ||
189 | #define __S101 PAGE_READONLY_EXEC | ||
190 | #define __S110 PAGE_SHARED_EXEC | ||
191 | #define __S111 PAGE_SHARED_EXEC | ||
192 | |||
193 | /* | ||
194 | * Define this if things work differently on an i386 and an i486: | ||
195 | * it will (on an i486) warn about kernel memory accesses that are | ||
196 | * done without a 'verify_area(VERIFY_WRITE,..)' | ||
197 | */ | ||
198 | #undef TEST_VERIFY_AREA | ||
199 | |||
200 | /* The boot page tables (all created as a single array) */ | ||
201 | extern unsigned long pg0[]; | ||
202 | |||
203 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) | ||
204 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | ||
205 | |||
206 | #define pmd_none(x) (!pmd_val(x)) | ||
207 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | ||
208 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | ||
209 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) | ||
210 | |||
211 | |||
212 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | ||
213 | |||
214 | /* | ||
215 | * The following only work if pte_present() is true. | ||
216 | * Undefined behaviour if not.. | ||
217 | */ | ||
218 | static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } | ||
219 | static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } | ||
220 | static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } | ||
221 | static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } | ||
222 | static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } | ||
223 | |||
224 | /* | ||
225 | * The following only works if pte_present() is not true. | ||
226 | */ | ||
227 | static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } | ||
228 | |||
229 | static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } | ||
230 | static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; } | ||
231 | static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } | ||
232 | static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } | ||
233 | static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } | ||
234 | static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } | ||
235 | static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; } | ||
236 | static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } | ||
237 | static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } | ||
238 | static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } | ||
239 | |||
240 | #ifdef CONFIG_X86_PAE | ||
241 | # include <asm/pgtable-3level.h> | ||
242 | #else | ||
243 | # include <asm/pgtable-2level.h> | ||
244 | #endif | ||
245 | |||
246 | static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
247 | { | ||
248 | if (!pte_dirty(*ptep)) | ||
249 | return 0; | ||
250 | return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); | ||
251 | } | ||
252 | |||
253 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
254 | { | ||
255 | if (!pte_young(*ptep)) | ||
256 | return 0; | ||
257 | return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); | ||
258 | } | ||
259 | |||
260 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
261 | { | ||
262 | clear_bit(_PAGE_BIT_RW, &ptep->pte_low); | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * Macro to mark a page protection value as "uncacheable". On processors which do not support | ||
267 | * it, this is a no-op. | ||
268 | */ | ||
269 | #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ | ||
270 | ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) | ||
271 | |||
272 | /* | ||
273 | * Conversion functions: convert a page and protection to a page entry, | ||
274 | * and a page entry and page directory to the page they refer to. | ||
275 | */ | ||
276 | |||
277 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
278 | #define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE) | ||
279 | |||
280 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
281 | { | ||
282 | pte.pte_low &= _PAGE_CHG_MASK; | ||
283 | pte.pte_low |= pgprot_val(newprot); | ||
284 | #ifdef CONFIG_X86_PAE | ||
285 | /* | ||
286 | * Chop off the NX bit (if present), and add the NX portion of | ||
287 | * the newprot (if present): | ||
288 | */ | ||
289 | pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); | ||
290 | pte.pte_high |= (pgprot_val(newprot) >> 32) & \ | ||
291 | (__supported_pte_mask >> 32); | ||
292 | #endif | ||
293 | return pte; | ||
294 | } | ||
295 | |||
296 | #define page_pte(page) page_pte_prot(page, __pgprot(0)) | ||
297 | |||
298 | #define pmd_large(pmd) \ | ||
299 | ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) | ||
300 | |||
301 | /* | ||
302 | * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] | ||
303 | * | ||
304 | * this macro returns the index of the entry in the pgd page which would | ||
305 | * control the given virtual address | ||
306 | */ | ||
307 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | ||
308 | #define pgd_index_k(addr) pgd_index(addr) | ||
309 | |||
310 | /* | ||
311 | * pgd_offset() returns a (pgd_t *) | ||
312 | * pgd_index() is used to get the offset into the pgd page's array of pgd_t's; | ||
313 | */ | ||
314 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | ||
315 | |||
316 | /* | ||
317 | * a shortcut which implies the use of the kernel's pgd, instead | ||
318 | * of a process's | ||
319 | */ | ||
320 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
321 | |||
322 | /* | ||
323 | * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD] | ||
324 | * | ||
325 | * this macro returns the index of the entry in the pmd page which would | ||
326 | * control the given virtual address | ||
327 | */ | ||
328 | #define pmd_index(address) \ | ||
329 | (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | ||
330 | |||
331 | /* | ||
332 | * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE] | ||
333 | * | ||
334 | * this macro returns the index of the entry in the pte page which would | ||
335 | * control the given virtual address | ||
336 | */ | ||
337 | #define pte_index(address) \ | ||
338 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
339 | #define pte_offset_kernel(dir, address) \ | ||
340 | ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address)) | ||
341 | |||
342 | /* | ||
343 | * Helper function that returns the kernel pagetable entry controlling | ||
344 | * the virtual address 'address'. NULL means no pagetable entry present. | ||
345 | * NOTE: the return type is pte_t but if the pmd is PSE then we return it | ||
346 | * as a pte too. | ||
347 | */ | ||
348 | extern pte_t *lookup_address(unsigned long address); | ||
349 | |||
350 | /* | ||
351 | * Make a given kernel text page executable/non-executable. | ||
352 | * Returns the previous executability setting of that page (which | ||
353 | * is used to restore the previous state). Used by the SMP bootup code. | ||
354 | * NOTE: this is an __init function for security reasons. | ||
355 | */ | ||
356 | #ifdef CONFIG_X86_PAE | ||
357 | extern int set_kernel_exec(unsigned long vaddr, int enable); | ||
358 | #else | ||
359 | static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} | ||
360 | #endif | ||
361 | |||
362 | extern void noexec_setup(const char *str); | ||
363 | |||
364 | #if defined(CONFIG_HIGHPTE) | ||
365 | #define pte_offset_map(dir, address) \ | ||
366 | ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) | ||
367 | #define pte_offset_map_nested(dir, address) \ | ||
368 | ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) | ||
369 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | ||
370 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | ||
371 | #else | ||
372 | #define pte_offset_map(dir, address) \ | ||
373 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) | ||
374 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | ||
375 | #define pte_unmap(pte) do { } while (0) | ||
376 | #define pte_unmap_nested(pte) do { } while (0) | ||
377 | #endif | ||
378 | |||
379 | /* | ||
380 | * The i386 doesn't have any external MMU info: the kernel page | ||
381 | * tables contain all the necessary information. | ||
382 | * | ||
383 | * Also, we only update the dirty/accessed state if we set | ||
384 | * the dirty bit by hand in the kernel, since the hardware | ||
385 | * will do the accessed bit for us, and we don't want to | ||
386 | * race with other CPUs that might be updating the dirty | ||
387 | * bit at the same time. | ||
388 | */ | ||
389 | #define update_mmu_cache(vma,address,pte) do { } while (0) | ||
390 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
391 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
392 | do { \ | ||
393 | if (__dirty) { \ | ||
394 | (__ptep)->pte_low = (__entry).pte_low; \ | ||
395 | flush_tlb_page(__vma, __address); \ | ||
396 | } \ | ||
397 | } while (0) | ||
398 | |||
399 | #endif /* !__ASSEMBLY__ */ | ||
400 | |||
401 | #ifndef CONFIG_DISCONTIGMEM | ||
402 | #define kern_addr_valid(addr) (1) | ||
403 | #endif /* !CONFIG_DISCONTIGMEM */ | ||
404 | |||
405 | #define io_remap_page_range(vma, vaddr, paddr, size, prot) \ | ||
406 | remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot) | ||
407 | |||
408 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
409 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
410 | |||
411 | #define MK_IOSPACE_PFN(space, pfn) (pfn) | ||
412 | #define GET_IOSPACE(pfn) 0 | ||
413 | #define GET_PFN(pfn) (pfn) | ||
414 | |||
415 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
416 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
417 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
418 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
419 | #define __HAVE_ARCH_PTE_SAME | ||
420 | #include <asm-generic/pgtable.h> | ||
421 | |||
422 | #endif /* _I386_PGTABLE_H */ | ||
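The pgd_index()/pmd_index()/pte_index() macros above simply slice a virtual address into per-level table indices. A minimal user-space sketch of that decomposition follows, using the PAE constants from this diff; the example address is arbitrary.

/* Illustrative user-space sketch: decompose a virtual address the same
 * way pgd_index()/pmd_index()/pte_index() do, with PAE constants. */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PMD_SHIFT    21
#define PGDIR_SHIFT  30
#define PTRS_PER_PGD 4
#define PTRS_PER_PMD 512
#define PTRS_PER_PTE 512

int main(void)
{
	unsigned long addr = 0xB7E43123UL;   /* an arbitrary user-space address */

	printf("addr 0x%08lx:\n", addr);
	printf("  pgd_index = %lu\n", (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
	printf("  pmd_index = %lu\n", (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1));
	printf("  pte_index = %lu\n", (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1));
	printf("  offset    = 0x%lx\n", addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}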
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h new file mode 100644 index 000000000000..aecc80a15d36 --- /dev/null +++ b/include/asm-i386/poll.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef __i386_POLL_H | ||
2 | #define __i386_POLL_H | ||
3 | |||
4 | /* These are specified by iBCS2 */ | ||
5 | #define POLLIN 0x0001 | ||
6 | #define POLLPRI 0x0002 | ||
7 | #define POLLOUT 0x0004 | ||
8 | #define POLLERR 0x0008 | ||
9 | #define POLLHUP 0x0010 | ||
10 | #define POLLNVAL 0x0020 | ||
11 | |||
12 | /* The rest seem to be more-or-less nonstandard. Check them! */ | ||
13 | #define POLLRDNORM 0x0040 | ||
14 | #define POLLRDBAND 0x0080 | ||
15 | #define POLLWRNORM 0x0100 | ||
16 | #define POLLWRBAND 0x0200 | ||
17 | #define POLLMSG 0x0400 | ||
18 | #define POLLREMOVE 0x1000 | ||
19 | |||
20 | struct pollfd { | ||
21 | int fd; | ||
22 | short events; | ||
23 | short revents; | ||
24 | }; | ||
25 | |||
26 | #endif | ||
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h new file mode 100644 index 000000000000..4e47ed059ad6 --- /dev/null +++ b/include/asm-i386/posix_types.h | |||
@@ -0,0 +1,82 @@ | |||
1 | #ifndef __ARCH_I386_POSIX_TYPES_H | ||
2 | #define __ARCH_I386_POSIX_TYPES_H | ||
3 | |||
4 | /* | ||
5 | * This file is generally used by user-level software, so you need to | ||
6 | * be a little careful about namespace pollution etc. Also, we cannot | ||
7 | * assume GCC is being used. | ||
8 | */ | ||
9 | |||
10 | typedef unsigned long __kernel_ino_t; | ||
11 | typedef unsigned short __kernel_mode_t; | ||
12 | typedef unsigned short __kernel_nlink_t; | ||
13 | typedef long __kernel_off_t; | ||
14 | typedef int __kernel_pid_t; | ||
15 | typedef unsigned short __kernel_ipc_pid_t; | ||
16 | typedef unsigned short __kernel_uid_t; | ||
17 | typedef unsigned short __kernel_gid_t; | ||
18 | typedef unsigned int __kernel_size_t; | ||
19 | typedef int __kernel_ssize_t; | ||
20 | typedef int __kernel_ptrdiff_t; | ||
21 | typedef long __kernel_time_t; | ||
22 | typedef long __kernel_suseconds_t; | ||
23 | typedef long __kernel_clock_t; | ||
24 | typedef int __kernel_timer_t; | ||
25 | typedef int __kernel_clockid_t; | ||
26 | typedef int __kernel_daddr_t; | ||
27 | typedef char * __kernel_caddr_t; | ||
28 | typedef unsigned short __kernel_uid16_t; | ||
29 | typedef unsigned short __kernel_gid16_t; | ||
30 | typedef unsigned int __kernel_uid32_t; | ||
31 | typedef unsigned int __kernel_gid32_t; | ||
32 | |||
33 | typedef unsigned short __kernel_old_uid_t; | ||
34 | typedef unsigned short __kernel_old_gid_t; | ||
35 | typedef unsigned short __kernel_old_dev_t; | ||
36 | |||
37 | #ifdef __GNUC__ | ||
38 | typedef long long __kernel_loff_t; | ||
39 | #endif | ||
40 | |||
41 | typedef struct { | ||
42 | #if defined(__KERNEL__) || defined(__USE_ALL) | ||
43 | int val[2]; | ||
44 | #else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ | ||
45 | int __val[2]; | ||
46 | #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ | ||
47 | } __kernel_fsid_t; | ||
48 | |||
49 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | ||
50 | |||
51 | #undef __FD_SET | ||
52 | #define __FD_SET(fd,fdsetp) \ | ||
53 | __asm__ __volatile__("btsl %1,%0": \ | ||
54 | "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) | ||
55 | |||
56 | #undef __FD_CLR | ||
57 | #define __FD_CLR(fd,fdsetp) \ | ||
58 | __asm__ __volatile__("btrl %1,%0": \ | ||
59 | "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) | ||
60 | |||
61 | #undef __FD_ISSET | ||
62 | #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ | ||
63 | unsigned char __result; \ | ||
64 | __asm__ __volatile__("btl %1,%2 ; setb %0" \ | ||
65 | :"=q" (__result) :"r" ((int) (fd)), \ | ||
66 | "m" (*(__kernel_fd_set *) (fdsetp))); \ | ||
67 | __result; })) | ||
68 | |||
69 | #undef __FD_ZERO | ||
70 | #define __FD_ZERO(fdsetp) \ | ||
71 | do { \ | ||
72 | int __d0, __d1; \ | ||
73 | __asm__ __volatile__("cld ; rep ; stosl" \ | ||
74 | :"=m" (*(__kernel_fd_set *) (fdsetp)), \ | ||
75 | "=&c" (__d0), "=&D" (__d1) \ | ||
76 | :"a" (0), "1" (__FDSET_LONGS), \ | ||
77 | "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ | ||
78 | } while (0) | ||
79 | |||
80 | #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ | ||
81 | |||
82 | #endif | ||
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h new file mode 100644 index 000000000000..be258b0e5a5f --- /dev/null +++ b/include/asm-i386/processor.h | |||
@@ -0,0 +1,682 @@ | |||
1 | /* | ||
2 | * include/asm-i386/processor.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_I386_PROCESSOR_H | ||
8 | #define __ASM_I386_PROCESSOR_H | ||
9 | |||
10 | #include <asm/vm86.h> | ||
11 | #include <asm/math_emu.h> | ||
12 | #include <asm/segment.h> | ||
13 | #include <asm/page.h> | ||
14 | #include <asm/types.h> | ||
15 | #include <asm/sigcontext.h> | ||
16 | #include <asm/cpufeature.h> | ||
17 | #include <asm/msr.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <linux/cache.h> | ||
20 | #include <linux/config.h> | ||
21 | #include <linux/threads.h> | ||
22 | #include <asm/percpu.h> | ||
23 | |||
24 | /* flag for disabling the tsc */ | ||
25 | extern int tsc_disable; | ||
26 | |||
27 | struct desc_struct { | ||
28 | unsigned long a,b; | ||
29 | }; | ||
30 | |||
31 | #define desc_empty(desc) \ | ||
32 | (!((desc)->a + (desc)->b)) | ||
33 | |||
34 | #define desc_equal(desc1, desc2) \ | ||
35 | (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) | ||
36 | /* | ||
37 | * Default implementation of macro that returns current | ||
38 | * instruction pointer ("program counter"). | ||
39 | */ | ||
40 | #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) | ||
41 | |||
42 | /* | ||
43 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
44 | * Members of this structure are referenced in head.S, so think twice | ||
45 | * before touching them. [mj] | ||
46 | */ | ||
47 | |||
48 | struct cpuinfo_x86 { | ||
49 | __u8 x86; /* CPU family */ | ||
50 | __u8 x86_vendor; /* CPU vendor */ | ||
51 | __u8 x86_model; | ||
52 | __u8 x86_mask; | ||
53 | char wp_works_ok; /* It doesn't on 386's */ | ||
54 | char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ | ||
55 | char hard_math; | ||
56 | char rfu; | ||
57 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | ||
58 | unsigned long x86_capability[NCAPINTS]; | ||
59 | char x86_vendor_id[16]; | ||
60 | char x86_model_id[64]; | ||
61 | int x86_cache_size; /* in KB - valid for CPUs which support this | ||
62 | call */ | ||
63 | int x86_cache_alignment; /* In bytes */ | ||
64 | int fdiv_bug; | ||
65 | int f00f_bug; | ||
66 | int coma_bug; | ||
67 | unsigned long loops_per_jiffy; | ||
68 | unsigned char x86_num_cores; | ||
69 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
70 | |||
71 | #define X86_VENDOR_INTEL 0 | ||
72 | #define X86_VENDOR_CYRIX 1 | ||
73 | #define X86_VENDOR_AMD 2 | ||
74 | #define X86_VENDOR_UMC 3 | ||
75 | #define X86_VENDOR_NEXGEN 4 | ||
76 | #define X86_VENDOR_CENTAUR 5 | ||
77 | #define X86_VENDOR_RISE 6 | ||
78 | #define X86_VENDOR_TRANSMETA 7 | ||
79 | #define X86_VENDOR_NSC 8 | ||
80 | #define X86_VENDOR_NUM 9 | ||
81 | #define X86_VENDOR_UNKNOWN 0xff | ||
82 | |||
83 | /* | ||
84 | * capabilities of CPUs | ||
85 | */ | ||
86 | |||
87 | extern struct cpuinfo_x86 boot_cpu_data; | ||
88 | extern struct cpuinfo_x86 new_cpu_data; | ||
89 | extern struct tss_struct doublefault_tss; | ||
90 | DECLARE_PER_CPU(struct tss_struct, init_tss); | ||
91 | |||
92 | #ifdef CONFIG_SMP | ||
93 | extern struct cpuinfo_x86 cpu_data[]; | ||
94 | #define current_cpu_data cpu_data[smp_processor_id()] | ||
95 | #else | ||
96 | #define cpu_data (&boot_cpu_data) | ||
97 | #define current_cpu_data boot_cpu_data | ||
98 | #endif | ||
99 | |||
100 | extern int phys_proc_id[NR_CPUS]; | ||
101 | extern char ignore_fpu_irq; | ||
102 | |||
103 | extern void identify_cpu(struct cpuinfo_x86 *); | ||
104 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
105 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
106 | |||
107 | #ifdef CONFIG_X86_HT | ||
108 | extern void detect_ht(struct cpuinfo_x86 *c); | ||
109 | #else | ||
110 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
111 | #endif | ||
112 | |||
113 | /* | ||
114 | * EFLAGS bits | ||
115 | */ | ||
116 | #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ | ||
117 | #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ | ||
118 | #define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */ | ||
119 | #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ | ||
120 | #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ | ||
121 | #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ | ||
122 | #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ | ||
123 | #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ | ||
124 | #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ | ||
125 | #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ | ||
126 | #define X86_EFLAGS_NT 0x00004000 /* Nested Task */ | ||
127 | #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ | ||
128 | #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ | ||
129 | #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ | ||
130 | #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ | ||
131 | #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ | ||
132 | #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ | ||
133 | |||
134 | /* | ||
135 | * Generic CPUID function | ||
136 | * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx, | ||
137 | * resulting in stale register contents being returned. | ||
138 | */ | ||
139 | static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) | ||
140 | { | ||
141 | __asm__("cpuid" | ||
142 | : "=a" (*eax), | ||
143 | "=b" (*ebx), | ||
144 | "=c" (*ecx), | ||
145 | "=d" (*edx) | ||
146 | : "0" (op), "c"(0)); | ||
147 | } | ||
148 | |||
149 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
150 | static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | ||
151 | int *edx) | ||
152 | { | ||
153 | __asm__("cpuid" | ||
154 | : "=a" (*eax), | ||
155 | "=b" (*ebx), | ||
156 | "=c" (*ecx), | ||
157 | "=d" (*edx) | ||
158 | : "0" (op), "c" (count)); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * CPUID functions returning a single datum | ||
163 | */ | ||
164 | static inline unsigned int cpuid_eax(unsigned int op) | ||
165 | { | ||
166 | unsigned int eax; | ||
167 | |||
168 | __asm__("cpuid" | ||
169 | : "=a" (eax) | ||
170 | : "0" (op) | ||
171 | : "bx", "cx", "dx"); | ||
172 | return eax; | ||
173 | } | ||
174 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
175 | { | ||
176 | unsigned int eax, ebx; | ||
177 | |||
178 | __asm__("cpuid" | ||
179 | : "=a" (eax), "=b" (ebx) | ||
180 | : "0" (op) | ||
181 | : "cx", "dx" ); | ||
182 | return ebx; | ||
183 | } | ||
184 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
185 | { | ||
186 | unsigned int eax, ecx; | ||
187 | |||
188 | __asm__("cpuid" | ||
189 | : "=a" (eax), "=c" (ecx) | ||
190 | : "0" (op) | ||
191 | : "bx", "dx" ); | ||
192 | return ecx; | ||
193 | } | ||
194 | static inline unsigned int cpuid_edx(unsigned int op) | ||
195 | { | ||
196 | unsigned int eax, edx; | ||
197 | |||
198 | __asm__("cpuid" | ||
199 | : "=a" (eax), "=d" (edx) | ||
200 | : "0" (op) | ||
201 | : "bx", "cx"); | ||
202 | return edx; | ||
203 | } | ||
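/*
 * Editor's illustrative sketch, not part of the original header: one way a
 * caller might use the helpers above to read the vendor string and the
 * leaf-1 feature bits.  The function name and buffer are hypothetical.
 */
#if 0
static void example_dump_cpuid(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	/* leaf 0: maximum leaf in eax, vendor string in ebx/edx/ecx */
	cpuid(0, &eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	/* leaf 1: feature flags are in edx; the other registers are clobbered */
	printk("CPU vendor %s, leaf-1 edx features %08x\n", vendor, cpuid_edx(1));
}
#endif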
204 | |||
205 | #define load_cr3(pgdir) \ | ||
206 | asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir))) | ||
207 | |||
208 | |||
209 | /* | ||
210 | * Intel CPU features in CR4 | ||
211 | */ | ||
212 | #define X86_CR4_VME 0x0001 /* enable vm86 extensions */ | ||
213 | #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */ | ||
214 | #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */ | ||
215 | #define X86_CR4_DE 0x0008 /* enable debugging extensions */ | ||
216 | #define X86_CR4_PSE 0x0010 /* enable page size extensions */ | ||
217 | #define X86_CR4_PAE 0x0020 /* enable physical address extensions */ | ||
218 | #define X86_CR4_MCE 0x0040 /* Machine check enable */ | ||
219 | #define X86_CR4_PGE 0x0080 /* enable global pages */ | ||
220 | #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */ | ||
221 | #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */ | ||
222 | #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */ | ||
223 | |||
224 | /* | ||
225 | * Save the cr4 feature set we're using (ie | ||
226 | * Pentium 4MB enable and PPro Global page | ||
227 | * enable), so that any CPU's that boot up | ||
228 | * after us can get the correct flags. | ||
229 | */ | ||
230 | extern unsigned long mmu_cr4_features; | ||
231 | |||
232 | static inline void set_in_cr4 (unsigned long mask) | ||
233 | { | ||
234 | mmu_cr4_features |= mask; | ||
235 | __asm__("movl %%cr4,%%eax\n\t" | ||
236 | "orl %0,%%eax\n\t" | ||
237 | "movl %%eax,%%cr4\n" | ||
238 | : : "irg" (mask) | ||
239 | :"ax"); | ||
240 | } | ||
241 | |||
242 | static inline void clear_in_cr4 (unsigned long mask) | ||
243 | { | ||
244 | mmu_cr4_features &= ~mask; | ||
245 | __asm__("movl %%cr4,%%eax\n\t" | ||
246 | "andl %0,%%eax\n\t" | ||
247 | "movl %%eax,%%cr4\n" | ||
248 | : : "irg" (~mask) | ||
249 | :"ax"); | ||
250 | } | ||
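/*
 * Editor's illustrative sketch, not part of the original header: how boot
 * code might use set_in_cr4() to switch a CR4 feature on while keeping
 * mmu_cr4_features in sync for later-booting CPUs.  cpu_has_pge and
 * __flush_tlb_all() are assumed to come from other asm-i386 headers.
 */
#if 0
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);	/* enable global pages */
		__flush_tlb_all();		/* flush the non-global TLB entries */
	}
#endif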
251 | |||
252 | /* | ||
253 | * NSC/Cyrix CPU configuration register indexes | ||
254 | */ | ||
255 | |||
256 | #define CX86_PCR0 0x20 | ||
257 | #define CX86_GCR 0xb8 | ||
258 | #define CX86_CCR0 0xc0 | ||
259 | #define CX86_CCR1 0xc1 | ||
260 | #define CX86_CCR2 0xc2 | ||
261 | #define CX86_CCR3 0xc3 | ||
262 | #define CX86_CCR4 0xe8 | ||
263 | #define CX86_CCR5 0xe9 | ||
264 | #define CX86_CCR6 0xea | ||
265 | #define CX86_CCR7 0xeb | ||
266 | #define CX86_PCR1 0xf0 | ||
267 | #define CX86_DIR0 0xfe | ||
268 | #define CX86_DIR1 0xff | ||
269 | #define CX86_ARR_BASE 0xc4 | ||
270 | #define CX86_RCR_BASE 0xdc | ||
271 | |||
272 | /* | ||
273 | * NSC/Cyrix CPU indexed register access macros | ||
274 | */ | ||
275 | |||
276 | #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); }) | ||
277 | |||
278 | #define setCx86(reg, data) do { \ | ||
279 | outb((reg), 0x22); \ | ||
280 | outb((data), 0x23); \ | ||
281 | } while (0) | ||
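/*
 * Editor's illustrative sketch, not part of the original header: the usual
 * read-modify-write of a Cyrix configuration register through the indexed
 * 0x22/0x23 port pair.  The bit manipulated here is an arbitrary example
 * value, not something defined by this header.
 */
#if 0
	unsigned char ccr2 = getCx86(CX86_CCR2);
	setCx86(CX86_CCR2, ccr2 | 0x08);	/* set one CCR2 bit */
#endif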
282 | |||
283 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
284 | unsigned long edx) | ||
285 | { | ||
286 | /* "monitor %eax,%ecx,%edx;" */ | ||
287 | asm volatile( | ||
288 | ".byte 0x0f,0x01,0xc8;" | ||
289 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
290 | } | ||
291 | |||
292 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
293 | { | ||
294 | /* "mwait %eax,%ecx;" */ | ||
295 | asm volatile( | ||
296 | ".byte 0x0f,0x01,0xc9;" | ||
297 | : :"a" (eax), "c" (ecx)); | ||
298 | } | ||
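/*
 * Editor's illustrative sketch, not part of the original header: the usual
 * MONITOR/MWAIT idle pattern built on the two wrappers above - arm the
 * monitor on the thread flags word, then MWAIT until a write to that cache
 * line (e.g. a reschedule request) or an interrupt wakes the CPU.
 */
#if 0
	__monitor((void *)&current_thread_info()->flags, 0, 0);
	if (!need_resched())
		__mwait(0, 0);
#endif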
299 | |||
300 | /* From the system description table in the BIOS. Mostly for MCA use, but | ||
301 | others may find it useful. */ | ||
302 | extern unsigned int machine_id; | ||
303 | extern unsigned int machine_submodel_id; | ||
304 | extern unsigned int BIOS_revision; | ||
305 | extern unsigned int mca_pentium_flag; | ||
306 | |||
307 | /* Boot loader type from the setup header */ | ||
308 | extern int bootloader_type; | ||
309 | |||
310 | /* | ||
311 | * User space process size: 3GB (default). | ||
312 | */ | ||
313 | #define TASK_SIZE (PAGE_OFFSET) | ||
314 | |||
315 | /* This decides where the kernel will search for a free chunk of vm | ||
316 | * space during mmap's. | ||
317 | */ | ||
318 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
319 | |||
320 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
321 | |||
322 | /* | ||
323 | * Size of io_bitmap. | ||
324 | */ | ||
325 | #define IO_BITMAP_BITS 65536 | ||
326 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | ||
327 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | ||
328 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) | ||
329 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | ||
330 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | ||
331 | |||
332 | struct i387_fsave_struct { | ||
333 | long cwd; | ||
334 | long swd; | ||
335 | long twd; | ||
336 | long fip; | ||
337 | long fcs; | ||
338 | long foo; | ||
339 | long fos; | ||
340 | long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
341 | long status; /* software status information */ | ||
342 | }; | ||
343 | |||
344 | struct i387_fxsave_struct { | ||
345 | unsigned short cwd; | ||
346 | unsigned short swd; | ||
347 | unsigned short twd; | ||
348 | unsigned short fop; | ||
349 | long fip; | ||
350 | long fcs; | ||
351 | long foo; | ||
352 | long fos; | ||
353 | long mxcsr; | ||
354 | long mxcsr_mask; | ||
355 | long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
356 | long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ | ||
357 | long padding[56]; | ||
358 | } __attribute__ ((aligned (16))); | ||
359 | |||
360 | struct i387_soft_struct { | ||
361 | long cwd; | ||
362 | long swd; | ||
363 | long twd; | ||
364 | long fip; | ||
365 | long fcs; | ||
366 | long foo; | ||
367 | long fos; | ||
368 | long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
369 | unsigned char ftop, changed, lookahead, no_update, rm, alimit; | ||
370 | struct info *info; | ||
371 | unsigned long entry_eip; | ||
372 | }; | ||
373 | |||
374 | union i387_union { | ||
375 | struct i387_fsave_struct fsave; | ||
376 | struct i387_fxsave_struct fxsave; | ||
377 | struct i387_soft_struct soft; | ||
378 | }; | ||
379 | |||
380 | typedef struct { | ||
381 | unsigned long seg; | ||
382 | } mm_segment_t; | ||
383 | |||
384 | struct thread_struct; | ||
385 | |||
386 | struct tss_struct { | ||
387 | unsigned short back_link,__blh; | ||
388 | unsigned long esp0; | ||
389 | unsigned short ss0,__ss0h; | ||
390 | unsigned long esp1; | ||
391 | unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ | ||
392 | unsigned long esp2; | ||
393 | unsigned short ss2,__ss2h; | ||
394 | unsigned long __cr3; | ||
395 | unsigned long eip; | ||
396 | unsigned long eflags; | ||
397 | unsigned long eax,ecx,edx,ebx; | ||
398 | unsigned long esp; | ||
399 | unsigned long ebp; | ||
400 | unsigned long esi; | ||
401 | unsigned long edi; | ||
402 | unsigned short es, __esh; | ||
403 | unsigned short cs, __csh; | ||
404 | unsigned short ss, __ssh; | ||
405 | unsigned short ds, __dsh; | ||
406 | unsigned short fs, __fsh; | ||
407 | unsigned short gs, __gsh; | ||
408 | unsigned short ldt, __ldth; | ||
409 | unsigned short trace, io_bitmap_base; | ||
410 | /* | ||
411 | * The extra 1 is there because the CPU will access an | ||
412 | * additional byte beyond the end of the IO permission | ||
413 | * bitmap. The extra byte must be all 1 bits, and must | ||
414 | * be within the limit. | ||
415 | */ | ||
416 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | ||
417 | /* | ||
418 | * Cache the current maximum and the last task that used the bitmap: | ||
419 | */ | ||
420 | unsigned long io_bitmap_max; | ||
421 | struct thread_struct *io_bitmap_owner; | ||
422 | /* | ||
423 | * pads the TSS to be cacheline-aligned (size is 0x100) | ||
424 | */ | ||
425 | unsigned long __cacheline_filler[35]; | ||
426 | /* | ||
427 | * .. and then another 0x100 bytes for emergency kernel stack | ||
428 | */ | ||
429 | unsigned long stack[64]; | ||
430 | } __attribute__((packed)); | ||
431 | |||
432 | #define ARCH_MIN_TASKALIGN 16 | ||
433 | |||
434 | struct thread_struct { | ||
435 | /* cached TLS descriptors. */ | ||
436 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
437 | unsigned long esp0; | ||
438 | unsigned long sysenter_cs; | ||
439 | unsigned long eip; | ||
440 | unsigned long esp; | ||
441 | unsigned long fs; | ||
442 | unsigned long gs; | ||
443 | /* Hardware debugging registers */ | ||
444 | unsigned long debugreg[8]; /* %%db0-7 debug registers */ | ||
445 | /* fault info */ | ||
446 | unsigned long cr2, trap_no, error_code; | ||
447 | /* floating point info */ | ||
448 | union i387_union i387; | ||
449 | /* virtual 86 mode info */ | ||
450 | struct vm86_struct __user * vm86_info; | ||
451 | unsigned long screen_bitmap; | ||
452 | unsigned long v86flags, v86mask, saved_esp0; | ||
453 | unsigned int saved_fs, saved_gs; | ||
454 | /* IO permissions */ | ||
455 | unsigned long *io_bitmap_ptr; | ||
456 | /* max allowed port in the bitmap, in bytes: */ | ||
457 | unsigned long io_bitmap_max; | ||
458 | }; | ||
459 | |||
460 | #define INIT_THREAD { \ | ||
461 | .vm86_info = NULL, \ | ||
462 | .sysenter_cs = __KERNEL_CS, \ | ||
463 | .io_bitmap_ptr = NULL, \ | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * Note that the .io_bitmap member must be extra-big. This is because | ||
468 | * the CPU will access an additional byte beyond the end of the IO | ||
469 | * permission bitmap. The extra byte must be all 1 bits, and must | ||
470 | * be within the limit. | ||
471 | */ | ||
472 | #define INIT_TSS { \ | ||
473 | .esp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
474 | .ss0 = __KERNEL_DS, \ | ||
475 | .ss1 = __KERNEL_CS, \ | ||
476 | .ldt = GDT_ENTRY_LDT, \ | ||
477 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | ||
478 | .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ | ||
479 | } | ||
480 | |||
481 | static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
482 | { | ||
483 | tss->esp0 = thread->esp0; | ||
484 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
485 | if (unlikely(tss->ss1 != thread->sysenter_cs)) { | ||
486 | tss->ss1 = thread->sysenter_cs; | ||
487 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
488 | } | ||
489 | } | ||
490 | |||
491 | #define start_thread(regs, new_eip, new_esp) do { \ | ||
492 | __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ | ||
493 | set_fs(USER_DS); \ | ||
494 | regs->xds = __USER_DS; \ | ||
495 | regs->xes = __USER_DS; \ | ||
496 | regs->xss = __USER_DS; \ | ||
497 | regs->xcs = __USER_CS; \ | ||
498 | regs->eip = new_eip; \ | ||
499 | regs->esp = new_esp; \ | ||
500 | } while (0) | ||
501 | |||
502 | /* Forward declaration, a strange C thing */ | ||
503 | struct task_struct; | ||
504 | struct mm_struct; | ||
505 | |||
506 | /* Free all resources held by a thread. */ | ||
507 | extern void release_thread(struct task_struct *); | ||
508 | |||
509 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
510 | extern void prepare_to_copy(struct task_struct *tsk); | ||
511 | |||
512 | /* | ||
513 | * create a kernel thread without removing it from tasklists | ||
514 | */ | ||
515 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | ||
516 | |||
517 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | ||
518 | void show_trace(struct task_struct *task, unsigned long *stack); | ||
519 | |||
520 | unsigned long get_wchan(struct task_struct *p); | ||
521 | |||
522 | #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) | ||
523 | #define KSTK_TOP(info) \ | ||
524 | ({ \ | ||
525 | unsigned long *__ptr = (unsigned long *)(info); \ | ||
526 | (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ | ||
527 | }) | ||
528 | |||
529 | #define task_pt_regs(task) \ | ||
530 | ({ \ | ||
531 | struct pt_regs *__regs__; \ | ||
532 | __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \ | ||
533 | __regs__ - 1; \ | ||
534 | }) | ||
535 | |||
536 | #define KSTK_EIP(task) (task_pt_regs(task)->eip) | ||
537 | #define KSTK_ESP(task) (task_pt_regs(task)->esp) | ||
538 | |||
539 | |||
540 | struct microcode_header { | ||
541 | unsigned int hdrver; | ||
542 | unsigned int rev; | ||
543 | unsigned int date; | ||
544 | unsigned int sig; | ||
545 | unsigned int cksum; | ||
546 | unsigned int ldrver; | ||
547 | unsigned int pf; | ||
548 | unsigned int datasize; | ||
549 | unsigned int totalsize; | ||
550 | unsigned int reserved[3]; | ||
551 | }; | ||
552 | |||
553 | struct microcode { | ||
554 | struct microcode_header hdr; | ||
555 | unsigned int bits[0]; | ||
556 | }; | ||
557 | |||
558 | typedef struct microcode microcode_t; | ||
559 | typedef struct microcode_header microcode_header_t; | ||
560 | |||
561 | /* microcode format is extended from prescott processors */ | ||
562 | struct extended_signature { | ||
563 | unsigned int sig; | ||
564 | unsigned int pf; | ||
565 | unsigned int cksum; | ||
566 | }; | ||
567 | |||
568 | struct extended_sigtable { | ||
569 | unsigned int count; | ||
570 | unsigned int cksum; | ||
571 | unsigned int reserved[3]; | ||
572 | struct extended_signature sigs[0]; | ||
573 | }; | ||
574 | /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ | ||
575 | #define MICROCODE_IOCFREE _IO('6',0) | ||
576 | |||
577 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
578 | static inline void rep_nop(void) | ||
579 | { | ||
580 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
581 | } | ||
582 | |||
583 | #define cpu_relax() rep_nop() | ||
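/*
 * Editor's illustrative sketch, not part of the original header: cpu_relax()
 * in a polling loop.  The PAUSE hint keeps a spinning hyperthread from
 * starving its sibling and softens the penalty of leaving the wait loop.
 * The flag is a hypothetical variable for the example.
 */
#if 0
	while (!ready_flag)
		cpu_relax();
#endif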
584 | |||
585 | /* generic versions from gas */ | ||
586 | #define GENERIC_NOP1 ".byte 0x90\n" | ||
587 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | ||
588 | #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" | ||
589 | #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" | ||
590 | #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 | ||
591 | #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | ||
592 | #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | ||
593 | #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 | ||
594 | |||
595 | /* Opteron nops */ | ||
596 | #define K8_NOP1 GENERIC_NOP1 | ||
597 | #define K8_NOP2 ".byte 0x66,0x90\n" | ||
598 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" | ||
599 | #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" | ||
600 | #define K8_NOP5 K8_NOP3 K8_NOP2 | ||
601 | #define K8_NOP6 K8_NOP3 K8_NOP3 | ||
602 | #define K8_NOP7 K8_NOP4 K8_NOP3 | ||
603 | #define K8_NOP8 K8_NOP4 K8_NOP4 | ||
604 | |||
605 | /* K7 nops */ | ||
606 | /* uses eax dependencies (arbitrary choice) */ | ||
607 | #define K7_NOP1 GENERIC_NOP1 | ||
608 | #define K7_NOP2 ".byte 0x8b,0xc0\n" | ||
609 | #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" | ||
610 | #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" | ||
611 | #define K7_NOP5 K7_NOP4 ASM_NOP1 | ||
612 | #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" | ||
613 | #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" | ||
614 | #define K7_NOP8 K7_NOP7 ASM_NOP1 | ||
615 | |||
616 | #ifdef CONFIG_MK8 | ||
617 | #define ASM_NOP1 K8_NOP1 | ||
618 | #define ASM_NOP2 K8_NOP2 | ||
619 | #define ASM_NOP3 K8_NOP3 | ||
620 | #define ASM_NOP4 K8_NOP4 | ||
621 | #define ASM_NOP5 K8_NOP5 | ||
622 | #define ASM_NOP6 K8_NOP6 | ||
623 | #define ASM_NOP7 K8_NOP7 | ||
624 | #define ASM_NOP8 K8_NOP8 | ||
625 | #elif defined(CONFIG_MK7) | ||
626 | #define ASM_NOP1 K7_NOP1 | ||
627 | #define ASM_NOP2 K7_NOP2 | ||
628 | #define ASM_NOP3 K7_NOP3 | ||
629 | #define ASM_NOP4 K7_NOP4 | ||
630 | #define ASM_NOP5 K7_NOP5 | ||
631 | #define ASM_NOP6 K7_NOP6 | ||
632 | #define ASM_NOP7 K7_NOP7 | ||
633 | #define ASM_NOP8 K7_NOP8 | ||
634 | #else | ||
635 | #define ASM_NOP1 GENERIC_NOP1 | ||
636 | #define ASM_NOP2 GENERIC_NOP2 | ||
637 | #define ASM_NOP3 GENERIC_NOP3 | ||
638 | #define ASM_NOP4 GENERIC_NOP4 | ||
639 | #define ASM_NOP5 GENERIC_NOP5 | ||
640 | #define ASM_NOP6 GENERIC_NOP6 | ||
641 | #define ASM_NOP7 GENERIC_NOP7 | ||
642 | #define ASM_NOP8 GENERIC_NOP8 | ||
643 | #endif | ||
644 | |||
645 | #define ASM_NOP_MAX 8 | ||
646 | |||
647 | /* Prefetch instructions for Pentium III and AMD Athlon */ | ||
648 | /* It's not worth caring about 3dnow! prefetches for the K6 | ||
649 | because they are microcoded there and very slow. | ||
650 | However, we currently don't do prefetches for pre-XP Athlons; | ||
651 | that should be fixed. */ | ||
652 | #define ARCH_HAS_PREFETCH | ||
653 | extern inline void prefetch(const void *x) | ||
654 | { | ||
655 | alternative_input(ASM_NOP4, | ||
656 | "prefetchnta (%1)", | ||
657 | X86_FEATURE_XMM, | ||
658 | "r" (x)); | ||
659 | } | ||
660 | |||
661 | #define ARCH_HAS_PREFETCH | ||
662 | #define ARCH_HAS_PREFETCHW | ||
663 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
664 | |||
665 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | ||
666 | spinlocks to avoid one state transition in the cache coherency protocol. */ | ||
667 | extern inline void prefetchw(const void *x) | ||
668 | { | ||
669 | alternative_input(ASM_NOP4, | ||
670 | "prefetchw (%1)", | ||
671 | X86_FEATURE_3DNOW, | ||
672 | "r" (x)); | ||
673 | } | ||
674 | #define spin_lock_prefetch(x) prefetchw(x) | ||
675 | |||
676 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | ||
677 | |||
678 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
679 | |||
680 | extern unsigned long boot_option_idle_override; | ||
681 | |||
682 | #endif /* __ASM_I386_PROCESSOR_H */ | ||
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h new file mode 100644 index 000000000000..8618914b3521 --- /dev/null +++ b/include/asm-i386/ptrace.h | |||
@@ -0,0 +1,69 @@ | |||
1 | #ifndef _I386_PTRACE_H | ||
2 | #define _I386_PTRACE_H | ||
3 | |||
4 | #define EBX 0 | ||
5 | #define ECX 1 | ||
6 | #define EDX 2 | ||
7 | #define ESI 3 | ||
8 | #define EDI 4 | ||
9 | #define EBP 5 | ||
10 | #define EAX 6 | ||
11 | #define DS 7 | ||
12 | #define ES 8 | ||
13 | #define FS 9 | ||
14 | #define GS 10 | ||
15 | #define ORIG_EAX 11 | ||
16 | #define EIP 12 | ||
17 | #define CS 13 | ||
18 | #define EFL 14 | ||
19 | #define UESP 15 | ||
20 | #define SS 16 | ||
21 | #define FRAME_SIZE 17 | ||
22 | |||
23 | /* this struct defines the way the registers are stored on the | ||
24 | stack during a system call. */ | ||
25 | |||
26 | struct pt_regs { | ||
27 | long ebx; | ||
28 | long ecx; | ||
29 | long edx; | ||
30 | long esi; | ||
31 | long edi; | ||
32 | long ebp; | ||
33 | long eax; | ||
34 | int xds; | ||
35 | int xes; | ||
36 | long orig_eax; | ||
37 | long eip; | ||
38 | int xcs; | ||
39 | long eflags; | ||
40 | long esp; | ||
41 | int xss; | ||
42 | }; | ||
43 | |||
44 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | ||
45 | #define PTRACE_GETREGS 12 | ||
46 | #define PTRACE_SETREGS 13 | ||
47 | #define PTRACE_GETFPREGS 14 | ||
48 | #define PTRACE_SETFPREGS 15 | ||
49 | #define PTRACE_GETFPXREGS 18 | ||
50 | #define PTRACE_SETFPXREGS 19 | ||
51 | |||
52 | #define PTRACE_OLDSETOPTIONS 21 | ||
53 | |||
54 | #define PTRACE_GET_THREAD_AREA 25 | ||
55 | #define PTRACE_SET_THREAD_AREA 26 | ||
56 | |||
57 | #ifdef __KERNEL__ | ||
58 | struct task_struct; | ||
59 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); | ||
60 | #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) | ||
61 | #define instruction_pointer(regs) ((regs)->eip) | ||
62 | #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER) | ||
63 | extern unsigned long profile_pc(struct pt_regs *regs); | ||
64 | #else | ||
65 | #define profile_pc(regs) instruction_pointer(regs) | ||
66 | #endif | ||
67 | #endif | ||
68 | |||
69 | #endif | ||
diff --git a/include/asm-i386/resource.h b/include/asm-i386/resource.h new file mode 100644 index 000000000000..6c1ea37c7718 --- /dev/null +++ b/include/asm-i386/resource.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _I386_RESOURCE_H | ||
2 | #define _I386_RESOURCE_H | ||
3 | |||
4 | #include <asm-generic/resource.h> | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-i386/rtc.h b/include/asm-i386/rtc.h new file mode 100644 index 000000000000..ffd02109a0e5 --- /dev/null +++ b/include/asm-i386/rtc.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _I386_RTC_H | ||
2 | #define _I386_RTC_H | ||
3 | |||
4 | /* | ||
5 | * x86 uses the default access methods for the RTC. | ||
6 | */ | ||
7 | |||
8 | #include <asm-generic/rtc.h> | ||
9 | |||
10 | #endif | ||
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h new file mode 100644 index 000000000000..b57cc7afdf7e --- /dev/null +++ b/include/asm-i386/rwlock.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* include/asm-i386/rwlock.h | ||
2 | * | ||
3 | * Helpers used by both rw spinlocks and rw semaphores. | ||
4 | * | ||
5 | * Based in part on code from semaphore.h and | ||
6 | * spinlock.h Copyright 1996 Linus Torvalds. | ||
7 | * | ||
8 | * Copyright 1999 Red Hat, Inc. | ||
9 | * | ||
10 | * Written by Benjamin LaHaise. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | #ifndef _ASM_I386_RWLOCK_H | ||
18 | #define _ASM_I386_RWLOCK_H | ||
19 | |||
20 | #define RW_LOCK_BIAS 0x01000000 | ||
21 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
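/*
 * Editor's note, derived from the helpers below: each reader takes the lock
 * by subtracting 1 from the count and proceeds only if the result stays
 * non-negative, while a writer subtracts the whole RW_LOCK_BIAS and proceeds
 * only if the result is exactly zero (no readers or writers held the lock).
 * Any other outcome falls through to the named slow-path helper.
 */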
22 | |||
23 | #define __build_read_lock_ptr(rw, helper) \ | ||
24 | asm volatile(LOCK "subl $1,(%0)\n\t" \ | ||
25 | "jns 1f\n" \ | ||
26 | "call " helper "\n\t" \ | ||
27 | "1:\n" \ | ||
28 | ::"a" (rw) : "memory") | ||
29 | |||
30 | #define __build_read_lock_const(rw, helper) \ | ||
31 | asm volatile(LOCK "subl $1,%0\n\t" \ | ||
32 | "jns 1f\n" \ | ||
33 | "pushl %%eax\n\t" \ | ||
34 | "leal %0,%%eax\n\t" \ | ||
35 | "call " helper "\n\t" \ | ||
36 | "popl %%eax\n\t" \ | ||
37 | "1:\n" \ | ||
38 | :"=m" (*(volatile int *)rw) : : "memory") | ||
39 | |||
40 | #define __build_read_lock(rw, helper) do { \ | ||
41 | if (__builtin_constant_p(rw)) \ | ||
42 | __build_read_lock_const(rw, helper); \ | ||
43 | else \ | ||
44 | __build_read_lock_ptr(rw, helper); \ | ||
45 | } while (0) | ||
46 | |||
47 | #define __build_write_lock_ptr(rw, helper) \ | ||
48 | asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ | ||
49 | "jz 1f\n" \ | ||
50 | "call " helper "\n\t" \ | ||
51 | "1:\n" \ | ||
52 | ::"a" (rw) : "memory") | ||
53 | |||
54 | #define __build_write_lock_const(rw, helper) \ | ||
55 | asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ | ||
56 | "jz 1f\n" \ | ||
57 | "pushl %%eax\n\t" \ | ||
58 | "leal %0,%%eax\n\t" \ | ||
59 | "call " helper "\n\t" \ | ||
60 | "popl %%eax\n\t" \ | ||
61 | "1:\n" \ | ||
62 | :"=m" (*(volatile int *)rw) : : "memory") | ||
63 | |||
64 | #define __build_write_lock(rw, helper) do { \ | ||
65 | if (__builtin_constant_p(rw)) \ | ||
66 | __build_write_lock_const(rw, helper); \ | ||
67 | else \ | ||
68 | __build_write_lock_ptr(rw, helper); \ | ||
69 | } while (0) | ||
70 | |||
71 | #endif | ||
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h new file mode 100644 index 000000000000..7625a675852f --- /dev/null +++ b/include/asm-i386/rwsem.h | |||
@@ -0,0 +1,288 @@ | |||
1 | /* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ | ||
2 | * | ||
3 | * Written by David Howells (dhowells@redhat.com). | ||
4 | * | ||
5 | * Derived from asm-i386/semaphore.h | ||
6 | * | ||
7 | * | ||
8 | * The MSW of the count is the negated number of active writers and waiting | ||
9 | * lockers, and the LSW is the total number of active locks | ||
10 | * | ||
11 | * The lock count is initialized to 0 (no active and no waiting lockers). | ||
12 | * | ||
13 | * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an | ||
14 | * uncontended lock. This can be determined because XADD returns the old value. | ||
15 | * Readers increment by 1 and see a positive value when uncontended, negative | ||
16 | * if there are writers and (possibly) readers waiting (in which case the | ||
17 | * reader goes to sleep). | ||
18 | * | ||
19 | * The value of WAITING_BIAS supports up to 32766 waiting processes. This can | ||
20 | * be extended to 65534 by manually checking the whole MSW rather than relying | ||
21 | * on the S flag. | ||
22 | * | ||
23 | * The value of ACTIVE_BIAS supports up to 65535 active processes. | ||
24 | * | ||
25 | * This should be totally fair - if anything is waiting, a process that wants a | ||
26 | * lock will go to the back of the queue. When the currently active lock is | ||
27 | * released, if there's a writer at the front of the queue, then that and only | ||
28 | * that will be woken up; if there's a bunch of consecutive readers at the | ||
29 | * front, then they'll all be woken up, but no other readers will be. | ||
30 | */ | ||
31 | |||
32 | #ifndef _I386_RWSEM_H | ||
33 | #define _I386_RWSEM_H | ||
34 | |||
35 | #ifndef _LINUX_RWSEM_H | ||
36 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" | ||
37 | #endif | ||
38 | |||
39 | #ifdef __KERNEL__ | ||
40 | |||
41 | #include <linux/list.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | |||
44 | struct rwsem_waiter; | ||
45 | |||
46 | extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); | ||
47 | extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); | ||
48 | extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); | ||
49 | extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); | ||
50 | |||
51 | /* | ||
52 | * the semaphore definition | ||
53 | */ | ||
54 | struct rw_semaphore { | ||
55 | signed long count; | ||
56 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | ||
57 | #define RWSEM_ACTIVE_BIAS 0x00000001 | ||
58 | #define RWSEM_ACTIVE_MASK 0x0000ffff | ||
59 | #define RWSEM_WAITING_BIAS (-0x00010000) | ||
60 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | ||
61 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | ||
62 | spinlock_t wait_lock; | ||
63 | struct list_head wait_list; | ||
64 | #if RWSEM_DEBUG | ||
65 | int debug; | ||
66 | #endif | ||
67 | }; | ||
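/*
 * Editor's note, derived from the definitions above - a few concrete count
 * values:
 *
 *   0x00000000  unlocked
 *   0x00000001  one active reader               (+= RWSEM_ACTIVE_READ_BIAS)
 *   0xffff0001  one active writer, none waiting (+= RWSEM_ACTIVE_WRITE_BIAS)
 *
 * A reader's increment leaves a negative count exactly when the MSW is
 * non-zero, i.e. when a writer is active or lockers are queued, and the
 * reader then takes the slow path and sleeps.
 */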
68 | |||
69 | /* | ||
70 | * initialisation | ||
71 | */ | ||
72 | #if RWSEM_DEBUG | ||
73 | #define __RWSEM_DEBUG_INIT , 0 | ||
74 | #else | ||
75 | #define __RWSEM_DEBUG_INIT /* */ | ||
76 | #endif | ||
77 | |||
78 | #define __RWSEM_INITIALIZER(name) \ | ||
79 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ | ||
80 | __RWSEM_DEBUG_INIT } | ||
81 | |||
82 | #define DECLARE_RWSEM(name) \ | ||
83 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
84 | |||
85 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
86 | { | ||
87 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
88 | spin_lock_init(&sem->wait_lock); | ||
89 | INIT_LIST_HEAD(&sem->wait_list); | ||
90 | #if RWSEM_DEBUG | ||
91 | sem->debug = 0; | ||
92 | #endif | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * lock for reading | ||
97 | */ | ||
98 | static inline void __down_read(struct rw_semaphore *sem) | ||
99 | { | ||
100 | __asm__ __volatile__( | ||
101 | "# beginning down_read\n\t" | ||
102 | LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ | ||
103 | " js 2f\n\t" /* jump if we weren't granted the lock */ | ||
104 | "1:\n\t" | ||
105 | LOCK_SECTION_START("") | ||
106 | "2:\n\t" | ||
107 | " pushl %%ecx\n\t" | ||
108 | " pushl %%edx\n\t" | ||
109 | " call rwsem_down_read_failed\n\t" | ||
110 | " popl %%edx\n\t" | ||
111 | " popl %%ecx\n\t" | ||
112 | " jmp 1b\n" | ||
113 | LOCK_SECTION_END | ||
114 | "# ending down_read\n\t" | ||
115 | : "=m"(sem->count) | ||
116 | : "a"(sem), "m"(sem->count) | ||
117 | : "memory", "cc"); | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * trylock for reading -- returns 1 if successful, 0 if contention | ||
122 | */ | ||
123 | static inline int __down_read_trylock(struct rw_semaphore *sem) | ||
124 | { | ||
125 | __s32 result, tmp; | ||
126 | __asm__ __volatile__( | ||
127 | "# beginning __down_read_trylock\n\t" | ||
128 | " movl %0,%1\n\t" | ||
129 | "1:\n\t" | ||
130 | " movl %1,%2\n\t" | ||
131 | " addl %3,%2\n\t" | ||
132 | " jle 2f\n\t" | ||
133 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" | ||
134 | " jnz 1b\n\t" | ||
135 | "2:\n\t" | ||
136 | "# ending __down_read_trylock\n\t" | ||
137 | : "+m"(sem->count), "=&a"(result), "=&r"(tmp) | ||
138 | : "i"(RWSEM_ACTIVE_READ_BIAS) | ||
139 | : "memory", "cc"); | ||
140 | return result>=0 ? 1 : 0; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * lock for writing | ||
145 | */ | ||
146 | static inline void __down_write(struct rw_semaphore *sem) | ||
147 | { | ||
148 | int tmp; | ||
149 | |||
150 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | ||
151 | __asm__ __volatile__( | ||
152 | "# beginning down_write\n\t" | ||
153 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ | ||
154 | " testl %%edx,%%edx\n\t" /* was the count 0 before? */ | ||
155 | " jnz 2f\n\t" /* jump if we weren't granted the lock */ | ||
156 | "1:\n\t" | ||
157 | LOCK_SECTION_START("") | ||
158 | "2:\n\t" | ||
159 | " pushl %%ecx\n\t" | ||
160 | " call rwsem_down_write_failed\n\t" | ||
161 | " popl %%ecx\n\t" | ||
162 | " jmp 1b\n" | ||
163 | LOCK_SECTION_END | ||
164 | "# ending down_write" | ||
165 | : "=m"(sem->count), "=d"(tmp) | ||
166 | : "a"(sem), "1"(tmp), "m"(sem->count) | ||
167 | : "memory", "cc"); | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * trylock for writing -- returns 1 if successful, 0 if contention | ||
172 | */ | ||
173 | static inline int __down_write_trylock(struct rw_semaphore *sem) | ||
174 | { | ||
175 | signed long ret = cmpxchg(&sem->count, | ||
176 | RWSEM_UNLOCKED_VALUE, | ||
177 | RWSEM_ACTIVE_WRITE_BIAS); | ||
178 | if (ret == RWSEM_UNLOCKED_VALUE) | ||
179 | return 1; | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * unlock after reading | ||
185 | */ | ||
186 | static inline void __up_read(struct rw_semaphore *sem) | ||
187 | { | ||
188 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; | ||
189 | __asm__ __volatile__( | ||
190 | "# beginning __up_read\n\t" | ||
191 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ | ||
192 | " js 2f\n\t" /* jump if the lock is being waited upon */ | ||
193 | "1:\n\t" | ||
194 | LOCK_SECTION_START("") | ||
195 | "2:\n\t" | ||
196 | " decw %%dx\n\t" /* do nothing if still outstanding active readers */ | ||
197 | " jnz 1b\n\t" | ||
198 | " pushl %%ecx\n\t" | ||
199 | " call rwsem_wake\n\t" | ||
200 | " popl %%ecx\n\t" | ||
201 | " jmp 1b\n" | ||
202 | LOCK_SECTION_END | ||
203 | "# ending __up_read\n" | ||
204 | : "=m"(sem->count), "=d"(tmp) | ||
205 | : "a"(sem), "1"(tmp), "m"(sem->count) | ||
206 | : "memory", "cc"); | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * unlock after writing | ||
211 | */ | ||
212 | static inline void __up_write(struct rw_semaphore *sem) | ||
213 | { | ||
214 | __asm__ __volatile__( | ||
215 | "# beginning __up_write\n\t" | ||
216 | " movl %2,%%edx\n\t" | ||
217 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ | ||
218 | " jnz 2f\n\t" /* jump if the lock is being waited upon */ | ||
219 | "1:\n\t" | ||
220 | LOCK_SECTION_START("") | ||
221 | "2:\n\t" | ||
222 | " decw %%dx\n\t" /* did the active count reduce to 0? */ | ||
223 | " jnz 1b\n\t" /* jump back if not */ | ||
224 | " pushl %%ecx\n\t" | ||
225 | " call rwsem_wake\n\t" | ||
226 | " popl %%ecx\n\t" | ||
227 | " jmp 1b\n" | ||
228 | LOCK_SECTION_END | ||
229 | "# ending __up_write\n" | ||
230 | : "=m"(sem->count) | ||
231 | : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count) | ||
232 | : "memory", "cc", "edx"); | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * downgrade write lock to read lock | ||
237 | */ | ||
238 | static inline void __downgrade_write(struct rw_semaphore *sem) | ||
239 | { | ||
240 | __asm__ __volatile__( | ||
241 | "# beginning __downgrade_write\n\t" | ||
242 | LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ | ||
243 | " js 2f\n\t" /* jump if the lock is being waited upon */ | ||
244 | "1:\n\t" | ||
245 | LOCK_SECTION_START("") | ||
246 | "2:\n\t" | ||
247 | " pushl %%ecx\n\t" | ||
248 | " pushl %%edx\n\t" | ||
249 | " call rwsem_downgrade_wake\n\t" | ||
250 | " popl %%edx\n\t" | ||
251 | " popl %%ecx\n\t" | ||
252 | " jmp 1b\n" | ||
253 | LOCK_SECTION_END | ||
254 | "# ending __downgrade_write\n" | ||
255 | : "=m"(sem->count) | ||
256 | : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count) | ||
257 | : "memory", "cc"); | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * implement atomic add functionality | ||
262 | */ | ||
263 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | ||
264 | { | ||
265 | __asm__ __volatile__( | ||
266 | LOCK_PREFIX "addl %1,%0" | ||
267 | : "=m"(sem->count) | ||
268 | : "ir"(delta), "m"(sem->count)); | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * implement exchange and add functionality | ||
273 | */ | ||
274 | static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | ||
275 | { | ||
276 | int tmp = delta; | ||
277 | |||
278 | __asm__ __volatile__( | ||
279 | LOCK_PREFIX "xadd %0,(%2)" | ||
280 | : "+r"(tmp), "=m"(sem->count) | ||
281 | : "r"(sem), "m"(sem->count) | ||
282 | : "memory"); | ||
283 | |||
284 | return tmp+delta; | ||
285 | } | ||
286 | |||
287 | #endif /* __KERNEL__ */ | ||
288 | #endif /* _I386_RWSEM_H */ | ||
diff --git a/include/asm-i386/scatterlist.h b/include/asm-i386/scatterlist.h new file mode 100644 index 000000000000..55d6c953a76e --- /dev/null +++ b/include/asm-i386/scatterlist.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _I386_SCATTERLIST_H | ||
2 | #define _I386_SCATTERLIST_H | ||
3 | |||
4 | struct scatterlist { | ||
5 | struct page *page; | ||
6 | unsigned int offset; | ||
7 | dma_addr_t dma_address; | ||
8 | unsigned int length; | ||
9 | }; | ||
10 | |||
11 | /* These macros should be used after a pci_map_sg call has been done | ||
12 | * to get bus addresses of each of the SG entries and their lengths. | ||
13 | * You should only work with the number of sg entries pci_map_sg | ||
14 | * returns. | ||
15 | */ | ||
16 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
17 | #define sg_dma_len(sg) ((sg)->length) | ||
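/*
 * Editor's illustrative sketch, not part of the original header: consuming
 * the bus addresses produced by pci_map_sg().  Only the 'count' entries that
 * pci_map_sg() returned are valid, which may be fewer than were passed in.
 * hw_program_descriptor() is a hypothetical driver routine.
 */
#if 0
	int i, count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < count; i++)
		hw_program_descriptor(dev, sg_dma_address(&sg[i]),
				      sg_dma_len(&sg[i]));
#endif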
18 | |||
19 | #define ISA_DMA_THRESHOLD (0x00ffffff) | ||
20 | |||
21 | #endif /* !(_I386_SCATTERLIST_H) */ | ||
diff --git a/include/asm-i386/seccomp.h b/include/asm-i386/seccomp.h new file mode 100644 index 000000000000..18da19e89bff --- /dev/null +++ b/include/asm-i386/seccomp.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_SECCOMP_H | ||
2 | |||
3 | #include <linux/thread_info.h> | ||
4 | |||
5 | #ifdef TIF_32BIT | ||
6 | #error "unexpected TIF_32BIT on i386" | ||
7 | #endif | ||
8 | |||
9 | #include <linux/unistd.h> | ||
10 | |||
11 | #define __NR_seccomp_read __NR_read | ||
12 | #define __NR_seccomp_write __NR_write | ||
13 | #define __NR_seccomp_exit __NR_exit | ||
14 | #define __NR_seccomp_sigreturn __NR_sigreturn | ||
15 | |||
16 | #endif /* _ASM_SECCOMP_H */ | ||
diff --git a/include/asm-i386/sections.h b/include/asm-i386/sections.h new file mode 100644 index 000000000000..2dcbb92918b2 --- /dev/null +++ b/include/asm-i386/sections.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _I386_SECTIONS_H | ||
2 | #define _I386_SECTIONS_H | ||
3 | |||
4 | /* nothing to see, move along */ | ||
5 | #include <asm-generic/sections.h> | ||
6 | |||
7 | #endif | ||
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h new file mode 100644 index 000000000000..bb5ff5b2c02e --- /dev/null +++ b/include/asm-i386/segment.h | |||
@@ -0,0 +1,101 @@ | |||
1 | #ifndef _ASM_SEGMENT_H | ||
2 | #define _ASM_SEGMENT_H | ||
3 | |||
4 | /* | ||
5 | * The layout of the per-CPU GDT under Linux: | ||
6 | * | ||
7 | * 0 - null | ||
8 | * 1 - reserved | ||
9 | * 2 - reserved | ||
10 | * 3 - reserved | ||
11 | * | ||
12 | * 4 - unused <==== new cacheline | ||
13 | * 5 - unused | ||
14 | * | ||
15 | * ------- start of TLS (Thread-Local Storage) segments: | ||
16 | * | ||
17 | * 6 - TLS segment #1 [ glibc's TLS segment ] | ||
18 | * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] | ||
19 | * 8 - TLS segment #3 | ||
20 | * 9 - reserved | ||
21 | * 10 - reserved | ||
22 | * 11 - reserved | ||
23 | * | ||
24 | * ------- start of kernel segments: | ||
25 | * | ||
26 | * 12 - kernel code segment <==== new cacheline | ||
27 | * 13 - kernel data segment | ||
28 | * 14 - default user CS | ||
29 | * 15 - default user DS | ||
30 | * 16 - TSS | ||
31 | * 17 - LDT | ||
32 | * 18 - PNPBIOS support (16->32 gate) | ||
33 | * 19 - PNPBIOS support | ||
34 | * 20 - PNPBIOS support | ||
35 | * 21 - PNPBIOS support | ||
36 | * 22 - PNPBIOS support | ||
37 | * 23 - APM BIOS support | ||
38 | * 24 - APM BIOS support | ||
39 | * 25 - APM BIOS support | ||
40 | * | ||
41 | * 26 - ESPFIX small SS | ||
42 | * 27 - unused | ||
43 | * 28 - unused | ||
44 | * 29 - unused | ||
45 | * 30 - unused | ||
46 | * 31 - TSS for double fault handler | ||
47 | */ | ||
48 | #define GDT_ENTRY_TLS_ENTRIES 3 | ||
49 | #define GDT_ENTRY_TLS_MIN 6 | ||
50 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) | ||
51 | |||
52 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | ||
53 | |||
54 | #define GDT_ENTRY_DEFAULT_USER_CS 14 | ||
55 | #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) | ||
56 | |||
57 | #define GDT_ENTRY_DEFAULT_USER_DS 15 | ||
58 | #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) | ||
59 | |||
60 | #define GDT_ENTRY_KERNEL_BASE 12 | ||
61 | |||
62 | #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) | ||
63 | #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) | ||
64 | |||
65 | #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) | ||
66 | #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) | ||
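/*
 * Editor's note, derived from the definitions above: a selector value is
 * (GDT index * 8) | RPL, so these work out to
 *
 *   __KERNEL_CS = 12 * 8     = 0x60
 *   __KERNEL_DS = 13 * 8     = 0x68
 *   __USER_CS   = 14 * 8 + 3 = 0x73
 *   __USER_DS   = 15 * 8 + 3 = 0x7b
 *
 * The "+ 3" in the user selectors is the requested privilege level (ring 3).
 */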
67 | |||
68 | #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) | ||
69 | #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) | ||
70 | |||
71 | #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) | ||
72 | #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) | ||
73 | |||
74 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) | ||
75 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) | ||
76 | |||
77 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 | ||
78 | |||
79 | /* | ||
80 | * The GDT has 32 entries | ||
81 | */ | ||
82 | #define GDT_ENTRIES 32 | ||
83 | |||
84 | #define GDT_SIZE (GDT_ENTRIES * 8) | ||
85 | |||
86 | /* Simple and small GDT entries for booting only */ | ||
87 | |||
88 | #define GDT_ENTRY_BOOT_CS 2 | ||
89 | #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) | ||
90 | |||
91 | #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) | ||
92 | #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) | ||
93 | |||
94 | /* | ||
95 | * The interrupt descriptor table has room for 256 entries, | ||
96 | * while the size of the global descriptor table depends on | ||
97 | * the number of tasks we can have. | ||
98 | */ | ||
99 | #define IDT_ENTRIES 256 | ||
100 | |||
101 | #endif | ||
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h new file mode 100644 index 000000000000..ea563da63e24 --- /dev/null +++ b/include/asm-i386/semaphore.h | |||
@@ -0,0 +1,194 @@ | |||
1 | #ifndef _I386_SEMAPHORE_H | ||
2 | #define _I386_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | /* | ||
9 | * SMP- and interrupt-safe semaphores.. | ||
10 | * | ||
11 | * (C) Copyright 1996 Linus Torvalds | ||
12 | * | ||
13 | * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in | ||
14 | * the original code and to make semaphore waits | ||
15 | * interruptible so that processes waiting on | ||
16 | * semaphores can be killed. | ||
17 | * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper | ||
18 | * functions in asm/semaphore-helper.h while fixing a | ||
19 | * potential and subtle race discovered by Ulrich Schmid | ||
20 | * in down_interruptible(). Since I started to play here I | ||
21 | * also implemented the `trylock' semaphore operation. | ||
22 | * 1999-07-02 Artur Skawina <skawina@geocities.com> | ||
23 | * Optimized "0(ecx)" -> "(ecx)" (the assembler does not | ||
24 | * do this). Changed calling sequences from push/jmp to | ||
25 | * traditional call/ret. | ||
26 | * Modified 2001-01-01 Andreas Franck <afranck@gmx.de> | ||
27 | * Some hacks to ensure compatibility with recent | ||
28 | * GCC snapshots, to avoid stack corruption when compiling | ||
29 | * with -fomit-frame-pointer. It's not clear whether this will | ||
30 | * be fixed in GCC, as our previous implementation was a | ||
31 | * bit dubious. | ||
32 | * | ||
33 | * If you would like to see an analysis of this implementation, please | ||
34 | * ftp to gcom.com and download the file | ||
35 | * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. | ||
36 | * | ||
37 | */ | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/atomic.h> | ||
41 | #include <linux/wait.h> | ||
42 | #include <linux/rwsem.h> | ||
43 | |||
44 | struct semaphore { | ||
45 | atomic_t count; | ||
46 | int sleepers; | ||
47 | wait_queue_head_t wait; | ||
48 | }; | ||
49 | |||
50 | |||
51 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
52 | { \ | ||
53 | .count = ATOMIC_INIT(n), \ | ||
54 | .sleepers = 0, \ | ||
55 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
56 | } | ||
57 | |||
58 | #define __MUTEX_INITIALIZER(name) \ | ||
59 | __SEMAPHORE_INITIALIZER(name,1) | ||
60 | |||
61 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
62 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
63 | |||
64 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
65 | #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) | ||
66 | |||
67 | static inline void sema_init (struct semaphore *sem, int val) | ||
68 | { | ||
69 | /* | ||
70 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
71 | * | ||
72 | * I'd rather use the more flexible initialization above, but sadly | ||
73 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
74 | */ | ||
75 | atomic_set(&sem->count, val); | ||
76 | sem->sleepers = 0; | ||
77 | init_waitqueue_head(&sem->wait); | ||
78 | } | ||
79 | |||
80 | static inline void init_MUTEX (struct semaphore *sem) | ||
81 | { | ||
82 | sema_init(sem, 1); | ||
83 | } | ||
84 | |||
85 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
86 | { | ||
87 | sema_init(sem, 0); | ||
88 | } | ||
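/*
 * Editor's illustrative sketch, not part of the original header: the classic
 * mutex pattern built from these primitives.  The semaphore name, the
 * function and the protected state are all hypothetical.
 */
#if 0
static DECLARE_MUTEX(example_sem);

static int example_update(void)
{
	int ret = down_interruptible(&example_sem);
	if (ret)
		return ret;		/* interrupted by a signal */
	/* ... touch the state protected by example_sem ... */
	up(&example_sem);
	return 0;
}
#endif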
89 | |||
90 | fastcall void __down_failed(void /* special register calling convention */); | ||
91 | fastcall int __down_failed_interruptible(void /* params in registers */); | ||
92 | fastcall int __down_failed_trylock(void /* params in registers */); | ||
93 | fastcall void __up_wakeup(void /* special register calling convention */); | ||
94 | |||
95 | /* | ||
96 | * This is ugly, but we want the default case to fall through. | ||
97 | * "__down_failed" is a special asm handler that calls the C | ||
98 | * routine that actually waits. See arch/i386/kernel/semaphore.c | ||
99 | */ | ||
100 | static inline void down(struct semaphore * sem) | ||
101 | { | ||
102 | might_sleep(); | ||
103 | __asm__ __volatile__( | ||
104 | "# atomic down operation\n\t" | ||
105 | LOCK "decl %0\n\t" /* --sem->count */ | ||
106 | "js 2f\n" | ||
107 | "1:\n" | ||
108 | LOCK_SECTION_START("") | ||
109 | "2:\tlea %0,%%eax\n\t" | ||
110 | "call __down_failed\n\t" | ||
111 | "jmp 1b\n" | ||
112 | LOCK_SECTION_END | ||
113 | :"=m" (sem->count) | ||
114 | : | ||
115 | :"memory","ax"); | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * Interruptible attempt to acquire a semaphore. Returns zero | ||
120 | * if we obtained it, and -EINTR if we were interrupted. | ||
121 | */ | ||
122 | static inline int down_interruptible(struct semaphore * sem) | ||
123 | { | ||
124 | int result; | ||
125 | |||
126 | might_sleep(); | ||
127 | __asm__ __volatile__( | ||
128 | "# atomic interruptible down operation\n\t" | ||
129 | LOCK "decl %1\n\t" /* --sem->count */ | ||
130 | "js 2f\n\t" | ||
131 | "xorl %0,%0\n" | ||
132 | "1:\n" | ||
133 | LOCK_SECTION_START("") | ||
134 | "2:\tlea %1,%%eax\n\t" | ||
135 | "call __down_failed_interruptible\n\t" | ||
136 | "jmp 1b\n" | ||
137 | LOCK_SECTION_END | ||
138 | :"=a" (result), "=m" (sem->count) | ||
139 | : | ||
140 | :"memory"); | ||
141 | return result; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Non-blocking attempt to down() a semaphore. | ||
146 | * Returns zero if we acquired it. | ||
147 | */ | ||
148 | static inline int down_trylock(struct semaphore * sem) | ||
149 | { | ||
150 | int result; | ||
151 | |||
152 | __asm__ __volatile__( | ||
153 | "# atomic interruptible down operation\n\t" | ||
154 | LOCK "decl %1\n\t" /* --sem->count */ | ||
155 | "js 2f\n\t" | ||
156 | "xorl %0,%0\n" | ||
157 | "1:\n" | ||
158 | LOCK_SECTION_START("") | ||
159 | "2:\tlea %1,%%eax\n\t" | ||
160 | "call __down_failed_trylock\n\t" | ||
161 | "jmp 1b\n" | ||
162 | LOCK_SECTION_END | ||
163 | :"=a" (result), "=m" (sem->count) | ||
164 | : | ||
165 | :"memory"); | ||
166 | return result; | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Note! This is subtle. We jump to wake people up only if | ||
171 | * the semaphore was negative (== somebody was waiting on it). | ||
172 | * The default case (no contention) will result in NO | ||
173 | * jumps for both down() and up(). | ||
174 | */ | ||
175 | static inline void up(struct semaphore * sem) | ||
176 | { | ||
177 | __asm__ __volatile__( | ||
178 | "# atomic up operation\n\t" | ||
179 | LOCK "incl %0\n\t" /* ++sem->count */ | ||
180 | "jle 2f\n" | ||
181 | "1:\n" | ||
182 | LOCK_SECTION_START("") | ||
183 | "2:\tlea %0,%%eax\n\t" | ||
184 | "call __up_wakeup\n\t" | ||
185 | "jmp 1b\n" | ||
186 | LOCK_SECTION_END | ||
187 | ".subsection 0\n" | ||
188 | :"=m" (sem->count) | ||
189 | : | ||
190 | :"memory","ax"); | ||
191 | } | ||
192 | |||
193 | #endif | ||
194 | #endif | ||
diff --git a/include/asm-i386/sembuf.h b/include/asm-i386/sembuf.h new file mode 100644 index 000000000000..323835166c14 --- /dev/null +++ b/include/asm-i386/sembuf.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef _I386_SEMBUF_H | ||
2 | #define _I386_SEMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The semid64_ds structure for i386 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct semid64_ds { | ||
15 | struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ | ||
16 | __kernel_time_t sem_otime; /* last semop time */ | ||
17 | unsigned long __unused1; | ||
18 | __kernel_time_t sem_ctime; /* last change time */ | ||
19 | unsigned long __unused2; | ||
20 | unsigned long sem_nsems; /* no. of semaphores in array */ | ||
21 | unsigned long __unused3; | ||
22 | unsigned long __unused4; | ||
23 | }; | ||
24 | |||
25 | #endif /* _I386_SEMBUF_H */ | ||
diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h new file mode 100644 index 000000000000..21ddecc77c77 --- /dev/null +++ b/include/asm-i386/serial.h | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * include/asm-i386/serial.h | ||
3 | */ | ||
4 | |||
5 | #include <linux/config.h> | ||
6 | |||
7 | /* | ||
8 | * This assumes you have a 1.8432 MHz clock for your UART. | ||
9 | * | ||
10 | * It'd be nice if someone built a serial card with a 24.576 MHz | ||
11 | * clock, since the 16550A is capable of handling a top speed of 1.5 | ||
12 | * megabits/second; but this requires the faster clock. | ||
13 | */ | ||
14 | #define BASE_BAUD ( 1843200 / 16 ) | ||
15 | |||
16 | /* Standard COM flags (except for COM4, because of the 8514 problem) */ | ||
17 | #ifdef CONFIG_SERIAL_DETECT_IRQ | ||
18 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) | ||
19 | #define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) | ||
20 | #else | ||
21 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) | ||
22 | #define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF | ||
23 | #endif | ||
24 | |||
25 | #ifdef CONFIG_SERIAL_MANY_PORTS | ||
26 | #define FOURPORT_FLAGS ASYNC_FOURPORT | ||
27 | #define ACCENT_FLAGS 0 | ||
28 | #define BOCA_FLAGS 0 | ||
29 | #define HUB6_FLAGS 0 | ||
30 | #endif | ||
31 | |||
32 | #define MCA_COM_FLAGS (STD_COM_FLAGS|ASYNC_BOOT_ONLYMCA) | ||
33 | |||
34 | /* | ||
35 | * The following define the access methods for the HUB6 card. All | ||
36 | * access is through two ports for all 24 possible chips. The card is | ||
37 | * selected through the high 2 bits, the port on that card with the | ||
38 | * "middle" 3 bits, and the register on that port with the bottom | ||
39 | * 3 bits. | ||
40 | * | ||
41 | * While the access port and interrupt is configurable, the default | ||
42 | * port locations are 0x302 for the port control register, and 0x303 | ||
43 | * for the data read/write register. Normally, the interrupt is at irq3 | ||
44 | * but can be anything from 3 to 7 inclusive. Note that using 3 will | ||
45 | * require disabling com2. | ||
46 | */ | ||
47 | |||
48 | #define C_P(card,port) (((card)<<6|(port)<<3) + 1) | ||
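/*
 * Editor's note, a worked example of the macro above: C_P(card,port) packs
 * the card number into bits 7-6 and the port number into bits 5-3, plus 1 to
 * mark the entry as a HUB6 port.  So C_P(1,2) = ((1 << 6) | (2 << 3)) + 1 =
 * 0x51, i.e. port 2 on the second card, accessed through 0x302/0x303.
 */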
49 | |||
50 | #define STD_SERIAL_PORT_DEFNS \ | ||
51 | /* UART CLK PORT IRQ FLAGS */ \ | ||
52 | { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ | ||
53 | { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ | ||
54 | { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ | ||
55 | { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ | ||
56 | |||
57 | |||
58 | #ifdef CONFIG_SERIAL_MANY_PORTS | ||
59 | #define EXTRA_SERIAL_PORT_DEFNS \ | ||
60 | { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \ | ||
61 | { 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \ | ||
62 | { 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \ | ||
63 | { 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS }, /* ttyS7 */ \ | ||
64 | { 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS }, /* ttyS8 */ \ | ||
65 | { 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS }, /* ttyS9 */ \ | ||
66 | { 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS }, /* ttyS10 */ \ | ||
67 | { 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \ | ||
68 | { 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \ | ||
69 | { 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \ | ||
70 | { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \ | ||
71 | { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \ | ||
72 | { 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \ | ||
73 | { 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \ | ||
74 | { 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \ | ||
75 | { 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS }, /* ttyS19 */ \ | ||
76 | { 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS }, /* ttyS20 */ \ | ||
77 | { 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS }, /* ttyS21 */ \ | ||
78 | { 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS }, /* ttyS22 */ \ | ||
79 | { 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS }, /* ttyS23 */ \ | ||
80 | { 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS }, /* ttyS24 */ \ | ||
81 | { 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS }, /* ttyS25 */ \ | ||
82 | { 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS }, /* ttyS26 */ \ | ||
83 | { 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS }, /* ttyS27 */ \ | ||
84 | { 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS }, /* ttyS28 */ \ | ||
85 | { 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \ | ||
86 | { 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \ | ||
87 | { 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */ | ||
88 | #else | ||
89 | #define EXTRA_SERIAL_PORT_DEFNS | ||
90 | #endif | ||
91 | |||
92 | /* You can have up to four HUB6's in the system, but I've only | ||
93 | * included two cards here for a total of twelve ports. | ||
94 | */ | ||
95 | #if (defined(CONFIG_HUB6) && defined(CONFIG_SERIAL_MANY_PORTS)) | ||
96 | #define HUB6_SERIAL_PORT_DFNS \ | ||
97 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,0) }, /* ttyS32 */ \ | ||
98 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,1) }, /* ttyS33 */ \ | ||
99 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,2) }, /* ttyS34 */ \ | ||
100 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,3) }, /* ttyS35 */ \ | ||
101 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,4) }, /* ttyS36 */ \ | ||
102 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(0,5) }, /* ttyS37 */ \ | ||
103 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,0) }, /* ttyS38 */ \ | ||
104 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,1) }, /* ttyS39 */ \ | ||
105 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,2) }, /* ttyS40 */ \ | ||
106 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,3) }, /* ttyS41 */ \ | ||
107 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,4) }, /* ttyS42 */ \ | ||
108 | { 0, BASE_BAUD, 0x302, 3, HUB6_FLAGS, C_P(1,5) }, /* ttyS43 */ | ||
109 | #else | ||
110 | #define HUB6_SERIAL_PORT_DFNS | ||
111 | #endif | ||
112 | |||
113 | #ifdef CONFIG_MCA | ||
114 | #define MCA_SERIAL_PORT_DFNS \ | ||
115 | { 0, BASE_BAUD, 0x3220, 3, MCA_COM_FLAGS }, \ | ||
116 | { 0, BASE_BAUD, 0x3228, 3, MCA_COM_FLAGS }, \ | ||
117 | { 0, BASE_BAUD, 0x4220, 3, MCA_COM_FLAGS }, \ | ||
118 | { 0, BASE_BAUD, 0x4228, 3, MCA_COM_FLAGS }, \ | ||
119 | { 0, BASE_BAUD, 0x5220, 3, MCA_COM_FLAGS }, \ | ||
120 | { 0, BASE_BAUD, 0x5228, 3, MCA_COM_FLAGS }, | ||
121 | #else | ||
122 | #define MCA_SERIAL_PORT_DFNS | ||
123 | #endif | ||
124 | |||
125 | #define SERIAL_PORT_DFNS \ | ||
126 | STD_SERIAL_PORT_DEFNS \ | ||
127 | EXTRA_SERIAL_PORT_DEFNS \ | ||
128 | HUB6_SERIAL_PORT_DFNS \ | ||
129 | MCA_SERIAL_PORT_DFNS | ||
130 | |||
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h new file mode 100644 index 000000000000..8814b54c75d4 --- /dev/null +++ b/include/asm-i386/setup.h | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Just a placeholder. We don't want to have to test for x86 | ||
3 | * before we include stuff. | ||
4 | */ | ||
5 | |||
6 | #ifndef _i386_SETUP_H | ||
7 | #define _i386_SETUP_H | ||
8 | |||
9 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) | ||
10 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) | ||
11 | #define PFN_PHYS(x) ((x) << PAGE_SHIFT) | ||
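/*
 * Editor's note, a worked example of the macros above assuming 4 KB pages
 * (PAGE_SHIFT == 12): PFN_DOWN(0x1234) == 0x1, PFN_UP(0x1234) == 0x2 and
 * PFN_PHYS(0x2) == 0x2000 - round down, round up, and convert a page frame
 * number back to a physical address.
 */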
12 | |||
13 | /* | ||
14 | * Reserved space for vmalloc and iomap - defined in asm/page.h | ||
15 | */ | ||
16 | #define MAXMEM_PFN PFN_DOWN(MAXMEM) | ||
17 | #define MAX_NONPAE_PFN (1 << 20) | ||
18 | |||
19 | #define PARAM_SIZE 2048 | ||
20 | #define COMMAND_LINE_SIZE 256 | ||
21 | |||
22 | #define OLD_CL_MAGIC_ADDR 0x90020 | ||
23 | #define OLD_CL_MAGIC 0xA33F | ||
24 | #define OLD_CL_BASE_ADDR 0x90000 | ||
25 | #define OLD_CL_OFFSET 0x90022 | ||
26 | #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ | ||
27 | |||
28 | #ifndef __ASSEMBLY__ | ||
29 | /* | ||
30 | * This is set up by the setup-routine at boot-time | ||
31 | */ | ||
32 | extern unsigned char boot_params[PARAM_SIZE]; | ||
33 | |||
34 | #define PARAM (boot_params) | ||
35 | #define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) | ||
36 | #define EXT_MEM_K (*(unsigned short *) (PARAM+2)) | ||
37 | #define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) | ||
38 | #define E820_MAP_NR (*(char*) (PARAM+E820NR)) | ||
39 | #define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) | ||
40 | #define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) | ||
41 | #define IST_INFO (*(struct ist_info *) (PARAM+0x60)) | ||
42 | #define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80)) | ||
43 | #define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0)) | ||
44 | #define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) | ||
45 | #define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) | ||
46 | #define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) | ||
47 | #define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0))) | ||
48 | #define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4))) | ||
49 | #define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) | ||
50 | #define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) | ||
51 | #define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) | ||
52 | #define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) | ||
53 | #define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) | ||
54 | #define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) | ||
55 | #define KERNEL_START (*(unsigned long *) (PARAM+0x214)) | ||
56 | #define INITRD_START (*(unsigned long *) (PARAM+0x218)) | ||
57 | #define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) | ||
58 | #define EDID_INFO (*(struct edid_info *) (PARAM+0x140)) | ||
59 | #define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) | ||
60 | #define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) | ||
61 | #define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) | ||
62 | #define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) | ||
63 | |||
64 | #endif /* __ASSEMBLY__ */ | ||
65 | |||
66 | #endif /* _i386_SETUP_H */ | ||
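The PARAM-based accessors above are just typed views at fixed offsets into the real-mode boot block that the boot loader fills in and which setup code copies into boot_params[]. As a hedged illustration (the function name and message are made up for this sketch; printk comes from linux/kernel.h as usual), early setup code could report the initrd handed over by the boot loader like this:

/* Illustrative only: report the initrd recorded by the boot loader. */
static void __init example_report_initrd(void)
{
	if (LOADER_TYPE && INITRD_START)
		printk(KERN_INFO "initrd at 0x%08lx, %lu bytes\n",
		       INITRD_START, INITRD_SIZE);
}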
diff --git a/include/asm-i386/shmbuf.h b/include/asm-i386/shmbuf.h new file mode 100644 index 000000000000..d1cdc3cb079b --- /dev/null +++ b/include/asm-i386/shmbuf.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _I386_SHMBUF_H | ||
2 | #define _I386_SHMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The shmid64_ds structure for i386 architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 64-bit time_t to solve y2038 problem | ||
11 | * - 2 miscellaneous 32-bit values | ||
12 | */ | ||
13 | |||
14 | struct shmid64_ds { | ||
15 | struct ipc64_perm shm_perm; /* operation perms */ | ||
16 | size_t shm_segsz; /* size of segment (bytes) */ | ||
17 | __kernel_time_t shm_atime; /* last attach time */ | ||
18 | unsigned long __unused1; | ||
19 | __kernel_time_t shm_dtime; /* last detach time */ | ||
20 | unsigned long __unused2; | ||
21 | __kernel_time_t shm_ctime; /* last change time */ | ||
22 | unsigned long __unused3; | ||
23 | __kernel_pid_t shm_cpid; /* pid of creator */ | ||
24 | __kernel_pid_t shm_lpid; /* pid of last operator */ | ||
25 | unsigned long shm_nattch; /* no. of current attaches */ | ||
26 | unsigned long __unused4; | ||
27 | unsigned long __unused5; | ||
28 | }; | ||
29 | |||
30 | struct shminfo64 { | ||
31 | unsigned long shmmax; | ||
32 | unsigned long shmmin; | ||
33 | unsigned long shmmni; | ||
34 | unsigned long shmseg; | ||
35 | unsigned long shmall; | ||
36 | unsigned long __unused1; | ||
37 | unsigned long __unused2; | ||
38 | unsigned long __unused3; | ||
39 | unsigned long __unused4; | ||
40 | }; | ||
41 | |||
42 | #endif /* _I386_SHMBUF_H */ | ||
diff --git a/include/asm-i386/shmparam.h b/include/asm-i386/shmparam.h new file mode 100644 index 000000000000..786243a5b319 --- /dev/null +++ b/include/asm-i386/shmparam.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASMI386_SHMPARAM_H | ||
2 | #define _ASMI386_SHMPARAM_H | ||
3 | |||
4 | #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ | ||
5 | |||
6 | #endif /* _ASMI386_SHMPARAM_H */ | ||
diff --git a/include/asm-i386/sigcontext.h b/include/asm-i386/sigcontext.h new file mode 100644 index 000000000000..aaef089a7787 --- /dev/null +++ b/include/asm-i386/sigcontext.h | |||
@@ -0,0 +1,85 @@ | |||
1 | #ifndef _ASMi386_SIGCONTEXT_H | ||
2 | #define _ASMi386_SIGCONTEXT_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | |||
6 | /* | ||
7 | * As documented in the iBCS2 standard.. | ||
8 | * | ||
9 | * The first part of "struct _fpstate" is just the normal i387 | ||
10 | * hardware setup, the extra "status" word is used to save the | ||
11 | * coprocessor status word before entering the handler. | ||
12 | * | ||
13 | * Pentium III FXSR, SSE support | ||
14 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
15 | * | ||
16 | * The FPU state data structure has had to grow to accommodate the | ||
17 | * extended FPU state required by the Streaming SIMD Extensions. | ||
18 | * There is no documented standard to accomplish this at the moment. | ||
19 | */ | ||
20 | struct _fpreg { | ||
21 | unsigned short significand[4]; | ||
22 | unsigned short exponent; | ||
23 | }; | ||
24 | |||
25 | struct _fpxreg { | ||
26 | unsigned short significand[4]; | ||
27 | unsigned short exponent; | ||
28 | unsigned short padding[3]; | ||
29 | }; | ||
30 | |||
31 | struct _xmmreg { | ||
32 | unsigned long element[4]; | ||
33 | }; | ||
34 | |||
35 | struct _fpstate { | ||
36 | /* Regular FPU environment */ | ||
37 | unsigned long cw; | ||
38 | unsigned long sw; | ||
39 | unsigned long tag; | ||
40 | unsigned long ipoff; | ||
41 | unsigned long cssel; | ||
42 | unsigned long dataoff; | ||
43 | unsigned long datasel; | ||
44 | struct _fpreg _st[8]; | ||
45 | unsigned short status; | ||
46 | unsigned short magic; /* 0xffff = regular FPU data only */ | ||
47 | |||
48 | /* FXSR FPU environment */ | ||
49 | unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ | ||
50 | unsigned long mxcsr; | ||
51 | unsigned long reserved; | ||
52 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ | ||
53 | struct _xmmreg _xmm[8]; | ||
54 | unsigned long padding[56]; | ||
55 | }; | ||
56 | |||
57 | #define X86_FXSR_MAGIC 0x0000 | ||
58 | |||
59 | struct sigcontext { | ||
60 | unsigned short gs, __gsh; | ||
61 | unsigned short fs, __fsh; | ||
62 | unsigned short es, __esh; | ||
63 | unsigned short ds, __dsh; | ||
64 | unsigned long edi; | ||
65 | unsigned long esi; | ||
66 | unsigned long ebp; | ||
67 | unsigned long esp; | ||
68 | unsigned long ebx; | ||
69 | unsigned long edx; | ||
70 | unsigned long ecx; | ||
71 | unsigned long eax; | ||
72 | unsigned long trapno; | ||
73 | unsigned long err; | ||
74 | unsigned long eip; | ||
75 | unsigned short cs, __csh; | ||
76 | unsigned long eflags; | ||
77 | unsigned long esp_at_signal; | ||
78 | unsigned short ss, __ssh; | ||
79 | struct _fpstate __user * fpstate; | ||
80 | unsigned long oldmask; | ||
81 | unsigned long cr2; | ||
82 | }; | ||
83 | |||
84 | |||
85 | #endif | ||
diff --git a/include/asm-i386/siginfo.h b/include/asm-i386/siginfo.h new file mode 100644 index 000000000000..fe18f98fccfa --- /dev/null +++ b/include/asm-i386/siginfo.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _I386_SIGINFO_H | ||
2 | #define _I386_SIGINFO_H | ||
3 | |||
4 | #include <asm-generic/siginfo.h> | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h new file mode 100644 index 000000000000..7ef343b6812d --- /dev/null +++ b/include/asm-i386/signal.h | |||
@@ -0,0 +1,237 @@ | |||
1 | #ifndef _ASMi386_SIGNAL_H | ||
2 | #define _ASMi386_SIGNAL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/linkage.h> | ||
6 | #include <linux/time.h> | ||
7 | #include <linux/compiler.h> | ||
8 | |||
9 | /* Avoid too many header ordering problems. */ | ||
10 | struct siginfo; | ||
11 | |||
12 | #ifdef __KERNEL__ | ||
13 | /* Most things should be clean enough to redefine this at will, if care | ||
14 | is taken to make libc match. */ | ||
15 | |||
16 | #define _NSIG 64 | ||
17 | #define _NSIG_BPW 32 | ||
18 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
19 | |||
20 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
21 | |||
22 | typedef struct { | ||
23 | unsigned long sig[_NSIG_WORDS]; | ||
24 | } sigset_t; | ||
25 | |||
26 | #else | ||
27 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
28 | |||
29 | #define NSIG 32 | ||
30 | typedef unsigned long sigset_t; | ||
31 | |||
32 | #endif /* __KERNEL__ */ | ||
33 | |||
34 | #define SIGHUP 1 | ||
35 | #define SIGINT 2 | ||
36 | #define SIGQUIT 3 | ||
37 | #define SIGILL 4 | ||
38 | #define SIGTRAP 5 | ||
39 | #define SIGABRT 6 | ||
40 | #define SIGIOT 6 | ||
41 | #define SIGBUS 7 | ||
42 | #define SIGFPE 8 | ||
43 | #define SIGKILL 9 | ||
44 | #define SIGUSR1 10 | ||
45 | #define SIGSEGV 11 | ||
46 | #define SIGUSR2 12 | ||
47 | #define SIGPIPE 13 | ||
48 | #define SIGALRM 14 | ||
49 | #define SIGTERM 15 | ||
50 | #define SIGSTKFLT 16 | ||
51 | #define SIGCHLD 17 | ||
52 | #define SIGCONT 18 | ||
53 | #define SIGSTOP 19 | ||
54 | #define SIGTSTP 20 | ||
55 | #define SIGTTIN 21 | ||
56 | #define SIGTTOU 22 | ||
57 | #define SIGURG 23 | ||
58 | #define SIGXCPU 24 | ||
59 | #define SIGXFSZ 25 | ||
60 | #define SIGVTALRM 26 | ||
61 | #define SIGPROF 27 | ||
62 | #define SIGWINCH 28 | ||
63 | #define SIGIO 29 | ||
64 | #define SIGPOLL SIGIO | ||
65 | /* | ||
66 | #define SIGLOST 29 | ||
67 | */ | ||
68 | #define SIGPWR 30 | ||
69 | #define SIGSYS 31 | ||
70 | #define SIGUNUSED 31 | ||
71 | |||
72 | /* These should not be considered constants from userland. */ | ||
73 | #define SIGRTMIN 32 | ||
74 | #define SIGRTMAX _NSIG | ||
75 | |||
76 | /* | ||
77 | * SA_FLAGS values: | ||
78 | * | ||
79 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
80 | * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the | ||
81 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
82 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
83 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
84 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
85 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
86 | * | ||
87 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
88 | * Unix names RESETHAND and NODEFER respectively. | ||
89 | */ | ||
90 | #define SA_NOCLDSTOP 0x00000001u | ||
91 | #define SA_NOCLDWAIT 0x00000002u | ||
92 | #define SA_SIGINFO 0x00000004u | ||
93 | #define SA_ONSTACK 0x08000000u | ||
94 | #define SA_RESTART 0x10000000u | ||
95 | #define SA_NODEFER 0x40000000u | ||
96 | #define SA_RESETHAND 0x80000000u | ||
97 | |||
98 | #define SA_NOMASK SA_NODEFER | ||
99 | #define SA_ONESHOT SA_RESETHAND | ||
100 | #define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ | ||
101 | |||
102 | #define SA_RESTORER 0x04000000 | ||
103 | |||
104 | /* | ||
105 | * sigaltstack controls | ||
106 | */ | ||
107 | #define SS_ONSTACK 1 | ||
108 | #define SS_DISABLE 2 | ||
109 | |||
110 | #define MINSIGSTKSZ 2048 | ||
111 | #define SIGSTKSZ 8192 | ||
112 | |||
113 | #ifdef __KERNEL__ | ||
114 | |||
115 | /* | ||
116 | * These values of sa_flags are used only by the kernel as part of the | ||
117 | * irq handling routines. | ||
118 | * | ||
119 | * SA_INTERRUPT is also used by the irq handling routines. | ||
120 | * SA_SHIRQ is for shared interrupt support on PCI and EISA. | ||
121 | */ | ||
122 | #define SA_PROBE SA_ONESHOT | ||
123 | #define SA_SAMPLE_RANDOM SA_RESTART | ||
124 | #define SA_SHIRQ 0x04000000 | ||
125 | #endif | ||
126 | |||
127 | #define SIG_BLOCK 0 /* for blocking signals */ | ||
128 | #define SIG_UNBLOCK 1 /* for unblocking signals */ | ||
129 | #define SIG_SETMASK 2 /* for setting the signal mask */ | ||
130 | |||
131 | /* Type of a signal handler. */ | ||
132 | typedef void __signalfn_t(int); | ||
133 | typedef __signalfn_t __user *__sighandler_t; | ||
134 | |||
135 | typedef void __restorefn_t(void); | ||
136 | typedef __restorefn_t __user *__sigrestore_t; | ||
137 | |||
138 | #define SIG_DFL ((__sighandler_t)0) /* default signal handling */ | ||
139 | #define SIG_IGN ((__sighandler_t)1) /* ignore signal */ | ||
140 | #define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ | ||
141 | |||
142 | #ifdef __KERNEL__ | ||
143 | struct old_sigaction { | ||
144 | __sighandler_t sa_handler; | ||
145 | old_sigset_t sa_mask; | ||
146 | unsigned long sa_flags; | ||
147 | __sigrestore_t sa_restorer; | ||
148 | }; | ||
149 | |||
150 | struct sigaction { | ||
151 | __sighandler_t sa_handler; | ||
152 | unsigned long sa_flags; | ||
153 | __sigrestore_t sa_restorer; | ||
154 | sigset_t sa_mask; /* mask last for extensibility */ | ||
155 | }; | ||
156 | |||
157 | struct k_sigaction { | ||
158 | struct sigaction sa; | ||
159 | }; | ||
160 | #else | ||
161 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
162 | |||
163 | struct sigaction { | ||
164 | union { | ||
165 | __sighandler_t _sa_handler; | ||
166 | void (*_sa_sigaction)(int, struct siginfo *, void *); | ||
167 | } _u; | ||
168 | sigset_t sa_mask; | ||
169 | unsigned long sa_flags; | ||
170 | void (*sa_restorer)(void); | ||
171 | }; | ||
172 | |||
173 | #define sa_handler _u._sa_handler | ||
174 | #define sa_sigaction _u._sa_sigaction | ||
175 | |||
176 | #endif /* __KERNEL__ */ | ||
177 | |||
178 | typedef struct sigaltstack { | ||
179 | void __user *ss_sp; | ||
180 | int ss_flags; | ||
181 | size_t ss_size; | ||
182 | } stack_t; | ||
183 | |||
184 | #ifdef __KERNEL__ | ||
185 | #include <asm/sigcontext.h> | ||
186 | |||
187 | #define __HAVE_ARCH_SIG_BITOPS | ||
188 | |||
189 | static __inline__ void sigaddset(sigset_t *set, int _sig) | ||
190 | { | ||
191 | __asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); | ||
192 | } | ||
193 | |||
194 | static __inline__ void sigdelset(sigset_t *set, int _sig) | ||
195 | { | ||
196 | __asm__("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); | ||
197 | } | ||
198 | |||
199 | static __inline__ int __const_sigismember(sigset_t *set, int _sig) | ||
200 | { | ||
201 | unsigned long sig = _sig - 1; | ||
202 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); | ||
203 | } | ||
204 | |||
205 | static __inline__ int __gen_sigismember(sigset_t *set, int _sig) | ||
206 | { | ||
207 | int ret; | ||
208 | __asm__("btl %2,%1\n\tsbbl %0,%0" | ||
209 | : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); | ||
210 | return ret; | ||
211 | } | ||
212 | |||
213 | #define sigismember(set,sig) \ | ||
214 | (__builtin_constant_p(sig) ? \ | ||
215 | __const_sigismember((set),(sig)) : \ | ||
216 | __gen_sigismember((set),(sig))) | ||
217 | |||
218 | static __inline__ int sigfindinword(unsigned long word) | ||
219 | { | ||
220 | __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); | ||
221 | return word; | ||
222 | } | ||
223 | |||
224 | struct pt_regs; | ||
225 | extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset)); | ||
226 | |||
227 | #define ptrace_signal_deliver(regs, cookie) \ | ||
228 | do { \ | ||
229 | if (current->ptrace & PT_DTRACE) { \ | ||
230 | current->ptrace &= ~PT_DTRACE; \ | ||
231 | (regs)->eflags &= ~TF_MASK; \ | ||
232 | } \ | ||
233 | } while (0) | ||
234 | |||
235 | #endif /* __KERNEL__ */ | ||
236 | |||
237 | #endif | ||
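The bit-test instructions in sigaddset(), sigdelset() and __gen_sigismember() all use the indexing that __const_sigismember() spells out: signal n lives at bit (n-1) % _NSIG_BPW of word (n-1) / _NSIG_BPW of the sigset. For comparison only, a generic C sketch of the same operations (these helpers are illustrative and not part of the header):

/* Illustrative, portable equivalents of the asm bit helpers above. */
static inline void generic_sigaddset(sigset_t *set, int sig)
{
	unsigned long s = sig - 1;
	set->sig[s / _NSIG_BPW] |= 1UL << (s % _NSIG_BPW);
}

static inline int generic_sigismember(const sigset_t *set, int sig)
{
	unsigned long s = sig - 1;
	return (set->sig[s / _NSIG_BPW] >> (s % _NSIG_BPW)) & 1;
}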
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h new file mode 100644 index 000000000000..dd1491225d51 --- /dev/null +++ b/include/asm-i386/smp.h | |||
@@ -0,0 +1,90 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | /* | ||
5 | * We need the APIC definitions automatically as part of 'smp.h' | ||
6 | */ | ||
7 | #ifndef __ASSEMBLY__ | ||
8 | #include <linux/config.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/threads.h> | ||
11 | #include <linux/cpumask.h> | ||
12 | #endif | ||
13 | |||
14 | #ifdef CONFIG_X86_LOCAL_APIC | ||
15 | #ifndef __ASSEMBLY__ | ||
16 | #include <asm/fixmap.h> | ||
17 | #include <asm/bitops.h> | ||
18 | #include <asm/mpspec.h> | ||
19 | #ifdef CONFIG_X86_IO_APIC | ||
20 | #include <asm/io_apic.h> | ||
21 | #endif | ||
22 | #include <asm/apic.h> | ||
23 | #endif | ||
24 | #endif | ||
25 | |||
26 | #define BAD_APICID 0xFFu | ||
27 | #ifdef CONFIG_SMP | ||
28 | #ifndef __ASSEMBLY__ | ||
29 | |||
30 | /* | ||
31 | * Private routines/data | ||
32 | */ | ||
33 | |||
34 | extern void smp_alloc_memory(void); | ||
35 | extern int pic_mode; | ||
36 | extern int smp_num_siblings; | ||
37 | extern cpumask_t cpu_sibling_map[]; | ||
38 | |||
39 | extern void smp_flush_tlb(void); | ||
40 | extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs); | ||
41 | extern void smp_invalidate_rcv(void); /* Process an NMI */ | ||
42 | extern void (*mtrr_hook) (void); | ||
43 | extern void zap_low_mappings (void); | ||
44 | |||
45 | #define MAX_APICID 256 | ||
46 | extern u8 x86_cpu_to_apicid[]; | ||
47 | |||
48 | /* | ||
49 | * This function is needed by all SMP systems. It must _always_ be valid | ||
50 | * from the initial startup. We map APIC_BASE very early in page_setup(), | ||
51 | * so this is correct in the x86 case. | ||
52 | */ | ||
53 | #define __smp_processor_id() (current_thread_info()->cpu) | ||
54 | |||
55 | extern cpumask_t cpu_callout_map; | ||
56 | extern cpumask_t cpu_callin_map; | ||
57 | #define cpu_possible_map cpu_callout_map | ||
58 | |||
59 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
60 | static inline int num_booting_cpus(void) | ||
61 | { | ||
62 | return cpus_weight(cpu_callout_map); | ||
63 | } | ||
64 | |||
65 | #ifdef CONFIG_X86_LOCAL_APIC | ||
66 | |||
67 | #ifdef APIC_DEFINITION | ||
68 | extern int hard_smp_processor_id(void); | ||
69 | #else | ||
70 | #include <mach_apicdef.h> | ||
71 | static inline int hard_smp_processor_id(void) | ||
72 | { | ||
73 | /* we don't want to mark this access volatile - bad code generation */ | ||
74 | return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); | ||
75 | } | ||
76 | #endif | ||
77 | |||
78 | static __inline int logical_smp_processor_id(void) | ||
79 | { | ||
80 | /* we don't want to mark this access volatile - bad code generation */ | ||
81 | return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); | ||
82 | } | ||
83 | |||
84 | #endif | ||
85 | #endif /* !__ASSEMBLY__ */ | ||
86 | |||
87 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | ||
88 | |||
89 | #endif | ||
90 | #endif | ||
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h new file mode 100644 index 000000000000..07f6b38ad140 --- /dev/null +++ b/include/asm-i386/socket.h | |||
@@ -0,0 +1,50 @@ | |||
1 | #ifndef _ASM_SOCKET_H | ||
2 | #define _ASM_SOCKET_H | ||
3 | |||
4 | #include <asm/sockios.h> | ||
5 | |||
6 | /* For setsockopt(2) */ | ||
7 | #define SOL_SOCKET 1 | ||
8 | |||
9 | #define SO_DEBUG 1 | ||
10 | #define SO_REUSEADDR 2 | ||
11 | #define SO_TYPE 3 | ||
12 | #define SO_ERROR 4 | ||
13 | #define SO_DONTROUTE 5 | ||
14 | #define SO_BROADCAST 6 | ||
15 | #define SO_SNDBUF 7 | ||
16 | #define SO_RCVBUF 8 | ||
17 | #define SO_KEEPALIVE 9 | ||
18 | #define SO_OOBINLINE 10 | ||
19 | #define SO_NO_CHECK 11 | ||
20 | #define SO_PRIORITY 12 | ||
21 | #define SO_LINGER 13 | ||
22 | #define SO_BSDCOMPAT 14 | ||
23 | /* To add :#define SO_REUSEPORT 15 */ | ||
24 | #define SO_PASSCRED 16 | ||
25 | #define SO_PEERCRED 17 | ||
26 | #define SO_RCVLOWAT 18 | ||
27 | #define SO_SNDLOWAT 19 | ||
28 | #define SO_RCVTIMEO 20 | ||
29 | #define SO_SNDTIMEO 21 | ||
30 | |||
31 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | ||
32 | #define SO_SECURITY_AUTHENTICATION 22 | ||
33 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 | ||
34 | #define SO_SECURITY_ENCRYPTION_NETWORK 24 | ||
35 | |||
36 | #define SO_BINDTODEVICE 25 | ||
37 | |||
38 | /* Socket filtering */ | ||
39 | #define SO_ATTACH_FILTER 26 | ||
40 | #define SO_DETACH_FILTER 27 | ||
41 | |||
42 | #define SO_PEERNAME 28 | ||
43 | #define SO_TIMESTAMP 29 | ||
44 | #define SCM_TIMESTAMP SO_TIMESTAMP | ||
45 | |||
46 | #define SO_ACCEPTCONN 30 | ||
47 | |||
48 | #define SO_PEERSEC 31 | ||
49 | |||
50 | #endif /* _ASM_SOCKET_H */ | ||
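These option numbers are consumed through setsockopt(2)/getsockopt(2) at level SOL_SOCKET. A small userspace sketch using the standard socket API (the helper name is made up):

#include <sys/socket.h>

/* Illustrative: allow quick re-binding of a listening address. */
static int enable_reuseaddr(int fd)
{
	int one = 1;
	return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}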
diff --git a/include/asm-i386/sockios.h b/include/asm-i386/sockios.h new file mode 100644 index 000000000000..6b747f8e228b --- /dev/null +++ b/include/asm-i386/sockios.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef __ARCH_I386_SOCKIOS__ | ||
2 | #define __ARCH_I386_SOCKIOS__ | ||
3 | |||
4 | /* Socket-level I/O control calls. */ | ||
5 | #define FIOSETOWN 0x8901 | ||
6 | #define SIOCSPGRP 0x8902 | ||
7 | #define FIOGETOWN 0x8903 | ||
8 | #define SIOCGPGRP 0x8904 | ||
9 | #define SIOCATMARK 0x8905 | ||
10 | #define SIOCGSTAMP 0x8906 /* Get stamp */ | ||
11 | |||
12 | #endif | ||
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h new file mode 100644 index 000000000000..f9ff31f40036 --- /dev/null +++ b/include/asm-i386/spinlock.h | |||
@@ -0,0 +1,250 @@ | |||
1 | #ifndef __ASM_SPINLOCK_H | ||
2 | #define __ASM_SPINLOCK_H | ||
3 | |||
4 | #include <asm/atomic.h> | ||
5 | #include <asm/rwlock.h> | ||
6 | #include <asm/page.h> | ||
7 | #include <linux/config.h> | ||
8 | #include <linux/compiler.h> | ||
9 | |||
10 | asmlinkage int printk(const char * fmt, ...) | ||
11 | __attribute__ ((format (printf, 1, 2))); | ||
12 | |||
13 | /* | ||
14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
15 | */ | ||
16 | |||
17 | typedef struct { | ||
18 | volatile unsigned int slock; | ||
19 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
20 | unsigned magic; | ||
21 | #endif | ||
22 | #ifdef CONFIG_PREEMPT | ||
23 | unsigned int break_lock; | ||
24 | #endif | ||
25 | } spinlock_t; | ||
26 | |||
27 | #define SPINLOCK_MAGIC 0xdead4ead | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
30 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
31 | #else | ||
32 | #define SPINLOCK_MAGIC_INIT /* */ | ||
33 | #endif | ||
34 | |||
35 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
36 | |||
37 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
38 | |||
39 | /* | ||
40 | * Simple spin lock operations. There are two variants, one clears IRQ's | ||
41 | * on the local processor, one does not. | ||
42 | * | ||
43 | * We make no fairness assumptions. They have a cost. | ||
44 | */ | ||
45 | |||
46 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) | ||
47 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | ||
48 | |||
49 | #define spin_lock_string \ | ||
50 | "\n1:\t" \ | ||
51 | "lock ; decb %0\n\t" \ | ||
52 | "jns 3f\n" \ | ||
53 | "2:\t" \ | ||
54 | "rep;nop\n\t" \ | ||
55 | "cmpb $0,%0\n\t" \ | ||
56 | "jle 2b\n\t" \ | ||
57 | "jmp 1b\n" \ | ||
58 | "3:\n\t" | ||
59 | |||
60 | #define spin_lock_string_flags \ | ||
61 | "\n1:\t" \ | ||
62 | "lock ; decb %0\n\t" \ | ||
63 | "jns 4f\n\t" \ | ||
64 | "2:\t" \ | ||
65 | "testl $0x200, %1\n\t" \ | ||
66 | "jz 3f\n\t" \ | ||
67 | "sti\n\t" \ | ||
68 | "3:\t" \ | ||
69 | "rep;nop\n\t" \ | ||
70 | "cmpb $0, %0\n\t" \ | ||
71 | "jle 3b\n\t" \ | ||
72 | "cli\n\t" \ | ||
73 | "jmp 1b\n" \ | ||
74 | "4:\n\t" | ||
75 | |||
76 | /* | ||
77 | * This works. Despite all the confusion. | ||
78 | * (except on PPro SMP or if we are using OOSTORE) | ||
79 | * (PPro errata 66, 92) | ||
80 | */ | ||
81 | |||
82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | ||
83 | |||
84 | #define spin_unlock_string \ | ||
85 | "movb $1,%0" \ | ||
86 | :"=m" (lock->slock) : : "memory" | ||
87 | |||
88 | |||
89 | static inline void _raw_spin_unlock(spinlock_t *lock) | ||
90 | { | ||
91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
92 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
93 | BUG_ON(!spin_is_locked(lock)); | ||
94 | #endif | ||
95 | __asm__ __volatile__( | ||
96 | spin_unlock_string | ||
97 | ); | ||
98 | } | ||
99 | |||
100 | #else | ||
101 | |||
102 | #define spin_unlock_string \ | ||
103 | "xchgb %b0, %1" \ | ||
104 | :"=q" (oldval), "=m" (lock->slock) \ | ||
105 | :"0" (oldval) : "memory" | ||
106 | |||
107 | static inline void _raw_spin_unlock(spinlock_t *lock) | ||
108 | { | ||
109 | char oldval = 1; | ||
110 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
111 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
112 | BUG_ON(!spin_is_locked(lock)); | ||
113 | #endif | ||
114 | __asm__ __volatile__( | ||
115 | spin_unlock_string | ||
116 | ); | ||
117 | } | ||
118 | |||
119 | #endif | ||
120 | |||
121 | static inline int _raw_spin_trylock(spinlock_t *lock) | ||
122 | { | ||
123 | char oldval; | ||
124 | __asm__ __volatile__( | ||
125 | "xchgb %b0,%1" | ||
126 | :"=q" (oldval), "=m" (lock->slock) | ||
127 | :"0" (0) : "memory"); | ||
128 | return oldval > 0; | ||
129 | } | ||
130 | |||
131 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
132 | { | ||
133 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
134 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
135 | printk("eip: %p\n", __builtin_return_address(0)); | ||
136 | BUG(); | ||
137 | } | ||
138 | #endif | ||
139 | __asm__ __volatile__( | ||
140 | spin_lock_string | ||
141 | :"=m" (lock->slock) : : "memory"); | ||
142 | } | ||
143 | |||
144 | static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | ||
145 | { | ||
146 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
147 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
148 | printk("eip: %p\n", __builtin_return_address(0)); | ||
149 | BUG(); | ||
150 | } | ||
151 | #endif | ||
152 | __asm__ __volatile__( | ||
153 | spin_lock_string_flags | ||
154 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Read-write spinlocks, allowing multiple readers | ||
159 | * but only one writer. | ||
160 | * | ||
161 | * NOTE! it is quite common to have readers in interrupts | ||
162 | * but no interrupt writers. For those circumstances we | ||
163 | * can "mix" irq-safe locks - any writer needs to get a | ||
164 | * irq-safe write-lock, but readers can get non-irqsafe | ||
165 | * read-locks. | ||
166 | */ | ||
167 | typedef struct { | ||
168 | volatile unsigned int lock; | ||
169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
170 | unsigned magic; | ||
171 | #endif | ||
172 | #ifdef CONFIG_PREEMPT | ||
173 | unsigned int break_lock; | ||
174 | #endif | ||
175 | } rwlock_t; | ||
176 | |||
177 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
178 | |||
179 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
180 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
181 | #else | ||
182 | #define RWLOCK_MAGIC_INIT /* */ | ||
183 | #endif | ||
184 | |||
185 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
186 | |||
187 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
188 | |||
189 | /** | ||
190 | * read_can_lock - would read_trylock() succeed? | ||
191 | * @lock: the rwlock in question. | ||
192 | */ | ||
193 | #define read_can_lock(x) ((int)(x)->lock > 0) | ||
194 | |||
195 | /** | ||
196 | * write_can_lock - would write_trylock() succeed? | ||
197 | * @lock: the rwlock in question. | ||
198 | */ | ||
199 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
200 | |||
201 | /* | ||
202 | * On x86, we implement read-write locks as a 32-bit counter | ||
203 | * with the high bit (sign) being the "contended" bit. | ||
204 | * | ||
205 | * The inline assembly is non-obvious. Think about it. | ||
206 | * | ||
207 | * Changed to use the same technique as rw semaphores. See | ||
208 | * semaphore.h for details. -ben | ||
209 | */ | ||
210 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
211 | |||
212 | static inline void _raw_read_lock(rwlock_t *rw) | ||
213 | { | ||
214 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
215 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
216 | #endif | ||
217 | __build_read_lock(rw, "__read_lock_failed"); | ||
218 | } | ||
219 | |||
220 | static inline void _raw_write_lock(rwlock_t *rw) | ||
221 | { | ||
222 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
223 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
224 | #endif | ||
225 | __build_write_lock(rw, "__write_lock_failed"); | ||
226 | } | ||
227 | |||
228 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | ||
229 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
230 | |||
231 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
232 | { | ||
233 | atomic_t *count = (atomic_t *)lock; | ||
234 | atomic_dec(count); | ||
235 | if (atomic_read(count) >= 0) | ||
236 | return 1; | ||
237 | atomic_inc(count); | ||
238 | return 0; | ||
239 | } | ||
240 | |||
241 | static inline int _raw_write_trylock(rwlock_t *lock) | ||
242 | { | ||
243 | atomic_t *count = (atomic_t *)lock; | ||
244 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | ||
245 | return 1; | ||
246 | atomic_add(RW_LOCK_BIAS, count); | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | #endif /* __ASM_SPINLOCK_H */ | ||
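The two trylock routines above make the biased-counter scheme concrete: the counter starts at RW_LOCK_BIAS (defined in asm/rwlock.h; 0x01000000 on i386), each reader takes one unit, and a writer takes the whole bias, so the value stays positive while only readers hold the lock and reaches exactly zero while a writer does. A worked trace, assuming that bias value:

/*
 * Worked example, assuming RW_LOCK_BIAS == 0x01000000:
 *
 *   unlocked                     lock == 0x01000000  (write_can_lock true)
 *   two readers                  lock == 0x00fffffe  (> 0, read_can_lock true)
 *   writer holds it              lock == 0x00000000  (whole bias subtracted)
 *   reader tries during a write  lock == 0xffffffff  (-1 as an int, so
 *                                _raw_read_trylock re-increments and fails)
 */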
diff --git a/include/asm-i386/srat.h b/include/asm-i386/srat.h new file mode 100644 index 000000000000..165ab4bdc02b --- /dev/null +++ b/include/asm-i386/srat.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Some of the code in this file has been gleaned from the 64 bit | ||
3 | * discontigmem support code base. | ||
4 | * | ||
5 | * Copyright (C) 2002, IBM Corp. | ||
6 | * | ||
7 | * All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
17 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
18 | * details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | * | ||
24 | * Send feedback to Pat Gaughen <gone@us.ibm.com> | ||
25 | */ | ||
26 | |||
27 | #ifndef _ASM_SRAT_H_ | ||
28 | #define _ASM_SRAT_H_ | ||
29 | |||
30 | #ifndef CONFIG_ACPI_SRAT | ||
31 | #error CONFIG_ACPI_SRAT not defined, and srat.h header has been included | ||
32 | #endif | ||
33 | |||
34 | extern int get_memcfg_from_srat(void); | ||
35 | extern unsigned long *get_zholes_size(int); | ||
36 | |||
37 | #endif /* _ASM_SRAT_H_ */ | ||
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h new file mode 100644 index 000000000000..b464f8020ec4 --- /dev/null +++ b/include/asm-i386/stat.h | |||
@@ -0,0 +1,78 @@ | |||
1 | #ifndef _I386_STAT_H | ||
2 | #define _I386_STAT_H | ||
3 | |||
4 | struct __old_kernel_stat { | ||
5 | unsigned short st_dev; | ||
6 | unsigned short st_ino; | ||
7 | unsigned short st_mode; | ||
8 | unsigned short st_nlink; | ||
9 | unsigned short st_uid; | ||
10 | unsigned short st_gid; | ||
11 | unsigned short st_rdev; | ||
12 | unsigned long st_size; | ||
13 | unsigned long st_atime; | ||
14 | unsigned long st_mtime; | ||
15 | unsigned long st_ctime; | ||
16 | }; | ||
17 | |||
18 | struct stat { | ||
19 | unsigned long st_dev; | ||
20 | unsigned long st_ino; | ||
21 | unsigned short st_mode; | ||
22 | unsigned short st_nlink; | ||
23 | unsigned short st_uid; | ||
24 | unsigned short st_gid; | ||
25 | unsigned long st_rdev; | ||
26 | unsigned long st_size; | ||
27 | unsigned long st_blksize; | ||
28 | unsigned long st_blocks; | ||
29 | unsigned long st_atime; | ||
30 | unsigned long st_atime_nsec; | ||
31 | unsigned long st_mtime; | ||
32 | unsigned long st_mtime_nsec; | ||
33 | unsigned long st_ctime; | ||
34 | unsigned long st_ctime_nsec; | ||
35 | unsigned long __unused4; | ||
36 | unsigned long __unused5; | ||
37 | }; | ||
38 | |||
39 | /* This matches struct stat64 in glibc2.1, hence the absolutely | ||
40 | * insane amounts of padding around dev_t's. | ||
41 | */ | ||
42 | struct stat64 { | ||
43 | unsigned long long st_dev; | ||
44 | unsigned char __pad0[4]; | ||
45 | |||
46 | #define STAT64_HAS_BROKEN_ST_INO 1 | ||
47 | unsigned long __st_ino; | ||
48 | |||
49 | unsigned int st_mode; | ||
50 | unsigned int st_nlink; | ||
51 | |||
52 | unsigned long st_uid; | ||
53 | unsigned long st_gid; | ||
54 | |||
55 | unsigned long long st_rdev; | ||
56 | unsigned char __pad3[4]; | ||
57 | |||
58 | long long st_size; | ||
59 | unsigned long st_blksize; | ||
60 | |||
61 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
62 | unsigned long __pad4; /* future possible st_blocks high bits */ | ||
63 | |||
64 | unsigned long st_atime; | ||
65 | unsigned long st_atime_nsec; | ||
66 | |||
67 | unsigned long st_mtime; | ||
68 | unsigned int st_mtime_nsec; | ||
69 | |||
70 | unsigned long st_ctime; | ||
71 | unsigned long st_ctime_nsec; | ||
72 | |||
73 | unsigned long long st_ino; | ||
74 | }; | ||
75 | |||
76 | #define STAT_HAVE_NSEC 1 | ||
77 | |||
78 | #endif | ||
diff --git a/include/asm-i386/statfs.h b/include/asm-i386/statfs.h new file mode 100644 index 000000000000..24972c175132 --- /dev/null +++ b/include/asm-i386/statfs.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _I386_STATFS_H | ||
2 | #define _I386_STATFS_H | ||
3 | |||
4 | #include <asm-generic/statfs.h> | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h new file mode 100644 index 000000000000..1679983d053f --- /dev/null +++ b/include/asm-i386/string.h | |||
@@ -0,0 +1,449 @@ | |||
1 | #ifndef _I386_STRING_H_ | ||
2 | #define _I386_STRING_H_ | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | #include <linux/config.h> | ||
6 | /* | ||
7 | * On a 486 or Pentium, we are better off not using the | ||
8 | * byte string operations. But on a 386 or a PPro the | ||
9 | * byte string ops are faster than doing it by hand | ||
10 | * (MUCH faster on a Pentium). | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * This string-include defines all string functions as inline | ||
15 | * functions. Use gcc. It also assumes ds=es=data space, this should be | ||
16 | * normal. Most of the string-functions are rather heavily hand-optimized, | ||
17 | * see especially strsep,strstr,str[c]spn. They should work, but are not | ||
18 | * very easy to understand. Everything is done entirely within the register | ||
19 | * set, making the functions fast and clean. String instructions have been | ||
20 | * used through-out, making for "slightly" unclear code :-) | ||
21 | * | ||
22 | * NO Copyright (C) 1991, 1992 Linus Torvalds, | ||
23 | * consider these trivial functions to be PD. | ||
24 | */ | ||
25 | |||
26 | /* AK: in fact I bet it would be better to move this stuff all out of line. | ||
27 | */ | ||
28 | |||
29 | #define __HAVE_ARCH_STRCPY | ||
30 | static inline char * strcpy(char * dest,const char *src) | ||
31 | { | ||
32 | int d0, d1, d2; | ||
33 | __asm__ __volatile__( | ||
34 | "1:\tlodsb\n\t" | ||
35 | "stosb\n\t" | ||
36 | "testb %%al,%%al\n\t" | ||
37 | "jne 1b" | ||
38 | : "=&S" (d0), "=&D" (d1), "=&a" (d2) | ||
39 | :"0" (src),"1" (dest) : "memory"); | ||
40 | return dest; | ||
41 | } | ||
42 | |||
43 | #define __HAVE_ARCH_STRNCPY | ||
44 | static inline char * strncpy(char * dest,const char *src,size_t count) | ||
45 | { | ||
46 | int d0, d1, d2, d3; | ||
47 | __asm__ __volatile__( | ||
48 | "1:\tdecl %2\n\t" | ||
49 | "js 2f\n\t" | ||
50 | "lodsb\n\t" | ||
51 | "stosb\n\t" | ||
52 | "testb %%al,%%al\n\t" | ||
53 | "jne 1b\n\t" | ||
54 | "rep\n\t" | ||
55 | "stosb\n" | ||
56 | "2:" | ||
57 | : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) | ||
58 | :"0" (src),"1" (dest),"2" (count) : "memory"); | ||
59 | return dest; | ||
60 | } | ||
61 | |||
62 | #define __HAVE_ARCH_STRCAT | ||
63 | static inline char * strcat(char * dest,const char * src) | ||
64 | { | ||
65 | int d0, d1, d2, d3; | ||
66 | __asm__ __volatile__( | ||
67 | "repne\n\t" | ||
68 | "scasb\n\t" | ||
69 | "decl %1\n" | ||
70 | "1:\tlodsb\n\t" | ||
71 | "stosb\n\t" | ||
72 | "testb %%al,%%al\n\t" | ||
73 | "jne 1b" | ||
74 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) | ||
75 | : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory"); | ||
76 | return dest; | ||
77 | } | ||
78 | |||
79 | #define __HAVE_ARCH_STRNCAT | ||
80 | static inline char * strncat(char * dest,const char * src,size_t count) | ||
81 | { | ||
82 | int d0, d1, d2, d3; | ||
83 | __asm__ __volatile__( | ||
84 | "repne\n\t" | ||
85 | "scasb\n\t" | ||
86 | "decl %1\n\t" | ||
87 | "movl %8,%3\n" | ||
88 | "1:\tdecl %3\n\t" | ||
89 | "js 2f\n\t" | ||
90 | "lodsb\n\t" | ||
91 | "stosb\n\t" | ||
92 | "testb %%al,%%al\n\t" | ||
93 | "jne 1b\n" | ||
94 | "2:\txorl %2,%2\n\t" | ||
95 | "stosb" | ||
96 | : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) | ||
97 | : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count) | ||
98 | : "memory"); | ||
99 | return dest; | ||
100 | } | ||
101 | |||
102 | #define __HAVE_ARCH_STRCMP | ||
103 | static inline int strcmp(const char * cs,const char * ct) | ||
104 | { | ||
105 | int d0, d1; | ||
106 | register int __res; | ||
107 | __asm__ __volatile__( | ||
108 | "1:\tlodsb\n\t" | ||
109 | "scasb\n\t" | ||
110 | "jne 2f\n\t" | ||
111 | "testb %%al,%%al\n\t" | ||
112 | "jne 1b\n\t" | ||
113 | "xorl %%eax,%%eax\n\t" | ||
114 | "jmp 3f\n" | ||
115 | "2:\tsbbl %%eax,%%eax\n\t" | ||
116 | "orb $1,%%al\n" | ||
117 | "3:" | ||
118 | :"=a" (__res), "=&S" (d0), "=&D" (d1) | ||
119 | :"1" (cs),"2" (ct)); | ||
120 | return __res; | ||
121 | } | ||
122 | |||
123 | #define __HAVE_ARCH_STRNCMP | ||
124 | static inline int strncmp(const char * cs,const char * ct,size_t count) | ||
125 | { | ||
126 | register int __res; | ||
127 | int d0, d1, d2; | ||
128 | __asm__ __volatile__( | ||
129 | "1:\tdecl %3\n\t" | ||
130 | "js 2f\n\t" | ||
131 | "lodsb\n\t" | ||
132 | "scasb\n\t" | ||
133 | "jne 3f\n\t" | ||
134 | "testb %%al,%%al\n\t" | ||
135 | "jne 1b\n" | ||
136 | "2:\txorl %%eax,%%eax\n\t" | ||
137 | "jmp 4f\n" | ||
138 | "3:\tsbbl %%eax,%%eax\n\t" | ||
139 | "orb $1,%%al\n" | ||
140 | "4:" | ||
141 | :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2) | ||
142 | :"1" (cs),"2" (ct),"3" (count)); | ||
143 | return __res; | ||
144 | } | ||
145 | |||
146 | #define __HAVE_ARCH_STRCHR | ||
147 | static inline char * strchr(const char * s, int c) | ||
148 | { | ||
149 | int d0; | ||
150 | register char * __res; | ||
151 | __asm__ __volatile__( | ||
152 | "movb %%al,%%ah\n" | ||
153 | "1:\tlodsb\n\t" | ||
154 | "cmpb %%ah,%%al\n\t" | ||
155 | "je 2f\n\t" | ||
156 | "testb %%al,%%al\n\t" | ||
157 | "jne 1b\n\t" | ||
158 | "movl $1,%1\n" | ||
159 | "2:\tmovl %1,%0\n\t" | ||
160 | "decl %0" | ||
161 | :"=a" (__res), "=&S" (d0) : "1" (s),"0" (c)); | ||
162 | return __res; | ||
163 | } | ||
164 | |||
165 | #define __HAVE_ARCH_STRRCHR | ||
166 | static inline char * strrchr(const char * s, int c) | ||
167 | { | ||
168 | int d0, d1; | ||
169 | register char * __res; | ||
170 | __asm__ __volatile__( | ||
171 | "movb %%al,%%ah\n" | ||
172 | "1:\tlodsb\n\t" | ||
173 | "cmpb %%ah,%%al\n\t" | ||
174 | "jne 2f\n\t" | ||
175 | "leal -1(%%esi),%0\n" | ||
176 | "2:\ttestb %%al,%%al\n\t" | ||
177 | "jne 1b" | ||
178 | :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c)); | ||
179 | return __res; | ||
180 | } | ||
181 | |||
182 | #define __HAVE_ARCH_STRLEN | ||
183 | static inline size_t strlen(const char * s) | ||
184 | { | ||
185 | int d0; | ||
186 | register int __res; | ||
187 | __asm__ __volatile__( | ||
188 | "repne\n\t" | ||
189 | "scasb\n\t" | ||
190 | "notl %0\n\t" | ||
191 | "decl %0" | ||
192 | :"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu)); | ||
193 | return __res; | ||
194 | } | ||
195 | |||
196 | static inline void * __memcpy(void * to, const void * from, size_t n) | ||
197 | { | ||
198 | int d0, d1, d2; | ||
199 | __asm__ __volatile__( | ||
200 | "rep ; movsl\n\t" | ||
201 | "testb $2,%b4\n\t" | ||
202 | "je 1f\n\t" | ||
203 | "movsw\n" | ||
204 | "1:\ttestb $1,%b4\n\t" | ||
205 | "je 2f\n\t" | ||
206 | "movsb\n" | ||
207 | "2:" | ||
208 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) | ||
209 | :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) | ||
210 | : "memory"); | ||
211 | return (to); | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * This looks horribly ugly, but the compiler can optimize it totally, | ||
216 | * as the count is constant. | ||
217 | */ | ||
218 | static inline void * __constant_memcpy(void * to, const void * from, size_t n) | ||
219 | { | ||
220 | if (n <= 128) | ||
221 | return __builtin_memcpy(to, from, n); | ||
222 | |||
223 | #define COMMON(x) \ | ||
224 | __asm__ __volatile__( \ | ||
225 | "rep ; movsl" \ | ||
226 | x \ | ||
227 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) \ | ||
228 | : "0" (n/4),"1" ((long) to),"2" ((long) from) \ | ||
229 | : "memory"); | ||
230 | { | ||
231 | int d0, d1, d2; | ||
232 | switch (n % 4) { | ||
233 | case 0: COMMON(""); return to; | ||
234 | case 1: COMMON("\n\tmovsb"); return to; | ||
235 | case 2: COMMON("\n\tmovsw"); return to; | ||
236 | default: COMMON("\n\tmovsw\n\tmovsb"); return to; | ||
237 | } | ||
238 | } | ||
239 | |||
240 | #undef COMMON | ||
241 | } | ||
242 | |||
243 | #define __HAVE_ARCH_MEMCPY | ||
244 | |||
245 | #ifdef CONFIG_X86_USE_3DNOW | ||
246 | |||
247 | #include <asm/mmx.h> | ||
248 | |||
249 | /* | ||
250 | * This CPU favours 3DNow strongly (eg AMD Athlon) | ||
251 | */ | ||
252 | |||
253 | static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) | ||
254 | { | ||
255 | if (len < 512) | ||
256 | return __constant_memcpy(to, from, len); | ||
257 | return _mmx_memcpy(to, from, len); | ||
258 | } | ||
259 | |||
260 | static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) | ||
261 | { | ||
262 | if (len < 512) | ||
263 | return __memcpy(to, from, len); | ||
264 | return _mmx_memcpy(to, from, len); | ||
265 | } | ||
266 | |||
267 | #define memcpy(t, f, n) \ | ||
268 | (__builtin_constant_p(n) ? \ | ||
269 | __constant_memcpy3d((t),(f),(n)) : \ | ||
270 | __memcpy3d((t),(f),(n))) | ||
271 | |||
272 | #else | ||
273 | |||
274 | /* | ||
275 | * No 3D Now! | ||
276 | */ | ||
277 | |||
278 | #define memcpy(t, f, n) \ | ||
279 | (__builtin_constant_p(n) ? \ | ||
280 | __constant_memcpy((t),(f),(n)) : \ | ||
281 | __memcpy((t),(f),(n))) | ||
282 | |||
283 | #endif | ||
284 | |||
285 | #define __HAVE_ARCH_MEMMOVE | ||
286 | void *memmove(void * dest,const void * src, size_t n); | ||
287 | |||
288 | #define memcmp __builtin_memcmp | ||
289 | |||
290 | #define __HAVE_ARCH_MEMCHR | ||
291 | static inline void * memchr(const void * cs,int c,size_t count) | ||
292 | { | ||
293 | int d0; | ||
294 | register void * __res; | ||
295 | if (!count) | ||
296 | return NULL; | ||
297 | __asm__ __volatile__( | ||
298 | "repne\n\t" | ||
299 | "scasb\n\t" | ||
300 | "je 1f\n\t" | ||
301 | "movl $1,%0\n" | ||
302 | "1:\tdecl %0" | ||
303 | :"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count)); | ||
304 | return __res; | ||
305 | } | ||
306 | |||
307 | static inline void * __memset_generic(void * s, char c,size_t count) | ||
308 | { | ||
309 | int d0, d1; | ||
310 | __asm__ __volatile__( | ||
311 | "rep\n\t" | ||
312 | "stosb" | ||
313 | : "=&c" (d0), "=&D" (d1) | ||
314 | :"a" (c),"1" (s),"0" (count) | ||
315 | :"memory"); | ||
316 | return s; | ||
317 | } | ||
318 | |||
319 | /* we might want to write optimized versions of these later */ | ||
320 | #define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) | ||
321 | |||
322 | /* | ||
323 | * memset(x,0,y) is a reasonably common thing to do, so we want to fill | ||
324 | * things 32 bits at a time even when we don't know the size of the | ||
325 | * area at compile-time.. | ||
326 | */ | ||
327 | static inline void * __constant_c_memset(void * s, unsigned long c, size_t count) | ||
328 | { | ||
329 | int d0, d1; | ||
330 | __asm__ __volatile__( | ||
331 | "rep ; stosl\n\t" | ||
332 | "testb $2,%b3\n\t" | ||
333 | "je 1f\n\t" | ||
334 | "stosw\n" | ||
335 | "1:\ttestb $1,%b3\n\t" | ||
336 | "je 2f\n\t" | ||
337 | "stosb\n" | ||
338 | "2:" | ||
339 | : "=&c" (d0), "=&D" (d1) | ||
340 | :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) | ||
341 | :"memory"); | ||
342 | return (s); | ||
343 | } | ||
344 | |||
345 | /* Added by Gertjan van Wingerde to make minix and sysv module work */ | ||
346 | #define __HAVE_ARCH_STRNLEN | ||
347 | static inline size_t strnlen(const char * s, size_t count) | ||
348 | { | ||
349 | int d0; | ||
350 | register int __res; | ||
351 | __asm__ __volatile__( | ||
352 | "movl %2,%0\n\t" | ||
353 | "jmp 2f\n" | ||
354 | "1:\tcmpb $0,(%0)\n\t" | ||
355 | "je 3f\n\t" | ||
356 | "incl %0\n" | ||
357 | "2:\tdecl %1\n\t" | ||
358 | "cmpl $-1,%1\n\t" | ||
359 | "jne 1b\n" | ||
360 | "3:\tsubl %2,%0" | ||
361 | :"=a" (__res), "=&d" (d0) | ||
362 | :"c" (s),"1" (count)); | ||
363 | return __res; | ||
364 | } | ||
365 | /* end of additional stuff */ | ||
366 | |||
367 | #define __HAVE_ARCH_STRSTR | ||
368 | |||
369 | extern char *strstr(const char *cs, const char *ct); | ||
370 | |||
371 | /* | ||
372 | * This looks horribly ugly, but the compiler can optimize it totally, | ||
373 | * as by now we know that both pattern and count are constant. | ||
374 | */ | ||
375 | static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) | ||
376 | { | ||
377 | switch (count) { | ||
378 | case 0: | ||
379 | return s; | ||
380 | case 1: | ||
381 | *(unsigned char *)s = pattern; | ||
382 | return s; | ||
383 | case 2: | ||
384 | *(unsigned short *)s = pattern; | ||
385 | return s; | ||
386 | case 3: | ||
387 | *(unsigned short *)s = pattern; | ||
388 | *(2+(unsigned char *)s) = pattern; | ||
389 | return s; | ||
390 | case 4: | ||
391 | *(unsigned long *)s = pattern; | ||
392 | return s; | ||
393 | } | ||
394 | #define COMMON(x) \ | ||
395 | __asm__ __volatile__( \ | ||
396 | "rep ; stosl" \ | ||
397 | x \ | ||
398 | : "=&c" (d0), "=&D" (d1) \ | ||
399 | : "a" (pattern),"0" (count/4),"1" ((long) s) \ | ||
400 | : "memory") | ||
401 | { | ||
402 | int d0, d1; | ||
403 | switch (count % 4) { | ||
404 | case 0: COMMON(""); return s; | ||
405 | case 1: COMMON("\n\tstosb"); return s; | ||
406 | case 2: COMMON("\n\tstosw"); return s; | ||
407 | default: COMMON("\n\tstosw\n\tstosb"); return s; | ||
408 | } | ||
409 | } | ||
410 | |||
411 | #undef COMMON | ||
412 | } | ||
413 | |||
414 | #define __constant_c_x_memset(s, c, count) \ | ||
415 | (__builtin_constant_p(count) ? \ | ||
416 | __constant_c_and_count_memset((s),(c),(count)) : \ | ||
417 | __constant_c_memset((s),(c),(count))) | ||
418 | |||
419 | #define __memset(s, c, count) \ | ||
420 | (__builtin_constant_p(count) ? \ | ||
421 | __constant_count_memset((s),(c),(count)) : \ | ||
422 | __memset_generic((s),(c),(count))) | ||
423 | |||
424 | #define __HAVE_ARCH_MEMSET | ||
425 | #define memset(s, c, count) \ | ||
426 | (__builtin_constant_p(c) ? \ | ||
427 | __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ | ||
428 | __memset((s),(c),(count))) | ||
429 | |||
430 | /* | ||
431 | * find the first occurrence of byte 'c', or 1 past the area if none | ||
432 | */ | ||
433 | #define __HAVE_ARCH_MEMSCAN | ||
434 | static inline void * memscan(void * addr, int c, size_t size) | ||
435 | { | ||
436 | if (!size) | ||
437 | return addr; | ||
438 | __asm__("repnz; scasb\n\t" | ||
439 | "jnz 1f\n\t" | ||
440 | "dec %%edi\n" | ||
441 | "1:" | ||
442 | : "=D" (addr), "=c" (size) | ||
443 | : "0" (addr), "1" (size), "a" (c)); | ||
444 | return addr; | ||
445 | } | ||
446 | |||
447 | #endif /* __KERNEL__ */ | ||
448 | |||
449 | #endif | ||
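The constant-fill path in memset() above depends on byte replication: multiplying the fill byte by 0x01010101UL smears it across all four byte lanes of a long, so __constant_c_memset() and __constant_c_and_count_memset() can store a whole word per rep;stosl iteration. A tiny standalone illustration of that step (not part of the header):

/* Byte replication used by the constant-c memset paths above. */
static inline unsigned long replicate_fill_byte(unsigned char c)
{
	return 0x01010101UL * c;	/* e.g. 0xab -> 0xabababab */
}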
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h new file mode 100644 index 000000000000..dfc1114c1b6f --- /dev/null +++ b/include/asm-i386/suspend.h | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Copyright 2001-2002 Pavel Machek <pavel@suse.cz> | ||
3 | * Based on code | ||
4 | * Copyright 2001 Patrick Mochel <mochel@osdl.org> | ||
5 | */ | ||
6 | #include <asm/desc.h> | ||
7 | #include <asm/i387.h> | ||
8 | |||
9 | static inline int | ||
10 | arch_prepare_suspend(void) | ||
11 | { | ||
12 | /* If you want to make a non-PSE machine work, turn off paging | ||
13 | in swsusp_arch_suspend. swsusp_pg_dir should have an identity mapping, so | ||
14 | it could work... */ | ||
15 | if (!cpu_has_pse) { | ||
16 | printk(KERN_ERR "PSE is required for swsusp.\n"); | ||
17 | return -EPERM; | ||
18 | } | ||
19 | return 0; | ||
20 | } | ||
21 | |||
22 | /* image of the saved processor state */ | ||
23 | struct saved_context { | ||
24 | u16 es, fs, gs, ss; | ||
25 | unsigned long cr0, cr2, cr3, cr4; | ||
26 | u16 gdt_pad; | ||
27 | u16 gdt_limit; | ||
28 | unsigned long gdt_base; | ||
29 | u16 idt_pad; | ||
30 | u16 idt_limit; | ||
31 | unsigned long idt_base; | ||
32 | u16 ldt; | ||
33 | u16 tss; | ||
34 | unsigned long tr; | ||
35 | unsigned long safety; | ||
36 | unsigned long return_address; | ||
37 | } __attribute__((packed)); | ||
38 | |||
39 | #define loaddebug(thread,register) \ | ||
40 | __asm__("movl %0,%%db" #register \ | ||
41 | : /* no output */ \ | ||
42 | :"r" ((thread)->debugreg[register])) | ||
43 | |||
44 | #ifdef CONFIG_ACPI_SLEEP | ||
45 | extern unsigned long saved_eip; | ||
46 | extern unsigned long saved_esp; | ||
47 | extern unsigned long saved_ebp; | ||
48 | extern unsigned long saved_ebx; | ||
49 | extern unsigned long saved_esi; | ||
50 | extern unsigned long saved_edi; | ||
51 | |||
52 | static inline void acpi_save_register_state(unsigned long return_point) | ||
53 | { | ||
54 | saved_eip = return_point; | ||
55 | asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); | ||
56 | asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); | ||
57 | asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); | ||
58 | asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); | ||
59 | asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); | ||
60 | } | ||
61 | |||
62 | #define acpi_restore_register_state() do {} while (0) | ||
63 | |||
64 | /* routines for saving/restoring kernel state */ | ||
65 | extern int acpi_save_state_mem(void); | ||
66 | #endif | ||
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h new file mode 100644 index 000000000000..6f74d4c44a0e --- /dev/null +++ b/include/asm-i386/system.h | |||
@@ -0,0 +1,473 @@ | |||
1 | #ifndef __ASM_SYSTEM_H | ||
2 | #define __ASM_SYSTEM_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/kernel.h> | ||
6 | #include <asm/segment.h> | ||
7 | #include <asm/cpufeature.h> | ||
8 | #include <linux/bitops.h> /* for LOCK_PREFIX */ | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | struct task_struct; /* one of the stranger aspects of C forward declarations.. */ | ||
13 | extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); | ||
14 | |||
15 | #define switch_to(prev,next,last) do { \ | ||
16 | unsigned long esi,edi; \ | ||
17 | asm volatile("pushfl\n\t" \ | ||
18 | "pushl %%ebp\n\t" \ | ||
19 | "movl %%esp,%0\n\t" /* save ESP */ \ | ||
20 | "movl %5,%%esp\n\t" /* restore ESP */ \ | ||
21 | "movl $1f,%1\n\t" /* save EIP */ \ | ||
22 | "pushl %6\n\t" /* restore EIP */ \ | ||
23 | "jmp __switch_to\n" \ | ||
24 | "1:\t" \ | ||
25 | "popl %%ebp\n\t" \ | ||
26 | "popfl" \ | ||
27 | :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ | ||
28 | "=a" (last),"=S" (esi),"=D" (edi) \ | ||
29 | :"m" (next->thread.esp),"m" (next->thread.eip), \ | ||
30 | "2" (prev), "d" (next)); \ | ||
31 | } while (0) | ||
32 | |||
33 | #define _set_base(addr,base) do { unsigned long __pr; \ | ||
34 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
35 | "rorl $16,%%edx\n\t" \ | ||
36 | "movb %%dl,%2\n\t" \ | ||
37 | "movb %%dh,%3" \ | ||
38 | :"=&d" (__pr) \ | ||
39 | :"m" (*((addr)+2)), \ | ||
40 | "m" (*((addr)+4)), \ | ||
41 | "m" (*((addr)+7)), \ | ||
42 | "0" (base) \ | ||
43 | ); } while(0) | ||
44 | |||
45 | #define _set_limit(addr,limit) do { unsigned long __lr; \ | ||
46 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
47 | "rorl $16,%%edx\n\t" \ | ||
48 | "movb %2,%%dh\n\t" \ | ||
49 | "andb $0xf0,%%dh\n\t" \ | ||
50 | "orb %%dh,%%dl\n\t" \ | ||
51 | "movb %%dl,%2" \ | ||
52 | :"=&d" (__lr) \ | ||
53 | :"m" (*(addr)), \ | ||
54 | "m" (*((addr)+6)), \ | ||
55 | "0" (limit) \ | ||
56 | ); } while(0) | ||
57 | |||
58 | #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) | ||
59 | #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 ) | ||
60 | |||
61 | static inline unsigned long _get_base(char * addr) | ||
62 | { | ||
63 | unsigned long __base; | ||
64 | __asm__("movb %3,%%dh\n\t" | ||
65 | "movb %2,%%dl\n\t" | ||
66 | "shll $16,%%edx\n\t" | ||
67 | "movw %1,%%dx" | ||
68 | :"=&d" (__base) | ||
69 | :"m" (*((addr)+2)), | ||
70 | "m" (*((addr)+4)), | ||
71 | "m" (*((addr)+7))); | ||
72 | return __base; | ||
73 | } | ||
74 | |||
75 | #define get_base(ldt) _get_base( ((char *)&(ldt)) ) | ||
76 | |||
77 | /* | ||
78 | * Load a segment. Fall back on loading the zero | ||
79 | * segment if something goes wrong.. | ||
80 | */ | ||
81 | #define loadsegment(seg,value) \ | ||
82 | asm volatile("\n" \ | ||
83 | "1:\t" \ | ||
84 | "movl %0,%%" #seg "\n" \ | ||
85 | "2:\n" \ | ||
86 | ".section .fixup,\"ax\"\n" \ | ||
87 | "3:\t" \ | ||
88 | "pushl $0\n\t" \ | ||
89 | "popl %%" #seg "\n\t" \ | ||
90 | "jmp 2b\n" \ | ||
91 | ".previous\n" \ | ||
92 | ".section __ex_table,\"a\"\n\t" \ | ||
93 | ".align 4\n\t" \ | ||
94 | ".long 1b,3b\n" \ | ||
95 | ".previous" \ | ||
96 | : :"m" (*(unsigned int *)&(value))) | ||
97 | |||
98 | /* | ||
99 | * Save a segment register away | ||
100 | */ | ||
101 | #define savesegment(seg, value) \ | ||
102 | asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value))) | ||
103 | |||
104 | /* | ||
105 | * Clear and set 'TS' bit respectively | ||
106 | */ | ||
107 | #define clts() __asm__ __volatile__ ("clts") | ||
108 | #define read_cr0() ({ \ | ||
109 | unsigned int __dummy; \ | ||
110 | __asm__( \ | ||
111 | "movl %%cr0,%0\n\t" \ | ||
112 | :"=r" (__dummy)); \ | ||
113 | __dummy; \ | ||
114 | }) | ||
115 | #define write_cr0(x) \ | ||
116 | __asm__("movl %0,%%cr0": :"r" (x)); | ||
117 | |||
118 | #define read_cr4() ({ \ | ||
119 | unsigned int __dummy; \ | ||
120 | __asm__( \ | ||
121 | "movl %%cr4,%0\n\t" \ | ||
122 | :"=r" (__dummy)); \ | ||
123 | __dummy; \ | ||
124 | }) | ||
125 | #define write_cr4(x) \ | ||
126 | __asm__("movl %0,%%cr4": :"r" (x)); | ||
127 | #define stts() write_cr0(8 | read_cr0()) | ||
128 | |||
129 | #endif /* __KERNEL__ */ | ||
130 | |||
131 | #define wbinvd() \ | ||
132 | __asm__ __volatile__ ("wbinvd": : :"memory"); | ||
133 | |||
134 | static inline unsigned long get_limit(unsigned long segment) | ||
135 | { | ||
136 | unsigned long __limit; | ||
137 | __asm__("lsll %1,%0" | ||
138 | :"=r" (__limit):"r" (segment)); | ||
139 | return __limit+1; | ||
140 | } | ||
141 | |||
142 | #define nop() __asm__ __volatile__ ("nop") | ||
143 | |||
144 | #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) | ||
145 | |||
146 | #define tas(ptr) (xchg((ptr),1)) | ||
147 | |||
148 | struct __xchg_dummy { unsigned long a[100]; }; | ||
149 | #define __xg(x) ((struct __xchg_dummy *)(x)) | ||
150 | |||
151 | |||
152 | /* | ||
153 | * The semantics of CMPXCHG8B are a bit strange; this is why | ||
154 | * there is a loop and the loading of %%eax and %%edx has to | ||
155 | * be inside. This inlines well in most cases, the cached | ||
156 | * cost is around ~38 cycles. (in the future we might want | ||
157 | * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that | ||
158 | * might have an implicit FPU-save as a cost, so it's not | ||
159 | * clear which path to go.) | ||
160 | * | ||
161 | * cmpxchg8b must be used with the lock prefix here to allow | ||
162 | * the instruction to be executed atomically, see page 3-102 | ||
163 | * of the instruction set reference 24319102.pdf. We need | ||
164 | * the reader side to see the coherent 64bit value. | ||
165 | */ | ||
166 | static inline void __set_64bit (unsigned long long * ptr, | ||
167 | unsigned int low, unsigned int high) | ||
168 | { | ||
169 | __asm__ __volatile__ ( | ||
170 | "\n1:\t" | ||
171 | "movl (%0), %%eax\n\t" | ||
172 | "movl 4(%0), %%edx\n\t" | ||
173 | "lock cmpxchg8b (%0)\n\t" | ||
174 | "jnz 1b" | ||
175 | : /* no outputs */ | ||
176 | : "D"(ptr), | ||
177 | "b"(low), | ||
178 | "c"(high) | ||
179 | : "ax","dx","memory"); | ||
180 | } | ||
181 | |||
182 | static inline void __set_64bit_constant (unsigned long long *ptr, | ||
183 | unsigned long long value) | ||
184 | { | ||
185 | __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); | ||
186 | } | ||
187 | #define ll_low(x) *(((unsigned int*)&(x))+0) | ||
188 | #define ll_high(x) *(((unsigned int*)&(x))+1) | ||
189 | |||
190 | static inline void __set_64bit_var (unsigned long long *ptr, | ||
191 | unsigned long long value) | ||
192 | { | ||
193 | __set_64bit(ptr,ll_low(value), ll_high(value)); | ||
194 | } | ||
195 | |||
196 | #define set_64bit(ptr,value) \ | ||
197 | (__builtin_constant_p(value) ? \ | ||
198 | __set_64bit_constant(ptr, value) : \ | ||
199 | __set_64bit_var(ptr, value) ) | ||
200 | |||
201 | #define _set_64bit(ptr,value) \ | ||
202 | (__builtin_constant_p(value) ? \ | ||
203 | __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ | ||
204 | __set_64bit(ptr, ll_low(value), ll_high(value)) ) | ||
205 | |||
206 | /* | ||
207 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | ||
208 | * Note 2: xchg has a side effect, so the volatile attribute is necessary, | ||
209 | * but generally the primitive is invalid, *ptr is output argument. --ANK | ||
210 | */ | ||
211 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | ||
212 | { | ||
213 | switch (size) { | ||
214 | case 1: | ||
215 | __asm__ __volatile__("xchgb %b0,%1" | ||
216 | :"=q" (x) | ||
217 | :"m" (*__xg(ptr)), "0" (x) | ||
218 | :"memory"); | ||
219 | break; | ||
220 | case 2: | ||
221 | __asm__ __volatile__("xchgw %w0,%1" | ||
222 | :"=r" (x) | ||
223 | :"m" (*__xg(ptr)), "0" (x) | ||
224 | :"memory"); | ||
225 | break; | ||
226 | case 4: | ||
227 | __asm__ __volatile__("xchgl %0,%1" | ||
228 | :"=r" (x) | ||
229 | :"m" (*__xg(ptr)), "0" (x) | ||
230 | :"memory"); | ||
231 | break; | ||
232 | } | ||
233 | return x; | ||
234 | } | ||
235 | |||
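A hedged usage sketch (hypothetical flag and helper): xchg() swaps a new value into memory and returns the old one in a single atomic step, which makes test-and-clear style hand-offs easy.

static int pending;				/* shared with an interrupt handler */

static int test_and_clear_pending(void)
{
	/* read the old value and reset the flag atomically */
	return xchg(&pending, 0);
}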
236 | /* | ||
237 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | ||
238 | * store NEW in MEM. Return the initial value in MEM. Success is | ||
239 | * indicated by comparing RETURN with OLD. | ||
240 | */ | ||
241 | |||
242 | #ifdef CONFIG_X86_CMPXCHG | ||
243 | #define __HAVE_ARCH_CMPXCHG 1 | ||
244 | #endif | ||
245 | |||
246 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | ||
247 | unsigned long new, int size) | ||
248 | { | ||
249 | unsigned long prev; | ||
250 | switch (size) { | ||
251 | case 1: | ||
252 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" | ||
253 | : "=a"(prev) | ||
254 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
255 | : "memory"); | ||
256 | return prev; | ||
257 | case 2: | ||
258 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | ||
259 | : "=a"(prev) | ||
260 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
261 | : "memory"); | ||
262 | return prev; | ||
263 | case 4: | ||
264 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" | ||
265 | : "=a"(prev) | ||
266 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | ||
267 | : "memory"); | ||
268 | return prev; | ||
269 | } | ||
270 | return old; | ||
271 | } | ||
272 | |||
273 | #define cmpxchg(ptr,o,n)\ | ||
274 | ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ | ||
275 | (unsigned long)(n),sizeof(*(ptr)))) | ||
276 | |||
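A minimal sketch of the usual compare-and-swap retry loop built on cmpxchg() (the counter and helper are hypothetical): the update only takes effect if no other CPU changed the word in between, otherwise the loop retries with the freshly observed value.

static unsigned long counter;

static void counter_add(unsigned long delta)
{
	unsigned long old, new;

	do {
		old = counter;
		new = old + delta;
		/* cmpxchg returns the value it found; retry unless it was still 'old' */
	} while (cmpxchg(&counter, old, new) != old);
}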
277 | #ifdef __KERNEL__ | ||
278 | struct alt_instr { | ||
279 | __u8 *instr; /* original instruction */ | ||
280 | __u8 *replacement; | ||
281 | __u8 cpuid; /* cpuid bit set for replacement */ | ||
282 | __u8 instrlen; /* length of original instruction */ | ||
283 | __u8 replacementlen; /* length of new instruction, <= instrlen */ | ||
284 | __u8 pad; | ||
285 | }; | ||
286 | #endif | ||
287 | |||
288 | /* | ||
289 | * Alternative instructions for different CPU types or capabilities. | ||
290 | * | ||
291 | * This allows optimized instructions to be used even in generic | ||
292 | * binary kernels. | ||
293 | * | ||
294 | * The length of oldinstr must be greater than or equal to the length of | ||
295 | * newinstr; it can be padded with nops as needed. | ||
296 | * | ||
297 | * For non-barrier-like inlines please define new variants | ||
298 | * without volatile and the memory clobber. | ||
299 | */ | ||
300 | #define alternative(oldinstr, newinstr, feature) \ | ||
301 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
302 | ".section .altinstructions,\"a\"\n" \ | ||
303 | " .align 4\n" \ | ||
304 | " .long 661b\n" /* label */ \ | ||
305 | " .long 663f\n" /* new instruction */ \ | ||
306 | " .byte %c0\n" /* feature bit */ \ | ||
307 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
308 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
309 | ".previous\n" \ | ||
310 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
311 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
312 | ".previous" :: "i" (feature) : "memory") | ||
313 | |||
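Sketch of how alternative() is meant to be used (mb()/rmb() later in this header are the real in-tree users): the generic instruction sequence is patched to the faster one at boot when the CPU sets the given feature bit. The wrapper name below is hypothetical.

static inline void full_fence(void)
{
	/* falls back to a locked add on CPUs without SSE2 (X86_FEATURE_XMM2) */
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}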
314 | /* | ||
315 | * Alternative inline assembly with input. | ||
316 | * | ||
317 | * Peculiarities: | ||
318 | * No memory clobber here. | ||
319 | * Argument numbers start with 1. | ||
320 | * It is best to use constraints that are fixed size (like (%1) ... "r"). | ||
321 | * If you use variable-sized constraints like "m" or "g" in the | ||
322 | * replacement, make sure to pad to the worst-case length. | ||
323 | */ | ||
324 | #define alternative_input(oldinstr, newinstr, feature, input...) \ | ||
325 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
326 | ".section .altinstructions,\"a\"\n" \ | ||
327 | " .align 4\n" \ | ||
328 | " .long 661b\n" /* label */ \ | ||
329 | " .long 663f\n" /* new instruction */ \ | ||
330 | " .byte %c0\n" /* feature bit */ \ | ||
331 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
332 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
333 | ".previous\n" \ | ||
334 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
335 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
336 | ".previous" :: "i" (feature), ##input) | ||
337 | |||
338 | /* | ||
339 | * Force strict CPU ordering. | ||
340 | * And yes, this is required on UP too when we're talking | ||
341 | * to devices. | ||
342 | * | ||
343 | * For now, "wmb()" doesn't actually do anything, as all | ||
344 | * Intel CPUs follow what Intel calls a *Processor Order*, | ||
345 | * in which all writes are seen in program order even | ||
346 | * outside the CPU. | ||
347 | * | ||
348 | * I expect future Intel CPUs to have a weaker ordering, | ||
349 | * but I'd also expect them to finally get their act together | ||
350 | * and add some real memory barriers if so. | ||
351 | * | ||
352 | * Some non-Intel clones support out-of-order stores. wmb() ceases to be a | ||
353 | * nop for these. | ||
354 | */ | ||
355 | |||
356 | |||
357 | /* | ||
358 | * Actually only lfence would be needed for mb() because all stores done | ||
359 | * by the kernel should already be ordered. But keep a full barrier for now. | ||
360 | */ | ||
361 | |||
362 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
363 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
364 | |||
365 | /** | ||
366 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
367 | * depend on. | ||
368 | * | ||
369 | * No data-dependent reads from memory-like regions are ever reordered | ||
370 | * over this barrier. All reads preceding this primitive are guaranteed | ||
371 | * to access memory (but not necessarily other CPUs' caches) before any | ||
372 | * reads following this primitive that depend on the data returned by | ||
373 | * any of the preceding reads. This primitive is much lighter weight than | ||
374 | * rmb() on most CPUs, and is never heavier weight than | ||
375 | * rmb(). | ||
376 | * | ||
377 | * These ordering constraints are respected by both the local CPU | ||
378 | * and the compiler. | ||
379 | * | ||
380 | * Ordering is not guaranteed by anything other than these primitives, | ||
381 | * not even by data dependencies. See the documentation for | ||
382 | * memory_barrier() for examples and URLs to more information. | ||
383 | * | ||
384 | * For example, the following code would force ordering (the initial | ||
385 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
386 | * | ||
387 | * <programlisting> | ||
388 | * CPU 0 CPU 1 | ||
389 | * | ||
390 | * b = 2; | ||
391 | * memory_barrier(); | ||
392 | * p = &b; q = p; | ||
393 | * read_barrier_depends(); | ||
394 | * d = *q; | ||
395 | * </programlisting> | ||
396 | * | ||
397 | * because the read of "*q" depends on the read of "p" and these | ||
398 | * two reads are separated by a read_barrier_depends(). However, | ||
399 | * the following code, with the same initial values for "a" and "b": | ||
400 | * | ||
401 | * <programlisting> | ||
402 | * CPU 0 CPU 1 | ||
403 | * | ||
404 | * a = 2; | ||
405 | * memory_barrier(); | ||
406 | * b = 3; y = b; | ||
407 | * read_barrier_depends(); | ||
408 | * x = a; | ||
409 | * </programlisting> | ||
410 | * | ||
411 | * does not enforce ordering, since there is no data dependency between | ||
412 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
413 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
414 | * in cases like this where there are no data dependencies. | ||
415 | **/ | ||
416 | |||
417 | #define read_barrier_depends() do { } while(0) | ||
418 | |||
419 | #ifdef CONFIG_X86_OOSTORE | ||
420 | /* Actually there are no out-of-order-store capable CPUs for now that do SSE, | ||
421 | but make it a possibility already. */ | ||
422 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
423 | #else | ||
424 | #define wmb() __asm__ __volatile__ ("": : :"memory") | ||
425 | #endif | ||
426 | |||
427 | #ifdef CONFIG_SMP | ||
428 | #define smp_mb() mb() | ||
429 | #define smp_rmb() rmb() | ||
430 | #define smp_wmb() wmb() | ||
431 | #define smp_read_barrier_depends() read_barrier_depends() | ||
432 | #define set_mb(var, value) do { xchg(&var, value); } while (0) | ||
433 | #else | ||
434 | #define smp_mb() barrier() | ||
435 | #define smp_rmb() barrier() | ||
436 | #define smp_wmb() barrier() | ||
437 | #define smp_read_barrier_depends() do { } while(0) | ||
438 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
439 | #endif | ||
440 | |||
441 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | ||
442 | |||
443 | /* interrupt control.. */ | ||
444 | #define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0) | ||
445 | #define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0) | ||
446 | #define local_irq_disable() __asm__ __volatile__("cli": : :"memory") | ||
447 | #define local_irq_enable() __asm__ __volatile__("sti": : :"memory") | ||
448 | /* used in the idle loop; sti takes one instruction cycle to complete */ | ||
449 | #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") | ||
450 | |||
451 | #define irqs_disabled() \ | ||
452 | ({ \ | ||
453 | unsigned long flags; \ | ||
454 | local_save_flags(flags); \ | ||
455 | !(flags & (1<<9)); \ | ||
456 | }) | ||
457 | |||
458 | /* For spinlocks etc */ | ||
459 | #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") | ||
460 | |||
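A typical pattern (hypothetical helper) for the flag-saving pair: interrupts are disabled only for a short critical section and the caller's previous IF state is restored afterwards, so the code works whether or not interrupts were already off.

static void touch_shared_state(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* pushfl ; popl ; cli */
	/* ... short critical section, no sleeping here ... */
	local_irq_restore(flags);	/* restore the caller's interrupt state */
}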
461 | /* | ||
462 | * disable hlt during certain critical i/o operations | ||
463 | */ | ||
464 | #define HAVE_DISABLE_HLT | ||
465 | void disable_hlt(void); | ||
466 | void enable_hlt(void); | ||
467 | |||
468 | extern int es7000_plat; | ||
469 | void cpu_idle_wait(void); | ||
470 | |||
471 | extern unsigned long arch_align_stack(unsigned long sp); | ||
472 | |||
473 | #endif | ||
diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h new file mode 100644 index 000000000000..72c10e3190f8 --- /dev/null +++ b/include/asm-i386/termbits.h | |||
@@ -0,0 +1,173 @@ | |||
1 | #ifndef __ARCH_I386_TERMBITS_H__ | ||
2 | #define __ARCH_I386_TERMBITS_H__ | ||
3 | |||
4 | #include <linux/posix_types.h> | ||
5 | |||
6 | typedef unsigned char cc_t; | ||
7 | typedef unsigned int speed_t; | ||
8 | typedef unsigned int tcflag_t; | ||
9 | |||
10 | #define NCCS 19 | ||
11 | struct termios { | ||
12 | tcflag_t c_iflag; /* input mode flags */ | ||
13 | tcflag_t c_oflag; /* output mode flags */ | ||
14 | tcflag_t c_cflag; /* control mode flags */ | ||
15 | tcflag_t c_lflag; /* local mode flags */ | ||
16 | cc_t c_line; /* line discipline */ | ||
17 | cc_t c_cc[NCCS]; /* control characters */ | ||
18 | }; | ||
19 | |||
20 | /* c_cc characters */ | ||
21 | #define VINTR 0 | ||
22 | #define VQUIT 1 | ||
23 | #define VERASE 2 | ||
24 | #define VKILL 3 | ||
25 | #define VEOF 4 | ||
26 | #define VTIME 5 | ||
27 | #define VMIN 6 | ||
28 | #define VSWTC 7 | ||
29 | #define VSTART 8 | ||
30 | #define VSTOP 9 | ||
31 | #define VSUSP 10 | ||
32 | #define VEOL 11 | ||
33 | #define VREPRINT 12 | ||
34 | #define VDISCARD 13 | ||
35 | #define VWERASE 14 | ||
36 | #define VLNEXT 15 | ||
37 | #define VEOL2 16 | ||
38 | |||
39 | /* c_iflag bits */ | ||
40 | #define IGNBRK 0000001 | ||
41 | #define BRKINT 0000002 | ||
42 | #define IGNPAR 0000004 | ||
43 | #define PARMRK 0000010 | ||
44 | #define INPCK 0000020 | ||
45 | #define ISTRIP 0000040 | ||
46 | #define INLCR 0000100 | ||
47 | #define IGNCR 0000200 | ||
48 | #define ICRNL 0000400 | ||
49 | #define IUCLC 0001000 | ||
50 | #define IXON 0002000 | ||
51 | #define IXANY 0004000 | ||
52 | #define IXOFF 0010000 | ||
53 | #define IMAXBEL 0020000 | ||
54 | #define IUTF8 0040000 | ||
55 | |||
56 | /* c_oflag bits */ | ||
57 | #define OPOST 0000001 | ||
58 | #define OLCUC 0000002 | ||
59 | #define ONLCR 0000004 | ||
60 | #define OCRNL 0000010 | ||
61 | #define ONOCR 0000020 | ||
62 | #define ONLRET 0000040 | ||
63 | #define OFILL 0000100 | ||
64 | #define OFDEL 0000200 | ||
65 | #define NLDLY 0000400 | ||
66 | #define NL0 0000000 | ||
67 | #define NL1 0000400 | ||
68 | #define CRDLY 0003000 | ||
69 | #define CR0 0000000 | ||
70 | #define CR1 0001000 | ||
71 | #define CR2 0002000 | ||
72 | #define CR3 0003000 | ||
73 | #define TABDLY 0014000 | ||
74 | #define TAB0 0000000 | ||
75 | #define TAB1 0004000 | ||
76 | #define TAB2 0010000 | ||
77 | #define TAB3 0014000 | ||
78 | #define XTABS 0014000 | ||
79 | #define BSDLY 0020000 | ||
80 | #define BS0 0000000 | ||
81 | #define BS1 0020000 | ||
82 | #define VTDLY 0040000 | ||
83 | #define VT0 0000000 | ||
84 | #define VT1 0040000 | ||
85 | #define FFDLY 0100000 | ||
86 | #define FF0 0000000 | ||
87 | #define FF1 0100000 | ||
88 | |||
89 | /* c_cflag bit meaning */ | ||
90 | #define CBAUD 0010017 | ||
91 | #define B0 0000000 /* hang up */ | ||
92 | #define B50 0000001 | ||
93 | #define B75 0000002 | ||
94 | #define B110 0000003 | ||
95 | #define B134 0000004 | ||
96 | #define B150 0000005 | ||
97 | #define B200 0000006 | ||
98 | #define B300 0000007 | ||
99 | #define B600 0000010 | ||
100 | #define B1200 0000011 | ||
101 | #define B1800 0000012 | ||
102 | #define B2400 0000013 | ||
103 | #define B4800 0000014 | ||
104 | #define B9600 0000015 | ||
105 | #define B19200 0000016 | ||
106 | #define B38400 0000017 | ||
107 | #define EXTA B19200 | ||
108 | #define EXTB B38400 | ||
109 | #define CSIZE 0000060 | ||
110 | #define CS5 0000000 | ||
111 | #define CS6 0000020 | ||
112 | #define CS7 0000040 | ||
113 | #define CS8 0000060 | ||
114 | #define CSTOPB 0000100 | ||
115 | #define CREAD 0000200 | ||
116 | #define PARENB 0000400 | ||
117 | #define PARODD 0001000 | ||
118 | #define HUPCL 0002000 | ||
119 | #define CLOCAL 0004000 | ||
120 | #define CBAUDEX 0010000 | ||
121 | #define B57600 0010001 | ||
122 | #define B115200 0010002 | ||
123 | #define B230400 0010003 | ||
124 | #define B460800 0010004 | ||
125 | #define B500000 0010005 | ||
126 | #define B576000 0010006 | ||
127 | #define B921600 0010007 | ||
128 | #define B1000000 0010010 | ||
129 | #define B1152000 0010011 | ||
130 | #define B1500000 0010012 | ||
131 | #define B2000000 0010013 | ||
132 | #define B2500000 0010014 | ||
133 | #define B3000000 0010015 | ||
134 | #define B3500000 0010016 | ||
135 | #define B4000000 0010017 | ||
136 | #define CIBAUD 002003600000 /* input baud rate (not used) */ | ||
137 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | ||
138 | #define CRTSCTS 020000000000 /* flow control */ | ||
139 | |||
140 | /* c_lflag bits */ | ||
141 | #define ISIG 0000001 | ||
142 | #define ICANON 0000002 | ||
143 | #define XCASE 0000004 | ||
144 | #define ECHO 0000010 | ||
145 | #define ECHOE 0000020 | ||
146 | #define ECHOK 0000040 | ||
147 | #define ECHONL 0000100 | ||
148 | #define NOFLSH 0000200 | ||
149 | #define TOSTOP 0000400 | ||
150 | #define ECHOCTL 0001000 | ||
151 | #define ECHOPRT 0002000 | ||
152 | #define ECHOKE 0004000 | ||
153 | #define FLUSHO 0010000 | ||
154 | #define PENDIN 0040000 | ||
155 | #define IEXTEN 0100000 | ||
156 | |||
157 | /* tcflow() and TCXONC use these */ | ||
158 | #define TCOOFF 0 | ||
159 | #define TCOON 1 | ||
160 | #define TCIOFF 2 | ||
161 | #define TCION 3 | ||
162 | |||
163 | /* tcflush() and TCFLSH use these */ | ||
164 | #define TCIFLUSH 0 | ||
165 | #define TCOFLUSH 1 | ||
166 | #define TCIOFLUSH 2 | ||
167 | |||
168 | /* tcsetattr uses these */ | ||
169 | #define TCSANOW 0 | ||
170 | #define TCSADRAIN 1 | ||
171 | #define TCSAFLUSH 2 | ||
172 | |||
173 | #endif | ||
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h new file mode 100644 index 000000000000..03f548536d6b --- /dev/null +++ b/include/asm-i386/termios.h | |||
@@ -0,0 +1,107 @@ | |||
1 | #ifndef _I386_TERMIOS_H | ||
2 | #define _I386_TERMIOS_H | ||
3 | |||
4 | #include <asm/termbits.h> | ||
5 | #include <asm/ioctls.h> | ||
6 | |||
7 | struct winsize { | ||
8 | unsigned short ws_row; | ||
9 | unsigned short ws_col; | ||
10 | unsigned short ws_xpixel; | ||
11 | unsigned short ws_ypixel; | ||
12 | }; | ||
13 | |||
14 | #define NCC 8 | ||
15 | struct termio { | ||
16 | unsigned short c_iflag; /* input mode flags */ | ||
17 | unsigned short c_oflag; /* output mode flags */ | ||
18 | unsigned short c_cflag; /* control mode flags */ | ||
19 | unsigned short c_lflag; /* local mode flags */ | ||
20 | unsigned char c_line; /* line discipline */ | ||
21 | unsigned char c_cc[NCC]; /* control characters */ | ||
22 | }; | ||
23 | |||
24 | /* modem lines */ | ||
25 | #define TIOCM_LE 0x001 | ||
26 | #define TIOCM_DTR 0x002 | ||
27 | #define TIOCM_RTS 0x004 | ||
28 | #define TIOCM_ST 0x008 | ||
29 | #define TIOCM_SR 0x010 | ||
30 | #define TIOCM_CTS 0x020 | ||
31 | #define TIOCM_CAR 0x040 | ||
32 | #define TIOCM_RNG 0x080 | ||
33 | #define TIOCM_DSR 0x100 | ||
34 | #define TIOCM_CD TIOCM_CAR | ||
35 | #define TIOCM_RI TIOCM_RNG | ||
36 | #define TIOCM_OUT1 0x2000 | ||
37 | #define TIOCM_OUT2 0x4000 | ||
38 | #define TIOCM_LOOP 0x8000 | ||
39 | |||
40 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
41 | |||
42 | /* line disciplines */ | ||
43 | #define N_TTY 0 | ||
44 | #define N_SLIP 1 | ||
45 | #define N_MOUSE 2 | ||
46 | #define N_PPP 3 | ||
47 | #define N_STRIP 4 | ||
48 | #define N_AX25 5 | ||
49 | #define N_X25 6 /* X.25 async */ | ||
50 | #define N_6PACK 7 | ||
51 | #define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */ | ||
52 | #define N_R3964 9 /* Reserved for Simatic R3964 module */ | ||
53 | #define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */ | ||
54 | #define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ | ||
55 | #define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ | ||
56 | #define N_HDLC 13 /* synchronous HDLC */ | ||
57 | #define N_SYNC_PPP 14 /* synchronous PPP */ | ||
58 | #define N_HCI 15 /* Bluetooth HCI UART */ | ||
59 | |||
60 | #ifdef __KERNEL__ | ||
61 | #include <linux/module.h> | ||
62 | |||
63 | /* intr=^C quit=^\ erase=del kill=^U | ||
64 | eof=^D vtime=\0 vmin=\1 sxtc=\0 | ||
65 | start=^Q stop=^S susp=^Z eol=\0 | ||
66 | reprint=^R discard=^O werase=^W lnext=^V | ||
67 | eol2=\0 | ||
68 | */ | ||
69 | #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" | ||
70 | |||
71 | /* | ||
72 | * Translate a "termio" structure into a "termios". Ugh. | ||
73 | */ | ||
74 | #define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ | ||
75 | unsigned short __tmp; \ | ||
76 | get_user(__tmp,&(termio)->x); \ | ||
77 | *(unsigned short *) &(termios)->x = __tmp; \ | ||
78 | } | ||
79 | |||
80 | #define user_termio_to_kernel_termios(termios, termio) \ | ||
81 | ({ \ | ||
82 | SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ | ||
83 | SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ | ||
84 | SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ | ||
85 | SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ | ||
86 | copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ | ||
87 | }) | ||
88 | |||
89 | /* | ||
90 | * Translate a "termios" structure into a "termio". Ugh. | ||
91 | */ | ||
92 | #define kernel_termios_to_user_termio(termio, termios) \ | ||
93 | ({ \ | ||
94 | put_user((termios)->c_iflag, &(termio)->c_iflag); \ | ||
95 | put_user((termios)->c_oflag, &(termio)->c_oflag); \ | ||
96 | put_user((termios)->c_cflag, &(termio)->c_cflag); \ | ||
97 | put_user((termios)->c_lflag, &(termio)->c_lflag); \ | ||
98 | put_user((termios)->c_line, &(termio)->c_line); \ | ||
99 | copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ | ||
100 | }) | ||
101 | |||
102 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) | ||
103 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) | ||
104 | |||
105 | #endif /* __KERNEL__ */ | ||
106 | |||
107 | #endif /* _I386_TERMIOS_H */ | ||
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h new file mode 100644 index 000000000000..2cd57271801d --- /dev/null +++ b/include/asm-i386/thread_info.h | |||
@@ -0,0 +1,174 @@ | |||
1 | /* thread_info.h: i386 low-level thread information | ||
2 | * | ||
3 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
4 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | ||
5 | */ | ||
6 | |||
7 | #ifndef _ASM_THREAD_INFO_H | ||
8 | #define _ASM_THREAD_INFO_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/compiler.h> | ||
14 | #include <asm/page.h> | ||
15 | |||
16 | #ifndef __ASSEMBLY__ | ||
17 | #include <asm/processor.h> | ||
18 | #endif | ||
19 | |||
20 | /* | ||
21 | * low level task data that entry.S needs immediate access to | ||
22 | * - this struct should fit entirely inside of one cache line | ||
23 | * - this struct shares the supervisor stack pages | ||
24 | * - if the contents of this structure are changed, the assembly constants must also be changed | ||
25 | */ | ||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | struct thread_info { | ||
29 | struct task_struct *task; /* main task structure */ | ||
30 | struct exec_domain *exec_domain; /* execution domain */ | ||
31 | unsigned long flags; /* low level flags */ | ||
32 | unsigned long status; /* thread-synchronous flags */ | ||
33 | __u32 cpu; /* current CPU */ | ||
34 | __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ | ||
35 | |||
36 | |||
37 | mm_segment_t addr_limit; /* thread address space: | ||
38 | 0-0xBFFFFFFF for user-thread | ||
39 | 0-0xFFFFFFFF for kernel-thread | ||
40 | */ | ||
41 | struct restart_block restart_block; | ||
42 | |||
43 | unsigned long previous_esp; /* ESP of the previous stack in case | ||
44 | of nested (IRQ) stacks | ||
45 | */ | ||
46 | __u8 supervisor_stack[0]; | ||
47 | }; | ||
48 | |||
49 | #else /* !__ASSEMBLY__ */ | ||
50 | |||
51 | #include <asm/asm_offsets.h> | ||
52 | |||
53 | #endif | ||
54 | |||
55 | #define PREEMPT_ACTIVE 0x10000000 | ||
56 | #ifdef CONFIG_4KSTACKS | ||
57 | #define THREAD_SIZE (4096) | ||
58 | #else | ||
59 | #define THREAD_SIZE (8192) | ||
60 | #endif | ||
61 | |||
62 | #define STACK_WARN (THREAD_SIZE/8) | ||
63 | /* | ||
64 | * macros/functions for gaining access to the thread information structure | ||
65 | * | ||
66 | * preempt_count needs to be 1 initially, until the scheduler is functional. | ||
67 | */ | ||
68 | #ifndef __ASSEMBLY__ | ||
69 | |||
70 | #define INIT_THREAD_INFO(tsk) \ | ||
71 | { \ | ||
72 | .task = &tsk, \ | ||
73 | .exec_domain = &default_exec_domain, \ | ||
74 | .flags = 0, \ | ||
75 | .cpu = 0, \ | ||
76 | .preempt_count = 1, \ | ||
77 | .addr_limit = KERNEL_DS, \ | ||
78 | .restart_block = { \ | ||
79 | .fn = do_no_restart_syscall, \ | ||
80 | }, \ | ||
81 | } | ||
82 | |||
83 | #define init_thread_info (init_thread_union.thread_info) | ||
84 | #define init_stack (init_thread_union.stack) | ||
85 | |||
86 | |||
87 | /* how to get the thread information struct from C */ | ||
88 | static inline struct thread_info *current_thread_info(void) | ||
89 | { | ||
90 | struct thread_info *ti; | ||
91 | __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); | ||
92 | return ti; | ||
93 | } | ||
94 | |||
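Illustration only (hypothetical helper, assuming test_bit() from <asm/bitops.h>): because the stack pointer is simply masked down to the THREAD_SIZE boundary, any code running on a kernel stack can reach its thread_info cheaply, e.g. to poll the reschedule flag defined further below.

static inline int need_resched_hint(void)
{
	return test_bit(TIF_NEED_RESCHED, &current_thread_info()->flags);
}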
95 | /* how to get the current stack pointer from C */ | ||
96 | register unsigned long current_stack_pointer asm("esp") __attribute_used__; | ||
97 | |||
98 | /* thread information allocation */ | ||
99 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
100 | #define alloc_thread_info(tsk) \ | ||
101 | ({ \ | ||
102 | struct thread_info *ret; \ | ||
103 | \ | ||
104 | ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \ | ||
105 | if (ret) \ | ||
106 | memset(ret, 0, THREAD_SIZE); \ | ||
107 | ret; \ | ||
108 | }) | ||
109 | #else | ||
110 | #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) | ||
111 | #endif | ||
112 | |||
113 | #define free_thread_info(info) kfree(info) | ||
114 | #define get_thread_info(ti) get_task_struct((ti)->task) | ||
115 | #define put_thread_info(ti) put_task_struct((ti)->task) | ||
116 | |||
117 | #else /* !__ASSEMBLY__ */ | ||
118 | |||
119 | /* how to get the thread information struct from ASM */ | ||
120 | #define GET_THREAD_INFO(reg) \ | ||
121 | movl $-THREAD_SIZE, reg; \ | ||
122 | andl %esp, reg | ||
123 | |||
124 | /* use this one if reg already contains %esp */ | ||
125 | #define GET_THREAD_INFO_WITH_ESP(reg) \ | ||
126 | andl $-THREAD_SIZE, reg | ||
127 | |||
128 | #endif | ||
129 | |||
130 | /* | ||
131 | * thread information flags | ||
132 | * - these are process state flags that various assembly files may need to access | ||
133 | * - pending work-to-be-done flags are in LSW | ||
134 | * - other flags in MSW | ||
135 | */ | ||
136 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
137 | #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ | ||
138 | #define TIF_SIGPENDING 2 /* signal pending */ | ||
139 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | ||
140 | #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ | ||
141 | #define TIF_IRET 5 /* return with iret */ | ||
142 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | ||
143 | #define TIF_SECCOMP 8 /* secure computing */ | ||
144 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | ||
145 | #define TIF_MEMDIE 17 | ||
146 | |||
147 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
148 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
149 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | ||
150 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | ||
151 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | ||
152 | #define _TIF_IRET (1<<TIF_IRET) | ||
153 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | ||
154 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | ||
155 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | ||
156 | |||
157 | /* work to do on interrupt/exception return */ | ||
158 | #define _TIF_WORK_MASK \ | ||
159 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP)) | ||
160 | /* work to do on any return to u-space */ | ||
161 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | ||
162 | |||
163 | /* | ||
164 | * Thread-synchronous status. | ||
165 | * | ||
166 | * This is different from the flags in that nobody else | ||
167 | * ever touches our thread-synchronous status, so we don't | ||
168 | * have to worry about atomic accesses. | ||
169 | */ | ||
170 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ | ||
171 | |||
172 | #endif /* __KERNEL__ */ | ||
173 | |||
174 | #endif /* _ASM_THREAD_INFO_H */ | ||
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h new file mode 100644 index 000000000000..40c54f69780e --- /dev/null +++ b/include/asm-i386/timer.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef _ASMi386_TIMER_H | ||
2 | #define _ASMi386_TIMER_H | ||
3 | #include <linux/init.h> | ||
4 | |||
5 | /** | ||
6 | * struct timer_opts - used to define a timer source | ||
7 | * | ||
8 | * @name: name of the timer. | ||
9 | * @init: Probes and initializes the timer. Takes clock= override | ||
10 | * string as an argument. Returns 0 on success, anything else | ||
11 | * on failure. | ||
12 | * @mark_offset: called by the timer interrupt. | ||
13 | * @get_offset: called by gettimeofday(). Returns the number of microseconds | ||
14 | * since the last timer interrupt. | ||
15 | * @monotonic_clock: returns the number of nanoseconds since the init of the | ||
16 | * timer. | ||
17 | * @delay: delays this many clock cycles. | ||
18 | */ | ||
19 | struct timer_opts { | ||
20 | char* name; | ||
21 | void (*mark_offset)(void); | ||
22 | unsigned long (*get_offset)(void); | ||
23 | unsigned long long (*monotonic_clock)(void); | ||
24 | void (*delay)(unsigned long); | ||
25 | }; | ||
26 | |||
27 | struct init_timer_opts { | ||
28 | int (*init)(char *override); | ||
29 | struct timer_opts *opts; | ||
30 | }; | ||
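A hedged sketch of how a timer source would plug into this interface (all names below are hypothetical): the driver fills in a struct timer_opts with its callbacks and exposes an init_timer_opts whose init() probe decides whether the source is usable.

static int my_timer_init(char *override);		/* probe, returns 0 on success */
static void my_mark_offset(void);
static unsigned long my_get_offset(void);
static unsigned long long my_monotonic_clock(void);
static void my_delay(unsigned long loops);

static struct timer_opts timer_mine = {
	.name		 = "mine",
	.mark_offset	 = my_mark_offset,
	.get_offset	 = my_get_offset,
	.monotonic_clock = my_monotonic_clock,
	.delay		 = my_delay,
};

struct init_timer_opts timer_mine_init = {
	.init = my_timer_init,
	.opts = &timer_mine,
};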
31 | |||
32 | #define TICK_SIZE (tick_nsec / 1000) | ||
33 | |||
34 | extern struct timer_opts* __init select_timer(void); | ||
35 | extern void clock_fallback(void); | ||
36 | void setup_pit_timer(void); | ||
37 | |||
38 | /* Modifiers for buggy PIT handling */ | ||
39 | |||
40 | extern int pit_latch_buggy; | ||
41 | |||
42 | extern struct timer_opts *cur_timer; | ||
43 | extern int timer_ack; | ||
44 | |||
45 | /* list of externally defined timers */ | ||
46 | extern struct timer_opts timer_none; | ||
47 | extern struct timer_opts timer_pit; | ||
48 | extern struct init_timer_opts timer_pit_init; | ||
49 | extern struct init_timer_opts timer_tsc_init; | ||
50 | #ifdef CONFIG_X86_CYCLONE_TIMER | ||
51 | extern struct init_timer_opts timer_cyclone_init; | ||
52 | #endif | ||
53 | |||
54 | extern unsigned long calibrate_tsc(void); | ||
55 | extern void init_cpu_khz(void); | ||
56 | #ifdef CONFIG_HPET_TIMER | ||
57 | extern struct init_timer_opts timer_hpet_init; | ||
58 | extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr); | ||
59 | #endif | ||
60 | |||
61 | #ifdef CONFIG_X86_PM_TIMER | ||
62 | extern struct init_timer_opts timer_pmtmr_init; | ||
63 | #endif | ||
64 | #endif | ||
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h new file mode 100644 index 000000000000..b41e484c3445 --- /dev/null +++ b/include/asm-i386/timex.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/timex.h | ||
3 | * | ||
4 | * i386 architecture timex specifications | ||
5 | */ | ||
6 | #ifndef _ASMi386_TIMEX_H | ||
7 | #define _ASMi386_TIMEX_H | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <asm/processor.h> | ||
11 | |||
12 | #ifdef CONFIG_X86_ELAN | ||
13 | # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */ | ||
14 | #else | ||
15 | # define CLOCK_TICK_RATE 1193182 /* Underlying HZ */ | ||
16 | #endif | ||
17 | |||
18 | |||
19 | /* | ||
20 | * Standard way to access the cycle counter on i586+ CPUs. | ||
21 | * Currently only used on SMP. | ||
22 | * | ||
23 | * If you really have an SMP machine with i486 chips or older, | ||
24 | * compile for that, and this will just always return zero. | ||
25 | * That's ok, it just means that the nicer scheduling heuristics | ||
26 | * won't work for you. | ||
27 | * | ||
28 | * We only use the low 32 bits, and we'd simply better make sure | ||
29 | * that we reschedule before that wraps. Scheduling at least every | ||
30 | * four billion cycles just basically sounds like a good idea, | ||
31 | * regardless of how fast the machine is. | ||
32 | */ | ||
33 | typedef unsigned long long cycles_t; | ||
34 | |||
35 | static inline cycles_t get_cycles (void) | ||
36 | { | ||
37 | unsigned long long ret=0; | ||
38 | |||
39 | #ifndef CONFIG_X86_TSC | ||
40 | if (!cpu_has_tsc) | ||
41 | return 0; | ||
42 | #endif | ||
43 | |||
44 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | ||
45 | rdtscll(ret); | ||
46 | #endif | ||
47 | return ret; | ||
48 | } | ||
49 | |||
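Rough usage sketch (hypothetical helper): bracketing a region with get_cycles() yields elapsed TSC cycles (or 0 on TSC-less CPUs); cpu_khz, declared just below, is what converts cycles to wall time.

static unsigned long long time_region(void (*fn)(void))
{
	cycles_t start, end;

	start = get_cycles();
	fn();
	end = get_cycles();

	return end - start;	/* elapsed cycles, 0 if the CPU has no TSC */
}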
50 | extern unsigned long cpu_khz; | ||
51 | |||
52 | #endif | ||
diff --git a/include/asm-i386/tlb.h b/include/asm-i386/tlb.h new file mode 100644 index 000000000000..c006c5c92bea --- /dev/null +++ b/include/asm-i386/tlb.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _I386_TLB_H | ||
2 | #define _I386_TLB_H | ||
3 | |||
4 | /* | ||
5 | * x86 doesn't need any special per-pte or | ||
6 | * per-vma handling.. | ||
7 | */ | ||
8 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
9 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
10 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | ||
11 | |||
12 | /* | ||
13 | * .. because we flush the whole mm when it | ||
14 | * fills up. | ||
15 | */ | ||
16 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
17 | |||
18 | #include <asm-generic/tlb.h> | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h new file mode 100644 index 000000000000..f22fab0cea26 --- /dev/null +++ b/include/asm-i386/tlbflush.h | |||
@@ -0,0 +1,147 @@ | |||
1 | #ifndef _I386_TLBFLUSH_H | ||
2 | #define _I386_TLBFLUSH_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/mm.h> | ||
6 | #include <asm/processor.h> | ||
7 | |||
8 | #define __flush_tlb() \ | ||
9 | do { \ | ||
10 | unsigned int tmpreg; \ | ||
11 | \ | ||
12 | __asm__ __volatile__( \ | ||
13 | "movl %%cr3, %0; \n" \ | ||
14 | "movl %0, %%cr3; # flush TLB \n" \ | ||
15 | : "=r" (tmpreg) \ | ||
16 | :: "memory"); \ | ||
17 | } while (0) | ||
18 | |||
19 | /* | ||
20 | * Global pages have to be flushed a bit differently. Not a real | ||
21 | * performance problem because this does not happen often. | ||
22 | */ | ||
23 | #define __flush_tlb_global() \ | ||
24 | do { \ | ||
25 | unsigned int tmpreg; \ | ||
26 | \ | ||
27 | __asm__ __volatile__( \ | ||
28 | "movl %1, %%cr4; # turn off PGE \n" \ | ||
29 | "movl %%cr3, %0; \n" \ | ||
30 | "movl %0, %%cr3; # flush TLB \n" \ | ||
31 | "movl %2, %%cr4; # turn PGE back on \n" \ | ||
32 | : "=&r" (tmpreg) \ | ||
33 | : "r" (mmu_cr4_features & ~X86_CR4_PGE), \ | ||
34 | "r" (mmu_cr4_features) \ | ||
35 | : "memory"); \ | ||
36 | } while (0) | ||
37 | |||
38 | extern unsigned long pgkern_mask; | ||
39 | |||
40 | # define __flush_tlb_all() \ | ||
41 | do { \ | ||
42 | if (cpu_has_pge) \ | ||
43 | __flush_tlb_global(); \ | ||
44 | else \ | ||
45 | __flush_tlb(); \ | ||
46 | } while (0) | ||
47 | |||
48 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) | ||
49 | |||
50 | #define __flush_tlb_single(addr) \ | ||
51 | __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr)) | ||
52 | |||
53 | #ifdef CONFIG_X86_INVLPG | ||
54 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) | ||
55 | #else | ||
56 | # define __flush_tlb_one(addr) \ | ||
57 | do { \ | ||
58 | if (cpu_has_invlpg) \ | ||
59 | __flush_tlb_single(addr); \ | ||
60 | else \ | ||
61 | __flush_tlb(); \ | ||
62 | } while (0) | ||
63 | #endif | ||
64 | |||
65 | /* | ||
66 | * TLB flushing: | ||
67 | * | ||
68 | * - flush_tlb() flushes the current mm struct TLBs | ||
69 | * - flush_tlb_all() flushes all processes TLBs | ||
70 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
71 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
72 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
73 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
74 | * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables | ||
75 | * | ||
76 | * ..but the i386 has somewhat limited tlb flushing capabilities, | ||
77 | * and page-granular flushes are available only on i486 and up. | ||
78 | */ | ||
79 | |||
80 | #ifndef CONFIG_SMP | ||
81 | |||
82 | #define flush_tlb() __flush_tlb() | ||
83 | #define flush_tlb_all() __flush_tlb_all() | ||
84 | #define local_flush_tlb() __flush_tlb() | ||
85 | |||
86 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
87 | { | ||
88 | if (mm == current->active_mm) | ||
89 | __flush_tlb(); | ||
90 | } | ||
91 | |||
92 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
93 | unsigned long addr) | ||
94 | { | ||
95 | if (vma->vm_mm == current->active_mm) | ||
96 | __flush_tlb_one(addr); | ||
97 | } | ||
98 | |||
99 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
100 | unsigned long start, unsigned long end) | ||
101 | { | ||
102 | if (vma->vm_mm == current->active_mm) | ||
103 | __flush_tlb(); | ||
104 | } | ||
105 | |||
106 | #else | ||
107 | |||
108 | #include <asm/smp.h> | ||
109 | |||
110 | #define local_flush_tlb() \ | ||
111 | __flush_tlb() | ||
112 | |||
113 | extern void flush_tlb_all(void); | ||
114 | extern void flush_tlb_current_task(void); | ||
115 | extern void flush_tlb_mm(struct mm_struct *); | ||
116 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
117 | |||
118 | #define flush_tlb() flush_tlb_current_task() | ||
119 | |||
120 | static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) | ||
121 | { | ||
122 | flush_tlb_mm(vma->vm_mm); | ||
123 | } | ||
124 | |||
125 | #define TLBSTATE_OK 1 | ||
126 | #define TLBSTATE_LAZY 2 | ||
127 | |||
128 | struct tlb_state | ||
129 | { | ||
130 | struct mm_struct *active_mm; | ||
131 | int state; | ||
132 | char __cacheline_padding[L1_CACHE_BYTES-8]; | ||
133 | }; | ||
134 | DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); | ||
135 | |||
136 | |||
137 | #endif | ||
138 | |||
139 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() | ||
140 | |||
141 | static inline void flush_tlb_pgtables(struct mm_struct *mm, | ||
142 | unsigned long start, unsigned long end) | ||
143 | { | ||
144 | /* i386 does not keep any page table caches in TLB */ | ||
145 | } | ||
146 | |||
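For illustration only (hypothetical helper; set_pte() is assumed from <asm/pgtable.h>): after changing a single user PTE, the page-granular flush is the one callers reach for, and on pre-i486 CPUs it silently degrades to a full flush as described above.

static void update_user_pte(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t newval)
{
	set_pte(ptep, newval);		/* install the new translation */
	flush_tlb_page(vma, addr);	/* drop any stale TLB entry for addr */
}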
147 | #endif /* _I386_TLBFLUSH_H */ | ||
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h new file mode 100644 index 000000000000..98f9e6850cba --- /dev/null +++ b/include/asm-i386/topology.h | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/topology.h | ||
3 | * | ||
4 | * Written by: Matthew Dobson, IBM Corporation | ||
5 | * | ||
6 | * Copyright (C) 2002, IBM Corp. | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Send feedback to <colpatch@us.ibm.com> | ||
26 | */ | ||
27 | #ifndef _ASM_I386_TOPOLOGY_H | ||
28 | #define _ASM_I386_TOPOLOGY_H | ||
29 | |||
30 | #ifdef CONFIG_NUMA | ||
31 | |||
32 | #include <asm/mpspec.h> | ||
33 | |||
34 | #include <linux/cpumask.h> | ||
35 | |||
36 | /* Mappings between logical cpu number and node number */ | ||
37 | extern cpumask_t node_2_cpu_mask[]; | ||
38 | extern int cpu_2_node[]; | ||
39 | |||
40 | /* Returns the number of the node containing CPU 'cpu' */ | ||
41 | static inline int cpu_to_node(int cpu) | ||
42 | { | ||
43 | return cpu_2_node[cpu]; | ||
44 | } | ||
45 | |||
46 | /* Returns the number of the node containing Node 'node'. This architecture is flat, | ||
47 | so it is a pretty simple function! */ | ||
48 | #define parent_node(node) (node) | ||
49 | |||
50 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
51 | static inline cpumask_t node_to_cpumask(int node) | ||
52 | { | ||
53 | return node_2_cpu_mask[node]; | ||
54 | } | ||
55 | |||
56 | /* Returns the number of the first CPU on Node 'node'. */ | ||
57 | static inline int node_to_first_cpu(int node) | ||
58 | { | ||
59 | cpumask_t mask = node_to_cpumask(node); | ||
60 | return first_cpu(mask); | ||
61 | } | ||
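Sketch only (hypothetical helper, assuming cpus_weight() from <linux/cpumask.h>): the node mappings above compose naturally with the generic cpumask helpers.

static int cpus_on_node(int node)
{
	cpumask_t mask = node_to_cpumask(node);

	return cpus_weight(mask);	/* number of CPUs on this node */
}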
62 | |||
63 | /* Returns a cpumask of the CPUs on the node containing PCI bus number 'busnr' */ | ||
64 | static inline cpumask_t __pcibus_to_cpumask(int busnr) | ||
65 | { | ||
66 | return node_to_cpumask(mp_bus_id_to_node[busnr]); | ||
67 | } | ||
68 | #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus->number) | ||
69 | |||
70 | /* sched_domains SD_NODE_INIT for NUMAQ machines */ | ||
71 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
72 | .span = CPU_MASK_NONE, \ | ||
73 | .parent = NULL, \ | ||
74 | .groups = NULL, \ | ||
75 | .min_interval = 8, \ | ||
76 | .max_interval = 32, \ | ||
77 | .busy_factor = 32, \ | ||
78 | .imbalance_pct = 125, \ | ||
79 | .cache_hot_time = (10*1000000), \ | ||
80 | .cache_nice_tries = 1, \ | ||
81 | .per_cpu_gain = 100, \ | ||
82 | .flags = SD_LOAD_BALANCE \ | ||
83 | | SD_BALANCE_EXEC \ | ||
84 | | SD_BALANCE_NEWIDLE \ | ||
85 | | SD_WAKE_IDLE \ | ||
86 | | SD_WAKE_BALANCE, \ | ||
87 | .last_balance = jiffies, \ | ||
88 | .balance_interval = 1, \ | ||
89 | .nr_balance_failed = 0, \ | ||
90 | } | ||
91 | |||
92 | extern unsigned long node_start_pfn[]; | ||
93 | extern unsigned long node_end_pfn[]; | ||
94 | extern unsigned long node_remap_size[]; | ||
95 | |||
96 | #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) | ||
97 | |||
98 | #else /* !CONFIG_NUMA */ | ||
99 | /* | ||
100 | * Other i386 platforms should define their own version of the | ||
101 | * above macros here. | ||
102 | */ | ||
103 | |||
104 | #include <asm-generic/topology.h> | ||
105 | |||
106 | #endif /* CONFIG_NUMA */ | ||
107 | |||
108 | #endif /* _ASM_I386_TOPOLOGY_H */ | ||
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h new file mode 100644 index 000000000000..901b77c42b8a --- /dev/null +++ b/include/asm-i386/types.h | |||
@@ -0,0 +1,72 @@ | |||
1 | #ifndef _I386_TYPES_H | ||
2 | #define _I386_TYPES_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | typedef unsigned short umode_t; | ||
7 | |||
8 | /* | ||
9 | * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the | ||
10 | * header files exported to user space | ||
11 | */ | ||
12 | |||
13 | typedef __signed__ char __s8; | ||
14 | typedef unsigned char __u8; | ||
15 | |||
16 | typedef __signed__ short __s16; | ||
17 | typedef unsigned short __u16; | ||
18 | |||
19 | typedef __signed__ int __s32; | ||
20 | typedef unsigned int __u32; | ||
21 | |||
22 | #if defined(__GNUC__) && !defined(__STRICT_ANSI__) | ||
23 | typedef __signed__ long long __s64; | ||
24 | typedef unsigned long long __u64; | ||
25 | #endif | ||
26 | |||
27 | #endif /* __ASSEMBLY__ */ | ||
28 | |||
29 | /* | ||
30 | * These aren't exported outside the kernel to avoid name space clashes | ||
31 | */ | ||
32 | #ifdef __KERNEL__ | ||
33 | |||
34 | #define BITS_PER_LONG 32 | ||
35 | |||
36 | #ifndef __ASSEMBLY__ | ||
37 | |||
38 | #include <linux/config.h> | ||
39 | |||
40 | typedef signed char s8; | ||
41 | typedef unsigned char u8; | ||
42 | |||
43 | typedef signed short s16; | ||
44 | typedef unsigned short u16; | ||
45 | |||
46 | typedef signed int s32; | ||
47 | typedef unsigned int u32; | ||
48 | |||
49 | typedef signed long long s64; | ||
50 | typedef unsigned long long u64; | ||
51 | |||
52 | /* DMA addresses come in generic and 64-bit flavours. */ | ||
53 | |||
54 | #ifdef CONFIG_HIGHMEM64G | ||
55 | typedef u64 dma_addr_t; | ||
56 | #else | ||
57 | typedef u32 dma_addr_t; | ||
58 | #endif | ||
59 | typedef u64 dma64_addr_t; | ||
60 | |||
61 | #ifdef CONFIG_LBD | ||
62 | typedef u64 sector_t; | ||
63 | #define HAVE_SECTOR_T | ||
64 | #endif | ||
65 | |||
66 | typedef unsigned short kmem_bufctl_t; | ||
67 | |||
68 | #endif /* __ASSEMBLY__ */ | ||
69 | |||
70 | #endif /* __KERNEL__ */ | ||
71 | |||
72 | #endif | ||
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h new file mode 100644 index 000000000000..886867aea947 --- /dev/null +++ b/include/asm-i386/uaccess.h | |||
@@ -0,0 +1,539 @@ | |||
1 | #ifndef __i386_UACCESS_H | ||
2 | #define __i386_UACCESS_H | ||
3 | |||
4 | /* | ||
5 | * User space memory access functions | ||
6 | */ | ||
7 | #include <linux/config.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/thread_info.h> | ||
10 | #include <linux/prefetch.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <asm/page.h> | ||
13 | |||
14 | #define VERIFY_READ 0 | ||
15 | #define VERIFY_WRITE 1 | ||
16 | |||
17 | /* | ||
18 | * The fs value determines whether argument validity checking should be | ||
19 | * performed or not. If get_fs() == USER_DS, checking is performed; with | ||
20 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
21 | * | ||
22 | * For historical reasons, these macros are grossly misnamed. | ||
23 | */ | ||
24 | |||
25 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | ||
26 | |||
27 | |||
28 | #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) | ||
29 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | ||
30 | |||
31 | #define get_ds() (KERNEL_DS) | ||
32 | #define get_fs() (current_thread_info()->addr_limit) | ||
33 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
34 | |||
35 | #define segment_eq(a,b) ((a).seg == (b).seg) | ||
36 | |||
37 | /* | ||
38 | * movsl can be slow when source and dest are not both 8-byte aligned | ||
39 | */ | ||
40 | #ifdef CONFIG_X86_INTEL_USERCOPY | ||
41 | extern struct movsl_mask { | ||
42 | int mask; | ||
43 | } ____cacheline_aligned_in_smp movsl_mask; | ||
44 | #endif | ||
45 | |||
46 | #define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) | ||
47 | |||
48 | /* | ||
49 | * Test whether a block of memory is a valid user space address. | ||
50 | * Returns 0 if the range is valid, nonzero otherwise. | ||
51 | * | ||
52 | * This is equivalent to the following test: | ||
53 | * (u33)addr + (u33)size >= (u33)current->addr_limit.seg | ||
54 | * | ||
55 | * This needs 33-bit arithmetic. We have a carry... | ||
56 | */ | ||
57 | #define __range_ok(addr,size) ({ \ | ||
58 | unsigned long flag,sum; \ | ||
59 | __chk_user_ptr(addr); \ | ||
60 | asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ | ||
61 | :"=&r" (flag), "=r" (sum) \ | ||
62 | :"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \ | ||
63 | flag; }) | ||
64 | |||
65 | /** | ||
66 | * access_ok: - Checks if a user space pointer is valid | ||
67 | * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that | ||
68 | * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe | ||
69 | * to write to a block, it is always safe to read from it. | ||
70 | * @addr: User space pointer to start of block to check | ||
71 | * @size: Size of block to check | ||
72 | * | ||
73 | * Context: User context only. This function may sleep. | ||
74 | * | ||
75 | * Checks if a pointer to a block of memory in user space is valid. | ||
76 | * | ||
77 | * Returns true (nonzero) if the memory block may be valid, false (zero) | ||
78 | * if it is definitely invalid. | ||
79 | * | ||
80 | * Note that, depending on architecture, this function probably just | ||
81 | * checks that the pointer is in the user space range - after calling | ||
82 | * this function, memory access functions may still return -EFAULT. | ||
83 | */ | ||
84 | #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) | ||
85 | |||
86 | /** | ||
87 | * verify_area: - Obsolete/deprecated and will go away soon, | ||
88 | * use access_ok() instead. | ||
89 | * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE | ||
90 | * @addr: User space pointer to start of block to check | ||
91 | * @size: Size of block to check | ||
92 | * | ||
93 | * Context: User context only. This function may sleep. | ||
94 | * | ||
95 | * This function has been replaced by access_ok(). | ||
96 | * | ||
97 | * Checks if a pointer to a block of memory in user space is valid. | ||
98 | * | ||
99 | * Returns zero if the memory block may be valid, -EFAULT | ||
100 | * if it is definitely invalid. | ||
101 | * | ||
102 | * See access_ok() for more details. | ||
103 | */ | ||
104 | static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size) | ||
105 | { | ||
106 | return access_ok(type,addr,size) ? 0 : -EFAULT; | ||
107 | } | ||
108 | |||
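A hedged sketch (hypothetical helper): drivers typically validate a whole user buffer once with access_ok() and then use the unchecked __-prefixed accessors defined later in this header.

static int validate_user_buffer(void __user *buf, unsigned long len)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;		/* range crosses the addr_limit */
	return 0;
}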
109 | |||
110 | /* | ||
111 | * The exception table consists of pairs of addresses: the first is the | ||
112 | * address of an instruction that is allowed to fault, and the second is | ||
113 | * the address at which the program should continue. No registers are | ||
114 | * modified, so it is entirely up to the continuation code to figure out | ||
115 | * what to do. | ||
116 | * | ||
117 | * All the routines below use bits of fixup code that are out of line | ||
118 | * with the main instruction path. This means when everything is well, | ||
119 | * we don't even have to jump over them. Further, they do not intrude | ||
120 | * on our cache or tlb entries. | ||
121 | */ | ||
122 | |||
123 | struct exception_table_entry | ||
124 | { | ||
125 | unsigned long insn, fixup; | ||
126 | }; | ||
127 | |||
128 | extern int fixup_exception(struct pt_regs *regs); | ||
129 | |||
130 | /* | ||
131 | * These are the main single-value transfer routines. They automatically | ||
132 | * use the right size if we just have the right pointer type. | ||
133 | * | ||
134 | * This gets kind of ugly. We want to return _two_ values in "get_user()" | ||
135 | * and yet we don't want to do any pointers, because that is too much | ||
136 | * of a performance impact. Thus we have a few rather ugly macros here, | ||
137 | * and hide all the ugliness from the user. | ||
138 | * | ||
139 | * The "__xxx" versions of the user access functions are versions that | ||
140 | * do not verify the address space, that must have been done previously | ||
141 | * with a separate "access_ok()" call (this is used when we do multiple | ||
142 | * accesses to the same area of user memory). | ||
143 | */ | ||
144 | |||
145 | extern void __get_user_1(void); | ||
146 | extern void __get_user_2(void); | ||
147 | extern void __get_user_4(void); | ||
148 | |||
149 | #define __get_user_x(size,ret,x,ptr) \ | ||
150 | __asm__ __volatile__("call __get_user_" #size \ | ||
151 | :"=a" (ret),"=d" (x) \ | ||
152 | :"0" (ptr)) | ||
153 | |||
154 | |||
155 | /* Careful: we have to cast the result to the type of the pointer for sign reasons */ | ||
156 | /** | ||
157 | * get_user: - Get a simple variable from user space. | ||
158 | * @x: Variable to store result. | ||
159 | * @ptr: Source address, in user space. | ||
160 | * | ||
161 | * Context: User context only. This function may sleep. | ||
162 | * | ||
163 | * This macro copies a single simple variable from user space to kernel | ||
164 | * space. It supports simple types like char and int, but not larger | ||
165 | * data types like structures or arrays. | ||
166 | * | ||
167 | * @ptr must have pointer-to-simple-variable type, and the result of | ||
168 | * dereferencing @ptr must be assignable to @x without a cast. | ||
169 | * | ||
170 | * Returns zero on success, or -EFAULT on error. | ||
171 | * On error, the variable @x is set to zero. | ||
172 | */ | ||
173 | #define get_user(x,ptr) \ | ||
174 | ({ int __ret_gu; \ | ||
175 | unsigned long __val_gu; \ | ||
176 | __chk_user_ptr(ptr); \ | ||
177 | switch(sizeof (*(ptr))) { \ | ||
178 | case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ | ||
179 | case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ | ||
180 | case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ | ||
181 | default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ | ||
182 | } \ | ||
183 | (x) = (__typeof__(*(ptr)))__val_gu; \ | ||
184 | __ret_gu; \ | ||
185 | }) | ||
186 | |||
187 | extern void __put_user_bad(void); | ||
188 | |||
189 | /* | ||
190 | * Strange magic calling convention: pointer in %ecx, | ||
191 | * value in %eax(:%edx), return value in %eax, no clobbers. | ||
192 | */ | ||
193 | extern void __put_user_1(void); | ||
194 | extern void __put_user_2(void); | ||
195 | extern void __put_user_4(void); | ||
196 | extern void __put_user_8(void); | ||
197 | |||
198 | #define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
199 | #define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
200 | #define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
201 | #define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
202 | #define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) | ||
203 | |||
204 | /** | ||
205 | * put_user: - Write a simple value into user space. | ||
206 | * @x: Value to copy to user space. | ||
207 | * @ptr: Destination address, in user space. | ||
208 | * | ||
209 | * Context: User context only. This function may sleep. | ||
210 | * | ||
211 | * This macro copies a single simple value from kernel space to user | ||
212 | * space. It supports simple types like char and int, but not larger | ||
213 | * data types like structures or arrays. | ||
214 | * | ||
215 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | ||
216 | * to the result of dereferencing @ptr. | ||
217 | * | ||
218 | * Returns zero on success, or -EFAULT on error. | ||
219 | */ | ||
220 | #ifdef CONFIG_X86_WP_WORKS_OK | ||
221 | |||
222 | #define put_user(x,ptr) \ | ||
223 | ({ int __ret_pu; \ | ||
224 | __chk_user_ptr(ptr); \ | ||
225 | switch(sizeof(*(ptr))) { \ | ||
226 | case 1: __put_user_1(x, ptr); break; \ | ||
227 | case 2: __put_user_2(x, ptr); break; \ | ||
228 | case 4: __put_user_4(x, ptr); break; \ | ||
229 | case 8: __put_user_8(x, ptr); break; \ | ||
230 | default:__put_user_X(x, ptr); break; \ | ||
231 | } \ | ||
232 | __ret_pu; \ | ||
233 | }) | ||
234 | |||
235 | #else | ||
236 | #define put_user(x,ptr) \ | ||
237 | ({ \ | ||
238 | int __ret_pu; \ | ||
239 | __typeof__(*(ptr)) __pus_tmp = x; \ | ||
240 | __ret_pu=0; \ | ||
241 | if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ | ||
242 | sizeof(*(ptr))) != 0)) \ | ||
243 | __ret_pu=-EFAULT; \ | ||
244 | __ret_pu; \ | ||
245 | }) | ||
246 | |||
247 | |||
248 | #endif | ||
249 | |||
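Usage sketch (hypothetical helper): get_user()/put_user() perform their own range check and return 0 or -EFAULT, so single scalar transfers need no separate access_ok() call.

static int double_user_value(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* -EFAULT on a bad pointer */
		return -EFAULT;

	return put_user(val * 2, uptr);
}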
250 | /** | ||
251 | * __get_user: - Get a simple variable from user space, with less checking. | ||
252 | * @x: Variable to store result. | ||
253 | * @ptr: Source address, in user space. | ||
254 | * | ||
255 | * Context: User context only. This function may sleep. | ||
256 | * | ||
257 | * This macro copies a single simple variable from user space to kernel | ||
258 | * space. It supports simple types like char and int, but not larger | ||
259 | * data types like structures or arrays. | ||
260 | * | ||
261 | * @ptr must have pointer-to-simple-variable type, and the result of | ||
262 | * dereferencing @ptr must be assignable to @x without a cast. | ||
263 | * | ||
264 | * Caller must check the pointer with access_ok() before calling this | ||
265 | * function. | ||
266 | * | ||
267 | * Returns zero on success, or -EFAULT on error. | ||
268 | * On error, the variable @x is set to zero. | ||
269 | */ | ||
270 | #define __get_user(x,ptr) \ | ||
271 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | ||
272 | |||
273 | |||
274 | /** | ||
275 | * __put_user: - Write a simple value into user space, with less checking. | ||
276 | * @x: Value to copy to user space. | ||
277 | * @ptr: Destination address, in user space. | ||
278 | * | ||
279 | * Context: User context only. This function may sleep. | ||
280 | * | ||
281 | * This macro copies a single simple value from kernel space to user | ||
282 | * space. It supports simple types like char and int, but not larger | ||
283 | * data types like structures or arrays. | ||
284 | * | ||
285 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | ||
286 | * to the result of dereferencing @ptr. | ||
287 | * | ||
288 | * Caller must check the pointer with access_ok() before calling this | ||
289 | * function. | ||
290 | * | ||
291 | * Returns zero on success, or -EFAULT on error. | ||
292 | */ | ||
293 | #define __put_user(x,ptr) \ | ||
294 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
295 | |||
296 | #define __put_user_nocheck(x,ptr,size) \ | ||
297 | ({ \ | ||
298 | long __pu_err; \ | ||
299 | __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ | ||
300 | __pu_err; \ | ||
301 | }) | ||
302 | |||
303 | |||
304 | #define __put_user_u64(x, addr, err) \ | ||
305 | __asm__ __volatile__( \ | ||
306 | "1: movl %%eax,0(%2)\n" \ | ||
307 | "2: movl %%edx,4(%2)\n" \ | ||
308 | "3:\n" \ | ||
309 | ".section .fixup,\"ax\"\n" \ | ||
310 | "4: movl %3,%0\n" \ | ||
311 | " jmp 3b\n" \ | ||
312 | ".previous\n" \ | ||
313 | ".section __ex_table,\"a\"\n" \ | ||
314 | " .align 4\n" \ | ||
315 | " .long 1b,4b\n" \ | ||
316 | " .long 2b,4b\n" \ | ||
317 | ".previous" \ | ||
318 | : "=r"(err) \ | ||
319 | : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) | ||
320 | |||
321 | #ifdef CONFIG_X86_WP_WORKS_OK | ||
322 | |||
323 | #define __put_user_size(x,ptr,size,retval,errret) \ | ||
324 | do { \ | ||
325 | retval = 0; \ | ||
326 | __chk_user_ptr(ptr); \ | ||
327 | switch (size) { \ | ||
328 | case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ | ||
329 | case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ | ||
330 | case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ | ||
331 | case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ | ||
332 | default: __put_user_bad(); \ | ||
333 | } \ | ||
334 | } while (0) | ||
335 | |||
336 | #else | ||
337 | |||
338 | #define __put_user_size(x,ptr,size,retval,errret) \ | ||
339 | do { \ | ||
340 | __typeof__(*(ptr)) __pus_tmp = x; \ | ||
341 | retval = 0; \ | ||
342 | \ | ||
343 | if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ | ||
344 | retval = errret; \ | ||
345 | } while (0) | ||
346 | |||
347 | #endif | ||
348 | struct __large_struct { unsigned long buf[100]; }; | ||
349 | #define __m(x) (*(struct __large_struct __user *)(x)) | ||
350 | |||
351 | /* | ||
352 | * Tell gcc we read from memory instead of writing: this is because | ||
353 | * we do not write to any memory gcc knows about, so there are no | ||
354 | * aliasing issues. | ||
355 | */ | ||
356 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | ||
357 | __asm__ __volatile__( \ | ||
358 | "1: mov"itype" %"rtype"1,%2\n" \ | ||
359 | "2:\n" \ | ||
360 | ".section .fixup,\"ax\"\n" \ | ||
361 | "3: movl %3,%0\n" \ | ||
362 | " jmp 2b\n" \ | ||
363 | ".previous\n" \ | ||
364 | ".section __ex_table,\"a\"\n" \ | ||
365 | " .align 4\n" \ | ||
366 | " .long 1b,3b\n" \ | ||
367 | ".previous" \ | ||
368 | : "=r"(err) \ | ||
369 | : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) | ||
370 | |||
371 | |||
372 | #define __get_user_nocheck(x,ptr,size) \ | ||
373 | ({ \ | ||
374 | long __gu_err; \ | ||
375 | unsigned long __gu_val; \ | ||
376 | __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ | ||
377 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
378 | __gu_err; \ | ||
379 | }) | ||
380 | |||
381 | extern long __get_user_bad(void); | ||
382 | |||
383 | #define __get_user_size(x,ptr,size,retval,errret) \ | ||
384 | do { \ | ||
385 | retval = 0; \ | ||
386 | __chk_user_ptr(ptr); \ | ||
387 | switch (size) { \ | ||
388 | case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ | ||
389 | case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ | ||
390 | case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ | ||
391 | default: (x) = __get_user_bad(); \ | ||
392 | } \ | ||
393 | } while (0) | ||
394 | |||
395 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | ||
396 | __asm__ __volatile__( \ | ||
397 | "1: mov"itype" %2,%"rtype"1\n" \ | ||
398 | "2:\n" \ | ||
399 | ".section .fixup,\"ax\"\n" \ | ||
400 | "3: movl %3,%0\n" \ | ||
401 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | ||
402 | " jmp 2b\n" \ | ||
403 | ".previous\n" \ | ||
404 | ".section __ex_table,\"a\"\n" \ | ||
405 | " .align 4\n" \ | ||
406 | " .long 1b,3b\n" \ | ||
407 | ".previous" \ | ||
408 | : "=r"(err), ltype (x) \ | ||
409 | : "m"(__m(addr)), "i"(errret), "0"(err)) | ||
410 | |||
411 | |||
412 | unsigned long __must_check __copy_to_user_ll(void __user *to, | ||
413 | const void *from, unsigned long n); | ||
414 | unsigned long __must_check __copy_from_user_ll(void *to, | ||
415 | const void __user *from, unsigned long n); | ||
416 | |||
417 | /* | ||
418 | * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault | ||
419 | * we return the initial request size (1, 2 or 4), as copy_*_user should do. | ||
420 | * If a store crosses a page boundary and gets a fault, the x86 will not write | ||
421 | * anything, so this is accurate. | ||
422 | */ | ||
423 | |||
424 | /** | ||
425 | * __copy_to_user: - Copy a block of data into user space, with less checking. | ||
426 | * @to: Destination address, in user space. | ||
427 | * @from: Source address, in kernel space. | ||
428 | * @n: Number of bytes to copy. | ||
429 | * | ||
430 | * Context: User context only. This function may sleep. | ||
431 | * | ||
432 | * Copy data from kernel space to user space. Caller must check | ||
433 | * the specified block with access_ok() before calling this function. | ||
434 | * | ||
435 | * Returns number of bytes that could not be copied. | ||
436 | * On success, this will be zero. | ||
437 | */ | ||
438 | static inline unsigned long __must_check | ||
439 | __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) | ||
440 | { | ||
441 | if (__builtin_constant_p(n)) { | ||
442 | unsigned long ret; | ||
443 | |||
444 | switch (n) { | ||
445 | case 1: | ||
446 | __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); | ||
447 | return ret; | ||
448 | case 2: | ||
449 | __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); | ||
450 | return ret; | ||
451 | case 4: | ||
452 | __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); | ||
453 | return ret; | ||
454 | } | ||
455 | } | ||
456 | return __copy_to_user_ll(to, from, n); | ||
457 | } | ||
458 | |||
459 | static inline unsigned long __must_check | ||
460 | __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
461 | { | ||
462 | might_sleep(); | ||
463 | return __copy_to_user_inatomic(to, from, n); | ||
464 | } | ||
465 | |||
466 | /** | ||
467 | * __copy_from_user: - Copy a block of data from user space, with less checking. | ||
468 | * @to: Destination address, in kernel space. | ||
469 | * @from: Source address, in user space. | ||
470 | * @n: Number of bytes to copy. | ||
471 | * | ||
472 | * Context: User context only. This function may sleep. | ||
473 | * | ||
474 | * Copy data from user space to kernel space. Caller must check | ||
475 | * the specified block with access_ok() before calling this function. | ||
476 | * | ||
477 | * Returns number of bytes that could not be copied. | ||
478 | * On success, this will be zero. | ||
479 | * | ||
480 | * If some data could not be copied, this function will pad the copied | ||
481 | * data to the requested size using zero bytes. | ||
482 | */ | ||
483 | static inline unsigned long | ||
484 | __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) | ||
485 | { | ||
486 | if (__builtin_constant_p(n)) { | ||
487 | unsigned long ret; | ||
488 | |||
489 | switch (n) { | ||
490 | case 1: | ||
491 | __get_user_size(*(u8 *)to, from, 1, ret, 1); | ||
492 | return ret; | ||
493 | case 2: | ||
494 | __get_user_size(*(u16 *)to, from, 2, ret, 2); | ||
495 | return ret; | ||
496 | case 4: | ||
497 | __get_user_size(*(u32 *)to, from, 4, ret, 4); | ||
498 | return ret; | ||
499 | } | ||
500 | } | ||
501 | return __copy_from_user_ll(to, from, n); | ||
502 | } | ||
503 | |||
504 | static inline unsigned long | ||
505 | __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
506 | { | ||
507 | might_sleep(); | ||
508 | return __copy_from_user_inatomic(to, from, n); | ||
509 | } | ||
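Both block-copy helpers return the number of bytes that could not be copied, so the common idiom treats any nonzero result as -EFAULT. A hedged sketch (the structure and function names are invented) using the checked copy_from_user()/copy_to_user() declared just below:

    struct example_args {                   /* hypothetical ABI structure */
            unsigned long addr;
            unsigned long len;
    };

    static int example_handle(struct example_args __user *uarg)
    {
            struct example_args args;

            if (copy_from_user(&args, uarg, sizeof(args)))
                    return -EFAULT;
            args.len &= ~0xfffUL;           /* some kernel-side processing */
            if (copy_to_user(uarg, &args, sizeof(args)))
                    return -EFAULT;
            return 0;
    }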
510 | unsigned long __must_check copy_to_user(void __user *to, | ||
511 | const void *from, unsigned long n); | ||
512 | unsigned long __must_check copy_from_user(void *to, | ||
513 | const void __user *from, unsigned long n); | ||
514 | long __must_check strncpy_from_user(char *dst, const char __user *src, | ||
515 | long count); | ||
516 | long __must_check __strncpy_from_user(char *dst, | ||
517 | const char __user *src, long count); | ||
518 | |||
519 | /** | ||
520 | * strlen_user: - Get the size of a string in user space. | ||
521 | * @str: The string to measure. | ||
522 | * | ||
523 | * Context: User context only. This function may sleep. | ||
524 | * | ||
525 | * Get the size of a NUL-terminated string in user space. | ||
526 | * | ||
527 | * Returns the size of the string INCLUDING the terminating NUL. | ||
528 | * On exception, returns 0. | ||
529 | * | ||
530 | * If there is a limit on the length of a valid string, you may wish to | ||
531 | * consider using strnlen_user() instead. | ||
532 | */ | ||
533 | #define strlen_user(str) strnlen_user(str, ~0UL >> 1) | ||
534 | |||
535 | long strnlen_user(const char __user *str, long n); | ||
536 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | ||
537 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | ||
538 | |||
539 | #endif /* __i386_UACCESS_H */ | ||
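The string helpers at the end of uaccess.h follow the same conventions: strncpy_from_user() returns the copied length (excluding the NUL) or -EFAULT, and the copy is not NUL-terminated when it hits the count limit. A hedged sketch (hypothetical function; kbuf is assumed to hold at least 32 bytes):

    /* Illustration only: fetch a short, NUL-terminated name from user space. */
    static int example_get_name(char *kbuf, const char __user *uname)
    {
            long len = strncpy_from_user(kbuf, uname, 31);

            if (len < 0)
                    return -EFAULT;         /* faulted on the user pointer */
            kbuf[len] = '\0';               /* terminate explicitly: truncated copies are not */
            return 0;
    }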
diff --git a/include/asm-i386/ucontext.h b/include/asm-i386/ucontext.h new file mode 100644 index 000000000000..b0db36925f55 --- /dev/null +++ b/include/asm-i386/ucontext.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASMi386_UCONTEXT_H | ||
2 | #define _ASMi386_UCONTEXT_H | ||
3 | |||
4 | struct ucontext { | ||
5 | unsigned long uc_flags; | ||
6 | struct ucontext *uc_link; | ||
7 | stack_t uc_stack; | ||
8 | struct sigcontext uc_mcontext; | ||
9 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
10 | }; | ||
11 | |||
12 | #endif /* !_ASMi386_UCONTEXT_H */ | ||
diff --git a/include/asm-i386/unaligned.h b/include/asm-i386/unaligned.h new file mode 100644 index 000000000000..7acd7957621e --- /dev/null +++ b/include/asm-i386/unaligned.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef __I386_UNALIGNED_H | ||
2 | #define __I386_UNALIGNED_H | ||
3 | |||
4 | /* | ||
5 | * The i386 can do unaligned accesses itself. | ||
6 | * | ||
7 | * The strange macros are there to make sure these can't | ||
8 | * be misused in a way that makes them not work on other | ||
9 | * architectures where unaligned accesses aren't as simple. | ||
10 | */ | ||
11 | |||
12 | /** | ||
13 | * get_unaligned - get value from possibly mis-aligned location | ||
14 | * @ptr: pointer to value | ||
15 | * | ||
16 | * This macro should be used for accessing values larger in size than | ||
17 | * single bytes at locations that are expected to be improperly aligned, | ||
18 | * e.g. retrieving a u16 value from a location not u16-aligned. | ||
19 | * | ||
20 | * Note that unaligned accesses can be very expensive on some architectures. | ||
21 | */ | ||
22 | #define get_unaligned(ptr) (*(ptr)) | ||
23 | |||
24 | /** | ||
25 | * put_unaligned - put value to a possibly mis-aligned location | ||
26 | * @val: value to place | ||
27 | * @ptr: pointer to location | ||
28 | * | ||
29 | * This macro should be used for placing values larger in size than | ||
30 | * single bytes at locations that are expected to be improperly aligned, | ||
31 | * e.g. writing a u16 value to a location not u16-aligned. | ||
32 | * | ||
33 | * Note that unaligned accesses can be very expensive on some architectures. | ||
34 | */ | ||
35 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | ||
36 | |||
37 | #endif | ||
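On i386 these macros are plain dereferences, but using them keeps the source portable to architectures where misaligned loads and stores have to be synthesized. A hedged sketch (the on-wire layout is invented for illustration):

    #include <asm/unaligned.h>

    /* A 16-bit length field sitting at an odd offset inside a packed,
     * byte-oriented buffer: buf + 1 is not guaranteed to be 2-byte aligned. */
    static unsigned short example_read_len(const unsigned char *buf)
    {
            return get_unaligned((const unsigned short *)(buf + 1));
    }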
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h new file mode 100644 index 000000000000..61bcc1b1e3f4 --- /dev/null +++ b/include/asm-i386/unistd.h | |||
@@ -0,0 +1,466 @@ | |||
1 | #ifndef _ASM_I386_UNISTD_H_ | ||
2 | #define _ASM_I386_UNISTD_H_ | ||
3 | |||
4 | /* | ||
5 | * This file contains the system call numbers. | ||
6 | */ | ||
7 | |||
8 | #define __NR_restart_syscall 0 | ||
9 | #define __NR_exit 1 | ||
10 | #define __NR_fork 2 | ||
11 | #define __NR_read 3 | ||
12 | #define __NR_write 4 | ||
13 | #define __NR_open 5 | ||
14 | #define __NR_close 6 | ||
15 | #define __NR_waitpid 7 | ||
16 | #define __NR_creat 8 | ||
17 | #define __NR_link 9 | ||
18 | #define __NR_unlink 10 | ||
19 | #define __NR_execve 11 | ||
20 | #define __NR_chdir 12 | ||
21 | #define __NR_time 13 | ||
22 | #define __NR_mknod 14 | ||
23 | #define __NR_chmod 15 | ||
24 | #define __NR_lchown 16 | ||
25 | #define __NR_break 17 | ||
26 | #define __NR_oldstat 18 | ||
27 | #define __NR_lseek 19 | ||
28 | #define __NR_getpid 20 | ||
29 | #define __NR_mount 21 | ||
30 | #define __NR_umount 22 | ||
31 | #define __NR_setuid 23 | ||
32 | #define __NR_getuid 24 | ||
33 | #define __NR_stime 25 | ||
34 | #define __NR_ptrace 26 | ||
35 | #define __NR_alarm 27 | ||
36 | #define __NR_oldfstat 28 | ||
37 | #define __NR_pause 29 | ||
38 | #define __NR_utime 30 | ||
39 | #define __NR_stty 31 | ||
40 | #define __NR_gtty 32 | ||
41 | #define __NR_access 33 | ||
42 | #define __NR_nice 34 | ||
43 | #define __NR_ftime 35 | ||
44 | #define __NR_sync 36 | ||
45 | #define __NR_kill 37 | ||
46 | #define __NR_rename 38 | ||
47 | #define __NR_mkdir 39 | ||
48 | #define __NR_rmdir 40 | ||
49 | #define __NR_dup 41 | ||
50 | #define __NR_pipe 42 | ||
51 | #define __NR_times 43 | ||
52 | #define __NR_prof 44 | ||
53 | #define __NR_brk 45 | ||
54 | #define __NR_setgid 46 | ||
55 | #define __NR_getgid 47 | ||
56 | #define __NR_signal 48 | ||
57 | #define __NR_geteuid 49 | ||
58 | #define __NR_getegid 50 | ||
59 | #define __NR_acct 51 | ||
60 | #define __NR_umount2 52 | ||
61 | #define __NR_lock 53 | ||
62 | #define __NR_ioctl 54 | ||
63 | #define __NR_fcntl 55 | ||
64 | #define __NR_mpx 56 | ||
65 | #define __NR_setpgid 57 | ||
66 | #define __NR_ulimit 58 | ||
67 | #define __NR_oldolduname 59 | ||
68 | #define __NR_umask 60 | ||
69 | #define __NR_chroot 61 | ||
70 | #define __NR_ustat 62 | ||
71 | #define __NR_dup2 63 | ||
72 | #define __NR_getppid 64 | ||
73 | #define __NR_getpgrp 65 | ||
74 | #define __NR_setsid 66 | ||
75 | #define __NR_sigaction 67 | ||
76 | #define __NR_sgetmask 68 | ||
77 | #define __NR_ssetmask 69 | ||
78 | #define __NR_setreuid 70 | ||
79 | #define __NR_setregid 71 | ||
80 | #define __NR_sigsuspend 72 | ||
81 | #define __NR_sigpending 73 | ||
82 | #define __NR_sethostname 74 | ||
83 | #define __NR_setrlimit 75 | ||
84 | #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ | ||
85 | #define __NR_getrusage 77 | ||
86 | #define __NR_gettimeofday 78 | ||
87 | #define __NR_settimeofday 79 | ||
88 | #define __NR_getgroups 80 | ||
89 | #define __NR_setgroups 81 | ||
90 | #define __NR_select 82 | ||
91 | #define __NR_symlink 83 | ||
92 | #define __NR_oldlstat 84 | ||
93 | #define __NR_readlink 85 | ||
94 | #define __NR_uselib 86 | ||
95 | #define __NR_swapon 87 | ||
96 | #define __NR_reboot 88 | ||
97 | #define __NR_readdir 89 | ||
98 | #define __NR_mmap 90 | ||
99 | #define __NR_munmap 91 | ||
100 | #define __NR_truncate 92 | ||
101 | #define __NR_ftruncate 93 | ||
102 | #define __NR_fchmod 94 | ||
103 | #define __NR_fchown 95 | ||
104 | #define __NR_getpriority 96 | ||
105 | #define __NR_setpriority 97 | ||
106 | #define __NR_profil 98 | ||
107 | #define __NR_statfs 99 | ||
108 | #define __NR_fstatfs 100 | ||
109 | #define __NR_ioperm 101 | ||
110 | #define __NR_socketcall 102 | ||
111 | #define __NR_syslog 103 | ||
112 | #define __NR_setitimer 104 | ||
113 | #define __NR_getitimer 105 | ||
114 | #define __NR_stat 106 | ||
115 | #define __NR_lstat 107 | ||
116 | #define __NR_fstat 108 | ||
117 | #define __NR_olduname 109 | ||
118 | #define __NR_iopl 110 | ||
119 | #define __NR_vhangup 111 | ||
120 | #define __NR_idle 112 | ||
121 | #define __NR_vm86old 113 | ||
122 | #define __NR_wait4 114 | ||
123 | #define __NR_swapoff 115 | ||
124 | #define __NR_sysinfo 116 | ||
125 | #define __NR_ipc 117 | ||
126 | #define __NR_fsync 118 | ||
127 | #define __NR_sigreturn 119 | ||
128 | #define __NR_clone 120 | ||
129 | #define __NR_setdomainname 121 | ||
130 | #define __NR_uname 122 | ||
131 | #define __NR_modify_ldt 123 | ||
132 | #define __NR_adjtimex 124 | ||
133 | #define __NR_mprotect 125 | ||
134 | #define __NR_sigprocmask 126 | ||
135 | #define __NR_create_module 127 | ||
136 | #define __NR_init_module 128 | ||
137 | #define __NR_delete_module 129 | ||
138 | #define __NR_get_kernel_syms 130 | ||
139 | #define __NR_quotactl 131 | ||
140 | #define __NR_getpgid 132 | ||
141 | #define __NR_fchdir 133 | ||
142 | #define __NR_bdflush 134 | ||
143 | #define __NR_sysfs 135 | ||
144 | #define __NR_personality 136 | ||
145 | #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ | ||
146 | #define __NR_setfsuid 138 | ||
147 | #define __NR_setfsgid 139 | ||
148 | #define __NR__llseek 140 | ||
149 | #define __NR_getdents 141 | ||
150 | #define __NR__newselect 142 | ||
151 | #define __NR_flock 143 | ||
152 | #define __NR_msync 144 | ||
153 | #define __NR_readv 145 | ||
154 | #define __NR_writev 146 | ||
155 | #define __NR_getsid 147 | ||
156 | #define __NR_fdatasync 148 | ||
157 | #define __NR__sysctl 149 | ||
158 | #define __NR_mlock 150 | ||
159 | #define __NR_munlock 151 | ||
160 | #define __NR_mlockall 152 | ||
161 | #define __NR_munlockall 153 | ||
162 | #define __NR_sched_setparam 154 | ||
163 | #define __NR_sched_getparam 155 | ||
164 | #define __NR_sched_setscheduler 156 | ||
165 | #define __NR_sched_getscheduler 157 | ||
166 | #define __NR_sched_yield 158 | ||
167 | #define __NR_sched_get_priority_max 159 | ||
168 | #define __NR_sched_get_priority_min 160 | ||
169 | #define __NR_sched_rr_get_interval 161 | ||
170 | #define __NR_nanosleep 162 | ||
171 | #define __NR_mremap 163 | ||
172 | #define __NR_setresuid 164 | ||
173 | #define __NR_getresuid 165 | ||
174 | #define __NR_vm86 166 | ||
175 | #define __NR_query_module 167 | ||
176 | #define __NR_poll 168 | ||
177 | #define __NR_nfsservctl 169 | ||
178 | #define __NR_setresgid 170 | ||
179 | #define __NR_getresgid 171 | ||
180 | #define __NR_prctl 172 | ||
181 | #define __NR_rt_sigreturn 173 | ||
182 | #define __NR_rt_sigaction 174 | ||
183 | #define __NR_rt_sigprocmask 175 | ||
184 | #define __NR_rt_sigpending 176 | ||
185 | #define __NR_rt_sigtimedwait 177 | ||
186 | #define __NR_rt_sigqueueinfo 178 | ||
187 | #define __NR_rt_sigsuspend 179 | ||
188 | #define __NR_pread64 180 | ||
189 | #define __NR_pwrite64 181 | ||
190 | #define __NR_chown 182 | ||
191 | #define __NR_getcwd 183 | ||
192 | #define __NR_capget 184 | ||
193 | #define __NR_capset 185 | ||
194 | #define __NR_sigaltstack 186 | ||
195 | #define __NR_sendfile 187 | ||
196 | #define __NR_getpmsg 188 /* some people actually want streams */ | ||
197 | #define __NR_putpmsg 189 /* some people actually want streams */ | ||
198 | #define __NR_vfork 190 | ||
199 | #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ | ||
200 | #define __NR_mmap2 192 | ||
201 | #define __NR_truncate64 193 | ||
202 | #define __NR_ftruncate64 194 | ||
203 | #define __NR_stat64 195 | ||
204 | #define __NR_lstat64 196 | ||
205 | #define __NR_fstat64 197 | ||
206 | #define __NR_lchown32 198 | ||
207 | #define __NR_getuid32 199 | ||
208 | #define __NR_getgid32 200 | ||
209 | #define __NR_geteuid32 201 | ||
210 | #define __NR_getegid32 202 | ||
211 | #define __NR_setreuid32 203 | ||
212 | #define __NR_setregid32 204 | ||
213 | #define __NR_getgroups32 205 | ||
214 | #define __NR_setgroups32 206 | ||
215 | #define __NR_fchown32 207 | ||
216 | #define __NR_setresuid32 208 | ||
217 | #define __NR_getresuid32 209 | ||
218 | #define __NR_setresgid32 210 | ||
219 | #define __NR_getresgid32 211 | ||
220 | #define __NR_chown32 212 | ||
221 | #define __NR_setuid32 213 | ||
222 | #define __NR_setgid32 214 | ||
223 | #define __NR_setfsuid32 215 | ||
224 | #define __NR_setfsgid32 216 | ||
225 | #define __NR_pivot_root 217 | ||
226 | #define __NR_mincore 218 | ||
227 | #define __NR_madvise 219 | ||
228 | #define __NR_madvise1 219 /* delete when C lib stub is removed */ | ||
229 | #define __NR_getdents64 220 | ||
230 | #define __NR_fcntl64 221 | ||
231 | /* 223 is unused */ | ||
232 | #define __NR_gettid 224 | ||
233 | #define __NR_readahead 225 | ||
234 | #define __NR_setxattr 226 | ||
235 | #define __NR_lsetxattr 227 | ||
236 | #define __NR_fsetxattr 228 | ||
237 | #define __NR_getxattr 229 | ||
238 | #define __NR_lgetxattr 230 | ||
239 | #define __NR_fgetxattr 231 | ||
240 | #define __NR_listxattr 232 | ||
241 | #define __NR_llistxattr 233 | ||
242 | #define __NR_flistxattr 234 | ||
243 | #define __NR_removexattr 235 | ||
244 | #define __NR_lremovexattr 236 | ||
245 | #define __NR_fremovexattr 237 | ||
246 | #define __NR_tkill 238 | ||
247 | #define __NR_sendfile64 239 | ||
248 | #define __NR_futex 240 | ||
249 | #define __NR_sched_setaffinity 241 | ||
250 | #define __NR_sched_getaffinity 242 | ||
251 | #define __NR_set_thread_area 243 | ||
252 | #define __NR_get_thread_area 244 | ||
253 | #define __NR_io_setup 245 | ||
254 | #define __NR_io_destroy 246 | ||
255 | #define __NR_io_getevents 247 | ||
256 | #define __NR_io_submit 248 | ||
257 | #define __NR_io_cancel 249 | ||
258 | #define __NR_fadvise64 250 | ||
259 | |||
260 | #define __NR_exit_group 252 | ||
261 | #define __NR_lookup_dcookie 253 | ||
262 | #define __NR_epoll_create 254 | ||
263 | #define __NR_epoll_ctl 255 | ||
264 | #define __NR_epoll_wait 256 | ||
265 | #define __NR_remap_file_pages 257 | ||
266 | #define __NR_set_tid_address 258 | ||
267 | #define __NR_timer_create 259 | ||
268 | #define __NR_timer_settime (__NR_timer_create+1) | ||
269 | #define __NR_timer_gettime (__NR_timer_create+2) | ||
270 | #define __NR_timer_getoverrun (__NR_timer_create+3) | ||
271 | #define __NR_timer_delete (__NR_timer_create+4) | ||
272 | #define __NR_clock_settime (__NR_timer_create+5) | ||
273 | #define __NR_clock_gettime (__NR_timer_create+6) | ||
274 | #define __NR_clock_getres (__NR_timer_create+7) | ||
275 | #define __NR_clock_nanosleep (__NR_timer_create+8) | ||
276 | #define __NR_statfs64 268 | ||
277 | #define __NR_fstatfs64 269 | ||
278 | #define __NR_tgkill 270 | ||
279 | #define __NR_utimes 271 | ||
280 | #define __NR_fadvise64_64 272 | ||
281 | #define __NR_vserver 273 | ||
282 | #define __NR_mbind 274 | ||
283 | #define __NR_get_mempolicy 275 | ||
284 | #define __NR_set_mempolicy 276 | ||
285 | #define __NR_mq_open 277 | ||
286 | #define __NR_mq_unlink (__NR_mq_open+1) | ||
287 | #define __NR_mq_timedsend (__NR_mq_open+2) | ||
288 | #define __NR_mq_timedreceive (__NR_mq_open+3) | ||
289 | #define __NR_mq_notify (__NR_mq_open+4) | ||
290 | #define __NR_mq_getsetattr (__NR_mq_open+5) | ||
291 | #define __NR_sys_kexec_load 283 | ||
292 | #define __NR_waitid 284 | ||
293 | /* #define __NR_sys_setaltroot 285 */ | ||
294 | #define __NR_add_key 286 | ||
295 | #define __NR_request_key 287 | ||
296 | #define __NR_keyctl 288 | ||
297 | |||
298 | #define NR_syscalls 289 | ||
299 | |||
300 | /* | ||
301 | * user-visible error numbers are in the range -1 to -128: see | ||
302 | * <asm-i386/errno.h> | ||
303 | */ | ||
304 | #define __syscall_return(type, res) \ | ||
305 | do { \ | ||
306 | if ((unsigned long)(res) >= (unsigned long)(-(128 + 1))) { \ | ||
307 | errno = -(res); \ | ||
308 | res = -1; \ | ||
309 | } \ | ||
310 | return (type) (res); \ | ||
311 | } while (0) | ||
312 | |||
313 | /* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ | ||
314 | #define _syscall0(type,name) \ | ||
315 | type name(void) \ | ||
316 | { \ | ||
317 | long __res; \ | ||
318 | __asm__ volatile ("int $0x80" \ | ||
319 | : "=a" (__res) \ | ||
320 | : "0" (__NR_##name)); \ | ||
321 | __syscall_return(type,__res); \ | ||
322 | } | ||
323 | |||
324 | #define _syscall1(type,name,type1,arg1) \ | ||
325 | type name(type1 arg1) \ | ||
326 | { \ | ||
327 | long __res; \ | ||
328 | __asm__ volatile ("int $0x80" \ | ||
329 | : "=a" (__res) \ | ||
330 | : "0" (__NR_##name),"b" ((long)(arg1))); \ | ||
331 | __syscall_return(type,__res); \ | ||
332 | } | ||
333 | |||
334 | #define _syscall2(type,name,type1,arg1,type2,arg2) \ | ||
335 | type name(type1 arg1,type2 arg2) \ | ||
336 | { \ | ||
337 | long __res; \ | ||
338 | __asm__ volatile ("int $0x80" \ | ||
339 | : "=a" (__res) \ | ||
340 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \ | ||
341 | __syscall_return(type,__res); \ | ||
342 | } | ||
343 | |||
344 | #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ | ||
345 | type name(type1 arg1,type2 arg2,type3 arg3) \ | ||
346 | { \ | ||
347 | long __res; \ | ||
348 | __asm__ volatile ("int $0x80" \ | ||
349 | : "=a" (__res) \ | ||
350 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | ||
351 | "d" ((long)(arg3))); \ | ||
352 | __syscall_return(type,__res); \ | ||
353 | } | ||
354 | |||
355 | #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ | ||
356 | type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ | ||
357 | { \ | ||
358 | long __res; \ | ||
359 | __asm__ volatile ("int $0x80" \ | ||
360 | : "=a" (__res) \ | ||
361 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | ||
362 | "d" ((long)(arg3)),"S" ((long)(arg4))); \ | ||
363 | __syscall_return(type,__res); \ | ||
364 | } | ||
365 | |||
366 | #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ | ||
367 | type5,arg5) \ | ||
368 | type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ | ||
369 | { \ | ||
370 | long __res; \ | ||
371 | __asm__ volatile ("int $0x80" \ | ||
372 | : "=a" (__res) \ | ||
373 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | ||
374 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \ | ||
375 | __syscall_return(type,__res); \ | ||
376 | } | ||
377 | |||
378 | #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ | ||
379 | type5,arg5,type6,arg6) \ | ||
380 | type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ | ||
381 | { \ | ||
382 | long __res; \ | ||
383 | __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \ | ||
384 | : "=a" (__res) \ | ||
385 | : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | ||
386 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ | ||
387 | "0" ((long)(arg6))); \ | ||
388 | __syscall_return(type,__res); \ | ||
389 | } | ||
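The _syscallN() macros generate raw "int $0x80" stubs: the syscall number goes in %eax, the arguments in %ebx..%ebp, and __syscall_return() converts a negative result into errno plus a -1 return. A hedged, illustration-only sketch of declaring such a stub from user space (it shadows the C library wrapper and is shown purely to demonstrate the expansion):

    #include <errno.h>                  /* __syscall_return() assigns to errno */

    /* Expands to: static int getpid(void) { ... int $0x80 with
     * %eax = __NR_getpid ... } returning the pid, or -1 with errno set. */
    static _syscall0(int, getpid)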
390 | |||
391 | #ifdef __KERNEL__ | ||
392 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
393 | #define __ARCH_WANT_OLD_READDIR | ||
394 | #define __ARCH_WANT_OLD_STAT | ||
395 | #define __ARCH_WANT_STAT64 | ||
396 | #define __ARCH_WANT_SYS_ALARM | ||
397 | #define __ARCH_WANT_SYS_GETHOSTNAME | ||
398 | #define __ARCH_WANT_SYS_PAUSE | ||
399 | #define __ARCH_WANT_SYS_SGETMASK | ||
400 | #define __ARCH_WANT_SYS_SIGNAL | ||
401 | #define __ARCH_WANT_SYS_TIME | ||
402 | #define __ARCH_WANT_SYS_UTIME | ||
403 | #define __ARCH_WANT_SYS_WAITPID | ||
404 | #define __ARCH_WANT_SYS_SOCKETCALL | ||
405 | #define __ARCH_WANT_SYS_FADVISE64 | ||
406 | #define __ARCH_WANT_SYS_GETPGRP | ||
407 | #define __ARCH_WANT_SYS_LLSEEK | ||
408 | #define __ARCH_WANT_SYS_NICE | ||
409 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
410 | #define __ARCH_WANT_SYS_OLDUMOUNT | ||
411 | #define __ARCH_WANT_SYS_SIGPENDING | ||
412 | #define __ARCH_WANT_SYS_SIGPROCMASK | ||
413 | #define __ARCH_WANT_SYS_RT_SIGACTION | ||
414 | #endif | ||
415 | |||
416 | #ifdef __KERNEL_SYSCALLS__ | ||
417 | |||
418 | #include <linux/compiler.h> | ||
419 | #include <linux/types.h> | ||
420 | #include <linux/linkage.h> | ||
421 | #include <asm/ptrace.h> | ||
422 | |||
423 | /* | ||
424 | * we need this inline - forking from kernel space will result | ||
425 | * in NO COPY ON WRITE (!!!), until an execve is executed. This | ||
426 | * is no problem, but for the stack. This is handled by not letting | ||
427 | * main() use the stack at all after fork(). Thus, no function | ||
428 | * calls - which means inline code for fork too, as otherwise we | ||
429 | * would use the stack upon exit from 'fork()'. | ||
430 | * | ||
431 | * Actually only pause and fork are needed inline, so that there | ||
432 | * won't be any messing with the stack from main(), but we define | ||
433 | * some others too. | ||
434 | */ | ||
435 | static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp) | ||
436 | |||
437 | asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount); | ||
438 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
439 | unsigned long prot, unsigned long flags, | ||
440 | unsigned long fd, unsigned long pgoff); | ||
441 | asmlinkage int sys_execve(struct pt_regs regs); | ||
442 | asmlinkage int sys_clone(struct pt_regs regs); | ||
443 | asmlinkage int sys_fork(struct pt_regs regs); | ||
444 | asmlinkage int sys_vfork(struct pt_regs regs); | ||
445 | asmlinkage int sys_pipe(unsigned long __user *fildes); | ||
446 | asmlinkage int sys_ptrace(long request, long pid, long addr, long data); | ||
447 | asmlinkage long sys_iopl(unsigned long unused); | ||
448 | struct sigaction; | ||
449 | asmlinkage long sys_rt_sigaction(int sig, | ||
450 | const struct sigaction __user *act, | ||
451 | struct sigaction __user *oact, | ||
452 | size_t sigsetsize); | ||
453 | |||
454 | #endif | ||
455 | |||
456 | /* | ||
457 | * "Conditional" syscalls | ||
458 | * | ||
459 | * What we want is __attribute__((weak,alias("sys_ni_syscall"))), | ||
460 | * but it doesn't work on all toolchains, so we just do it by hand | ||
461 | */ | ||
462 | #ifndef cond_syscall | ||
463 | #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") | ||
464 | #endif | ||
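cond_syscall() turns the named entry into a weak alias for sys_ni_syscall(), so syscall-table slots can reference functions whose implementation may be configured out. A hedged one-line illustration of the kind of use found in kernel/sys_ni.c of this era:

    cond_syscall(sys_nfsservctl);       /* falls back to sys_ni_syscall if the implementation is not built */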
465 | |||
466 | #endif /* _ASM_I386_UNISTD_H_ */ | ||
diff --git a/include/asm-i386/user.h b/include/asm-i386/user.h new file mode 100644 index 000000000000..0e85d2a5e33a --- /dev/null +++ b/include/asm-i386/user.h | |||
@@ -0,0 +1,121 @@ | |||
1 | #ifndef _I386_USER_H | ||
2 | #define _I386_USER_H | ||
3 | |||
4 | #include <asm/page.h> | ||
5 | /* Core file format: The core file is written in such a way that gdb | ||
6 | can understand it and provide useful information to the user (under | ||
7 | linux we use the 'trad-core' bfd). There are quite a number of | ||
8 | obstacles to being able to view the contents of the floating point | ||
9 | registers, and until these are solved you will not be able to view the | ||
10 | contents of them. Actually, you can read in the core file and look at | ||
11 | the contents of the user struct to find out what the floating point | ||
12 | registers contain. | ||
13 | The actual file contents are as follows: | ||
14 | UPAGE: 1 page consisting of a user struct that tells gdb what is present | ||
15 | in the file. Directly after this is a copy of the task_struct, which | ||
16 | is currently not used by gdb, but it may come in useful at some point. | ||
17 | All of the registers are stored as part of the upage. The upage should | ||
18 | always be only one page. | ||
19 | DATA: The data area is stored. We use current->end_text to | ||
20 | current->brk to pick up all of the user variables, plus any memory | ||
21 | that may have been malloced. No attempt is made to determine if a page | ||
22 | is demand-zero or if a page is totally unused, we just cover the entire | ||
23 | range. All of the addresses are rounded in such a way that an integral | ||
24 | number of pages is written. | ||
25 | STACK: We need the stack information in order to get a meaningful | ||
26 | backtrace. We need to write the data from (esp) to | ||
27 | current->start_stack, so we round each of these off in order to be able | ||
28 | to write an integer number of pages. | ||
29 | The minimum core file size is 3 pages, or 12288 bytes. | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * Pentium III FXSR, SSE support | ||
34 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
35 | * | ||
36 | * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for | ||
37 | * interacting with the FXSR-format floating point environment. Floating | ||
38 | * point data can be accessed in the regular format in the usual manner, | ||
39 | * and both the standard and SIMD floating point data can be accessed via | ||
40 | * the new ptrace requests. In either case, changes to the FPU environment | ||
41 | * will be reflected in the task's state as expected. | ||
42 | */ | ||
43 | |||
44 | struct user_i387_struct { | ||
45 | long cwd; | ||
46 | long swd; | ||
47 | long twd; | ||
48 | long fip; | ||
49 | long fcs; | ||
50 | long foo; | ||
51 | long fos; | ||
52 | long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
53 | }; | ||
54 | |||
55 | struct user_fxsr_struct { | ||
56 | unsigned short cwd; | ||
57 | unsigned short swd; | ||
58 | unsigned short twd; | ||
59 | unsigned short fop; | ||
60 | long fip; | ||
61 | long fcs; | ||
62 | long foo; | ||
63 | long fos; | ||
64 | long mxcsr; | ||
65 | long reserved; | ||
66 | long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
67 | long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ | ||
68 | long padding[56]; | ||
69 | }; | ||
70 | |||
71 | /* | ||
72 | * This is the old layout of "struct pt_regs", and | ||
73 | * is still the layout used by user mode (the new | ||
74 | * pt_regs doesn't have all registers as the kernel | ||
75 | * doesn't use the extra segment registers) | ||
76 | */ | ||
77 | struct user_regs_struct { | ||
78 | long ebx, ecx, edx, esi, edi, ebp, eax; | ||
79 | unsigned short ds, __ds, es, __es; | ||
80 | unsigned short fs, __fs, gs, __gs; | ||
81 | long orig_eax, eip; | ||
82 | unsigned short cs, __cs; | ||
83 | long eflags, esp; | ||
84 | unsigned short ss, __ss; | ||
85 | }; | ||
86 | |||
87 | /* When the kernel dumps core, it starts by dumping the user struct - | ||
88 | this will be used by gdb to figure out where the data and stack segments | ||
89 | are within the file, and what virtual addresses to use. */ | ||
90 | struct user{ | ||
91 | /* We start with the registers, to mimic the way that "memory" is returned | ||
92 | from the ptrace(3,...) function. */ | ||
93 | struct user_regs_struct regs; /* Where the registers are actually stored */ | ||
94 | /* ptrace does not yet supply these. Someday.... */ | ||
95 | int u_fpvalid; /* True if math co-processor being used. */ | ||
96 | /* for this mess. Not yet used. */ | ||
97 | struct user_i387_struct i387; /* Math Co-processor registers. */ | ||
98 | /* The rest of this junk is to help gdb figure out what goes where */ | ||
99 | unsigned long int u_tsize; /* Text segment size (pages). */ | ||
100 | unsigned long int u_dsize; /* Data segment size (pages). */ | ||
101 | unsigned long int u_ssize; /* Stack segment size (pages). */ | ||
102 | unsigned long start_code; /* Starting virtual address of text. */ | ||
103 | unsigned long start_stack; /* Starting virtual address of stack area. | ||
104 | This is actually the bottom of the stack, | ||
105 | the top of the stack is always found in the | ||
106 | esp register. */ | ||
107 | long int signal; /* Signal that caused the core dump. */ | ||
108 | int reserved; /* No longer used */ | ||
109 | struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ | ||
110 | /* the registers. */ | ||
111 | struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ | ||
112 | unsigned long magic; /* To uniquely identify a core file */ | ||
113 | char u_comm[32]; /* User command that was responsible */ | ||
114 | int u_debugreg[8]; | ||
115 | }; | ||
116 | #define NBPG PAGE_SIZE | ||
117 | #define UPAGES 1 | ||
118 | #define HOST_TEXT_START_ADDR (u.start_code) | ||
119 | #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) | ||
120 | |||
121 | #endif /* _I386_USER_H */ | ||
diff --git a/include/asm-i386/vga.h b/include/asm-i386/vga.h new file mode 100644 index 000000000000..ef0c0e50cc95 --- /dev/null +++ b/include/asm-i386/vga.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Access to VGA videoram | ||
3 | * | ||
4 | * (c) 1998 Martin Mares <mj@ucw.cz> | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_ASM_VGA_H_ | ||
8 | #define _LINUX_ASM_VGA_H_ | ||
9 | |||
10 | /* | ||
11 | * On the PC, we can just recalculate addresses and then | ||
12 | * access the videoram directly without any black magic. | ||
13 | */ | ||
14 | |||
15 | #define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x) | ||
16 | |||
17 | #define vga_readb(x) (*(x)) | ||
18 | #define vga_writeb(x,y) (*(y) = (x)) | ||
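VGA_MAP_MEM() only converts the legacy physical framebuffer address into a kernel virtual one (phys_to_virt() comes from <asm/io.h>), after which vga_readb()/vga_writeb() are ordinary memory accesses. A hedged sketch, assuming the standard colour text buffer at physical 0xB8000:

    /* Illustration only: put an 'A' with attribute 0x07 in the top-left
     * character cell of the colour text-mode buffer. */
    static void example_poke_char(void)
    {
            char *vram = (char *)VGA_MAP_MEM(0xb8000);

            vga_writeb('A', vram);          /* character byte */
            vga_writeb(0x07, vram + 1);     /* attribute byte */
    }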
19 | |||
20 | #endif | ||
diff --git a/include/asm-i386/vic.h b/include/asm-i386/vic.h new file mode 100644 index 000000000000..4abfcfb91eb8 --- /dev/null +++ b/include/asm-i386/vic.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* Copyright (C) 1999,2001 | ||
2 | * | ||
3 | * Author: J.E.J.Bottomley@HansenPartnership.com | ||
4 | * | ||
5 | * Standard include definitions for the NCR Voyager Interrupt Controller */ | ||
6 | |||
7 | /* The eight CPI vectors. To activate a CPI, you write a bit mask | ||
8 | * corresponding to the processor set to be interrupted into the | ||
9 | * relevant register. That set of CPUs will then be interrupted with | ||
10 | * the CPI */ | ||
11 | static const int VIC_CPI_Registers[] = | ||
12 | {0xFC00, 0xFC01, 0xFC08, 0xFC09, | ||
13 | 0xFC10, 0xFC11, 0xFC18, 0xFC19 }; | ||
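As the comment above describes, a CPI is raised by writing a mask of target processors to the register for that CPI. A hedged sketch of the operation (assumes the registers are reached with ordinary port I/O, as the Voyager SMP code of this period does; the function name is invented):

    #include <asm/io.h>

    /* Send CPI number 'cpi' (0-7) to every CPU whose bit is set in 'cpu_mask'. */
    static inline void example_send_CPI(unsigned char cpu_mask, int cpi)
    {
            outb(cpu_mask, VIC_CPI_Registers[cpi]);
    }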
14 | |||
15 | #define VIC_PROC_WHO_AM_I 0xfc29 | ||
16 | # define QUAD_IDENTIFIER 0xC0 | ||
17 | # define EIGHT_SLOT_IDENTIFIER 0xE0 | ||
18 | #define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72 | ||
19 | #define VIC_CPI_BASE_REGISTER 0xFC41 | ||
20 | #define VIC_PROCESSOR_ID 0xFC21 | ||
21 | # define VIC_CPU_MASQUERADE_ENABLE 0x8 | ||
22 | |||
23 | #define VIC_CLAIM_REGISTER_0 0xFC38 | ||
24 | #define VIC_CLAIM_REGISTER_1 0xFC39 | ||
25 | #define VIC_REDIRECT_REGISTER_0 0xFC60 | ||
26 | #define VIC_REDIRECT_REGISTER_1 0xFC61 | ||
27 | #define VIC_PRIORITY_REGISTER 0xFC20 | ||
28 | |||
29 | #define VIC_PRIMARY_MC_BASE 0xFC48 | ||
30 | #define VIC_SECONDARY_MC_BASE 0xFC49 | ||
31 | |||
32 | #define QIC_PROCESSOR_ID 0xFC71 | ||
33 | # define QIC_CPUID_ENABLE 0x08 | ||
34 | |||
35 | #define QIC_VIC_CPI_BASE_REGISTER 0xFC79 | ||
36 | #define QIC_CPI_BASE_REGISTER 0xFC7A | ||
37 | |||
38 | #define QIC_MASK_REGISTER0 0xFC80 | ||
39 | /* NOTE: these are masked high, enabled low */ | ||
40 | # define QIC_PERF_TIMER 0x01 | ||
41 | # define QIC_LPE 0x02 | ||
42 | # define QIC_SYS_INT 0x04 | ||
43 | # define QIC_CMN_INT 0x08 | ||
44 | /* at the moment, just enable CMN_INT, disable SYS_INT */ | ||
45 | # define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */)) | ||
46 | #define QIC_MASK_REGISTER1 0xFC81 | ||
47 | # define QIC_BOOT_CPI_MASK 0xFE | ||
48 | /* Enable CPI's 1-6 inclusive */ | ||
49 | # define QIC_CPI_ENABLE 0x81 | ||
50 | |||
51 | #define QIC_INTERRUPT_CLEAR0 0xFC8A | ||
52 | #define QIC_INTERRUPT_CLEAR1 0xFC8B | ||
53 | |||
54 | /* this is where we place the CPI vectors */ | ||
55 | #define VIC_DEFAULT_CPI_BASE 0xC0 | ||
56 | /* this is where we place the QIC CPI vectors */ | ||
57 | #define QIC_DEFAULT_CPI_BASE 0xD0 | ||
58 | |||
59 | #define VIC_BOOT_INTERRUPT_MASK 0xfe | ||
60 | |||
61 | extern void smp_vic_timer_interrupt(struct pt_regs *regs); | ||
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h new file mode 100644 index 000000000000..40ec82c6914d --- /dev/null +++ b/include/asm-i386/vm86.h | |||
@@ -0,0 +1,208 @@ | |||
1 | #ifndef _LINUX_VM86_H | ||
2 | #define _LINUX_VM86_H | ||
3 | |||
4 | /* | ||
5 | * I'm guessing at the VIF/VIP flag usage, but hope that this is how | ||
6 | * the Pentium uses them. Linux will return from vm86 mode when both | ||
7 | * VIF and VIP are set. | ||
8 | * | ||
9 | * On a Pentium, we could probably optimize the virtual flags directly | ||
10 | * in the eflags register instead of doing it "by hand" in vflags... | ||
11 | * | ||
12 | * Linus | ||
13 | */ | ||
14 | |||
15 | #define TF_MASK 0x00000100 | ||
16 | #define IF_MASK 0x00000200 | ||
17 | #define IOPL_MASK 0x00003000 | ||
18 | #define NT_MASK 0x00004000 | ||
19 | #define VM_MASK 0x00020000 | ||
20 | #define AC_MASK 0x00040000 | ||
21 | #define VIF_MASK 0x00080000 /* virtual interrupt flag */ | ||
22 | #define VIP_MASK 0x00100000 /* virtual interrupt pending */ | ||
23 | #define ID_MASK 0x00200000 | ||
24 | |||
25 | #define BIOSSEG 0x0f000 | ||
26 | |||
27 | #define CPU_086 0 | ||
28 | #define CPU_186 1 | ||
29 | #define CPU_286 2 | ||
30 | #define CPU_386 3 | ||
31 | #define CPU_486 4 | ||
32 | #define CPU_586 5 | ||
33 | |||
34 | /* | ||
35 | * Return values for the 'vm86()' system call | ||
36 | */ | ||
37 | #define VM86_TYPE(retval) ((retval) & 0xff) | ||
38 | #define VM86_ARG(retval) ((retval) >> 8) | ||
39 | |||
40 | #define VM86_SIGNAL 0 /* return due to signal */ | ||
41 | #define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ | ||
42 | #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ | ||
43 | #define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ | ||
44 | |||
45 | /* | ||
46 | * Additional return values when invoking new vm86() | ||
47 | */ | ||
48 | #define VM86_PICRETURN 4 /* return due to pending PIC request */ | ||
49 | #define VM86_TRAP 6 /* return due to DOS-debugger request */ | ||
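The value returned by the vm86() call is packed: VM86_TYPE() extracts the reason from the low byte and VM86_ARG() the accompanying argument, e.g. the interrupt vector for VM86_INTx. A hedged sketch of decoding it in a user-space monitor (example_emulate_int() is a hypothetical helper):

    extern void example_emulate_int(int vector);    /* hypothetical helper */

    /* 'ret' is the value returned by the vm86() system call. */
    static void example_handle_vm86_return(int ret)
    {
            switch (VM86_TYPE(ret)) {
            case VM86_INTx:                 /* software interrupt: vector in ARG */
                    example_emulate_int(VM86_ARG(ret));
                    break;
            case VM86_SIGNAL:               /* a signal interrupted vm86 mode */
            case VM86_STI:                  /* virtual interrupts were re-enabled */
            case VM86_UNKNOWN:              /* unhandled GP fault */
            default:
                    break;
            }
    }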
50 | |||
51 | /* | ||
52 | * function codes when invoking new vm86() | ||
53 | */ | ||
54 | #define VM86_PLUS_INSTALL_CHECK 0 | ||
55 | #define VM86_ENTER 1 | ||
56 | #define VM86_ENTER_NO_BYPASS 2 | ||
57 | #define VM86_REQUEST_IRQ 3 | ||
58 | #define VM86_FREE_IRQ 4 | ||
59 | #define VM86_GET_IRQ_BITS 5 | ||
60 | #define VM86_GET_AND_RESET_IRQ 6 | ||
61 | |||
62 | /* | ||
63 | * This is the stack-layout seen by the user space program when we have | ||
64 | * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout | ||
65 | * is 'kernel_vm86_regs' (see below). | ||
66 | */ | ||
67 | |||
68 | struct vm86_regs { | ||
69 | /* | ||
70 | * normal regs, with special meaning for the segment descriptors.. | ||
71 | */ | ||
72 | long ebx; | ||
73 | long ecx; | ||
74 | long edx; | ||
75 | long esi; | ||
76 | long edi; | ||
77 | long ebp; | ||
78 | long eax; | ||
79 | long __null_ds; | ||
80 | long __null_es; | ||
81 | long __null_fs; | ||
82 | long __null_gs; | ||
83 | long orig_eax; | ||
84 | long eip; | ||
85 | unsigned short cs, __csh; | ||
86 | long eflags; | ||
87 | long esp; | ||
88 | unsigned short ss, __ssh; | ||
89 | /* | ||
90 | * these are specific to v86 mode: | ||
91 | */ | ||
92 | unsigned short es, __esh; | ||
93 | unsigned short ds, __dsh; | ||
94 | unsigned short fs, __fsh; | ||
95 | unsigned short gs, __gsh; | ||
96 | }; | ||
97 | |||
98 | struct revectored_struct { | ||
99 | unsigned long __map[8]; /* 256 bits */ | ||
100 | }; | ||
101 | |||
102 | struct vm86_struct { | ||
103 | struct vm86_regs regs; | ||
104 | unsigned long flags; | ||
105 | unsigned long screen_bitmap; | ||
106 | unsigned long cpu_type; | ||
107 | struct revectored_struct int_revectored; | ||
108 | struct revectored_struct int21_revectored; | ||
109 | }; | ||
110 | |||
111 | /* | ||
112 | * flags masks | ||
113 | */ | ||
114 | #define VM86_SCREEN_BITMAP 0x0001 | ||
115 | |||
116 | struct vm86plus_info_struct { | ||
117 | unsigned long force_return_for_pic:1; | ||
118 | unsigned long vm86dbg_active:1; /* for debugger */ | ||
119 | unsigned long vm86dbg_TFpendig:1; /* for debugger */ | ||
120 | unsigned long unused:28; | ||
121 | unsigned long is_vm86pus:1; /* for vm86 internal use */ | ||
122 | unsigned char vm86dbg_intxxtab[32]; /* for debugger */ | ||
123 | }; | ||
124 | |||
125 | struct vm86plus_struct { | ||
126 | struct vm86_regs regs; | ||
127 | unsigned long flags; | ||
128 | unsigned long screen_bitmap; | ||
129 | unsigned long cpu_type; | ||
130 | struct revectored_struct int_revectored; | ||
131 | struct revectored_struct int21_revectored; | ||
132 | struct vm86plus_info_struct vm86plus; | ||
133 | }; | ||
134 | |||
135 | #ifdef __KERNEL__ | ||
136 | /* | ||
137 | * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 | ||
138 | * mode - the main change is that the old segment descriptors aren't | ||
139 | * useful any more and are forced to be zero by the kernel (and the | ||
140 | * hardware when a trap occurs), and the real segment descriptors are | ||
141 | * at the end of the structure. Look at ptrace.h to see the "normal" | ||
142 | * setup. For user space layout see 'struct vm86_regs' above. | ||
143 | */ | ||
144 | |||
145 | struct kernel_vm86_regs { | ||
146 | /* | ||
147 | * normal regs, with special meaning for the segment descriptors.. | ||
148 | */ | ||
149 | long ebx; | ||
150 | long ecx; | ||
151 | long edx; | ||
152 | long esi; | ||
153 | long edi; | ||
154 | long ebp; | ||
155 | long eax; | ||
156 | long __null_ds; | ||
157 | long __null_es; | ||
158 | long orig_eax; | ||
159 | long eip; | ||
160 | unsigned short cs, __csh; | ||
161 | long eflags; | ||
162 | long esp; | ||
163 | unsigned short ss, __ssh; | ||
164 | /* | ||
165 | * these are specific to v86 mode: | ||
166 | */ | ||
167 | unsigned short es, __esh; | ||
168 | unsigned short ds, __dsh; | ||
169 | unsigned short fs, __fsh; | ||
170 | unsigned short gs, __gsh; | ||
171 | }; | ||
172 | |||
173 | struct kernel_vm86_struct { | ||
174 | struct kernel_vm86_regs regs; | ||
175 | /* | ||
176 | * the below part remains on the kernel stack while we are in VM86 mode. | ||
177 | * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we | ||
178 | * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above | ||
179 | * 'struct kernel_vm86_regs' with the then actual values. | ||
180 | * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct' | ||
181 | * in kernelspace, hence we need not reget the data from userspace. | ||
182 | */ | ||
183 | #define VM86_TSS_ESP0 flags | ||
184 | unsigned long flags; | ||
185 | unsigned long screen_bitmap; | ||
186 | unsigned long cpu_type; | ||
187 | struct revectored_struct int_revectored; | ||
188 | struct revectored_struct int21_revectored; | ||
189 | struct vm86plus_info_struct vm86plus; | ||
190 | struct pt_regs *regs32; /* here we save the pointer to the old regs */ | ||
191 | /* | ||
192 | * The below is not part of the structure, but the stack layout continues | ||
193 | * this way. In front of 'return-eip' may be some data, depending on | ||
194 | * compilation, so we don't rely on this and save the pointer to 'oldregs' | ||
195 | * in 'regs32' above. | ||
196 | * However, with GCC-2.7.2 and the current CFLAGS you see exactly this: | ||
197 | |||
198 | long return-eip; from call to vm86() | ||
199 | struct pt_regs oldregs; user space registers as saved by syscall | ||
200 | */ | ||
201 | }; | ||
202 | |||
203 | void handle_vm86_fault(struct kernel_vm86_regs *, long); | ||
204 | int handle_vm86_trap(struct kernel_vm86_regs *, long, int); | ||
205 | |||
206 | #endif /* __KERNEL__ */ | ||
207 | |||
208 | #endif | ||
diff --git a/include/asm-i386/voyager.h b/include/asm-i386/voyager.h new file mode 100644 index 000000000000..aaf432dd7673 --- /dev/null +++ b/include/asm-i386/voyager.h | |||
@@ -0,0 +1,521 @@ | |||
1 | /* Copyright (C) 1999,2001 | ||
2 | * | ||
3 | * Author: J.E.J.Bottomley@HansenPartnership.com | ||
4 | * | ||
5 | * Standard include definitions for the NCR Voyager system */ | ||
6 | |||
7 | #undef VOYAGER_DEBUG | ||
8 | #undef VOYAGER_CAT_DEBUG | ||
9 | |||
10 | #ifdef VOYAGER_DEBUG | ||
11 | #define VDEBUG(x) printk x | ||
12 | #else | ||
13 | #define VDEBUG(x) | ||
14 | #endif | ||
15 | |||
16 | /* There are three levels of voyager machine: 3,4 and 5. The rule is | ||
17 | * if it's less than 3435 it's a Level 3 except for a 3360 which is | ||
18 | * a level 4. A 3435 or above is a Level 5 */ | ||
19 | #define VOYAGER_LEVEL5_AND_ABOVE 0x3435 | ||
20 | #define VOYAGER_LEVEL4 0x3360 | ||
21 | |||
22 | /* The L4 DINO ASIC */ | ||
23 | #define VOYAGER_DINO 0x43 | ||
24 | |||
25 | /* voyager ports in standard I/O space */ | ||
26 | #define VOYAGER_MC_SETUP 0x96 | ||
27 | |||
28 | |||
29 | #define VOYAGER_CAT_CONFIG_PORT 0x97 | ||
30 | # define VOYAGER_CAT_DESELECT 0xff | ||
31 | #define VOYAGER_SSPB_RELOCATION_PORT 0x98 | ||
32 | |||
33 | /* Valid CAT controller commands */ | ||
34 | /* start instruction register cycle */ | ||
35 | #define VOYAGER_CAT_IRCYC 0x01 | ||
36 | /* start data register cycle */ | ||
37 | #define VOYAGER_CAT_DRCYC 0x02 | ||
38 | /* move to execute state */ | ||
39 | #define VOYAGER_CAT_RUN 0x0F | ||
40 | /* end operation */ | ||
41 | #define VOYAGER_CAT_END 0x80 | ||
42 | /* hold in idle state */ | ||
43 | #define VOYAGER_CAT_HOLD 0x90 | ||
44 | /* single step an "intest" vector */ | ||
45 | #define VOYAGER_CAT_STEP 0xE0 | ||
46 | /* return cat controller to CLEMSON mode */ | ||
47 | #define VOYAGER_CAT_CLEMSON 0xFF | ||
48 | |||
49 | /* the default cat command header */ | ||
50 | #define VOYAGER_CAT_HEADER 0x7F | ||
51 | |||
52 | /* the range of possible CAT module ids in the system */ | ||
53 | #define VOYAGER_MIN_MODULE 0x10 | ||
54 | #define VOYAGER_MAX_MODULE 0x1f | ||
55 | |||
56 | /* The voyager registers per asic */ | ||
57 | #define VOYAGER_ASIC_ID_REG 0x00 | ||
58 | #define VOYAGER_ASIC_TYPE_REG 0x01 | ||
59 | /* the sub address registers can be made auto incrementing on reads */ | ||
60 | #define VOYAGER_AUTO_INC_REG 0x02 | ||
61 | # define VOYAGER_AUTO_INC 0x04 | ||
62 | # define VOYAGER_NO_AUTO_INC 0xfb | ||
63 | #define VOYAGER_SUBADDRDATA 0x03 | ||
64 | #define VOYAGER_SCANPATH 0x05 | ||
65 | # define VOYAGER_CONNECT_ASIC 0x01 | ||
66 | # define VOYAGER_DISCONNECT_ASIC 0xfe | ||
67 | #define VOYAGER_SUBADDRLO 0x06 | ||
68 | #define VOYAGER_SUBADDRHI 0x07 | ||
69 | #define VOYAGER_SUBMODSELECT 0x08 | ||
70 | #define VOYAGER_SUBMODPRESENT 0x09 | ||
71 | |||
72 | #define VOYAGER_SUBADDR_LO 0xff | ||
73 | #define VOYAGER_SUBADDR_HI 0xffff | ||
74 | |||
75 | /* the maximum size of a scan path -- used to form instructions */ | ||
76 | #define VOYAGER_MAX_SCAN_PATH 0x100 | ||
77 | /* the biggest possible register size (in bytes) */ | ||
78 | #define VOYAGER_MAX_REG_SIZE 4 | ||
79 | |||
80 | /* Total number of possible modules (including submodules) */ | ||
81 | #define VOYAGER_MAX_MODULES 16 | ||
82 | /* Largest number of asics per module */ | ||
83 | #define VOYAGER_MAX_ASICS_PER_MODULE 7 | ||
84 | |||
85 | /* the CAT asic of each module is always the first one */ | ||
86 | #define VOYAGER_CAT_ID 0 | ||
87 | #define VOYAGER_PSI 0x1a | ||
88 | |||
89 | /* voyager instruction operations and registers */ | ||
90 | #define VOYAGER_READ_CONFIG 0x1 | ||
91 | #define VOYAGER_WRITE_CONFIG 0x2 | ||
92 | #define VOYAGER_BYPASS 0xff | ||
93 | |||
94 | typedef struct voyager_asic | ||
95 | { | ||
96 | __u8 asic_addr; /* ASIC address; Level 4 */ | ||
97 | __u8 asic_type; /* ASIC type */ | ||
98 | __u8 asic_id; /* ASIC id */ | ||
99 | __u8 jtag_id[4]; /* JTAG id */ | ||
100 | __u8 asic_location; /* Location within scan path; start w/ 0 */ | ||
101 | __u8 bit_location; /* Location within bit stream; start w/ 0 */ | ||
102 | __u8 ireg_length; /* Instruction register length */ | ||
103 | __u16 subaddr; /* Amount of sub address space */ | ||
104 | struct voyager_asic *next; /* Next asic in linked list */ | ||
105 | } voyager_asic_t; | ||
106 | |||
107 | typedef struct voyager_module { | ||
108 | __u8 module_addr; /* Module address */ | ||
109 | __u8 scan_path_connected; /* Scan path connected */ | ||
110 | __u16 ee_size; /* Size of the EEPROM */ | ||
111 | __u16 num_asics; /* Number of Asics */ | ||
112 | __u16 inst_bits; /* Instruction bits in the scan path */ | ||
113 | __u16 largest_reg; /* Largest register in the scan path */ | ||
114 | __u16 smallest_reg; /* Smallest register in the scan path */ | ||
115 | voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ | ||
116 | struct voyager_module *submodule; /* Submodule pointer */ | ||
117 | struct voyager_module *next; /* Next module in linked list */ | ||
118 | } voyager_module_t; | ||
119 | |||
120 | typedef struct voyager_eeprom_hdr { | ||
121 | __u8 module_id[4] __attribute__((packed)); | ||
122 | __u8 version_id __attribute__((packed)); | ||
123 | __u8 config_id __attribute__((packed)); | ||
124 | __u16 boundry_id __attribute__((packed)); /* boundary scan id */ | ||
125 | __u16 ee_size __attribute__((packed)); /* size of EEPROM */ | ||
126 | __u8 assembly[11] __attribute__((packed)); /* assembly # */ | ||
127 | __u8 assembly_rev __attribute__((packed)); /* assembly rev */ | ||
128 | __u8 tracer[4] __attribute__((packed)); /* tracer number */ | ||
129 | __u16 assembly_cksum __attribute__((packed)); /* asm checksum */ | ||
130 | __u16 power_consump __attribute__((packed)); /* pwr requirements */ | ||
131 | __u16 num_asics __attribute__((packed)); /* number of asics */ | ||
132 | __u16 bist_time __attribute__((packed)); /* min. bist time */ | ||
133 | __u16 err_log_offset __attribute__((packed)); /* error log offset */ | ||
134 | __u16 scan_path_offset __attribute__((packed));/* scan path offset */ | ||
135 | __u16 cct_offset __attribute__((packed)); | ||
136 | __u16 log_length __attribute__((packed)); /* length of err log */ | ||
137 | __u16 xsum_end __attribute__((packed)); /* offset to end of | ||
138 | checksum */ | ||
139 | __u8 reserved[4] __attribute__((packed)); | ||
140 | __u8 sflag __attribute__((packed)); /* starting sentinel */ | ||
141 | __u8 part_number[13] __attribute__((packed)); /* prom part number */ | ||
142 | __u8 version[10] __attribute__((packed)); /* version number */ | ||
143 | __u8 signature[8] __attribute__((packed)); | ||
144 | __u16 eeprom_chksum __attribute__((packed)); | ||
145 | __u32 data_stamp_offset __attribute__((packed)); | ||
146 | __u8 eflag __attribute__((packed)); /* ending sentinel */ | ||
147 | } voyager_eprom_hdr_t; | ||
148 | |||
149 | |||
150 | |||
151 | #define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) | ||
152 | #define VOYAGER_XSUM_END_OFFSET 0x2a | ||
153 | |||
154 | /* the following three definitions are for internal table layouts | ||
155 | * in the module EPROMs. We really only care about the IDs and | ||
156 | * offsets */ | ||
157 | typedef struct voyager_sp_table { | ||
158 | __u8 asic_id __attribute__((packed)); | ||
159 | __u8 bypass_flag __attribute__((packed)); | ||
160 | __u16 asic_data_offset __attribute__((packed)); | ||
161 | __u16 config_data_offset __attribute__((packed)); | ||
162 | } voyager_sp_table_t; | ||
163 | |||
164 | typedef struct voyager_jtag_table { | ||
165 | __u8 icode[4] __attribute__((packed)); | ||
166 | __u8 runbist[4] __attribute__((packed)); | ||
167 | __u8 intest[4] __attribute__((packed)); | ||
168 | __u8 samp_preld[4] __attribute__((packed)); | ||
169 | __u8 ireg_len __attribute__((packed)); | ||
170 | } voyager_jtt_t; | ||
171 | |||
172 | typedef struct voyager_asic_data_table { | ||
173 | __u8 jtag_id[4] __attribute__((packed)); | ||
174 | __u16 length_bsr __attribute__((packed)); | ||
175 | __u16 length_bist_reg __attribute__((packed)); | ||
176 | __u32 bist_clk __attribute__((packed)); | ||
177 | __u16 subaddr_bits __attribute__((packed)); | ||
178 | __u16 seed_bits __attribute__((packed)); | ||
179 | __u16 sig_bits __attribute__((packed)); | ||
180 | __u16 jtag_offset __attribute__((packed)); | ||
181 | } voyager_at_t; | ||
182 | |||
183 | /* Voyager Interrupt Controller (VIC) registers */ | ||
184 | |||
185 | /* Base to add to Cross Processor Interrupts (CPIs) when triggering | ||
186 | * the CPU IRQ line */ | ||
187 | /* register defines for the WCBICs (one per processor) */ | ||
188 | #define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */ | ||
189 | #define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */ | ||
190 | #define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */ | ||
191 | #define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */ | ||
192 | #define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */ | ||
193 | #define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */ | ||
194 | #define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */ | ||
195 | #define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */ | ||
196 | |||
197 | |||
198 | /* top of memory registers */ | ||
199 | #define VOYAGER_WCBIC_TOM_L 0x4 | ||
200 | #define VOYAGER_WCBIC_TOM_H 0x5 | ||
201 | |||
202 | /* register defines for Voyager Memory Control (VMC) | ||
203 | * these are present on L4 machines only */ | ||
204 | #define VOYAGER_VMC1 0x81 | ||
205 | #define VOYAGER_VMC2 0x91 | ||
206 | #define VOYAGER_VMC3 0xa1 | ||
207 | #define VOYAGER_VMC4 0xb1 | ||
208 | |||
209 | /* VMC Ports */ | ||
210 | #define VOYAGER_VMC_MEMORY_SETUP 0x9 | ||
211 | # define VMC_Interleaving 0x01 | ||
212 | # define VMC_4Way 0x02 | ||
213 | # define VMC_EvenCacheLines 0x04 | ||
214 | # define VMC_HighLine 0x08 | ||
215 | # define VMC_Start0_Enable 0x20 | ||
216 | # define VMC_Start1_Enable 0x40 | ||
217 | # define VMC_Vremap 0x80 | ||
218 | #define VOYAGER_VMC_BANK_DENSITY 0xa | ||
219 | # define VMC_BANK_EMPTY 0 | ||
220 | # define VMC_BANK_4MB 1 | ||
221 | # define VMC_BANK_16MB 2 | ||
222 | # define VMC_BANK_64MB 3 | ||
223 | # define VMC_BANK0_MASK 0x03 | ||
224 | # define VMC_BANK1_MASK 0x0C | ||
225 | # define VMC_BANK2_MASK 0x30 | ||
226 | # define VMC_BANK3_MASK 0xC0 | ||
227 | |||
228 | /* Magellan Memory Controller (MMC) defines - present on L5 */ | ||
229 | #define VOYAGER_MMC_ASIC_ID 1 | ||
230 | /* the two memory modules corresponding to memory cards in the system */ | ||
231 | #define VOYAGER_MMC_MEMORY0_MODULE 0x14 | ||
232 | #define VOYAGER_MMC_MEMORY1_MODULE 0x15 | ||
233 | /* the Magellan Memory Address (MMA) defines */ | ||
234 | #define VOYAGER_MMA_ASIC_ID 2 | ||
235 | |||
236 | /* Submodule number for the Quad Baseboard */ | ||
237 | #define VOYAGER_QUAD_BASEBOARD 1 | ||
238 | |||
239 | /* ASIC defines for the Quad Baseboard */ | ||
240 | #define VOYAGER_QUAD_QDATA0 1 | ||
241 | #define VOYAGER_QUAD_QDATA1 2 | ||
242 | #define VOYAGER_QUAD_QABC 3 | ||
243 | |||
244 | /* Useful areas in extended CMOS */ | ||
245 | #define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a | ||
246 | #define VOYAGER_MEMORY_CLICKMAP 0xa23 | ||
247 | #define VOYAGER_DUMP_LOCATION 0xb1a | ||
248 | |||
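For illustration, here is a hedged sketch of reading one of these areas through voyager_extended_cmos_read(), which is declared further down in this header. The width of the processor-present mask and the byte-at-a-time access pattern are assumptions, not something this header states.

/* Hedged sketch: assemble the processor-present mask from extended CMOS
 * a byte at a time.  The 4-byte width is an assumption. */
static __u32 example_read_processor_present_mask(void)
{
        __u32 mask = 0;
        int i;

        for (i = 0; i < 4; i++)
                mask |= (__u32)voyager_extended_cmos_read(
                                VOYAGER_PROCESSOR_PRESENT_MASK + i) << (8 * i);
        return mask;
}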
249 | /* SUS In Control bit - used to tell SUS that we don't need to be | ||
250 | * babysat anymore */ | ||
251 | #define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff | ||
252 | # define VOYAGER_IN_CONTROL_FLAG 0x80 | ||
253 | |||
254 | /* Voyager PSI defines */ | ||
255 | #define VOYAGER_PSI_STATUS_REG 0x08 | ||
256 | # define PSI_DC_FAIL 0x01 | ||
257 | # define PSI_MON 0x02 | ||
258 | # define PSI_FAULT 0x04 | ||
259 | # define PSI_ALARM 0x08 | ||
260 | # define PSI_CURRENT 0x10 | ||
261 | # define PSI_DVM 0x20 | ||
262 | # define PSI_PSCFAULT 0x40 | ||
263 | # define PSI_STAT_CHG 0x80 | ||
264 | |||
265 | #define VOYAGER_PSI_SUPPLY_REG 0x8000 | ||
266 | /* read */ | ||
267 | # define PSI_FAIL_DC 0x01 | ||
268 | # define PSI_FAIL_AC 0x02 | ||
269 | # define PSI_MON_INT 0x04 | ||
270 | # define PSI_SWITCH_OFF 0x08 | ||
271 | # define PSI_HX_OFF 0x10 | ||
272 | # define PSI_SECURITY 0x20 | ||
273 | # define PSI_CMOS_BATT_LOW 0x40 | ||
274 | # define PSI_CMOS_BATT_FAIL 0x80 | ||
275 | /* write */ | ||
276 | # define PSI_CLR_SWITCH_OFF 0x13 | ||
277 | # define PSI_CLR_HX_OFF 0x14 | ||
278 | # define PSI_CLR_CMOS_BATT_FAIL 0x17 | ||
279 | |||
280 | #define VOYAGER_PSI_MASK 0x8001 | ||
281 | # define PSI_MASK_MASK 0x10 | ||
282 | |||
283 | #define VOYAGER_PSI_AC_FAIL_REG 0x8004 | ||
284 | #define AC_FAIL_STAT_CHANGE 0x80 | ||
285 | |||
286 | #define VOYAGER_PSI_GENERAL_REG 0x8007 | ||
287 | /* read */ | ||
288 | # define PSI_SWITCH_ON 0x01 | ||
289 | # define PSI_SWITCH_ENABLED 0x02 | ||
290 | # define PSI_ALARM_ENABLED 0x08 | ||
291 | # define PSI_SECURE_ENABLED 0x10 | ||
292 | # define PSI_COLD_RESET 0x20 | ||
293 | # define PSI_COLD_START 0x80 | ||
294 | /* write */ | ||
295 | # define PSI_POWER_DOWN 0x10 | ||
296 | # define PSI_SWITCH_DISABLE 0x01 | ||
297 | # define PSI_SWITCH_ENABLE 0x11 | ||
298 | # define PSI_CLEAR 0x12 | ||
299 | # define PSI_ALARM_DISABLE 0x03 | ||
300 | # define PSI_ALARM_ENABLE 0x13 | ||
301 | # define PSI_CLEAR_COLD_RESET 0x05 | ||
302 | # define PSI_SET_COLD_RESET 0x15 | ||
303 | # define PSI_CLEAR_COLD_START 0x07 | ||
304 | # define PSI_SET_COLD_START 0x17 | ||
305 | |||
306 | |||
307 | |||
308 | struct voyager_bios_info { | ||
309 | __u8 len; | ||
310 | __u8 major; | ||
311 | __u8 minor; | ||
312 | __u8 debug; | ||
313 | __u8 num_classes; | ||
314 | __u8 class_1; | ||
315 | __u8 class_2; | ||
316 | }; | ||
317 | |||
318 | /* The following structures and definitions are for the Kernel/SUS | ||
319 | * interface; these are needed to find out how SUS initialised any Quad | ||
320 | * boards in the system */ | ||
321 | |||
322 | #define NUMBER_OF_MC_BUSSES 2 | ||
323 | #define SLOTS_PER_MC_BUS 8 | ||
324 | #define MAX_CPUS 16 /* 16 way CPU system */ | ||
325 | #define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */ | ||
326 | #define MAX_CACHE_LEVELS 4 /* # of cache levels supported */ | ||
327 | #define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */ | ||
328 | #define NUMBER_OF_POS_REGS 8 | ||
329 | |||
330 | typedef struct { | ||
331 | __u8 MC_Slot __attribute__((packed)); | ||
332 | __u8 POS_Values[NUMBER_OF_POS_REGS] __attribute__((packed)); | ||
333 | } MC_SlotInformation_t; | ||
334 | |||
335 | struct QuadDescription { | ||
336 | __u8 Type __attribute__((packed)); /* for type 0 (DYADIC or MONADIC) all fields | ||
337 | * will be zero except for slot */ | ||
338 | __u8 StructureVersion __attribute__((packed)); | ||
339 | __u32 CPI_BaseAddress __attribute__((packed)); | ||
340 | __u32 LARC_BankSize __attribute__((packed)); | ||
341 | __u32 LocalMemoryStateBits __attribute__((packed)); | ||
342 | __u8 Slot __attribute__((packed)); /* Processor slots 1 - 4 */ | ||
343 | }; | ||
344 | |||
345 | struct ProcBoardInfo { | ||
346 | __u8 Type __attribute__((packed)); | ||
347 | __u8 StructureVersion __attribute__((packed)); | ||
348 | __u8 NumberOfBoards __attribute__((packed)); | ||
349 | struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS] __attribute__((packed)); | ||
350 | }; | ||
351 | |||
352 | struct CacheDescription { | ||
353 | __u8 Level __attribute__((packed)); | ||
354 | __u32 TotalSize __attribute__((packed)); | ||
355 | __u16 LineSize __attribute__((packed)); | ||
356 | __u8 Associativity __attribute__((packed)); | ||
357 | __u8 CacheType __attribute__((packed)); | ||
358 | __u8 WriteType __attribute__((packed)); | ||
359 | __u8 Number_CPUs_SharedBy __attribute__((packed)); | ||
360 | __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS] __attribute__((packed)); | ||
361 | |||
362 | }; | ||
363 | |||
364 | struct CPU_Description { | ||
365 | __u8 CPU_HardwareId __attribute__((packed)); | ||
366 | char *FRU_String __attribute__((packed)); | ||
367 | __u8 NumberOfCacheLevels __attribute__((packed)); | ||
368 | struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS] __attribute__((packed)); | ||
369 | }; | ||
370 | |||
371 | struct CPU_Info { | ||
372 | __u8 Type __attribute__((packed)); | ||
373 | __u8 StructureVersion __attribute__((packed)); | ||
374 | __u8 NumberOf_CPUs __attribute__((packed)); | ||
375 | struct CPU_Description CPU_Data[MAX_CPUS] __attribute__((packed)); | ||
376 | }; | ||
377 | |||
378 | |||
379 | /* | ||
380 | * This structure will be used by SUS and the OS. | ||
381 | * The assumption about this structure is that no blank space is | ||
382 | * packed in it by our friend the compiler. | ||
383 | */ | ||
384 | typedef struct { | ||
385 | __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ | ||
386 | __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ | ||
387 | __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ | ||
388 | __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ | ||
389 | __u32 OS_Flags; /* Flags set by the OS as info for SUS */ | ||
390 | __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ | ||
391 | __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ | ||
392 | __u32 WatchDogCount; /* Updated by the OS on every tick. */ | ||
393 | __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ | ||
394 | MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ | ||
395 | /* All new SECOND_PASS_INTERFACE fields added from this point */ | ||
396 | struct ProcBoardInfo *BoardData; | ||
397 | struct CPU_Info *CPU_Data; | ||
398 | /* All new fields must be added from this point */ | ||
399 | } Voyager_KernelSUS_Mbox_t; | ||
400 | |||
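Since the comment above leans on the compiler inserting no padding, one way to make that assumption explicit is a build-time size check. The sketch below is mine (it assumes BUILD_BUG_ON from <linux/kernel.h>); the expected size is simply the sum of the member sizes, not a number quoted from any specification.

/* Hedged sketch: fail the build if the compiler ever pads this layout.
 * Assumes BUILD_BUG_ON() from <linux/kernel.h>. */
static inline void example_check_mailbox_layout(void)
{
        BUILD_BUG_ON(sizeof(Voyager_KernelSUS_Mbox_t) !=
                     4 * sizeof(__u8) + 5 * sizeof(__u32) +
                     NUMBER_OF_MC_BUSSES * SLOTS_PER_MC_BUS *
                     sizeof(MC_SlotInformation_t) +
                     2 * sizeof(void *));
}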
401 | /* structure for finding the right memory address to send a QIC CPI to */ | ||
402 | struct voyager_qic_cpi { | ||
403 | /* Each cache line (32 bytes) can trigger a cpi. The cpi | ||
404 | * read/write may occur anywhere in the cache line---pick the | ||
405 | * middle to be safe */ | ||
406 | struct { | ||
407 | __u32 pad1[3]; | ||
408 | __u32 cpi; | ||
409 | __u32 pad2[4]; | ||
410 | } qic_cpi[8]; | ||
411 | }; | ||
412 | |||
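The comment above is easier to follow with the arithmetic spelled out: each qic_cpi[] entry is exactly one 32-byte cache line (3 + 1 + 4 __u32 words), and the cpi word sits in the middle of it. Below is a hedged sketch that only computes the address of that word through voyager_quad_cpi_addr, declared later in this file; whether the hardware is poked with a read or a write is not documented here, so the sketch stops at the address.

/* Hedged sketch: the "right memory address" for a given CPI on a given
 * quad CPU is the middle word of its 32-byte cache line.  cpi is assumed
 * to already be an index 0-7 into qic_cpi[]. */
static inline volatile __u32 *example_qic_cpi_word(int cpu, int cpi)
{
        return &voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}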
413 | struct voyager_status { | ||
414 | __u32 power_fail:1; | ||
415 | __u32 switch_off:1; | ||
416 | __u32 request_from_kernel:1; | ||
417 | }; | ||
418 | |||
419 | struct voyager_psi_regs { | ||
420 | __u8 cat_id; | ||
421 | __u8 cat_dev; | ||
422 | __u8 cat_control; | ||
423 | __u8 subaddr; | ||
424 | __u8 dummy4; | ||
425 | __u8 checkbit; | ||
426 | __u8 subaddr_low; | ||
427 | __u8 subaddr_high; | ||
428 | __u8 intstatus; | ||
429 | __u8 stat1; | ||
430 | __u8 stat3; | ||
431 | __u8 fault; | ||
432 | __u8 tms; | ||
433 | __u8 gen; | ||
434 | __u8 sysconf; | ||
435 | __u8 dummy15; | ||
436 | }; | ||
437 | |||
438 | struct voyager_psi_subregs { | ||
439 | __u8 supply; | ||
440 | __u8 mask; | ||
441 | __u8 present; | ||
442 | __u8 DCfail; | ||
443 | __u8 ACfail; | ||
444 | __u8 fail; | ||
445 | __u8 UPSfail; | ||
446 | __u8 genstatus; | ||
447 | }; | ||
448 | |||
449 | struct voyager_psi { | ||
450 | struct voyager_psi_regs regs; | ||
451 | struct voyager_psi_subregs subregs; | ||
452 | }; | ||
453 | |||
454 | struct voyager_SUS { | ||
455 | #define VOYAGER_DUMP_BUTTON_NMI 0x1 | ||
456 | #define VOYAGER_SUS_VALID 0x2 | ||
457 | #define VOYAGER_SYSINT_COMPLETE 0x3 | ||
458 | __u8 SUS_mbox; | ||
459 | #define VOYAGER_NO_COMMAND 0x0 | ||
460 | #define VOYAGER_IGNORE_DUMP 0x1 | ||
461 | #define VOYAGER_DO_DUMP 0x2 | ||
462 | #define VOYAGER_SYSINT_HANDSHAKE 0x3 | ||
463 | #define VOYAGER_DO_MEM_DUMP 0x4 | ||
464 | #define VOYAGER_SYSINT_WAS_RECOVERED 0x5 | ||
465 | __u8 kernel_mbox; | ||
466 | #define VOYAGER_MAILBOX_VERSION 0x10 | ||
467 | __u8 SUS_version; | ||
468 | __u8 kernel_version; | ||
469 | #define VOYAGER_OS_HAS_SYSINT 0x1 | ||
470 | #define VOYAGER_OS_IN_PROGRESS 0x2 | ||
471 | #define VOYAGER_UPDATING_WDPERIOD 0x4 | ||
472 | __u32 kernel_flags; | ||
473 | #define VOYAGER_SUS_BOOTING 0x1 | ||
474 | #define VOYAGER_SUS_IN_PROGRESS 0x2 | ||
475 | __u32 SUS_flags; | ||
476 | __u32 watchdog_period; | ||
477 | __u32 watchdog_count; | ||
478 | __u32 SUS_errorlog; | ||
479 | /* lots of system configuration stuff under here */ | ||
480 | }; | ||
481 | |||
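To make the mailbox exchange concrete, here is a hedged sketch of one interaction: SUS announces a dump-button NMI through SUS_mbox and the kernel answers through kernel_mbox. Whether the kernel dumps or ignores, and where this check would actually sit, are assumptions for illustration only.

/* Hedged sketch: answer a dump-button NMI announced by SUS.  The choice
 * of reply is illustrative, not prescribed by this header. */
static inline void example_answer_sus(struct voyager_SUS *sus)
{
        if (sus->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI)
                sus->kernel_mbox = VOYAGER_IGNORE_DUMP; /* or VOYAGER_DO_DUMP */
}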
482 | /* Variables exported by voyager_smp */ | ||
483 | extern __u32 voyager_extended_vic_processors; | ||
484 | extern __u32 voyager_allowed_boot_processors; | ||
485 | extern __u32 voyager_quad_processors; | ||
486 | extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS]; | ||
487 | extern struct voyager_SUS *voyager_SUS; | ||
488 | |||
489 | /* variables that are always exported */ | ||
490 | extern int voyager_level; | ||
491 | extern int kvoyagerd_running; | ||
492 | extern struct semaphore kvoyagerd_sem; | ||
493 | extern struct voyager_status voyager_status; | ||
494 | |||
495 | |||
496 | |||
497 | /* functions exported by the voyager and voyager_smp modules */ | ||
498 | |||
499 | extern int voyager_cat_readb(__u8 module, __u8 asic, int reg); | ||
500 | extern void voyager_cat_init(void); | ||
501 | extern void voyager_detect(struct voyager_bios_info *); | ||
502 | extern void voyager_trap_init(void); | ||
503 | extern void voyager_setup_irqs(void); | ||
504 | extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length); | ||
505 | extern void voyager_smp_intr_init(void); | ||
506 | extern __u8 voyager_extended_cmos_read(__u16 cmos_address); | ||
507 | extern void voyager_smp_dump(void); | ||
508 | extern void voyager_timer_interrupt(struct pt_regs *regs); | ||
509 | extern void smp_local_timer_interrupt(struct pt_regs * regs); | ||
510 | extern void voyager_power_off(void); | ||
511 | extern void smp_voyager_power_off(void *dummy); | ||
512 | extern void voyager_restart(void); | ||
513 | extern void voyager_cat_power_off(void); | ||
514 | extern void voyager_cat_do_common_interrupt(void); | ||
515 | extern void voyager_handle_nmi(void); | ||
516 | /* Command codes for voyager_cat_psi() below */ | ||
517 | #define VOYAGER_PSI_READ 0 | ||
518 | #define VOYAGER_PSI_WRITE 1 | ||
519 | #define VOYAGER_PSI_SUBREAD 2 | ||
520 | #define VOYAGER_PSI_SUBWRITE 3 | ||
521 | extern void voyager_cat_psi(__u8, __u16, __u8 *); | ||
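As a hedged example of how these command codes pair with voyager_cat_psi() and the PSI register defines earlier in this file, the sketch below reads the PSI status register and tests the DC-fail bit. The meaning of the three arguments (command, register, data buffer) is inferred from the prototype, not documented here.

/* Hedged sketch: read the PSI status register and test one status bit.
 * Argument order (command, register, data) is an assumption. */
static inline int example_psi_dc_failed(void)
{
        __u8 status;

        voyager_cat_psi(VOYAGER_PSI_READ, VOYAGER_PSI_STATUS_REG, &status);
        return (status & PSI_DC_FAIL) != 0;
}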
diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h new file mode 100644 index 000000000000..f80e2dbe1b56 --- /dev/null +++ b/include/asm-i386/xor.h | |||
@@ -0,0 +1,883 @@ | |||
1 | /* | ||
2 | * include/asm-i386/xor.h | ||
3 | * | ||
4 | * Optimized RAID-5 checksumming functions for MMX and SSE. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
13 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * High-speed RAID5 checksumming functions utilizing MMX instructions. | ||
18 | * Copyright (C) 1998 Ingo Molnar. | ||
19 | */ | ||
20 | |||
21 | #define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" | ||
22 | #define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" | ||
23 | #define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" | ||
24 | #define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" | ||
25 | #define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" | ||
26 | #define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" | ||
27 | |||
28 | #include <asm/i387.h> | ||
29 | |||
30 | static void | ||
31 | xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
32 | { | ||
33 | unsigned long lines = bytes >> 7; | ||
34 | |||
35 | kernel_fpu_begin(); | ||
36 | |||
37 | __asm__ __volatile__ ( | ||
38 | #undef BLOCK | ||
39 | #define BLOCK(i) \ | ||
40 | LD(i,0) \ | ||
41 | LD(i+1,1) \ | ||
42 | LD(i+2,2) \ | ||
43 | LD(i+3,3) \ | ||
44 | XO1(i,0) \ | ||
45 | ST(i,0) \ | ||
46 | XO1(i+1,1) \ | ||
47 | ST(i+1,1) \ | ||
48 | XO1(i+2,2) \ | ||
49 | ST(i+2,2) \ | ||
50 | XO1(i+3,3) \ | ||
51 | ST(i+3,3) | ||
52 | |||
53 | " .align 32 ;\n" | ||
54 | " 1: ;\n" | ||
55 | |||
56 | BLOCK(0) | ||
57 | BLOCK(4) | ||
58 | BLOCK(8) | ||
59 | BLOCK(12) | ||
60 | |||
61 | " addl $128, %1 ;\n" | ||
62 | " addl $128, %2 ;\n" | ||
63 | " decl %0 ;\n" | ||
64 | " jnz 1b ;\n" | ||
65 | : "+r" (lines), | ||
66 | "+r" (p1), "+r" (p2) | ||
67 | : | ||
68 | : "memory"); | ||
69 | |||
70 | kernel_fpu_end(); | ||
71 | } | ||
72 | |||
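For readers untangling the LD/XO1/ST macros, the loop above is, in plain C, a 128-byte-per-iteration XOR of p2 into p1; the MMX version simply does it eight bytes at a time through mm0-mm3 with the loads, xors and stores interleaved. A scalar paraphrase of the same work (mine, not an alternative the kernel provides):

/* Scalar sketch of what xor_pII_mmx_2() computes: p1[i] ^= p2[i] over
 * "bytes" bytes, walked in the same 128-byte chunks as the asm loop. */
static void example_xor_2_scalar(unsigned long bytes,
                                 unsigned long *p1, unsigned long *p2)
{
        unsigned long lines = bytes >> 7;       /* 128-byte lines */

        while (lines--) {
                unsigned int i;

                for (i = 0; i < 128 / sizeof(unsigned long); i++)
                        p1[i] ^= p2[i];
                p1 += 128 / sizeof(unsigned long);
                p2 += 128 / sizeof(unsigned long);
        }
}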
73 | static void | ||
74 | xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
75 | unsigned long *p3) | ||
76 | { | ||
77 | unsigned long lines = bytes >> 7; | ||
78 | |||
79 | kernel_fpu_begin(); | ||
80 | |||
81 | __asm__ __volatile__ ( | ||
82 | #undef BLOCK | ||
83 | #define BLOCK(i) \ | ||
84 | LD(i,0) \ | ||
85 | LD(i+1,1) \ | ||
86 | LD(i+2,2) \ | ||
87 | LD(i+3,3) \ | ||
88 | XO1(i,0) \ | ||
89 | XO1(i+1,1) \ | ||
90 | XO1(i+2,2) \ | ||
91 | XO1(i+3,3) \ | ||
92 | XO2(i,0) \ | ||
93 | ST(i,0) \ | ||
94 | XO2(i+1,1) \ | ||
95 | ST(i+1,1) \ | ||
96 | XO2(i+2,2) \ | ||
97 | ST(i+2,2) \ | ||
98 | XO2(i+3,3) \ | ||
99 | ST(i+3,3) | ||
100 | |||
101 | " .align 32 ;\n" | ||
102 | " 1: ;\n" | ||
103 | |||
104 | BLOCK(0) | ||
105 | BLOCK(4) | ||
106 | BLOCK(8) | ||
107 | BLOCK(12) | ||
108 | |||
109 | " addl $128, %1 ;\n" | ||
110 | " addl $128, %2 ;\n" | ||
111 | " addl $128, %3 ;\n" | ||
112 | " decl %0 ;\n" | ||
113 | " jnz 1b ;\n" | ||
114 | : "+r" (lines), | ||
115 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
116 | : | ||
117 | : "memory"); | ||
118 | |||
119 | kernel_fpu_end(); | ||
120 | } | ||
121 | |||
122 | static void | ||
123 | xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
124 | unsigned long *p3, unsigned long *p4) | ||
125 | { | ||
126 | unsigned long lines = bytes >> 7; | ||
127 | |||
128 | kernel_fpu_begin(); | ||
129 | |||
130 | __asm__ __volatile__ ( | ||
131 | #undef BLOCK | ||
132 | #define BLOCK(i) \ | ||
133 | LD(i,0) \ | ||
134 | LD(i+1,1) \ | ||
135 | LD(i+2,2) \ | ||
136 | LD(i+3,3) \ | ||
137 | XO1(i,0) \ | ||
138 | XO1(i+1,1) \ | ||
139 | XO1(i+2,2) \ | ||
140 | XO1(i+3,3) \ | ||
141 | XO2(i,0) \ | ||
142 | XO2(i+1,1) \ | ||
143 | XO2(i+2,2) \ | ||
144 | XO2(i+3,3) \ | ||
145 | XO3(i,0) \ | ||
146 | ST(i,0) \ | ||
147 | XO3(i+1,1) \ | ||
148 | ST(i+1,1) \ | ||
149 | XO3(i+2,2) \ | ||
150 | ST(i+2,2) \ | ||
151 | XO3(i+3,3) \ | ||
152 | ST(i+3,3) | ||
153 | |||
154 | " .align 32 ;\n" | ||
155 | " 1: ;\n" | ||
156 | |||
157 | BLOCK(0) | ||
158 | BLOCK(4) | ||
159 | BLOCK(8) | ||
160 | BLOCK(12) | ||
161 | |||
162 | " addl $128, %1 ;\n" | ||
163 | " addl $128, %2 ;\n" | ||
164 | " addl $128, %3 ;\n" | ||
165 | " addl $128, %4 ;\n" | ||
166 | " decl %0 ;\n" | ||
167 | " jnz 1b ;\n" | ||
168 | : "+r" (lines), | ||
169 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | ||
170 | : | ||
171 | : "memory"); | ||
172 | |||
173 | kernel_fpu_end(); | ||
174 | } | ||
175 | |||
176 | |||
177 | static void | ||
178 | xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
179 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
180 | { | ||
181 | unsigned long lines = bytes >> 7; | ||
182 | |||
183 | kernel_fpu_begin(); | ||
184 | |||
185 | /* Make sure GCC forgets anything it knows about p4 or p5, | ||
186 | such that it won't pass to the asm volatile below a | ||
187 | register that is shared with any other variable. That's | ||
188 | because we modify p4 and p5 there, but we can't mark them | ||
189 | as read/write, otherwise we'd overflow the 10-asm-operands | ||
190 | limit of GCC < 3.1. */ | ||
191 | __asm__ ("" : "+r" (p4), "+r" (p5)); | ||
192 | |||
193 | __asm__ __volatile__ ( | ||
194 | #undef BLOCK | ||
195 | #define BLOCK(i) \ | ||
196 | LD(i,0) \ | ||
197 | LD(i+1,1) \ | ||
198 | LD(i+2,2) \ | ||
199 | LD(i+3,3) \ | ||
200 | XO1(i,0) \ | ||
201 | XO1(i+1,1) \ | ||
202 | XO1(i+2,2) \ | ||
203 | XO1(i+3,3) \ | ||
204 | XO2(i,0) \ | ||
205 | XO2(i+1,1) \ | ||
206 | XO2(i+2,2) \ | ||
207 | XO2(i+3,3) \ | ||
208 | XO3(i,0) \ | ||
209 | XO3(i+1,1) \ | ||
210 | XO3(i+2,2) \ | ||
211 | XO3(i+3,3) \ | ||
212 | XO4(i,0) \ | ||
213 | ST(i,0) \ | ||
214 | XO4(i+1,1) \ | ||
215 | ST(i+1,1) \ | ||
216 | XO4(i+2,2) \ | ||
217 | ST(i+2,2) \ | ||
218 | XO4(i+3,3) \ | ||
219 | ST(i+3,3) | ||
220 | |||
221 | " .align 32 ;\n" | ||
222 | " 1: ;\n" | ||
223 | |||
224 | BLOCK(0) | ||
225 | BLOCK(4) | ||
226 | BLOCK(8) | ||
227 | BLOCK(12) | ||
228 | |||
229 | " addl $128, %1 ;\n" | ||
230 | " addl $128, %2 ;\n" | ||
231 | " addl $128, %3 ;\n" | ||
232 | " addl $128, %4 ;\n" | ||
233 | " addl $128, %5 ;\n" | ||
234 | " decl %0 ;\n" | ||
235 | " jnz 1b ;\n" | ||
236 | : "+r" (lines), | ||
237 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
238 | : "r" (p4), "r" (p5) | ||
239 | : "memory"); | ||
240 | |||
241 | /* p4 and p5 were modified, and now the variables are dead. | ||
242 | Clobber them just to be sure nobody does something stupid | ||
243 | like assuming they have some legal value. */ | ||
244 | __asm__ ("" : "=r" (p4), "=r" (p5)); | ||
245 | |||
246 | kernel_fpu_end(); | ||
247 | } | ||
248 | |||
249 | #undef LD | ||
250 | #undef XO1 | ||
251 | #undef XO2 | ||
252 | #undef XO3 | ||
253 | #undef XO4 | ||
254 | #undef ST | ||
255 | #undef BLOCK | ||
256 | |||
257 | static void | ||
258 | xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
259 | { | ||
260 | unsigned long lines = bytes >> 6; | ||
261 | |||
262 | kernel_fpu_begin(); | ||
263 | |||
264 | __asm__ __volatile__ ( | ||
265 | " .align 32 ;\n" | ||
266 | " 1: ;\n" | ||
267 | " movq (%1), %%mm0 ;\n" | ||
268 | " movq 8(%1), %%mm1 ;\n" | ||
269 | " pxor (%2), %%mm0 ;\n" | ||
270 | " movq 16(%1), %%mm2 ;\n" | ||
271 | " movq %%mm0, (%1) ;\n" | ||
272 | " pxor 8(%2), %%mm1 ;\n" | ||
273 | " movq 24(%1), %%mm3 ;\n" | ||
274 | " movq %%mm1, 8(%1) ;\n" | ||
275 | " pxor 16(%2), %%mm2 ;\n" | ||
276 | " movq 32(%1), %%mm4 ;\n" | ||
277 | " movq %%mm2, 16(%1) ;\n" | ||
278 | " pxor 24(%2), %%mm3 ;\n" | ||
279 | " movq 40(%1), %%mm5 ;\n" | ||
280 | " movq %%mm3, 24(%1) ;\n" | ||
281 | " pxor 32(%2), %%mm4 ;\n" | ||
282 | " movq 48(%1), %%mm6 ;\n" | ||
283 | " movq %%mm4, 32(%1) ;\n" | ||
284 | " pxor 40(%2), %%mm5 ;\n" | ||
285 | " movq 56(%1), %%mm7 ;\n" | ||
286 | " movq %%mm5, 40(%1) ;\n" | ||
287 | " pxor 48(%2), %%mm6 ;\n" | ||
288 | " pxor 56(%2), %%mm7 ;\n" | ||
289 | " movq %%mm6, 48(%1) ;\n" | ||
290 | " movq %%mm7, 56(%1) ;\n" | ||
291 | |||
292 | " addl $64, %1 ;\n" | ||
293 | " addl $64, %2 ;\n" | ||
294 | " decl %0 ;\n" | ||
295 | " jnz 1b ;\n" | ||
296 | : "+r" (lines), | ||
297 | "+r" (p1), "+r" (p2) | ||
298 | : | ||
299 | : "memory"); | ||
300 | |||
301 | kernel_fpu_end(); | ||
302 | } | ||
303 | |||
304 | static void | ||
305 | xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
306 | unsigned long *p3) | ||
307 | { | ||
308 | unsigned long lines = bytes >> 6; | ||
309 | |||
310 | kernel_fpu_begin(); | ||
311 | |||
312 | __asm__ __volatile__ ( | ||
313 | " .align 32,0x90 ;\n" | ||
314 | " 1: ;\n" | ||
315 | " movq (%1), %%mm0 ;\n" | ||
316 | " movq 8(%1), %%mm1 ;\n" | ||
317 | " pxor (%2), %%mm0 ;\n" | ||
318 | " movq 16(%1), %%mm2 ;\n" | ||
319 | " pxor 8(%2), %%mm1 ;\n" | ||
320 | " pxor (%3), %%mm0 ;\n" | ||
321 | " pxor 16(%2), %%mm2 ;\n" | ||
322 | " movq %%mm0, (%1) ;\n" | ||
323 | " pxor 8(%3), %%mm1 ;\n" | ||
324 | " pxor 16(%3), %%mm2 ;\n" | ||
325 | " movq 24(%1), %%mm3 ;\n" | ||
326 | " movq %%mm1, 8(%1) ;\n" | ||
327 | " movq 32(%1), %%mm4 ;\n" | ||
328 | " movq 40(%1), %%mm5 ;\n" | ||
329 | " pxor 24(%2), %%mm3 ;\n" | ||
330 | " movq %%mm2, 16(%1) ;\n" | ||
331 | " pxor 32(%2), %%mm4 ;\n" | ||
332 | " pxor 24(%3), %%mm3 ;\n" | ||
333 | " pxor 40(%2), %%mm5 ;\n" | ||
334 | " movq %%mm3, 24(%1) ;\n" | ||
335 | " pxor 32(%3), %%mm4 ;\n" | ||
336 | " pxor 40(%3), %%mm5 ;\n" | ||
337 | " movq 48(%1), %%mm6 ;\n" | ||
338 | " movq %%mm4, 32(%1) ;\n" | ||
339 | " movq 56(%1), %%mm7 ;\n" | ||
340 | " pxor 48(%2), %%mm6 ;\n" | ||
341 | " movq %%mm5, 40(%1) ;\n" | ||
342 | " pxor 56(%2), %%mm7 ;\n" | ||
343 | " pxor 48(%3), %%mm6 ;\n" | ||
344 | " pxor 56(%3), %%mm7 ;\n" | ||
345 | " movq %%mm6, 48(%1) ;\n" | ||
346 | " movq %%mm7, 56(%1) ;\n" | ||
347 | |||
348 | " addl $64, %1 ;\n" | ||
349 | " addl $64, %2 ;\n" | ||
350 | " addl $64, %3 ;\n" | ||
351 | " decl %0 ;\n" | ||
352 | " jnz 1b ;\n" | ||
353 | : "+r" (lines), | ||
354 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
355 | : | ||
356 | : "memory" ); | ||
357 | |||
358 | kernel_fpu_end(); | ||
359 | } | ||
360 | |||
361 | static void | ||
362 | xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
363 | unsigned long *p3, unsigned long *p4) | ||
364 | { | ||
365 | unsigned long lines = bytes >> 6; | ||
366 | |||
367 | kernel_fpu_begin(); | ||
368 | |||
369 | __asm__ __volatile__ ( | ||
370 | " .align 32,0x90 ;\n" | ||
371 | " 1: ;\n" | ||
372 | " movq (%1), %%mm0 ;\n" | ||
373 | " movq 8(%1), %%mm1 ;\n" | ||
374 | " pxor (%2), %%mm0 ;\n" | ||
375 | " movq 16(%1), %%mm2 ;\n" | ||
376 | " pxor 8(%2), %%mm1 ;\n" | ||
377 | " pxor (%3), %%mm0 ;\n" | ||
378 | " pxor 16(%2), %%mm2 ;\n" | ||
379 | " pxor 8(%3), %%mm1 ;\n" | ||
380 | " pxor (%4), %%mm0 ;\n" | ||
381 | " movq 24(%1), %%mm3 ;\n" | ||
382 | " pxor 16(%3), %%mm2 ;\n" | ||
383 | " pxor 8(%4), %%mm1 ;\n" | ||
384 | " movq %%mm0, (%1) ;\n" | ||
385 | " movq 32(%1), %%mm4 ;\n" | ||
386 | " pxor 24(%2), %%mm3 ;\n" | ||
387 | " pxor 16(%4), %%mm2 ;\n" | ||
388 | " movq %%mm1, 8(%1) ;\n" | ||
389 | " movq 40(%1), %%mm5 ;\n" | ||
390 | " pxor 32(%2), %%mm4 ;\n" | ||
391 | " pxor 24(%3), %%mm3 ;\n" | ||
392 | " movq %%mm2, 16(%1) ;\n" | ||
393 | " pxor 40(%2), %%mm5 ;\n" | ||
394 | " pxor 32(%3), %%mm4 ;\n" | ||
395 | " pxor 24(%4), %%mm3 ;\n" | ||
396 | " movq %%mm3, 24(%1) ;\n" | ||
397 | " movq 56(%1), %%mm7 ;\n" | ||
398 | " movq 48(%1), %%mm6 ;\n" | ||
399 | " pxor 40(%3), %%mm5 ;\n" | ||
400 | " pxor 32(%4), %%mm4 ;\n" | ||
401 | " pxor 48(%2), %%mm6 ;\n" | ||
402 | " movq %%mm4, 32(%1) ;\n" | ||
403 | " pxor 56(%2), %%mm7 ;\n" | ||
404 | " pxor 40(%4), %%mm5 ;\n" | ||
405 | " pxor 48(%3), %%mm6 ;\n" | ||
406 | " pxor 56(%3), %%mm7 ;\n" | ||
407 | " movq %%mm5, 40(%1) ;\n" | ||
408 | " pxor 48(%4), %%mm6 ;\n" | ||
409 | " pxor 56(%4), %%mm7 ;\n" | ||
410 | " movq %%mm6, 48(%1) ;\n" | ||
411 | " movq %%mm7, 56(%1) ;\n" | ||
412 | |||
413 | " addl $64, %1 ;\n" | ||
414 | " addl $64, %2 ;\n" | ||
415 | " addl $64, %3 ;\n" | ||
416 | " addl $64, %4 ;\n" | ||
417 | " decl %0 ;\n" | ||
418 | " jnz 1b ;\n" | ||
419 | : "+r" (lines), | ||
420 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | ||
421 | : | ||
422 | : "memory"); | ||
423 | |||
424 | kernel_fpu_end(); | ||
425 | } | ||
426 | |||
427 | static void | ||
428 | xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
429 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
430 | { | ||
431 | unsigned long lines = bytes >> 6; | ||
432 | |||
433 | kernel_fpu_begin(); | ||
434 | |||
435 | /* Make sure GCC forgets anything it knows about p4 or p5, | ||
436 | such that it won't pass to the asm volatile below a | ||
437 | register that is shared with any other variable. That's | ||
438 | because we modify p4 and p5 there, but we can't mark them | ||
439 | as read/write, otherwise we'd overflow the 10-asm-operands | ||
440 | limit of GCC < 3.1. */ | ||
441 | __asm__ ("" : "+r" (p4), "+r" (p5)); | ||
442 | |||
443 | __asm__ __volatile__ ( | ||
444 | " .align 32,0x90 ;\n" | ||
445 | " 1: ;\n" | ||
446 | " movq (%1), %%mm0 ;\n" | ||
447 | " movq 8(%1), %%mm1 ;\n" | ||
448 | " pxor (%2), %%mm0 ;\n" | ||
449 | " pxor 8(%2), %%mm1 ;\n" | ||
450 | " movq 16(%1), %%mm2 ;\n" | ||
451 | " pxor (%3), %%mm0 ;\n" | ||
452 | " pxor 8(%3), %%mm1 ;\n" | ||
453 | " pxor 16(%2), %%mm2 ;\n" | ||
454 | " pxor (%4), %%mm0 ;\n" | ||
455 | " pxor 8(%4), %%mm1 ;\n" | ||
456 | " pxor 16(%3), %%mm2 ;\n" | ||
457 | " movq 24(%1), %%mm3 ;\n" | ||
458 | " pxor (%5), %%mm0 ;\n" | ||
459 | " pxor 8(%5), %%mm1 ;\n" | ||
460 | " movq %%mm0, (%1) ;\n" | ||
461 | " pxor 16(%4), %%mm2 ;\n" | ||
462 | " pxor 24(%2), %%mm3 ;\n" | ||
463 | " movq %%mm1, 8(%1) ;\n" | ||
464 | " pxor 16(%5), %%mm2 ;\n" | ||
465 | " pxor 24(%3), %%mm3 ;\n" | ||
466 | " movq 32(%1), %%mm4 ;\n" | ||
467 | " movq %%mm2, 16(%1) ;\n" | ||
468 | " pxor 24(%4), %%mm3 ;\n" | ||
469 | " pxor 32(%2), %%mm4 ;\n" | ||
470 | " movq 40(%1), %%mm5 ;\n" | ||
471 | " pxor 24(%5), %%mm3 ;\n" | ||
472 | " pxor 32(%3), %%mm4 ;\n" | ||
473 | " pxor 40(%2), %%mm5 ;\n" | ||
474 | " movq %%mm3, 24(%1) ;\n" | ||
475 | " pxor 32(%4), %%mm4 ;\n" | ||
476 | " pxor 40(%3), %%mm5 ;\n" | ||
477 | " movq 48(%1), %%mm6 ;\n" | ||
478 | " movq 56(%1), %%mm7 ;\n" | ||
479 | " pxor 32(%5), %%mm4 ;\n" | ||
480 | " pxor 40(%4), %%mm5 ;\n" | ||
481 | " pxor 48(%2), %%mm6 ;\n" | ||
482 | " pxor 56(%2), %%mm7 ;\n" | ||
483 | " movq %%mm4, 32(%1) ;\n" | ||
484 | " pxor 48(%3), %%mm6 ;\n" | ||
485 | " pxor 56(%3), %%mm7 ;\n" | ||
486 | " pxor 40(%5), %%mm5 ;\n" | ||
487 | " pxor 48(%4), %%mm6 ;\n" | ||
488 | " pxor 56(%4), %%mm7 ;\n" | ||
489 | " movq %%mm5, 40(%1) ;\n" | ||
490 | " pxor 48(%5), %%mm6 ;\n" | ||
491 | " pxor 56(%5), %%mm7 ;\n" | ||
492 | " movq %%mm6, 48(%1) ;\n" | ||
493 | " movq %%mm7, 56(%1) ;\n" | ||
494 | |||
495 | " addl $64, %1 ;\n" | ||
496 | " addl $64, %2 ;\n" | ||
497 | " addl $64, %3 ;\n" | ||
498 | " addl $64, %4 ;\n" | ||
499 | " addl $64, %5 ;\n" | ||
500 | " decl %0 ;\n" | ||
501 | " jnz 1b ;\n" | ||
502 | : "+r" (lines), | ||
503 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
504 | : "r" (p4), "r" (p5) | ||
505 | : "memory"); | ||
506 | |||
507 | /* p4 and p5 were modified, and now the variables are dead. | ||
508 | Clobber them just to be sure nobody does something stupid | ||
509 | like assuming they have some legal value. */ | ||
510 | __asm__ ("" : "=r" (p4), "=r" (p5)); | ||
511 | |||
512 | kernel_fpu_end(); | ||
513 | } | ||
514 | |||
515 | static struct xor_block_template xor_block_pII_mmx = { | ||
516 | .name = "pII_mmx", | ||
517 | .do_2 = xor_pII_mmx_2, | ||
518 | .do_3 = xor_pII_mmx_3, | ||
519 | .do_4 = xor_pII_mmx_4, | ||
520 | .do_5 = xor_pII_mmx_5, | ||
521 | }; | ||
522 | |||
523 | static struct xor_block_template xor_block_p5_mmx = { | ||
524 | .name = "p5_mmx", | ||
525 | .do_2 = xor_p5_mmx_2, | ||
526 | .do_3 = xor_p5_mmx_3, | ||
527 | .do_4 = xor_p5_mmx_4, | ||
528 | .do_5 = xor_p5_mmx_5, | ||
529 | }; | ||
530 | |||
531 | /* | ||
532 | * Cache-avoiding checksumming functions utilizing KNI (SSE) instructions | ||
533 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
534 | */ | ||
535 | |||
536 | #define XMMS_SAVE do { \ | ||
537 | preempt_disable(); \ | ||
538 | __asm__ __volatile__ ( \ | ||
539 | "movl %%cr0,%0 ;\n\t" \ | ||
540 | "clts ;\n\t" \ | ||
541 | "movups %%xmm0,(%1) ;\n\t" \ | ||
542 | "movups %%xmm1,0x10(%1) ;\n\t" \ | ||
543 | "movups %%xmm2,0x20(%1) ;\n\t" \ | ||
544 | "movups %%xmm3,0x30(%1) ;\n\t" \ | ||
545 | : "=&r" (cr0) \ | ||
546 | : "r" (xmm_save) \ | ||
547 | : "memory"); \ | ||
548 | } while(0) | ||
549 | |||
550 | #define XMMS_RESTORE do { \ | ||
551 | __asm__ __volatile__ ( \ | ||
552 | "sfence ;\n\t" \ | ||
553 | "movups (%1),%%xmm0 ;\n\t" \ | ||
554 | "movups 0x10(%1),%%xmm1 ;\n\t" \ | ||
555 | "movups 0x20(%1),%%xmm2 ;\n\t" \ | ||
556 | "movups 0x30(%1),%%xmm3 ;\n\t" \ | ||
557 | "movl %0,%%cr0 ;\n\t" \ | ||
558 | : \ | ||
559 | : "r" (cr0), "r" (xmm_save) \ | ||
560 | : "memory"); \ | ||
561 | preempt_enable(); \ | ||
562 | } while(0) | ||
563 | |||
564 | #define ALIGN16 __attribute__((aligned(16))) | ||
565 | |||
566 | #define OFFS(x) "16*("#x")" | ||
567 | #define PF_OFFS(x) "256+16*("#x")" | ||
568 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" | ||
569 | #define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" | ||
570 | #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" | ||
571 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" | ||
572 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" | ||
573 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" | ||
574 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" | ||
575 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" | ||
576 | #define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" | ||
577 | #define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" | ||
578 | #define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" | ||
579 | #define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" | ||
580 | #define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" | ||
581 | |||
582 | |||
583 | static void | ||
584 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
585 | { | ||
586 | unsigned long lines = bytes >> 8; | ||
587 | char xmm_save[16*4] ALIGN16; | ||
588 | int cr0; | ||
589 | |||
590 | XMMS_SAVE; | ||
591 | |||
592 | __asm__ __volatile__ ( | ||
593 | #undef BLOCK | ||
594 | #define BLOCK(i) \ | ||
595 | LD(i,0) \ | ||
596 | LD(i+1,1) \ | ||
597 | PF1(i) \ | ||
598 | PF1(i+2) \ | ||
599 | LD(i+2,2) \ | ||
600 | LD(i+3,3) \ | ||
601 | PF0(i+4) \ | ||
602 | PF0(i+6) \ | ||
603 | XO1(i,0) \ | ||
604 | XO1(i+1,1) \ | ||
605 | XO1(i+2,2) \ | ||
606 | XO1(i+3,3) \ | ||
607 | ST(i,0) \ | ||
608 | ST(i+1,1) \ | ||
609 | ST(i+2,2) \ | ||
610 | ST(i+3,3) \ | ||
611 | |||
612 | |||
613 | PF0(0) | ||
614 | PF0(2) | ||
615 | |||
616 | " .align 32 ;\n" | ||
617 | " 1: ;\n" | ||
618 | |||
619 | BLOCK(0) | ||
620 | BLOCK(4) | ||
621 | BLOCK(8) | ||
622 | BLOCK(12) | ||
623 | |||
624 | " addl $256, %1 ;\n" | ||
625 | " addl $256, %2 ;\n" | ||
626 | " decl %0 ;\n" | ||
627 | " jnz 1b ;\n" | ||
628 | : "+r" (lines), | ||
629 | "+r" (p1), "+r" (p2) | ||
630 | : | ||
631 | : "memory"); | ||
632 | |||
633 | XMMS_RESTORE; | ||
634 | } | ||
635 | |||
636 | static void | ||
637 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
638 | unsigned long *p3) | ||
639 | { | ||
640 | unsigned long lines = bytes >> 8; | ||
641 | char xmm_save[16*4] ALIGN16; | ||
642 | int cr0; | ||
643 | |||
644 | XMMS_SAVE; | ||
645 | |||
646 | __asm__ __volatile__ ( | ||
647 | #undef BLOCK | ||
648 | #define BLOCK(i) \ | ||
649 | PF1(i) \ | ||
650 | PF1(i+2) \ | ||
651 | LD(i,0) \ | ||
652 | LD(i+1,1) \ | ||
653 | LD(i+2,2) \ | ||
654 | LD(i+3,3) \ | ||
655 | PF2(i) \ | ||
656 | PF2(i+2) \ | ||
657 | PF0(i+4) \ | ||
658 | PF0(i+6) \ | ||
659 | XO1(i,0) \ | ||
660 | XO1(i+1,1) \ | ||
661 | XO1(i+2,2) \ | ||
662 | XO1(i+3,3) \ | ||
663 | XO2(i,0) \ | ||
664 | XO2(i+1,1) \ | ||
665 | XO2(i+2,2) \ | ||
666 | XO2(i+3,3) \ | ||
667 | ST(i,0) \ | ||
668 | ST(i+1,1) \ | ||
669 | ST(i+2,2) \ | ||
670 | ST(i+3,3) \ | ||
671 | |||
672 | |||
673 | PF0(0) | ||
674 | PF0(2) | ||
675 | |||
676 | " .align 32 ;\n" | ||
677 | " 1: ;\n" | ||
678 | |||
679 | BLOCK(0) | ||
680 | BLOCK(4) | ||
681 | BLOCK(8) | ||
682 | BLOCK(12) | ||
683 | |||
684 | " addl $256, %1 ;\n" | ||
685 | " addl $256, %2 ;\n" | ||
686 | " addl $256, %3 ;\n" | ||
687 | " decl %0 ;\n" | ||
688 | " jnz 1b ;\n" | ||
689 | : "+r" (lines), | ||
690 | "+r" (p1), "+r"(p2), "+r"(p3) | ||
691 | : | ||
692 | : "memory" ); | ||
693 | |||
694 | XMMS_RESTORE; | ||
695 | } | ||
696 | |||
697 | static void | ||
698 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
699 | unsigned long *p3, unsigned long *p4) | ||
700 | { | ||
701 | unsigned long lines = bytes >> 8; | ||
702 | char xmm_save[16*4] ALIGN16; | ||
703 | int cr0; | ||
704 | |||
705 | XMMS_SAVE; | ||
706 | |||
707 | __asm__ __volatile__ ( | ||
708 | #undef BLOCK | ||
709 | #define BLOCK(i) \ | ||
710 | PF1(i) \ | ||
711 | PF1(i+2) \ | ||
712 | LD(i,0) \ | ||
713 | LD(i+1,1) \ | ||
714 | LD(i+2,2) \ | ||
715 | LD(i+3,3) \ | ||
716 | PF2(i) \ | ||
717 | PF2(i+2) \ | ||
718 | XO1(i,0) \ | ||
719 | XO1(i+1,1) \ | ||
720 | XO1(i+2,2) \ | ||
721 | XO1(i+3,3) \ | ||
722 | PF3(i) \ | ||
723 | PF3(i+2) \ | ||
724 | PF0(i+4) \ | ||
725 | PF0(i+6) \ | ||
726 | XO2(i,0) \ | ||
727 | XO2(i+1,1) \ | ||
728 | XO2(i+2,2) \ | ||
729 | XO2(i+3,3) \ | ||
730 | XO3(i,0) \ | ||
731 | XO3(i+1,1) \ | ||
732 | XO3(i+2,2) \ | ||
733 | XO3(i+3,3) \ | ||
734 | ST(i,0) \ | ||
735 | ST(i+1,1) \ | ||
736 | ST(i+2,2) \ | ||
737 | ST(i+3,3) \ | ||
738 | |||
739 | |||
740 | PF0(0) | ||
741 | PF0(2) | ||
742 | |||
743 | " .align 32 ;\n" | ||
744 | " 1: ;\n" | ||
745 | |||
746 | BLOCK(0) | ||
747 | BLOCK(4) | ||
748 | BLOCK(8) | ||
749 | BLOCK(12) | ||
750 | |||
751 | " addl $256, %1 ;\n" | ||
752 | " addl $256, %2 ;\n" | ||
753 | " addl $256, %3 ;\n" | ||
754 | " addl $256, %4 ;\n" | ||
755 | " decl %0 ;\n" | ||
756 | " jnz 1b ;\n" | ||
757 | : "+r" (lines), | ||
758 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | ||
759 | : | ||
760 | : "memory" ); | ||
761 | |||
762 | XMMS_RESTORE; | ||
763 | } | ||
764 | |||
765 | static void | ||
766 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
767 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
768 | { | ||
769 | unsigned long lines = bytes >> 8; | ||
770 | char xmm_save[16*4] ALIGN16; | ||
771 | int cr0; | ||
772 | |||
773 | XMMS_SAVE; | ||
774 | |||
775 | /* Make sure GCC forgets anything it knows about p4 or p5, | ||
776 | such that it won't pass to the asm volatile below a | ||
777 | register that is shared with any other variable. That's | ||
778 | because we modify p4 and p5 there, but we can't mark them | ||
779 | as read/write, otherwise we'd overflow the 10-asm-operands | ||
780 | limit of GCC < 3.1. */ | ||
781 | __asm__ ("" : "+r" (p4), "+r" (p5)); | ||
782 | |||
783 | __asm__ __volatile__ ( | ||
784 | #undef BLOCK | ||
785 | #define BLOCK(i) \ | ||
786 | PF1(i) \ | ||
787 | PF1(i+2) \ | ||
788 | LD(i,0) \ | ||
789 | LD(i+1,1) \ | ||
790 | LD(i+2,2) \ | ||
791 | LD(i+3,3) \ | ||
792 | PF2(i) \ | ||
793 | PF2(i+2) \ | ||
794 | XO1(i,0) \ | ||
795 | XO1(i+1,1) \ | ||
796 | XO1(i+2,2) \ | ||
797 | XO1(i+3,3) \ | ||
798 | PF3(i) \ | ||
799 | PF3(i+2) \ | ||
800 | XO2(i,0) \ | ||
801 | XO2(i+1,1) \ | ||
802 | XO2(i+2,2) \ | ||
803 | XO2(i+3,3) \ | ||
804 | PF4(i) \ | ||
805 | PF4(i+2) \ | ||
806 | PF0(i+4) \ | ||
807 | PF0(i+6) \ | ||
808 | XO3(i,0) \ | ||
809 | XO3(i+1,1) \ | ||
810 | XO3(i+2,2) \ | ||
811 | XO3(i+3,3) \ | ||
812 | XO4(i,0) \ | ||
813 | XO4(i+1,1) \ | ||
814 | XO4(i+2,2) \ | ||
815 | XO4(i+3,3) \ | ||
816 | ST(i,0) \ | ||
817 | ST(i+1,1) \ | ||
818 | ST(i+2,2) \ | ||
819 | ST(i+3,3) \ | ||
820 | |||
821 | |||
822 | PF0(0) | ||
823 | PF0(2) | ||
824 | |||
825 | " .align 32 ;\n" | ||
826 | " 1: ;\n" | ||
827 | |||
828 | BLOCK(0) | ||
829 | BLOCK(4) | ||
830 | BLOCK(8) | ||
831 | BLOCK(12) | ||
832 | |||
833 | " addl $256, %1 ;\n" | ||
834 | " addl $256, %2 ;\n" | ||
835 | " addl $256, %3 ;\n" | ||
836 | " addl $256, %4 ;\n" | ||
837 | " addl $256, %5 ;\n" | ||
838 | " decl %0 ;\n" | ||
839 | " jnz 1b ;\n" | ||
840 | : "+r" (lines), | ||
841 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
842 | : "r" (p4), "r" (p5) | ||
843 | : "memory"); | ||
844 | |||
845 | /* p4 and p5 were modified, and now the variables are dead. | ||
846 | Clobber them just to be sure nobody does something stupid | ||
847 | like assuming they have some legal value. */ | ||
848 | __asm__ ("" : "=r" (p4), "=r" (p5)); | ||
849 | |||
850 | XMMS_RESTORE; | ||
851 | } | ||
852 | |||
853 | static struct xor_block_template xor_block_pIII_sse = { | ||
854 | .name = "pIII_sse", | ||
855 | .do_2 = xor_sse_2, | ||
856 | .do_3 = xor_sse_3, | ||
857 | .do_4 = xor_sse_4, | ||
858 | .do_5 = xor_sse_5, | ||
859 | }; | ||
860 | |||
861 | /* Also try the generic routines. */ | ||
862 | #include <asm-generic/xor.h> | ||
863 | |||
864 | #undef XOR_TRY_TEMPLATES | ||
865 | #define XOR_TRY_TEMPLATES \ | ||
866 | do { \ | ||
867 | xor_speed(&xor_block_8regs); \ | ||
868 | xor_speed(&xor_block_8regs_p); \ | ||
869 | xor_speed(&xor_block_32regs); \ | ||
870 | xor_speed(&xor_block_32regs_p); \ | ||
871 | if (cpu_has_xmm) \ | ||
872 | xor_speed(&xor_block_pIII_sse); \ | ||
873 | if (cpu_has_mmx) { \ | ||
874 | xor_speed(&xor_block_pII_mmx); \ | ||
875 | xor_speed(&xor_block_p5_mmx); \ | ||
876 | } \ | ||
877 | } while (0) | ||
878 | |||
879 | /* We force the use of the SSE xor block because it can write around L2. | ||
880 | We may also be able to load into only the L1 cache, depending on how the | ||
881 | CPU deals with a load to a line that is being prefetched. */ | ||
882 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
883 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) | ||
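Finally, a hedged sketch of the calibration pattern these two macros exist for: each template is timed through xor_speed() and the winner is filtered through XOR_SELECT_TEMPLATE so that the SSE routine is preferred whenever SSE is present. The xor_speed() stand-in and the surrounding function are assumptions that mimic the generic RAID xor driver, not code this header provides.

#ifdef EXAMPLE_XOR_CALIBRATION_SKETCH   /* illustrative only, never built */
/* Stand-in for the benchmarking helper in the generic xor driver: a real
 * implementation would time tmpl->do_2() and remember the fastest. */
static struct xor_block_template *example_fastest;

static void xor_speed(struct xor_block_template *tmpl)
{
        if (!example_fastest)
                example_fastest = tmpl;
}

static struct xor_block_template *example_pick_xor_template(void)
{
        XOR_TRY_TEMPLATES;                      /* benchmark every candidate */
        return XOR_SELECT_TEMPLATE(example_fastest);    /* prefer SSE */
}
#endif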