Diffstat (limited to 'include/asm-x86')
220 files changed, 10289 insertions, 11840 deletions
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 12db5a1cdd74..3c6f0f80e827 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -3,21 +3,20 @@ include include/asm-generic/Kbuild.asm
3 | header-y += boot.h | 3 | header-y += boot.h |
4 | header-y += bootparam.h | 4 | header-y += bootparam.h |
5 | header-y += debugreg.h | 5 | header-y += debugreg.h |
6 | header-y += kvm.h | ||
6 | header-y += ldt.h | 7 | header-y += ldt.h |
7 | header-y += msr-index.h | 8 | header-y += msr-index.h |
8 | header-y += prctl.h | 9 | header-y += prctl.h |
9 | header-y += ptrace-abi.h | 10 | header-y += ptrace-abi.h |
10 | header-y += sigcontext32.h | 11 | header-y += sigcontext32.h |
11 | header-y += ucontext.h | 12 | header-y += ucontext.h |
12 | header-y += vsyscall32.h | ||
13 | 13 | ||
14 | unifdef-y += e820.h | 14 | unifdef-y += e820.h |
15 | unifdef-y += ist.h | 15 | unifdef-y += ist.h |
16 | unifdef-y += mce.h | 16 | unifdef-y += mce.h |
17 | unifdef-y += msr.h | 17 | unifdef-y += msr.h |
18 | unifdef-y += mtrr.h | 18 | unifdef-y += mtrr.h |
19 | unifdef-y += page_32.h | 19 | unifdef-y += page.h |
20 | unifdef-y += page_64.h | ||
21 | unifdef-y += posix_types_32.h | 20 | unifdef-y += posix_types_32.h |
22 | unifdef-y += posix_types_64.h | 21 | unifdef-y += posix_types_64.h |
23 | unifdef-y += ptrace.h | 22 | unifdef-y += ptrace.h |
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index f8a89793ac8c..98a9ca266531 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -1,13 +1,123 @@
1 | #ifndef _ASM_X86_ACPI_H | 1 | #ifndef _ASM_X86_ACPI_H |
2 | #define _ASM_X86_ACPI_H | 2 | #define _ASM_X86_ACPI_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_32 | 4 | /* |
5 | # include "acpi_32.h" | 5 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
6 | #else | 6 | * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> |
7 | # include "acpi_64.h" | 7 | * |
8 | #endif | 8 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 | * | ||
24 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
25 | */ | ||
26 | #include <acpi/pdc_intel.h> | ||
9 | 27 | ||
28 | #include <asm/numa.h> | ||
10 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | #include <asm/mmu.h> | ||
31 | |||
32 | #define COMPILER_DEPENDENT_INT64 long long | ||
33 | #define COMPILER_DEPENDENT_UINT64 unsigned long long | ||
34 | |||
35 | /* | ||
36 | * Calling conventions: | ||
37 | * | ||
38 | * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) | ||
39 | * ACPI_EXTERNAL_XFACE - External ACPI interfaces | ||
40 | * ACPI_INTERNAL_XFACE - Internal ACPI interfaces | ||
41 | * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces | ||
42 | */ | ||
43 | #define ACPI_SYSTEM_XFACE | ||
44 | #define ACPI_EXTERNAL_XFACE | ||
45 | #define ACPI_INTERNAL_XFACE | ||
46 | #define ACPI_INTERNAL_VAR_XFACE | ||
47 | |||
48 | /* Asm macros */ | ||
49 | |||
50 | #define ACPI_ASM_MACROS | ||
51 | #define BREAKPOINT3 | ||
52 | #define ACPI_DISABLE_IRQS() local_irq_disable() | ||
53 | #define ACPI_ENABLE_IRQS() local_irq_enable() | ||
54 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() | ||
55 | |||
56 | int __acpi_acquire_global_lock(unsigned int *lock); | ||
57 | int __acpi_release_global_lock(unsigned int *lock); | ||
58 | |||
59 | #define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ | ||
60 | ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) | ||
61 | |||
62 | #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ | ||
63 | ((Acq) = __acpi_release_global_lock(&facs->global_lock)) | ||
64 | |||
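The two lock macros above are consumed by the ACPICA core, not by arch code directly. A hedged sketch of the calling pattern, under the assumption that facs points at the mapped FACS table (whose global_lock word both helpers operate on) and that the acquire helper yields nonzero once the lock is actually held; the function name is illustrative:

static int example_with_global_lock(struct acpi_table_facs *facs)
{
	int acquired, pending;

	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
	if (!acquired)
		return -1;	/* pending bit set; the owner signals on release */

	/* ... touch hardware shared with the firmware ... */

	ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
	if (pending) {
		/* assumption: the caller then sets GBL_RLS so firmware gets its turn */
	}
	return 0;
}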
65 | /* | ||
66 | * Math helper asm macros | ||
67 | */ | ||
68 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ | ||
69 | asm("divl %2;" \ | ||
70 | :"=a"(q32), "=d"(r32) \ | ||
71 | :"r"(d32), \ | ||
72 | "0"(n_lo), "1"(n_hi)) | ||
73 | |||
74 | |||
75 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ | ||
76 | asm("shrl $1,%2 ;" \ | ||
77 | "rcrl $1,%3;" \ | ||
78 | :"=r"(n_hi), "=r"(n_lo) \ | ||
79 | :"0"(n_hi), "1"(n_lo)) | ||
80 | |||
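ACPI_DIV_64_BY_32 wraps the x86 divl instruction, which divides the 64-bit value in edx:eax by a 32-bit operand. A minimal standalone sketch of the same idiom (user-space GCC on x86, illustrative names; the quotient must fit in 32 bits or the CPU raises a divide fault):

#include <stdint.h>
#include <stdio.h>

static void div_64_by_32(uint32_t n_hi, uint32_t n_lo, uint32_t d32,
			 uint32_t *q32, uint32_t *r32)
{
	/* edx:eax / d32 -> quotient in eax, remainder in edx */
	asm("divl %2"
	    : "=a" (*q32), "=d" (*r32)
	    : "r" (d32), "0" (n_lo), "1" (n_hi));
}

int main(void)
{
	uint32_t q, r;

	/* 0x200000005 (8589934597) / 10 = 858993459, remainder 7 */
	div_64_by_32(0x2, 0x5, 10, &q, &r);
	printf("q=%u r=%u\n", q, r);
	return 0;
}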
81 | #ifdef CONFIG_ACPI | ||
82 | extern int acpi_lapic; | ||
83 | extern int acpi_ioapic; | ||
84 | extern int acpi_noirq; | ||
85 | extern int acpi_strict; | ||
86 | extern int acpi_disabled; | ||
87 | extern int acpi_ht; | ||
88 | extern int acpi_pci_disabled; | ||
89 | extern int acpi_skip_timer_override; | ||
90 | extern int acpi_use_timer_override; | ||
91 | |||
92 | static inline void disable_acpi(void) | ||
93 | { | ||
94 | acpi_disabled = 1; | ||
95 | acpi_ht = 0; | ||
96 | acpi_pci_disabled = 1; | ||
97 | acpi_noirq = 1; | ||
98 | } | ||
99 | |||
100 | /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ | ||
101 | #define FIX_ACPI_PAGES 4 | ||
102 | |||
103 | extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); | ||
104 | |||
105 | static inline void acpi_noirq_set(void) { acpi_noirq = 1; } | ||
106 | static inline void acpi_disable_pci(void) | ||
107 | { | ||
108 | acpi_pci_disabled = 1; | ||
109 | acpi_noirq_set(); | ||
110 | } | ||
111 | extern int acpi_irq_balance_set(char *str); | ||
112 | |||
113 | /* routines for saving/restoring kernel state */ | ||
114 | extern int acpi_save_state_mem(void); | ||
115 | extern void acpi_restore_state_mem(void); | ||
116 | |||
117 | extern unsigned long acpi_wakeup_address; | ||
118 | |||
119 | /* early initialization routine */ | ||
120 | extern void acpi_reserve_bootmem(void); | ||
11 | 121 | ||
12 | /* | 122 | /* |
13 | * Check if the CPU can handle C2 and deeper | 123 | * Check if the CPU can handle C2 and deeper |
@@ -29,4 +139,35 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
29 | return max_cstate; | 139 | return max_cstate; |
30 | } | 140 | } |
31 | 141 | ||
142 | #else /* !CONFIG_ACPI */ | ||
143 | |||
144 | #define acpi_lapic 0 | ||
145 | #define acpi_ioapic 0 | ||
146 | static inline void acpi_noirq_set(void) { } | ||
147 | static inline void acpi_disable_pci(void) { } | ||
148 | static inline void disable_acpi(void) { } | ||
149 | |||
150 | #endif /* !CONFIG_ACPI */ | ||
151 | |||
152 | #define ARCH_HAS_POWER_INIT 1 | ||
153 | |||
154 | struct bootnode; | ||
155 | |||
156 | #ifdef CONFIG_ACPI_NUMA | ||
157 | extern int acpi_numa; | ||
158 | extern int acpi_scan_nodes(unsigned long start, unsigned long end); | ||
159 | #ifdef CONFIG_X86_64 | ||
160 | # define NR_NODE_MEMBLKS (MAX_NUMNODES*2) | ||
161 | #endif | ||
162 | extern void acpi_fake_nodes(const struct bootnode *fake_nodes, | ||
163 | int num_nodes); | ||
164 | #else | ||
165 | static inline void acpi_fake_nodes(const struct bootnode *fake_nodes, | ||
166 | int num_nodes) | ||
167 | { | ||
168 | } | ||
32 | #endif | 169 | #endif |
170 | |||
171 | #define acpi_unlazy_tlb(x) leave_mm(x) | ||
172 | |||
173 | #endif /* _ASM_X86_ACPI_H */ | ||
diff --git a/include/asm-x86/acpi_32.h b/include/asm-x86/acpi_32.h
deleted file mode 100644
index 723493e6c851..000000000000
--- a/include/asm-x86/acpi_32.h
+++ /dev/null
@@ -1,143 +0,0 @@
1 | /* | ||
2 | * asm-i386/acpi.h | ||
3 | * | ||
4 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
5 | * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | * | ||
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
24 | */ | ||
25 | |||
26 | #ifndef _ASM_ACPI_H | ||
27 | #define _ASM_ACPI_H | ||
28 | |||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | #include <acpi/pdc_intel.h> | ||
32 | |||
33 | #include <asm/system.h> /* defines cmpxchg */ | ||
34 | |||
35 | #define COMPILER_DEPENDENT_INT64 long long | ||
36 | #define COMPILER_DEPENDENT_UINT64 unsigned long long | ||
37 | |||
38 | /* | ||
39 | * Calling conventions: | ||
40 | * | ||
41 | * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) | ||
42 | * ACPI_EXTERNAL_XFACE - External ACPI interfaces | ||
43 | * ACPI_INTERNAL_XFACE - Internal ACPI interfaces | ||
44 | * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces | ||
45 | */ | ||
46 | #define ACPI_SYSTEM_XFACE | ||
47 | #define ACPI_EXTERNAL_XFACE | ||
48 | #define ACPI_INTERNAL_XFACE | ||
49 | #define ACPI_INTERNAL_VAR_XFACE | ||
50 | |||
51 | /* Asm macros */ | ||
52 | |||
53 | #define ACPI_ASM_MACROS | ||
54 | #define BREAKPOINT3 | ||
55 | #define ACPI_DISABLE_IRQS() local_irq_disable() | ||
56 | #define ACPI_ENABLE_IRQS() local_irq_enable() | ||
57 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() | ||
58 | |||
59 | int __acpi_acquire_global_lock(unsigned int *lock); | ||
60 | int __acpi_release_global_lock(unsigned int *lock); | ||
61 | |||
62 | #define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ | ||
63 | ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) | ||
64 | |||
65 | #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ | ||
66 | ((Acq) = __acpi_release_global_lock(&facs->global_lock)) | ||
67 | |||
68 | /* | ||
69 | * Math helper asm macros | ||
70 | */ | ||
71 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ | ||
72 | asm("divl %2;" \ | ||
73 | :"=a"(q32), "=d"(r32) \ | ||
74 | :"r"(d32), \ | ||
75 | "0"(n_lo), "1"(n_hi)) | ||
76 | |||
77 | |||
78 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ | ||
79 | asm("shrl $1,%2;" \ | ||
80 | "rcrl $1,%3;" \ | ||
81 | :"=r"(n_hi), "=r"(n_lo) \ | ||
82 | :"0"(n_hi), "1"(n_lo)) | ||
83 | |||
84 | extern void early_quirks(void); | ||
85 | |||
86 | #ifdef CONFIG_ACPI | ||
87 | extern int acpi_lapic; | ||
88 | extern int acpi_ioapic; | ||
89 | extern int acpi_noirq; | ||
90 | extern int acpi_strict; | ||
91 | extern int acpi_disabled; | ||
92 | extern int acpi_ht; | ||
93 | extern int acpi_pci_disabled; | ||
94 | static inline void disable_acpi(void) | ||
95 | { | ||
96 | acpi_disabled = 1; | ||
97 | acpi_ht = 0; | ||
98 | acpi_pci_disabled = 1; | ||
99 | acpi_noirq = 1; | ||
100 | } | ||
101 | |||
102 | /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ | ||
103 | #define FIX_ACPI_PAGES 4 | ||
104 | |||
105 | extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); | ||
106 | |||
107 | #ifdef CONFIG_X86_IO_APIC | ||
108 | extern int acpi_skip_timer_override; | ||
109 | extern int acpi_use_timer_override; | ||
110 | #endif | ||
111 | |||
112 | static inline void acpi_noirq_set(void) { acpi_noirq = 1; } | ||
113 | static inline void acpi_disable_pci(void) | ||
114 | { | ||
115 | acpi_pci_disabled = 1; | ||
116 | acpi_noirq_set(); | ||
117 | } | ||
118 | extern int acpi_irq_balance_set(char *str); | ||
119 | |||
120 | /* routines for saving/restoring kernel state */ | ||
121 | extern int acpi_save_state_mem(void); | ||
122 | extern void acpi_restore_state_mem(void); | ||
123 | |||
124 | extern unsigned long acpi_wakeup_address; | ||
125 | |||
126 | /* early initialization routine */ | ||
127 | extern void acpi_reserve_bootmem(void); | ||
128 | |||
129 | #else /* !CONFIG_ACPI */ | ||
130 | |||
131 | #define acpi_lapic 0 | ||
132 | #define acpi_ioapic 0 | ||
133 | static inline void acpi_noirq_set(void) { } | ||
134 | static inline void acpi_disable_pci(void) { } | ||
135 | static inline void disable_acpi(void) { } | ||
136 | |||
137 | #endif /* !CONFIG_ACPI */ | ||
138 | |||
139 | #define ARCH_HAS_POWER_INIT 1 | ||
140 | |||
141 | #endif /*__KERNEL__*/ | ||
142 | |||
143 | #endif /*_ASM_ACPI_H*/ | ||
diff --git a/include/asm-x86/acpi_64.h b/include/asm-x86/acpi_64.h
deleted file mode 100644
index 98173357dd89..000000000000
--- a/include/asm-x86/acpi_64.h
+++ /dev/null
@@ -1,153 +0,0 @@
1 | /* | ||
2 | * asm-x86_64/acpi.h | ||
3 | * | ||
4 | * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | ||
5 | * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> | ||
6 | * | ||
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | * | ||
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
24 | */ | ||
25 | |||
26 | #ifndef _ASM_ACPI_H | ||
27 | #define _ASM_ACPI_H | ||
28 | |||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | #include <acpi/pdc_intel.h> | ||
32 | #include <asm/numa.h> | ||
33 | |||
34 | #define COMPILER_DEPENDENT_INT64 long long | ||
35 | #define COMPILER_DEPENDENT_UINT64 unsigned long long | ||
36 | |||
37 | /* | ||
38 | * Calling conventions: | ||
39 | * | ||
40 | * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) | ||
41 | * ACPI_EXTERNAL_XFACE - External ACPI interfaces | ||
42 | * ACPI_INTERNAL_XFACE - Internal ACPI interfaces | ||
43 | * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces | ||
44 | */ | ||
45 | #define ACPI_SYSTEM_XFACE | ||
46 | #define ACPI_EXTERNAL_XFACE | ||
47 | #define ACPI_INTERNAL_XFACE | ||
48 | #define ACPI_INTERNAL_VAR_XFACE | ||
49 | |||
50 | /* Asm macros */ | ||
51 | |||
52 | #define ACPI_ASM_MACROS | ||
53 | #define BREAKPOINT3 | ||
54 | #define ACPI_DISABLE_IRQS() local_irq_disable() | ||
55 | #define ACPI_ENABLE_IRQS() local_irq_enable() | ||
56 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() | ||
57 | |||
58 | int __acpi_acquire_global_lock(unsigned int *lock); | ||
59 | int __acpi_release_global_lock(unsigned int *lock); | ||
60 | |||
61 | #define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ | ||
62 | ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) | ||
63 | |||
64 | #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ | ||
65 | ((Acq) = __acpi_release_global_lock(&facs->global_lock)) | ||
66 | |||
67 | /* | ||
68 | * Math helper asm macros | ||
69 | */ | ||
70 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ | ||
71 | asm("divl %2;" \ | ||
72 | :"=a"(q32), "=d"(r32) \ | ||
73 | :"r"(d32), \ | ||
74 | "0"(n_lo), "1"(n_hi)) | ||
75 | |||
76 | |||
77 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ | ||
78 | asm("shrl $1,%2;" \ | ||
79 | "rcrl $1,%3;" \ | ||
80 | :"=r"(n_hi), "=r"(n_lo) \ | ||
81 | :"0"(n_hi), "1"(n_lo)) | ||
82 | |||
83 | #ifdef CONFIG_ACPI | ||
84 | extern int acpi_lapic; | ||
85 | extern int acpi_ioapic; | ||
86 | extern int acpi_noirq; | ||
87 | extern int acpi_strict; | ||
88 | extern int acpi_disabled; | ||
89 | extern int acpi_pci_disabled; | ||
90 | extern int acpi_ht; | ||
91 | static inline void disable_acpi(void) | ||
92 | { | ||
93 | acpi_disabled = 1; | ||
94 | acpi_ht = 0; | ||
95 | acpi_pci_disabled = 1; | ||
96 | acpi_noirq = 1; | ||
97 | } | ||
98 | |||
99 | /* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ | ||
100 | #define FIX_ACPI_PAGES 4 | ||
101 | |||
102 | extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); | ||
103 | static inline void acpi_noirq_set(void) { acpi_noirq = 1; } | ||
104 | static inline void acpi_disable_pci(void) | ||
105 | { | ||
106 | acpi_pci_disabled = 1; | ||
107 | acpi_noirq_set(); | ||
108 | } | ||
109 | extern int acpi_irq_balance_set(char *str); | ||
110 | |||
111 | /* routines for saving/restoring kernel state */ | ||
112 | extern int acpi_save_state_mem(void); | ||
113 | extern void acpi_restore_state_mem(void); | ||
114 | |||
115 | extern unsigned long acpi_wakeup_address; | ||
116 | |||
117 | /* early initialization routine */ | ||
118 | extern void acpi_reserve_bootmem(void); | ||
119 | |||
120 | #else /* !CONFIG_ACPI */ | ||
121 | |||
122 | #define acpi_lapic 0 | ||
123 | #define acpi_ioapic 0 | ||
124 | static inline void acpi_noirq_set(void) { } | ||
125 | static inline void acpi_disable_pci(void) { } | ||
126 | |||
127 | #endif /* !CONFIG_ACPI */ | ||
128 | |||
129 | extern int acpi_numa; | ||
130 | extern int acpi_scan_nodes(unsigned long start, unsigned long end); | ||
131 | #define NR_NODE_MEMBLKS (MAX_NUMNODES*2) | ||
132 | |||
133 | extern int acpi_disabled; | ||
134 | extern int acpi_pci_disabled; | ||
135 | |||
136 | #define ARCH_HAS_POWER_INIT 1 | ||
137 | |||
138 | extern int acpi_skip_timer_override; | ||
139 | extern int acpi_use_timer_override; | ||
140 | |||
141 | #ifdef CONFIG_ACPI_NUMA | ||
142 | extern void __init acpi_fake_nodes(const struct bootnode *fake_nodes, | ||
143 | int num_nodes); | ||
144 | #else | ||
145 | static inline void acpi_fake_nodes(const struct bootnode *fake_nodes, | ||
146 | int num_nodes) | ||
147 | { | ||
148 | } | ||
149 | #endif | ||
150 | |||
151 | #endif /*__KERNEL__*/ | ||
152 | |||
153 | #endif /*_ASM_ACPI_H*/ | ||
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index 62df2a9e7130..e4004a9f6a9a 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -12,13 +12,8 @@
12 | * page. This avoids data corruption on some CPUs. | 12 | * page. This avoids data corruption on some CPUs. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | /* | 15 | #define map_page_into_agp(page) set_pages_uc(page, 1) |
16 | * Caller's responsibility to call global_flush_tlb() for performance | 16 | #define unmap_page_from_agp(page) set_pages_wb(page, 1) |
17 | * reasons | ||
18 | */ | ||
19 | #define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) | ||
20 | #define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) | ||
21 | #define flush_agp_mappings() global_flush_tlb() | ||
22 | 17 | ||
23 | /* | 18 | /* |
24 | * Could use CLFLUSH here if the cpu supports it. But then it would | 19 | * Could use CLFLUSH here if the cpu supports it. But then it would |
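The rewritten macros replace the old change_page_attr() plus caller-side global_flush_tlb() pairing with set_pages_uc()/set_pages_wb(), and flush_agp_mappings() disappears, so no separate flush step remains at the call sites. A hedged sketch of a backend-style caller after this change (the function names and the elided GART step are illustrative):

static void example_agp_bind_page(struct page *page)
{
	map_page_into_agp(page);	/* set_pages_uc(page, 1): map uncached */
	/* ... program the GART/aperture entry for page_to_phys(page) ... */
}

static void example_agp_unbind_page(struct page *page)
{
	/* ... clear the GART/aperture entry ... */
	unmap_page_from_agp(page);	/* set_pages_wb(page, 1): back to write-back */
}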
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index 9eef6a32a130..d8bacf3c4b08 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -1,5 +1,161 @@
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_ALTERNATIVE_H |
2 | # include "alternative_32.h" | 2 | #define _ASM_X86_ALTERNATIVE_H |
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/stddef.h> | ||
6 | #include <asm/asm.h> | ||
7 | |||
8 | /* | ||
9 | * Alternative inline assembly for SMP. | ||
10 | * | ||
11 | * The LOCK_PREFIX macro defined here replaces the LOCK and | ||
12 | * LOCK_PREFIX macros used everywhere in the source tree. | ||
13 | * | ||
14 | * SMP alternatives use the same data structures as the other | ||
15 | * alternatives and the X86_FEATURE_UP flag to indicate the case of a | ||
16 | * UP system running a SMP kernel. The existing apply_alternatives() | ||
17 | * works fine for patching a SMP kernel for UP. | ||
18 | * | ||
19 | * The SMP alternative tables can be kept after boot and contain both | ||
20 | * UP and SMP versions of the instructions to allow switching back to | ||
21 | * SMP at runtime, when hotplugging in a new CPU, which is especially | ||
22 | * useful in virtualized environments. | ||
23 | * | ||
24 | * The very common lock prefix is handled as special case in a | ||
25 | * separate table which is a pure address list without replacement ptr | ||
26 | * and size information. That keeps the table sizes small. | ||
27 | */ | ||
28 | |||
29 | #ifdef CONFIG_SMP | ||
30 | #define LOCK_PREFIX \ | ||
31 | ".section .smp_locks,\"a\"\n" \ | ||
32 | _ASM_ALIGN "\n" \ | ||
33 | _ASM_PTR "661f\n" /* address */ \ | ||
34 | ".previous\n" \ | ||
35 | "661:\n\tlock; " | ||
36 | |||
37 | #else /* ! CONFIG_SMP */ | ||
38 | #define LOCK_PREFIX "" | ||
39 | #endif | ||
40 | |||
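A hedged usage sketch, modeled on the kernel's atomic_add(): on SMP builds LOCK_PREFIX emits "lock; " and records the prefix address in .smp_locks so it can be patched out when running UP; on UP builds it expands to nothing:

static inline void example_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}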
41 | /* This must be included *after* the definition of LOCK_PREFIX */ | ||
42 | #include <asm/cpufeature.h> | ||
43 | |||
44 | struct alt_instr { | ||
45 | u8 *instr; /* original instruction */ | ||
46 | u8 *replacement; | ||
47 | u8 cpuid; /* cpuid bit set for replacement */ | ||
48 | u8 instrlen; /* length of original instruction */ | ||
49 | u8 replacementlen; /* length of new instruction, <= instrlen */ | ||
50 | u8 pad1; | ||
51 | #ifdef CONFIG_X86_64 | ||
52 | u32 pad2; | ||
53 | #endif | ||
54 | }; | ||
55 | |||
56 | extern void alternative_instructions(void); | ||
57 | extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); | ||
58 | |||
59 | struct module; | ||
60 | |||
61 | #ifdef CONFIG_SMP | ||
62 | extern void alternatives_smp_module_add(struct module *mod, char *name, | ||
63 | void *locks, void *locks_end, | ||
64 | void *text, void *text_end); | ||
65 | extern void alternatives_smp_module_del(struct module *mod); | ||
66 | extern void alternatives_smp_switch(int smp); | ||
67 | #else | ||
68 | static inline void alternatives_smp_module_add(struct module *mod, char *name, | ||
69 | void *locks, void *locks_end, | ||
70 | void *text, void *text_end) {} | ||
71 | static inline void alternatives_smp_module_del(struct module *mod) {} | ||
72 | static inline void alternatives_smp_switch(int smp) {} | ||
73 | #endif /* CONFIG_SMP */ | ||
74 | |||
75 | /* | ||
76 | * Alternative instructions for different CPU types or capabilities. | ||
77 | * | ||
78 | * This allows the use of optimized instructions even on generic binary | ||
79 | * kernels. | ||
80 | * | ||
81 | * The length of oldinstr must be greater than or equal to that of | ||
82 | * newinstr; it can be padded with nops as needed. | ||
83 | * | ||
84 | * For non-barrier-like inlines, please define new variants | ||
85 | * without volatile and memory clobber. | ||
86 | */ | ||
87 | #define alternative(oldinstr, newinstr, feature) \ | ||
88 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
89 | ".section .altinstructions,\"a\"\n" \ | ||
90 | _ASM_ALIGN "\n" \ | ||
91 | _ASM_PTR "661b\n" /* label */ \ | ||
92 | _ASM_PTR "663f\n" /* new instruction */ \ | ||
93 | " .byte %c0\n" /* feature bit */ \ | ||
94 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
95 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
96 | ".previous\n" \ | ||
97 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
98 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
99 | ".previous" :: "i" (feature) : "memory") | ||
100 | |||
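A hedged usage sketch under an illustrative name; the 32-bit kernel's mb() is the classic consumer, falling back to a locked add to the stack where mfence (X86_FEATURE_XMM2) is not available:

#define example_mb() \
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)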
101 | /* | ||
102 | * Alternative inline assembly with input. | ||
103 | * | ||
104 | * Peculiarities: | ||
105 | * No memory clobber here. | ||
106 | * Argument numbers start with 1. | ||
107 | * It is best to use constraints that are fixed size (like (%1) ... "r"). | ||
108 | * If you use variable-sized constraints like "m" or "g" in the | ||
109 | * replacement, make sure to pad to the worst-case length. | ||
110 | */ | ||
111 | #define alternative_input(oldinstr, newinstr, feature, input...) \ | ||
112 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
113 | ".section .altinstructions,\"a\"\n" \ | ||
114 | _ASM_ALIGN "\n" \ | ||
115 | _ASM_PTR "661b\n" /* label */ \ | ||
116 | _ASM_PTR "663f\n" /* new instruction */ \ | ||
117 | " .byte %c0\n" /* feature bit */ \ | ||
118 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
119 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
120 | ".previous\n" \ | ||
121 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
122 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
123 | ".previous" :: "i" (feature), ##input) | ||
124 | |||
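A hedged sketch modeled on the kernel's prefetch(): the input becomes operand %1 (operand %0 is the feature constant), and oldinstr is padded to cover the 3-byte prefetchnta, per the length rule above. The raw nop bytes stand in for the kernel's ASM_NOP4:

static inline void example_prefetch(const void *x)
{
	alternative_input(".byte 0x8d,0x74,0x26,0x00",	/* 4-byte nop */
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}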
125 | /* Like alternative_input, but with a single output argument */ | ||
126 | #define alternative_io(oldinstr, newinstr, feature, output, input...) \ | ||
127 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
128 | ".section .altinstructions,\"a\"\n" \ | ||
129 | _ASM_ALIGN "\n" \ | ||
130 | _ASM_PTR "661b\n" /* label */ \ | ||
131 | _ASM_PTR "663f\n" /* new instruction */ \ | ||
132 | " .byte %c[feat]\n" /* feature bit */ \ | ||
133 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
134 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
135 | ".previous\n" \ | ||
136 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
137 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
138 | ".previous" : output : [feat] "i" (feature), ##input) | ||
139 | |||
140 | /* | ||
141 | * Use this macro if you need more than one output parameter | ||
142 | * in alternative_io. | ||
143 | */ | ||
144 | #define ASM_OUTPUT2(a, b) a, b | ||
145 | |||
146 | struct paravirt_patch_site; | ||
147 | #ifdef CONFIG_PARAVIRT | ||
148 | void apply_paravirt(struct paravirt_patch_site *start, | ||
149 | struct paravirt_patch_site *end); | ||
3 | #else | 150 | #else |
4 | # include "alternative_64.h" | 151 | static inline void |
152 | apply_paravirt(struct paravirt_patch_site *start, | ||
153 | struct paravirt_patch_site *end) | ||
154 | {} | ||
155 | #define __parainstructions NULL | ||
156 | #define __parainstructions_end NULL | ||
5 | #endif | 157 | #endif |
158 | |||
159 | extern void text_poke(void *addr, unsigned char *opcode, int len); | ||
160 | |||
161 | #endif /* _ASM_X86_ALTERNATIVE_H */ | ||
diff --git a/include/asm-x86/alternative_32.h b/include/asm-x86/alternative_32.h
deleted file mode 100644
index bda6c810c0f4..000000000000
--- a/include/asm-x86/alternative_32.h
+++ /dev/null
@@ -1,154 +0,0 @@
1 | #ifndef _I386_ALTERNATIVE_H | ||
2 | #define _I386_ALTERNATIVE_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/stddef.h> | ||
6 | #include <linux/types.h> | ||
7 | |||
8 | struct alt_instr { | ||
9 | u8 *instr; /* original instruction */ | ||
10 | u8 *replacement; | ||
11 | u8 cpuid; /* cpuid bit set for replacement */ | ||
12 | u8 instrlen; /* length of original instruction */ | ||
13 | u8 replacementlen; /* length of new instruction, <= instrlen */ | ||
14 | u8 pad; | ||
15 | }; | ||
16 | |||
17 | extern void alternative_instructions(void); | ||
18 | extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); | ||
19 | |||
20 | struct module; | ||
21 | #ifdef CONFIG_SMP | ||
22 | extern void alternatives_smp_module_add(struct module *mod, char *name, | ||
23 | void *locks, void *locks_end, | ||
24 | void *text, void *text_end); | ||
25 | extern void alternatives_smp_module_del(struct module *mod); | ||
26 | extern void alternatives_smp_switch(int smp); | ||
27 | #else | ||
28 | static inline void alternatives_smp_module_add(struct module *mod, char *name, | ||
29 | void *locks, void *locks_end, | ||
30 | void *text, void *text_end) {} | ||
31 | static inline void alternatives_smp_module_del(struct module *mod) {} | ||
32 | static inline void alternatives_smp_switch(int smp) {} | ||
33 | #endif /* CONFIG_SMP */ | ||
34 | |||
35 | /* | ||
36 | * Alternative instructions for different CPU types or capabilities. | ||
37 | * | ||
38 | * This allows to use optimized instructions even on generic binary | ||
39 | * kernels. | ||
40 | * | ||
41 | * length of oldinstr must be longer or equal the length of newinstr | ||
42 | * It can be padded with nops as needed. | ||
43 | * | ||
44 | * For non barrier like inlines please define new variants | ||
45 | * without volatile and memory clobber. | ||
46 | */ | ||
47 | #define alternative(oldinstr, newinstr, feature) \ | ||
48 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
49 | ".section .altinstructions,\"a\"\n" \ | ||
50 | " .align 4\n" \ | ||
51 | " .long 661b\n" /* label */ \ | ||
52 | " .long 663f\n" /* new instruction */ \ | ||
53 | " .byte %c0\n" /* feature bit */ \ | ||
54 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
55 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
56 | ".previous\n" \ | ||
57 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
58 | "663:\n\t" newinstr "\n664:\n" /* replacement */\ | ||
59 | ".previous" :: "i" (feature) : "memory") | ||
60 | |||
61 | /* | ||
62 | * Alternative inline assembly with input. | ||
63 | * | ||
64 | * Peculiarities: | ||
65 | * No memory clobber here. | ||
66 | * Argument numbers start with 1. | ||
67 | * Best is to use constraints that are fixed size (like (%1) ... "r") | ||
68 | * If you use variable sized constraints like "m" or "g" in the | ||
69 | * replacement make sure to pad to the worst case length. | ||
70 | */ | ||
71 | #define alternative_input(oldinstr, newinstr, feature, input...) \ | ||
72 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
73 | ".section .altinstructions,\"a\"\n" \ | ||
74 | " .align 4\n" \ | ||
75 | " .long 661b\n" /* label */ \ | ||
76 | " .long 663f\n" /* new instruction */ \ | ||
77 | " .byte %c0\n" /* feature bit */ \ | ||
78 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
79 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
80 | ".previous\n" \ | ||
81 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
82 | "663:\n\t" newinstr "\n664:\n" /* replacement */\ | ||
83 | ".previous" :: "i" (feature), ##input) | ||
84 | |||
85 | /* Like alternative_input, but with a single output argument */ | ||
86 | #define alternative_io(oldinstr, newinstr, feature, output, input...) \ | ||
87 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
88 | ".section .altinstructions,\"a\"\n" \ | ||
89 | " .align 4\n" \ | ||
90 | " .long 661b\n" /* label */ \ | ||
91 | " .long 663f\n" /* new instruction */ \ | ||
92 | " .byte %c[feat]\n" /* feature bit */ \ | ||
93 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
94 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
95 | ".previous\n" \ | ||
96 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
97 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
98 | ".previous" : output : [feat] "i" (feature), ##input) | ||
99 | |||
100 | /* | ||
101 | * use this macro(s) if you need more than one output parameter | ||
102 | * in alternative_io | ||
103 | */ | ||
104 | #define ASM_OUTPUT2(a, b) a, b | ||
105 | |||
106 | /* | ||
107 | * Alternative inline assembly for SMP. | ||
108 | * | ||
109 | * The LOCK_PREFIX macro defined here replaces the LOCK and | ||
110 | * LOCK_PREFIX macros used everywhere in the source tree. | ||
111 | * | ||
112 | * SMP alternatives use the same data structures as the other | ||
113 | * alternatives and the X86_FEATURE_UP flag to indicate the case of a | ||
114 | * UP system running a SMP kernel. The existing apply_alternatives() | ||
115 | * works fine for patching a SMP kernel for UP. | ||
116 | * | ||
117 | * The SMP alternative tables can be kept after boot and contain both | ||
118 | * UP and SMP versions of the instructions to allow switching back to | ||
119 | * SMP at runtime, when hotplugging in a new CPU, which is especially | ||
120 | * useful in virtualized environments. | ||
121 | * | ||
122 | * The very common lock prefix is handled as special case in a | ||
123 | * separate table which is a pure address list without replacement ptr | ||
124 | * and size information. That keeps the table sizes small. | ||
125 | */ | ||
126 | |||
127 | #ifdef CONFIG_SMP | ||
128 | #define LOCK_PREFIX \ | ||
129 | ".section .smp_locks,\"a\"\n" \ | ||
130 | " .align 4\n" \ | ||
131 | " .long 661f\n" /* address */ \ | ||
132 | ".previous\n" \ | ||
133 | "661:\n\tlock; " | ||
134 | |||
135 | #else /* ! CONFIG_SMP */ | ||
136 | #define LOCK_PREFIX "" | ||
137 | #endif | ||
138 | |||
139 | struct paravirt_patch_site; | ||
140 | #ifdef CONFIG_PARAVIRT | ||
141 | void apply_paravirt(struct paravirt_patch_site *start, | ||
142 | struct paravirt_patch_site *end); | ||
143 | #else | ||
144 | static inline void | ||
145 | apply_paravirt(struct paravirt_patch_site *start, | ||
146 | struct paravirt_patch_site *end) | ||
147 | {} | ||
148 | #define __parainstructions NULL | ||
149 | #define __parainstructions_end NULL | ||
150 | #endif | ||
151 | |||
152 | extern void text_poke(void *addr, unsigned char *opcode, int len); | ||
153 | |||
154 | #endif /* _I386_ALTERNATIVE_H */ | ||
diff --git a/include/asm-x86/alternative_64.h b/include/asm-x86/alternative_64.h
deleted file mode 100644
index ab161e810151..000000000000
--- a/include/asm-x86/alternative_64.h
+++ /dev/null
@@ -1,159 +0,0 @@
1 | #ifndef _X86_64_ALTERNATIVE_H | ||
2 | #define _X86_64_ALTERNATIVE_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/types.h> | ||
7 | #include <linux/stddef.h> | ||
8 | |||
9 | /* | ||
10 | * Alternative inline assembly for SMP. | ||
11 | * | ||
12 | * The LOCK_PREFIX macro defined here replaces the LOCK and | ||
13 | * LOCK_PREFIX macros used everywhere in the source tree. | ||
14 | * | ||
15 | * SMP alternatives use the same data structures as the other | ||
16 | * alternatives and the X86_FEATURE_UP flag to indicate the case of a | ||
17 | * UP system running a SMP kernel. The existing apply_alternatives() | ||
18 | * works fine for patching a SMP kernel for UP. | ||
19 | * | ||
20 | * The SMP alternative tables can be kept after boot and contain both | ||
21 | * UP and SMP versions of the instructions to allow switching back to | ||
22 | * SMP at runtime, when hotplugging in a new CPU, which is especially | ||
23 | * useful in virtualized environments. | ||
24 | * | ||
25 | * The very common lock prefix is handled as special case in a | ||
26 | * separate table which is a pure address list without replacement ptr | ||
27 | * and size information. That keeps the table sizes small. | ||
28 | */ | ||
29 | |||
30 | #ifdef CONFIG_SMP | ||
31 | #define LOCK_PREFIX \ | ||
32 | ".section .smp_locks,\"a\"\n" \ | ||
33 | " .align 8\n" \ | ||
34 | " .quad 661f\n" /* address */ \ | ||
35 | ".previous\n" \ | ||
36 | "661:\n\tlock; " | ||
37 | |||
38 | #else /* ! CONFIG_SMP */ | ||
39 | #define LOCK_PREFIX "" | ||
40 | #endif | ||
41 | |||
42 | /* This must be included *after* the definition of LOCK_PREFIX */ | ||
43 | #include <asm/cpufeature.h> | ||
44 | |||
45 | struct alt_instr { | ||
46 | u8 *instr; /* original instruction */ | ||
47 | u8 *replacement; | ||
48 | u8 cpuid; /* cpuid bit set for replacement */ | ||
49 | u8 instrlen; /* length of original instruction */ | ||
50 | u8 replacementlen; /* length of new instruction, <= instrlen */ | ||
51 | u8 pad[5]; | ||
52 | }; | ||
53 | |||
54 | extern void alternative_instructions(void); | ||
55 | extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); | ||
56 | |||
57 | struct module; | ||
58 | |||
59 | #ifdef CONFIG_SMP | ||
60 | extern void alternatives_smp_module_add(struct module *mod, char *name, | ||
61 | void *locks, void *locks_end, | ||
62 | void *text, void *text_end); | ||
63 | extern void alternatives_smp_module_del(struct module *mod); | ||
64 | extern void alternatives_smp_switch(int smp); | ||
65 | #else | ||
66 | static inline void alternatives_smp_module_add(struct module *mod, char *name, | ||
67 | void *locks, void *locks_end, | ||
68 | void *text, void *text_end) {} | ||
69 | static inline void alternatives_smp_module_del(struct module *mod) {} | ||
70 | static inline void alternatives_smp_switch(int smp) {} | ||
71 | #endif | ||
72 | |||
73 | #endif | ||
74 | |||
75 | /* | ||
76 | * Alternative instructions for different CPU types or capabilities. | ||
77 | * | ||
78 | * This allows to use optimized instructions even on generic binary | ||
79 | * kernels. | ||
80 | * | ||
81 | * length of oldinstr must be longer or equal the length of newinstr | ||
82 | * It can be padded with nops as needed. | ||
83 | * | ||
84 | * For non barrier like inlines please define new variants | ||
85 | * without volatile and memory clobber. | ||
86 | */ | ||
87 | #define alternative(oldinstr, newinstr, feature) \ | ||
88 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
89 | ".section .altinstructions,\"a\"\n" \ | ||
90 | " .align 8\n" \ | ||
91 | " .quad 661b\n" /* label */ \ | ||
92 | " .quad 663f\n" /* new instruction */ \ | ||
93 | " .byte %c0\n" /* feature bit */ \ | ||
94 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
95 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
96 | ".previous\n" \ | ||
97 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
98 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
99 | ".previous" :: "i" (feature) : "memory") | ||
100 | |||
101 | /* | ||
102 | * Alternative inline assembly with input. | ||
103 | * | ||
104 | * Peculiarities: | ||
105 | * No memory clobber here. | ||
106 | * Argument numbers start with 1. | ||
107 | * Best is to use constraints that are fixed size (like (%1) ... "r") | ||
108 | * If you use variable sized constraints like "m" or "g" in the | ||
109 | * replacement make sure to pad to the worst case length. | ||
110 | */ | ||
111 | #define alternative_input(oldinstr, newinstr, feature, input...) \ | ||
112 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
113 | ".section .altinstructions,\"a\"\n" \ | ||
114 | " .align 8\n" \ | ||
115 | " .quad 661b\n" /* label */ \ | ||
116 | " .quad 663f\n" /* new instruction */ \ | ||
117 | " .byte %c0\n" /* feature bit */ \ | ||
118 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
119 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
120 | ".previous\n" \ | ||
121 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
122 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
123 | ".previous" :: "i" (feature), ##input) | ||
124 | |||
125 | /* Like alternative_input, but with a single output argument */ | ||
126 | #define alternative_io(oldinstr, newinstr, feature, output, input...) \ | ||
127 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
128 | ".section .altinstructions,\"a\"\n" \ | ||
129 | " .align 8\n" \ | ||
130 | " .quad 661b\n" /* label */ \ | ||
131 | " .quad 663f\n" /* new instruction */ \ | ||
132 | " .byte %c[feat]\n" /* feature bit */ \ | ||
133 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
134 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
135 | ".previous\n" \ | ||
136 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
137 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
138 | ".previous" : output : [feat] "i" (feature), ##input) | ||
139 | |||
140 | /* | ||
141 | * use this macro(s) if you need more than one output parameter | ||
142 | * in alternative_io | ||
143 | */ | ||
144 | #define ASM_OUTPUT2(a, b) a, b | ||
145 | |||
146 | struct paravirt_patch; | ||
147 | #ifdef CONFIG_PARAVIRT | ||
148 | void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); | ||
149 | #else | ||
150 | static inline void | ||
151 | apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) | ||
152 | {} | ||
153 | #define __parainstructions NULL | ||
154 | #define __parainstructions_end NULL | ||
155 | #endif | ||
156 | |||
157 | extern void text_poke(void *addr, unsigned char *opcode, int len); | ||
158 | |||
159 | #endif /* _X86_64_ALTERNATIVE_H */ | ||
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 9fbcc0bd2ac4..bcfc07fd3661 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -1,5 +1,140 @@
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_APIC_H |
2 | # include "apic_32.h" | 2 | #define _ASM_X86_APIC_H |
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <linux/delay.h> | ||
6 | #include <asm/fixmap.h> | ||
7 | #include <asm/apicdef.h> | ||
8 | #include <asm/processor.h> | ||
9 | #include <asm/system.h> | ||
10 | |||
11 | #define ARCH_APICTIMER_STOPS_ON_C3 1 | ||
12 | |||
13 | #define Dprintk(x...) | ||
14 | |||
15 | /* | ||
16 | * Debugging macros | ||
17 | */ | ||
18 | #define APIC_QUIET 0 | ||
19 | #define APIC_VERBOSE 1 | ||
20 | #define APIC_DEBUG 2 | ||
21 | |||
22 | /* | ||
23 | * Define the default level of output to be very little. | ||
24 | * This can be turned up by using apic=verbose for more | ||
25 | * information and apic=debug for _lots_ of information. | ||
26 | * apic_verbosity is defined in apic.c. | ||
27 | */ | ||
28 | #define apic_printk(v, s, a...) do { \ | ||
29 | if ((v) <= apic_verbosity) \ | ||
30 | printk(s, ##a); \ | ||
31 | } while (0) | ||
32 | |||
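A hedged usage sketch: the call always compiles in, and the verbosity check happens at run time against apic_verbosity, which apic=verbose or apic=debug raise. The function name is illustrative:

static void example_report_apic_base(void)
{
	apic_printk(APIC_VERBOSE, KERN_INFO "APIC mapped at %#lx\n",
		    APIC_BASE);
}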
33 | |||
34 | extern void generic_apic_probe(void); | ||
35 | |||
36 | #ifdef CONFIG_X86_LOCAL_APIC | ||
37 | |||
38 | extern int apic_verbosity; | ||
39 | extern int timer_over_8254; | ||
40 | extern int local_apic_timer_c2_ok; | ||
41 | extern int local_apic_timer_disabled; | ||
42 | |||
43 | extern int apic_runs_main_timer; | ||
44 | extern int ioapic_force; | ||
45 | extern int disable_apic; | ||
46 | extern int disable_apic_timer; | ||
47 | extern unsigned boot_cpu_id; | ||
48 | |||
49 | /* | ||
50 | * Basic functions accessing APICs. | ||
51 | */ | ||
52 | #ifdef CONFIG_PARAVIRT | ||
53 | #include <asm/paravirt.h> | ||
3 | #else | 54 | #else |
4 | # include "apic_64.h" | 55 | #define apic_write native_apic_write |
56 | #define apic_write_atomic native_apic_write_atomic | ||
57 | #define apic_read native_apic_read | ||
58 | #define setup_boot_clock setup_boot_APIC_clock | ||
59 | #define setup_secondary_clock setup_secondary_APIC_clock | ||
5 | #endif | 60 | #endif |
61 | |||
62 | static inline void native_apic_write(unsigned long reg, u32 v) | ||
63 | { | ||
64 | *((volatile u32 *)(APIC_BASE + reg)) = v; | ||
65 | } | ||
66 | |||
67 | static inline void native_apic_write_atomic(unsigned long reg, u32 v) | ||
68 | { | ||
69 | (void) xchg((u32*)(APIC_BASE + reg), v); | ||
70 | } | ||
71 | |||
72 | static inline u32 native_apic_read(unsigned long reg) | ||
73 | { | ||
74 | return *((volatile u32 *)(APIC_BASE + reg)); | ||
75 | } | ||
76 | |||
77 | extern void apic_wait_icr_idle(void); | ||
78 | extern u32 safe_apic_wait_icr_idle(void); | ||
79 | extern int get_physical_broadcast(void); | ||
80 | |||
81 | #ifdef CONFIG_X86_GOOD_APIC | ||
82 | # define FORCE_READ_AROUND_WRITE 0 | ||
83 | # define apic_read_around(x) | ||
84 | # define apic_write_around(x, y) apic_write((x), (y)) | ||
85 | #else | ||
86 | # define FORCE_READ_AROUND_WRITE 1 | ||
87 | # define apic_read_around(x) apic_read(x) | ||
88 | # define apic_write_around(x, y) apic_write_atomic((x), (y)) | ||
89 | #endif | ||
90 | |||
91 | static inline void ack_APIC_irq(void) | ||
92 | { | ||
93 | /* | ||
94 | * ack_APIC_irq() actually gets compiled as a single instruction: | ||
95 | * - a single rmw on Pentium/82489DX | ||
96 | * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) | ||
97 | * ... yummie. | ||
98 | */ | ||
99 | |||
100 | /* Docs say use 0 for future compatibility */ | ||
101 | apic_write_around(APIC_EOI, 0); | ||
102 | } | ||
103 | |||
104 | extern int lapic_get_maxlvt(void); | ||
105 | extern void clear_local_APIC(void); | ||
106 | extern void connect_bsp_APIC(void); | ||
107 | extern void disconnect_bsp_APIC(int virt_wire_setup); | ||
108 | extern void disable_local_APIC(void); | ||
109 | extern void lapic_shutdown(void); | ||
110 | extern int verify_local_APIC(void); | ||
111 | extern void cache_APIC_registers(void); | ||
112 | extern void sync_Arb_IDs(void); | ||
113 | extern void init_bsp_APIC(void); | ||
114 | extern void setup_local_APIC(void); | ||
115 | extern void end_local_APIC_setup(void); | ||
116 | extern void init_apic_mappings(void); | ||
117 | extern void setup_boot_APIC_clock(void); | ||
118 | extern void setup_secondary_APIC_clock(void); | ||
119 | extern int APIC_init_uniprocessor(void); | ||
120 | extern void enable_NMI_through_LVT0(void); | ||
121 | |||
122 | /* | ||
123 | * On 32-bit this is mach-xxx local. | ||
124 | */ | ||
125 | #ifdef CONFIG_X86_64 | ||
126 | extern void setup_apic_routing(void); | ||
127 | #endif | ||
128 | |||
129 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); | ||
130 | extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); | ||
131 | |||
132 | extern int apic_is_clustered_box(void); | ||
133 | |||
134 | #else /* !CONFIG_X86_LOCAL_APIC */ | ||
135 | static inline void lapic_shutdown(void) { } | ||
136 | #define local_apic_timer_c2_ok 1 | ||
137 | |||
138 | #endif /* !CONFIG_X86_LOCAL_APIC */ | ||
139 | |||
140 | #endif /* _ASM_X86_APIC_H */ | ||
diff --git a/include/asm-x86/apic_32.h b/include/asm-x86/apic_32.h
deleted file mode 100644
index be158b27d54b..000000000000
--- a/include/asm-x86/apic_32.h
+++ /dev/null
@@ -1,127 +0,0 @@
1 | #ifndef __ASM_APIC_H | ||
2 | #define __ASM_APIC_H | ||
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <linux/delay.h> | ||
6 | #include <asm/fixmap.h> | ||
7 | #include <asm/apicdef.h> | ||
8 | #include <asm/processor.h> | ||
9 | #include <asm/system.h> | ||
10 | |||
11 | #define Dprintk(x...) | ||
12 | |||
13 | /* | ||
14 | * Debugging macros | ||
15 | */ | ||
16 | #define APIC_QUIET 0 | ||
17 | #define APIC_VERBOSE 1 | ||
18 | #define APIC_DEBUG 2 | ||
19 | |||
20 | extern int apic_verbosity; | ||
21 | |||
22 | /* | ||
23 | * Define the default level of output to be very little | ||
24 | * This can be turned up by using apic=verbose for more | ||
25 | * information and apic=debug for _lots_ of information. | ||
26 | * apic_verbosity is defined in apic.c | ||
27 | */ | ||
28 | #define apic_printk(v, s, a...) do { \ | ||
29 | if ((v) <= apic_verbosity) \ | ||
30 | printk(s, ##a); \ | ||
31 | } while (0) | ||
32 | |||
33 | |||
34 | extern void generic_apic_probe(void); | ||
35 | |||
36 | #ifdef CONFIG_X86_LOCAL_APIC | ||
37 | |||
38 | /* | ||
39 | * Basic functions accessing APICs. | ||
40 | */ | ||
41 | #ifdef CONFIG_PARAVIRT | ||
42 | #include <asm/paravirt.h> | ||
43 | #else | ||
44 | #define apic_write native_apic_write | ||
45 | #define apic_write_atomic native_apic_write_atomic | ||
46 | #define apic_read native_apic_read | ||
47 | #define setup_boot_clock setup_boot_APIC_clock | ||
48 | #define setup_secondary_clock setup_secondary_APIC_clock | ||
49 | #endif | ||
50 | |||
51 | static __inline fastcall void native_apic_write(unsigned long reg, | ||
52 | unsigned long v) | ||
53 | { | ||
54 | *((volatile unsigned long *)(APIC_BASE+reg)) = v; | ||
55 | } | ||
56 | |||
57 | static __inline fastcall void native_apic_write_atomic(unsigned long reg, | ||
58 | unsigned long v) | ||
59 | { | ||
60 | xchg((volatile unsigned long *)(APIC_BASE+reg), v); | ||
61 | } | ||
62 | |||
63 | static __inline fastcall unsigned long native_apic_read(unsigned long reg) | ||
64 | { | ||
65 | return *((volatile unsigned long *)(APIC_BASE+reg)); | ||
66 | } | ||
67 | |||
68 | void apic_wait_icr_idle(void); | ||
69 | unsigned long safe_apic_wait_icr_idle(void); | ||
70 | int get_physical_broadcast(void); | ||
71 | |||
72 | #ifdef CONFIG_X86_GOOD_APIC | ||
73 | # define FORCE_READ_AROUND_WRITE 0 | ||
74 | # define apic_read_around(x) | ||
75 | # define apic_write_around(x,y) apic_write((x),(y)) | ||
76 | #else | ||
77 | # define FORCE_READ_AROUND_WRITE 1 | ||
78 | # define apic_read_around(x) apic_read(x) | ||
79 | # define apic_write_around(x,y) apic_write_atomic((x),(y)) | ||
80 | #endif | ||
81 | |||
82 | static inline void ack_APIC_irq(void) | ||
83 | { | ||
84 | /* | ||
85 | * ack_APIC_irq() actually gets compiled as a single instruction: | ||
86 | * - a single rmw on Pentium/82489DX | ||
87 | * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) | ||
88 | * ... yummie. | ||
89 | */ | ||
90 | |||
91 | /* Docs say use 0 for future compatibility */ | ||
92 | apic_write_around(APIC_EOI, 0); | ||
93 | } | ||
94 | |||
95 | extern int lapic_get_maxlvt(void); | ||
96 | extern void clear_local_APIC(void); | ||
97 | extern void connect_bsp_APIC (void); | ||
98 | extern void disconnect_bsp_APIC (int virt_wire_setup); | ||
99 | extern void disable_local_APIC (void); | ||
100 | extern void lapic_shutdown (void); | ||
101 | extern int verify_local_APIC (void); | ||
102 | extern void cache_APIC_registers (void); | ||
103 | extern void sync_Arb_IDs (void); | ||
104 | extern void init_bsp_APIC (void); | ||
105 | extern void setup_local_APIC (void); | ||
106 | extern void init_apic_mappings (void); | ||
107 | extern void smp_local_timer_interrupt (void); | ||
108 | extern void setup_boot_APIC_clock (void); | ||
109 | extern void setup_secondary_APIC_clock (void); | ||
110 | extern int APIC_init_uniprocessor (void); | ||
111 | |||
112 | extern void enable_NMI_through_LVT0 (void * dummy); | ||
113 | |||
114 | #define ARCH_APICTIMER_STOPS_ON_C3 1 | ||
115 | |||
116 | extern int timer_over_8254; | ||
117 | extern int local_apic_timer_c2_ok; | ||
118 | |||
119 | extern int local_apic_timer_disabled; | ||
120 | |||
121 | #else /* !CONFIG_X86_LOCAL_APIC */ | ||
122 | static inline void lapic_shutdown(void) { } | ||
123 | #define local_apic_timer_c2_ok 1 | ||
124 | |||
125 | #endif /* !CONFIG_X86_LOCAL_APIC */ | ||
126 | |||
127 | #endif /* __ASM_APIC_H */ | ||
diff --git a/include/asm-x86/apic_64.h b/include/asm-x86/apic_64.h
deleted file mode 100644
index 2747a11a2b19..000000000000
--- a/include/asm-x86/apic_64.h
+++ /dev/null
@@ -1,102 +0,0 @@
1 | #ifndef __ASM_APIC_H | ||
2 | #define __ASM_APIC_H | ||
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <linux/delay.h> | ||
6 | #include <asm/fixmap.h> | ||
7 | #include <asm/apicdef.h> | ||
8 | #include <asm/system.h> | ||
9 | |||
10 | #define Dprintk(x...) | ||
11 | |||
12 | /* | ||
13 | * Debugging macros | ||
14 | */ | ||
15 | #define APIC_QUIET 0 | ||
16 | #define APIC_VERBOSE 1 | ||
17 | #define APIC_DEBUG 2 | ||
18 | |||
19 | extern int apic_verbosity; | ||
20 | extern int apic_runs_main_timer; | ||
21 | extern int ioapic_force; | ||
22 | extern int disable_apic_timer; | ||
23 | |||
24 | /* | ||
25 | * Define the default level of output to be very little | ||
26 | * This can be turned up by using apic=verbose for more | ||
27 | * information and apic=debug for _lots_ of information. | ||
28 | * apic_verbosity is defined in apic.c | ||
29 | */ | ||
30 | #define apic_printk(v, s, a...) do { \ | ||
31 | if ((v) <= apic_verbosity) \ | ||
32 | printk(s, ##a); \ | ||
33 | } while (0) | ||
34 | |||
35 | struct pt_regs; | ||
36 | |||
37 | /* | ||
38 | * Basic functions accessing APICs. | ||
39 | */ | ||
40 | |||
41 | static __inline void apic_write(unsigned long reg, unsigned int v) | ||
42 | { | ||
43 | *((volatile unsigned int *)(APIC_BASE+reg)) = v; | ||
44 | } | ||
45 | |||
46 | static __inline unsigned int apic_read(unsigned long reg) | ||
47 | { | ||
48 | return *((volatile unsigned int *)(APIC_BASE+reg)); | ||
49 | } | ||
50 | |||
51 | extern void apic_wait_icr_idle(void); | ||
52 | extern unsigned int safe_apic_wait_icr_idle(void); | ||
53 | |||
54 | static inline void ack_APIC_irq(void) | ||
55 | { | ||
56 | /* | ||
57 | * ack_APIC_irq() actually gets compiled as a single instruction: | ||
58 | * - a single rmw on Pentium/82489DX | ||
59 | * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) | ||
60 | * ... yummie. | ||
61 | */ | ||
62 | |||
63 | /* Docs say use 0 for future compatibility */ | ||
64 | apic_write(APIC_EOI, 0); | ||
65 | } | ||
66 | |||
67 | extern int get_maxlvt (void); | ||
68 | extern void clear_local_APIC (void); | ||
69 | extern void connect_bsp_APIC (void); | ||
70 | extern void disconnect_bsp_APIC (int virt_wire_setup); | ||
71 | extern void disable_local_APIC (void); | ||
72 | extern void lapic_shutdown (void); | ||
73 | extern int verify_local_APIC (void); | ||
74 | extern void cache_APIC_registers (void); | ||
75 | extern void sync_Arb_IDs (void); | ||
76 | extern void init_bsp_APIC (void); | ||
77 | extern void setup_local_APIC (void); | ||
78 | extern void init_apic_mappings (void); | ||
79 | extern void smp_local_timer_interrupt (void); | ||
80 | extern void setup_boot_APIC_clock (void); | ||
81 | extern void setup_secondary_APIC_clock (void); | ||
82 | extern int APIC_init_uniprocessor (void); | ||
83 | extern void setup_apic_routing(void); | ||
84 | |||
85 | extern void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, | ||
86 | unsigned char msg_type, unsigned char mask); | ||
87 | |||
88 | extern int apic_is_clustered_box(void); | ||
89 | |||
90 | #define K8_APIC_EXT_LVT_BASE 0x500 | ||
91 | #define K8_APIC_EXT_INT_MSG_FIX 0x0 | ||
92 | #define K8_APIC_EXT_INT_MSG_SMI 0x2 | ||
93 | #define K8_APIC_EXT_INT_MSG_NMI 0x4 | ||
94 | #define K8_APIC_EXT_INT_MSG_EXT 0x7 | ||
95 | #define K8_APIC_EXT_LVT_ENTRY_THRESHOLD 0 | ||
96 | |||
97 | #define ARCH_APICTIMER_STOPS_ON_C3 1 | ||
98 | |||
99 | extern unsigned boot_cpu_id; | ||
100 | extern int local_apic_timer_c2_ok; | ||
101 | |||
102 | #endif /* __ASM_APIC_H */ | ||
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 4542c220bf4d..550af7a6f88e 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -1,5 +1,413 @@
1 | #ifndef _ASM_X86_APICDEF_H | ||
2 | #define _ASM_X86_APICDEF_H | ||
3 | |||
4 | /* | ||
5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) | ||
6 | * | ||
7 | * Alan Cox <Alan.Cox@linux.org>, 1995. | ||
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | ||
9 | */ | ||
10 | |||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | ||
12 | |||
13 | #define APIC_ID 0x20 | ||
14 | |||
15 | #ifdef CONFIG_X86_64 | ||
16 | # define APIC_ID_MASK (0xFFu<<24) | ||
17 | # define GET_APIC_ID(x) (((x)>>24)&0xFFu) | ||
18 | # define SET_APIC_ID(x) (((x)<<24)) | ||
19 | #endif | ||
20 | |||
21 | #define APIC_LVR 0x30 | ||
22 | #define APIC_LVR_MASK 0xFF00FF | ||
23 | #define GET_APIC_VERSION(x) ((x)&0xFFu) | ||
24 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) | ||
25 | #define APIC_INTEGRATED(x) ((x)&0xF0u) | ||
26 | #define APIC_XAPIC(x) ((x) >= 0x14) | ||
27 | #define APIC_TASKPRI 0x80 | ||
28 | #define APIC_TPRI_MASK 0xFFu | ||
29 | #define APIC_ARBPRI 0x90 | ||
30 | #define APIC_ARBPRI_MASK 0xFFu | ||
31 | #define APIC_PROCPRI 0xA0 | ||
32 | #define APIC_EOI 0xB0 | ||
33 | #define APIC_EIO_ACK 0x0 | ||
34 | #define APIC_RRR 0xC0 | ||
35 | #define APIC_LDR 0xD0 | ||
36 | #define APIC_LDR_MASK (0xFFu<<24) | ||
37 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) | ||
38 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | ||
39 | #define APIC_ALL_CPUS 0xFFu | ||
40 | #define APIC_DFR 0xE0 | ||
41 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | ||
42 | #define APIC_DFR_FLAT 0xFFFFFFFFul | ||
43 | #define APIC_SPIV 0xF0 | ||
44 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | ||
45 | #define APIC_SPIV_APIC_ENABLED (1<<8) | ||
46 | #define APIC_ISR 0x100 | ||
47 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ | ||
48 | #define APIC_TMR 0x180 | ||
49 | #define APIC_IRR 0x200 | ||
50 | #define APIC_ESR 0x280 | ||
51 | #define APIC_ESR_SEND_CS 0x00001 | ||
52 | #define APIC_ESR_RECV_CS 0x00002 | ||
53 | #define APIC_ESR_SEND_ACC 0x00004 | ||
54 | #define APIC_ESR_RECV_ACC 0x00008 | ||
55 | #define APIC_ESR_SENDILL 0x00020 | ||
56 | #define APIC_ESR_RECVILL 0x00040 | ||
57 | #define APIC_ESR_ILLREGA 0x00080 | ||
58 | #define APIC_ICR 0x300 | ||
59 | #define APIC_DEST_SELF 0x40000 | ||
60 | #define APIC_DEST_ALLINC 0x80000 | ||
61 | #define APIC_DEST_ALLBUT 0xC0000 | ||
62 | #define APIC_ICR_RR_MASK 0x30000 | ||
63 | #define APIC_ICR_RR_INVALID 0x00000 | ||
64 | #define APIC_ICR_RR_INPROG 0x10000 | ||
65 | #define APIC_ICR_RR_VALID 0x20000 | ||
66 | #define APIC_INT_LEVELTRIG 0x08000 | ||
67 | #define APIC_INT_ASSERT 0x04000 | ||
68 | #define APIC_ICR_BUSY 0x01000 | ||
69 | #define APIC_DEST_LOGICAL 0x00800 | ||
70 | #define APIC_DEST_PHYSICAL 0x00000 | ||
71 | #define APIC_DM_FIXED 0x00000 | ||
72 | #define APIC_DM_LOWEST 0x00100 | ||
73 | #define APIC_DM_SMI 0x00200 | ||
74 | #define APIC_DM_REMRD 0x00300 | ||
75 | #define APIC_DM_NMI 0x00400 | ||
76 | #define APIC_DM_INIT 0x00500 | ||
77 | #define APIC_DM_STARTUP 0x00600 | ||
78 | #define APIC_DM_EXTINT 0x00700 | ||
79 | #define APIC_VECTOR_MASK 0x000FF | ||
80 | #define APIC_ICR2 0x310 | ||
81 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | ||
82 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | ||
83 | #define APIC_LVTT 0x320 | ||
84 | #define APIC_LVTTHMR 0x330 | ||
85 | #define APIC_LVTPC 0x340 | ||
86 | #define APIC_LVT0 0x350 | ||
87 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | ||
88 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | ||
89 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | ||
90 | #define APIC_TIMER_BASE_CLKIN 0x0 | ||
91 | #define APIC_TIMER_BASE_TMBASE 0x1 | ||
92 | #define APIC_TIMER_BASE_DIV 0x2 | ||
93 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | ||
94 | #define APIC_LVT_MASKED (1<<16) | ||
95 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | ||
96 | #define APIC_LVT_REMOTE_IRR (1<<14) | ||
97 | #define APIC_INPUT_POLARITY (1<<13) | ||
98 | #define APIC_SEND_PENDING (1<<12) | ||
99 | #define APIC_MODE_MASK 0x700 | ||
100 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | ||
101 | #define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8)) | ||
102 | #define APIC_MODE_FIXED 0x0 | ||
103 | #define APIC_MODE_NMI 0x4 | ||
104 | #define APIC_MODE_EXTINT 0x7 | ||
105 | #define APIC_LVT1 0x360 | ||
106 | #define APIC_LVTERR 0x370 | ||
107 | #define APIC_TMICT 0x380 | ||
108 | #define APIC_TMCCT 0x390 | ||
109 | #define APIC_TDCR 0x3E0 | ||
110 | #define APIC_TDR_DIV_TMBASE (1<<2) | ||
111 | #define APIC_TDR_DIV_1 0xB | ||
112 | #define APIC_TDR_DIV_2 0x0 | ||
113 | #define APIC_TDR_DIV_4 0x1 | ||
114 | #define APIC_TDR_DIV_8 0x2 | ||
115 | #define APIC_TDR_DIV_16 0x3 | ||
116 | #define APIC_TDR_DIV_32 0x8 | ||
117 | #define APIC_TDR_DIV_64 0x9 | ||
118 | #define APIC_TDR_DIV_128 0xA | ||
119 | #define APIC_EILVT0 0x500 | ||
120 | #define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */ | ||
121 | #define APIC_EILVT_NR_AMD_10H 4 | ||
122 | #define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF) | ||
123 | #define APIC_EILVT_MSG_FIX 0x0 | ||
124 | #define APIC_EILVT_MSG_SMI 0x2 | ||
125 | #define APIC_EILVT_MSG_NMI 0x4 | ||
126 | #define APIC_EILVT_MSG_EXT 0x7 | ||
127 | #define APIC_EILVT_MASKED (1<<16) | ||
128 | #define APIC_EILVT1 0x510 | ||
129 | #define APIC_EILVT2 0x520 | ||
130 | #define APIC_EILVT3 0x530 | ||
131 | |||
132 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | ||
133 | |||
1 | #ifdef CONFIG_X86_32 | 134 | #ifdef CONFIG_X86_32 |
2 | # include "apicdef_32.h" | 135 | # define MAX_IO_APICS 64 |
3 | #else | 136 | #else |
4 | # include "apicdef_64.h" | 137 | # define MAX_IO_APICS 128 |
138 | # define MAX_LOCAL_APIC 256 | ||
139 | #endif | ||
140 | |||
141 | /* | ||
142 | * All x86-64 systems are xAPIC compatible. | ||
143 | * In the following, "apicid" is a physical APIC ID. | ||
144 | */ | ||
145 | #define XAPIC_DEST_CPUS_SHIFT 4 | ||
146 | #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) | ||
147 | #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) | ||
148 | #define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) | ||
149 | #define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) | ||
150 | #define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) | ||
151 | #define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) | ||
152 | |||
153 | /* | ||
154 | * the local APIC register structure, memory mapped. Not terribly well | ||
155 | * tested, but we might eventually use this one in the future - the | ||
156 | * reason we cannot use it right now is the P5 APIC: it has an | ||
157 | * erratum that allows only 32-bit reads and writes, not 8-bit ones ... | ||
158 | */ | ||
159 | #define u32 unsigned int | ||
160 | |||
161 | struct local_apic { | ||
162 | |||
163 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; | ||
164 | |||
165 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; | ||
166 | |||
167 | /*020*/ struct { /* APIC ID Register */ | ||
168 | u32 __reserved_1 : 24, | ||
169 | phys_apic_id : 4, | ||
170 | __reserved_2 : 4; | ||
171 | u32 __reserved[3]; | ||
172 | } id; | ||
173 | |||
174 | /*030*/ const | ||
175 | struct { /* APIC Version Register */ | ||
176 | u32 version : 8, | ||
177 | __reserved_1 : 8, | ||
178 | max_lvt : 8, | ||
179 | __reserved_2 : 8; | ||
180 | u32 __reserved[3]; | ||
181 | } version; | ||
182 | |||
183 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; | ||
184 | |||
185 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; | ||
186 | |||
187 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; | ||
188 | |||
189 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; | ||
190 | |||
191 | /*080*/ struct { /* Task Priority Register */ | ||
192 | u32 priority : 8, | ||
193 | __reserved_1 : 24; | ||
194 | u32 __reserved_2[3]; | ||
195 | } tpr; | ||
196 | |||
197 | /*090*/ const | ||
198 | struct { /* Arbitration Priority Register */ | ||
199 | u32 priority : 8, | ||
200 | __reserved_1 : 24; | ||
201 | u32 __reserved_2[3]; | ||
202 | } apr; | ||
203 | |||
204 | /*0A0*/ const | ||
205 | struct { /* Processor Priority Register */ | ||
206 | u32 priority : 8, | ||
207 | __reserved_1 : 24; | ||
208 | u32 __reserved_2[3]; | ||
209 | } ppr; | ||
210 | |||
211 | /*0B0*/ struct { /* End Of Interrupt Register */ | ||
212 | u32 eoi; | ||
213 | u32 __reserved[3]; | ||
214 | } eoi; | ||
215 | |||
216 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; | ||
217 | |||
218 | /*0D0*/ struct { /* Logical Destination Register */ | ||
219 | u32 __reserved_1 : 24, | ||
220 | logical_dest : 8; | ||
221 | u32 __reserved_2[3]; | ||
222 | } ldr; | ||
223 | |||
224 | /*0E0*/ struct { /* Destination Format Register */ | ||
225 | u32 __reserved_1 : 28, | ||
226 | model : 4; | ||
227 | u32 __reserved_2[3]; | ||
228 | } dfr; | ||
229 | |||
230 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ | ||
231 | u32 spurious_vector : 8, | ||
232 | apic_enabled : 1, | ||
233 | focus_cpu : 1, | ||
234 | __reserved_2 : 22; | ||
235 | u32 __reserved_3[3]; | ||
236 | } svr; | ||
237 | |||
238 | /*100*/ struct { /* In Service Register */ | ||
239 | /*170*/ u32 bitfield; | ||
240 | u32 __reserved[3]; | ||
241 | } isr [8]; | ||
242 | |||
243 | /*180*/ struct { /* Trigger Mode Register */ | ||
244 | /*1F0*/ u32 bitfield; | ||
245 | u32 __reserved[3]; | ||
246 | } tmr [8]; | ||
247 | |||
248 | /*200*/ struct { /* Interrupt Request Register */ | ||
249 | /*270*/ u32 bitfield; | ||
250 | u32 __reserved[3]; | ||
251 | } irr [8]; | ||
252 | |||
253 | /*280*/ union { /* Error Status Register */ | ||
254 | struct { | ||
255 | u32 send_cs_error : 1, | ||
256 | receive_cs_error : 1, | ||
257 | send_accept_error : 1, | ||
258 | receive_accept_error : 1, | ||
259 | __reserved_1 : 1, | ||
260 | send_illegal_vector : 1, | ||
261 | receive_illegal_vector : 1, | ||
262 | illegal_register_address : 1, | ||
263 | __reserved_2 : 24; | ||
264 | u32 __reserved_3[3]; | ||
265 | } error_bits; | ||
266 | struct { | ||
267 | u32 errors; | ||
268 | u32 __reserved_3[3]; | ||
269 | } all_errors; | ||
270 | } esr; | ||
271 | |||
272 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; | ||
273 | |||
274 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; | ||
275 | |||
276 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; | ||
277 | |||
278 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; | ||
279 | |||
280 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; | ||
281 | |||
282 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; | ||
283 | |||
284 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; | ||
285 | |||
286 | /*300*/ struct { /* Interrupt Command Register 1 */ | ||
287 | u32 vector : 8, | ||
288 | delivery_mode : 3, | ||
289 | destination_mode : 1, | ||
290 | delivery_status : 1, | ||
291 | __reserved_1 : 1, | ||
292 | level : 1, | ||
293 | trigger : 1, | ||
294 | __reserved_2 : 2, | ||
295 | shorthand : 2, | ||
296 | __reserved_3 : 12; | ||
297 | u32 __reserved_4[3]; | ||
298 | } icr1; | ||
299 | |||
300 | /*310*/ struct { /* Interrupt Command Register 2 */ | ||
301 | union { | ||
302 | u32 __reserved_1 : 24, | ||
303 | phys_dest : 4, | ||
304 | __reserved_2 : 4; | ||
305 | u32 __reserved_3 : 24, | ||
306 | logical_dest : 8; | ||
307 | } dest; | ||
308 | u32 __reserved_4[3]; | ||
309 | } icr2; | ||
310 | |||
311 | /*320*/ struct { /* LVT - Timer */ | ||
312 | u32 vector : 8, | ||
313 | __reserved_1 : 4, | ||
314 | delivery_status : 1, | ||
315 | __reserved_2 : 3, | ||
316 | mask : 1, | ||
317 | timer_mode : 1, | ||
318 | __reserved_3 : 14; | ||
319 | u32 __reserved_4[3]; | ||
320 | } lvt_timer; | ||
321 | |||
322 | /*330*/ struct { /* LVT - Thermal Sensor */ | ||
323 | u32 vector : 8, | ||
324 | delivery_mode : 3, | ||
325 | __reserved_1 : 1, | ||
326 | delivery_status : 1, | ||
327 | __reserved_2 : 3, | ||
328 | mask : 1, | ||
329 | __reserved_3 : 15; | ||
330 | u32 __reserved_4[3]; | ||
331 | } lvt_thermal; | ||
332 | |||
333 | /*340*/ struct { /* LVT - Performance Counter */ | ||
334 | u32 vector : 8, | ||
335 | delivery_mode : 3, | ||
336 | __reserved_1 : 1, | ||
337 | delivery_status : 1, | ||
338 | __reserved_2 : 3, | ||
339 | mask : 1, | ||
340 | __reserved_3 : 15; | ||
341 | u32 __reserved_4[3]; | ||
342 | } lvt_pc; | ||
343 | |||
344 | /*350*/ struct { /* LVT - LINT0 */ | ||
345 | u32 vector : 8, | ||
346 | delivery_mode : 3, | ||
347 | __reserved_1 : 1, | ||
348 | delivery_status : 1, | ||
349 | polarity : 1, | ||
350 | remote_irr : 1, | ||
351 | trigger : 1, | ||
352 | mask : 1, | ||
353 | __reserved_2 : 15; | ||
354 | u32 __reserved_3[3]; | ||
355 | } lvt_lint0; | ||
356 | |||
357 | /*360*/ struct { /* LVT - LINT1 */ | ||
358 | u32 vector : 8, | ||
359 | delivery_mode : 3, | ||
360 | __reserved_1 : 1, | ||
361 | delivery_status : 1, | ||
362 | polarity : 1, | ||
363 | remote_irr : 1, | ||
364 | trigger : 1, | ||
365 | mask : 1, | ||
366 | __reserved_2 : 15; | ||
367 | u32 __reserved_3[3]; | ||
368 | } lvt_lint1; | ||
369 | |||
370 | /*370*/ struct { /* LVT - Error */ | ||
371 | u32 vector : 8, | ||
372 | __reserved_1 : 4, | ||
373 | delivery_status : 1, | ||
374 | __reserved_2 : 3, | ||
375 | mask : 1, | ||
376 | __reserved_3 : 15; | ||
377 | u32 __reserved_4[3]; | ||
378 | } lvt_error; | ||
379 | |||
380 | /*380*/ struct { /* Timer Initial Count Register */ | ||
381 | u32 initial_count; | ||
382 | u32 __reserved_2[3]; | ||
383 | } timer_icr; | ||
384 | |||
385 | /*390*/ const | ||
386 | struct { /* Timer Current Count Register */ | ||
387 | u32 curr_count; | ||
388 | u32 __reserved_2[3]; | ||
389 | } timer_ccr; | ||
390 | |||
391 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; | ||
392 | |||
393 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; | ||
394 | |||
395 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; | ||
396 | |||
397 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; | ||
398 | |||
399 | /*3E0*/ struct { /* Timer Divide Configuration Register */ | ||
400 | u32 divisor : 4, | ||
401 | __reserved_1 : 28; | ||
402 | u32 __reserved_2[3]; | ||
403 | } timer_dcr; | ||
404 | |||
405 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; | ||
406 | |||
407 | } __attribute__ ((packed)); | ||
408 | |||
409 | #undef u32 | ||
410 | |||
411 | #define BAD_APICID 0xFFu | ||
412 | |||
5 | #endif | 413 | #endif |
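A note on the struct local_apic layout added above: every register occupies one 16-byte slot (one u32 of payload plus three reserved u32s), so field offsets coincide with the flat APIC_* register offsets earlier in the header. A standalone sanity-check sketch, assuming the struct is in scope and GCC's x86 bitfield layout (compilation fails on a mismatch via a negative array size):

	#include <stddef.h>
	/* Compile-time offset check; CHECK_OFF is a hypothetical helper. */
	#define CHECK_OFF(field, off) \
		typedef char check_##field[(offsetof(struct local_apic, field) == (off)) ? 1 : -1]
	CHECK_OFF(id,        0x020);	/* APIC_ID      */
	CHECK_OFF(tpr,       0x080);	/* APIC_TASKPRI */
	CHECK_OFF(svr,       0x0F0);	/* APIC_SPIV    */
	CHECK_OFF(esr,       0x280);	/* APIC_ESR     */
	CHECK_OFF(icr1,      0x300);	/* APIC_ICR     */
	CHECK_OFF(timer_dcr, 0x3E0);	/* APIC_TDCR    */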
diff --git a/include/asm-x86/apicdef_32.h b/include/asm-x86/apicdef_32.h deleted file mode 100644 index 9f6995341fdc..000000000000 --- a/include/asm-x86/apicdef_32.h +++ /dev/null | |||
@@ -1,375 +0,0 @@ | |||
1 | #ifndef __ASM_APICDEF_H | ||
2 | #define __ASM_APICDEF_H | ||
3 | |||
4 | /* | ||
5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) | ||
6 | * | ||
7 | * Alan Cox <Alan.Cox@linux.org>, 1995. | ||
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | ||
9 | */ | ||
10 | |||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | ||
12 | |||
13 | #define APIC_ID 0x20 | ||
14 | #define APIC_LVR 0x30 | ||
15 | #define APIC_LVR_MASK 0xFF00FF | ||
16 | #define GET_APIC_VERSION(x) ((x)&0xFF) | ||
17 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) | ||
18 | #define APIC_INTEGRATED(x) ((x)&0xF0) | ||
19 | #define APIC_XAPIC(x) ((x) >= 0x14) | ||
20 | #define APIC_TASKPRI 0x80 | ||
21 | #define APIC_TPRI_MASK 0xFF | ||
22 | #define APIC_ARBPRI 0x90 | ||
23 | #define APIC_ARBPRI_MASK 0xFF | ||
24 | #define APIC_PROCPRI 0xA0 | ||
25 | #define APIC_EOI 0xB0 | ||
26 | #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ | ||
27 | #define APIC_RRR 0xC0 | ||
28 | #define APIC_LDR 0xD0 | ||
29 | #define APIC_LDR_MASK (0xFF<<24) | ||
30 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) | ||
31 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | ||
32 | #define APIC_ALL_CPUS 0xFF | ||
33 | #define APIC_DFR 0xE0 | ||
34 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | ||
35 | #define APIC_DFR_FLAT 0xFFFFFFFFul | ||
36 | #define APIC_SPIV 0xF0 | ||
37 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | ||
38 | #define APIC_SPIV_APIC_ENABLED (1<<8) | ||
39 | #define APIC_ISR 0x100 | ||
40 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ | ||
41 | #define APIC_TMR 0x180 | ||
42 | #define APIC_IRR 0x200 | ||
43 | #define APIC_ESR 0x280 | ||
44 | #define APIC_ESR_SEND_CS 0x00001 | ||
45 | #define APIC_ESR_RECV_CS 0x00002 | ||
46 | #define APIC_ESR_SEND_ACC 0x00004 | ||
47 | #define APIC_ESR_RECV_ACC 0x00008 | ||
48 | #define APIC_ESR_SENDILL 0x00020 | ||
49 | #define APIC_ESR_RECVILL 0x00040 | ||
50 | #define APIC_ESR_ILLREGA 0x00080 | ||
51 | #define APIC_ICR 0x300 | ||
52 | #define APIC_DEST_SELF 0x40000 | ||
53 | #define APIC_DEST_ALLINC 0x80000 | ||
54 | #define APIC_DEST_ALLBUT 0xC0000 | ||
55 | #define APIC_ICR_RR_MASK 0x30000 | ||
56 | #define APIC_ICR_RR_INVALID 0x00000 | ||
57 | #define APIC_ICR_RR_INPROG 0x10000 | ||
58 | #define APIC_ICR_RR_VALID 0x20000 | ||
59 | #define APIC_INT_LEVELTRIG 0x08000 | ||
60 | #define APIC_INT_ASSERT 0x04000 | ||
61 | #define APIC_ICR_BUSY 0x01000 | ||
62 | #define APIC_DEST_LOGICAL 0x00800 | ||
63 | #define APIC_DM_FIXED 0x00000 | ||
64 | #define APIC_DM_LOWEST 0x00100 | ||
65 | #define APIC_DM_SMI 0x00200 | ||
66 | #define APIC_DM_REMRD 0x00300 | ||
67 | #define APIC_DM_NMI 0x00400 | ||
68 | #define APIC_DM_INIT 0x00500 | ||
69 | #define APIC_DM_STARTUP 0x00600 | ||
70 | #define APIC_DM_EXTINT 0x00700 | ||
71 | #define APIC_VECTOR_MASK 0x000FF | ||
72 | #define APIC_ICR2 0x310 | ||
73 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | ||
74 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | ||
75 | #define APIC_LVTT 0x320 | ||
76 | #define APIC_LVTTHMR 0x330 | ||
77 | #define APIC_LVTPC 0x340 | ||
78 | #define APIC_LVT0 0x350 | ||
79 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | ||
80 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | ||
81 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | ||
82 | #define APIC_TIMER_BASE_CLKIN 0x0 | ||
83 | #define APIC_TIMER_BASE_TMBASE 0x1 | ||
84 | #define APIC_TIMER_BASE_DIV 0x2 | ||
85 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | ||
86 | #define APIC_LVT_MASKED (1<<16) | ||
87 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | ||
88 | #define APIC_LVT_REMOTE_IRR (1<<14) | ||
89 | #define APIC_INPUT_POLARITY (1<<13) | ||
90 | #define APIC_SEND_PENDING (1<<12) | ||
91 | #define APIC_MODE_MASK 0x700 | ||
92 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | ||
93 | #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) | ||
94 | #define APIC_MODE_FIXED 0x0 | ||
95 | #define APIC_MODE_NMI 0x4 | ||
96 | #define APIC_MODE_EXTINT 0x7 | ||
97 | #define APIC_LVT1 0x360 | ||
98 | #define APIC_LVTERR 0x370 | ||
99 | #define APIC_TMICT 0x380 | ||
100 | #define APIC_TMCCT 0x390 | ||
101 | #define APIC_TDCR 0x3E0 | ||
102 | #define APIC_TDR_DIV_TMBASE (1<<2) | ||
103 | #define APIC_TDR_DIV_1 0xB | ||
104 | #define APIC_TDR_DIV_2 0x0 | ||
105 | #define APIC_TDR_DIV_4 0x1 | ||
106 | #define APIC_TDR_DIV_8 0x2 | ||
107 | #define APIC_TDR_DIV_16 0x3 | ||
108 | #define APIC_TDR_DIV_32 0x8 | ||
109 | #define APIC_TDR_DIV_64 0x9 | ||
110 | #define APIC_TDR_DIV_128 0xA | ||
111 | |||
112 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | ||
113 | |||
114 | #define MAX_IO_APICS 64 | ||
115 | |||
116 | /* | ||
117 | * the local APIC register structure, memory mapped. Not terribly well | ||
118 | * tested, but we might eventually use this one in the future - the | ||
119 | * reason we cannot use it right now is the P5 APIC: it has an | ||
120 | * erratum that allows only 32-bit reads and writes, not 8-bit ones ... | ||
121 | */ | ||
122 | #define u32 unsigned int | ||
123 | |||
124 | |||
125 | struct local_apic { | ||
126 | |||
127 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; | ||
128 | |||
129 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; | ||
130 | |||
131 | /*020*/ struct { /* APIC ID Register */ | ||
132 | u32 __reserved_1 : 24, | ||
133 | phys_apic_id : 4, | ||
134 | __reserved_2 : 4; | ||
135 | u32 __reserved[3]; | ||
136 | } id; | ||
137 | |||
138 | /*030*/ const | ||
139 | struct { /* APIC Version Register */ | ||
140 | u32 version : 8, | ||
141 | __reserved_1 : 8, | ||
142 | max_lvt : 8, | ||
143 | __reserved_2 : 8; | ||
144 | u32 __reserved[3]; | ||
145 | } version; | ||
146 | |||
147 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; | ||
148 | |||
149 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; | ||
150 | |||
151 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; | ||
152 | |||
153 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; | ||
154 | |||
155 | /*080*/ struct { /* Task Priority Register */ | ||
156 | u32 priority : 8, | ||
157 | __reserved_1 : 24; | ||
158 | u32 __reserved_2[3]; | ||
159 | } tpr; | ||
160 | |||
161 | /*090*/ const | ||
162 | struct { /* Arbitration Priority Register */ | ||
163 | u32 priority : 8, | ||
164 | __reserved_1 : 24; | ||
165 | u32 __reserved_2[3]; | ||
166 | } apr; | ||
167 | |||
168 | /*0A0*/ const | ||
169 | struct { /* Processor Priority Register */ | ||
170 | u32 priority : 8, | ||
171 | __reserved_1 : 24; | ||
172 | u32 __reserved_2[3]; | ||
173 | } ppr; | ||
174 | |||
175 | /*0B0*/ struct { /* End Of Interrupt Register */ | ||
176 | u32 eoi; | ||
177 | u32 __reserved[3]; | ||
178 | } eoi; | ||
179 | |||
180 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; | ||
181 | |||
182 | /*0D0*/ struct { /* Logical Destination Register */ | ||
183 | u32 __reserved_1 : 24, | ||
184 | logical_dest : 8; | ||
185 | u32 __reserved_2[3]; | ||
186 | } ldr; | ||
187 | |||
188 | /*0E0*/ struct { /* Destination Format Register */ | ||
189 | u32 __reserved_1 : 28, | ||
190 | model : 4; | ||
191 | u32 __reserved_2[3]; | ||
192 | } dfr; | ||
193 | |||
194 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ | ||
195 | u32 spurious_vector : 8, | ||
196 | apic_enabled : 1, | ||
197 | focus_cpu : 1, | ||
198 | __reserved_2 : 22; | ||
199 | u32 __reserved_3[3]; | ||
200 | } svr; | ||
201 | |||
202 | /*100*/ struct { /* In Service Register */ | ||
203 | /*170*/ u32 bitfield; | ||
204 | u32 __reserved[3]; | ||
205 | } isr [8]; | ||
206 | |||
207 | /*180*/ struct { /* Trigger Mode Register */ | ||
208 | /*1F0*/ u32 bitfield; | ||
209 | u32 __reserved[3]; | ||
210 | } tmr [8]; | ||
211 | |||
212 | /*200*/ struct { /* Interrupt Request Register */ | ||
213 | /*270*/ u32 bitfield; | ||
214 | u32 __reserved[3]; | ||
215 | } irr [8]; | ||
216 | |||
217 | /*280*/ union { /* Error Status Register */ | ||
218 | struct { | ||
219 | u32 send_cs_error : 1, | ||
220 | receive_cs_error : 1, | ||
221 | send_accept_error : 1, | ||
222 | receive_accept_error : 1, | ||
223 | __reserved_1 : 1, | ||
224 | send_illegal_vector : 1, | ||
225 | receive_illegal_vector : 1, | ||
226 | illegal_register_address : 1, | ||
227 | __reserved_2 : 24; | ||
228 | u32 __reserved_3[3]; | ||
229 | } error_bits; | ||
230 | struct { | ||
231 | u32 errors; | ||
232 | u32 __reserved_3[3]; | ||
233 | } all_errors; | ||
234 | } esr; | ||
235 | |||
236 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; | ||
237 | |||
238 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; | ||
239 | |||
240 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; | ||
241 | |||
242 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; | ||
243 | |||
244 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; | ||
245 | |||
246 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; | ||
247 | |||
248 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; | ||
249 | |||
250 | /*300*/ struct { /* Interrupt Command Register 1 */ | ||
251 | u32 vector : 8, | ||
252 | delivery_mode : 3, | ||
253 | destination_mode : 1, | ||
254 | delivery_status : 1, | ||
255 | __reserved_1 : 1, | ||
256 | level : 1, | ||
257 | trigger : 1, | ||
258 | __reserved_2 : 2, | ||
259 | shorthand : 2, | ||
260 | __reserved_3 : 12; | ||
261 | u32 __reserved_4[3]; | ||
262 | } icr1; | ||
263 | |||
264 | /*310*/ struct { /* Interrupt Command Register 2 */ | ||
265 | union { | ||
266 | u32 __reserved_1 : 24, | ||
267 | phys_dest : 4, | ||
268 | __reserved_2 : 4; | ||
269 | u32 __reserved_3 : 24, | ||
270 | logical_dest : 8; | ||
271 | } dest; | ||
272 | u32 __reserved_4[3]; | ||
273 | } icr2; | ||
274 | |||
275 | /*320*/ struct { /* LVT - Timer */ | ||
276 | u32 vector : 8, | ||
277 | __reserved_1 : 4, | ||
278 | delivery_status : 1, | ||
279 | __reserved_2 : 3, | ||
280 | mask : 1, | ||
281 | timer_mode : 1, | ||
282 | __reserved_3 : 14; | ||
283 | u32 __reserved_4[3]; | ||
284 | } lvt_timer; | ||
285 | |||
286 | /*330*/ struct { /* LVT - Thermal Sensor */ | ||
287 | u32 vector : 8, | ||
288 | delivery_mode : 3, | ||
289 | __reserved_1 : 1, | ||
290 | delivery_status : 1, | ||
291 | __reserved_2 : 3, | ||
292 | mask : 1, | ||
293 | __reserved_3 : 15; | ||
294 | u32 __reserved_4[3]; | ||
295 | } lvt_thermal; | ||
296 | |||
297 | /*340*/ struct { /* LVT - Performance Counter */ | ||
298 | u32 vector : 8, | ||
299 | delivery_mode : 3, | ||
300 | __reserved_1 : 1, | ||
301 | delivery_status : 1, | ||
302 | __reserved_2 : 3, | ||
303 | mask : 1, | ||
304 | __reserved_3 : 15; | ||
305 | u32 __reserved_4[3]; | ||
306 | } lvt_pc; | ||
307 | |||
308 | /*350*/ struct { /* LVT - LINT0 */ | ||
309 | u32 vector : 8, | ||
310 | delivery_mode : 3, | ||
311 | __reserved_1 : 1, | ||
312 | delivery_status : 1, | ||
313 | polarity : 1, | ||
314 | remote_irr : 1, | ||
315 | trigger : 1, | ||
316 | mask : 1, | ||
317 | __reserved_2 : 15; | ||
318 | u32 __reserved_3[3]; | ||
319 | } lvt_lint0; | ||
320 | |||
321 | /*360*/ struct { /* LVT - LINT1 */ | ||
322 | u32 vector : 8, | ||
323 | delivery_mode : 3, | ||
324 | __reserved_1 : 1, | ||
325 | delivery_status : 1, | ||
326 | polarity : 1, | ||
327 | remote_irr : 1, | ||
328 | trigger : 1, | ||
329 | mask : 1, | ||
330 | __reserved_2 : 15; | ||
331 | u32 __reserved_3[3]; | ||
332 | } lvt_lint1; | ||
333 | |||
334 | /*370*/ struct { /* LVT - Error */ | ||
335 | u32 vector : 8, | ||
336 | __reserved_1 : 4, | ||
337 | delivery_status : 1, | ||
338 | __reserved_2 : 3, | ||
339 | mask : 1, | ||
340 | __reserved_3 : 15; | ||
341 | u32 __reserved_4[3]; | ||
342 | } lvt_error; | ||
343 | |||
344 | /*380*/ struct { /* Timer Initial Count Register */ | ||
345 | u32 initial_count; | ||
346 | u32 __reserved_2[3]; | ||
347 | } timer_icr; | ||
348 | |||
349 | /*390*/ const | ||
350 | struct { /* Timer Current Count Register */ | ||
351 | u32 curr_count; | ||
352 | u32 __reserved_2[3]; | ||
353 | } timer_ccr; | ||
354 | |||
355 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; | ||
356 | |||
357 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; | ||
358 | |||
359 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; | ||
360 | |||
361 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; | ||
362 | |||
363 | /*3E0*/ struct { /* Timer Divide Configuration Register */ | ||
364 | u32 divisor : 4, | ||
365 | __reserved_1 : 28; | ||
366 | u32 __reserved_2[3]; | ||
367 | } timer_dcr; | ||
368 | |||
369 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; | ||
370 | |||
371 | } __attribute__ ((packed)); | ||
372 | |||
373 | #undef u32 | ||
374 | |||
375 | #endif | ||
diff --git a/include/asm-x86/apicdef_64.h b/include/asm-x86/apicdef_64.h deleted file mode 100644 index 1dd40067c67c..000000000000 --- a/include/asm-x86/apicdef_64.h +++ /dev/null | |||
@@ -1,392 +0,0 @@ | |||
1 | #ifndef __ASM_APICDEF_H | ||
2 | #define __ASM_APICDEF_H | ||
3 | |||
4 | /* | ||
5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) | ||
6 | * | ||
7 | * Alan Cox <Alan.Cox@linux.org>, 1995. | ||
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | ||
9 | */ | ||
10 | |||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | ||
12 | |||
13 | #define APIC_ID 0x20 | ||
14 | #define APIC_ID_MASK (0xFFu<<24) | ||
15 | #define GET_APIC_ID(x) (((x)>>24)&0xFFu) | ||
16 | #define SET_APIC_ID(x) (((x)<<24)) | ||
17 | #define APIC_LVR 0x30 | ||
18 | #define APIC_LVR_MASK 0xFF00FF | ||
19 | #define GET_APIC_VERSION(x) ((x)&0xFFu) | ||
20 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) | ||
21 | #define APIC_INTEGRATED(x) ((x)&0xF0u) | ||
22 | #define APIC_TASKPRI 0x80 | ||
23 | #define APIC_TPRI_MASK 0xFFu | ||
24 | #define APIC_ARBPRI 0x90 | ||
25 | #define APIC_ARBPRI_MASK 0xFFu | ||
26 | #define APIC_PROCPRI 0xA0 | ||
27 | #define APIC_EOI 0xB0 | ||
28 | #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ | ||
29 | #define APIC_RRR 0xC0 | ||
30 | #define APIC_LDR 0xD0 | ||
31 | #define APIC_LDR_MASK (0xFFu<<24) | ||
32 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) | ||
33 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | ||
34 | #define APIC_ALL_CPUS 0xFFu | ||
35 | #define APIC_DFR 0xE0 | ||
36 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | ||
37 | #define APIC_DFR_FLAT 0xFFFFFFFFul | ||
38 | #define APIC_SPIV 0xF0 | ||
39 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | ||
40 | #define APIC_SPIV_APIC_ENABLED (1<<8) | ||
41 | #define APIC_ISR 0x100 | ||
42 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ | ||
43 | #define APIC_TMR 0x180 | ||
44 | #define APIC_IRR 0x200 | ||
45 | #define APIC_ESR 0x280 | ||
46 | #define APIC_ESR_SEND_CS 0x00001 | ||
47 | #define APIC_ESR_RECV_CS 0x00002 | ||
48 | #define APIC_ESR_SEND_ACC 0x00004 | ||
49 | #define APIC_ESR_RECV_ACC 0x00008 | ||
50 | #define APIC_ESR_SENDILL 0x00020 | ||
51 | #define APIC_ESR_RECVILL 0x00040 | ||
52 | #define APIC_ESR_ILLREGA 0x00080 | ||
53 | #define APIC_ICR 0x300 | ||
54 | #define APIC_DEST_SELF 0x40000 | ||
55 | #define APIC_DEST_ALLINC 0x80000 | ||
56 | #define APIC_DEST_ALLBUT 0xC0000 | ||
57 | #define APIC_ICR_RR_MASK 0x30000 | ||
58 | #define APIC_ICR_RR_INVALID 0x00000 | ||
59 | #define APIC_ICR_RR_INPROG 0x10000 | ||
60 | #define APIC_ICR_RR_VALID 0x20000 | ||
61 | #define APIC_INT_LEVELTRIG 0x08000 | ||
62 | #define APIC_INT_ASSERT 0x04000 | ||
63 | #define APIC_ICR_BUSY 0x01000 | ||
64 | #define APIC_DEST_LOGICAL 0x00800 | ||
65 | #define APIC_DEST_PHYSICAL 0x00000 | ||
66 | #define APIC_DM_FIXED 0x00000 | ||
67 | #define APIC_DM_LOWEST 0x00100 | ||
68 | #define APIC_DM_SMI 0x00200 | ||
69 | #define APIC_DM_REMRD 0x00300 | ||
70 | #define APIC_DM_NMI 0x00400 | ||
71 | #define APIC_DM_INIT 0x00500 | ||
72 | #define APIC_DM_STARTUP 0x00600 | ||
73 | #define APIC_DM_EXTINT 0x00700 | ||
74 | #define APIC_VECTOR_MASK 0x000FF | ||
75 | #define APIC_ICR2 0x310 | ||
76 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | ||
77 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | ||
78 | #define APIC_LVTT 0x320 | ||
79 | #define APIC_LVTTHMR 0x330 | ||
80 | #define APIC_LVTPC 0x340 | ||
81 | #define APIC_LVT0 0x350 | ||
82 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | ||
83 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | ||
84 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | ||
85 | #define APIC_TIMER_BASE_CLKIN 0x0 | ||
86 | #define APIC_TIMER_BASE_TMBASE 0x1 | ||
87 | #define APIC_TIMER_BASE_DIV 0x2 | ||
88 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | ||
89 | #define APIC_LVT_MASKED (1<<16) | ||
90 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | ||
91 | #define APIC_LVT_REMOTE_IRR (1<<14) | ||
92 | #define APIC_INPUT_POLARITY (1<<13) | ||
93 | #define APIC_SEND_PENDING (1<<12) | ||
94 | #define APIC_MODE_MASK 0x700 | ||
95 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | ||
96 | #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) | ||
97 | #define APIC_MODE_FIXED 0x0 | ||
98 | #define APIC_MODE_NMI 0x4 | ||
99 | #define APIC_MODE_EXTINT 0x7 | ||
100 | #define APIC_LVT1 0x360 | ||
101 | #define APIC_LVTERR 0x370 | ||
102 | #define APIC_TMICT 0x380 | ||
103 | #define APIC_TMCCT 0x390 | ||
104 | #define APIC_TDCR 0x3E0 | ||
105 | #define APIC_TDR_DIV_TMBASE (1<<2) | ||
106 | #define APIC_TDR_DIV_1 0xB | ||
107 | #define APIC_TDR_DIV_2 0x0 | ||
108 | #define APIC_TDR_DIV_4 0x1 | ||
109 | #define APIC_TDR_DIV_8 0x2 | ||
110 | #define APIC_TDR_DIV_16 0x3 | ||
111 | #define APIC_TDR_DIV_32 0x8 | ||
112 | #define APIC_TDR_DIV_64 0x9 | ||
113 | #define APIC_TDR_DIV_128 0xA | ||
114 | |||
115 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | ||
116 | |||
117 | #define MAX_IO_APICS 128 | ||
118 | #define MAX_LOCAL_APIC 256 | ||
119 | |||
120 | /* | ||
121 | * All x86-64 systems are xAPIC compatible. | ||
122 | * In the following, "apicid" is a physical APIC ID. | ||
123 | */ | ||
124 | #define XAPIC_DEST_CPUS_SHIFT 4 | ||
125 | #define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) | ||
126 | #define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) | ||
127 | #define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) | ||
128 | #define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT) | ||
129 | #define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK) | ||
130 | #define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT) | ||
131 | |||
132 | /* | ||
133 | * the local APIC register structure, memory mapped. Not terribly well | ||
134 | * tested, but we might eventually use this one in the future - the | ||
135 | * reason we cannot use it right now is the P5 APIC: it has an | ||
136 | * erratum that allows only 32-bit reads and writes, not 8-bit ones ... | ||
137 | */ | ||
138 | #define u32 unsigned int | ||
139 | |||
140 | struct local_apic { | ||
141 | |||
142 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; | ||
143 | |||
144 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; | ||
145 | |||
146 | /*020*/ struct { /* APIC ID Register */ | ||
147 | u32 __reserved_1 : 24, | ||
148 | phys_apic_id : 4, | ||
149 | __reserved_2 : 4; | ||
150 | u32 __reserved[3]; | ||
151 | } id; | ||
152 | |||
153 | /*030*/ const | ||
154 | struct { /* APIC Version Register */ | ||
155 | u32 version : 8, | ||
156 | __reserved_1 : 8, | ||
157 | max_lvt : 8, | ||
158 | __reserved_2 : 8; | ||
159 | u32 __reserved[3]; | ||
160 | } version; | ||
161 | |||
162 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; | ||
163 | |||
164 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; | ||
165 | |||
166 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; | ||
167 | |||
168 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; | ||
169 | |||
170 | /*080*/ struct { /* Task Priority Register */ | ||
171 | u32 priority : 8, | ||
172 | __reserved_1 : 24; | ||
173 | u32 __reserved_2[3]; | ||
174 | } tpr; | ||
175 | |||
176 | /*090*/ const | ||
177 | struct { /* Arbitration Priority Register */ | ||
178 | u32 priority : 8, | ||
179 | __reserved_1 : 24; | ||
180 | u32 __reserved_2[3]; | ||
181 | } apr; | ||
182 | |||
183 | /*0A0*/ const | ||
184 | struct { /* Processor Priority Register */ | ||
185 | u32 priority : 8, | ||
186 | __reserved_1 : 24; | ||
187 | u32 __reserved_2[3]; | ||
188 | } ppr; | ||
189 | |||
190 | /*0B0*/ struct { /* End Of Interrupt Register */ | ||
191 | u32 eoi; | ||
192 | u32 __reserved[3]; | ||
193 | } eoi; | ||
194 | |||
195 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; | ||
196 | |||
197 | /*0D0*/ struct { /* Logical Destination Register */ | ||
198 | u32 __reserved_1 : 24, | ||
199 | logical_dest : 8; | ||
200 | u32 __reserved_2[3]; | ||
201 | } ldr; | ||
202 | |||
203 | /*0E0*/ struct { /* Destination Format Register */ | ||
204 | u32 __reserved_1 : 28, | ||
205 | model : 4; | ||
206 | u32 __reserved_2[3]; | ||
207 | } dfr; | ||
208 | |||
209 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ | ||
210 | u32 spurious_vector : 8, | ||
211 | apic_enabled : 1, | ||
212 | focus_cpu : 1, | ||
213 | __reserved_2 : 22; | ||
214 | u32 __reserved_3[3]; | ||
215 | } svr; | ||
216 | |||
217 | /*100*/ struct { /* In Service Register */ | ||
218 | /*170*/ u32 bitfield; | ||
219 | u32 __reserved[3]; | ||
220 | } isr [8]; | ||
221 | |||
222 | /*180*/ struct { /* Trigger Mode Register */ | ||
223 | /*1F0*/ u32 bitfield; | ||
224 | u32 __reserved[3]; | ||
225 | } tmr [8]; | ||
226 | |||
227 | /*200*/ struct { /* Interrupt Request Register */ | ||
228 | /*270*/ u32 bitfield; | ||
229 | u32 __reserved[3]; | ||
230 | } irr [8]; | ||
231 | |||
232 | /*280*/ union { /* Error Status Register */ | ||
233 | struct { | ||
234 | u32 send_cs_error : 1, | ||
235 | receive_cs_error : 1, | ||
236 | send_accept_error : 1, | ||
237 | receive_accept_error : 1, | ||
238 | __reserved_1 : 1, | ||
239 | send_illegal_vector : 1, | ||
240 | receive_illegal_vector : 1, | ||
241 | illegal_register_address : 1, | ||
242 | __reserved_2 : 24; | ||
243 | u32 __reserved_3[3]; | ||
244 | } error_bits; | ||
245 | struct { | ||
246 | u32 errors; | ||
247 | u32 __reserved_3[3]; | ||
248 | } all_errors; | ||
249 | } esr; | ||
250 | |||
251 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; | ||
252 | |||
253 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; | ||
254 | |||
255 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; | ||
256 | |||
257 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; | ||
258 | |||
259 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; | ||
260 | |||
261 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; | ||
262 | |||
263 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; | ||
264 | |||
265 | /*300*/ struct { /* Interrupt Command Register 1 */ | ||
266 | u32 vector : 8, | ||
267 | delivery_mode : 3, | ||
268 | destination_mode : 1, | ||
269 | delivery_status : 1, | ||
270 | __reserved_1 : 1, | ||
271 | level : 1, | ||
272 | trigger : 1, | ||
273 | __reserved_2 : 2, | ||
274 | shorthand : 2, | ||
275 | __reserved_3 : 12; | ||
276 | u32 __reserved_4[3]; | ||
277 | } icr1; | ||
278 | |||
279 | /*310*/ struct { /* Interrupt Command Register 2 */ | ||
280 | union { | ||
281 | u32 __reserved_1 : 24, | ||
282 | phys_dest : 4, | ||
283 | __reserved_2 : 4; | ||
284 | u32 __reserved_3 : 24, | ||
285 | logical_dest : 8; | ||
286 | } dest; | ||
287 | u32 __reserved_4[3]; | ||
288 | } icr2; | ||
289 | |||
290 | /*320*/ struct { /* LVT - Timer */ | ||
291 | u32 vector : 8, | ||
292 | __reserved_1 : 4, | ||
293 | delivery_status : 1, | ||
294 | __reserved_2 : 3, | ||
295 | mask : 1, | ||
296 | timer_mode : 1, | ||
297 | __reserved_3 : 14; | ||
298 | u32 __reserved_4[3]; | ||
299 | } lvt_timer; | ||
300 | |||
301 | /*330*/ struct { /* LVT - Thermal Sensor */ | ||
302 | u32 vector : 8, | ||
303 | delivery_mode : 3, | ||
304 | __reserved_1 : 1, | ||
305 | delivery_status : 1, | ||
306 | __reserved_2 : 3, | ||
307 | mask : 1, | ||
308 | __reserved_3 : 15; | ||
309 | u32 __reserved_4[3]; | ||
310 | } lvt_thermal; | ||
311 | |||
312 | /*340*/ struct { /* LVT - Performance Counter */ | ||
313 | u32 vector : 8, | ||
314 | delivery_mode : 3, | ||
315 | __reserved_1 : 1, | ||
316 | delivery_status : 1, | ||
317 | __reserved_2 : 3, | ||
318 | mask : 1, | ||
319 | __reserved_3 : 15; | ||
320 | u32 __reserved_4[3]; | ||
321 | } lvt_pc; | ||
322 | |||
323 | /*350*/ struct { /* LVT - LINT0 */ | ||
324 | u32 vector : 8, | ||
325 | delivery_mode : 3, | ||
326 | __reserved_1 : 1, | ||
327 | delivery_status : 1, | ||
328 | polarity : 1, | ||
329 | remote_irr : 1, | ||
330 | trigger : 1, | ||
331 | mask : 1, | ||
332 | __reserved_2 : 15; | ||
333 | u32 __reserved_3[3]; | ||
334 | } lvt_lint0; | ||
335 | |||
336 | /*360*/ struct { /* LVT - LINT1 */ | ||
337 | u32 vector : 8, | ||
338 | delivery_mode : 3, | ||
339 | __reserved_1 : 1, | ||
340 | delivery_status : 1, | ||
341 | polarity : 1, | ||
342 | remote_irr : 1, | ||
343 | trigger : 1, | ||
344 | mask : 1, | ||
345 | __reserved_2 : 15; | ||
346 | u32 __reserved_3[3]; | ||
347 | } lvt_lint1; | ||
348 | |||
349 | /*370*/ struct { /* LVT - Error */ | ||
350 | u32 vector : 8, | ||
351 | __reserved_1 : 4, | ||
352 | delivery_status : 1, | ||
353 | __reserved_2 : 3, | ||
354 | mask : 1, | ||
355 | __reserved_3 : 15; | ||
356 | u32 __reserved_4[3]; | ||
357 | } lvt_error; | ||
358 | |||
359 | /*380*/ struct { /* Timer Initial Count Register */ | ||
360 | u32 initial_count; | ||
361 | u32 __reserved_2[3]; | ||
362 | } timer_icr; | ||
363 | |||
364 | /*390*/ const | ||
365 | struct { /* Timer Current Count Register */ | ||
366 | u32 curr_count; | ||
367 | u32 __reserved_2[3]; | ||
368 | } timer_ccr; | ||
369 | |||
370 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; | ||
371 | |||
372 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; | ||
373 | |||
374 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; | ||
375 | |||
376 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; | ||
377 | |||
378 | /*3E0*/ struct { /* Timer Divide Configuration Register */ | ||
379 | u32 divisor : 4, | ||
380 | __reserved_1 : 28; | ||
381 | u32 __reserved_2[3]; | ||
382 | } timer_dcr; | ||
383 | |||
384 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; | ||
385 | |||
386 | } __attribute__ ((packed)); | ||
387 | |||
388 | #undef u32 | ||
389 | |||
390 | #define BAD_APICID 0xFFu | ||
391 | |||
392 | #endif | ||
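The xAPIC cluster macros deleted here survive unchanged in the unified apicdef.h above: a physical APIC ID splits into a 4-bit cluster number and a 4-bit CPU number within the cluster. A worked example with an arbitrary ID:

	unsigned int apicid  = 0x26;			 /* arbitrary example value */
	unsigned int cluster = APIC_CLUSTERID(apicid);	 /* (0x26 & 0xF0) >> 4 = 2  */
	unsigned int cpu     = APIC_CPUID(apicid);	 /* 0x26 & 0x0F        = 6  */
	/* NUM_APIC_CLUSTERS = (BAD_APICID + 1) >> 4 = 0x100 >> 4 = 16,
	 * i.e. up to 16 clusters of up to 16 CPUs each. */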
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h index a8c1fca9726d..768aee8a04ef 100644 --- a/include/asm-x86/arch_hooks.h +++ b/include/asm-x86/arch_hooks.h | |||
@@ -6,7 +6,7 @@ | |||
6 | /* | 6 | /* |
7 | * linux/include/asm/arch_hooks.h | 7 | * linux/include/asm/arch_hooks.h |
8 | * | 8 | * |
9 | * define the architecture specific hooks | 9 | * define the architecture specific hooks |
10 | */ | 10 | */ |
11 | 11 | ||
12 | /* these aren't arch hooks, they are generic routines | 12 | /* these aren't arch hooks, they are generic routines |
@@ -24,7 +24,4 @@ extern void trap_init_hook(void); | |||
24 | extern void time_init_hook(void); | 24 | extern void time_init_hook(void); |
25 | extern void mca_nmi_hook(void); | 25 | extern void mca_nmi_hook(void); |
26 | 26 | ||
27 | extern int setup_early_printk(char *); | ||
28 | extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); | ||
29 | |||
30 | #endif | 27 | #endif |
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h new file mode 100644 index 000000000000..90dec0c23646 --- /dev/null +++ b/include/asm-x86/asm.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef _ASM_X86_ASM_H | ||
2 | #define _ASM_X86_ASM_H | ||
3 | |||
4 | #ifdef CONFIG_X86_32 | ||
5 | /* 32 bits */ | ||
6 | |||
7 | # define _ASM_PTR " .long " | ||
8 | # define _ASM_ALIGN " .balign 4 " | ||
9 | # define _ASM_MOV_UL " movl " | ||
10 | |||
11 | # define _ASM_INC " incl " | ||
12 | # define _ASM_DEC " decl " | ||
13 | # define _ASM_ADD " addl " | ||
14 | # define _ASM_SUB " subl " | ||
15 | # define _ASM_XADD " xaddl " | ||
16 | |||
17 | #else | ||
18 | /* 64 bits */ | ||
19 | |||
20 | # define _ASM_PTR " .quad " | ||
21 | # define _ASM_ALIGN " .balign 8 " | ||
22 | # define _ASM_MOV_UL " movq " | ||
23 | |||
24 | # define _ASM_INC " incq " | ||
25 | # define _ASM_DEC " decq " | ||
26 | # define _ASM_ADD " addq " | ||
27 | # define _ASM_SUB " subq " | ||
28 | # define _ASM_XADD " xaddq " | ||
29 | |||
30 | #endif /* CONFIG_X86_32 */ | ||
31 | |||
32 | /* Exception table entry */ | ||
33 | # define _ASM_EXTABLE(from,to) \ | ||
34 | " .section __ex_table,\"a\"\n" \ | ||
35 | _ASM_ALIGN "\n" \ | ||
36 | _ASM_PTR #from "," #to "\n" \ | ||
37 | " .previous\n" | ||
38 | |||
39 | #endif /* _ASM_X86_ASM_H */ | ||
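The point of the new asm.h is that inline assembly can be written once for both word sizes; _ASM_EXTABLE in particular pairs a potentially faulting instruction with its fixup address in the __ex_table section. A hedged sketch of the canonical pattern, modeled on the kernel's __get_user_asm (the function name and operand layout here are illustrative, not lifted from this patch):

	/* Illustrative only: copy one word from 'src', returning -EFAULT
	 * (assumed available from the errno headers) if the load faults. */
	static int get_word(unsigned long *dst, const unsigned long *src)
	{
		int err = 0;
		unsigned long val;
		asm volatile("1:	mov %2,%1\n"	/* may fault */
			     "2:\n"
			     ".section .fixup,\"ax\"\n"
			     "3:	mov %3,%0\n"	/* fixup: set err */
			     "	jmp 2b\n"
			     ".previous\n"
			     _ASM_EXTABLE(1b, 3b)	/* fault at 1 -> go to 3 */
			     : "=r" (err), "=r" (val)
			     : "m" (*src), "i" (-EFAULT), "0" (err));
		*dst = val;
		return err;
	}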
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 07e3f6d4fe47..1a23ce1a5697 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h | |||
@@ -1,5 +1,321 @@ | |||
1 | #ifndef _ASM_X86_BITOPS_H | ||
2 | #define _ASM_X86_BITOPS_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 1992, Linus Torvalds. | ||
6 | */ | ||
7 | |||
8 | #ifndef _LINUX_BITOPS_H | ||
9 | #error only <linux/bitops.h> can be included directly | ||
10 | #endif | ||
11 | |||
12 | #include <linux/compiler.h> | ||
13 | #include <asm/alternative.h> | ||
14 | |||
15 | /* | ||
16 | * These have to be done with inline assembly: that way the bit-setting | ||
17 | * is guaranteed to be atomic. All bit operations return 0 if the bit | ||
18 | * was cleared before the operation and != 0 if it was not. | ||
19 | * | ||
20 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | ||
21 | */ | ||
22 | |||
23 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) | ||
24 | /* Technically wrong, but this avoids compilation errors on some gcc | ||
25 | versions. */ | ||
26 | #define ADDR "=m" (*(volatile long *) addr) | ||
27 | #else | ||
28 | #define ADDR "+m" (*(volatile long *) addr) | ||
29 | #endif | ||
30 | |||
31 | /** | ||
32 | * set_bit - Atomically set a bit in memory | ||
33 | * @nr: the bit to set | ||
34 | * @addr: the address to start counting from | ||
35 | * | ||
36 | * This function is atomic and may not be reordered. See __set_bit() | ||
37 | * if you do not require the atomic guarantees. | ||
38 | * | ||
39 | * Note: there are no guarantees that this function will not be reordered | ||
40 | * on non-x86 architectures, so if you are writing portable code, | ||
41 | * make sure not to rely on its reordering guarantees. | ||
42 | * | ||
43 | * Note that @nr may be almost arbitrarily large; this function is not | ||
44 | * restricted to acting on a single-word quantity. | ||
45 | */ | ||
46 | static inline void set_bit(int nr, volatile void *addr) | ||
47 | { | ||
48 | asm volatile(LOCK_PREFIX "bts %1,%0" | ||
49 | : ADDR | ||
50 | : "Ir" (nr) : "memory"); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * __set_bit - Set a bit in memory | ||
55 | * @nr: the bit to set | ||
56 | * @addr: the address to start counting from | ||
57 | * | ||
58 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
59 | * If it's called on the same region of memory simultaneously, the effect | ||
60 | * may be that only one operation succeeds. | ||
61 | */ | ||
62 | static inline void __set_bit(int nr, volatile void *addr) | ||
63 | { | ||
64 | asm volatile("bts %1,%0" | ||
65 | : ADDR | ||
66 | : "Ir" (nr) : "memory"); | ||
67 | } | ||
68 | |||
69 | |||
70 | /** | ||
71 | * clear_bit - Clears a bit in memory | ||
72 | * @nr: Bit to clear | ||
73 | * @addr: Address to start counting from | ||
74 | * | ||
75 | * clear_bit() is atomic and may not be reordered. However, it does | ||
76 | * not contain a memory barrier, so if it is used for locking purposes, | ||
77 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
78 | * in order to ensure changes are visible on other processors. | ||
79 | */ | ||
80 | static inline void clear_bit(int nr, volatile void *addr) | ||
81 | { | ||
82 | asm volatile(LOCK_PREFIX "btr %1,%0" | ||
83 | : ADDR | ||
84 | : "Ir" (nr)); | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * clear_bit_unlock - Clears a bit in memory | ||
89 | * @nr: Bit to clear | ||
90 | * @addr: Address to start counting from | ||
91 | * | ||
92 | * clear_bit() is atomic and implies release semantics before the memory | ||
93 | * operation. It can be used for an unlock. | ||
94 | */ | ||
95 | static inline void clear_bit_unlock(unsigned nr, volatile void *addr) | ||
96 | { | ||
97 | barrier(); | ||
98 | clear_bit(nr, addr); | ||
99 | } | ||
100 | |||
101 | static inline void __clear_bit(int nr, volatile void *addr) | ||
102 | { | ||
103 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * __clear_bit_unlock - Clears a bit in memory | ||
108 | * @nr: Bit to clear | ||
109 | * @addr: Address to start counting from | ||
110 | * | ||
111 | * __clear_bit() is non-atomic and implies release semantics before the memory | ||
112 | * operation. It can be used for an unlock if no other CPUs can concurrently | ||
113 | * modify other bits in the word. | ||
114 | * | ||
115 | * No memory barrier is required here, because x86 cannot reorder stores past | ||
116 | * older loads. Same principle as spin_unlock. | ||
117 | */ | ||
118 | static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) | ||
119 | { | ||
120 | barrier(); | ||
121 | __clear_bit(nr, addr); | ||
122 | } | ||
123 | |||
124 | #define smp_mb__before_clear_bit() barrier() | ||
125 | #define smp_mb__after_clear_bit() barrier() | ||
126 | |||
127 | /** | ||
128 | * __change_bit - Toggle a bit in memory | ||
129 | * @nr: the bit to change | ||
130 | * @addr: the address to start counting from | ||
131 | * | ||
132 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
133 | * If it's called on the same region of memory simultaneously, the effect | ||
134 | * may be that only one operation succeeds. | ||
135 | */ | ||
136 | static inline void __change_bit(int nr, volatile void *addr) | ||
137 | { | ||
138 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); | ||
139 | } | ||
140 | |||
141 | /** | ||
142 | * change_bit - Toggle a bit in memory | ||
143 | * @nr: Bit to change | ||
144 | * @addr: Address to start counting from | ||
145 | * | ||
146 | * change_bit() is atomic and may not be reordered. | ||
147 | * Note that @nr may be almost arbitrarily large; this function is not | ||
148 | * restricted to acting on a single-word quantity. | ||
149 | */ | ||
150 | static inline void change_bit(int nr, volatile void *addr) | ||
151 | { | ||
152 | asm volatile(LOCK_PREFIX "btc %1,%0" | ||
153 | : ADDR : "Ir" (nr)); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * test_and_set_bit - Set a bit and return its old value | ||
158 | * @nr: Bit to set | ||
159 | * @addr: Address to count from | ||
160 | * | ||
161 | * This operation is atomic and cannot be reordered. | ||
162 | * It also implies a memory barrier. | ||
163 | */ | ||
164 | static inline int test_and_set_bit(int nr, volatile void *addr) | ||
165 | { | ||
166 | int oldbit; | ||
167 | |||
168 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" | ||
169 | "sbb %0,%0" | ||
170 | : "=r" (oldbit), ADDR | ||
171 | : "Ir" (nr) : "memory"); | ||
172 | |||
173 | return oldbit; | ||
174 | } | ||
175 | |||
176 | /** | ||
177 | * test_and_set_bit_lock - Set a bit and return its old value for lock | ||
178 | * @nr: Bit to set | ||
179 | * @addr: Address to count from | ||
180 | * | ||
181 | * This is the same as test_and_set_bit on x86. | ||
182 | */ | ||
183 | static inline int test_and_set_bit_lock(int nr, volatile void *addr) | ||
184 | { | ||
185 | return test_and_set_bit(nr, addr); | ||
186 | } | ||
187 | |||
188 | /** | ||
189 | * __test_and_set_bit - Set a bit and return its old value | ||
190 | * @nr: Bit to set | ||
191 | * @addr: Address to count from | ||
192 | * | ||
193 | * This operation is non-atomic and can be reordered. | ||
194 | * If two instances of this operation race, one can appear to succeed | ||
195 | * but actually fail. You must protect multiple accesses with a lock. | ||
196 | */ | ||
197 | static inline int __test_and_set_bit(int nr, volatile void *addr) | ||
198 | { | ||
199 | int oldbit; | ||
200 | |||
201 | asm("bts %2,%1\n\t" | ||
202 | "sbb %0,%0" | ||
203 | : "=r" (oldbit), ADDR | ||
204 | : "Ir" (nr)); | ||
205 | return oldbit; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * test_and_clear_bit - Clear a bit and return its old value | ||
210 | * @nr: Bit to clear | ||
211 | * @addr: Address to count from | ||
212 | * | ||
213 | * This operation is atomic and cannot be reordered. | ||
214 | * It also implies a memory barrier. | ||
215 | */ | ||
216 | static inline int test_and_clear_bit(int nr, volatile void *addr) | ||
217 | { | ||
218 | int oldbit; | ||
219 | |||
220 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" | ||
221 | "sbb %0,%0" | ||
222 | : "=r" (oldbit), ADDR | ||
223 | : "Ir" (nr) : "memory"); | ||
224 | |||
225 | return oldbit; | ||
226 | } | ||
227 | |||
228 | /** | ||
229 | * __test_and_clear_bit - Clear a bit and return its old value | ||
230 | * @nr: Bit to clear | ||
231 | * @addr: Address to count from | ||
232 | * | ||
233 | * This operation is non-atomic and can be reordered. | ||
234 | * If two instances of this operation race, one can appear to succeed | ||
235 | * but actually fail. You must protect multiple accesses with a lock. | ||
236 | */ | ||
237 | static inline int __test_and_clear_bit(int nr, volatile void *addr) | ||
238 | { | ||
239 | int oldbit; | ||
240 | |||
241 | asm volatile("btr %2,%1\n\t" | ||
242 | "sbb %0,%0" | ||
243 | : "=r" (oldbit), ADDR | ||
244 | : "Ir" (nr)); | ||
245 | return oldbit; | ||
246 | } | ||
247 | |||
248 | /* WARNING: non-atomic and it can be reordered! */ | ||
249 | static inline int __test_and_change_bit(int nr, volatile void *addr) | ||
250 | { | ||
251 | int oldbit; | ||
252 | |||
253 | asm volatile("btc %2,%1\n\t" | ||
254 | "sbb %0,%0" | ||
255 | : "=r" (oldbit), ADDR | ||
256 | : "Ir" (nr) : "memory"); | ||
257 | |||
258 | return oldbit; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * test_and_change_bit - Change a bit and return its old value | ||
263 | * @nr: Bit to change | ||
264 | * @addr: Address to count from | ||
265 | * | ||
266 | * This operation is atomic and cannot be reordered. | ||
267 | * It also implies a memory barrier. | ||
268 | */ | ||
269 | static inline int test_and_change_bit(int nr, volatile void *addr) | ||
270 | { | ||
271 | int oldbit; | ||
272 | |||
273 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" | ||
274 | "sbb %0,%0" | ||
275 | : "=r" (oldbit), ADDR | ||
276 | : "Ir" (nr) : "memory"); | ||
277 | |||
278 | return oldbit; | ||
279 | } | ||
280 | |||
281 | static inline int constant_test_bit(int nr, const volatile void *addr) | ||
282 | { | ||
283 | return ((1UL << (nr % BITS_PER_LONG)) & | ||
284 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | ||
285 | } | ||
286 | |||
287 | static inline int variable_test_bit(int nr, volatile const void *addr) | ||
288 | { | ||
289 | int oldbit; | ||
290 | |||
291 | asm volatile("bt %2,%1\n\t" | ||
292 | "sbb %0,%0" | ||
293 | : "=r" (oldbit) | ||
294 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | ||
295 | |||
296 | return oldbit; | ||
297 | } | ||
298 | |||
299 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | ||
300 | /** | ||
301 | * test_bit - Determine whether a bit is set | ||
302 | * @nr: bit number to test | ||
303 | * @addr: Address to start counting from | ||
304 | */ | ||
305 | static int test_bit(int nr, const volatile unsigned long *addr); | ||
306 | #endif | ||
307 | |||
308 | #define test_bit(nr,addr) \ | ||
309 | (__builtin_constant_p(nr) ? \ | ||
310 | constant_test_bit((nr),(addr)) : \ | ||
311 | variable_test_bit((nr),(addr))) | ||
312 | |||
313 | #undef ADDR | ||
314 | |||
1 | #ifdef CONFIG_X86_32 | 315 | #ifdef CONFIG_X86_32 |
2 | # include "bitops_32.h" | 316 | # include "bitops_32.h" |
3 | #else | 317 | #else |
4 | # include "bitops_64.h" | 318 | # include "bitops_64.h" |
5 | #endif | 319 | #endif |
320 | |||
321 | #endif /* _ASM_X86_BITOPS_H */ | ||
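Taken together, the helpers unified above split into atomic forms (implicit LOCK prefix, safe across CPUs) and __-prefixed non-atomic forms. A short usage sketch of the lock-style pair documented above, with a hypothetical flag word:

	static unsigned long dev_flags;	/* bit 0 = "busy"; illustrative only */

	static int try_claim(void)
	{
		/* Atomic RMW; nonzero return means the bit was already set. */
		if (test_and_set_bit_lock(0, &dev_flags))
			return 0;	/* lost the race */
		return 1;
	}

	static void release(void)
	{
		/* Release semantics: earlier stores visible before the clear. */
		clear_bit_unlock(0, &dev_flags);
	}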
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index 0b40f6d20bea..e4d75fcf9c03 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h | |||
@@ -5,320 +5,12 @@ | |||
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _LINUX_BITOPS_H | ||
9 | #error only <linux/bitops.h> can be included directly | ||
10 | #endif | ||
11 | |||
12 | #include <linux/compiler.h> | ||
13 | #include <asm/alternative.h> | ||
14 | |||
15 | /* | ||
16 | * These have to be done with inline assembly: that way the bit-setting | ||
17 | * is guaranteed to be atomic. All bit operations return 0 if the bit | ||
18 | * was cleared before the operation and != 0 if it was not. | ||
19 | * | ||
20 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | ||
21 | */ | ||
22 | |||
23 | #define ADDR (*(volatile long *) addr) | ||
24 | |||
25 | /** | ||
26 | * set_bit - Atomically set a bit in memory | ||
27 | * @nr: the bit to set | ||
28 | * @addr: the address to start counting from | ||
29 | * | ||
30 | * This function is atomic and may not be reordered. See __set_bit() | ||
31 | * if you do not require the atomic guarantees. | ||
32 | * | ||
33 | * Note: there are no guarantees that this function will not be reordered | ||
34 | * on non-x86 architectures, so if you are writing portable code, | ||
35 | * make sure not to rely on its reordering guarantees. | ||
36 | * | ||
37 | * Note that @nr may be almost arbitrarily large; this function is not | ||
38 | * restricted to acting on a single-word quantity. | ||
39 | */ | ||
40 | static inline void set_bit(int nr, volatile unsigned long * addr) | ||
41 | { | ||
42 | __asm__ __volatile__( LOCK_PREFIX | ||
43 | "btsl %1,%0" | ||
44 | :"+m" (ADDR) | ||
45 | :"Ir" (nr)); | ||
46 | } | ||
47 | |||
48 | /** | ||
49 | * __set_bit - Set a bit in memory | ||
50 | * @nr: the bit to set | ||
51 | * @addr: the address to start counting from | ||
52 | * | ||
53 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
54 | * If it's called on the same region of memory simultaneously, the effect | ||
55 | * may be that only one operation succeeds. | ||
56 | */ | ||
57 | static inline void __set_bit(int nr, volatile unsigned long * addr) | ||
58 | { | ||
59 | __asm__( | ||
60 | "btsl %1,%0" | ||
61 | :"+m" (ADDR) | ||
62 | :"Ir" (nr)); | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * clear_bit - Clears a bit in memory | ||
67 | * @nr: Bit to clear | ||
68 | * @addr: Address to start counting from | ||
69 | * | ||
70 | * clear_bit() is atomic and may not be reordered. However, it does | ||
71 | * not contain a memory barrier, so if it is used for locking purposes, | ||
72 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
73 | * in order to ensure changes are visible on other processors. | ||
74 | */ | ||
75 | static inline void clear_bit(int nr, volatile unsigned long * addr) | ||
76 | { | ||
77 | __asm__ __volatile__( LOCK_PREFIX | ||
78 | "btrl %1,%0" | ||
79 | :"+m" (ADDR) | ||
80 | :"Ir" (nr)); | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * clear_bit_unlock - Clears a bit in memory | ||
85 | * @nr: Bit to clear | ||
86 | * @addr: Address to start counting from | ||
87 | * | ||
88 | * clear_bit() is atomic and implies release semantics before the memory | ||
89 | * operation. It can be used for an unlock. | ||
90 | */ | ||
91 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | ||
92 | { | ||
93 | barrier(); | ||
94 | clear_bit(nr, addr); | ||
95 | } | ||
96 | |||
97 | static inline void __clear_bit(int nr, volatile unsigned long * addr) | ||
98 | { | ||
99 | __asm__ __volatile__( | ||
100 | "btrl %1,%0" | ||
101 | :"+m" (ADDR) | ||
102 | :"Ir" (nr)); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * __clear_bit_unlock - Clears a bit in memory | ||
107 | * @nr: Bit to clear | ||
108 | * @addr: Address to start counting from | ||
109 | * | ||
110 | * __clear_bit() is non-atomic and implies release semantics before the memory | ||
111 | * operation. It can be used for an unlock if no other CPUs can concurrently | ||
112 | * modify other bits in the word. | ||
113 | * | ||
114 | * No memory barrier is required here, because x86 cannot reorder stores past | ||
115 | * older loads. Same principle as spin_unlock. | ||
116 | */ | ||
117 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | ||
118 | { | ||
119 | barrier(); | ||
120 | __clear_bit(nr, addr); | ||
121 | } | ||
122 | |||
123 | #define smp_mb__before_clear_bit() barrier() | ||
124 | #define smp_mb__after_clear_bit() barrier() | ||
125 | |||
126 | /** | ||
127 | * __change_bit - Toggle a bit in memory | ||
128 | * @nr: the bit to change | ||
129 | * @addr: the address to start counting from | ||
130 | * | ||
131 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
132 | * If it's called on the same region of memory simultaneously, the effect | ||
133 | * may be that only one operation succeeds. | ||
134 | */ | ||
135 | static inline void __change_bit(int nr, volatile unsigned long * addr) | ||
136 | { | ||
137 | __asm__ __volatile__( | ||
138 | "btcl %1,%0" | ||
139 | :"+m" (ADDR) | ||
140 | :"Ir" (nr)); | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * change_bit - Toggle a bit in memory | ||
145 | * @nr: Bit to change | ||
146 | * @addr: Address to start counting from | ||
147 | * | ||
148 | * change_bit() is atomic and may not be reordered on x86. It may be | ||
149 | * reordered on architectures other than x86. | ||
150 | * Note that @nr may be almost arbitrarily large; this function is not | ||
151 | * restricted to acting on a single-word quantity. | ||
152 | */ | ||
153 | static inline void change_bit(int nr, volatile unsigned long * addr) | ||
154 | { | ||
155 | __asm__ __volatile__( LOCK_PREFIX | ||
156 | "btcl %1,%0" | ||
157 | :"+m" (ADDR) | ||
158 | :"Ir" (nr)); | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * test_and_set_bit - Set a bit and return its old value | ||
163 | * @nr: Bit to set | ||
164 | * @addr: Address to count from | ||
165 | * | ||
166 | * This operation is atomic and cannot be reordered on x86. | ||
167 | * It may be reordered on architectures other than x86. | ||
168 | * It also implies a memory barrier. | ||
169 | */ | ||
170 | static inline int test_and_set_bit(int nr, volatile unsigned long * addr) | ||
171 | { | ||
172 | int oldbit; | ||
173 | |||
174 | __asm__ __volatile__( LOCK_PREFIX | ||
175 | "btsl %2,%1\n\tsbbl %0,%0" | ||
176 | :"=r" (oldbit),"+m" (ADDR) | ||
177 | :"Ir" (nr) : "memory"); | ||
178 | return oldbit; | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * test_and_set_bit_lock - Set a bit and return its old value for lock | ||
183 | * @nr: Bit to set | ||
184 | * @addr: Address to count from | ||
185 | * | ||
186 | * This is the same as test_and_set_bit on x86. | ||
187 | */ | ||
188 | static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr) | ||
189 | { | ||
190 | return test_and_set_bit(nr, addr); | ||
191 | } | ||
192 | |||
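The sbbl %0,%0 idiom above materializes the carry flag that btsl leaves behind: subtracting a register from itself with borrow yields 0 when CF=0 and -1 (all ones) when CF=1, so oldbit is nonzero exactly when the bit was already set. Together, test_and_set_bit_lock() and clear_bit_unlock() suffice for a one-bit lock; a minimal usage sketch (the bit number, flag word, and helper names are illustrative, not part of this header):

	/* Hypothetical bit-lock built on the primitives above. */
	#define MY_LOCK_BIT	0
	static unsigned long my_flags;

	static void my_lock(void)
	{
		/* spin until the old value was 0, i.e. we set the bit */
		while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
			cpu_relax();
	}

	static void my_unlock(void)
	{
		clear_bit_unlock(MY_LOCK_BIT, &my_flags);	/* release */
	}
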
193 | /** | ||
194 | * __test_and_set_bit - Set a bit and return its old value | ||
195 | * @nr: Bit to set | ||
196 | * @addr: Address to count from | ||
197 | * | ||
198 | * This operation is non-atomic and can be reordered. | ||
199 | * If two instances of this operation race, one can appear to succeed | ||
200 | * but actually fail. You must protect multiple accesses with a lock. | ||
201 | */ | ||
202 | static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) | ||
203 | { | ||
204 | int oldbit; | ||
205 | |||
206 | __asm__( | ||
207 | "btsl %2,%1\n\tsbbl %0,%0" | ||
208 | :"=r" (oldbit),"+m" (ADDR) | ||
209 | :"Ir" (nr)); | ||
210 | return oldbit; | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * test_and_clear_bit - Clear a bit and return its old value | ||
215 | * @nr: Bit to clear | ||
216 | * @addr: Address to count from | ||
217 | * | ||
218 | * This operation is atomic and cannot be reordered on x86. | ||
219 | * It can be reordered on architectures other than x86. | ||
220 | * It also implies a memory barrier. | ||
221 | */ | ||
222 | static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) | ||
223 | { | ||
224 | int oldbit; | ||
225 | |||
226 | __asm__ __volatile__( LOCK_PREFIX | ||
227 | "btrl %2,%1\n\tsbbl %0,%0" | ||
228 | :"=r" (oldbit),"+m" (ADDR) | ||
229 | :"Ir" (nr) : "memory"); | ||
230 | return oldbit; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * __test_and_clear_bit - Clear a bit and return its old value | ||
235 | * @nr: Bit to clear | ||
236 | * @addr: Address to count from | ||
237 | * | ||
238 | * This operation is non-atomic and can be reordered. | ||
239 | * If two instances of this operation race, one can appear to succeed | ||
240 | * but actually fail. You must protect multiple accesses with a lock. | ||
241 | */ | ||
242 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
243 | { | ||
244 | int oldbit; | ||
245 | |||
246 | __asm__( | ||
247 | "btrl %2,%1\n\tsbbl %0,%0" | ||
248 | :"=r" (oldbit),"+m" (ADDR) | ||
249 | :"Ir" (nr)); | ||
250 | return oldbit; | ||
251 | } | ||
252 | |||
253 | /* WARNING: non-atomic and it can be reordered! */ | ||
254 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
255 | { | ||
256 | int oldbit; | ||
257 | |||
258 | __asm__ __volatile__( | ||
259 | "btcl %2,%1\n\tsbbl %0,%0" | ||
260 | :"=r" (oldbit),"+m" (ADDR) | ||
261 | :"Ir" (nr) : "memory"); | ||
262 | return oldbit; | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * test_and_change_bit - Change a bit and return its old value | ||
267 | * @nr: Bit to change | ||
268 | * @addr: Address to count from | ||
269 | * | ||
270 | * This operation is atomic and cannot be reordered. | ||
271 | * It also implies a memory barrier. | ||
272 | */ | ||
273 | static inline int test_and_change_bit(int nr, volatile unsigned long* addr) | ||
274 | { | ||
275 | int oldbit; | ||
276 | |||
277 | __asm__ __volatile__( LOCK_PREFIX | ||
278 | "btcl %2,%1\n\tsbbl %0,%0" | ||
279 | :"=r" (oldbit),"+m" (ADDR) | ||
280 | :"Ir" (nr) : "memory"); | ||
281 | return oldbit; | ||
282 | } | ||
283 | |||
284 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | ||
285 | /** | ||
286 | * test_bit - Determine whether a bit is set | ||
287 | * @nr: bit number to test | ||
288 | * @addr: Address to start counting from | ||
289 | */ | ||
290 | static int test_bit(int nr, const volatile void * addr); | ||
291 | #endif | ||
292 | |||
293 | static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) | ||
294 | { | ||
295 | return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; | ||
296 | } | ||
297 | |||
298 | static inline int variable_test_bit(int nr, const volatile unsigned long * addr) | ||
299 | { | ||
300 | int oldbit; | ||
301 | |||
302 | __asm__ __volatile__( | ||
303 | "btl %2,%1\n\tsbbl %0,%0" | ||
304 | :"=r" (oldbit) | ||
305 | :"m" (ADDR),"Ir" (nr)); | ||
306 | return oldbit; | ||
307 | } | ||
308 | |||
309 | #define test_bit(nr,addr) \ | ||
310 | (__builtin_constant_p(nr) ? \ | ||
311 | constant_test_bit((nr),(addr)) : \ | ||
312 | variable_test_bit((nr),(addr))) | ||
313 | |||
314 | #undef ADDR | ||
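The nr >> 5 / nr & 31 arithmetic in constant_test_bit() is plain word and bit indexing: for nr = 70, 70 >> 5 = 2 selects the third 32-bit word and 70 & 31 = 6 selects bit 6 inside it. The test_bit() macro then picks an implementation at compile time via __builtin_constant_p(). A sketch of the dispatch (the map array and result variables are illustrative only):

	unsigned long map[4] = { 0, 0, 0, 0 };
	int n = 70;
	int is_set, also_set;

	set_bit(70, map);		/* sets bit 6 of map[2]: 70 >> 5 == 2, 70 & 31 == 6 */
	is_set   = test_bit(70, map);	/* constant nr: constant_test_bit(), plain C mask test */
	also_set = test_bit(n, map);	/* variable nr: variable_test_bit(), btl/sbbl asm */
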
315 | |||
316 | /** | 8 | /** |
317 | * find_first_zero_bit - find the first zero bit in a memory region | 9 | * find_first_zero_bit - find the first zero bit in a memory region |
318 | * @addr: The address to start the search at | 10 | * @addr: The address to start the search at |
319 | * @size: The maximum size to search | 11 | * @size: The maximum size to search |
320 | * | 12 | * |
321 | * Returns the bit-number of the first zero bit, not the number of the byte | 13 | * Returns the bit number of the first zero bit, not the number of the byte |
322 | * containing a bit. | 14 | * containing a bit. |
323 | */ | 15 | */ |
324 | static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) | 16 | static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) |
@@ -348,7 +40,7 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) | |||
348 | /** | 40 | /** |
349 | * find_next_zero_bit - find the first zero bit in a memory region | 41 | * find_next_zero_bit - find the first zero bit in a memory region |
350 | * @addr: The address to base the search on | 42 | * @addr: The address to base the search on |
351 | * @offset: The bitnumber to start searching at | 43 | * @offset: The bit number to start searching at |
352 | * @size: The maximum size to search | 44 | * @size: The maximum size to search |
353 | */ | 45 | */ |
354 | int find_next_zero_bit(const unsigned long *addr, int size, int offset); | 46 | int find_next_zero_bit(const unsigned long *addr, int size, int offset); |
@@ -372,7 +64,7 @@ static inline unsigned long __ffs(unsigned long word) | |||
372 | * @addr: The address to start the search at | 64 | * @addr: The address to start the search at |
373 | * @size: The maximum size to search | 65 | * @size: The maximum size to search |
374 | * | 66 | * |
375 | * Returns the bit-number of the first set bit, not the number of the byte | 67 | * Returns the bit number of the first set bit, not the number of the byte |
376 | * containing a bit. | 68 | * containing a bit. |
377 | */ | 69 | */ |
378 | static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) | 70 | static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) |
@@ -391,7 +83,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) | |||
391 | /** | 83 | /** |
392 | * find_next_bit - find the first set bit in a memory region | 84 | * find_next_bit - find the first set bit in a memory region |
393 | * @addr: The address to base the search on | 85 | * @addr: The address to base the search on |
394 | * @offset: The bitnumber to start searching at | 86 | * @offset: The bit number to start searching at |
395 | * @size: The maximum size to search | 87 | * @size: The maximum size to search |
396 | */ | 88 | */ |
397 | int find_next_bit(const unsigned long *addr, int size, int offset); | 89 | int find_next_bit(const unsigned long *addr, int size, int offset); |
@@ -460,10 +152,10 @@ static inline int fls(int x) | |||
460 | 152 | ||
461 | #include <asm-generic/bitops/ext2-non-atomic.h> | 153 | #include <asm-generic/bitops/ext2-non-atomic.h> |
462 | 154 | ||
463 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 155 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
464 | test_and_set_bit((nr),(unsigned long*)addr) | 156 | test_and_set_bit((nr), (unsigned long *)addr) |
465 | #define ext2_clear_bit_atomic(lock,nr, addr) \ | 157 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
466 | test_and_clear_bit((nr),(unsigned long*)addr) | 158 | test_and_clear_bit((nr), (unsigned long *)addr) |
467 | 159 | ||
468 | #include <asm-generic/bitops/minix.h> | 160 | #include <asm-generic/bitops/minix.h> |
469 | 161 | ||
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index 766bcc0470a6..aaf15194d536 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h | |||
@@ -5,303 +5,6 @@ | |||
5 | * Copyright 1992, Linus Torvalds. | 5 | * Copyright 1992, Linus Torvalds. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _LINUX_BITOPS_H | ||
9 | #error only <linux/bitops.h> can be included directly | ||
10 | #endif | ||
11 | |||
12 | #include <asm/alternative.h> | ||
13 | |||
14 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) | ||
15 | /* Technically wrong, but this avoids compilation errors on some gcc | ||
16 | versions. */ | ||
17 | #define ADDR "=m" (*(volatile long *) addr) | ||
18 | #else | ||
19 | #define ADDR "+m" (*(volatile long *) addr) | ||
20 | #endif | ||
21 | |||
22 | /** | ||
23 | * set_bit - Atomically set a bit in memory | ||
24 | * @nr: the bit to set | ||
25 | * @addr: the address to start counting from | ||
26 | * | ||
27 | * This function is atomic and may not be reordered. See __set_bit() | ||
28 | * if you do not require the atomic guarantees. | ||
29 | * Note that @nr may be almost arbitrarily large; this function is not | ||
30 | * restricted to acting on a single-word quantity. | ||
31 | */ | ||
32 | static inline void set_bit(int nr, volatile void *addr) | ||
33 | { | ||
34 | __asm__ __volatile__( LOCK_PREFIX | ||
35 | "btsl %1,%0" | ||
36 | :ADDR | ||
37 | :"dIr" (nr) : "memory"); | ||
38 | } | ||
39 | |||
40 | /** | ||
41 | * __set_bit - Set a bit in memory | ||
42 | * @nr: the bit to set | ||
43 | * @addr: the address to start counting from | ||
44 | * | ||
45 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
46 | * If it's called on the same region of memory simultaneously, the effect | ||
47 | * may be that only one operation succeeds. | ||
48 | */ | ||
49 | static inline void __set_bit(int nr, volatile void *addr) | ||
50 | { | ||
51 | __asm__ volatile( | ||
52 | "btsl %1,%0" | ||
53 | :ADDR | ||
54 | :"dIr" (nr) : "memory"); | ||
55 | } | ||
56 | |||
57 | /** | ||
58 | * clear_bit - Clears a bit in memory | ||
59 | * @nr: Bit to clear | ||
60 | * @addr: Address to start counting from | ||
61 | * | ||
62 | * clear_bit() is atomic and may not be reordered. However, it does | ||
63 | * not contain a memory barrier, so if it is used for locking purposes, | ||
64 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
65 | * in order to ensure changes are visible on other processors. | ||
66 | */ | ||
67 | static inline void clear_bit(int nr, volatile void *addr) | ||
68 | { | ||
69 | __asm__ __volatile__( LOCK_PREFIX | ||
70 | "btrl %1,%0" | ||
71 | :ADDR | ||
72 | :"dIr" (nr)); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * clear_bit_unlock - Clears a bit in memory | ||
77 | * @nr: Bit to clear | ||
78 | * @addr: Address to start counting from | ||
79 | * | ||
80 | * clear_bit() is atomic and implies release semantics before the memory | ||
81 | * operation. It can be used for an unlock. | ||
82 | */ | ||
83 | static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | ||
84 | { | ||
85 | barrier(); | ||
86 | clear_bit(nr, addr); | ||
87 | } | ||
88 | |||
89 | static inline void __clear_bit(int nr, volatile void *addr) | ||
90 | { | ||
91 | __asm__ __volatile__( | ||
92 | "btrl %1,%0" | ||
93 | :ADDR | ||
94 | :"dIr" (nr)); | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * __clear_bit_unlock - Clears a bit in memory | ||
99 | * @nr: Bit to clear | ||
100 | * @addr: Address to start counting from | ||
101 | * | ||
102 | * __clear_bit() is non-atomic and implies release semantics before the memory | ||
103 | * operation. It can be used for an unlock if no other CPUs can concurrently | ||
104 | * modify other bits in the word. | ||
105 | * | ||
106 | * No memory barrier is required here, because x86 cannot reorder stores past | ||
107 | * older loads. Same principle as spin_unlock. | ||
108 | */ | ||
109 | static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr) | ||
110 | { | ||
111 | barrier(); | ||
112 | __clear_bit(nr, addr); | ||
113 | } | ||
114 | |||
115 | #define smp_mb__before_clear_bit() barrier() | ||
116 | #define smp_mb__after_clear_bit() barrier() | ||
117 | |||
118 | /** | ||
119 | * __change_bit - Toggle a bit in memory | ||
120 | * @nr: the bit to change | ||
121 | * @addr: the address to start counting from | ||
122 | * | ||
123 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
124 | * If it's called on the same region of memory simultaneously, the effect | ||
125 | * may be that only one operation succeeds. | ||
126 | */ | ||
127 | static inline void __change_bit(int nr, volatile void *addr) | ||
128 | { | ||
129 | __asm__ __volatile__( | ||
130 | "btcl %1,%0" | ||
131 | :ADDR | ||
132 | :"dIr" (nr)); | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * change_bit - Toggle a bit in memory | ||
137 | * @nr: Bit to change | ||
138 | * @addr: Address to start counting from | ||
139 | * | ||
140 | * change_bit() is atomic and may not be reordered. | ||
141 | * Note that @nr may be almost arbitrarily large; this function is not | ||
142 | * restricted to acting on a single-word quantity. | ||
143 | */ | ||
144 | static inline void change_bit(int nr, volatile void *addr) | ||
145 | { | ||
146 | __asm__ __volatile__( LOCK_PREFIX | ||
147 | "btcl %1,%0" | ||
148 | :ADDR | ||
149 | :"dIr" (nr)); | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * test_and_set_bit - Set a bit and return its old value | ||
154 | * @nr: Bit to set | ||
155 | * @addr: Address to count from | ||
156 | * | ||
157 | * This operation is atomic and cannot be reordered. | ||
158 | * It also implies a memory barrier. | ||
159 | */ | ||
160 | static inline int test_and_set_bit(int nr, volatile void *addr) | ||
161 | { | ||
162 | int oldbit; | ||
163 | |||
164 | __asm__ __volatile__( LOCK_PREFIX | ||
165 | "btsl %2,%1\n\tsbbl %0,%0" | ||
166 | :"=r" (oldbit),ADDR | ||
167 | :"dIr" (nr) : "memory"); | ||
168 | return oldbit; | ||
169 | } | ||
170 | |||
171 | /** | ||
172 | * test_and_set_bit_lock - Set a bit and return its old value for lock | ||
173 | * @nr: Bit to set | ||
174 | * @addr: Address to count from | ||
175 | * | ||
176 | * This is the same as test_and_set_bit on x86. | ||
177 | */ | ||
178 | static inline int test_and_set_bit_lock(int nr, volatile void *addr) | ||
179 | { | ||
180 | return test_and_set_bit(nr, addr); | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * __test_and_set_bit - Set a bit and return its old value | ||
185 | * @nr: Bit to set | ||
186 | * @addr: Address to count from | ||
187 | * | ||
188 | * This operation is non-atomic and can be reordered. | ||
189 | * If two instances of this operation race, one can appear to succeed | ||
190 | * but actually fail. You must protect multiple accesses with a lock. | ||
191 | */ | ||
192 | static inline int __test_and_set_bit(int nr, volatile void *addr) | ||
193 | { | ||
194 | int oldbit; | ||
195 | |||
196 | __asm__( | ||
197 | "btsl %2,%1\n\tsbbl %0,%0" | ||
198 | :"=r" (oldbit),ADDR | ||
199 | :"dIr" (nr)); | ||
200 | return oldbit; | ||
201 | } | ||
202 | |||
203 | /** | ||
204 | * test_and_clear_bit - Clear a bit and return its old value | ||
205 | * @nr: Bit to clear | ||
206 | * @addr: Address to count from | ||
207 | * | ||
208 | * This operation is atomic and cannot be reordered. | ||
209 | * It also implies a memory barrier. | ||
210 | */ | ||
211 | static inline int test_and_clear_bit(int nr, volatile void *addr) | ||
212 | { | ||
213 | int oldbit; | ||
214 | |||
215 | __asm__ __volatile__( LOCK_PREFIX | ||
216 | "btrl %2,%1\n\tsbbl %0,%0" | ||
217 | :"=r" (oldbit),ADDR | ||
218 | :"dIr" (nr) : "memory"); | ||
219 | return oldbit; | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * __test_and_clear_bit - Clear a bit and return its old value | ||
224 | * @nr: Bit to clear | ||
225 | * @addr: Address to count from | ||
226 | * | ||
227 | * This operation is non-atomic and can be reordered. | ||
228 | * If two instances of this operation race, one can appear to succeed | ||
229 | * but actually fail. You must protect multiple accesses with a lock. | ||
230 | */ | ||
231 | static inline int __test_and_clear_bit(int nr, volatile void *addr) | ||
232 | { | ||
233 | int oldbit; | ||
234 | |||
235 | __asm__( | ||
236 | "btrl %2,%1\n\tsbbl %0,%0" | ||
237 | :"=r" (oldbit),ADDR | ||
238 | :"dIr" (nr)); | ||
239 | return oldbit; | ||
240 | } | ||
241 | |||
242 | /* WARNING: non-atomic and it can be reordered! */ | ||
243 | static inline int __test_and_change_bit(int nr, volatile void *addr) | ||
244 | { | ||
245 | int oldbit; | ||
246 | |||
247 | __asm__ __volatile__( | ||
248 | "btcl %2,%1\n\tsbbl %0,%0" | ||
249 | :"=r" (oldbit),ADDR | ||
250 | :"dIr" (nr) : "memory"); | ||
251 | return oldbit; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * test_and_change_bit - Change a bit and return its old value | ||
256 | * @nr: Bit to change | ||
257 | * @addr: Address to count from | ||
258 | * | ||
259 | * This operation is atomic and cannot be reordered. | ||
260 | * It also implies a memory barrier. | ||
261 | */ | ||
262 | static inline int test_and_change_bit(int nr, volatile void *addr) | ||
263 | { | ||
264 | int oldbit; | ||
265 | |||
266 | __asm__ __volatile__( LOCK_PREFIX | ||
267 | "btcl %2,%1\n\tsbbl %0,%0" | ||
268 | :"=r" (oldbit),ADDR | ||
269 | :"dIr" (nr) : "memory"); | ||
270 | return oldbit; | ||
271 | } | ||
272 | |||
273 | #if 0 /* Fool kernel-doc since it doesn't do macros yet */ | ||
274 | /** | ||
275 | * test_bit - Determine whether a bit is set | ||
276 | * @nr: bit number to test | ||
277 | * @addr: Address to start counting from | ||
278 | */ | ||
279 | static int test_bit(int nr, const volatile void *addr); | ||
280 | #endif | ||
281 | |||
282 | static inline int constant_test_bit(int nr, const volatile void *addr) | ||
283 | { | ||
284 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | ||
285 | } | ||
286 | |||
287 | static inline int variable_test_bit(int nr, volatile const void *addr) | ||
288 | { | ||
289 | int oldbit; | ||
290 | |||
291 | __asm__ __volatile__( | ||
292 | "btl %2,%1\n\tsbbl %0,%0" | ||
293 | :"=r" (oldbit) | ||
294 | :"m" (*(volatile long *)addr),"dIr" (nr)); | ||
295 | return oldbit; | ||
296 | } | ||
297 | |||
298 | #define test_bit(nr,addr) \ | ||
299 | (__builtin_constant_p(nr) ? \ | ||
300 | constant_test_bit((nr),(addr)) : \ | ||
301 | variable_test_bit((nr),(addr))) | ||
302 | |||
303 | #undef ADDR | ||
304 | |||
305 | extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); | 8 | extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); |
306 | extern long find_next_zero_bit(const unsigned long *addr, long size, long offset); | 9 | extern long find_next_zero_bit(const unsigned long *addr, long size, long offset); |
307 | extern long find_first_bit(const unsigned long *addr, unsigned long size); | 10 | extern long find_first_bit(const unsigned long *addr, unsigned long size); |
@@ -334,12 +37,6 @@ static inline long __scanbit(unsigned long val, unsigned long max) | |||
334 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ | 37 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ |
335 | find_next_zero_bit(addr,size,off))) | 38 | find_next_zero_bit(addr,size,off))) |
336 | 39 | ||
337 | /* | ||
338 | * Find string of zero bits in a bitmap. -1 when not found. | ||
339 | */ | ||
340 | extern unsigned long | ||
341 | find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len); | ||
342 | |||
343 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | 40 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, |
344 | int len) | 41 | int len) |
345 | { | 42 | { |
@@ -350,16 +47,6 @@ static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | |||
350 | } | 47 | } |
351 | } | 48 | } |
352 | 49 | ||
353 | static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i, | ||
354 | int len) | ||
355 | { | ||
356 | unsigned long end = i + len; | ||
357 | while (i < end) { | ||
358 | __clear_bit(i, bitmap); | ||
359 | i++; | ||
360 | } | ||
361 | } | ||
362 | |||
363 | /** | 50 | /** |
364 | * ffz - find first zero in word. | 51 | * ffz - find first zero in word. |
365 | * @word: The word to search | 52 | * @word: The word to search |
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h index 19f3ddf2df4b..51151356840f 100644 --- a/include/asm-x86/bootparam.h +++ b/include/asm-x86/bootparam.h | |||
@@ -54,13 +54,14 @@ struct sys_desc_table { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct efi_info { | 56 | struct efi_info { |
57 | __u32 _pad1; | 57 | __u32 efi_loader_signature; |
58 | __u32 efi_systab; | 58 | __u32 efi_systab; |
59 | __u32 efi_memdesc_size; | 59 | __u32 efi_memdesc_size; |
60 | __u32 efi_memdesc_version; | 60 | __u32 efi_memdesc_version; |
61 | __u32 efi_memmap; | 61 | __u32 efi_memmap; |
62 | __u32 efi_memmap_size; | 62 | __u32 efi_memmap_size; |
63 | __u32 _pad2[2]; | 63 | __u32 efi_systab_hi; |
64 | __u32 efi_memmap_hi; | ||
64 | }; | 65 | }; |
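The two new _hi words carry the upper halves of the 64-bit EFI pointers that the old padding fields discarded. A sketch of reassembling them (ei is an illustrative pointer to the struct above, not a name from this header):

	/* Sketch: rebuild the 64-bit EFI addresses from the split fields. */
	u64 systab = ((u64)ei->efi_systab_hi << 32) | ei->efi_systab;
	u64 memmap = ((u64)ei->efi_memmap_hi << 32) | ei->efi_memmap;
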
65 | 66 | ||
66 | /* The so-called "zeropage" */ | 67 | /* The so-called "zeropage" */ |
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index fd8bdc639c48..8d477a201392 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h | |||
@@ -33,9 +33,6 @@ | |||
33 | } while(0) | 33 | } while(0) |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | void out_of_line_bug(void); | ||
37 | #else /* CONFIG_BUG */ | ||
38 | static inline void out_of_line_bug(void) { } | ||
39 | #endif /* !CONFIG_BUG */ | 36 | #endif /* !CONFIG_BUG */ |
40 | 37 | ||
41 | #include <asm-generic/bug.h> | 38 | #include <asm-generic/bug.h> |
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h index aac8317420af..021cbdd5f258 100644 --- a/include/asm-x86/bugs.h +++ b/include/asm-x86/bugs.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _ASM_X86_BUGS_H | 1 | #ifndef _ASM_X86_BUGS_H |
2 | #define _ASM_X86_BUGS_H | 2 | #define _ASM_X86_BUGS_H |
3 | 3 | ||
4 | void check_bugs(void); | 4 | extern void check_bugs(void); |
5 | int ppro_with_ram_bug(void); | ||
5 | 6 | ||
6 | #endif /* _ASM_X86_BUGS_H */ | 7 | #endif /* _ASM_X86_BUGS_H */ |
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h index 1f2d6d5bf20d..fe2f2e5d51ba 100644 --- a/include/asm-x86/byteorder.h +++ b/include/asm-x86/byteorder.h | |||
@@ -30,13 +30,13 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) | |||
30 | } v; | 30 | } v; |
31 | v.u = val; | 31 | v.u = val; |
32 | #ifdef CONFIG_X86_BSWAP | 32 | #ifdef CONFIG_X86_BSWAP |
33 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | 33 | __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1" |
34 | : "=r" (v.s.a), "=r" (v.s.b) | 34 | : "=r" (v.s.a), "=r" (v.s.b) |
35 | : "0" (v.s.a), "1" (v.s.b)); | 35 | : "0" (v.s.a), "1" (v.s.b)); |
36 | #else | 36 | #else |
37 | v.s.a = ___arch__swab32(v.s.a); | 37 | v.s.a = ___arch__swab32(v.s.a); |
38 | v.s.b = ___arch__swab32(v.s.b); | 38 | v.s.b = ___arch__swab32(v.s.b); |
39 | asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); | 39 | __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); |
40 | #endif | 40 | #endif |
41 | return v.u; | 41 | return v.u; |
42 | } | 42 | } |
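Both branches implement the same 64-bit byte swap: swap the bytes inside each 32-bit half, then exchange the halves. An equivalent portable form, for reference only (not part of the header):

	static __u64 swab64_ref(__u64 x)
	{
		__u32 hi = (__u32)(x >> 32);
		__u32 lo = (__u32)x;

		/* swap each half, then cross them over */
		return ((__u64)___arch__swab32(lo) << 32) | ___arch__swab32(hi);
	}
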
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 9411a2d3f19c..8dd8c5e3cc7f 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h | |||
@@ -24,18 +24,35 @@ | |||
24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
25 | memcpy(dst, src, len) | 25 | memcpy(dst, src, len) |
26 | 26 | ||
27 | void global_flush_tlb(void); | 27 | int __deprecated_for_modules change_page_attr(struct page *page, int numpages, |
28 | int change_page_attr(struct page *page, int numpages, pgprot_t prot); | 28 | pgprot_t prot); |
29 | int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); | 29 | |
30 | void clflush_cache_range(void *addr, int size); | 30 | int set_pages_uc(struct page *page, int numpages); |
31 | 31 | int set_pages_wb(struct page *page, int numpages); | |
32 | #ifdef CONFIG_DEBUG_PAGEALLOC | 32 | int set_pages_x(struct page *page, int numpages); |
33 | /* internal debugging function */ | 33 | int set_pages_nx(struct page *page, int numpages); |
34 | void kernel_map_pages(struct page *page, int numpages, int enable); | 34 | int set_pages_ro(struct page *page, int numpages); |
35 | #endif | 35 | int set_pages_rw(struct page *page, int numpages); |
36 | |||
37 | int set_memory_uc(unsigned long addr, int numpages); | ||
38 | int set_memory_wb(unsigned long addr, int numpages); | ||
39 | int set_memory_x(unsigned long addr, int numpages); | ||
40 | int set_memory_nx(unsigned long addr, int numpages); | ||
41 | int set_memory_ro(unsigned long addr, int numpages); | ||
42 | int set_memory_rw(unsigned long addr, int numpages); | ||
43 | int set_memory_np(unsigned long addr, int numpages); | ||
44 | |||
45 | void clflush_cache_range(void *addr, unsigned int size); | ||
36 | 46 | ||
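The set_memory_*()/set_pages_*() family replaces the old change_page_attr() interface: callers name the attribute they want instead of passing a pgprot. A hedged usage sketch (buf and the page count are illustrative, not from this header):

	/* Sketch: write-protect a range of kernel pages. */
	unsigned long addr = (unsigned long)buf;	/* buf: hypothetical kernel buffer */
	int npages = 4;

	set_memory_ro(addr, npages);	/* make the mapping read-only */
	set_memory_nx(addr, npages);	/* and non-executable */
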
37 | #ifdef CONFIG_DEBUG_RODATA | 47 | #ifdef CONFIG_DEBUG_RODATA |
38 | void mark_rodata_ro(void); | 48 | void mark_rodata_ro(void); |
39 | #endif | 49 | #endif |
50 | #ifdef CONFIG_DEBUG_RODATA_TEST | ||
51 | void rodata_test(void); | ||
52 | #else | ||
53 | static inline void rodata_test(void) | ||
54 | { | ||
55 | } | ||
56 | #endif | ||
40 | 57 | ||
41 | #endif | 58 | #endif |
diff --git a/include/asm-x86/calling.h b/include/asm-x86/calling.h index 6f4f63af96e1..f13e62e2cb3e 100644 --- a/include/asm-x86/calling.h +++ b/include/asm-x86/calling.h | |||
@@ -1,162 +1,168 @@ | |||
1 | /* | 1 | /* |
2 | * Some macros to handle stack frames in assembly. | 2 | * Some macros to handle stack frames in assembly. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define R15 0 | ||
6 | #define R14 8 | ||
7 | #define R13 16 | ||
8 | #define R12 24 | ||
9 | #define RBP 32 | ||
10 | #define RBX 40 | ||
5 | 11 | ||
6 | #define R15 0 | ||
7 | #define R14 8 | ||
8 | #define R13 16 | ||
9 | #define R12 24 | ||
10 | #define RBP 32 | ||
11 | #define RBX 40 | ||
12 | /* arguments: interrupts/non-tracing syscalls only save up to here */ | 12 | /* arguments: interrupts/non-tracing syscalls only save up to here */
13 | #define R11 48 | 13 | #define R11 48 |
14 | #define R10 56 | 14 | #define R10 56 |
15 | #define R9 64 | 15 | #define R9 64 |
16 | #define R8 72 | 16 | #define R8 72 |
17 | #define RAX 80 | 17 | #define RAX 80 |
18 | #define RCX 88 | 18 | #define RCX 88 |
19 | #define RDX 96 | 19 | #define RDX 96 |
20 | #define RSI 104 | 20 | #define RSI 104 |
21 | #define RDI 112 | 21 | #define RDI 112 |
22 | #define ORIG_RAX 120 /* + error_code */ | 22 | #define ORIG_RAX 120 /* + error_code */ |
23 | /* end of arguments */ | 23 | /* end of arguments */ |
24 | |||
24 | /* cpu exception frame or undefined in case of fast syscall. */ | 25 | /* cpu exception frame or undefined in case of fast syscall. */ |
25 | #define RIP 128 | 26 | #define RIP 128 |
26 | #define CS 136 | 27 | #define CS 136 |
27 | #define EFLAGS 144 | 28 | #define EFLAGS 144 |
28 | #define RSP 152 | 29 | #define RSP 152 |
29 | #define SS 160 | 30 | #define SS 160 |
30 | #define ARGOFFSET R11 | 31 | |
31 | #define SWFRAME ORIG_RAX | 32 | #define ARGOFFSET R11 |
33 | #define SWFRAME ORIG_RAX | ||
32 | 34 | ||
33 | .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0 | 35 | .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 |
34 | subq $9*8+\addskip,%rsp | 36 | subq $9*8+\addskip, %rsp |
35 | CFI_ADJUST_CFA_OFFSET 9*8+\addskip | 37 | CFI_ADJUST_CFA_OFFSET 9*8+\addskip |
36 | movq %rdi,8*8(%rsp) | 38 | movq %rdi, 8*8(%rsp) |
37 | CFI_REL_OFFSET rdi,8*8 | 39 | CFI_REL_OFFSET rdi, 8*8 |
38 | movq %rsi,7*8(%rsp) | 40 | movq %rsi, 7*8(%rsp) |
39 | CFI_REL_OFFSET rsi,7*8 | 41 | CFI_REL_OFFSET rsi, 7*8 |
40 | movq %rdx,6*8(%rsp) | 42 | movq %rdx, 6*8(%rsp) |
41 | CFI_REL_OFFSET rdx,6*8 | 43 | CFI_REL_OFFSET rdx, 6*8 |
42 | .if \norcx | 44 | .if \norcx |
43 | .else | 45 | .else |
44 | movq %rcx,5*8(%rsp) | 46 | movq %rcx, 5*8(%rsp) |
45 | CFI_REL_OFFSET rcx,5*8 | 47 | CFI_REL_OFFSET rcx, 5*8 |
46 | .endif | 48 | .endif |
47 | movq %rax,4*8(%rsp) | 49 | movq %rax, 4*8(%rsp) |
48 | CFI_REL_OFFSET rax,4*8 | 50 | CFI_REL_OFFSET rax, 4*8 |
49 | .if \nor891011 | 51 | .if \nor891011 |
50 | .else | 52 | .else |
51 | movq %r8,3*8(%rsp) | 53 | movq %r8, 3*8(%rsp) |
52 | CFI_REL_OFFSET r8,3*8 | 54 | CFI_REL_OFFSET r8, 3*8 |
53 | movq %r9,2*8(%rsp) | 55 | movq %r9, 2*8(%rsp) |
54 | CFI_REL_OFFSET r9,2*8 | 56 | CFI_REL_OFFSET r9, 2*8 |
55 | movq %r10,1*8(%rsp) | 57 | movq %r10, 1*8(%rsp) |
56 | CFI_REL_OFFSET r10,1*8 | 58 | CFI_REL_OFFSET r10, 1*8 |
57 | movq %r11,(%rsp) | 59 | movq %r11, (%rsp) |
58 | CFI_REL_OFFSET r11,0*8 | 60 | CFI_REL_OFFSET r11, 0*8 |
59 | .endif | 61 | .endif |
60 | .endm | 62 | .endm |
61 | 63 | ||
62 | #define ARG_SKIP 9*8 | 64 | #define ARG_SKIP 9*8 |
63 | .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0 | 65 | |
66 | .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ | ||
67 | skipr8910=0, skiprdx=0 | ||
64 | .if \skipr11 | 68 | .if \skipr11 |
65 | .else | 69 | .else |
66 | movq (%rsp),%r11 | 70 | movq (%rsp), %r11 |
67 | CFI_RESTORE r11 | 71 | CFI_RESTORE r11 |
68 | .endif | 72 | .endif |
69 | .if \skipr8910 | 73 | .if \skipr8910 |
70 | .else | 74 | .else |
71 | movq 1*8(%rsp),%r10 | 75 | movq 1*8(%rsp), %r10 |
72 | CFI_RESTORE r10 | 76 | CFI_RESTORE r10 |
73 | movq 2*8(%rsp),%r9 | 77 | movq 2*8(%rsp), %r9 |
74 | CFI_RESTORE r9 | 78 | CFI_RESTORE r9 |
75 | movq 3*8(%rsp),%r8 | 79 | movq 3*8(%rsp), %r8 |
76 | CFI_RESTORE r8 | 80 | CFI_RESTORE r8 |
77 | .endif | 81 | .endif |
78 | .if \skiprax | 82 | .if \skiprax |
79 | .else | 83 | .else |
80 | movq 4*8(%rsp),%rax | 84 | movq 4*8(%rsp), %rax |
81 | CFI_RESTORE rax | 85 | CFI_RESTORE rax |
82 | .endif | 86 | .endif |
83 | .if \skiprcx | 87 | .if \skiprcx |
84 | .else | 88 | .else |
85 | movq 5*8(%rsp),%rcx | 89 | movq 5*8(%rsp), %rcx |
86 | CFI_RESTORE rcx | 90 | CFI_RESTORE rcx |
87 | .endif | 91 | .endif |
88 | .if \skiprdx | 92 | .if \skiprdx |
89 | .else | 93 | .else |
90 | movq 6*8(%rsp),%rdx | 94 | movq 6*8(%rsp), %rdx |
91 | CFI_RESTORE rdx | 95 | CFI_RESTORE rdx |
92 | .endif | 96 | .endif |
93 | movq 7*8(%rsp),%rsi | 97 | movq 7*8(%rsp), %rsi |
94 | CFI_RESTORE rsi | 98 | CFI_RESTORE rsi |
95 | movq 8*8(%rsp),%rdi | 99 | movq 8*8(%rsp), %rdi |
96 | CFI_RESTORE rdi | 100 | CFI_RESTORE rdi |
97 | .if ARG_SKIP+\addskip > 0 | 101 | .if ARG_SKIP+\addskip > 0 |
98 | addq $ARG_SKIP+\addskip,%rsp | 102 | addq $ARG_SKIP+\addskip, %rsp |
99 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) | 103 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) |
100 | .endif | 104 | .endif |
101 | .endm | 105 | .endm |
102 | 106 | ||
103 | .macro LOAD_ARGS offset | 107 | .macro LOAD_ARGS offset |
104 | movq \offset(%rsp),%r11 | 108 | movq \offset(%rsp), %r11 |
105 | movq \offset+8(%rsp),%r10 | 109 | movq \offset+8(%rsp), %r10 |
106 | movq \offset+16(%rsp),%r9 | 110 | movq \offset+16(%rsp), %r9 |
107 | movq \offset+24(%rsp),%r8 | 111 | movq \offset+24(%rsp), %r8 |
108 | movq \offset+40(%rsp),%rcx | 112 | movq \offset+40(%rsp), %rcx |
109 | movq \offset+48(%rsp),%rdx | 113 | movq \offset+48(%rsp), %rdx |
110 | movq \offset+56(%rsp),%rsi | 114 | movq \offset+56(%rsp), %rsi |
111 | movq \offset+64(%rsp),%rdi | 115 | movq \offset+64(%rsp), %rdi |
112 | movq \offset+72(%rsp),%rax | 116 | movq \offset+72(%rsp), %rax |
113 | .endm | 117 | .endm |
114 | 118 | ||
115 | #define REST_SKIP 6*8 | 119 | #define REST_SKIP 6*8 |
120 | |||
116 | .macro SAVE_REST | 121 | .macro SAVE_REST |
117 | subq $REST_SKIP,%rsp | 122 | subq $REST_SKIP, %rsp |
118 | CFI_ADJUST_CFA_OFFSET REST_SKIP | 123 | CFI_ADJUST_CFA_OFFSET REST_SKIP |
119 | movq %rbx,5*8(%rsp) | 124 | movq %rbx, 5*8(%rsp) |
120 | CFI_REL_OFFSET rbx,5*8 | 125 | CFI_REL_OFFSET rbx, 5*8 |
121 | movq %rbp,4*8(%rsp) | 126 | movq %rbp, 4*8(%rsp) |
122 | CFI_REL_OFFSET rbp,4*8 | 127 | CFI_REL_OFFSET rbp, 4*8 |
123 | movq %r12,3*8(%rsp) | 128 | movq %r12, 3*8(%rsp) |
124 | CFI_REL_OFFSET r12,3*8 | 129 | CFI_REL_OFFSET r12, 3*8 |
125 | movq %r13,2*8(%rsp) | 130 | movq %r13, 2*8(%rsp) |
126 | CFI_REL_OFFSET r13,2*8 | 131 | CFI_REL_OFFSET r13, 2*8 |
127 | movq %r14,1*8(%rsp) | 132 | movq %r14, 1*8(%rsp) |
128 | CFI_REL_OFFSET r14,1*8 | 133 | CFI_REL_OFFSET r14, 1*8 |
129 | movq %r15,(%rsp) | 134 | movq %r15, (%rsp) |
130 | CFI_REL_OFFSET r15,0*8 | 135 | CFI_REL_OFFSET r15, 0*8 |
131 | .endm | 136 | .endm |
132 | 137 | ||
133 | .macro RESTORE_REST | 138 | .macro RESTORE_REST |
134 | movq (%rsp),%r15 | 139 | movq (%rsp), %r15 |
135 | CFI_RESTORE r15 | 140 | CFI_RESTORE r15 |
136 | movq 1*8(%rsp),%r14 | 141 | movq 1*8(%rsp), %r14 |
137 | CFI_RESTORE r14 | 142 | CFI_RESTORE r14 |
138 | movq 2*8(%rsp),%r13 | 143 | movq 2*8(%rsp), %r13 |
139 | CFI_RESTORE r13 | 144 | CFI_RESTORE r13 |
140 | movq 3*8(%rsp),%r12 | 145 | movq 3*8(%rsp), %r12 |
141 | CFI_RESTORE r12 | 146 | CFI_RESTORE r12 |
142 | movq 4*8(%rsp),%rbp | 147 | movq 4*8(%rsp), %rbp |
143 | CFI_RESTORE rbp | 148 | CFI_RESTORE rbp |
144 | movq 5*8(%rsp),%rbx | 149 | movq 5*8(%rsp), %rbx |
145 | CFI_RESTORE rbx | 150 | CFI_RESTORE rbx |
146 | addq $REST_SKIP,%rsp | 151 | addq $REST_SKIP, %rsp |
147 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) | 152 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) |
148 | .endm | 153 | .endm |
149 | 154 | ||
150 | .macro SAVE_ALL | 155 | .macro SAVE_ALL |
151 | SAVE_ARGS | 156 | SAVE_ARGS |
152 | SAVE_REST | 157 | SAVE_REST |
153 | .endm | 158 | .endm |
154 | 159 | ||
155 | .macro RESTORE_ALL addskip=0 | 160 | .macro RESTORE_ALL addskip=0 |
156 | RESTORE_REST | 161 | RESTORE_REST |
157 | RESTORE_ARGS 0,\addskip | 162 | RESTORE_ARGS 0, \addskip |
158 | .endm | 163 | .endm |
159 | 164 | ||
160 | .macro icebp | 165 | .macro icebp |
161 | .byte 0xf1 | 166 | .byte 0xf1 |
162 | .endm | 167 | .endm |
168 | |||
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index 419fe88a0342..e5f79997decc 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h | |||
@@ -4,7 +4,7 @@ | |||
4 | /* | 4 | /* |
5 | * Checksums for x86-64 | 5 | * Checksums for x86-64 |
6 | * Copyright 2002 by Andi Kleen, SuSE Labs | 6 | * Copyright 2002 by Andi Kleen, SuSE Labs |
7 | * with some code from asm-i386/checksum.h | 7 | * with some code from asm-x86/checksum.h |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index f86ede28f6dc..cea1dae288a7 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h | |||
@@ -105,15 +105,24 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
105 | 105 | ||
106 | #ifdef CONFIG_X86_CMPXCHG | 106 | #ifdef CONFIG_X86_CMPXCHG |
107 | #define __HAVE_ARCH_CMPXCHG 1 | 107 | #define __HAVE_ARCH_CMPXCHG 1 |
108 | #define cmpxchg(ptr,o,n)\ | 108 | #define cmpxchg(ptr, o, n) \ |
109 | ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ | 109 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ |
110 | (unsigned long)(n),sizeof(*(ptr)))) | 110 | (unsigned long)(n), sizeof(*(ptr)))) |
111 | #define sync_cmpxchg(ptr,o,n)\ | 111 | #define sync_cmpxchg(ptr, o, n) \ |
112 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\ | 112 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ |
113 | (unsigned long)(n),sizeof(*(ptr)))) | 113 | (unsigned long)(n), sizeof(*(ptr)))) |
114 | #define cmpxchg_local(ptr,o,n)\ | 114 | #define cmpxchg_local(ptr, o, n) \ |
115 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\ | 115 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ |
116 | (unsigned long)(n),sizeof(*(ptr)))) | 116 | (unsigned long)(n), sizeof(*(ptr)))) |
117 | #endif | ||
118 | |||
119 | #ifdef CONFIG_X86_CMPXCHG64 | ||
120 | #define cmpxchg64(ptr, o, n) \ | ||
121 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ | ||
122 | (unsigned long long)(n))) | ||
123 | #define cmpxchg64_local(ptr, o, n) \ | ||
124 | ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\ | ||
125 | (unsigned long long)(n))) | ||
117 | #endif | 126 | #endif |
118 | 127 | ||
119 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 128 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, |
@@ -203,6 +212,34 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
203 | return old; | 212 | return old; |
204 | } | 213 | } |
205 | 214 | ||
215 | static inline unsigned long long __cmpxchg64(volatile void *ptr, | ||
216 | unsigned long long old, unsigned long long new) | ||
217 | { | ||
218 | unsigned long long prev; | ||
219 | __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" | ||
220 | : "=A"(prev) | ||
221 | : "b"((unsigned long)new), | ||
222 | "c"((unsigned long)(new >> 32)), | ||
223 | "m"(*__xg(ptr)), | ||
224 | "0"(old) | ||
225 | : "memory"); | ||
226 | return prev; | ||
227 | } | ||
228 | |||
229 | static inline unsigned long long __cmpxchg64_local(volatile void *ptr, | ||
230 | unsigned long long old, unsigned long long new) | ||
231 | { | ||
232 | unsigned long long prev; | ||
233 | __asm__ __volatile__("cmpxchg8b %3" | ||
234 | : "=A"(prev) | ||
235 | : "b"((unsigned long)new), | ||
236 | "c"((unsigned long)(new >> 32)), | ||
237 | "m"(*__xg(ptr)), | ||
238 | "0"(old) | ||
239 | : "memory"); | ||
240 | return prev; | ||
241 | } | ||
242 | |||
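cmpxchg8b compares EDX:EAX against the 64-bit memory operand and, on match, stores ECX:EBX, which is why new is split across the "b"/"c" constraints while old and the result travel in "A" (the EDX:EAX pair). The usual caller pattern is a retry loop; a sketch (the counter helper is illustrative, not part of this header):

	/* Sketch: atomically add to a 64-bit counter on 32-bit x86. */
	static inline void counter64_add(unsigned long long *p,
					 unsigned long long delta)
	{
		unsigned long long old;

		do {
			old = *p;
		} while (cmpxchg64(p, old, old + delta) != old);
	}
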
206 | #ifndef CONFIG_X86_CMPXCHG | 243 | #ifndef CONFIG_X86_CMPXCHG |
207 | /* | 244 | /* |
208 | * Building a kernel capable of running on 80386. It may be necessary to | 245 | * Building a kernel capable of running on 80386. It may be necessary to
@@ -228,7 +265,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, | |||
228 | return old; | 265 | return old; |
229 | } | 266 | } |
230 | 267 | ||
231 | #define cmpxchg(ptr,o,n) \ | 268 | #define cmpxchg(ptr, o, n) \ |
232 | ({ \ | 269 | ({ \ |
233 | __typeof__(*(ptr)) __ret; \ | 270 | __typeof__(*(ptr)) __ret; \ |
234 | if (likely(boot_cpu_data.x86 > 3)) \ | 271 | if (likely(boot_cpu_data.x86 > 3)) \ |
@@ -239,7 +276,7 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, | |||
239 | (unsigned long)(n), sizeof(*(ptr))); \ | 276 | (unsigned long)(n), sizeof(*(ptr))); \ |
240 | __ret; \ | 277 | __ret; \ |
241 | }) | 278 | }) |
242 | #define cmpxchg_local(ptr,o,n) \ | 279 | #define cmpxchg_local(ptr, o, n) \ |
243 | ({ \ | 280 | ({ \ |
244 | __typeof__(*(ptr)) __ret; \ | 281 | __typeof__(*(ptr)) __ret; \ |
245 | if (likely(boot_cpu_data.x86 > 3)) \ | 282 | if (likely(boot_cpu_data.x86 > 3)) \ |
@@ -252,38 +289,37 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, | |||
252 | }) | 289 | }) |
253 | #endif | 290 | #endif |
254 | 291 | ||
255 | static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, | 292 | #ifndef CONFIG_X86_CMPXCHG64 |
256 | unsigned long long new) | 293 | /* |
257 | { | 294 | * Building a kernel capable running on 80386 and 80486. It may be necessary |
258 | unsigned long long prev; | 295 | * to simulate the cmpxchg8b on the 80386 and 80486 CPU. |
259 | __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" | 296 | */ |
260 | : "=A"(prev) | ||
261 | : "b"((unsigned long)new), | ||
262 | "c"((unsigned long)(new >> 32)), | ||
263 | "m"(*__xg(ptr)), | ||
264 | "0"(old) | ||
265 | : "memory"); | ||
266 | return prev; | ||
267 | } | ||
268 | 297 | ||
269 | static inline unsigned long long __cmpxchg64_local(volatile void *ptr, | 298 | extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); |
270 | unsigned long long old, unsigned long long new) | 299 | |
271 | { | 300 | #define cmpxchg64(ptr, o, n) \ |
272 | unsigned long long prev; | 301 | ({ \ |
273 | __asm__ __volatile__("cmpxchg8b %3" | 302 | __typeof__(*(ptr)) __ret; \ |
274 | : "=A"(prev) | 303 | if (likely(boot_cpu_data.x86 > 4)) \ |
275 | : "b"((unsigned long)new), | 304 | __ret = __cmpxchg64((ptr), (unsigned long long)(o), \ |
276 | "c"((unsigned long)(new >> 32)), | 305 | (unsigned long long)(n)); \ |
277 | "m"(*__xg(ptr)), | 306 | else \ |
278 | "0"(old) | 307 | __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \ |
279 | : "memory"); | 308 | (unsigned long long)(n)); \ |
280 | return prev; | 309 | __ret; \ |
281 | } | 310 | }) |
311 | #define cmpxchg64_local(ptr, o, n) \ | ||
312 | ({ \ | ||
313 | __typeof__(*(ptr)) __ret; \ | ||
314 | if (likely(boot_cpu_data.x86 > 4)) \ | ||
315 | __ret = __cmpxchg64_local((ptr), (unsigned long long)(o), \ | ||
316 | (unsigned long long)(n)); \ | ||
317 | else \ | ||
318 | __ret = cmpxchg_486_u64((ptr), (unsigned long long)(o), \ | ||
319 | (unsigned long long)(n)); \ | ||
320 | __ret; \ | ||
321 | }) | ||
322 | |||
323 | #endif | ||
282 | 324 | ||
283 | #define cmpxchg64(ptr,o,n)\ | ||
284 | ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ | ||
285 | (unsigned long long)(n))) | ||
286 | #define cmpxchg64_local(ptr,o,n)\ | ||
287 | ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\ | ||
288 | (unsigned long long)(n))) | ||
289 | #endif | 325 | #endif |
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index 66ba7987184a..d3e8f3e87ee8 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h | |||
@@ -190,7 +190,7 @@ typedef struct user_regs_struct32 compat_elf_gregset_t; | |||
190 | * A pointer passed in from user mode. This should not | 190 | * A pointer passed in from user mode. This should not |
191 | * be used for syscall parameters, just declare them | 191 | * be used for syscall parameters, just declare them |
192 | * as pointers because the syscall entry code will have | 192 | * as pointers because the syscall entry code will have |
193 | * appropriately comverted them already. | 193 | * appropriately converted them already. |
194 | */ | 194 | */ |
195 | typedef u32 compat_uptr_t; | 195 | typedef u32 compat_uptr_t; |
196 | 196 | ||
@@ -207,7 +207,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
207 | static __inline__ void __user *compat_alloc_user_space(long len) | 207 | static __inline__ void __user *compat_alloc_user_space(long len) |
208 | { | 208 | { |
209 | struct pt_regs *regs = task_pt_regs(current); | 209 | struct pt_regs *regs = task_pt_regs(current); |
210 | return (void __user *)regs->rsp - len; | 210 | return (void __user *)regs->sp - len; |
211 | } | 211 | } |
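compat_alloc_user_space() simply carves len bytes out of the user stack just below the saved stack pointer; the usual caller copies a 32-bit-layout structure there before handing off to the native syscall path. A sketch (the struct, field, and value names are hypothetical):

	struct foo32 __user *p = compat_alloc_user_space(sizeof(*p));

	if (put_user(val, &p->field))	/* hypothetical 32-bit layout */
		return -EFAULT;
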
212 | 212 | ||
213 | static inline int is_compat_task(void) | 213 | static inline int is_compat_task(void) |
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h index b1bc7b1b64b0..73f2ea84fd74 100644 --- a/include/asm-x86/cpu.h +++ b/include/asm-x86/cpu.h | |||
@@ -7,11 +7,12 @@ | |||
7 | #include <linux/nodemask.h> | 7 | #include <linux/nodemask.h> |
8 | #include <linux/percpu.h> | 8 | #include <linux/percpu.h> |
9 | 9 | ||
10 | struct i386_cpu { | 10 | struct x86_cpu { |
11 | struct cpu cpu; | 11 | struct cpu cpu; |
12 | }; | 12 | }; |
13 | extern int arch_register_cpu(int num); | 13 | |
14 | #ifdef CONFIG_HOTPLUG_CPU | 14 | #ifdef CONFIG_HOTPLUG_CPU |
15 | extern int arch_register_cpu(int num); | ||
15 | extern void arch_unregister_cpu(int); | 16 | extern void arch_unregister_cpu(int); |
16 | #endif | 17 | #endif |
17 | 18 | ||
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index b7160a4598d7..065e92966c7c 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
@@ -1,5 +1,215 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | /* |
2 | # include "cpufeature_32.h" | 2 | * Defines x86 CPU feature bits |
3 | */ | ||
4 | #ifndef _ASM_X86_CPUFEATURE_H | ||
5 | #define _ASM_X86_CPUFEATURE_H | ||
6 | |||
7 | #include <asm/required-features.h> | ||
8 | |||
9 | #define NCAPINTS 8 /* N 32-bit words worth of info */ | ||
10 | |||
11 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | ||
12 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | ||
13 | #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ | ||
14 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | ||
15 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | ||
16 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | ||
17 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ | ||
18 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | ||
19 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ | ||
20 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | ||
21 | #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ | ||
22 | #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ | ||
23 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | ||
24 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | ||
25 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | ||
26 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ | ||
27 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | ||
28 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | ||
29 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | ||
30 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | ||
31 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ | ||
32 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | ||
33 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | ||
34 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | ||
35 | /* of FPU context), and CR4.OSFXSR available */ | ||
36 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | ||
37 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | ||
38 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ | ||
39 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | ||
40 | #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ | ||
41 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | ||
42 | |||
43 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | ||
44 | /* Don't duplicate feature flags which are redundant with Intel! */ | ||
45 | #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ | ||
46 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | ||
47 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | ||
48 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | ||
49 | #define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ | ||
50 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ | ||
51 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | ||
52 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | ||
53 | #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ | ||
54 | |||
55 | /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ | ||
56 | #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ | ||
57 | #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ | ||
58 | #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ | ||
59 | |||
60 | /* Other features, Linux-defined mapping, word 3 */ | ||
61 | /* This range is used for feature bits which conflict or are synthesized */ | ||
62 | #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ | ||
63 | #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ | ||
64 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | ||
65 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | ||
66 | /* cpu types for specific tunings: */ | ||
67 | #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ | ||
68 | #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ | ||
69 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | ||
70 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | ||
71 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | ||
72 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | ||
73 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | ||
74 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | ||
75 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | ||
76 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | ||
77 | /* 14 free */ | ||
78 | /* 15 free */ | ||
79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | ||
80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ | ||
81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | ||
82 | |||
83 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | ||
84 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | ||
85 | #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ | ||
86 | #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ | ||
87 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | ||
88 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | ||
89 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | ||
90 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | ||
91 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | ||
92 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ | ||
93 | |||
94 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | ||
95 | #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ | ||
96 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ | ||
97 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ | ||
98 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ | ||
99 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ | ||
100 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ | ||
101 | #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ | ||
102 | #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ | ||
103 | #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ | ||
104 | #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ | ||
105 | |||
106 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | ||
107 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | ||
108 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If set, HyperThreading is not valid */ | ||
109 | |||
110 | /* | ||
111 | * Auxiliary flags: Linux defined - For features scattered in various | ||
112 | * CPUID levels like 0x6, 0xA etc | ||
113 | */ | ||
114 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ | ||
115 | |||
116 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | ||
117 | |||
118 | #include <linux/bitops.h> | ||
119 | |||
120 | extern const char * const x86_cap_flags[NCAPINTS*32]; | ||
121 | extern const char * const x86_power_flags[32]; | ||
122 | |||
123 | #define cpu_has(c, bit) \ | ||
124 | (__builtin_constant_p(bit) && \ | ||
125 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ | ||
126 | (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ | ||
127 | (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ | ||
128 | (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ | ||
129 | (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ | ||
130 | (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ | ||
131 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ | ||
132 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ | ||
133 | ? 1 : \ | ||
134 | test_bit(bit, (unsigned long *)((c)->x86_capability))) | ||
135 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) | ||
136 | |||
137 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) | ||
138 | #define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) | ||
139 | #define setup_clear_cpu_cap(bit) do { \ | ||
140 | clear_cpu_cap(&boot_cpu_data, bit); \ | ||
141 | set_bit(bit, cleared_cpu_caps); \ | ||
142 | } while (0) | ||
143 | #define setup_force_cpu_cap(bit) do { \ | ||
144 | set_cpu_cap(&boot_cpu_data, bit); \ | ||
145 | clear_bit(bit, cleared_cpu_caps); \ | ||
146 | } while (0) | ||
147 | |||
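Note how cpu_has() short-circuits at compile time: when __builtin_constant_p(bit) holds and the bit falls in the kernel's REQUIRED_MASK* words, the whole test folds to 1 and the run-time bitmap lookup disappears. A usage sketch (the branch bodies are hypothetical helpers, not from this header):

	if (cpu_has(c, X86_FEATURE_MTRR))
		init_mtrr_state();		/* hypothetical helper */

	if (boot_cpu_has(X86_FEATURE_XMM2))
		use_sse2_memcpy();		/* hypothetical helper */
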
148 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | ||
149 | #define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) | ||
150 | #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) | ||
151 | #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) | ||
152 | #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) | ||
153 | #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) | ||
154 | #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) | ||
155 | #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) | ||
156 | #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) | ||
157 | #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) | ||
158 | #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) | ||
159 | #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) | ||
160 | #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) | ||
161 | #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) | ||
162 | #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) | ||
163 | #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) | ||
164 | #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) | ||
165 | #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) | ||
166 | #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) | ||
167 | #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) | ||
168 | #define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) | ||
169 | #define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) | ||
170 | #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) | ||
171 | #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) | ||
172 | #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) | ||
173 | #define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) | ||
174 | #define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) | ||
175 | #define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) | ||
176 | #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) | ||
177 | #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) | ||
178 | #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) | ||
179 | #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) | ||
180 | #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) | ||
181 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | ||
182 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | ||
183 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) | ||
184 | |||
185 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | ||
186 | # define cpu_has_invlpg 1 | ||
3 | #else | 187 | #else |
4 | # include "cpufeature_64.h" | 188 | # define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
5 | #endif | 189 | #endif |
190 | |||
191 | #ifdef CONFIG_X86_64 | ||
192 | |||
193 | #undef cpu_has_vme | ||
194 | #define cpu_has_vme 0 | ||
195 | |||
196 | #undef cpu_has_pae | ||
197 | #define cpu_has_pae ___BUG___ | ||
198 | |||
199 | #undef cpu_has_mp | ||
200 | #define cpu_has_mp 1 | ||
201 | |||
202 | #undef cpu_has_k6_mtrr | ||
203 | #define cpu_has_k6_mtrr 0 | ||
204 | |||
205 | #undef cpu_has_cyrix_arr | ||
206 | #define cpu_has_cyrix_arr 0 | ||
207 | |||
208 | #undef cpu_has_centaur_mcr | ||
209 | #define cpu_has_centaur_mcr 0 | ||
210 | |||
211 | #endif /* CONFIG_X86_64 */ | ||
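The cpu_has_pae override above is a build-time poison rather than a value: a 64-bit kernel always runs with PAE, so code that still tests for it should fail to compile instead of taking a branch. ___BUG___ works only because it is declared nowhere. A tiny illustration of the idiom (names invented):

#define always_true_feature ___BUG___	/* ___BUG___ is deliberately undeclared */

int fine(void)
{
	return 1;			/* compiles: the macro is never expanded */
}

#ifdef DEMONSTRATE_THE_ERROR
int broken(void)
{
	return always_true_feature;	/* error: '___BUG___' undeclared */
}
#endif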
212 | |||
213 | #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ | ||
214 | |||
215 | #endif /* _ASM_X86_CPUFEATURE_H */ | ||
diff --git a/include/asm-x86/cpufeature_32.h b/include/asm-x86/cpufeature_32.h deleted file mode 100644 index f17e688dfb05..000000000000 --- a/include/asm-x86/cpufeature_32.h +++ /dev/null | |||
@@ -1,176 +0,0 @@ | |||
1 | /* | ||
2 | * cpufeature.h | ||
3 | * | ||
4 | * Defines x86 CPU feature bits | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_I386_CPUFEATURE_H | ||
8 | #define __ASM_I386_CPUFEATURE_H | ||
9 | |||
10 | #ifndef __ASSEMBLY__ | ||
11 | #include <linux/bitops.h> | ||
12 | #endif | ||
13 | #include <asm/required-features.h> | ||
14 | |||
15 | #define NCAPINTS 8 /* N 32-bit words worth of info */ | ||
16 | |||
17 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | ||
18 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | ||
19 | #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ | ||
20 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | ||
21 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | ||
22 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | ||
23 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ | ||
24 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | ||
25 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ | ||
26 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | ||
27 | #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ | ||
28 | #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ | ||
29 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | ||
30 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | ||
31 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | ||
32 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ | ||
33 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | ||
34 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | ||
35 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | ||
36 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | ||
37 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ | ||
38 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | ||
39 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | ||
40 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | ||
41 | /* of FPU context), and CR4.OSFXSR available */ | ||
42 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | ||
43 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | ||
44 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ | ||
45 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | ||
46 | #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ | ||
47 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | ||
48 | |||
49 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | ||
50 | /* Don't duplicate feature flags which are redundant with Intel! */ | ||
51 | #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ | ||
52 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | ||
53 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | ||
54 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | ||
55 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ | ||
56 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | ||
57 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | ||
58 | #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ | ||
59 | |||
60 | /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ | ||
61 | #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ | ||
62 | #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ | ||
63 | #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ | ||
64 | |||
65 | /* Other features, Linux-defined mapping, word 3 */ | ||
66 | /* This range is used for feature bits which conflict or are synthesized */ | ||
67 | #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ | ||
68 | #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ | ||
69 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | ||
70 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | ||
71 | /* cpu types for specific tunings: */ | ||
72 | #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ | ||
73 | #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ | ||
74 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | ||
75 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | ||
76 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | ||
77 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | ||
78 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | ||
79 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | ||
80 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | ||
81 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | ||
82 | /* 14 free */ | ||
83 | #define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */ | ||
84 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | ||
85 | |||
86 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | ||
87 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | ||
88 | #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ | ||
89 | #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ | ||
90 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | ||
91 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | ||
92 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | ||
93 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | ||
94 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | ||
95 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ | ||
96 | |||
97 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | ||
98 | #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ | ||
99 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ | ||
100 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ | ||
101 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ | ||
102 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ | ||
103 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ | ||
104 | #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ | ||
105 | #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ | ||
106 | #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ | ||
107 | #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ | ||
108 | |||
109 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | ||
110 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | ||
111 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ | ||
112 | |||
113 | /* | ||
114 | * Auxiliary flags: Linux defined - For features scattered in various | ||
115 | * CPUID levels like 0x6, 0xA etc | ||
116 | */ | ||
117 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ | ||
118 | |||
119 | #define cpu_has(c, bit) \ | ||
120 | (__builtin_constant_p(bit) && \ | ||
121 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ | ||
122 | (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ | ||
123 | (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ | ||
124 | (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ | ||
125 | (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ | ||
126 | (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ | ||
127 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ | ||
128 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ | ||
129 | ? 1 : \ | ||
130 | test_bit(bit, (c)->x86_capability)) | ||
131 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) | ||
132 | |||
133 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | ||
134 | #define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) | ||
135 | #define cpu_has_de boot_cpu_has(X86_FEATURE_DE) | ||
136 | #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) | ||
137 | #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) | ||
138 | #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) | ||
139 | #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) | ||
140 | #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) | ||
141 | #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) | ||
142 | #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) | ||
143 | #define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) | ||
144 | #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) | ||
145 | #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) | ||
146 | #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) | ||
147 | #define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) | ||
148 | #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) | ||
149 | #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) | ||
150 | #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) | ||
151 | #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) | ||
152 | #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) | ||
153 | #define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) | ||
154 | #define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) | ||
155 | #define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) | ||
156 | #define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) | ||
157 | #define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) | ||
158 | #define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) | ||
159 | #define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) | ||
160 | #define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) | ||
161 | #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) | ||
162 | #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) | ||
163 | #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) | ||
164 | #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) | ||
165 | #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) | ||
166 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | ||
167 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | ||
168 | |||
169 | #endif /* __ASM_I386_CPUFEATURE_H */ | ||
170 | |||
171 | /* | ||
172 | * Local Variables: | ||
173 | * mode:c | ||
174 | * comment-column:42 | ||
175 | * End: | ||
176 | */ | ||
diff --git a/include/asm-x86/cpufeature_64.h b/include/asm-x86/cpufeature_64.h deleted file mode 100644 index e18496b7b850..000000000000 --- a/include/asm-x86/cpufeature_64.h +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* | ||
2 | * cpufeature_32.h | ||
3 | * | ||
4 | * Defines x86 CPU feature bits | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_X8664_CPUFEATURE_H | ||
8 | #define __ASM_X8664_CPUFEATURE_H | ||
9 | |||
10 | #include "cpufeature_32.h" | ||
11 | |||
12 | #undef cpu_has_vme | ||
13 | #define cpu_has_vme 0 | ||
14 | |||
15 | #undef cpu_has_pae | ||
16 | #define cpu_has_pae ___BUG___ | ||
17 | |||
18 | #undef cpu_has_mp | ||
19 | #define cpu_has_mp 1 /* XXX */ | ||
20 | |||
21 | #undef cpu_has_k6_mtrr | ||
22 | #define cpu_has_k6_mtrr 0 | ||
23 | |||
24 | #undef cpu_has_cyrix_arr | ||
25 | #define cpu_has_cyrix_arr 0 | ||
26 | |||
27 | #undef cpu_has_centaur_mcr | ||
28 | #define cpu_has_centaur_mcr 0 | ||
29 | |||
30 | #endif /* __ASM_X8664_CPUFEATURE_H */ | ||
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h index d11d47fc1a0e..409a649204aa 100644 --- a/include/asm-x86/delay.h +++ b/include/asm-x86/delay.h | |||
@@ -13,7 +13,7 @@ extern void __bad_ndelay(void); | |||
13 | 13 | ||
14 | extern void __udelay(unsigned long usecs); | 14 | extern void __udelay(unsigned long usecs); |
15 | extern void __ndelay(unsigned long nsecs); | 15 | extern void __ndelay(unsigned long nsecs); |
16 | extern void __const_udelay(unsigned long usecs); | 16 | extern void __const_udelay(unsigned long xloops); |
17 | extern void __delay(unsigned long loops); | 17 | extern void __delay(unsigned long loops); |
18 | 18 | ||
19 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | 19 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ |
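Renaming the parameter to xloops documents the fixed-point contract: udelay(n) pre-multiplies the microsecond count by 0x10c7 (2^32 / 10^6, rounded up so delays never undershoot), and __const_udelay() then scales by the calibrated loop rate and divides by 2^32 with a shift rather than a division. A sketch of just the arithmetic, with made-up calibration numbers (the real __const_udelay() also reads per-cpu calibration state):

#include <stdint.h>
#include <stdio.h>

#define HZ 250UL					/* made-up tick rate */
static const uint64_t loops_per_jiffy = 4000000;	/* made-up: 10^9 delay loops per second */

static uint64_t usecs_to_loops(uint64_t usecs)
{
	uint64_t xloops = usecs * 0x10c7;		/* what udelay() hands to __const_udelay() */
	return (xloops * loops_per_jiffy * HZ) >> 32;	/* scale by loops/sec, divide by 2^32 */
}

int main(void)
{
	/* 1000 us at 10^9 loops/sec comes out at roughly 10^6 loops. */
	printf("%llu\n", (unsigned long long)usecs_to_loops(1000));
	return 0;
}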
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index 6065c5092265..5b6a05d3a771 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h | |||
@@ -1,5 +1,381 @@ | |||
1 | #ifndef _ASM_DESC_H_ | ||
2 | #define _ASM_DESC_H_ | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | #include <asm/desc_defs.h> | ||
6 | #include <asm/ldt.h> | ||
7 | #include <asm/mmu.h> | ||
8 | #include <linux/smp.h> | ||
9 | |||
10 | static inline void fill_ldt(struct desc_struct *desc, | ||
11 | const struct user_desc *info) | ||
12 | { | ||
13 | desc->limit0 = info->limit & 0x0ffff; | ||
14 | desc->base0 = info->base_addr & 0x0000ffff; | ||
15 | |||
16 | desc->base1 = (info->base_addr & 0x00ff0000) >> 16; | ||
17 | desc->type = (info->read_exec_only ^ 1) << 1; | ||
18 | desc->type |= info->contents << 2; | ||
19 | desc->s = 1; | ||
20 | desc->dpl = 0x3; | ||
21 | desc->p = info->seg_not_present ^ 1; | ||
22 | desc->limit = (info->limit & 0xf0000) >> 16; | ||
23 | desc->avl = info->useable; | ||
24 | desc->d = info->seg_32bit; | ||
25 | desc->g = info->limit_in_pages; | ||
26 | desc->base2 = (info->base_addr & 0xff000000) >> 24; | ||
27 | } | ||
28 | |||
29 | extern struct desc_ptr idt_descr; | ||
30 | extern gate_desc idt_table[]; | ||
31 | |||
32 | #ifdef CONFIG_X86_64 | ||
33 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; | ||
34 | extern struct desc_ptr cpu_gdt_descr[]; | ||
35 | /* the cpu gdt accessor */ | ||
36 | #define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address) | ||
37 | |||
38 | static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, | ||
39 | unsigned dpl, unsigned ist, unsigned seg) | ||
40 | { | ||
41 | gate->offset_low = PTR_LOW(func); | ||
42 | gate->segment = __KERNEL_CS; | ||
43 | gate->ist = ist; | ||
44 | gate->p = 1; | ||
45 | gate->dpl = dpl; | ||
46 | gate->zero0 = 0; | ||
47 | gate->zero1 = 0; | ||
48 | gate->type = type; | ||
49 | gate->offset_middle = PTR_MIDDLE(func); | ||
50 | gate->offset_high = PTR_HIGH(func); | ||
51 | } | ||
52 | |||
53 | #else | ||
54 | struct gdt_page { | ||
55 | struct desc_struct gdt[GDT_ENTRIES]; | ||
56 | } __attribute__((aligned(PAGE_SIZE))); | ||
57 | DECLARE_PER_CPU(struct gdt_page, gdt_page); | ||
58 | |||
59 | static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) | ||
60 | { | ||
61 | return per_cpu(gdt_page, cpu).gdt; | ||
62 | } | ||
63 | |||
64 | static inline void pack_gate(gate_desc *gate, unsigned char type, | ||
65 | unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) | ||
66 | |||
67 | { | ||
68 | gate->a = (seg << 16) | (base & 0xffff); | ||
69 | gate->b = (base & 0xffff0000) | | ||
70 | (((0x80 | type | (dpl << 5)) & 0xff) << 8); | ||
71 | } | ||
72 | |||
73 | #endif | ||
74 | |||
75 | static inline int desc_empty(const void *ptr) | ||
76 | { | ||
77 | const u32 *desc = ptr; | ||
78 | return !(desc[0] | desc[1]); | ||
79 | } | ||
80 | |||
81 | #ifdef CONFIG_PARAVIRT | ||
82 | #include <asm/paravirt.h> | ||
83 | #else | ||
84 | #define load_TR_desc() native_load_tr_desc() | ||
85 | #define load_gdt(dtr) native_load_gdt(dtr) | ||
86 | #define load_idt(dtr) native_load_idt(dtr) | ||
87 | #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) | ||
88 | #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) | ||
89 | |||
90 | #define store_gdt(dtr) native_store_gdt(dtr) | ||
91 | #define store_idt(dtr) native_store_idt(dtr) | ||
92 | #define store_tr(tr) (tr = native_store_tr()) | ||
93 | #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) | ||
94 | |||
95 | #define load_TLS(t, cpu) native_load_tls(t, cpu) | ||
96 | #define set_ldt native_set_ldt | ||
97 | |||
98 | #define write_ldt_entry(dt, entry, desc) \ | ||
99 | native_write_ldt_entry(dt, entry, desc) | ||
100 | #define write_gdt_entry(dt, entry, desc, type) \ | ||
101 | native_write_gdt_entry(dt, entry, desc, type) | ||
102 | #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) | ||
103 | #endif | ||
104 | |||
105 | static inline void native_write_idt_entry(gate_desc *idt, int entry, | ||
106 | const gate_desc *gate) | ||
107 | { | ||
108 | memcpy(&idt[entry], gate, sizeof(*gate)); | ||
109 | } | ||
110 | |||
111 | static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, | ||
112 | const void *desc) | ||
113 | { | ||
114 | memcpy(&ldt[entry], desc, 8); | ||
115 | } | ||
116 | |||
117 | static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, | ||
118 | const void *desc, int type) | ||
119 | { | ||
120 | unsigned int size; | ||
121 | switch (type) { | ||
122 | case DESC_TSS: | ||
123 | size = sizeof(tss_desc); | ||
124 | break; | ||
125 | case DESC_LDT: | ||
126 | size = sizeof(ldt_desc); | ||
127 | break; | ||
128 | default: | ||
129 | size = sizeof(struct desc_struct); | ||
130 | break; | ||
131 | } | ||
132 | memcpy(&gdt[entry], desc, size); | ||
133 | } | ||
134 | |||
135 | static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, | ||
136 | unsigned long limit, unsigned char type, | ||
137 | unsigned char flags) | ||
138 | { | ||
139 | desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); | ||
140 | desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | | ||
141 | (limit & 0x000f0000) | ((type & 0xff) << 8) | | ||
142 | ((flags & 0xf) << 20); | ||
143 | desc->p = 1; | ||
144 | } | ||
145 | |||
146 | |||
147 | static inline void set_tssldt_descriptor(void *d, unsigned long addr, | ||
148 | unsigned type, unsigned size) | ||
149 | { | ||
150 | #ifdef CONFIG_X86_64 | ||
151 | struct ldttss_desc64 *desc = d; | ||
152 | memset(desc, 0, sizeof(*desc)); | ||
153 | desc->limit0 = size & 0xFFFF; | ||
154 | desc->base0 = PTR_LOW(addr); | ||
155 | desc->base1 = PTR_MIDDLE(addr) & 0xFF; | ||
156 | desc->type = type; | ||
157 | desc->p = 1; | ||
158 | desc->limit1 = (size >> 16) & 0xF; | ||
159 | desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; | ||
160 | desc->base3 = PTR_HIGH(addr); | ||
161 | #else | ||
162 | |||
163 | pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); | ||
164 | #endif | ||
165 | } | ||
166 | |||
167 | static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) | ||
168 | { | ||
169 | struct desc_struct *d = get_cpu_gdt_table(cpu); | ||
170 | tss_desc tss; | ||
171 | |||
172 | /* | ||
173 | * sizeof(unsigned long) coming from an extra "long" at the end | ||
174 | * of the iobitmap. See tss_struct definition in processor.h | ||
175 | * | ||
176 | * -1 because the seg base+limit should point to the address of the | ||
177 | * last valid byte | ||
178 | */ | ||
179 | set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, | ||
180 | IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); | ||
181 | write_gdt_entry(d, entry, &tss, DESC_TSS); | ||
182 | } | ||
183 | |||
184 | #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) | ||
185 | |||
186 | static inline void native_set_ldt(const void *addr, unsigned int entries) | ||
187 | { | ||
188 | if (likely(entries == 0)) | ||
189 | __asm__ __volatile__("lldt %w0"::"q" (0)); | ||
190 | else { | ||
191 | unsigned cpu = smp_processor_id(); | ||
192 | ldt_desc ldt; | ||
193 | |||
194 | set_tssldt_descriptor(&ldt, (unsigned long)addr, | ||
195 | DESC_LDT, entries * sizeof(ldt) - 1); | ||
196 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, | ||
197 | &ldt, DESC_LDT); | ||
198 | __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static inline void native_load_tr_desc(void) | ||
203 | { | ||
204 | asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); | ||
205 | } | ||
206 | |||
207 | static inline void native_load_gdt(const struct desc_ptr *dtr) | ||
208 | { | ||
209 | asm volatile("lgdt %0"::"m" (*dtr)); | ||
210 | } | ||
211 | |||
212 | static inline void native_load_idt(const struct desc_ptr *dtr) | ||
213 | { | ||
214 | asm volatile("lidt %0"::"m" (*dtr)); | ||
215 | } | ||
216 | |||
217 | static inline void native_store_gdt(struct desc_ptr *dtr) | ||
218 | { | ||
219 | asm volatile("sgdt %0":"=m" (*dtr)); | ||
220 | } | ||
221 | |||
222 | static inline void native_store_idt(struct desc_ptr *dtr) | ||
223 | { | ||
224 | asm volatile("sidt %0":"=m" (*dtr)); | ||
225 | } | ||
226 | |||
227 | static inline unsigned long native_store_tr(void) | ||
228 | { | ||
229 | unsigned long tr; | ||
230 | asm volatile("str %0":"=r" (tr)); | ||
231 | return tr; | ||
232 | } | ||
233 | |||
234 | static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) | ||
235 | { | ||
236 | unsigned int i; | ||
237 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
238 | |||
239 | for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) | ||
240 | gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; | ||
241 | } | ||
242 | |||
243 | #define _LDT_empty(info) (\ | ||
244 | (info)->base_addr == 0 && \ | ||
245 | (info)->limit == 0 && \ | ||
246 | (info)->contents == 0 && \ | ||
247 | (info)->read_exec_only == 1 && \ | ||
248 | (info)->seg_32bit == 0 && \ | ||
249 | (info)->limit_in_pages == 0 && \ | ||
250 | (info)->seg_not_present == 1 && \ | ||
251 | (info)->useable == 0) | ||
252 | |||
253 | #ifdef CONFIG_X86_64 | ||
254 | #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) | ||
255 | #else | ||
256 | #define LDT_empty(info) (_LDT_empty(info)) | ||
257 | #endif | ||
258 | |||
259 | static inline void clear_LDT(void) | ||
260 | { | ||
261 | set_ldt(NULL, 0); | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * load one particular LDT into the current CPU | ||
266 | */ | ||
267 | static inline void load_LDT_nolock(mm_context_t *pc) | ||
268 | { | ||
269 | set_ldt(pc->ldt, pc->size); | ||
270 | } | ||
271 | |||
272 | static inline void load_LDT(mm_context_t *pc) | ||
273 | { | ||
274 | preempt_disable(); | ||
275 | load_LDT_nolock(pc); | ||
276 | preempt_enable(); | ||
277 | } | ||
278 | |||
279 | static inline unsigned long get_desc_base(const struct desc_struct *desc) | ||
280 | { | ||
281 | return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24); | ||
282 | } | ||
283 | |||
284 | static inline unsigned long get_desc_limit(const struct desc_struct *desc) | ||
285 | { | ||
286 | return desc->limit0 | (desc->limit << 16); | ||
287 | } | ||
288 | |||
289 | static inline void _set_gate(int gate, unsigned type, void *addr, | ||
290 | unsigned dpl, unsigned ist, unsigned seg) | ||
291 | { | ||
292 | gate_desc s; | ||
293 | pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); | ||
294 | /* | ||
295 | * does not need to be atomic because it is only done once at | ||
296 | * setup time | ||
297 | */ | ||
298 | write_idt_entry(idt_table, gate, &s); | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * This needs to use 'idt_table' rather than 'idt', and | ||
303 | * thus use the _nonmapped_ version of the IDT, as the | ||
304 | * Pentium F0 0F bugfix can have resulted in the mapped | ||
305 | * IDT being write-protected. | ||
306 | */ | ||
307 | static inline void set_intr_gate(unsigned int n, void *addr) | ||
308 | { | ||
309 | BUG_ON((unsigned)n > 0xFF); | ||
310 | _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * This routine sets up an interrupt gate at descriptor privilege level (DPL) 3. | ||
315 | */ | ||
316 | static inline void set_system_intr_gate(unsigned int n, void *addr) | ||
317 | { | ||
318 | BUG_ON((unsigned)n > 0xFF); | ||
319 | _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); | ||
320 | } | ||
321 | |||
322 | static inline void set_trap_gate(unsigned int n, void *addr) | ||
323 | { | ||
324 | BUG_ON((unsigned)n > 0xFF); | ||
325 | _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); | ||
326 | } | ||
327 | |||
328 | static inline void set_system_gate(unsigned int n, void *addr) | ||
329 | { | ||
330 | BUG_ON((unsigned)n > 0xFF); | ||
1 | #ifdef CONFIG_X86_32 | 331 | #ifdef CONFIG_X86_32 |
2 | # include "desc_32.h" | 332 | _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); |
333 | #else | ||
334 | _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); | ||
335 | #endif | ||
336 | } | ||
337 | |||
338 | static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) | ||
339 | { | ||
340 | BUG_ON((unsigned)n > 0xFF); | ||
341 | _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); | ||
342 | } | ||
343 | |||
344 | static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) | ||
345 | { | ||
346 | BUG_ON((unsigned)n > 0xFF); | ||
347 | _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); | ||
348 | } | ||
349 | |||
350 | static inline void set_system_gate_ist(int n, void *addr, unsigned ist) | ||
351 | { | ||
352 | BUG_ON((unsigned)n > 0xFF); | ||
353 | _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); | ||
354 | } | ||
355 | |||
3 | #else | 356 | #else |
4 | # include "desc_64.h" | 357 | /* |
358 | * GET_DESC_BASE reads the descriptor base of the specified segment. | ||
359 | * | ||
360 | * Args: | ||
361 | * idx - descriptor index | ||
362 | * gdt - GDT pointer | ||
363 | * base - 32bit register to which the base will be written | ||
364 | * lo_w - lo word of the "base" register | ||
365 | * lo_b - lo byte of the "base" register | ||
366 | * hi_b - hi byte of the low word of the "base" register | ||
367 | * | ||
368 | * Example: | ||
369 | * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) | ||
370 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. | ||
371 | */ | ||
372 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ | ||
373 | movb idx*8+4(gdt), lo_b; \ | ||
374 | movb idx*8+7(gdt), hi_b; \ | ||
375 | shll $16, base; \ | ||
376 | movw idx*8+2(gdt), lo_w; | ||
377 | |||
378 | |||
379 | #endif /* __ASSEMBLY__ */ | ||
380 | |||
5 | #endif | 381 | #endif |
diff --git a/include/asm-x86/desc_32.h b/include/asm-x86/desc_32.h deleted file mode 100644 index c547403f341d..000000000000 --- a/include/asm-x86/desc_32.h +++ /dev/null | |||
@@ -1,244 +0,0 @@ | |||
1 | #ifndef __ARCH_DESC_H | ||
2 | #define __ARCH_DESC_H | ||
3 | |||
4 | #include <asm/ldt.h> | ||
5 | #include <asm/segment.h> | ||
6 | |||
7 | #ifndef __ASSEMBLY__ | ||
8 | |||
9 | #include <linux/preempt.h> | ||
10 | #include <linux/smp.h> | ||
11 | #include <linux/percpu.h> | ||
12 | |||
13 | #include <asm/mmu.h> | ||
14 | |||
15 | struct Xgt_desc_struct { | ||
16 | unsigned short size; | ||
17 | unsigned long address __attribute__((packed)); | ||
18 | unsigned short pad; | ||
19 | } __attribute__ ((packed)); | ||
20 | |||
21 | struct gdt_page | ||
22 | { | ||
23 | struct desc_struct gdt[GDT_ENTRIES]; | ||
24 | } __attribute__((aligned(PAGE_SIZE))); | ||
25 | DECLARE_PER_CPU(struct gdt_page, gdt_page); | ||
26 | |||
27 | static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) | ||
28 | { | ||
29 | return per_cpu(gdt_page, cpu).gdt; | ||
30 | } | ||
31 | |||
32 | extern struct Xgt_desc_struct idt_descr; | ||
33 | extern struct desc_struct idt_table[]; | ||
34 | extern void set_intr_gate(unsigned int irq, void * addr); | ||
35 | |||
36 | static inline void pack_descriptor(__u32 *a, __u32 *b, | ||
37 | unsigned long base, unsigned long limit, unsigned char type, unsigned char flags) | ||
38 | { | ||
39 | *a = ((base & 0xffff) << 16) | (limit & 0xffff); | ||
40 | *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | | ||
41 | (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20); | ||
42 | } | ||
43 | |||
44 | static inline void pack_gate(__u32 *a, __u32 *b, | ||
45 | unsigned long base, unsigned short seg, unsigned char type, unsigned char flags) | ||
46 | { | ||
47 | *a = (seg << 16) | (base & 0xffff); | ||
48 | *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff); | ||
49 | } | ||
50 | |||
51 | #define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */ | ||
52 | #define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */ | ||
53 | #define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */ | ||
54 | #define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */ | ||
55 | #define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */ | ||
56 | #define DESCTYPE_DPL3 0x60 /* DPL-3 */ | ||
57 | #define DESCTYPE_S 0x10 /* !system */ | ||
58 | |||
59 | #ifdef CONFIG_PARAVIRT | ||
60 | #include <asm/paravirt.h> | ||
61 | #else | ||
62 | #define load_TR_desc() native_load_tr_desc() | ||
63 | #define load_gdt(dtr) native_load_gdt(dtr) | ||
64 | #define load_idt(dtr) native_load_idt(dtr) | ||
65 | #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) | ||
66 | #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) | ||
67 | |||
68 | #define store_gdt(dtr) native_store_gdt(dtr) | ||
69 | #define store_idt(dtr) native_store_idt(dtr) | ||
70 | #define store_tr(tr) (tr = native_store_tr()) | ||
71 | #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) | ||
72 | |||
73 | #define load_TLS(t, cpu) native_load_tls(t, cpu) | ||
74 | #define set_ldt native_set_ldt | ||
75 | |||
76 | #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
77 | #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
78 | #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
79 | #endif | ||
80 | |||
81 | static inline void write_dt_entry(struct desc_struct *dt, | ||
82 | int entry, u32 entry_low, u32 entry_high) | ||
83 | { | ||
84 | dt[entry].a = entry_low; | ||
85 | dt[entry].b = entry_high; | ||
86 | } | ||
87 | |||
88 | static inline void native_set_ldt(const void *addr, unsigned int entries) | ||
89 | { | ||
90 | if (likely(entries == 0)) | ||
91 | __asm__ __volatile__("lldt %w0"::"q" (0)); | ||
92 | else { | ||
93 | unsigned cpu = smp_processor_id(); | ||
94 | __u32 a, b; | ||
95 | |||
96 | pack_descriptor(&a, &b, (unsigned long)addr, | ||
97 | entries * sizeof(struct desc_struct) - 1, | ||
98 | DESCTYPE_LDT, 0); | ||
99 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); | ||
100 | __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | |||
105 | static inline void native_load_tr_desc(void) | ||
106 | { | ||
107 | asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); | ||
108 | } | ||
109 | |||
110 | static inline void native_load_gdt(const struct Xgt_desc_struct *dtr) | ||
111 | { | ||
112 | asm volatile("lgdt %0"::"m" (*dtr)); | ||
113 | } | ||
114 | |||
115 | static inline void native_load_idt(const struct Xgt_desc_struct *dtr) | ||
116 | { | ||
117 | asm volatile("lidt %0"::"m" (*dtr)); | ||
118 | } | ||
119 | |||
120 | static inline void native_store_gdt(struct Xgt_desc_struct *dtr) | ||
121 | { | ||
122 | asm ("sgdt %0":"=m" (*dtr)); | ||
123 | } | ||
124 | |||
125 | static inline void native_store_idt(struct Xgt_desc_struct *dtr) | ||
126 | { | ||
127 | asm ("sidt %0":"=m" (*dtr)); | ||
128 | } | ||
129 | |||
130 | static inline unsigned long native_store_tr(void) | ||
131 | { | ||
132 | unsigned long tr; | ||
133 | asm ("str %0":"=r" (tr)); | ||
134 | return tr; | ||
135 | } | ||
136 | |||
137 | static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) | ||
138 | { | ||
139 | unsigned int i; | ||
140 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | ||
141 | |||
142 | for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) | ||
143 | gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; | ||
144 | } | ||
145 | |||
146 | static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) | ||
147 | { | ||
148 | __u32 a, b; | ||
149 | pack_gate(&a, &b, (unsigned long)addr, seg, type, 0); | ||
150 | write_idt_entry(idt_table, gate, a, b); | ||
151 | } | ||
152 | |||
153 | static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr) | ||
154 | { | ||
155 | __u32 a, b; | ||
156 | pack_descriptor(&a, &b, (unsigned long)addr, | ||
157 | offsetof(struct tss_struct, __cacheline_filler) - 1, | ||
158 | DESCTYPE_TSS, 0); | ||
159 | write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); | ||
160 | } | ||
161 | |||
162 | |||
163 | #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) | ||
164 | |||
165 | #define LDT_entry_a(info) \ | ||
166 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | ||
167 | |||
168 | #define LDT_entry_b(info) \ | ||
169 | (((info)->base_addr & 0xff000000) | \ | ||
170 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | ||
171 | ((info)->limit & 0xf0000) | \ | ||
172 | (((info)->read_exec_only ^ 1) << 9) | \ | ||
173 | ((info)->contents << 10) | \ | ||
174 | (((info)->seg_not_present ^ 1) << 15) | \ | ||
175 | ((info)->seg_32bit << 22) | \ | ||
176 | ((info)->limit_in_pages << 23) | \ | ||
177 | ((info)->useable << 20) | \ | ||
178 | 0x7000) | ||
179 | |||
180 | #define LDT_empty(info) (\ | ||
181 | (info)->base_addr == 0 && \ | ||
182 | (info)->limit == 0 && \ | ||
183 | (info)->contents == 0 && \ | ||
184 | (info)->read_exec_only == 1 && \ | ||
185 | (info)->seg_32bit == 0 && \ | ||
186 | (info)->limit_in_pages == 0 && \ | ||
187 | (info)->seg_not_present == 1 && \ | ||
188 | (info)->useable == 0 ) | ||
189 | |||
190 | static inline void clear_LDT(void) | ||
191 | { | ||
192 | set_ldt(NULL, 0); | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * load one particular LDT into the current CPU | ||
197 | */ | ||
198 | static inline void load_LDT_nolock(mm_context_t *pc) | ||
199 | { | ||
200 | set_ldt(pc->ldt, pc->size); | ||
201 | } | ||
202 | |||
203 | static inline void load_LDT(mm_context_t *pc) | ||
204 | { | ||
205 | preempt_disable(); | ||
206 | load_LDT_nolock(pc); | ||
207 | preempt_enable(); | ||
208 | } | ||
209 | |||
210 | static inline unsigned long get_desc_base(unsigned long *desc) | ||
211 | { | ||
212 | unsigned long base; | ||
213 | base = ((desc[0] >> 16) & 0x0000ffff) | | ||
214 | ((desc[1] << 16) & 0x00ff0000) | | ||
215 | (desc[1] & 0xff000000); | ||
216 | return base; | ||
217 | } | ||
218 | |||
219 | #else /* __ASSEMBLY__ */ | ||
220 | |||
221 | /* | ||
222 | * GET_DESC_BASE reads the descriptor base of the specified segment. | ||
223 | * | ||
224 | * Args: | ||
225 | * idx - descriptor index | ||
226 | * gdt - GDT pointer | ||
227 | * base - 32bit register to which the base will be written | ||
228 | * lo_w - lo word of the "base" register | ||
229 | * lo_b - lo byte of the "base" register | ||
230 | * hi_b - hi byte of the low word of the "base" register | ||
231 | * | ||
232 | * Example: | ||
233 | * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) | ||
234 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. | ||
235 | */ | ||
236 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ | ||
237 | movb idx*8+4(gdt), lo_b; \ | ||
238 | movb idx*8+7(gdt), hi_b; \ | ||
239 | shll $16, base; \ | ||
240 | movw idx*8+2(gdt), lo_w; | ||
241 | |||
242 | #endif /* !__ASSEMBLY__ */ | ||
243 | |||
244 | #endif | ||
diff --git a/include/asm-x86/desc_64.h b/include/asm-x86/desc_64.h index 7d9c938e69fd..8b137891791f 100644 --- a/include/asm-x86/desc_64.h +++ b/include/asm-x86/desc_64.h | |||
@@ -1,204 +1 @@ | |||
1 | /* Written 2000 by Andi Kleen */ | ||
2 | #ifndef __ARCH_DESC_H | ||
3 | #define __ARCH_DESC_H | ||
4 | |||
5 | #include <linux/threads.h> | ||
6 | #include <asm/ldt.h> | ||
7 | |||
8 | #ifndef __ASSEMBLY__ | ||
9 | |||
10 | #include <linux/string.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <asm/desc_defs.h> | ||
13 | |||
14 | #include <asm/segment.h> | ||
15 | #include <asm/mmu.h> | ||
16 | |||
17 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; | ||
18 | |||
19 | #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) | ||
20 | #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) | ||
21 | #define clear_LDT() asm volatile("lldt %w0"::"r" (0)) | ||
22 | |||
23 | static inline unsigned long __store_tr(void) | ||
24 | { | ||
25 | unsigned long tr; | ||
26 | |||
27 | asm volatile ("str %w0":"=r" (tr)); | ||
28 | return tr; | ||
29 | } | ||
30 | |||
31 | #define store_tr(tr) (tr) = __store_tr() | ||
32 | |||
33 | /* | ||
34 | * This is the ldt that every process will get unless we need | ||
35 | * something other than this. | ||
36 | */ | ||
37 | extern struct desc_struct default_ldt[]; | ||
38 | extern struct gate_struct idt_table[]; | ||
39 | extern struct desc_ptr cpu_gdt_descr[]; | ||
40 | |||
41 | /* the cpu gdt accessor */ | ||
42 | #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address) | ||
43 | |||
44 | static inline void load_gdt(const struct desc_ptr *ptr) | ||
45 | { | ||
46 | asm volatile("lgdt %w0"::"m" (*ptr)); | ||
47 | } | ||
48 | |||
49 | static inline void store_gdt(struct desc_ptr *ptr) | ||
50 | { | ||
51 | asm("sgdt %w0":"=m" (*ptr)); | ||
52 | } | ||
53 | |||
54 | static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist) | ||
55 | { | ||
56 | struct gate_struct s; | ||
57 | s.offset_low = PTR_LOW(func); | ||
58 | s.segment = __KERNEL_CS; | ||
59 | s.ist = ist; | ||
60 | s.p = 1; | ||
61 | s.dpl = dpl; | ||
62 | s.zero0 = 0; | ||
63 | s.zero1 = 0; | ||
64 | s.type = type; | ||
65 | s.offset_middle = PTR_MIDDLE(func); | ||
66 | s.offset_high = PTR_HIGH(func); | ||
67 | /* does not need to be atomic because it is only done once at setup time */ | ||
68 | memcpy(adr, &s, 16); | ||
69 | } | ||
70 | |||
71 | static inline void set_intr_gate(int nr, void *func) | ||
72 | { | ||
73 | BUG_ON((unsigned)nr > 0xFF); | ||
74 | _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); | ||
75 | } | ||
76 | |||
77 | static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) | ||
78 | { | ||
79 | BUG_ON((unsigned)nr > 0xFF); | ||
80 | _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); | ||
81 | } | ||
82 | |||
83 | static inline void set_system_gate(int nr, void *func) | ||
84 | { | ||
85 | BUG_ON((unsigned)nr > 0xFF); | ||
86 | _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); | ||
87 | } | ||
88 | |||
89 | static inline void set_system_gate_ist(int nr, void *func, unsigned ist) | ||
90 | { | ||
91 | _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist); | ||
92 | } | ||
93 | |||
94 | static inline void load_idt(const struct desc_ptr *ptr) | ||
95 | { | ||
96 | asm volatile("lidt %w0"::"m" (*ptr)); | ||
97 | } | ||
98 | |||
99 | static inline void store_idt(struct desc_ptr *dtr) | ||
100 | { | ||
101 | asm("sidt %w0":"=m" (*dtr)); | ||
102 | } | ||
103 | |||
104 | static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, | ||
105 | unsigned size) | ||
106 | { | ||
107 | struct ldttss_desc d; | ||
108 | memset(&d,0,sizeof(d)); | ||
109 | d.limit0 = size & 0xFFFF; | ||
110 | d.base0 = PTR_LOW(tss); | ||
111 | d.base1 = PTR_MIDDLE(tss) & 0xFF; | ||
112 | d.type = type; | ||
113 | d.p = 1; | ||
114 | d.limit1 = (size >> 16) & 0xF; | ||
115 | d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; | ||
116 | d.base3 = PTR_HIGH(tss); | ||
117 | memcpy(ptr, &d, 16); | ||
118 | } | ||
119 | |||
120 | static inline void set_tss_desc(unsigned cpu, void *addr) | ||
121 | { | ||
122 | /* | ||
123 | * sizeof(unsigned long) coming from an extra "long" at the end | ||
124 | * of the iobitmap. See tss_struct definition in processor.h | ||
125 | * | ||
126 | * -1? seg base+limit should be pointing to the address of the | ||
127 | * last valid byte | ||
128 | */ | ||
129 | set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS], | ||
130 | (unsigned long)addr, DESC_TSS, | ||
131 | IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); | ||
132 | } | ||
133 | |||
134 | static inline void set_ldt_desc(unsigned cpu, void *addr, int size) | ||
135 | { | ||
136 | set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr, | ||
137 | DESC_LDT, size * 8 - 1); | ||
138 | } | ||
139 | |||
140 | #define LDT_entry_a(info) \ | ||
141 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | ||
142 | /* Don't allow setting of the lm bit. It is useless anyways because | ||
143 | 64bit system calls require __USER_CS. */ | ||
144 | #define LDT_entry_b(info) \ | ||
145 | (((info)->base_addr & 0xff000000) | \ | ||
146 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | ||
147 | ((info)->limit & 0xf0000) | \ | ||
148 | (((info)->read_exec_only ^ 1) << 9) | \ | ||
149 | ((info)->contents << 10) | \ | ||
150 | (((info)->seg_not_present ^ 1) << 15) | \ | ||
151 | ((info)->seg_32bit << 22) | \ | ||
152 | ((info)->limit_in_pages << 23) | \ | ||
153 | ((info)->useable << 20) | \ | ||
154 | /* ((info)->lm << 21) | */ \ | ||
155 | 0x7000) | ||
156 | |||
157 | #define LDT_empty(info) (\ | ||
158 | (info)->base_addr == 0 && \ | ||
159 | (info)->limit == 0 && \ | ||
160 | (info)->contents == 0 && \ | ||
161 | (info)->read_exec_only == 1 && \ | ||
162 | (info)->seg_32bit == 0 && \ | ||
163 | (info)->limit_in_pages == 0 && \ | ||
164 | (info)->seg_not_present == 1 && \ | ||
165 | (info)->useable == 0 && \ | ||
166 | (info)->lm == 0) | ||
167 | |||
168 | static inline void load_TLS(struct thread_struct *t, unsigned int cpu) | ||
169 | { | ||
170 | unsigned int i; | ||
171 | u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN); | ||
172 | |||
173 | for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) | ||
174 | gdt[i] = t->tls_array[i]; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * load one particular LDT into the current CPU | ||
179 | */ | ||
180 | static inline void load_LDT_nolock (mm_context_t *pc, int cpu) | ||
181 | { | ||
182 | int count = pc->size; | ||
183 | |||
184 | if (likely(!count)) { | ||
185 | clear_LDT(); | ||
186 | return; | ||
187 | } | ||
188 | |||
189 | set_ldt_desc(cpu, pc->ldt, count); | ||
190 | load_LDT_desc(); | ||
191 | } | ||
192 | |||
193 | static inline void load_LDT(mm_context_t *pc) | ||
194 | { | ||
195 | int cpu = get_cpu(); | ||
196 | load_LDT_nolock(pc, cpu); | ||
197 | put_cpu(); | ||
198 | } | ||
199 | |||
200 | extern struct desc_ptr idt_descr; | ||
201 | |||
202 | #endif /* !__ASSEMBLY__ */ | ||
203 | |||
204 | #endif | ||
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index 089004070099..e33f078b3e54 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h | |||
@@ -11,26 +11,36 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | /* | ||
15 | * FIXME: Accessing the desc_struct through its fields is more elegant, | ||
16 | * and should be the one valid thing to do. However, a lot of open code | ||
17 | * still touches the a and b accessors, and doing this allows us to do it | ||
18 | * incrementally. We keep the signature as a struct, rather than a union, | ||
19 | * so we can get rid of it transparently in the future -- glommer | ||
20 | */ | ||
14 | // 8 byte segment descriptor | 21 | // 8 byte segment descriptor |
15 | struct desc_struct { | 22 | struct desc_struct { |
16 | u16 limit0; | 23 | union { |
17 | u16 base0; | 24 | struct { unsigned int a, b; }; |
18 | unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; | 25 | struct { |
19 | unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; | 26 | u16 limit0; |
20 | } __attribute__((packed)); | 27 | u16 base0; |
28 | unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; | ||
29 | unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; | ||
30 | }; | ||
21 | 31 | ||
22 | struct n_desc_struct { | 32 | }; |
23 | unsigned int a,b; | 33 | } __attribute__((packed)); |
24 | }; | ||
25 | 34 | ||
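The union above is exactly what the FIXME promises to phase out: the anonymous union lets legacy open code keep touching the raw a/b words while new code uses the named bitfields, and both views alias the same eight bytes. A user-space restatement of the layout (stdint spellings, GCC/x86 little-endian bitfield order assumed) showing the two views agreeing on the classic flat code-segment encoding:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct desc {
	union {
		struct { unsigned int a, b; };
		struct {
			uint16_t limit0;
			uint16_t base0;
			unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
			unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
		};
	};
} __attribute__((packed));

int main(void)
{
	struct desc d;
	memset(&d, 0, sizeof(d));
	d.a = 0x0000ffff;	/* write through the legacy word view */
	d.b = 0x00cf9a00;	/* flat 4 GB ring-0 code segment */
	assert(d.limit0 == 0xffff && d.base0 == 0);	/* read back through the fields */
	assert(d.type == 0xa && d.s == 1 && d.dpl == 0 && d.p == 1);
	assert(d.limit == 0xf && d.g == 1 && d.d == 1);
	return 0;
}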
26 | enum { | 35 | enum { |
27 | GATE_INTERRUPT = 0xE, | 36 | GATE_INTERRUPT = 0xE, |
28 | GATE_TRAP = 0xF, | 37 | GATE_TRAP = 0xF, |
29 | GATE_CALL = 0xC, | 38 | GATE_CALL = 0xC, |
39 | GATE_TASK = 0x5, | ||
30 | }; | 40 | }; |
31 | 41 | ||
32 | // 16byte gate | 42 | // 16byte gate |
33 | struct gate_struct { | 43 | struct gate_struct64 { |
34 | u16 offset_low; | 44 | u16 offset_low; |
35 | u16 segment; | 45 | u16 segment; |
36 | unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; | 46 | unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; |
@@ -39,17 +49,18 @@ struct gate_struct { | |||
39 | u32 zero1; | 49 | u32 zero1; |
40 | } __attribute__((packed)); | 50 | } __attribute__((packed)); |
41 | 51 | ||
42 | #define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) | 52 | #define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF) |
43 | #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) | 53 | #define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF) |
44 | #define PTR_HIGH(x) ((unsigned long)(x) >> 32) | 54 | #define PTR_HIGH(x) ((unsigned long long)(x) >> 32) |
45 | 55 | ||
46 | enum { | 56 | enum { |
47 | DESC_TSS = 0x9, | 57 | DESC_TSS = 0x9, |
48 | DESC_LDT = 0x2, | 58 | DESC_LDT = 0x2, |
59 | DESCTYPE_S = 0x10, /* !system */ | ||
49 | }; | 60 | }; |
50 | 61 | ||
51 | // LDT or TSS descriptor in the GDT. 16 bytes. | 62 | // LDT or TSS descriptor in the GDT. 16 bytes. |
52 | struct ldttss_desc { | 63 | struct ldttss_desc64 { |
53 | u16 limit0; | 64 | u16 limit0; |
54 | u16 base0; | 65 | u16 base0; |
55 | unsigned base1 : 8, type : 5, dpl : 2, p : 1; | 66 | unsigned base1 : 8, type : 5, dpl : 2, p : 1; |
@@ -58,6 +69,16 @@ struct ldttss_desc { | |||
58 | u32 zero1; | 69 | u32 zero1; |
59 | } __attribute__((packed)); | 70 | } __attribute__((packed)); |
60 | 71 | ||
72 | #ifdef CONFIG_X86_64 | ||
73 | typedef struct gate_struct64 gate_desc; | ||
74 | typedef struct ldttss_desc64 ldt_desc; | ||
75 | typedef struct ldttss_desc64 tss_desc; | ||
76 | #else | ||
77 | typedef struct desc_struct gate_desc; | ||
78 | typedef struct desc_struct ldt_desc; | ||
79 | typedef struct desc_struct tss_desc; | ||
80 | #endif | ||
81 | |||
61 | struct desc_ptr { | 82 | struct desc_ptr { |
62 | unsigned short size; | 83 | unsigned short size; |
63 | unsigned long address; | 84 | unsigned long address; |
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index 9f936c61a4e5..e9733ce89880 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h | |||
@@ -1,5 +1,319 @@ | |||
1 | /* | ||
2 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
3 | * Written by Hennus Bergman, 1992. | ||
4 | * High DMA channel support & info by Hannu Savolainen | ||
5 | * and John Boyd, Nov. 1992. | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_X86_DMA_H | ||
9 | #define _ASM_X86_DMA_H | ||
10 | |||
11 | #include <linux/spinlock.h> /* And spinlocks */ | ||
12 | #include <asm/io.h> /* need byte IO */ | ||
13 | #include <linux/delay.h> | ||
14 | |||
15 | |||
16 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | ||
17 | #define dma_outb outb_p | ||
18 | #else | ||
19 | #define dma_outb outb | ||
20 | #endif | ||
21 | |||
22 | #define dma_inb inb | ||
23 | |||
24 | /* | ||
25 | * NOTES about DMA transfers: | ||
26 | * | ||
27 | * controller 1: channels 0-3, byte operations, ports 00-1F | ||
28 | * controller 2: channels 4-7, word operations, ports C0-DF | ||
29 | * | ||
30 | * - ALL registers are 8 bits only, regardless of transfer size | ||
31 | * - channel 4 is not used - cascades 1 into 2. | ||
32 | * - channels 0-3 are byte - addresses/counts are for physical bytes | ||
33 | * - channels 5-7 are word - addresses/counts are for physical words | ||
34 | * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries | ||
35 | * - transfer count loaded to registers is 1 less than actual count | ||
36 | * - controller 2 offsets are all even (2x offsets for controller 1) | ||
37 | * - page registers for 5-7 don't use data bit 0, represent 128K pages | ||
38 | * - page registers for 0-3 use bit 0, represent 64K pages | ||
39 | * | ||
40 | * DMA transfers are limited to the lower 16MB of _physical_ memory. | ||
41 | * Note that addresses loaded into registers must be _physical_ addresses, | ||
42 | * not logical addresses (which may differ if paging is active). | ||
43 | * | ||
44 | * Address mapping for channels 0-3: | ||
45 | * | ||
46 | * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) | ||
47 | * | ... | | ... | | ... | | ||
48 | * | ... | | ... | | ... | | ||
49 | * | ... | | ... | | ... | | ||
50 | * P7 ... P0 A7 ... A0 A7 ... A0 | ||
51 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
52 | * | ||
53 | * Address mapping for channels 5-7: | ||
54 | * | ||
55 | * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) | ||
56 | * | ... | \ \ ... \ \ \ ... \ \ | ||
57 | * | ... | \ \ ... \ \ \ ... \ (not used) | ||
58 | * | ... | \ \ ... \ \ \ ... \ | ||
59 | * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 | ||
60 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
61 | * | ||
62 | * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses | ||
63 | * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at | ||
64 | * the hardware level, so odd-byte transfers aren't possible). | ||
65 | * | ||
66 | * Transfer count (_not # bytes_) is limited to 64K, represented as actual | ||
67 | * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, | ||
68 | * and up to 128K bytes may be transferred on channels 5-7 in one operation. | ||
69 | * | ||
70 | */ | ||
71 | |||
72 | #define MAX_DMA_CHANNELS 8 | ||
73 | |||
1 | #ifdef CONFIG_X86_32 | 74 | #ifdef CONFIG_X86_32 |
2 | # include "dma_32.h" | 75 | |
76 | /* The maximum address that we can perform a DMA transfer to on this platform */ | ||
77 | #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) | ||
78 | |||
79 | #else | ||
80 | |||
81 | /* 16MB ISA DMA zone */ | ||
82 | #define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) | ||
83 | |||
84 | /* 4GB broken PCI/AGP hardware bus master zone */ | ||
85 | #define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) | ||
86 | |||
87 | /* Compat define for old dma zone */ | ||
88 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) | ||
89 | |||
90 | #endif | ||
91 | |||
92 | /* 8237 DMA controllers */ | ||
93 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
94 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
95 | |||
96 | /* DMA controller registers */ | ||
97 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
98 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
99 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
100 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
101 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
102 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
103 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
104 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
105 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
106 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
107 | |||
108 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
109 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
110 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
111 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
112 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
113 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
114 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
115 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
116 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
117 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
118 | |||
119 | #define DMA_ADDR_0 0x00 /* DMA address registers */ | ||
120 | #define DMA_ADDR_1 0x02 | ||
121 | #define DMA_ADDR_2 0x04 | ||
122 | #define DMA_ADDR_3 0x06 | ||
123 | #define DMA_ADDR_4 0xC0 | ||
124 | #define DMA_ADDR_5 0xC4 | ||
125 | #define DMA_ADDR_6 0xC8 | ||
126 | #define DMA_ADDR_7 0xCC | ||
127 | |||
128 | #define DMA_CNT_0 0x01 /* DMA count registers */ | ||
129 | #define DMA_CNT_1 0x03 | ||
130 | #define DMA_CNT_2 0x05 | ||
131 | #define DMA_CNT_3 0x07 | ||
132 | #define DMA_CNT_4 0xC2 | ||
133 | #define DMA_CNT_5 0xC6 | ||
134 | #define DMA_CNT_6 0xCA | ||
135 | #define DMA_CNT_7 0xCE | ||
136 | |||
137 | #define DMA_PAGE_0 0x87 /* DMA page registers */ | ||
138 | #define DMA_PAGE_1 0x83 | ||
139 | #define DMA_PAGE_2 0x81 | ||
140 | #define DMA_PAGE_3 0x82 | ||
141 | #define DMA_PAGE_5 0x8B | ||
142 | #define DMA_PAGE_6 0x89 | ||
143 | #define DMA_PAGE_7 0x8A | ||
144 | |||
145 | /* I/O to memory, no autoinit, increment, single mode */ | ||
146 | #define DMA_MODE_READ 0x44 | ||
147 | /* memory to I/O, no autoinit, increment, single mode */ | ||
148 | #define DMA_MODE_WRITE 0x48 | ||
149 | /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
150 | #define DMA_MODE_CASCADE 0xC0 | ||
151 | |||
152 | #define DMA_AUTOINIT 0x10 | ||
153 | |||
154 | |||
155 | extern spinlock_t dma_spin_lock; | ||
156 | |||
157 | static __inline__ unsigned long claim_dma_lock(void) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | spin_lock_irqsave(&dma_spin_lock, flags); | ||
161 | return flags; | ||
162 | } | ||
163 | |||
164 | static __inline__ void release_dma_lock(unsigned long flags) | ||
165 | { | ||
166 | spin_unlock_irqrestore(&dma_spin_lock, flags); | ||
167 | } | ||
168 | |||
169 | /* enable/disable a specific DMA channel */ | ||
170 | static __inline__ void enable_dma(unsigned int dmanr) | ||
171 | { | ||
172 | if (dmanr <= 3) | ||
173 | dma_outb(dmanr, DMA1_MASK_REG); | ||
174 | else | ||
175 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
176 | } | ||
177 | |||
178 | static __inline__ void disable_dma(unsigned int dmanr) | ||
179 | { | ||
180 | if (dmanr <= 3) | ||
181 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
182 | else | ||
183 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
184 | } | ||
185 | |||
186 | /* Clear the 'DMA Pointer Flip Flop'. | ||
187 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
188 | * Use this once to initialize the FF to a known state. | ||
189 | * After that, keep track of it. :-) | ||
190 | * --- In order to do that, the DMA routines below should --- | ||
191 | * --- only be used while holding the DMA lock ! --- | ||
192 | */ | ||
193 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
194 | { | ||
195 | if (dmanr <= 3) | ||
196 | dma_outb(0, DMA1_CLEAR_FF_REG); | ||
197 | else | ||
198 | dma_outb(0, DMA2_CLEAR_FF_REG); | ||
199 | } | ||
200 | |||
201 | /* set mode (above) for a specific DMA channel */ | ||
202 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
203 | { | ||
204 | if (dmanr <= 3) | ||
205 | dma_outb(mode | dmanr, DMA1_MODE_REG); | ||
206 | else | ||
207 | dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); | ||
208 | } | ||
209 | |||
210 | /* Set only the page register bits of the transfer address. | ||
211 | * This is used for successive transfers when we know the contents of | ||
212 | * the lower 16 bits of the DMA current address register, but a 64k boundary | ||
213 | * may have been crossed. | ||
214 | */ | ||
215 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | ||
216 | { | ||
217 | switch (dmanr) { | ||
218 | case 0: | ||
219 | dma_outb(pagenr, DMA_PAGE_0); | ||
220 | break; | ||
221 | case 1: | ||
222 | dma_outb(pagenr, DMA_PAGE_1); | ||
223 | break; | ||
224 | case 2: | ||
225 | dma_outb(pagenr, DMA_PAGE_2); | ||
226 | break; | ||
227 | case 3: | ||
228 | dma_outb(pagenr, DMA_PAGE_3); | ||
229 | break; | ||
230 | case 5: | ||
231 | dma_outb(pagenr & 0xfe, DMA_PAGE_5); | ||
232 | break; | ||
233 | case 6: | ||
234 | dma_outb(pagenr & 0xfe, DMA_PAGE_6); | ||
235 | break; | ||
236 | case 7: | ||
237 | dma_outb(pagenr & 0xfe, DMA_PAGE_7); | ||
238 | break; | ||
239 | } | ||
240 | } | ||
241 | |||
242 | |||
243 | /* Set transfer address & page bits for specific DMA channel. | ||
244 | * Assumes dma flipflop is clear. | ||
245 | */ | ||
246 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | ||
247 | { | ||
248 | set_dma_page(dmanr, a>>16); | ||
249 | if (dmanr <= 3) { | ||
250 | dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); | ||
251 | dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); | ||
252 | } else { | ||
253 | dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); | ||
254 | dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | |||
259 | /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for | ||
260 | * a specific DMA channel. | ||
261 | * You must ensure the parameters are valid. | ||
262 | * NOTE: from a manual: "the number of transfers is one more | ||
263 | * than the initial word count"! This is taken into account. | ||
264 | * Assumes dma flip-flop is clear. | ||
265 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
266 | */ | ||
267 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
268 | { | ||
269 | count--; | ||
270 | if (dmanr <= 3) { | ||
271 | dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | ||
272 | dma_outb((count >> 8) & 0xff, | ||
273 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | ||
274 | } else { | ||
275 | dma_outb((count >> 1) & 0xff, | ||
276 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | ||
277 | dma_outb((count >> 9) & 0xff, | ||
278 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | |||
283 | /* Get DMA residue count. After a DMA transfer, this | ||
284 | * should return zero. Reading this while a DMA transfer is | ||
285 | * still in progress will return unpredictable results. | ||
286 | * If called before the channel has been used, it may return 1. | ||
287 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
288 | * | ||
289 | * Assumes DMA flip-flop is clear. | ||
290 | */ | ||
291 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
292 | { | ||
293 | unsigned int io_port; | ||
294 | /* using short to get 16-bit wrap around */ | ||
295 | unsigned short count; | ||
296 | |||
297 | io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE | ||
298 | : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; | ||
299 | |||
300 | count = 1 + dma_inb(io_port); | ||
301 | count += dma_inb(io_port) << 8; | ||
302 | |||
303 | return (dmanr <= 3) ? count : (count << 1); | ||
304 | } | ||
305 | |||
306 | |||
307 | /* These are in kernel/dma.c: */ | ||
308 | extern int request_dma(unsigned int dmanr, const char *device_id); | ||
309 | extern void free_dma(unsigned int dmanr); | ||
310 | |||
311 | /* From PCI */ | ||
312 | |||
313 | #ifdef CONFIG_PCI | ||
314 | extern int isa_dma_bridge_buggy; | ||
3 | #else | 315 | #else |
4 | # include "dma_64.h" | 316 | #define isa_dma_bridge_buggy (0) |
5 | #endif | 317 | #endif |
318 | |||
319 | #endif /* _ASM_X86_DMA_H */ | ||
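Taken together, the inline helpers in the unified dma.h implement the classic 8237 programming sequence. A minimal usage sketch, not part of this patch: the channel number, device name, buffer and length below are illustrative, and buf must be a physical address below 16MB that does not cross a 64K boundary (for channels 0-3).

	static int example_start_isa_dma(unsigned int chan, unsigned int buf,
					 unsigned int len)
	{
		unsigned long flags;

		if (request_dma(chan, "example-dev"))	/* reserve the channel */
			return -EBUSY;

		flags = claim_dma_lock();
		disable_dma(chan);			/* mask while reprogramming */
		clear_dma_ff(chan);			/* known flip-flop state */
		set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
		set_dma_addr(chan, buf);		/* page + 16-bit offset */
		set_dma_count(chan, len);		/* helper writes len-1 */
		enable_dma(chan);
		release_dma_lock(flags);
		return 0;
	}

The whole reprogramming sequence sits under the DMA lock because, per the comment above clear_dma_ff(), the flip-flop state is only meaningful while nothing else touches the controller.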
diff --git a/include/asm-x86/dma_32.h b/include/asm-x86/dma_32.h deleted file mode 100644 index d23aac8e1a50..000000000000 --- a/include/asm-x86/dma_32.h +++ /dev/null | |||
@@ -1,297 +0,0 @@ | |||
1 | /* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ | ||
2 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
3 | * Written by Hennus Bergman, 1992. | ||
4 | * High DMA channel support & info by Hannu Savolainen | ||
5 | * and John Boyd, Nov. 1992. | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_DMA_H | ||
9 | #define _ASM_DMA_H | ||
10 | |||
11 | #include <linux/spinlock.h> /* And spinlocks */ | ||
12 | #include <asm/io.h> /* need byte IO */ | ||
13 | #include <linux/delay.h> | ||
14 | |||
15 | |||
16 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | ||
17 | #define dma_outb outb_p | ||
18 | #else | ||
19 | #define dma_outb outb | ||
20 | #endif | ||
21 | |||
22 | #define dma_inb inb | ||
23 | |||
24 | /* | ||
25 | * NOTES about DMA transfers: | ||
26 | * | ||
27 | * controller 1: channels 0-3, byte operations, ports 00-1F | ||
28 | * controller 2: channels 4-7, word operations, ports C0-DF | ||
29 | * | ||
30 | * - ALL registers are 8 bits only, regardless of transfer size | ||
31 | * - channel 4 is not used - cascades 1 into 2. | ||
32 | * - channels 0-3 are byte - addresses/counts are for physical bytes | ||
33 | * - channels 5-7 are word - addresses/counts are for physical words | ||
34 | * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries | ||
35 | * - transfer count loaded to registers is 1 less than actual count | ||
36 | * - controller 2 offsets are all even (2x offsets for controller 1) | ||
37 | * - page registers for 5-7 don't use data bit 0, represent 128K pages | ||
38 | * - page registers for 0-3 use bit 0, represent 64K pages | ||
39 | * | ||
40 | * DMA transfers are limited to the lower 16MB of _physical_ memory. | ||
41 | * Note that addresses loaded into registers must be _physical_ addresses, | ||
42 | * not logical addresses (which may differ if paging is active). | ||
43 | * | ||
44 | * Address mapping for channels 0-3: | ||
45 | * | ||
46 | * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) | ||
47 | * | ... | | ... | | ... | | ||
48 | * | ... | | ... | | ... | | ||
49 | * | ... | | ... | | ... | | ||
50 | * P7 ... P0 A7 ... A0 A7 ... A0 | ||
51 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
52 | * | ||
53 | * Address mapping for channels 5-7: | ||
54 | * | ||
55 | * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) | ||
56 | * | ... | \ \ ... \ \ \ ... \ \ | ||
57 | * | ... | \ \ ... \ \ \ ... \ (not used) | ||
58 | * | ... | \ \ ... \ \ \ ... \ | ||
59 | * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 | ||
60 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
61 | * | ||
62 | * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses | ||
63 | * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at | ||
64 | * the hardware level, so odd-byte transfers aren't possible). | ||
65 | * | ||
66 | * Transfer count (_not # bytes_) is limited to 64K, represented as actual | ||
67 | * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, | ||
68 | * and up to 128K bytes may be transferred on channels 5-7 in one operation. | ||
69 | * | ||
70 | */ | ||
71 | |||
72 | #define MAX_DMA_CHANNELS 8 | ||
73 | |||
74 | /* The maximum address that we can perform a DMA transfer to on this platform */ | ||
75 | #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) | ||
76 | |||
77 | /* 8237 DMA controllers */ | ||
78 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
79 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
80 | |||
81 | /* DMA controller registers */ | ||
82 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
83 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
84 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
85 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
86 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
87 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
88 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
89 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
90 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
91 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
92 | |||
93 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
94 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
95 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
96 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
97 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
98 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
99 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
100 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
101 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
102 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
103 | |||
104 | #define DMA_ADDR_0 0x00 /* DMA address registers */ | ||
105 | #define DMA_ADDR_1 0x02 | ||
106 | #define DMA_ADDR_2 0x04 | ||
107 | #define DMA_ADDR_3 0x06 | ||
108 | #define DMA_ADDR_4 0xC0 | ||
109 | #define DMA_ADDR_5 0xC4 | ||
110 | #define DMA_ADDR_6 0xC8 | ||
111 | #define DMA_ADDR_7 0xCC | ||
112 | |||
113 | #define DMA_CNT_0 0x01 /* DMA count registers */ | ||
114 | #define DMA_CNT_1 0x03 | ||
115 | #define DMA_CNT_2 0x05 | ||
116 | #define DMA_CNT_3 0x07 | ||
117 | #define DMA_CNT_4 0xC2 | ||
118 | #define DMA_CNT_5 0xC6 | ||
119 | #define DMA_CNT_6 0xCA | ||
120 | #define DMA_CNT_7 0xCE | ||
121 | |||
122 | #define DMA_PAGE_0 0x87 /* DMA page registers */ | ||
123 | #define DMA_PAGE_1 0x83 | ||
124 | #define DMA_PAGE_2 0x81 | ||
125 | #define DMA_PAGE_3 0x82 | ||
126 | #define DMA_PAGE_5 0x8B | ||
127 | #define DMA_PAGE_6 0x89 | ||
128 | #define DMA_PAGE_7 0x8A | ||
129 | |||
130 | #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ | ||
131 | #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ | ||
132 | #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
133 | |||
134 | #define DMA_AUTOINIT 0x10 | ||
135 | |||
136 | |||
137 | extern spinlock_t dma_spin_lock; | ||
138 | |||
139 | static __inline__ unsigned long claim_dma_lock(void) | ||
140 | { | ||
141 | unsigned long flags; | ||
142 | spin_lock_irqsave(&dma_spin_lock, flags); | ||
143 | return flags; | ||
144 | } | ||
145 | |||
146 | static __inline__ void release_dma_lock(unsigned long flags) | ||
147 | { | ||
148 | spin_unlock_irqrestore(&dma_spin_lock, flags); | ||
149 | } | ||
150 | |||
151 | /* enable/disable a specific DMA channel */ | ||
152 | static __inline__ void enable_dma(unsigned int dmanr) | ||
153 | { | ||
154 | if (dmanr<=3) | ||
155 | dma_outb(dmanr, DMA1_MASK_REG); | ||
156 | else | ||
157 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
158 | } | ||
159 | |||
160 | static __inline__ void disable_dma(unsigned int dmanr) | ||
161 | { | ||
162 | if (dmanr<=3) | ||
163 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
164 | else | ||
165 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
166 | } | ||
167 | |||
168 | /* Clear the 'DMA Pointer Flip Flop'. | ||
169 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
170 | * Use this once to initialize the FF to a known state. | ||
171 | * After that, keep track of it. :-) | ||
172 | * --- In order to do that, the DMA routines below should --- | ||
173 | * --- only be used while holding the DMA lock ! --- | ||
174 | */ | ||
175 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
176 | { | ||
177 | if (dmanr<=3) | ||
178 | dma_outb(0, DMA1_CLEAR_FF_REG); | ||
179 | else | ||
180 | dma_outb(0, DMA2_CLEAR_FF_REG); | ||
181 | } | ||
182 | |||
183 | /* set mode (above) for a specific DMA channel */ | ||
184 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
185 | { | ||
186 | if (dmanr<=3) | ||
187 | dma_outb(mode | dmanr, DMA1_MODE_REG); | ||
188 | else | ||
189 | dma_outb(mode | (dmanr&3), DMA2_MODE_REG); | ||
190 | } | ||
191 | |||
192 | /* Set only the page register bits of the transfer address. | ||
193 | * This is used for successive transfers when we know the contents of | ||
194 | * the lower 16 bits of the DMA current address register, but a 64k boundary | ||
195 | * may have been crossed. | ||
196 | */ | ||
197 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | ||
198 | { | ||
199 | switch(dmanr) { | ||
200 | case 0: | ||
201 | dma_outb(pagenr, DMA_PAGE_0); | ||
202 | break; | ||
203 | case 1: | ||
204 | dma_outb(pagenr, DMA_PAGE_1); | ||
205 | break; | ||
206 | case 2: | ||
207 | dma_outb(pagenr, DMA_PAGE_2); | ||
208 | break; | ||
209 | case 3: | ||
210 | dma_outb(pagenr, DMA_PAGE_3); | ||
211 | break; | ||
212 | case 5: | ||
213 | dma_outb(pagenr & 0xfe, DMA_PAGE_5); | ||
214 | break; | ||
215 | case 6: | ||
216 | dma_outb(pagenr & 0xfe, DMA_PAGE_6); | ||
217 | break; | ||
218 | case 7: | ||
219 | dma_outb(pagenr & 0xfe, DMA_PAGE_7); | ||
220 | break; | ||
221 | } | ||
222 | } | ||
223 | |||
224 | |||
225 | /* Set transfer address & page bits for specific DMA channel. | ||
226 | * Assumes dma flipflop is clear. | ||
227 | */ | ||
228 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | ||
229 | { | ||
230 | set_dma_page(dmanr, a>>16); | ||
231 | if (dmanr <= 3) { | ||
232 | dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
233 | dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
234 | } else { | ||
235 | dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
236 | dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
237 | } | ||
238 | } | ||
239 | |||
240 | |||
241 | /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for | ||
242 | * a specific DMA channel. | ||
243 | * You must ensure the parameters are valid. | ||
244 | * NOTE: from a manual: "the number of transfers is one more | ||
245 | * than the initial word count"! This is taken into account. | ||
246 | * Assumes dma flip-flop is clear. | ||
247 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
248 | */ | ||
249 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
250 | { | ||
251 | count--; | ||
252 | if (dmanr <= 3) { | ||
253 | dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
254 | dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
255 | } else { | ||
256 | dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
257 | dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
258 | } | ||
259 | } | ||
260 | |||
261 | |||
262 | /* Get DMA residue count. After a DMA transfer, this | ||
263 | * should return zero. Reading this while a DMA transfer is | ||
264 | * still in progress will return unpredictable results. | ||
265 | * If called before the channel has been used, it may return 1. | ||
266 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
267 | * | ||
268 | * Assumes DMA flip-flop is clear. | ||
269 | */ | ||
270 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
271 | { | ||
272 | unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE | ||
273 | : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; | ||
274 | |||
275 | /* using short to get 16-bit wrap around */ | ||
276 | unsigned short count; | ||
277 | |||
278 | count = 1 + dma_inb(io_port); | ||
279 | count += dma_inb(io_port) << 8; | ||
280 | |||
281 | return (dmanr<=3)? count : (count<<1); | ||
282 | } | ||
283 | |||
284 | |||
285 | /* These are in kernel/dma.c: */ | ||
286 | extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ | ||
287 | extern void free_dma(unsigned int dmanr); /* release it again */ | ||
288 | |||
289 | /* From PCI */ | ||
290 | |||
291 | #ifdef CONFIG_PCI | ||
292 | extern int isa_dma_bridge_buggy; | ||
293 | #else | ||
294 | #define isa_dma_bridge_buggy (0) | ||
295 | #endif | ||
296 | |||
297 | #endif /* _ASM_DMA_H */ | ||
diff --git a/include/asm-x86/dma_64.h b/include/asm-x86/dma_64.h deleted file mode 100644 index a37c16f06289..000000000000 --- a/include/asm-x86/dma_64.h +++ /dev/null | |||
@@ -1,304 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
3 | * Written by Hennus Bergman, 1992. | ||
4 | * High DMA channel support & info by Hannu Savolainen | ||
5 | * and John Boyd, Nov. 1992. | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_DMA_H | ||
9 | #define _ASM_DMA_H | ||
10 | |||
11 | #include <linux/spinlock.h> /* And spinlocks */ | ||
12 | #include <asm/io.h> /* need byte IO */ | ||
13 | #include <linux/delay.h> | ||
14 | |||
15 | |||
16 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | ||
17 | #define dma_outb outb_p | ||
18 | #else | ||
19 | #define dma_outb outb | ||
20 | #endif | ||
21 | |||
22 | #define dma_inb inb | ||
23 | |||
24 | /* | ||
25 | * NOTES about DMA transfers: | ||
26 | * | ||
27 | * controller 1: channels 0-3, byte operations, ports 00-1F | ||
28 | * controller 2: channels 4-7, word operations, ports C0-DF | ||
29 | * | ||
30 | * - ALL registers are 8 bits only, regardless of transfer size | ||
31 | * - channel 4 is not used - cascades 1 into 2. | ||
32 | * - channels 0-3 are byte - addresses/counts are for physical bytes | ||
33 | * - channels 5-7 are word - addresses/counts are for physical words | ||
34 | * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries | ||
35 | * - transfer count loaded to registers is 1 less than actual count | ||
36 | * - controller 2 offsets are all even (2x offsets for controller 1) | ||
37 | * - page registers for 5-7 don't use data bit 0, represent 128K pages | ||
38 | * - page registers for 0-3 use bit 0, represent 64K pages | ||
39 | * | ||
40 | * DMA transfers are limited to the lower 16MB of _physical_ memory. | ||
41 | * Note that addresses loaded into registers must be _physical_ addresses, | ||
42 | * not logical addresses (which may differ if paging is active). | ||
43 | * | ||
44 | * Address mapping for channels 0-3: | ||
45 | * | ||
46 | * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) | ||
47 | * | ... | | ... | | ... | | ||
48 | * | ... | | ... | | ... | | ||
49 | * | ... | | ... | | ... | | ||
50 | * P7 ... P0 A7 ... A0 A7 ... A0 | ||
51 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
52 | * | ||
53 | * Address mapping for channels 5-7: | ||
54 | * | ||
55 | * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) | ||
56 | * | ... | \ \ ... \ \ \ ... \ \ | ||
57 | * | ... | \ \ ... \ \ \ ... \ (not used) | ||
58 | * | ... | \ \ ... \ \ \ ... \ | ||
59 | * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 | ||
60 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
61 | * | ||
62 | * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses | ||
63 | * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at | ||
64 | * the hardware level, so odd-byte transfers aren't possible). | ||
65 | * | ||
66 | * Transfer count (_not # bytes_) is limited to 64K, represented as actual | ||
67 | * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, | ||
68 | * and up to 128K bytes may be transferred on channels 5-7 in one operation. | ||
69 | * | ||
70 | */ | ||
71 | |||
72 | #define MAX_DMA_CHANNELS 8 | ||
73 | |||
74 | |||
75 | /* 16MB ISA DMA zone */ | ||
76 | #define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) | ||
77 | |||
78 | /* 4GB broken PCI/AGP hardware bus master zone */ | ||
79 | #define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) | ||
80 | |||
81 | /* Compat define for old dma zone */ | ||
82 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) | ||
83 | |||
84 | /* 8237 DMA controllers */ | ||
85 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
86 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
87 | |||
88 | /* DMA controller registers */ | ||
89 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
90 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
91 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
92 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
93 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
94 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
95 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
96 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
97 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
98 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
99 | |||
100 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
101 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
102 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
103 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
104 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
105 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
106 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
107 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
108 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
109 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
110 | |||
111 | #define DMA_ADDR_0 0x00 /* DMA address registers */ | ||
112 | #define DMA_ADDR_1 0x02 | ||
113 | #define DMA_ADDR_2 0x04 | ||
114 | #define DMA_ADDR_3 0x06 | ||
115 | #define DMA_ADDR_4 0xC0 | ||
116 | #define DMA_ADDR_5 0xC4 | ||
117 | #define DMA_ADDR_6 0xC8 | ||
118 | #define DMA_ADDR_7 0xCC | ||
119 | |||
120 | #define DMA_CNT_0 0x01 /* DMA count registers */ | ||
121 | #define DMA_CNT_1 0x03 | ||
122 | #define DMA_CNT_2 0x05 | ||
123 | #define DMA_CNT_3 0x07 | ||
124 | #define DMA_CNT_4 0xC2 | ||
125 | #define DMA_CNT_5 0xC6 | ||
126 | #define DMA_CNT_6 0xCA | ||
127 | #define DMA_CNT_7 0xCE | ||
128 | |||
129 | #define DMA_PAGE_0 0x87 /* DMA page registers */ | ||
130 | #define DMA_PAGE_1 0x83 | ||
131 | #define DMA_PAGE_2 0x81 | ||
132 | #define DMA_PAGE_3 0x82 | ||
133 | #define DMA_PAGE_5 0x8B | ||
134 | #define DMA_PAGE_6 0x89 | ||
135 | #define DMA_PAGE_7 0x8A | ||
136 | |||
137 | #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ | ||
138 | #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ | ||
139 | #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
140 | |||
141 | #define DMA_AUTOINIT 0x10 | ||
142 | |||
143 | |||
144 | extern spinlock_t dma_spin_lock; | ||
145 | |||
146 | static __inline__ unsigned long claim_dma_lock(void) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | spin_lock_irqsave(&dma_spin_lock, flags); | ||
150 | return flags; | ||
151 | } | ||
152 | |||
153 | static __inline__ void release_dma_lock(unsigned long flags) | ||
154 | { | ||
155 | spin_unlock_irqrestore(&dma_spin_lock, flags); | ||
156 | } | ||
157 | |||
158 | /* enable/disable a specific DMA channel */ | ||
159 | static __inline__ void enable_dma(unsigned int dmanr) | ||
160 | { | ||
161 | if (dmanr<=3) | ||
162 | dma_outb(dmanr, DMA1_MASK_REG); | ||
163 | else | ||
164 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
165 | } | ||
166 | |||
167 | static __inline__ void disable_dma(unsigned int dmanr) | ||
168 | { | ||
169 | if (dmanr<=3) | ||
170 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
171 | else | ||
172 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
173 | } | ||
174 | |||
175 | /* Clear the 'DMA Pointer Flip Flop'. | ||
176 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
177 | * Use this once to initialize the FF to a known state. | ||
178 | * After that, keep track of it. :-) | ||
179 | * --- In order to do that, the DMA routines below should --- | ||
180 | * --- only be used while holding the DMA lock ! --- | ||
181 | */ | ||
182 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
183 | { | ||
184 | if (dmanr<=3) | ||
185 | dma_outb(0, DMA1_CLEAR_FF_REG); | ||
186 | else | ||
187 | dma_outb(0, DMA2_CLEAR_FF_REG); | ||
188 | } | ||
189 | |||
190 | /* set mode (above) for a specific DMA channel */ | ||
191 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
192 | { | ||
193 | if (dmanr<=3) | ||
194 | dma_outb(mode | dmanr, DMA1_MODE_REG); | ||
195 | else | ||
196 | dma_outb(mode | (dmanr&3), DMA2_MODE_REG); | ||
197 | } | ||
198 | |||
199 | /* Set only the page register bits of the transfer address. | ||
200 | * This is used for successive transfers when we know the contents of | ||
201 | * the lower 16 bits of the DMA current address register, but a 64k boundary | ||
202 | * may have been crossed. | ||
203 | */ | ||
204 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | ||
205 | { | ||
206 | switch(dmanr) { | ||
207 | case 0: | ||
208 | dma_outb(pagenr, DMA_PAGE_0); | ||
209 | break; | ||
210 | case 1: | ||
211 | dma_outb(pagenr, DMA_PAGE_1); | ||
212 | break; | ||
213 | case 2: | ||
214 | dma_outb(pagenr, DMA_PAGE_2); | ||
215 | break; | ||
216 | case 3: | ||
217 | dma_outb(pagenr, DMA_PAGE_3); | ||
218 | break; | ||
219 | case 5: | ||
220 | dma_outb(pagenr & 0xfe, DMA_PAGE_5); | ||
221 | break; | ||
222 | case 6: | ||
223 | dma_outb(pagenr & 0xfe, DMA_PAGE_6); | ||
224 | break; | ||
225 | case 7: | ||
226 | dma_outb(pagenr & 0xfe, DMA_PAGE_7); | ||
227 | break; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | |||
232 | /* Set transfer address & page bits for specific DMA channel. | ||
233 | * Assumes dma flipflop is clear. | ||
234 | */ | ||
235 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | ||
236 | { | ||
237 | set_dma_page(dmanr, a>>16); | ||
238 | if (dmanr <= 3) { | ||
239 | dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
240 | dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
241 | } else { | ||
242 | dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
243 | dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | |||
248 | /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for | ||
249 | * a specific DMA channel. | ||
250 | * You must ensure the parameters are valid. | ||
251 | * NOTE: from a manual: "the number of transfers is one more | ||
252 | * than the initial word count"! This is taken into account. | ||
253 | * Assumes dma flip-flop is clear. | ||
254 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
255 | */ | ||
256 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
257 | { | ||
258 | count--; | ||
259 | if (dmanr <= 3) { | ||
260 | dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
261 | dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
262 | } else { | ||
263 | dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
264 | dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | |||
269 | /* Get DMA residue count. After a DMA transfer, this | ||
270 | * should return zero. Reading this while a DMA transfer is | ||
271 | * still in progress will return unpredictable results. | ||
272 | * If called before the channel has been used, it may return 1. | ||
273 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
274 | * | ||
275 | * Assumes DMA flip-flop is clear. | ||
276 | */ | ||
277 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
278 | { | ||
279 | unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE | ||
280 | : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; | ||
281 | |||
282 | /* using short to get 16-bit wrap around */ | ||
283 | unsigned short count; | ||
284 | |||
285 | count = 1 + dma_inb(io_port); | ||
286 | count += dma_inb(io_port) << 8; | ||
287 | |||
288 | return (dmanr<=3)? count : (count<<1); | ||
289 | } | ||
290 | |||
291 | |||
292 | /* These are in kernel/dma.c: */ | ||
293 | extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ | ||
294 | extern void free_dma(unsigned int dmanr); /* release it again */ | ||
295 | |||
296 | /* From PCI */ | ||
297 | |||
298 | #ifdef CONFIG_PCI | ||
299 | extern int isa_dma_bridge_buggy; | ||
300 | #else | ||
301 | #define isa_dma_bridge_buggy (0) | ||
302 | #endif | ||
303 | |||
304 | #endif /* _ASM_DMA_H */ | ||
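For reference, the zone cutoffs that the deleted 64-bit header defined (and that survive elsewhere after the merge) are plain page-frame arithmetic; with the usual 4K pages (PAGE_SHIFT == 12) they evaluate to:

	MAX_DMA_PFN   = (16 * 1024 * 1024) >> 12        = 0x1000   /* 16MB ISA limit, 4096 frames */
	MAX_DMA32_PFN = (4UL * 1024 * 1024 * 1024) >> 12 = 0x100000 /* 4GB limit for 32-bit bus masters */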
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h index 8e2b0e6aa8e7..1241e6ad1935 100644 --- a/include/asm-x86/dmi.h +++ b/include/asm-x86/dmi.h | |||
@@ -5,9 +5,6 @@ | |||
5 | 5 | ||
6 | #ifdef CONFIG_X86_32 | 6 | #ifdef CONFIG_X86_32 |
7 | 7 | ||
8 | /* Use early IO mappings for DMI because it's initialized early */ | ||
9 | #define dmi_ioremap bt_ioremap | ||
10 | #define dmi_iounmap bt_iounmap | ||
11 | #define dmi_alloc alloc_bootmem | 8 | #define dmi_alloc alloc_bootmem |
12 | 9 | ||
13 | #else /* CONFIG_X86_32 */ | 10 | #else /* CONFIG_X86_32 */ |
@@ -22,14 +19,15 @@ extern char dmi_alloc_data[DMI_MAX_DATA]; | |||
22 | static inline void *dmi_alloc(unsigned len) | 19 | static inline void *dmi_alloc(unsigned len) |
23 | { | 20 | { |
24 | int idx = dmi_alloc_index; | 21 | int idx = dmi_alloc_index; |
25 | if ((dmi_alloc_index += len) > DMI_MAX_DATA) | 22 | if ((dmi_alloc_index + len) > DMI_MAX_DATA) |
26 | return NULL; | 23 | return NULL; |
24 | dmi_alloc_index += len; | ||
27 | return dmi_alloc_data + idx; | 25 | return dmi_alloc_data + idx; |
28 | } | 26 | } |
29 | 27 | ||
28 | #endif | ||
29 | |||
30 | #define dmi_ioremap early_ioremap | 30 | #define dmi_ioremap early_ioremap |
31 | #define dmi_iounmap early_iounmap | 31 | #define dmi_iounmap early_iounmap |
32 | 32 | ||
33 | #endif | 33 | #endif |
34 | |||
35 | #endif | ||
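The dmi_alloc() change above is a genuine bug fix, easiest to see with the two bookkeeping lines side by side: the old code committed the increment inside the overflow test, so a failed allocation still advanced the index and wasted the rest of the pool; the new code tests first and commits only on success.

	/* old: dmi_alloc_index is already bumped when the test fails */
	if ((dmi_alloc_index += len) > DMI_MAX_DATA)
		return NULL;

	/* new: no side effect on the failure path */
	if ((dmi_alloc_index + len) > DMI_MAX_DATA)
		return NULL;
	dmi_alloc_index += len;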
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h new file mode 100644 index 000000000000..7881368142fa --- /dev/null +++ b/include/asm-x86/ds.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * Debug Store (DS) support | ||
3 | * | ||
4 | * This provides a low-level interface to the hardware's Debug Store | ||
5 | * feature that is used for last branch recording (LBR) and | ||
6 | * precise-event based sampling (PEBS). | ||
7 | * | ||
8 | * Different architectures use a different DS layout/pointer size. | ||
9 | * The below functions therefore work on a void*. | ||
10 | * | ||
11 | * | ||
12 | * Since there is no user for PEBS yet, only LBR (or branch | ||
13 | * trace store, BTS) is supported. | ||
14 | * | ||
15 | * | ||
16 | * Copyright (C) 2007 Intel Corporation. | ||
17 | * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007 | ||
18 | */ | ||
19 | |||
20 | #ifndef _ASM_X86_DS_H | ||
21 | #define _ASM_X86_DS_H | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | #include <linux/init.h> | ||
25 | |||
26 | struct cpuinfo_x86; | ||
27 | |||
28 | |||
29 | /* a branch trace record entry | ||
30 | * | ||
31 | * In order to unify the interface between various processor versions, | ||
32 | * we use the below data structure for all processors. | ||
33 | */ | ||
34 | enum bts_qualifier { | ||
35 | BTS_INVALID = 0, | ||
36 | BTS_BRANCH, | ||
37 | BTS_TASK_ARRIVES, | ||
38 | BTS_TASK_DEPARTS | ||
39 | }; | ||
40 | |||
41 | struct bts_struct { | ||
42 | u64 qualifier; | ||
43 | union { | ||
44 | /* BTS_BRANCH */ | ||
45 | struct { | ||
46 | u64 from_ip; | ||
47 | u64 to_ip; | ||
48 | } lbr; | ||
49 | /* BTS_TASK_ARRIVES or | ||
50 | BTS_TASK_DEPARTS */ | ||
51 | u64 jiffies; | ||
52 | } variant; | ||
53 | }; | ||
54 | |||
55 | /* Overflow handling mechanisms */ | ||
56 | #define DS_O_SIGNAL 1 /* send overflow signal */ | ||
57 | #define DS_O_WRAP 2 /* wrap around */ | ||
58 | |||
59 | extern int ds_allocate(void **, size_t); | ||
60 | extern int ds_free(void **); | ||
61 | extern int ds_get_bts_size(void *); | ||
62 | extern int ds_get_bts_end(void *); | ||
63 | extern int ds_get_bts_index(void *); | ||
64 | extern int ds_set_overflow(void *, int); | ||
65 | extern int ds_get_overflow(void *); | ||
66 | extern int ds_clear(void *); | ||
67 | extern int ds_read_bts(void *, int, struct bts_struct *); | ||
68 | extern int ds_write_bts(void *, const struct bts_struct *); | ||
69 | extern unsigned long ds_debugctl_mask(void); | ||
70 | extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c); | ||
71 | |||
72 | #endif /* _ASM_X86_DS_H */ | ||
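A hedged sketch of how a consumer might drain a BTS buffer with the interface above; the tracer setup, locking and error handling are omitted, and the printk is purely illustrative:

	static void example_dump_bts(void *ds)
	{
		struct bts_struct bts;
		int i, n = ds_get_bts_index(ds);	/* records written so far */

		for (i = 0; i < n; i++) {
			if (ds_read_bts(ds, i, &bts) < 0)
				break;
			if (bts.qualifier == BTS_BRANCH)
				printk(KERN_DEBUG "branch %llx -> %llx\n",
				       (unsigned long long)bts.variant.lbr.from_ip,
				       (unsigned long long)bts.variant.lbr.to_ip);
		}
	}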
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h index 3e214f39fad3..7004251fc66b 100644 --- a/include/asm-x86/e820.h +++ b/include/asm-x86/e820.h | |||
@@ -22,6 +22,12 @@ struct e820map { | |||
22 | }; | 22 | }; |
23 | #endif /* __ASSEMBLY__ */ | 23 | #endif /* __ASSEMBLY__ */ |
24 | 24 | ||
25 | #define ISA_START_ADDRESS 0xa0000 | ||
26 | #define ISA_END_ADDRESS 0x100000 | ||
27 | |||
28 | #define BIOS_BEGIN 0x000a0000 | ||
29 | #define BIOS_END 0x00100000 | ||
30 | |||
25 | #ifdef __KERNEL__ | 31 | #ifdef __KERNEL__ |
26 | #ifdef CONFIG_X86_32 | 32 | #ifdef CONFIG_X86_32 |
27 | # include "e820_32.h" | 33 | # include "e820_32.h" |
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h index 03f60c690c8a..f1da7ebd1905 100644 --- a/include/asm-x86/e820_32.h +++ b/include/asm-x86/e820_32.h | |||
@@ -12,20 +12,28 @@ | |||
12 | #ifndef __E820_HEADER | 12 | #ifndef __E820_HEADER |
13 | #define __E820_HEADER | 13 | #define __E820_HEADER |
14 | 14 | ||
15 | #include <linux/ioport.h> | ||
16 | |||
15 | #define HIGH_MEMORY (1024*1024) | 17 | #define HIGH_MEMORY (1024*1024) |
16 | 18 | ||
17 | #ifndef __ASSEMBLY__ | 19 | #ifndef __ASSEMBLY__ |
18 | 20 | ||
19 | extern struct e820map e820; | 21 | extern struct e820map e820; |
22 | extern void update_e820(void); | ||
20 | 23 | ||
21 | extern int e820_all_mapped(unsigned long start, unsigned long end, | 24 | extern int e820_all_mapped(unsigned long start, unsigned long end, |
22 | unsigned type); | 25 | unsigned type); |
23 | extern int e820_any_mapped(u64 start, u64 end, unsigned type); | 26 | extern int e820_any_mapped(u64 start, u64 end, unsigned type); |
24 | extern void find_max_pfn(void); | 27 | extern void find_max_pfn(void); |
25 | extern void register_bootmem_low_pages(unsigned long max_low_pfn); | 28 | extern void register_bootmem_low_pages(unsigned long max_low_pfn); |
29 | extern void add_memory_region(unsigned long long start, | ||
30 | unsigned long long size, int type); | ||
26 | extern void e820_register_memory(void); | 31 | extern void e820_register_memory(void); |
27 | extern void limit_regions(unsigned long long size); | 32 | extern void limit_regions(unsigned long long size); |
28 | extern void print_memory_map(char *who); | 33 | extern void print_memory_map(char *who); |
34 | extern void init_iomem_resources(struct resource *code_resource, | ||
35 | struct resource *data_resource, | ||
36 | struct resource *bss_resource); | ||
29 | 37 | ||
30 | #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) | 38 | #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) |
31 | extern void e820_mark_nosave_regions(void); | 39 | extern void e820_mark_nosave_regions(void); |
@@ -35,5 +43,6 @@ static inline void e820_mark_nosave_regions(void) | |||
35 | } | 43 | } |
36 | #endif | 44 | #endif |
37 | 45 | ||
46 | |||
38 | #endif/*!__ASSEMBLY__*/ | 47 | #endif/*!__ASSEMBLY__*/ |
39 | #endif/*__E820_HEADER*/ | 48 | #endif/*__E820_HEADER*/ |
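The newly exported helpers follow the existing e820_all_mapped() pattern; an illustrative call (E820_RAM is defined in the shared e820.h, and the pfn range is hypothetical):

	if (!e820_all_mapped(start_pfn << PAGE_SHIFT,
			     end_pfn << PAGE_SHIFT, E820_RAM))
		printk(KERN_WARNING "range not entirely usable RAM\n");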
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 0bd4787a5d57..a560c4f5d500 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h | |||
@@ -11,19 +11,25 @@ | |||
11 | #ifndef __E820_HEADER | 11 | #ifndef __E820_HEADER |
12 | #define __E820_HEADER | 12 | #define __E820_HEADER |
13 | 13 | ||
14 | #include <linux/ioport.h> | ||
15 | |||
14 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
15 | extern unsigned long find_e820_area(unsigned long start, unsigned long end, | 17 | extern unsigned long find_e820_area(unsigned long start, unsigned long end, |
16 | unsigned size); | 18 | unsigned size, unsigned long align); |
17 | extern void add_memory_region(unsigned long start, unsigned long size, | 19 | extern void add_memory_region(unsigned long start, unsigned long size, |
18 | int type); | 20 | int type); |
19 | extern void setup_memory_region(void); | 21 | extern void setup_memory_region(void); |
20 | extern void contig_e820_setup(void); | 22 | extern void contig_e820_setup(void); |
21 | extern unsigned long e820_end_of_ram(void); | 23 | extern unsigned long e820_end_of_ram(void); |
22 | extern void e820_reserve_resources(void); | 24 | extern void e820_reserve_resources(struct resource *code_resource, |
25 | struct resource *data_resource, struct resource *bss_resource); | ||
23 | extern void e820_mark_nosave_regions(void); | 26 | extern void e820_mark_nosave_regions(void); |
24 | extern void e820_print_map(char *who); | ||
25 | extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); | 27 | extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); |
26 | extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); | 28 | extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); |
29 | extern int e820_any_non_reserved(unsigned long start, unsigned long end); | ||
30 | extern int is_memory_any_valid(unsigned long start, unsigned long end); | ||
31 | extern int e820_all_non_reserved(unsigned long start, unsigned long end); | ||
32 | extern int is_memory_all_valid(unsigned long start, unsigned long end); | ||
27 | extern unsigned long e820_hole_size(unsigned long start, unsigned long end); | 33 | extern unsigned long e820_hole_size(unsigned long start, unsigned long end); |
28 | 34 | ||
29 | extern void e820_setup_gap(void); | 35 | extern void e820_setup_gap(void); |
@@ -33,9 +39,11 @@ extern void e820_register_active_regions(int nid, | |||
33 | extern void finish_e820_parsing(void); | 39 | extern void finish_e820_parsing(void); |
34 | 40 | ||
35 | extern struct e820map e820; | 41 | extern struct e820map e820; |
42 | extern void update_e820(void); | ||
43 | |||
44 | extern void reserve_early(unsigned long start, unsigned long end, char *name); | ||
45 | extern void early_res_to_bootmem(void); | ||
36 | 46 | ||
37 | extern unsigned ebda_addr, ebda_size; | ||
38 | extern unsigned long nodemap_addr, nodemap_size; | ||
39 | #endif/*!__ASSEMBLY__*/ | 47 | #endif/*!__ASSEMBLY__*/ |
40 | 48 | ||
41 | #endif/*__E820_HEADER*/ | 49 | #endif/*__E820_HEADER*/ |
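find_e820_area() grows an explicit alignment argument in this patch, and reserve_early() is the new way to pin such an allocation before bootmem is up. A sketch under those declarations (the -1UL failure value is the function's conventional return, and the range and name are illustrative):

	unsigned long addr;

	addr = find_e820_area(0x100000, end_pfn << PAGE_SHIFT,
			      size, PAGE_SIZE);
	if (addr != -1UL)
		reserve_early(addr, addr + size, "example");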
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h new file mode 100644 index 000000000000..ea9734b74aca --- /dev/null +++ b/include/asm-x86/efi.h | |||
@@ -0,0 +1,97 @@ | |||
1 | #ifndef _ASM_X86_EFI_H | ||
2 | #define _ASM_X86_EFI_H | ||
3 | |||
4 | #ifdef CONFIG_X86_32 | ||
5 | |||
6 | extern unsigned long asmlinkage efi_call_phys(void *, ...); | ||
7 | |||
8 | #define efi_call_phys0(f) efi_call_phys(f) | ||
9 | #define efi_call_phys1(f, a1) efi_call_phys(f, a1) | ||
10 | #define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2) | ||
11 | #define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3) | ||
12 | #define efi_call_phys4(f, a1, a2, a3, a4) \ | ||
13 | efi_call_phys(f, a1, a2, a3, a4) | ||
14 | #define efi_call_phys5(f, a1, a2, a3, a4, a5) \ | ||
15 | efi_call_phys(f, a1, a2, a3, a4, a5) | ||
16 | #define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \ | ||
17 | efi_call_phys(f, a1, a2, a3, a4, a5, a6) | ||
18 | /* | ||
19 | * Wrap all the virtual calls in a way that forces the parameters on the stack. | ||
20 | */ | ||
21 | |||
22 | #define efi_call_virt(f, args...) \ | ||
23 | ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) | ||
24 | |||
25 | #define efi_call_virt0(f) efi_call_virt(f) | ||
26 | #define efi_call_virt1(f, a1) efi_call_virt(f, a1) | ||
27 | #define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2) | ||
28 | #define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3) | ||
29 | #define efi_call_virt4(f, a1, a2, a3, a4) \ | ||
30 | efi_call_virt(f, a1, a2, a3, a4) | ||
31 | #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ | ||
32 | efi_call_virt(f, a1, a2, a3, a4, a5) | ||
33 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ | ||
34 | efi_call_virt(f, a1, a2, a3, a4, a5, a6) | ||
35 | |||
36 | #define efi_ioremap(addr, size) ioremap_cache(addr, size) | ||
37 | |||
38 | #else /* !CONFIG_X86_32 */ | ||
39 | |||
40 | #define MAX_EFI_IO_PAGES 100 | ||
41 | |||
42 | extern u64 efi_call0(void *fp); | ||
43 | extern u64 efi_call1(void *fp, u64 arg1); | ||
44 | extern u64 efi_call2(void *fp, u64 arg1, u64 arg2); | ||
45 | extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3); | ||
46 | extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4); | ||
47 | extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3, | ||
48 | u64 arg4, u64 arg5); | ||
49 | extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, | ||
50 | u64 arg4, u64 arg5, u64 arg6); | ||
51 | |||
52 | #define efi_call_phys0(f) \ | ||
53 | efi_call0((void *)(f)) | ||
54 | #define efi_call_phys1(f, a1) \ | ||
55 | efi_call1((void *)(f), (u64)(a1)) | ||
56 | #define efi_call_phys2(f, a1, a2) \ | ||
57 | efi_call2((void *)(f), (u64)(a1), (u64)(a2)) | ||
58 | #define efi_call_phys3(f, a1, a2, a3) \ | ||
59 | efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3)) | ||
60 | #define efi_call_phys4(f, a1, a2, a3, a4) \ | ||
61 | efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ | ||
62 | (u64)(a4)) | ||
63 | #define efi_call_phys5(f, a1, a2, a3, a4, a5) \ | ||
64 | efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ | ||
65 | (u64)(a4), (u64)(a5)) | ||
66 | #define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \ | ||
67 | efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ | ||
68 | (u64)(a4), (u64)(a5), (u64)(a6)) | ||
69 | |||
70 | #define efi_call_virt0(f) \ | ||
71 | efi_call0((void *)(efi.systab->runtime->f)) | ||
72 | #define efi_call_virt1(f, a1) \ | ||
73 | efi_call1((void *)(efi.systab->runtime->f), (u64)(a1)) | ||
74 | #define efi_call_virt2(f, a1, a2) \ | ||
75 | efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2)) | ||
76 | #define efi_call_virt3(f, a1, a2, a3) \ | ||
77 | efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | ||
78 | (u64)(a3)) | ||
79 | #define efi_call_virt4(f, a1, a2, a3, a4) \ | ||
80 | efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | ||
81 | (u64)(a3), (u64)(a4)) | ||
82 | #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ | ||
83 | efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | ||
84 | (u64)(a3), (u64)(a4), (u64)(a5)) | ||
85 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ | ||
86 | efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | ||
87 | (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) | ||
88 | |||
89 | extern void *efi_ioremap(unsigned long addr, unsigned long size); | ||
90 | |||
91 | #endif /* CONFIG_X86_32 */ | ||
92 | |||
93 | extern void efi_reserve_bootmem(void); | ||
94 | extern void efi_call_phys_prelog(void); | ||
95 | extern void efi_call_phys_epilog(void); | ||
96 | |||
97 | #endif | ||
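Both arms of the new efi.h reduce to fixed-arity thunks, so callers look the same on 32- and 64-bit. A hedged example using get_time, a standard field of the EFI runtime services table (the variable names are illustrative):

	efi_status_t status;
	efi_time_t tm;
	efi_time_cap_t tc;

	/* virtual-mode runtime call, dispatched through efi.systab->runtime */
	status = efi_call_virt2(get_time, &tm, &tc);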
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index ec42a4d2e83b..d9c94e707289 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h | |||
@@ -73,18 +73,23 @@ typedef struct user_fxsr_struct elf_fpxregset_t; | |||
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | #ifdef __KERNEL__ | 75 | #ifdef __KERNEL__ |
76 | #include <asm/vdso.h> | ||
76 | 77 | ||
77 | #ifdef CONFIG_X86_32 | 78 | extern unsigned int vdso_enabled; |
78 | #include <asm/processor.h> | ||
79 | #include <asm/system.h> /* for savesegment */ | ||
80 | #include <asm/desc.h> | ||
81 | 79 | ||
82 | /* | 80 | /* |
83 | * This is used to ensure we don't load something for the wrong architecture. | 81 | * This is used to ensure we don't load something for the wrong architecture. |
84 | */ | 82 | */ |
85 | #define elf_check_arch(x) \ | 83 | #define elf_check_arch_ia32(x) \ |
86 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) | 84 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) |
87 | 85 | ||
86 | #ifdef CONFIG_X86_32 | ||
87 | #include <asm/processor.h> | ||
88 | #include <asm/system.h> /* for savesegment */ | ||
89 | #include <asm/desc.h> | ||
90 | |||
91 | #define elf_check_arch(x) elf_check_arch_ia32(x) | ||
92 | |||
88 | /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx | 93 | /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx |
89 | contains a pointer to a function which might be registered using `atexit'. | 94 | contains a pointer to a function which might be registered using `atexit'. |
90 | This provides a means for the dynamic linker to call DT_FINI functions for | 95 |
@@ -96,36 +101,38 @@ typedef struct user_fxsr_struct elf_fpxregset_t; | |||
96 | just to make things more deterministic. | 101 | just to make things more deterministic. |
97 | */ | 102 | */ |
98 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | 103 | #define ELF_PLAT_INIT(_r, load_addr) do { \ |
99 | _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ | 104 | _r->bx = 0; _r->cx = 0; _r->dx = 0; \ |
100 | _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ | 105 | _r->si = 0; _r->di = 0; _r->bp = 0; \ |
101 | _r->eax = 0; \ | 106 | _r->ax = 0; \ |
102 | } while (0) | 107 | } while (0) |
103 | 108 | ||
104 | /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is | 109 | /* |
105 | now struct_user_regs, they are different) */ | 110 | * regs is struct pt_regs, pr_reg is elf_gregset_t (which is |
106 | 111 | * now struct_user_regs, they are different) | |
107 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ | 112 | */ |
108 | pr_reg[0] = regs->ebx; \ | 113 | |
109 | pr_reg[1] = regs->ecx; \ | 114 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ |
110 | pr_reg[2] = regs->edx; \ | 115 | pr_reg[0] = regs->bx; \ |
111 | pr_reg[3] = regs->esi; \ | 116 | pr_reg[1] = regs->cx; \ |
112 | pr_reg[4] = regs->edi; \ | 117 | pr_reg[2] = regs->dx; \ |
113 | pr_reg[5] = regs->ebp; \ | 118 | pr_reg[3] = regs->si; \ |
114 | pr_reg[6] = regs->eax; \ | 119 | pr_reg[4] = regs->di; \ |
115 | pr_reg[7] = regs->xds & 0xffff; \ | 120 | pr_reg[5] = regs->bp; \ |
116 | pr_reg[8] = regs->xes & 0xffff; \ | 121 | pr_reg[6] = regs->ax; \ |
117 | pr_reg[9] = regs->xfs & 0xffff; \ | 122 | pr_reg[7] = regs->ds & 0xffff; \ |
118 | savesegment(gs,pr_reg[10]); \ | 123 | pr_reg[8] = regs->es & 0xffff; \ |
119 | pr_reg[11] = regs->orig_eax; \ | 124 | pr_reg[9] = regs->fs & 0xffff; \ |
120 | pr_reg[12] = regs->eip; \ | 125 | savesegment(gs, pr_reg[10]); \ |
121 | pr_reg[13] = regs->xcs & 0xffff; \ | 126 | pr_reg[11] = regs->orig_ax; \ |
122 | pr_reg[14] = regs->eflags; \ | 127 | pr_reg[12] = regs->ip; \ |
123 | pr_reg[15] = regs->esp; \ | 128 | pr_reg[13] = regs->cs & 0xffff; \ |
124 | pr_reg[16] = regs->xss & 0xffff; | 129 | pr_reg[14] = regs->flags; \ |
130 | pr_reg[15] = regs->sp; \ | ||
131 | pr_reg[16] = regs->ss & 0xffff; \ | ||
132 | } while (0); | ||
125 | 133 | ||
126 | #define ELF_PLATFORM (utsname()->machine) | 134 | #define ELF_PLATFORM (utsname()->machine) |
127 | #define set_personality_64bit() do { } while (0) | 135 | #define set_personality_64bit() do { } while (0) |
128 | extern unsigned int vdso_enabled; | ||
129 | 136 | ||
130 | #else /* CONFIG_X86_32 */ | 137 | #else /* CONFIG_X86_32 */ |
131 | 138 | ||
@@ -137,28 +144,57 @@ extern unsigned int vdso_enabled; | |||
137 | #define elf_check_arch(x) \ | 144 | #define elf_check_arch(x) \ |
138 | ((x)->e_machine == EM_X86_64) | 145 | ((x)->e_machine == EM_X86_64) |
139 | 146 | ||
147 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) | ||
148 | |||
149 | static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp) | ||
150 | { | ||
151 | asm volatile("movl %0,%%fs" :: "r" (0)); | ||
152 | asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS)); | ||
153 | load_gs_index(0); | ||
154 | regs->ip = ip; | ||
155 | regs->sp = sp; | ||
156 | regs->flags = X86_EFLAGS_IF; | ||
157 | regs->cs = __USER32_CS; | ||
158 | regs->ss = __USER32_DS; | ||
159 | } | ||
160 | |||
161 | static inline void elf_common_init(struct thread_struct *t, | ||
162 | struct pt_regs *regs, const u16 ds) | ||
163 | { | ||
164 | regs->ax = regs->bx = regs->cx = regs->dx = 0; | ||
165 | regs->si = regs->di = regs->bp = 0; | ||
166 | regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0; | ||
167 | regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; | ||
168 | t->fs = t->gs = 0; | ||
169 | t->fsindex = t->gsindex = 0; | ||
170 | t->ds = t->es = ds; | ||
171 | } | ||
172 | |||
140 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | 173 | #define ELF_PLAT_INIT(_r, load_addr) do { \ |
141 | struct task_struct *cur = current; \ | 174 | elf_common_init(¤t->thread, _r, 0); \ |
142 | (_r)->rbx = 0; (_r)->rcx = 0; (_r)->rdx = 0; \ | ||
143 | (_r)->rsi = 0; (_r)->rdi = 0; (_r)->rbp = 0; \ | ||
144 | (_r)->rax = 0; \ | ||
145 | (_r)->r8 = 0; \ | ||
146 | (_r)->r9 = 0; \ | ||
147 | (_r)->r10 = 0; \ | ||
148 | (_r)->r11 = 0; \ | ||
149 | (_r)->r12 = 0; \ | ||
150 | (_r)->r13 = 0; \ | ||
151 | (_r)->r14 = 0; \ | ||
152 | (_r)->r15 = 0; \ | ||
153 | cur->thread.fs = 0; cur->thread.gs = 0; \ | ||
154 | cur->thread.fsindex = 0; cur->thread.gsindex = 0; \ | ||
155 | cur->thread.ds = 0; cur->thread.es = 0; \ | ||
156 | clear_thread_flag(TIF_IA32); \ | 175 | clear_thread_flag(TIF_IA32); \ |
157 | } while (0) | 176 | } while (0) |
158 | 177 | ||
159 | /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is | 178 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ |
160 | now struct_user_regs, they are different). Assumes current is the process | 179 | elf_common_init(¤t->thread, regs, __USER_DS) |
161 | getting dumped. */ | 180 | #define compat_start_thread(regs, ip, sp) do { \ |
181 | start_ia32_thread(regs, ip, sp); \ | ||
182 | set_fs(USER_DS); \ | ||
183 | } while (0) | ||
184 | #define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \ | ||
185 | if (test_thread_flag(TIF_IA32)) \ | ||
186 | clear_thread_flag(TIF_ABI_PENDING); \ | ||
187 | else \ | ||
188 | set_thread_flag(TIF_ABI_PENDING); \ | ||
189 | current->personality |= force_personality32; \ | ||
190 | } while (0) | ||
191 | #define COMPAT_ELF_PLATFORM ("i686") | ||
192 | |||
193 | /* | ||
194 | * regs is struct pt_regs, pr_reg is elf_gregset_t (which is | ||
195 | * now struct_user_regs, they are different). Assumes current is the process | ||
196 | * getting dumped. | ||
197 | */ | ||
162 | 198 | ||
163 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | 199 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ |
164 | unsigned v; \ | 200 | unsigned v; \ |
@@ -166,22 +202,22 @@ extern unsigned int vdso_enabled; | |||
166 | (pr_reg)[1] = (regs)->r14; \ | 202 | (pr_reg)[1] = (regs)->r14; \ |
167 | (pr_reg)[2] = (regs)->r13; \ | 203 | (pr_reg)[2] = (regs)->r13; \ |
168 | (pr_reg)[3] = (regs)->r12; \ | 204 | (pr_reg)[3] = (regs)->r12; \ |
169 | (pr_reg)[4] = (regs)->rbp; \ | 205 | (pr_reg)[4] = (regs)->bp; \ |
170 | (pr_reg)[5] = (regs)->rbx; \ | 206 | (pr_reg)[5] = (regs)->bx; \ |
171 | (pr_reg)[6] = (regs)->r11; \ | 207 | (pr_reg)[6] = (regs)->r11; \ |
172 | (pr_reg)[7] = (regs)->r10; \ | 208 | (pr_reg)[7] = (regs)->r10; \ |
173 | (pr_reg)[8] = (regs)->r9; \ | 209 | (pr_reg)[8] = (regs)->r9; \ |
174 | (pr_reg)[9] = (regs)->r8; \ | 210 | (pr_reg)[9] = (regs)->r8; \ |
175 | (pr_reg)[10] = (regs)->rax; \ | 211 | (pr_reg)[10] = (regs)->ax; \ |
176 | (pr_reg)[11] = (regs)->rcx; \ | 212 | (pr_reg)[11] = (regs)->cx; \ |
177 | (pr_reg)[12] = (regs)->rdx; \ | 213 | (pr_reg)[12] = (regs)->dx; \ |
178 | (pr_reg)[13] = (regs)->rsi; \ | 214 | (pr_reg)[13] = (regs)->si; \ |
179 | (pr_reg)[14] = (regs)->rdi; \ | 215 | (pr_reg)[14] = (regs)->di; \ |
180 | (pr_reg)[15] = (regs)->orig_rax; \ | 216 | (pr_reg)[15] = (regs)->orig_ax; \ |
181 | (pr_reg)[16] = (regs)->rip; \ | 217 | (pr_reg)[16] = (regs)->ip; \ |
182 | (pr_reg)[17] = (regs)->cs; \ | 218 | (pr_reg)[17] = (regs)->cs; \ |
183 | (pr_reg)[18] = (regs)->eflags; \ | 219 | (pr_reg)[18] = (regs)->flags; \ |
184 | (pr_reg)[19] = (regs)->rsp; \ | 220 | (pr_reg)[19] = (regs)->sp; \ |
185 | (pr_reg)[20] = (regs)->ss; \ | 221 | (pr_reg)[20] = (regs)->ss; \ |
186 | (pr_reg)[21] = current->thread.fs; \ | 222 | (pr_reg)[21] = current->thread.fs; \ |
187 | (pr_reg)[22] = current->thread.gs; \ | 223 | (pr_reg)[22] = current->thread.gs; \ |
@@ -189,15 +225,17 @@ extern unsigned int vdso_enabled; | |||
189 | asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ | 225 | asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \ |
190 | asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ | 226 | asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \ |
191 | asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ | 227 | asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \ |
192 | } while(0); | 228 | } while (0); |
193 | 229 | ||
194 | /* I'm not sure if we can use '-' here */ | 230 | /* I'm not sure if we can use '-' here */ |
195 | #define ELF_PLATFORM ("x86_64") | 231 | #define ELF_PLATFORM ("x86_64") |
196 | extern void set_personality_64bit(void); | 232 | extern void set_personality_64bit(void); |
197 | extern int vdso_enabled; | 233 | extern unsigned int sysctl_vsyscall32; |
234 | extern int force_personality32; | ||
198 | 235 | ||
199 | #endif /* !CONFIG_X86_32 */ | 236 | #endif /* !CONFIG_X86_32 */ |
200 | 237 | ||
238 | #define CORE_DUMP_USE_REGSET | ||
201 | #define USE_ELF_CORE_DUMP | 239 | #define USE_ELF_CORE_DUMP |
202 | #define ELF_EXEC_PAGESIZE 4096 | 240 | #define ELF_EXEC_PAGESIZE 4096 |
203 | 241 | ||
@@ -232,43 +270,24 @@ extern int vdso_enabled; | |||
232 | 270 | ||
233 | struct task_struct; | 271 | struct task_struct; |
234 | 272 | ||
235 | extern int dump_task_regs (struct task_struct *, elf_gregset_t *); | 273 | #define ARCH_DLINFO_IA32(vdso_enabled) \ |
236 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); | 274 | do if (vdso_enabled) { \ |
237 | 275 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ | |
238 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | 276 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ |
239 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | 277 | } while (0) |
240 | 278 | ||
241 | #ifdef CONFIG_X86_32 | 279 | #ifdef CONFIG_X86_32 |
242 | extern int dump_task_extended_fpu (struct task_struct *, | ||
243 | struct user_fxsr_struct *); | ||
244 | #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) \ | ||
245 | dump_task_extended_fpu(tsk, elf_xfpregs) | ||
246 | #define ELF_CORE_XFPREG_TYPE NT_PRXFPREG | ||
247 | 280 | ||
248 | #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) | 281 | #define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) |
249 | #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) | ||
250 | #define VDSO_PRELINK 0 | ||
251 | |||
252 | #define VDSO_SYM(x) \ | ||
253 | (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK) | ||
254 | |||
255 | #define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) | ||
256 | #define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE) | ||
257 | 282 | ||
258 | extern void __kernel_vsyscall; | 283 | #define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled) |
259 | |||
260 | #define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) | ||
261 | 284 | ||
262 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ | 285 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ |
263 | 286 | ||
264 | #define ARCH_DLINFO \ | ||
265 | do if (vdso_enabled) { \ | ||
266 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ | ||
267 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ | ||
268 | } while (0) | ||
269 | |||
270 | #else /* CONFIG_X86_32 */ | 287 | #else /* CONFIG_X86_32 */ |
271 | 288 | ||
289 | #define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */ | ||
290 | |||
272 | /* 1GB for 64bit, 8MB for 32bit */ | 291 | /* 1GB for 64bit, 8MB for 32bit */ |
273 | #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) | 292 | #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) |
274 | 293 | ||
@@ -277,14 +296,31 @@ do if (vdso_enabled) { \ | |||
277 | NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ | 296 | NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ |
278 | } while (0) | 297 | } while (0) |
279 | 298 | ||
299 | #define AT_SYSINFO 32 | ||
300 | |||
301 | #define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32) | ||
302 | |||
303 | #define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) | ||
304 | |||
280 | #endif /* !CONFIG_X86_32 */ | 305 | #endif /* !CONFIG_X86_32 */ |
281 | 306 | ||
307 | #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) | ||
308 | |||
309 | #define VDSO_ENTRY \ | ||
310 | ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) | ||
311 | |||
282 | struct linux_binprm; | 312 | struct linux_binprm; |
283 | 313 | ||
284 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | 314 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 |
285 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | 315 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, |
286 | int executable_stack); | 316 | int executable_stack); |
287 | 317 | ||
318 | extern int syscall32_setup_pages(struct linux_binprm *, int exstack); | ||
319 | #define compat_arch_setup_additional_pages syscall32_setup_pages | ||
320 | |||
321 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); | ||
322 | #define arch_randomize_brk arch_randomize_brk | ||
323 | |||
288 | #endif /* __KERNEL__ */ | 324 | #endif /* __KERNEL__ */ |
289 | 325 | ||
290 | #endif | 326 | #endif |
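
Note on the unified ARCH_DLINFO above: the native 32-bit path and the compat path now emit the same two auxiliary-vector entries, AT_SYSINFO and AT_SYSINFO_EHDR. A minimal userspace sketch (not part of the patch; assumes Linux /proc and the AT_* constants from <elf.h>) showing where those entries land:

    #include <elf.h>    /* AT_NULL, AT_SYSINFO_EHDR */
    #include <stdio.h>

    int main(void)
    {
            unsigned long aux[2];   /* each auxv entry is a (type, value) pair */
            FILE *f = fopen("/proc/self/auxv", "rb");

            if (!f)
                    return 1;
            while (fread(aux, sizeof(aux), 1, f) == 1 && aux[0] != AT_NULL)
                    if (aux[0] == AT_SYSINFO_EHDR)  /* base of the vdso image */
                            printf("vdso mapped at %#lx\n", aux[1]);
            fclose(f);
            return 0;
    }
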
diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h index 680c39563345..8e6aef19f8f0 100644 --- a/include/asm-x86/emergency-restart.h +++ b/include/asm-x86/emergency-restart.h | |||
@@ -1,6 +1,18 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | 1 | #ifndef _ASM_EMERGENCY_RESTART_H |
2 | #define _ASM_EMERGENCY_RESTART_H | 2 | #define _ASM_EMERGENCY_RESTART_H |
3 | 3 | ||
4 | enum reboot_type { | ||
5 | BOOT_TRIPLE = 't', | ||
6 | BOOT_KBD = 'k', | ||
7 | #ifdef CONFIG_X86_32 | ||
8 | BOOT_BIOS = 'b', | ||
9 | #endif | ||
10 | BOOT_ACPI = 'a', | ||
11 | BOOT_EFI = 'e' | ||
12 | }; | ||
13 | |||
14 | extern enum reboot_type reboot_type; | ||
15 | |||
4 | extern void machine_emergency_restart(void); | 16 | extern void machine_emergency_restart(void); |
5 | 17 | ||
6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | 18 | #endif /* _ASM_EMERGENCY_RESTART_H */ |
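
The enum values above are the literal characters of the reboot= command line option, so a parser can store the option byte directly. A hedged sketch of such a __setup handler (the real parser lives in the arch reboot code and accepts further flags):

    static int __init reboot_setup(char *str)
    {
            switch (*str) {
            case 't':               /* BOOT_TRIPLE: force a triple fault */
            case 'k':               /* BOOT_KBD: reset via keyboard controller */
            case 'a':               /* BOOT_ACPI */
            case 'e':               /* BOOT_EFI */
    #ifdef CONFIG_X86_32
            case 'b':               /* BOOT_BIOS: jump through the BIOS vector */
    #endif
                    reboot_type = (enum reboot_type)*str;
                    break;
            }
            return 1;
    }
    __setup("reboot=", reboot_setup);
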
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index 249e753ac805..a7404d50686b 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h | |||
@@ -65,7 +65,7 @@ enum fixed_addresses { | |||
65 | #endif | 65 | #endif |
66 | #ifdef CONFIG_X86_VISWS_APIC | 66 | #ifdef CONFIG_X86_VISWS_APIC |
67 | FIX_CO_CPU, /* Cobalt timer */ | 67 | FIX_CO_CPU, /* Cobalt timer */ |
68 | FIX_CO_APIC, /* Cobalt APIC Redirection Table */ | 68 | FIX_CO_APIC, /* Cobalt APIC Redirection Table */ |
69 | FIX_LI_PCIA, /* Lithium PCI Bridge A */ | 69 | FIX_LI_PCIA, /* Lithium PCI Bridge A */ |
70 | FIX_LI_PCIB, /* Lithium PCI Bridge B */ | 70 | FIX_LI_PCIB, /* Lithium PCI Bridge B */ |
71 | #endif | 71 | #endif |
@@ -74,7 +74,7 @@ enum fixed_addresses { | |||
74 | #endif | 74 | #endif |
75 | #ifdef CONFIG_X86_CYCLONE_TIMER | 75 | #ifdef CONFIG_X86_CYCLONE_TIMER |
76 | FIX_CYCLONE_TIMER, /* cyclone timer register */ | 76 | FIX_CYCLONE_TIMER, /* cyclone timer register */ |
77 | #endif | 77 | #endif |
78 | #ifdef CONFIG_HIGHMEM | 78 | #ifdef CONFIG_HIGHMEM |
79 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | 79 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ |
80 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | 80 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, |
@@ -90,11 +90,23 @@ enum fixed_addresses { | |||
90 | FIX_PARAVIRT_BOOTMAP, | 90 | FIX_PARAVIRT_BOOTMAP, |
91 | #endif | 91 | #endif |
92 | __end_of_permanent_fixed_addresses, | 92 | __end_of_permanent_fixed_addresses, |
93 | /* temporary boot-time mappings, used before ioremap() is functional */ | 93 | /* |
94 | #define NR_FIX_BTMAPS 16 | 94 | * 256 temporary boot-time mappings, used by early_ioremap(), |
95 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses, | 95 | * before ioremap() is functional. |
96 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, | 96 | * |
97 | * We round it up to the next 512-page boundary so that we | ||
98 | * can have a single pgd entry and a single pte table: | ||
99 | */ | ||
100 | #define NR_FIX_BTMAPS 64 | ||
101 | #define FIX_BTMAPS_NESTING 4 | ||
102 | FIX_BTMAP_END = | ||
103 | __end_of_permanent_fixed_addresses + 512 - | ||
104 | (__end_of_permanent_fixed_addresses & 511), | ||
105 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, | ||
97 | FIX_WP_TEST, | 106 | FIX_WP_TEST, |
107 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
108 | FIX_OHCI1394_BASE, | ||
109 | #endif | ||
98 | __end_of_fixed_addresses | 110 | __end_of_fixed_addresses |
99 | }; | 111 | }; |
100 | 112 | ||
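
The FIX_BTMAP_END expression above is "round up to the next multiple of 512"; it always advances, even for an already-aligned input, which is harmless here since only the alignment matters. A standalone sketch of the arithmetic (helper name is illustrative, not from the patch):

    #include <stdio.h>

    static unsigned int next_512_boundary(unsigned int end)
    {
            return end + 512 - (end & 511);  /* same form as FIX_BTMAP_END */
    }

    int main(void)
    {
            /* NR_FIX_BTMAPS * FIX_BTMAPS_NESTING = 64 * 4 = the 256 slots
             * the comment advertises */
            printf("%u\n", next_512_boundary(300)); /* -> 512 */
            printf("%u\n", next_512_boundary(600)); /* -> 1024 */
            printf("%u\n", next_512_boundary(512)); /* -> 1024, still advances */
            return 0;
    }
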
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index cdfbe4a6ae6f..70ddb21e6458 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/apicdef.h> | 15 | #include <asm/apicdef.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | #include <asm/vsyscall.h> | 17 | #include <asm/vsyscall.h> |
18 | #include <asm/efi.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Here we define all the compile-time 'special' virtual | 21 | * Here we define all the compile-time 'special' virtual |
@@ -41,6 +42,11 @@ enum fixed_addresses { | |||
41 | FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */ | 42 | FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */ |
42 | FIX_IO_APIC_BASE_0, | 43 | FIX_IO_APIC_BASE_0, |
43 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, | 44 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, |
45 | FIX_EFI_IO_MAP_LAST_PAGE, | ||
46 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1, | ||
47 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
48 | FIX_OHCI1394_BASE, | ||
49 | #endif | ||
44 | __end_of_fixed_addresses | 50 | __end_of_fixed_addresses |
45 | }; | 51 | }; |
46 | 52 | ||
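
Each fixed_addresses slot is one page allocated downward from the top of the address space, so an enum index becomes a virtual address by pure compile-time arithmetic. A hedged sketch of the conversion (this helper lives in the fixmap headers, not in this hunk):

    #define __fix_to_virt(x)  (FIXADDR_TOP - ((x) << PAGE_SHIFT))

    /* e.g. locating the new OHCI-1394 early-debug page: */
    void __iomem *ohci = (void __iomem *)__fix_to_virt(FIX_OHCI1394_BASE);
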
diff --git a/include/asm-x86/fpu32.h b/include/asm-x86/fpu32.h deleted file mode 100644 index 4153db5c0c31..000000000000 --- a/include/asm-x86/fpu32.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef _FPU32_H | ||
2 | #define _FPU32_H 1 | ||
3 | |||
4 | struct _fpstate_ia32; | ||
5 | |||
6 | int restore_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, int fsave); | ||
7 | int save_i387_ia32(struct task_struct *tsk, struct _fpstate_ia32 __user *buf, | ||
8 | struct pt_regs *regs, int fsave); | ||
9 | |||
10 | #endif | ||
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index 1f4610e0c613..cd9f894dd2d7 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h | |||
@@ -1,5 +1,124 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_FUTEX_H |
2 | # include "futex_32.h" | 2 | #define _ASM_X86_FUTEX_H |
3 | #else | 3 | |
4 | # include "futex_64.h" | 4 | #ifdef __KERNEL__ |
5 | |||
6 | #include <linux/futex.h> | ||
7 | |||
8 | #include <asm/asm.h> | ||
9 | #include <asm/errno.h> | ||
10 | #include <asm/processor.h> | ||
11 | #include <asm/system.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | |||
14 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ | ||
15 | __asm__ __volatile( \ | ||
16 | "1: " insn "\n" \ | ||
17 | "2: .section .fixup,\"ax\"\n \ | ||
18 | 3: mov %3, %1\n \ | ||
19 | jmp 2b\n \ | ||
20 | .previous\n" \ | ||
21 | _ASM_EXTABLE(1b,3b) \ | ||
22 | : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ | ||
23 | : "i" (-EFAULT), "0" (oparg), "1" (0)) | ||
24 | |||
25 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ | ||
26 | __asm__ __volatile( \ | ||
27 | "1: movl %2, %0\n \ | ||
28 | movl %0, %3\n" \ | ||
29 | insn "\n" \ | ||
30 | "2: lock; cmpxchgl %3, %2\n \ | ||
31 | jnz 1b\n \ | ||
32 | 3: .section .fixup,\"ax\"\n \ | ||
33 | 4: mov %5, %1\n \ | ||
34 | jmp 3b\n \ | ||
35 | .previous\n" \ | ||
36 | _ASM_EXTABLE(1b,4b) \ | ||
37 | _ASM_EXTABLE(2b,4b) \ | ||
38 | : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ | ||
39 | "=&r" (tem) \ | ||
40 | : "r" (oparg), "i" (-EFAULT), "1" (0)) | ||
41 | |||
42 | static inline int | ||
43 | futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | ||
44 | { | ||
45 | int op = (encoded_op >> 28) & 7; | ||
46 | int cmp = (encoded_op >> 24) & 15; | ||
47 | int oparg = (encoded_op << 8) >> 20; | ||
48 | int cmparg = (encoded_op << 20) >> 20; | ||
49 | int oldval = 0, ret, tem; | ||
50 | |||
51 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
52 | oparg = 1 << oparg; | ||
53 | |||
54 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
55 | return -EFAULT; | ||
56 | |||
57 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) | ||
58 | /* Real i386 machines can only support FUTEX_OP_SET */ | ||
59 | if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3) | ||
60 | return -ENOSYS; | ||
61 | #endif | ||
62 | |||
63 | pagefault_disable(); | ||
64 | |||
65 | switch (op) { | ||
66 | case FUTEX_OP_SET: | ||
67 | __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); | ||
68 | break; | ||
69 | case FUTEX_OP_ADD: | ||
70 | __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval, | ||
71 | uaddr, oparg); | ||
72 | break; | ||
73 | case FUTEX_OP_OR: | ||
74 | __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); | ||
75 | break; | ||
76 | case FUTEX_OP_ANDN: | ||
77 | __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); | ||
78 | break; | ||
79 | case FUTEX_OP_XOR: | ||
80 | __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); | ||
81 | break; | ||
82 | default: | ||
83 | ret = -ENOSYS; | ||
84 | } | ||
85 | |||
86 | pagefault_enable(); | ||
87 | |||
88 | if (!ret) { | ||
89 | switch (cmp) { | ||
90 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | ||
91 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | ||
92 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | ||
93 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | ||
94 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | ||
95 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | ||
96 | default: ret = -ENOSYS; | ||
97 | } | ||
98 | } | ||
99 | return ret; | ||
100 | } | ||
101 | |||
102 | static inline int | ||
103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
104 | { | ||
105 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
106 | return -EFAULT; | ||
107 | |||
108 | __asm__ __volatile__( | ||
109 | "1: lock; cmpxchgl %3, %1 \n" | ||
110 | "2: .section .fixup, \"ax\" \n" | ||
111 | "3: mov %2, %0 \n" | ||
112 | " jmp 2b \n" | ||
113 | " .previous \n" | ||
114 | _ASM_EXTABLE(1b,3b) | ||
115 | : "=a" (oldval), "+m" (*uaddr) | ||
116 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | ||
117 | : "memory" | ||
118 | ); | ||
119 | |||
120 | return oldval; | ||
121 | } | ||
122 | |||
123 | #endif | ||
5 | #endif | 124 | #endif |
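
The bitfield unpacking at the top of futex_atomic_op_inuser() is easy to check by hand; the shift-left-then-right pairs sign-extend the two 12-bit arguments. A standalone decode of one sample value (constants inlined: FUTEX_OP_ADD is 1 and FUTEX_OP_CMP_GT is 4 in <linux/futex.h>):

    #include <stdio.h>

    int main(void)
    {
            int encoded_op = (1 << 28) | (4 << 24) | (1 << 12) | 0;

            int op     = (encoded_op >> 28) & 7;    /* 1: FUTEX_OP_ADD    */
            int cmp    = (encoded_op >> 24) & 15;   /* 4: FUTEX_OP_CMP_GT */
            int oparg  = (encoded_op << 8) >> 20;   /* 1, sign-extended   */
            int cmparg = (encoded_op << 20) >> 20;  /* 0, sign-extended   */

            /* prints: op=1 cmp=4 oparg=1 cmparg=0 */
            printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
            return 0;
    }
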
diff --git a/include/asm-x86/futex_32.h b/include/asm-x86/futex_32.h deleted file mode 100644 index 438ef0ec7101..000000000000 --- a/include/asm-x86/futex_32.h +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | #ifndef _ASM_FUTEX_H | ||
2 | #define _ASM_FUTEX_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/futex.h> | ||
7 | #include <asm/errno.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <asm/uaccess.h> | ||
11 | |||
12 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ | ||
13 | __asm__ __volatile ( \ | ||
14 | "1: " insn "\n" \ | ||
15 | "2: .section .fixup,\"ax\"\n\ | ||
16 | 3: mov %3, %1\n\ | ||
17 | jmp 2b\n\ | ||
18 | .previous\n\ | ||
19 | .section __ex_table,\"a\"\n\ | ||
20 | .align 8\n\ | ||
21 | .long 1b,3b\n\ | ||
22 | .previous" \ | ||
23 | : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ | ||
24 | : "i" (-EFAULT), "0" (oparg), "1" (0)) | ||
25 | |||
26 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ | ||
27 | __asm__ __volatile ( \ | ||
28 | "1: movl %2, %0\n\ | ||
29 | movl %0, %3\n" \ | ||
30 | insn "\n" \ | ||
31 | "2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ | ||
32 | jnz 1b\n\ | ||
33 | 3: .section .fixup,\"ax\"\n\ | ||
34 | 4: mov %5, %1\n\ | ||
35 | jmp 3b\n\ | ||
36 | .previous\n\ | ||
37 | .section __ex_table,\"a\"\n\ | ||
38 | .align 8\n\ | ||
39 | .long 1b,4b,2b,4b\n\ | ||
40 | .previous" \ | ||
41 | : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ | ||
42 | "=&r" (tem) \ | ||
43 | : "r" (oparg), "i" (-EFAULT), "1" (0)) | ||
44 | |||
45 | static inline int | ||
46 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | ||
47 | { | ||
48 | int op = (encoded_op >> 28) & 7; | ||
49 | int cmp = (encoded_op >> 24) & 15; | ||
50 | int oparg = (encoded_op << 8) >> 20; | ||
51 | int cmparg = (encoded_op << 20) >> 20; | ||
52 | int oldval = 0, ret, tem; | ||
53 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
54 | oparg = 1 << oparg; | ||
55 | |||
56 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | ||
57 | return -EFAULT; | ||
58 | |||
59 | pagefault_disable(); | ||
60 | |||
61 | if (op == FUTEX_OP_SET) | ||
62 | __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); | ||
63 | else { | ||
64 | #ifndef CONFIG_X86_BSWAP | ||
65 | if (boot_cpu_data.x86 == 3) | ||
66 | ret = -ENOSYS; | ||
67 | else | ||
68 | #endif | ||
69 | switch (op) { | ||
70 | case FUTEX_OP_ADD: | ||
71 | __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, | ||
72 | oldval, uaddr, oparg); | ||
73 | break; | ||
74 | case FUTEX_OP_OR: | ||
75 | __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, | ||
76 | oparg); | ||
77 | break; | ||
78 | case FUTEX_OP_ANDN: | ||
79 | __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, | ||
80 | ~oparg); | ||
81 | break; | ||
82 | case FUTEX_OP_XOR: | ||
83 | __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, | ||
84 | oparg); | ||
85 | break; | ||
86 | default: | ||
87 | ret = -ENOSYS; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | pagefault_enable(); | ||
92 | |||
93 | if (!ret) { | ||
94 | switch (cmp) { | ||
95 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | ||
96 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | ||
97 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | ||
98 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | ||
99 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | ||
100 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | ||
101 | default: ret = -ENOSYS; | ||
102 | } | ||
103 | } | ||
104 | return ret; | ||
105 | } | ||
106 | |||
107 | static inline int | ||
108 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
109 | { | ||
110 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
111 | return -EFAULT; | ||
112 | |||
113 | __asm__ __volatile__( | ||
114 | "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" | ||
115 | |||
116 | "2: .section .fixup, \"ax\" \n" | ||
117 | "3: mov %2, %0 \n" | ||
118 | " jmp 2b \n" | ||
119 | " .previous \n" | ||
120 | |||
121 | " .section __ex_table, \"a\" \n" | ||
122 | " .align 8 \n" | ||
123 | " .long 1b,3b \n" | ||
124 | " .previous \n" | ||
125 | |||
126 | : "=a" (oldval), "+m" (*uaddr) | ||
127 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | ||
128 | : "memory" | ||
129 | ); | ||
130 | |||
131 | return oldval; | ||
132 | } | ||
133 | |||
134 | #endif | ||
135 | #endif | ||
diff --git a/include/asm-x86/futex_64.h b/include/asm-x86/futex_64.h deleted file mode 100644 index 5cdfb08013c3..000000000000 --- a/include/asm-x86/futex_64.h +++ /dev/null | |||
@@ -1,125 +0,0 @@ | |||
1 | #ifndef _ASM_FUTEX_H | ||
2 | #define _ASM_FUTEX_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/futex.h> | ||
7 | #include <asm/errno.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <asm/uaccess.h> | ||
10 | |||
11 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ | ||
12 | __asm__ __volatile ( \ | ||
13 | "1: " insn "\n" \ | ||
14 | "2: .section .fixup,\"ax\"\n\ | ||
15 | 3: mov %3, %1\n\ | ||
16 | jmp 2b\n\ | ||
17 | .previous\n\ | ||
18 | .section __ex_table,\"a\"\n\ | ||
19 | .align 8\n\ | ||
20 | .quad 1b,3b\n\ | ||
21 | .previous" \ | ||
22 | : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \ | ||
23 | : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0)) | ||
24 | |||
25 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ | ||
26 | __asm__ __volatile ( \ | ||
27 | "1: movl %2, %0\n\ | ||
28 | movl %0, %3\n" \ | ||
29 | insn "\n" \ | ||
30 | "2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ | ||
31 | jnz 1b\n\ | ||
32 | 3: .section .fixup,\"ax\"\n\ | ||
33 | 4: mov %5, %1\n\ | ||
34 | jmp 3b\n\ | ||
35 | .previous\n\ | ||
36 | .section __ex_table,\"a\"\n\ | ||
37 | .align 8\n\ | ||
38 | .quad 1b,4b,2b,4b\n\ | ||
39 | .previous" \ | ||
40 | : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \ | ||
41 | "=&r" (tem) \ | ||
42 | : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0)) | ||
43 | |||
44 | static inline int | ||
45 | futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | ||
46 | { | ||
47 | int op = (encoded_op >> 28) & 7; | ||
48 | int cmp = (encoded_op >> 24) & 15; | ||
49 | int oparg = (encoded_op << 8) >> 20; | ||
50 | int cmparg = (encoded_op << 20) >> 20; | ||
51 | int oldval = 0, ret, tem; | ||
52 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
53 | oparg = 1 << oparg; | ||
54 | |||
55 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | ||
56 | return -EFAULT; | ||
57 | |||
58 | pagefault_disable(); | ||
59 | |||
60 | switch (op) { | ||
61 | case FUTEX_OP_SET: | ||
62 | __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); | ||
63 | break; | ||
64 | case FUTEX_OP_ADD: | ||
65 | __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, | ||
66 | uaddr, oparg); | ||
67 | break; | ||
68 | case FUTEX_OP_OR: | ||
69 | __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); | ||
70 | break; | ||
71 | case FUTEX_OP_ANDN: | ||
72 | __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg); | ||
73 | break; | ||
74 | case FUTEX_OP_XOR: | ||
75 | __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg); | ||
76 | break; | ||
77 | default: | ||
78 | ret = -ENOSYS; | ||
79 | } | ||
80 | |||
81 | pagefault_enable(); | ||
82 | |||
83 | if (!ret) { | ||
84 | switch (cmp) { | ||
85 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | ||
86 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | ||
87 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | ||
88 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | ||
89 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | ||
90 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | ||
91 | default: ret = -ENOSYS; | ||
92 | } | ||
93 | } | ||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | static inline int | ||
98 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
99 | { | ||
100 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
101 | return -EFAULT; | ||
102 | |||
103 | __asm__ __volatile__( | ||
104 | "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" | ||
105 | |||
106 | "2: .section .fixup, \"ax\" \n" | ||
107 | "3: mov %2, %0 \n" | ||
108 | " jmp 2b \n" | ||
109 | " .previous \n" | ||
110 | |||
111 | " .section __ex_table, \"a\" \n" | ||
112 | " .align 8 \n" | ||
113 | " .quad 1b,3b \n" | ||
114 | " .previous \n" | ||
115 | |||
116 | : "=a" (oldval), "=m" (*uaddr) | ||
117 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | ||
118 | : "memory" | ||
119 | ); | ||
120 | |||
121 | return oldval; | ||
122 | } | ||
123 | |||
124 | #endif | ||
125 | #endif | ||
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h index f704c50519b8..90958ed993fa 100644 --- a/include/asm-x86/gart.h +++ b/include/asm-x86/gart.h | |||
@@ -9,6 +9,7 @@ extern int iommu_detected; | |||
9 | extern void gart_iommu_init(void); | 9 | extern void gart_iommu_init(void); |
10 | extern void gart_iommu_shutdown(void); | 10 | extern void gart_iommu_shutdown(void); |
11 | extern void __init gart_parse_options(char *); | 11 | extern void __init gart_parse_options(char *); |
12 | extern void early_gart_iommu_check(void); | ||
12 | extern void gart_iommu_hole_init(void); | 13 | extern void gart_iommu_hole_init(void); |
13 | extern int fallback_aper_order; | 14 | extern int fallback_aper_order; |
14 | extern int fallback_aper_force; | 15 | extern int fallback_aper_force; |
@@ -20,6 +21,10 @@ extern int fix_aperture; | |||
20 | #define gart_iommu_aperture 0 | 21 | #define gart_iommu_aperture 0 |
21 | #define gart_iommu_aperture_allowed 0 | 22 | #define gart_iommu_aperture_allowed 0 |
22 | 23 | ||
24 | static inline void early_gart_iommu_check(void) | ||
25 | { | ||
26 | } | ||
27 | |||
23 | static inline void gart_iommu_shutdown(void) | 28 | static inline void gart_iommu_shutdown(void) |
24 | { | 29 | { |
25 | } | 30 | } |
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 771af336734f..811fe14f70b2 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h | |||
@@ -121,9 +121,15 @@ extern int geode_get_dev_base(unsigned int dev); | |||
121 | #define GPIO_MAP_Z 0xE8 | 121 | #define GPIO_MAP_Z 0xE8 |
122 | #define GPIO_MAP_W 0xEC | 122 | #define GPIO_MAP_W 0xEC |
123 | 123 | ||
124 | extern void geode_gpio_set(unsigned int, unsigned int); | 124 | static inline u32 geode_gpio(unsigned int nr) |
125 | extern void geode_gpio_clear(unsigned int, unsigned int); | 125 | { |
126 | extern int geode_gpio_isset(unsigned int, unsigned int); | 126 | BUG_ON(nr > 28); |
127 | return 1 << nr; | ||
128 | } | ||
129 | |||
130 | extern void geode_gpio_set(u32, unsigned int); | ||
131 | extern void geode_gpio_clear(u32, unsigned int); | ||
132 | extern int geode_gpio_isset(u32, unsigned int); | ||
127 | extern void geode_gpio_setup_event(unsigned int, int, int); | 133 | extern void geode_gpio_setup_event(unsigned int, int, int); |
128 | extern void geode_gpio_set_irq(unsigned int, unsigned int); | 134 | extern void geode_gpio_set_irq(unsigned int, unsigned int); |
129 | 135 | ||
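
geode_gpio() turns a pin number into the bitmask the accessors now take, with the BUG_ON guarding the 28-pin limit at the call site. A hedged usage sketch, assuming the GPIO_OUTPUT_VAL and GPIO_READ_BACK register offsets defined earlier in this header:

    /* drive pin 6 high, then confirm it latched */
    geode_gpio_set(geode_gpio(6), GPIO_OUTPUT_VAL);
    if (!geode_gpio_isset(geode_gpio(6), GPIO_READ_BACK))
            printk(KERN_WARNING "GPIO 6 did not latch\n");
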
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h new file mode 100644 index 000000000000..ff87fca0caf9 --- /dev/null +++ b/include/asm-x86/gpio.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_I386_GPIO_H | ||
2 | #define _ASM_I386_GPIO_H | ||
3 | |||
4 | #include <gpio.h> | ||
5 | |||
6 | #endif /* _ASM_I386_GPIO_H */ | ||
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 13cdcd66fff2..479767c9195f 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h | |||
@@ -38,11 +38,6 @@ extern pte_t *pkmap_page_table; | |||
38 | * easily, subsequent pte tables have to be allocated in one physical | 38 | * easily, subsequent pte tables have to be allocated in one physical |
39 | * chunk of RAM. | 39 | * chunk of RAM. |
40 | */ | 40 | */ |
41 | #ifdef CONFIG_X86_PAE | ||
42 | #define LAST_PKMAP 512 | ||
43 | #else | ||
44 | #define LAST_PKMAP 1024 | ||
45 | #endif | ||
46 | /* | 41 | /* |
47 | * Ordering is: | 42 | * Ordering is: |
48 | * | 43 | * |
@@ -58,13 +53,12 @@ extern pte_t *pkmap_page_table; | |||
58 | * VMALLOC_START | 53 | * VMALLOC_START |
59 | * high_memory | 54 | * high_memory |
60 | */ | 55 | */ |
61 | #define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) | ||
62 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | 56 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) |
63 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | 57 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) |
64 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | 58 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) |
65 | 59 | ||
66 | extern void * FASTCALL(kmap_high(struct page *page)); | 60 | extern void *kmap_high(struct page *page); |
67 | extern void FASTCALL(kunmap_high(struct page *page)); | 61 | extern void kunmap_high(struct page *page); |
68 | 62 | ||
69 | void *kmap(struct page *page); | 63 | void *kmap(struct page *page); |
70 | void kunmap(struct page *page); | 64 | void kunmap(struct page *page); |
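
With the FASTCALL annotations dropped, kmap_high()/kunmap_high() are ordinary C calls; the usage pattern around the kmap() wrappers is unchanged. A minimal sketch for a possibly-highmem page:

    struct page *page = alloc_page(GFP_HIGHUSER);
    char *vaddr;

    vaddr = kmap(page);     /* may sleep; lowmem pages resolve via
                               page_address() without a pkmap slot */
    memset(vaddr, 0, PAGE_SIZE);
    kunmap(page);
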
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h index ad8d6e758785..6a9b4ac59bf7 100644 --- a/include/asm-x86/hpet.h +++ b/include/asm-x86/hpet.h | |||
@@ -69,6 +69,7 @@ extern void force_hpet_resume(void); | |||
69 | 69 | ||
70 | #include <linux/interrupt.h> | 70 | #include <linux/interrupt.h> |
71 | 71 | ||
72 | typedef irqreturn_t (*rtc_irq_handler)(int interrupt, void *cookie); | ||
72 | extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); | 73 | extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); |
73 | extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); | 74 | extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); |
74 | extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, | 75 | extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, |
@@ -77,13 +78,16 @@ extern int hpet_set_periodic_freq(unsigned long freq); | |||
77 | extern int hpet_rtc_dropped_irq(void); | 78 | extern int hpet_rtc_dropped_irq(void); |
78 | extern int hpet_rtc_timer_init(void); | 79 | extern int hpet_rtc_timer_init(void); |
79 | extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); | 80 | extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); |
81 | extern int hpet_register_irq_handler(rtc_irq_handler handler); | ||
82 | extern void hpet_unregister_irq_handler(rtc_irq_handler handler); | ||
80 | 83 | ||
81 | #endif /* CONFIG_HPET_EMULATE_RTC */ | 84 | #endif /* CONFIG_HPET_EMULATE_RTC */ |
82 | 85 | ||
83 | #else | 86 | #else /* CONFIG_HPET_TIMER */ |
84 | 87 | ||
85 | static inline int hpet_enable(void) { return 0; } | 88 | static inline int hpet_enable(void) { return 0; } |
86 | static inline unsigned long hpet_readl(unsigned long a) { return 0; } | 89 | static inline unsigned long hpet_readl(unsigned long a) { return 0; } |
90 | static inline int is_hpet_enabled(void) { return 0; } | ||
87 | 91 | ||
88 | #endif /* CONFIG_HPET_TIMER */ | 92 | #endif |
89 | #endif /* ASM_X86_HPET_H */ | 93 | #endif /* ASM_X86_HPET_H */ |
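
The new rtc_irq_handler typedef and the register/unregister pair let an RTC driver receive the HPET-emulated RTC interrupt instead of claiming IRQ 8 itself. A hedged client sketch (handler name is illustrative):

    static irqreturn_t cmos_rtc_handler(int irq, void *cookie)
    {
            /* consume one emulated RTC tick */
            return IRQ_HANDLED;
    }

    if (is_hpet_enabled() && hpet_register_irq_handler(cmos_rtc_handler))
            printk(KERN_WARNING "HPET RTC emulation busy, using IRQ 8\n");
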
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h index 0bedbdf5e907..ea88054e03f3 100644 --- a/include/asm-x86/hw_irq_32.h +++ b/include/asm-x86/hw_irq_32.h | |||
@@ -26,19 +26,19 @@ | |||
26 | * Interrupt entry/exit code at both C and assembly level | 26 | * Interrupt entry/exit code at both C and assembly level |
27 | */ | 27 | */ |
28 | 28 | ||
29 | extern void (*interrupt[NR_IRQS])(void); | 29 | extern void (*const interrupt[NR_IRQS])(void); |
30 | 30 | ||
31 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
32 | fastcall void reschedule_interrupt(void); | 32 | void reschedule_interrupt(void); |
33 | fastcall void invalidate_interrupt(void); | 33 | void invalidate_interrupt(void); |
34 | fastcall void call_function_interrupt(void); | 34 | void call_function_interrupt(void); |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | #ifdef CONFIG_X86_LOCAL_APIC | 37 | #ifdef CONFIG_X86_LOCAL_APIC |
38 | fastcall void apic_timer_interrupt(void); | 38 | void apic_timer_interrupt(void); |
39 | fastcall void error_interrupt(void); | 39 | void error_interrupt(void); |
40 | fastcall void spurious_interrupt(void); | 40 | void spurious_interrupt(void); |
41 | fastcall void thermal_interrupt(void); | 41 | void thermal_interrupt(void); |
42 | #define platform_legacy_irq(irq) ((irq) < 16) | 42 | #define platform_legacy_irq(irq) ((irq) < 16) |
43 | #endif | 43 | #endif |
44 | 44 | ||
@@ -47,7 +47,7 @@ void enable_8259A_irq(unsigned int irq); | |||
47 | int i8259A_irq_pending(unsigned int irq); | 47 | int i8259A_irq_pending(unsigned int irq); |
48 | void make_8259A_irq(unsigned int irq); | 48 | void make_8259A_irq(unsigned int irq); |
49 | void init_8259A(int aeoi); | 49 | void init_8259A(int aeoi); |
50 | void FASTCALL(send_IPI_self(int vector)); | 50 | void send_IPI_self(int vector); |
51 | void init_VISWS_APIC_irqs(void); | 51 | void init_VISWS_APIC_irqs(void); |
52 | void setup_IO_APIC(void); | 52 | void setup_IO_APIC(void); |
53 | void disable_IO_APIC(void); | 53 | void disable_IO_APIC(void); |
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index a470d59da678..312a58d6dac6 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h | |||
@@ -135,11 +135,13 @@ extern void init_8259A(int aeoi); | |||
135 | extern void send_IPI_self(int vector); | 135 | extern void send_IPI_self(int vector); |
136 | extern void init_VISWS_APIC_irqs(void); | 136 | extern void init_VISWS_APIC_irqs(void); |
137 | extern void setup_IO_APIC(void); | 137 | extern void setup_IO_APIC(void); |
138 | extern void enable_IO_APIC(void); | ||
138 | extern void disable_IO_APIC(void); | 139 | extern void disable_IO_APIC(void); |
139 | extern void print_IO_APIC(void); | 140 | extern void print_IO_APIC(void); |
140 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | 141 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); |
141 | extern void send_IPI(int dest, int vector); | 142 | extern void send_IPI(int dest, int vector); |
142 | extern void setup_ioapic_dest(void); | 143 | extern void setup_ioapic_dest(void); |
144 | extern void native_init_IRQ(void); | ||
143 | 145 | ||
144 | extern unsigned long io_apic_irqs; | 146 | extern unsigned long io_apic_irqs; |
145 | 147 | ||
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index a8bbed349664..6b1895ccd6b7 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h | |||
@@ -1,5 +1,352 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | /* |
2 | # include "i387_32.h" | 2 | * Copyright (C) 1994 Linus Torvalds |
3 | * | ||
4 | * Pentium III FXSR, SSE support | ||
5 | * General FPU state handling cleanups | ||
6 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
7 | * x86-64 work by Andi Kleen 2002 | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_X86_I387_H | ||
11 | #define _ASM_X86_I387_H | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/regset.h> | ||
16 | #include <asm/asm.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/sigcontext.h> | ||
19 | #include <asm/user.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | |||
22 | extern void fpu_init(void); | ||
23 | extern unsigned int mxcsr_feature_mask; | ||
24 | extern void mxcsr_feature_mask_init(void); | ||
25 | extern void init_fpu(struct task_struct *child); | ||
26 | extern asmlinkage void math_state_restore(void); | ||
27 | |||
28 | extern user_regset_active_fn fpregs_active, xfpregs_active; | ||
29 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; | ||
30 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; | ||
31 | |||
32 | #ifdef CONFIG_IA32_EMULATION | ||
33 | struct _fpstate_ia32; | ||
34 | extern int save_i387_ia32(struct _fpstate_ia32 __user *buf); | ||
35 | extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf); | ||
36 | #endif | ||
37 | |||
38 | #ifdef CONFIG_X86_64 | ||
39 | |||
40 | /* Ignore delayed exceptions from user space */ | ||
41 | static inline void tolerant_fwait(void) | ||
42 | { | ||
43 | asm volatile("1: fwait\n" | ||
44 | "2:\n" | ||
45 | _ASM_EXTABLE(1b,2b)); | ||
46 | } | ||
47 | |||
48 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | ||
49 | { | ||
50 | int err; | ||
51 | |||
52 | asm volatile("1: rex64/fxrstor (%[fx])\n\t" | ||
53 | "2:\n" | ||
54 | ".section .fixup,\"ax\"\n" | ||
55 | "3: movl $-1,%[err]\n" | ||
56 | " jmp 2b\n" | ||
57 | ".previous\n" | ||
58 | _ASM_EXTABLE(1b,3b) | ||
59 | : [err] "=r" (err) | ||
60 | #if 0 /* See comment in __save_init_fpu() below. */ | ||
61 | : [fx] "r" (fx), "m" (*fx), "0" (0)); | ||
62 | #else | ||
63 | : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); | ||
64 | #endif | ||
65 | if (unlikely(err)) | ||
66 | init_fpu(current); | ||
67 | return err; | ||
68 | } | ||
69 | |||
70 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | ||
71 | |||
72 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
73 | is pending. Clear the x87 state here by setting it to fixed | ||
74 | values. The kernel data segment can be sometimes 0 and sometimes | ||
75 | new user value. Both should be ok. | ||
76 | Use the PDA as safe address because it should be already in L1. */ | ||
77 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | ||
78 | { | ||
79 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
80 | asm volatile("fnclex"); | ||
81 | alternative_input(ASM_NOP8 ASM_NOP2, | ||
82 | " emms\n" /* clear stack tags */ | ||
83 | " fildl %%gs:0", /* load to clear state */ | ||
84 | X86_FEATURE_FXSAVE_LEAK); | ||
85 | } | ||
86 | |||
87 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | ||
88 | { | ||
89 | int err; | ||
90 | |||
91 | asm volatile("1: rex64/fxsave (%[fx])\n\t" | ||
92 | "2:\n" | ||
93 | ".section .fixup,\"ax\"\n" | ||
94 | "3: movl $-1,%[err]\n" | ||
95 | " jmp 2b\n" | ||
96 | ".previous\n" | ||
97 | _ASM_EXTABLE(1b,3b) | ||
98 | : [err] "=r" (err), "=m" (*fx) | ||
99 | #if 0 /* See comment in __fxsave_clear() below. */ | ||
100 | : [fx] "r" (fx), "0" (0)); | ||
101 | #else | ||
102 | : [fx] "cdaSDb" (fx), "0" (0)); | ||
103 | #endif | ||
104 | if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) | ||
105 | err = -EFAULT; | ||
106 | /* No need to clear here because the caller clears USED_MATH */ | ||
107 | return err; | ||
108 | } | ||
109 | |||
110 | static inline void __save_init_fpu(struct task_struct *tsk) | ||
111 | { | ||
112 | /* Using "rex64; fxsave %0" is broken because, if the memory operand | ||
113 | uses any extended registers for addressing, a second REX prefix | ||
114 | will be generated (to the assembler, rex64 followed by semicolon | ||
115 | is a separate instruction), and hence the 64-bitness is lost. */ | ||
116 | #if 0 | ||
117 | /* Using "fxsaveq %0" would be the ideal choice, but is only supported | ||
118 | starting with gas 2.16. */ | ||
119 | __asm__ __volatile__("fxsaveq %0" | ||
120 | : "=m" (tsk->thread.i387.fxsave)); | ||
121 | #elif 0 | ||
122 | /* Using, as a workaround, the properly prefixed form below isn't | ||
123 | accepted by any binutils version so far released, complaining that | ||
124 | the same type of prefix is used twice if an extended register is | ||
125 | needed for addressing (fix submitted to mainline 2005-11-21). */ | ||
126 | __asm__ __volatile__("rex64/fxsave %0" | ||
127 | : "=m" (tsk->thread.i387.fxsave)); | ||
128 | #else | ||
129 | /* This, however, we can work around by forcing the compiler to select | ||
130 | an addressing mode that doesn't require extended registers. */ | ||
131 | __asm__ __volatile__("rex64/fxsave %P2(%1)" | ||
132 | : "=m" (tsk->thread.i387.fxsave) | ||
133 | : "cdaSDb" (tsk), | ||
134 | "i" (offsetof(__typeof__(*tsk), | ||
135 | thread.i387.fxsave))); | ||
136 | #endif | ||
137 | clear_fpu_state(&tsk->thread.i387.fxsave); | ||
138 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Signal frame handlers. | ||
143 | */ | ||
144 | |||
145 | static inline int save_i387(struct _fpstate __user *buf) | ||
146 | { | ||
147 | struct task_struct *tsk = current; | ||
148 | int err = 0; | ||
149 | |||
150 | BUILD_BUG_ON(sizeof(struct user_i387_struct) != | ||
151 | sizeof(tsk->thread.i387.fxsave)); | ||
152 | |||
153 | if ((unsigned long)buf % 16) | ||
154 | printk("save_i387: bad fpstate %p\n", buf); | ||
155 | |||
156 | if (!used_math()) | ||
157 | return 0; | ||
158 | clear_used_math(); /* trigger finit */ | ||
159 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
160 | err = save_i387_checking((struct i387_fxsave_struct __user *)buf); | ||
161 | if (err) return err; | ||
162 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
163 | stts(); | ||
164 | } else { | ||
165 | if (__copy_to_user(buf, &tsk->thread.i387.fxsave, | ||
166 | sizeof(struct i387_fxsave_struct))) | ||
167 | return -1; | ||
168 | } | ||
169 | return 1; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * This restores directly out of user space. Exceptions are handled. | ||
174 | */ | ||
175 | static inline int restore_i387(struct _fpstate __user *buf) | ||
176 | { | ||
177 | set_used_math(); | ||
178 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
179 | clts(); | ||
180 | task_thread_info(current)->status |= TS_USEDFPU; | ||
181 | } | ||
182 | return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); | ||
183 | } | ||
184 | |||
185 | #else /* CONFIG_X86_32 */ | ||
186 | |||
187 | static inline void tolerant_fwait(void) | ||
188 | { | ||
189 | asm volatile("fnclex ; fwait"); | ||
190 | } | ||
191 | |||
192 | static inline void restore_fpu(struct task_struct *tsk) | ||
193 | { | ||
194 | /* | ||
195 | * The "nop" is needed to make the instructions the same | ||
196 | * length. | ||
197 | */ | ||
198 | alternative_input( | ||
199 | "nop ; frstor %1", | ||
200 | "fxrstor %1", | ||
201 | X86_FEATURE_FXSR, | ||
202 | "m" ((tsk)->thread.i387.fxsave)); | ||
203 | } | ||
204 | |||
205 | /* We need a safe address that is cheap to find and that is already | ||
206 | in L1 during context switch. The best choices are unfortunately | ||
207 | different for UP and SMP */ | ||
208 | #ifdef CONFIG_SMP | ||
209 | #define safe_address (__per_cpu_offset[0]) | ||
3 | #else | 210 | #else |
4 | # include "i387_64.h" | 211 | #define safe_address (kstat_cpu(0).cpustat.user) |
5 | #endif | 212 | #endif |
213 | |||
214 | /* | ||
215 | * These must be called with preempt disabled | ||
216 | */ | ||
217 | static inline void __save_init_fpu(struct task_struct *tsk) | ||
218 | { | ||
219 | /* Use more nops than strictly needed in case the compiler | ||
220 | varies code */ | ||
221 | alternative_input( | ||
222 | "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, | ||
223 | "fxsave %[fx]\n" | ||
224 | "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", | ||
225 | X86_FEATURE_FXSR, | ||
226 | [fx] "m" (tsk->thread.i387.fxsave), | ||
227 | [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); | ||
228 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
229 | is pending. Clear the x87 state here by setting it to fixed | ||
230 | values. safe_address is a random variable that should be in L1 */ | ||
231 | alternative_input( | ||
232 | GENERIC_NOP8 GENERIC_NOP2, | ||
233 | "emms\n\t" /* clear stack tags */ | ||
234 | "fildl %[addr]", /* set F?P to defined value */ | ||
235 | X86_FEATURE_FXSAVE_LEAK, | ||
236 | [addr] "m" (safe_address)); | ||
237 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * Signal frame handlers... | ||
242 | */ | ||
243 | extern int save_i387(struct _fpstate __user *buf); | ||
244 | extern int restore_i387(struct _fpstate __user *buf); | ||
245 | |||
246 | #endif /* CONFIG_X86_64 */ | ||
247 | |||
248 | static inline void __unlazy_fpu(struct task_struct *tsk) | ||
249 | { | ||
250 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
251 | __save_init_fpu(tsk); | ||
252 | stts(); | ||
253 | } else | ||
254 | tsk->fpu_counter = 0; | ||
255 | } | ||
256 | |||
257 | static inline void __clear_fpu(struct task_struct *tsk) | ||
258 | { | ||
259 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
260 | tolerant_fwait(); | ||
261 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
262 | stts(); | ||
263 | } | ||
264 | } | ||
265 | |||
266 | static inline void kernel_fpu_begin(void) | ||
267 | { | ||
268 | struct thread_info *me = current_thread_info(); | ||
269 | preempt_disable(); | ||
270 | if (me->status & TS_USEDFPU) | ||
271 | __save_init_fpu(me->task); | ||
272 | else | ||
273 | clts(); | ||
274 | } | ||
275 | |||
276 | static inline void kernel_fpu_end(void) | ||
277 | { | ||
278 | stts(); | ||
279 | preempt_enable(); | ||
280 | } | ||
281 | |||
282 | #ifdef CONFIG_X86_64 | ||
283 | |||
284 | static inline void save_init_fpu(struct task_struct *tsk) | ||
285 | { | ||
286 | __save_init_fpu(tsk); | ||
287 | stts(); | ||
288 | } | ||
289 | |||
290 | #define unlazy_fpu __unlazy_fpu | ||
291 | #define clear_fpu __clear_fpu | ||
292 | |||
293 | #else /* CONFIG_X86_32 */ | ||
294 | |||
295 | /* | ||
296 | * These disable preemption on their own and are safe | ||
297 | */ | ||
298 | static inline void save_init_fpu(struct task_struct *tsk) | ||
299 | { | ||
300 | preempt_disable(); | ||
301 | __save_init_fpu(tsk); | ||
302 | stts(); | ||
303 | preempt_enable(); | ||
304 | } | ||
305 | |||
306 | static inline void unlazy_fpu(struct task_struct *tsk) | ||
307 | { | ||
308 | preempt_disable(); | ||
309 | __unlazy_fpu(tsk); | ||
310 | preempt_enable(); | ||
311 | } | ||
312 | |||
313 | static inline void clear_fpu(struct task_struct *tsk) | ||
314 | { | ||
315 | preempt_disable(); | ||
316 | __clear_fpu(tsk); | ||
317 | preempt_enable(); | ||
318 | } | ||
319 | |||
320 | #endif /* CONFIG_X86_64 */ | ||
321 | |||
322 | /* | ||
323 | * i387 state interaction | ||
324 | */ | ||
325 | static inline unsigned short get_fpu_cwd(struct task_struct *tsk) | ||
326 | { | ||
327 | if (cpu_has_fxsr) { | ||
328 | return tsk->thread.i387.fxsave.cwd; | ||
329 | } else { | ||
330 | return (unsigned short)tsk->thread.i387.fsave.cwd; | ||
331 | } | ||
332 | } | ||
333 | |||
334 | static inline unsigned short get_fpu_swd(struct task_struct *tsk) | ||
335 | { | ||
336 | if (cpu_has_fxsr) { | ||
337 | return tsk->thread.i387.fxsave.swd; | ||
338 | } else { | ||
339 | return (unsigned short)tsk->thread.i387.fsave.swd; | ||
340 | } | ||
341 | } | ||
342 | |||
343 | static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk) | ||
344 | { | ||
345 | if (cpu_has_xmm) { | ||
346 | return tsk->thread.i387.fxsave.mxcsr; | ||
347 | } else { | ||
348 | return MXCSR_DEFAULT; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | #endif /* _ASM_X86_I387_H */ | ||
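
kernel_fpu_begin()/kernel_fpu_end() bracket any in-kernel FPU or SSE use: preemption stays disabled in between, the user's state is saved lazily on entry, and stts() on exit makes the next user FPU touch fault the state back in via math_state_restore(). A minimal sketch:

    kernel_fpu_begin();
    /* FPU/SSE may be used here; sleeping is not allowed */
    asm volatile("xorps %xmm0, %xmm0");
    kernel_fpu_end();
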
diff --git a/include/asm-x86/i387_32.h b/include/asm-x86/i387_32.h deleted file mode 100644 index cdd1e248e3b4..000000000000 --- a/include/asm-x86/i387_32.h +++ /dev/null | |||
@@ -1,151 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-i386/i387.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | * | ||
6 | * Pentium III FXSR, SSE support | ||
7 | * General FPU state handling cleanups | ||
8 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_I386_I387_H | ||
12 | #define __ASM_I386_I387_H | ||
13 | |||
14 | #include <linux/sched.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/kernel_stat.h> | ||
17 | #include <asm/processor.h> | ||
18 | #include <asm/sigcontext.h> | ||
19 | #include <asm/user.h> | ||
20 | |||
21 | extern void mxcsr_feature_mask_init(void); | ||
22 | extern void init_fpu(struct task_struct *); | ||
23 | |||
24 | /* | ||
25 | * FPU lazy state save handling... | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * The "nop" is needed to make the instructions the same | ||
30 | * length. | ||
31 | */ | ||
32 | #define restore_fpu(tsk) \ | ||
33 | alternative_input( \ | ||
34 | "nop ; frstor %1", \ | ||
35 | "fxrstor %1", \ | ||
36 | X86_FEATURE_FXSR, \ | ||
37 | "m" ((tsk)->thread.i387.fxsave)) | ||
38 | |||
39 | extern void kernel_fpu_begin(void); | ||
40 | #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) | ||
41 | |||
42 | /* We need a safe address that is cheap to find and that is already | ||
43 | in L1 during context switch. The best choices are unfortunately | ||
44 | different for UP and SMP */ | ||
45 | #ifdef CONFIG_SMP | ||
46 | #define safe_address (__per_cpu_offset[0]) | ||
47 | #else | ||
48 | #define safe_address (kstat_cpu(0).cpustat.user) | ||
49 | #endif | ||
50 | |||
51 | /* | ||
52 | * These must be called with preempt disabled | ||
53 | */ | ||
54 | static inline void __save_init_fpu( struct task_struct *tsk ) | ||
55 | { | ||
56 | /* Use more nops than strictly needed in case the compiler | ||
57 | varies code */ | ||
58 | alternative_input( | ||
59 | "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, | ||
60 | "fxsave %[fx]\n" | ||
61 | "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", | ||
62 | X86_FEATURE_FXSR, | ||
63 | [fx] "m" (tsk->thread.i387.fxsave), | ||
64 | [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); | ||
65 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
66 | is pending. Clear the x87 state here by setting it to fixed | ||
67 | values. safe_address is a random variable that should be in L1 */ | ||
68 | alternative_input( | ||
69 | GENERIC_NOP8 GENERIC_NOP2, | ||
70 | "emms\n\t" /* clear stack tags */ | ||
71 | "fildl %[addr]", /* set F?P to defined value */ | ||
72 | X86_FEATURE_FXSAVE_LEAK, | ||
73 | [addr] "m" (safe_address)); | ||
74 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
75 | } | ||
76 | |||
77 | #define __unlazy_fpu( tsk ) do { \ | ||
78 | if (task_thread_info(tsk)->status & TS_USEDFPU) { \ | ||
79 | __save_init_fpu(tsk); \ | ||
80 | stts(); \ | ||
81 | } else \ | ||
82 | tsk->fpu_counter = 0; \ | ||
83 | } while (0) | ||
84 | |||
85 | #define __clear_fpu( tsk ) \ | ||
86 | do { \ | ||
87 | if (task_thread_info(tsk)->status & TS_USEDFPU) { \ | ||
88 | asm volatile("fnclex ; fwait"); \ | ||
89 | task_thread_info(tsk)->status &= ~TS_USEDFPU; \ | ||
90 | stts(); \ | ||
91 | } \ | ||
92 | } while (0) | ||
93 | |||
94 | |||
95 | /* | ||
96 | * These disable preemption on their own and are safe | ||
97 | */ | ||
98 | static inline void save_init_fpu( struct task_struct *tsk ) | ||
99 | { | ||
100 | preempt_disable(); | ||
101 | __save_init_fpu(tsk); | ||
102 | stts(); | ||
103 | preempt_enable(); | ||
104 | } | ||
105 | |||
106 | #define unlazy_fpu( tsk ) do { \ | ||
107 | preempt_disable(); \ | ||
108 | __unlazy_fpu(tsk); \ | ||
109 | preempt_enable(); \ | ||
110 | } while (0) | ||
111 | |||
112 | #define clear_fpu( tsk ) do { \ | ||
113 | preempt_disable(); \ | ||
114 | __clear_fpu( tsk ); \ | ||
115 | preempt_enable(); \ | ||
116 | } while (0) | ||
117 | |||
118 | /* | ||
119 | * FPU state interaction... | ||
120 | */ | ||
121 | extern unsigned short get_fpu_cwd( struct task_struct *tsk ); | ||
122 | extern unsigned short get_fpu_swd( struct task_struct *tsk ); | ||
123 | extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); | ||
124 | extern asmlinkage void math_state_restore(void); | ||
125 | |||
126 | /* | ||
127 | * Signal frame handlers... | ||
128 | */ | ||
129 | extern int save_i387( struct _fpstate __user *buf ); | ||
130 | extern int restore_i387( struct _fpstate __user *buf ); | ||
131 | |||
132 | /* | ||
133 | * ptrace request handlers... | ||
134 | */ | ||
135 | extern int get_fpregs( struct user_i387_struct __user *buf, | ||
136 | struct task_struct *tsk ); | ||
137 | extern int set_fpregs( struct task_struct *tsk, | ||
138 | struct user_i387_struct __user *buf ); | ||
139 | |||
140 | extern int get_fpxregs( struct user_fxsr_struct __user *buf, | ||
141 | struct task_struct *tsk ); | ||
142 | extern int set_fpxregs( struct task_struct *tsk, | ||
143 | struct user_fxsr_struct __user *buf ); | ||
144 | |||
145 | /* | ||
146 | * FPU state for core dumps... | ||
147 | */ | ||
148 | extern int dump_fpu( struct pt_regs *regs, | ||
149 | struct user_i387_struct *fpu ); | ||
150 | |||
151 | #endif /* __ASM_I386_I387_H */ | ||
diff --git a/include/asm-x86/i387_64.h b/include/asm-x86/i387_64.h deleted file mode 100644 index 3a4ffba3d6bc..000000000000 --- a/include/asm-x86/i387_64.h +++ /dev/null | |||
@@ -1,214 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-x86_64/i387.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | * | ||
6 | * Pentium III FXSR, SSE support | ||
7 | * General FPU state handling cleanups | ||
8 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
9 | * x86-64 work by Andi Kleen 2002 | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASM_X86_64_I387_H | ||
13 | #define __ASM_X86_64_I387_H | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/sigcontext.h> | ||
18 | #include <asm/user.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | |||
22 | extern void fpu_init(void); | ||
23 | extern unsigned int mxcsr_feature_mask; | ||
24 | extern void mxcsr_feature_mask_init(void); | ||
25 | extern void init_fpu(struct task_struct *child); | ||
26 | extern int save_i387(struct _fpstate __user *buf); | ||
27 | extern asmlinkage void math_state_restore(void); | ||
28 | |||
29 | /* | ||
30 | * FPU lazy state save handling... | ||
31 | */ | ||
32 | |||
33 | #define unlazy_fpu(tsk) do { \ | ||
34 | if (task_thread_info(tsk)->status & TS_USEDFPU) \ | ||
35 | save_init_fpu(tsk); \ | ||
36 | else \ | ||
37 | tsk->fpu_counter = 0; \ | ||
38 | } while (0) | ||
39 | |||
40 | /* Ignore delayed exceptions from user space */ | ||
41 | static inline void tolerant_fwait(void) | ||
42 | { | ||
43 | asm volatile("1: fwait\n" | ||
44 | "2:\n" | ||
45 | " .section __ex_table,\"a\"\n" | ||
46 | " .align 8\n" | ||
47 | " .quad 1b,2b\n" | ||
48 | " .previous\n"); | ||
49 | } | ||
50 | |||
51 | #define clear_fpu(tsk) do { \ | ||
52 | if (task_thread_info(tsk)->status & TS_USEDFPU) { \ | ||
53 | tolerant_fwait(); \ | ||
54 | task_thread_info(tsk)->status &= ~TS_USEDFPU; \ | ||
55 | stts(); \ | ||
56 | } \ | ||
57 | } while (0) | ||
58 | |||
59 | /* | ||
60 | * ptrace request handlers... | ||
61 | */ | ||
62 | extern int get_fpregs(struct user_i387_struct __user *buf, | ||
63 | struct task_struct *tsk); | ||
64 | extern int set_fpregs(struct task_struct *tsk, | ||
65 | struct user_i387_struct __user *buf); | ||
66 | |||
67 | /* | ||
68 | * i387 state interaction | ||
69 | */ | ||
70 | #define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr) | ||
71 | #define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd) | ||
72 | #define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd) | ||
73 | #define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd) | ||
74 | #define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val)) | ||
75 | #define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val)) | ||
76 | #define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val)) | ||
77 | |||
78 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | ||
79 | |||
80 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
81 | is pending. Clear the x87 state here by setting it to fixed | ||
82 | values. The kernel data segment can be sometimes 0 and sometimes | ||
83 | new user value. Both should be ok. | ||
84 | Use the PDA as safe address because it should be already in L1. */ | ||
85 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | ||
86 | { | ||
87 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
88 | asm volatile("fnclex"); | ||
89 | alternative_input(ASM_NOP8 ASM_NOP2, | ||
90 | " emms\n" /* clear stack tags */ | ||
91 | " fildl %%gs:0", /* load to clear state */ | ||
92 | X86_FEATURE_FXSAVE_LEAK); | ||
93 | } | ||
94 | |||
95 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | ||
96 | { | ||
97 | int err; | ||
98 | |||
99 | asm volatile("1: rex64/fxrstor (%[fx])\n\t" | ||
100 | "2:\n" | ||
101 | ".section .fixup,\"ax\"\n" | ||
102 | "3: movl $-1,%[err]\n" | ||
103 | " jmp 2b\n" | ||
104 | ".previous\n" | ||
105 | ".section __ex_table,\"a\"\n" | ||
106 | " .align 8\n" | ||
107 | " .quad 1b,3b\n" | ||
108 | ".previous" | ||
109 | : [err] "=r" (err) | ||
110 | #if 0 /* See comment in __fxsave_clear() below. */ | ||
111 | : [fx] "r" (fx), "m" (*fx), "0" (0)); | ||
112 | #else | ||
113 | : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); | ||
114 | #endif | ||
115 | if (unlikely(err)) | ||
116 | init_fpu(current); | ||
117 | return err; | ||
118 | } | ||
119 | |||
120 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | ||
121 | { | ||
122 | int err; | ||
123 | |||
124 | asm volatile("1: rex64/fxsave (%[fx])\n\t" | ||
125 | "2:\n" | ||
126 | ".section .fixup,\"ax\"\n" | ||
127 | "3: movl $-1,%[err]\n" | ||
128 | " jmp 2b\n" | ||
129 | ".previous\n" | ||
130 | ".section __ex_table,\"a\"\n" | ||
131 | " .align 8\n" | ||
132 | " .quad 1b,3b\n" | ||
133 | ".previous" | ||
134 | : [err] "=r" (err), "=m" (*fx) | ||
135 | #if 0 /* See comment in __fxsave_clear() below. */ | ||
136 | : [fx] "r" (fx), "0" (0)); | ||
137 | #else | ||
138 | : [fx] "cdaSDb" (fx), "0" (0)); | ||
139 | #endif | ||
140 | if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) | ||
141 | err = -EFAULT; | ||
142 | /* No need to clear here because the caller clears USED_MATH */ | ||
143 | return err; | ||
144 | } | ||
145 | |||
146 | static inline void __fxsave_clear(struct task_struct *tsk) | ||
147 | { | ||
148 | /* Using "rex64; fxsave %0" is broken because, if the memory operand | ||
149 | uses any extended registers for addressing, a second REX prefix | ||
150 | will be generated (to the assembler, rex64 followed by semicolon | ||
151 | is a separate instruction), and hence the 64-bitness is lost. */ | ||
152 | #if 0 | ||
153 | /* Using "fxsaveq %0" would be the ideal choice, but is only supported | ||
154 | starting with gas 2.16. */ | ||
155 | __asm__ __volatile__("fxsaveq %0" | ||
156 | : "=m" (tsk->thread.i387.fxsave)); | ||
157 | #elif 0 | ||
158 | /* Using, as a workaround, the properly prefixed form below isn't | ||
159 | accepted by any binutils version so far released, complaining that | ||
160 | the same type of prefix is used twice if an extended register is | ||
161 | needed for addressing (fix submitted to mainline 2005-11-21). */ | ||
162 | __asm__ __volatile__("rex64/fxsave %0" | ||
163 | : "=m" (tsk->thread.i387.fxsave)); | ||
164 | #else | ||
165 | /* This, however, we can work around by forcing the compiler to select | ||
166 | an addressing mode that doesn't require extended registers. */ | ||
167 | __asm__ __volatile__("rex64/fxsave %P2(%1)" | ||
168 | : "=m" (tsk->thread.i387.fxsave) | ||
169 | : "cdaSDb" (tsk), | ||
170 | "i" (offsetof(__typeof__(*tsk), | ||
171 | thread.i387.fxsave))); | ||
172 | #endif | ||
173 | clear_fpu_state(&tsk->thread.i387.fxsave); | ||
174 | } | ||
175 | |||
176 | static inline void kernel_fpu_begin(void) | ||
177 | { | ||
178 | struct thread_info *me = current_thread_info(); | ||
179 | preempt_disable(); | ||
180 | if (me->status & TS_USEDFPU) { | ||
181 | __fxsave_clear(me->task); | ||
182 | me->status &= ~TS_USEDFPU; | ||
183 | return; | ||
184 | } | ||
185 | clts(); | ||
186 | } | ||
187 | |||
188 | static inline void kernel_fpu_end(void) | ||
189 | { | ||
190 | stts(); | ||
191 | preempt_enable(); | ||
192 | } | ||
193 | |||
194 | static inline void save_init_fpu(struct task_struct *tsk) | ||
195 | { | ||
196 | __fxsave_clear(tsk); | ||
197 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
198 | stts(); | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * This restores directly out of user space. Exceptions are handled. | ||
203 | */ | ||
204 | static inline int restore_i387(struct _fpstate __user *buf) | ||
205 | { | ||
206 | set_used_math(); | ||
207 | if (!(task_thread_info(current)->status & TS_USEDFPU)) { | ||
208 | clts(); | ||
209 | task_thread_info(current)->status |= TS_USEDFPU; | ||
210 | } | ||
211 | return restore_fpu_checking((__force struct i387_fxsave_struct *)buf); | ||
212 | } | ||
213 | |||
214 | #endif /* __ASM_X86_64_I387_H */ | ||
diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h index 747548ec5d1d..b51c0487fc41 100644 --- a/include/asm-x86/i8253.h +++ b/include/asm-x86/i8253.h | |||
@@ -12,4 +12,7 @@ extern struct clock_event_device *global_clock_event; | |||
12 | 12 | ||
13 | extern void setup_pit_timer(void); | 13 | extern void setup_pit_timer(void); |
14 | 14 | ||
15 | #define inb_pit inb_p | ||
16 | #define outb_pit outb_p | ||
17 | |||
15 | #endif /* __ASM_I8253_H__ */ | 18 | #endif /* __ASM_I8253_H__ */ |
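
inb_pit/outb_pit are plain aliases for the pausing port accessors, added so PIT accesses are uniform and easy to grep at call sites. A hedged sketch of programming channel 0 as a rate generator (0x43 is the PIT command port and 0x40 channel 0's data port; LATCH is the usual divisor constant):

    outb_pit(0x34, 0x43);           /* ch 0, lobyte/hibyte, mode 2 */
    outb_pit(LATCH & 0xff, 0x40);   /* divisor, low byte  */
    outb_pit(LATCH >> 8, 0x40);     /* divisor, high byte */
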
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 29d8f9a6b3fc..67c319e0efc7 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h | |||
@@ -3,10 +3,25 @@ | |||
3 | 3 | ||
4 | extern unsigned int cached_irq_mask; | 4 | extern unsigned int cached_irq_mask; |
5 | 5 | ||
6 | #define __byte(x,y) (((unsigned char *) &(y))[x]) | 6 | #define __byte(x,y) (((unsigned char *) &(y))[x]) |
7 | #define cached_master_mask (__byte(0, cached_irq_mask)) | 7 | #define cached_master_mask (__byte(0, cached_irq_mask)) |
8 | #define cached_slave_mask (__byte(1, cached_irq_mask)) | 8 | #define cached_slave_mask (__byte(1, cached_irq_mask)) |
9 | 9 | ||
10 | /* i8259A PIC registers */ | ||
11 | #define PIC_MASTER_CMD 0x20 | ||
12 | #define PIC_MASTER_IMR 0x21 | ||
13 | #define PIC_MASTER_ISR PIC_MASTER_CMD | ||
14 | #define PIC_MASTER_POLL PIC_MASTER_ISR | ||
15 | #define PIC_MASTER_OCW3 PIC_MASTER_ISR | ||
16 | #define PIC_SLAVE_CMD 0xa0 | ||
17 | #define PIC_SLAVE_IMR 0xa1 | ||
18 | |||
19 | /* i8259A PIC related values */ | ||
20 | #define PIC_CASCADE_IR 2 | ||
21 | #define MASTER_ICW4_DEFAULT 0x01 | ||
22 | #define SLAVE_ICW4_DEFAULT 0x01 | ||
23 | #define PIC_ICW4_AEOI 2 | ||
24 | |||
10 | extern spinlock_t i8259A_lock; | 25 | extern spinlock_t i8259A_lock; |
11 | 26 | ||
12 | extern void init_8259A(int auto_eoi); | 27 | extern void init_8259A(int auto_eoi); |
@@ -14,4 +29,7 @@ extern void enable_8259A_irq(unsigned int irq); | |||
14 | extern void disable_8259A_irq(unsigned int irq); | 29 | extern void disable_8259A_irq(unsigned int irq); |
15 | extern unsigned int startup_8259A_irq(unsigned int irq); | 30 | extern unsigned int startup_8259A_irq(unsigned int irq); |
16 | 31 | ||
32 | #define inb_pic inb_p | ||
33 | #define outb_pic outb_p | ||
34 | |||
17 | #endif /* __ASM_I8259_H__ */ | 35 | #endif /* __ASM_I8259_H__ */ |
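With the register constants above, masking an IRQ line on the master PIC can be written explicitly; a sketch assuming the caller already holds i8259A_lock and irq < 8:

	cached_master_mask |= 1 << irq;			/* update the cached copy */
	outb_pic(cached_master_mask, PIC_MASTER_IMR);	/* write the interrupt mask register */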
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index 0190b7c4e319..aa9733206e29 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h | |||
@@ -159,12 +159,6 @@ struct ustat32 { | |||
159 | #define IA32_STACK_TOP IA32_PAGE_OFFSET | 159 | #define IA32_STACK_TOP IA32_PAGE_OFFSET |
160 | 160 | ||
161 | #ifdef __KERNEL__ | 161 | #ifdef __KERNEL__ |
162 | struct user_desc; | ||
163 | struct siginfo_t; | ||
164 | int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info); | ||
165 | int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info); | ||
166 | int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs); | ||
167 | |||
168 | struct linux_binprm; | 162 | struct linux_binprm; |
169 | extern int ia32_setup_arg_pages(struct linux_binprm *bprm, | 163 | extern int ia32_setup_arg_pages(struct linux_binprm *bprm, |
170 | unsigned long stack_top, int exec_stack); | 164 | unsigned long stack_top, int exec_stack); |
diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h index 5b52ce507338..61cea9e7c5c1 100644 --- a/include/asm-x86/ia32_unistd.h +++ b/include/asm-x86/ia32_unistd.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * This file contains the system call numbers of the ia32 port, | 5 | * This file contains the system call numbers of the ia32 port, |
6 | * this is for the kernel only. | 6 | * this is for the kernel only. |
7 | * Only add syscalls here where some part of the kernel needs to know | 7 | * Only add syscalls here where some part of the kernel needs to know |
8 | * the number. This should be otherwise in sync with asm-i386/unistd.h. -AK | 8 | * the number. This should be otherwise in sync with asm-x86/unistd_32.h. -AK |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define __NR_ia32_restart_syscall 0 | 11 | #define __NR_ia32_restart_syscall 0 |
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h index 42130adf9c7c..c2552d8bebf7 100644 --- a/include/asm-x86/ide.h +++ b/include/asm-x86/ide.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/include/asm-i386/ide.h | ||
3 | * | ||
4 | * Copyright (C) 1994-1996 Linus Torvalds & authors | 2 | * Copyright (C) 1994-1996 Linus Torvalds & authors |
5 | */ | 3 | */ |
6 | 4 | ||
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h index 6bd47dcf2067..d240e5b30a45 100644 --- a/include/asm-x86/idle.h +++ b/include/asm-x86/idle.h | |||
@@ -6,7 +6,6 @@ | |||
6 | 6 | ||
7 | struct notifier_block; | 7 | struct notifier_block; |
8 | void idle_notifier_register(struct notifier_block *n); | 8 | void idle_notifier_register(struct notifier_block *n); |
9 | void idle_notifier_unregister(struct notifier_block *n); | ||
10 | 9 | ||
11 | void enter_idle(void); | 10 | void enter_idle(void); |
12 | void exit_idle(void); | 11 | void exit_idle(void); |
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index fe881cd1e6f4..58d2c45cd0b1 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h | |||
@@ -100,8 +100,6 @@ static inline void * phys_to_virt(unsigned long address) | |||
100 | */ | 100 | */ |
101 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | 101 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) |
102 | 102 | ||
103 | extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); | ||
104 | |||
105 | /** | 103 | /** |
106 | * ioremap - map bus memory into CPU space | 104 | * ioremap - map bus memory into CPU space |
107 | * @offset: bus address of the memory | 105 | * @offset: bus address of the memory |
@@ -111,32 +109,39 @@ extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsign | |||
111 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ | 109 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ |
112 | * writew/writel functions and the other mmio helpers. The returned | 110 | * writew/writel functions and the other mmio helpers. The returned |
113 | * address is not guaranteed to be usable directly as a virtual | 111 | * address is not guaranteed to be usable directly as a virtual |
114 | * address. | 112 | * address. |
115 | * | 113 | * |
116 | * If the area you are trying to map is a PCI BAR you should have a | 114 | * If the area you are trying to map is a PCI BAR you should have a |
117 | * look at pci_iomap(). | 115 | * look at pci_iomap(). |
118 | */ | 116 | */ |
117 | extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); | ||
118 | extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); | ||
119 | 119 | ||
120 | static inline void __iomem * ioremap(unsigned long offset, unsigned long size) | 120 | /* |
121 | * The default ioremap() behavior is non-cached: | ||
122 | */ | ||
123 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | ||
121 | { | 124 | { |
122 | return __ioremap(offset, size, 0); | 125 | return ioremap_nocache(offset, size); |
123 | } | 126 | } |
124 | 127 | ||
125 | extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); | ||
126 | extern void iounmap(volatile void __iomem *addr); | 128 | extern void iounmap(volatile void __iomem *addr); |
127 | 129 | ||
128 | /* | 130 | /* |
129 | * bt_ioremap() and bt_iounmap() are for temporary early boot-time | 131 | * early_ioremap() and early_iounmap() are for temporary early boot-time |
130 | * mappings, before the real ioremap() is functional. | 132 | * mappings, before the real ioremap() is functional. |
131 | * A boot-time mapping is currently limited to at most 16 pages. | 133 | * A boot-time mapping is currently limited to at most 16 pages. |
132 | */ | 134 | */ |
133 | extern void *bt_ioremap(unsigned long offset, unsigned long size); | 135 | extern void early_ioremap_init(void); |
134 | extern void bt_iounmap(void *addr, unsigned long size); | 136 | extern void early_ioremap_clear(void); |
137 | extern void early_ioremap_reset(void); | ||
138 | extern void *early_ioremap(unsigned long offset, unsigned long size); | ||
139 | extern void early_iounmap(void *addr, unsigned long size); | ||
135 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); | 140 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); |
136 | 141 | ||
137 | /* Use early IO mappings for DMI because it's initialized early */ | 142 | /* Use early IO mappings for DMI because it's initialized early */ |
138 | #define dmi_ioremap bt_ioremap | 143 | #define dmi_ioremap early_ioremap |
139 | #define dmi_iounmap bt_iounmap | 144 | #define dmi_iounmap early_iounmap |
140 | #define dmi_alloc alloc_bootmem | 145 | #define dmi_alloc alloc_bootmem |
141 | 146 | ||
142 | /* | 147 | /* |
@@ -250,10 +255,10 @@ static inline void flush_write_buffers(void) | |||
250 | 255 | ||
251 | #endif /* __KERNEL__ */ | 256 | #endif /* __KERNEL__ */ |
252 | 257 | ||
253 | static inline void native_io_delay(void) | 258 | extern void native_io_delay(void); |
254 | { | 259 | |
255 | asm volatile("outb %%al,$0x80" : : : "memory"); | 260 | extern int io_delay_type; |
256 | } | 261 | extern void io_delay_init(void); |
257 | 262 | ||
258 | #if defined(CONFIG_PARAVIRT) | 263 | #if defined(CONFIG_PARAVIRT) |
259 | #include <asm/paravirt.h> | 264 | #include <asm/paravirt.h> |
@@ -270,29 +275,6 @@ static inline void slow_down_io(void) { | |||
270 | 275 | ||
271 | #endif | 276 | #endif |
272 | 277 | ||
273 | #ifdef CONFIG_X86_NUMAQ | ||
274 | extern void *xquad_portio; /* Where the IO area was mapped */ | ||
275 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | ||
276 | #define __BUILDIO(bwl,bw,type) \ | ||
277 | static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ | ||
278 | if (xquad_portio) \ | ||
279 | write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ | ||
280 | else \ | ||
281 | out##bwl##_local(value, port); \ | ||
282 | } \ | ||
283 | static inline void out##bwl(unsigned type value, int port) { \ | ||
284 | out##bwl##_quad(value, port, 0); \ | ||
285 | } \ | ||
286 | static inline unsigned type in##bwl##_quad(int port, int quad) { \ | ||
287 | if (xquad_portio) \ | ||
288 | return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ | ||
289 | else \ | ||
290 | return in##bwl##_local(port); \ | ||
291 | } \ | ||
292 | static inline unsigned type in##bwl(int port) { \ | ||
293 | return in##bwl##_quad(port, 0); \ | ||
294 | } | ||
295 | #else | ||
296 | #define __BUILDIO(bwl,bw,type) \ | 278 | #define __BUILDIO(bwl,bw,type) \ |
297 | static inline void out##bwl(unsigned type value, int port) { \ | 279 | static inline void out##bwl(unsigned type value, int port) { \ |
298 | out##bwl##_local(value, port); \ | 280 | out##bwl##_local(value, port); \ |
@@ -300,8 +282,6 @@ static inline void out##bwl(unsigned type value, int port) { \ | |||
300 | static inline unsigned type in##bwl(int port) { \ | 282 | static inline unsigned type in##bwl(int port) { \ |
301 | return in##bwl##_local(port); \ | 283 | return in##bwl##_local(port); \ |
302 | } | 284 | } |
303 | #endif | ||
304 | |||
305 | 285 | ||
306 | #define BUILDIO(bwl,bw,type) \ | 286 | #define BUILDIO(bwl,bw,type) \ |
307 | static inline void out##bwl##_local(unsigned type value, int port) { \ | 287 | static inline void out##bwl##_local(unsigned type value, int port) { \ |
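The reworked ioremap() above is now just the uncached variant; a usage sketch (phys_base and REG_STATUS are hypothetical):

	void __iomem *regs = ioremap(phys_base, PAGE_SIZE);	/* same as ioremap_nocache() */

	if (regs) {
		u32 status = readl(regs + REG_STATUS);
		iounmap(regs);
	}

Callers that really want a cacheable mapping must now ask for one with ioremap_cache().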
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index a037b0794332..f64a59cc396d 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h | |||
@@ -35,12 +35,24 @@ | |||
35 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> | 35 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #define __SLOW_DOWN_IO "\noutb %%al,$0x80" | 38 | extern void native_io_delay(void); |
39 | 39 | ||
40 | #ifdef REALLY_SLOW_IO | 40 | extern int io_delay_type; |
41 | #define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO | 41 | extern void io_delay_init(void); |
42 | |||
43 | #if defined(CONFIG_PARAVIRT) | ||
44 | #include <asm/paravirt.h> | ||
42 | #else | 45 | #else |
43 | #define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO | 46 | |
47 | static inline void slow_down_io(void) | ||
48 | { | ||
49 | native_io_delay(); | ||
50 | #ifdef REALLY_SLOW_IO | ||
51 | native_io_delay(); | ||
52 | native_io_delay(); | ||
53 | native_io_delay(); | ||
54 | #endif | ||
55 | } | ||
44 | #endif | 56 | #endif |
45 | 57 | ||
46 | /* | 58 | /* |
@@ -52,9 +64,15 @@ static inline void out##s(unsigned x value, unsigned short port) { | |||
52 | #define __OUT2(s,s1,s2) \ | 64 | #define __OUT2(s,s1,s2) \ |
53 | __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" | 65 | __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" |
54 | 66 | ||
67 | #ifndef REALLY_SLOW_IO | ||
68 | #define REALLY_SLOW_IO | ||
69 | #define UNSET_REALLY_SLOW_IO | ||
70 | #endif | ||
71 | |||
55 | #define __OUT(s,s1,x) \ | 72 | #define __OUT(s,s1,x) \ |
56 | __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ | 73 | __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ |
57 | __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \ | 74 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ |
75 | slow_down_io(); } | ||
58 | 76 | ||
59 | #define __IN1(s) \ | 77 | #define __IN1(s) \ |
60 | static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; | 78 | static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; |
@@ -63,8 +81,13 @@ static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; | |||
63 | __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" | 81 | __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" |
64 | 82 | ||
65 | #define __IN(s,s1,i...) \ | 83 | #define __IN(s,s1,i...) \ |
66 | __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ | 84 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \ |
67 | __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ | 85 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ |
86 | slow_down_io(); return _v; } | ||
87 | |||
88 | #ifdef UNSET_REALLY_SLOW_IO | ||
89 | #undef REALLY_SLOW_IO | ||
90 | #endif | ||
68 | 91 | ||
69 | #define __INS(s) \ | 92 | #define __INS(s) \ |
70 | static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ | 93 | static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ |
@@ -127,13 +150,6 @@ static inline void * phys_to_virt(unsigned long address) | |||
127 | 150 | ||
128 | #include <asm-generic/iomap.h> | 151 | #include <asm-generic/iomap.h> |
129 | 152 | ||
130 | extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); | ||
131 | |||
132 | static inline void __iomem * ioremap (unsigned long offset, unsigned long size) | ||
133 | { | ||
134 | return __ioremap(offset, size, 0); | ||
135 | } | ||
136 | |||
137 | extern void *early_ioremap(unsigned long addr, unsigned long size); | 153 | extern void *early_ioremap(unsigned long addr, unsigned long size); |
138 | extern void early_iounmap(void *addr, unsigned long size); | 154 | extern void early_iounmap(void *addr, unsigned long size); |
139 | 155 | ||
@@ -142,8 +158,19 @@ extern void early_iounmap(void *addr, unsigned long size); | |||
142 | * it's useful if some control registers are in such an area and write combining | 158 | * it's useful if some control registers are in such an area and write combining |
143 | * or read caching is not desirable: | 159 | * or read caching is not desirable: |
144 | */ | 160 | */ |
145 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); | 161 | extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size); |
162 | extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size); | ||
163 | |||
164 | /* | ||
165 | * The default ioremap() behavior is non-cached: | ||
166 | */ | ||
167 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | ||
168 | { | ||
169 | return ioremap_nocache(offset, size); | ||
170 | } | ||
171 | |||
146 | extern void iounmap(volatile void __iomem *addr); | 172 | extern void iounmap(volatile void __iomem *addr); |
173 | |||
147 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); | 174 | extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); |
148 | 175 | ||
149 | /* | 176 | /* |
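Net effect of the slow-down rework: the _p accessors no longer inline a chain of port-0x80 writes but call slow_down_io() instead. Roughly, the macro output for outb_p() now looks like this (a sketch, non-paravirt case):

	static inline void outb_p(unsigned char value, unsigned short port)
	{
		asm volatile("outb %b0,%w1" : : "a" (value), "Nd" (port));
		slow_down_io();	/* one native_io_delay(), or four with REALLY_SLOW_IO */
	}

Since native_io_delay() is now out of line, io_delay_init() can pick the delay method at boot via io_delay_type.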
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 88494966beeb..0f5b3fef0b08 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h | |||
@@ -1,5 +1,159 @@ | |||
1 | #ifndef __ASM_IO_APIC_H | ||
2 | #define __ASM_IO_APIC_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <asm/mpspec.h> | ||
6 | #include <asm/apicdef.h> | ||
7 | |||
8 | /* | ||
9 | * Intel IO-APIC support for SMP and UP systems. | ||
10 | * | ||
11 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * The structure of the IO-APIC: | ||
16 | */ | ||
17 | union IO_APIC_reg_00 { | ||
18 | u32 raw; | ||
19 | struct { | ||
20 | u32 __reserved_2 : 14, | ||
21 | LTS : 1, | ||
22 | delivery_type : 1, | ||
23 | __reserved_1 : 8, | ||
24 | ID : 8; | ||
25 | } __attribute__ ((packed)) bits; | ||
26 | }; | ||
27 | |||
28 | union IO_APIC_reg_01 { | ||
29 | u32 raw; | ||
30 | struct { | ||
31 | u32 version : 8, | ||
32 | __reserved_2 : 7, | ||
33 | PRQ : 1, | ||
34 | entries : 8, | ||
35 | __reserved_1 : 8; | ||
36 | } __attribute__ ((packed)) bits; | ||
37 | }; | ||
38 | |||
39 | union IO_APIC_reg_02 { | ||
40 | u32 raw; | ||
41 | struct { | ||
42 | u32 __reserved_2 : 24, | ||
43 | arbitration : 4, | ||
44 | __reserved_1 : 4; | ||
45 | } __attribute__ ((packed)) bits; | ||
46 | }; | ||
47 | |||
48 | union IO_APIC_reg_03 { | ||
49 | u32 raw; | ||
50 | struct { | ||
51 | u32 boot_DT : 1, | ||
52 | __reserved_1 : 31; | ||
53 | } __attribute__ ((packed)) bits; | ||
54 | }; | ||
55 | |||
56 | enum ioapic_irq_destination_types { | ||
57 | dest_Fixed = 0, | ||
58 | dest_LowestPrio = 1, | ||
59 | dest_SMI = 2, | ||
60 | dest__reserved_1 = 3, | ||
61 | dest_NMI = 4, | ||
62 | dest_INIT = 5, | ||
63 | dest__reserved_2 = 6, | ||
64 | dest_ExtINT = 7 | ||
65 | }; | ||
66 | |||
67 | struct IO_APIC_route_entry { | ||
68 | __u32 vector : 8, | ||
69 | delivery_mode : 3, /* 000: FIXED | ||
70 | * 001: lowest prio | ||
71 | * 111: ExtINT | ||
72 | */ | ||
73 | dest_mode : 1, /* 0: physical, 1: logical */ | ||
74 | delivery_status : 1, | ||
75 | polarity : 1, | ||
76 | irr : 1, | ||
77 | trigger : 1, /* 0: edge, 1: level */ | ||
78 | mask : 1, /* 0: enabled, 1: disabled */ | ||
79 | __reserved_2 : 15; | ||
80 | |||
1 | #ifdef CONFIG_X86_32 | 81 | #ifdef CONFIG_X86_32 |
2 | # include "io_apic_32.h" | 82 | union { |
83 | struct { | ||
84 | __u32 __reserved_1 : 24, | ||
85 | physical_dest : 4, | ||
86 | __reserved_2 : 4; | ||
87 | } physical; | ||
88 | |||
89 | struct { | ||
90 | __u32 __reserved_1 : 24, | ||
91 | logical_dest : 8; | ||
92 | } logical; | ||
93 | } dest; | ||
3 | #else | 94 | #else |
4 | # include "io_apic_64.h" | 95 | __u32 __reserved_3 : 24, |
96 | dest : 8; | ||
97 | #endif | ||
98 | |||
99 | } __attribute__ ((packed)); | ||
100 | |||
101 | #ifdef CONFIG_X86_IO_APIC | ||
102 | |||
103 | /* | ||
104 | * # of IO-APICs and # of IRQ routing registers | ||
105 | */ | ||
106 | extern int nr_ioapics; | ||
107 | extern int nr_ioapic_registers[MAX_IO_APICS]; | ||
108 | |||
109 | /* | ||
110 | * MP-BIOS irq configuration table structures: | ||
111 | */ | ||
112 | |||
113 | /* I/O APIC entries */ | ||
114 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
115 | |||
116 | /* # of MP IRQ source entries */ | ||
117 | extern int mp_irq_entries; | ||
118 | |||
119 | /* MP IRQ source entries */ | ||
120 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
121 | |||
122 | /* non-0 if default (table-less) MP configuration */ | ||
123 | extern int mpc_default_type; | ||
124 | |||
125 | /* Older SiS APIC requires we rewrite the index register */ | ||
126 | extern int sis_apic_bug; | ||
127 | |||
128 | /* 1 if "noapic" boot option passed */ | ||
129 | extern int skip_ioapic_setup; | ||
130 | |||
131 | static inline void disable_ioapic_setup(void) | ||
132 | { | ||
133 | skip_ioapic_setup = 1; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * If we use the IO-APIC for IRQ routing, disable automatic | ||
138 | * assignment of PCI IRQ's. | ||
139 | */ | ||
140 | #define io_apic_assign_pci_irqs \ | ||
141 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | ||
142 | |||
143 | #ifdef CONFIG_ACPI | ||
144 | extern int io_apic_get_unique_id(int ioapic, int apic_id); | ||
145 | extern int io_apic_get_version(int ioapic); | ||
146 | extern int io_apic_get_redir_entries(int ioapic); | ||
147 | extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, | ||
148 | int edge_level, int active_high_low); | ||
149 | extern int timer_uses_ioapic_pin_0; | ||
150 | #endif /* CONFIG_ACPI */ | ||
151 | |||
152 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | ||
153 | extern void ioapic_init_mappings(void); | ||
154 | |||
155 | #else /* !CONFIG_X86_IO_APIC */ | ||
156 | #define io_apic_assign_pci_irqs 0 | ||
157 | #endif | ||
158 | |||
5 | #endif | 159 | #endif |
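The raw/bits unions above are used by loading .raw from the memory-mapped register and then reading the bitfields; a sketch (io_apic_read() is the usual accessor, declared elsewhere in the kernel):

	union IO_APIC_reg_01 reg_01;

	reg_01.raw = io_apic_read(apic, 1);			/* register index 1 */
	nr_ioapic_registers[apic] = reg_01.bits.entries + 1;	/* field holds count - 1 */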
diff --git a/include/asm-x86/io_apic_32.h b/include/asm-x86/io_apic_32.h deleted file mode 100644 index 3f087883ea48..000000000000 --- a/include/asm-x86/io_apic_32.h +++ /dev/null | |||
@@ -1,155 +0,0 @@ | |||
1 | #ifndef __ASM_IO_APIC_H | ||
2 | #define __ASM_IO_APIC_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <asm/mpspec.h> | ||
6 | #include <asm/apicdef.h> | ||
7 | |||
8 | /* | ||
9 | * Intel IO-APIC support for SMP and UP systems. | ||
10 | * | ||
11 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * The structure of the IO-APIC: | ||
16 | */ | ||
17 | union IO_APIC_reg_00 { | ||
18 | u32 raw; | ||
19 | struct { | ||
20 | u32 __reserved_2 : 14, | ||
21 | LTS : 1, | ||
22 | delivery_type : 1, | ||
23 | __reserved_1 : 8, | ||
24 | ID : 8; | ||
25 | } __attribute__ ((packed)) bits; | ||
26 | }; | ||
27 | |||
28 | union IO_APIC_reg_01 { | ||
29 | u32 raw; | ||
30 | struct { | ||
31 | u32 version : 8, | ||
32 | __reserved_2 : 7, | ||
33 | PRQ : 1, | ||
34 | entries : 8, | ||
35 | __reserved_1 : 8; | ||
36 | } __attribute__ ((packed)) bits; | ||
37 | }; | ||
38 | |||
39 | union IO_APIC_reg_02 { | ||
40 | u32 raw; | ||
41 | struct { | ||
42 | u32 __reserved_2 : 24, | ||
43 | arbitration : 4, | ||
44 | __reserved_1 : 4; | ||
45 | } __attribute__ ((packed)) bits; | ||
46 | }; | ||
47 | |||
48 | union IO_APIC_reg_03 { | ||
49 | u32 raw; | ||
50 | struct { | ||
51 | u32 boot_DT : 1, | ||
52 | __reserved_1 : 31; | ||
53 | } __attribute__ ((packed)) bits; | ||
54 | }; | ||
55 | |||
56 | enum ioapic_irq_destination_types { | ||
57 | dest_Fixed = 0, | ||
58 | dest_LowestPrio = 1, | ||
59 | dest_SMI = 2, | ||
60 | dest__reserved_1 = 3, | ||
61 | dest_NMI = 4, | ||
62 | dest_INIT = 5, | ||
63 | dest__reserved_2 = 6, | ||
64 | dest_ExtINT = 7 | ||
65 | }; | ||
66 | |||
67 | struct IO_APIC_route_entry { | ||
68 | __u32 vector : 8, | ||
69 | delivery_mode : 3, /* 000: FIXED | ||
70 | * 001: lowest prio | ||
71 | * 111: ExtINT | ||
72 | */ | ||
73 | dest_mode : 1, /* 0: physical, 1: logical */ | ||
74 | delivery_status : 1, | ||
75 | polarity : 1, | ||
76 | irr : 1, | ||
77 | trigger : 1, /* 0: edge, 1: level */ | ||
78 | mask : 1, /* 0: enabled, 1: disabled */ | ||
79 | __reserved_2 : 15; | ||
80 | |||
81 | union { struct { __u32 | ||
82 | __reserved_1 : 24, | ||
83 | physical_dest : 4, | ||
84 | __reserved_2 : 4; | ||
85 | } physical; | ||
86 | |||
87 | struct { __u32 | ||
88 | __reserved_1 : 24, | ||
89 | logical_dest : 8; | ||
90 | } logical; | ||
91 | } dest; | ||
92 | |||
93 | } __attribute__ ((packed)); | ||
94 | |||
95 | #ifdef CONFIG_X86_IO_APIC | ||
96 | |||
97 | /* | ||
98 | * # of IO-APICs and # of IRQ routing registers | ||
99 | */ | ||
100 | extern int nr_ioapics; | ||
101 | extern int nr_ioapic_registers[MAX_IO_APICS]; | ||
102 | |||
103 | /* | ||
104 | * MP-BIOS irq configuration table structures: | ||
105 | */ | ||
106 | |||
107 | /* I/O APIC entries */ | ||
108 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
109 | |||
110 | /* # of MP IRQ source entries */ | ||
111 | extern int mp_irq_entries; | ||
112 | |||
113 | /* MP IRQ source entries */ | ||
114 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
115 | |||
116 | /* non-0 if default (table-less) MP configuration */ | ||
117 | extern int mpc_default_type; | ||
118 | |||
119 | /* Older SiS APIC requires we rewrite the index register */ | ||
120 | extern int sis_apic_bug; | ||
121 | |||
122 | /* 1 if "noapic" boot option passed */ | ||
123 | extern int skip_ioapic_setup; | ||
124 | |||
125 | static inline void disable_ioapic_setup(void) | ||
126 | { | ||
127 | skip_ioapic_setup = 1; | ||
128 | } | ||
129 | |||
130 | static inline int ioapic_setup_disabled(void) | ||
131 | { | ||
132 | return skip_ioapic_setup; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * If we use the IO-APIC for IRQ routing, disable automatic | ||
137 | * assignment of PCI IRQ's. | ||
138 | */ | ||
139 | #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | ||
140 | |||
141 | #ifdef CONFIG_ACPI | ||
142 | extern int io_apic_get_unique_id (int ioapic, int apic_id); | ||
143 | extern int io_apic_get_version (int ioapic); | ||
144 | extern int io_apic_get_redir_entries (int ioapic); | ||
145 | extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); | ||
146 | extern int timer_uses_ioapic_pin_0; | ||
147 | #endif /* CONFIG_ACPI */ | ||
148 | |||
149 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | ||
150 | |||
151 | #else /* !CONFIG_X86_IO_APIC */ | ||
152 | #define io_apic_assign_pci_irqs 0 | ||
153 | #endif | ||
154 | |||
155 | #endif | ||
diff --git a/include/asm-x86/io_apic_64.h b/include/asm-x86/io_apic_64.h deleted file mode 100644 index e2c13675ee4e..000000000000 --- a/include/asm-x86/io_apic_64.h +++ /dev/null | |||
@@ -1,138 +0,0 @@ | |||
1 | #ifndef __ASM_IO_APIC_H | ||
2 | #define __ASM_IO_APIC_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <asm/mpspec.h> | ||
6 | #include <asm/apicdef.h> | ||
7 | |||
8 | /* | ||
9 | * Intel IO-APIC support for SMP and UP systems. | ||
10 | * | ||
11 | * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar | ||
12 | */ | ||
13 | |||
14 | #define APIC_MISMATCH_DEBUG | ||
15 | |||
16 | /* | ||
17 | * The structure of the IO-APIC: | ||
18 | */ | ||
19 | union IO_APIC_reg_00 { | ||
20 | u32 raw; | ||
21 | struct { | ||
22 | u32 __reserved_2 : 14, | ||
23 | LTS : 1, | ||
24 | delivery_type : 1, | ||
25 | __reserved_1 : 8, | ||
26 | ID : 8; | ||
27 | } __attribute__ ((packed)) bits; | ||
28 | }; | ||
29 | |||
30 | union IO_APIC_reg_01 { | ||
31 | u32 raw; | ||
32 | struct { | ||
33 | u32 version : 8, | ||
34 | __reserved_2 : 7, | ||
35 | PRQ : 1, | ||
36 | entries : 8, | ||
37 | __reserved_1 : 8; | ||
38 | } __attribute__ ((packed)) bits; | ||
39 | }; | ||
40 | |||
41 | union IO_APIC_reg_02 { | ||
42 | u32 raw; | ||
43 | struct { | ||
44 | u32 __reserved_2 : 24, | ||
45 | arbitration : 4, | ||
46 | __reserved_1 : 4; | ||
47 | } __attribute__ ((packed)) bits; | ||
48 | }; | ||
49 | |||
50 | union IO_APIC_reg_03 { | ||
51 | u32 raw; | ||
52 | struct { | ||
53 | u32 boot_DT : 1, | ||
54 | __reserved_1 : 31; | ||
55 | } __attribute__ ((packed)) bits; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * # of IO-APICs and # of IRQ routing registers | ||
60 | */ | ||
61 | extern int nr_ioapics; | ||
62 | extern int nr_ioapic_registers[MAX_IO_APICS]; | ||
63 | |||
64 | enum ioapic_irq_destination_types { | ||
65 | dest_Fixed = 0, | ||
66 | dest_LowestPrio = 1, | ||
67 | dest_SMI = 2, | ||
68 | dest__reserved_1 = 3, | ||
69 | dest_NMI = 4, | ||
70 | dest_INIT = 5, | ||
71 | dest__reserved_2 = 6, | ||
72 | dest_ExtINT = 7 | ||
73 | }; | ||
74 | |||
75 | struct IO_APIC_route_entry { | ||
76 | __u32 vector : 8, | ||
77 | delivery_mode : 3, /* 000: FIXED | ||
78 | * 001: lowest prio | ||
79 | * 111: ExtINT | ||
80 | */ | ||
81 | dest_mode : 1, /* 0: physical, 1: logical */ | ||
82 | delivery_status : 1, | ||
83 | polarity : 1, | ||
84 | irr : 1, | ||
85 | trigger : 1, /* 0: edge, 1: level */ | ||
86 | mask : 1, /* 0: enabled, 1: disabled */ | ||
87 | __reserved_2 : 15; | ||
88 | |||
89 | __u32 __reserved_3 : 24, | ||
90 | dest : 8; | ||
91 | } __attribute__ ((packed)); | ||
92 | |||
93 | /* | ||
94 | * MP-BIOS irq configuration table structures: | ||
95 | */ | ||
96 | |||
97 | /* I/O APIC entries */ | ||
98 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | ||
99 | |||
100 | /* # of MP IRQ source entries */ | ||
101 | extern int mp_irq_entries; | ||
102 | |||
103 | /* MP IRQ source entries */ | ||
104 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
105 | |||
106 | /* non-0 if default (table-less) MP configuration */ | ||
107 | extern int mpc_default_type; | ||
108 | |||
109 | /* 1 if "noapic" boot option passed */ | ||
110 | extern int skip_ioapic_setup; | ||
111 | |||
112 | static inline void disable_ioapic_setup(void) | ||
113 | { | ||
114 | skip_ioapic_setup = 1; | ||
115 | } | ||
116 | |||
117 | |||
118 | /* | ||
119 | * If we use the IO-APIC for IRQ routing, disable automatic | ||
120 | * assignment of PCI IRQ's. | ||
121 | */ | ||
122 | #define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | ||
123 | |||
124 | #ifdef CONFIG_ACPI | ||
125 | extern int io_apic_get_version (int ioapic); | ||
126 | extern int io_apic_get_redir_entries (int ioapic); | ||
127 | extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int); | ||
128 | #endif | ||
129 | |||
130 | extern int sis_apic_bug; /* dummy */ | ||
131 | |||
132 | void enable_NMI_through_LVT0 (void * dummy); | ||
133 | |||
134 | extern spinlock_t i8259A_lock; | ||
135 | |||
136 | extern int timer_over_8254; | ||
137 | |||
138 | #endif | ||
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 1b695ff52687..92021c1ffa3a 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h | |||
@@ -1,5 +1,245 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _X86_IRQFLAGS_H_ |
2 | # include "irqflags_32.h" | 2 | #define _X86_IRQFLAGS_H_ |
3 | |||
4 | #include <asm/processor-flags.h> | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | /* | ||
8 | * Interrupt control: | ||
9 | */ | ||
10 | |||
11 | static inline unsigned long native_save_fl(void) | ||
12 | { | ||
13 | unsigned long flags; | ||
14 | |||
15 | __asm__ __volatile__( | ||
16 | "# __raw_save_flags\n\t" | ||
17 | "pushf ; pop %0" | ||
18 | : "=g" (flags) | ||
19 | : /* no input */ | ||
20 | : "memory" | ||
21 | ); | ||
22 | |||
23 | return flags; | ||
24 | } | ||
25 | |||
26 | static inline void native_restore_fl(unsigned long flags) | ||
27 | { | ||
28 | __asm__ __volatile__( | ||
29 | "push %0 ; popf" | ||
30 | : /* no output */ | ||
31 | :"g" (flags) | ||
32 | :"memory", "cc" | ||
33 | ); | ||
34 | } | ||
35 | |||
36 | static inline void native_irq_disable(void) | ||
37 | { | ||
38 | asm volatile("cli": : :"memory"); | ||
39 | } | ||
40 | |||
41 | static inline void native_irq_enable(void) | ||
42 | { | ||
43 | asm volatile("sti": : :"memory"); | ||
44 | } | ||
45 | |||
46 | static inline void native_safe_halt(void) | ||
47 | { | ||
48 | asm volatile("sti; hlt": : :"memory"); | ||
49 | } | ||
50 | |||
51 | static inline void native_halt(void) | ||
52 | { | ||
53 | asm volatile("hlt": : :"memory"); | ||
54 | } | ||
55 | |||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_PARAVIRT | ||
59 | #include <asm/paravirt.h> | ||
60 | #else | ||
61 | #ifndef __ASSEMBLY__ | ||
62 | |||
63 | static inline unsigned long __raw_local_save_flags(void) | ||
64 | { | ||
65 | return native_save_fl(); | ||
66 | } | ||
67 | |||
68 | static inline void raw_local_irq_restore(unsigned long flags) | ||
69 | { | ||
70 | native_restore_fl(flags); | ||
71 | } | ||
72 | |||
73 | static inline void raw_local_irq_disable(void) | ||
74 | { | ||
75 | native_irq_disable(); | ||
76 | } | ||
77 | |||
78 | static inline void raw_local_irq_enable(void) | ||
79 | { | ||
80 | native_irq_enable(); | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Used in the idle loop; sti takes effect only after the | ||
85 | * following instruction, so "sti; hlt" has no race window: | ||
86 | */ | ||
87 | static inline void raw_safe_halt(void) | ||
88 | { | ||
89 | native_safe_halt(); | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Used when interrupts are already enabled or to | ||
94 | * shut down the processor: | ||
95 | */ | ||
96 | static inline void halt(void) | ||
97 | { | ||
98 | native_halt(); | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * For spinlocks, etc: | ||
103 | */ | ||
104 | static inline unsigned long __raw_local_irq_save(void) | ||
105 | { | ||
106 | unsigned long flags = __raw_local_save_flags(); | ||
107 | |||
108 | raw_local_irq_disable(); | ||
109 | |||
110 | return flags; | ||
111 | } | ||
112 | #else | ||
113 | |||
114 | #define ENABLE_INTERRUPTS(x) sti | ||
115 | #define DISABLE_INTERRUPTS(x) cli | ||
116 | |||
117 | #ifdef CONFIG_X86_64 | ||
118 | #define INTERRUPT_RETURN iretq | ||
119 | #define ENABLE_INTERRUPTS_SYSCALL_RET \ | ||
120 | movq %gs:pda_oldrsp, %rsp; \ | ||
121 | swapgs; \ | ||
122 | sysretq; | ||
123 | #else | ||
124 | #define INTERRUPT_RETURN iret | ||
125 | #define ENABLE_INTERRUPTS_SYSCALL_RET sti; sysexit | ||
126 | #define GET_CR0_INTO_EAX movl %cr0, %eax | ||
127 | #endif | ||
128 | |||
129 | |||
130 | #endif /* __ASSEMBLY__ */ | ||
131 | #endif /* CONFIG_PARAVIRT */ | ||
132 | |||
133 | #ifndef __ASSEMBLY__ | ||
134 | #define raw_local_save_flags(flags) \ | ||
135 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
136 | |||
137 | #define raw_local_irq_save(flags) \ | ||
138 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
139 | |||
140 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
141 | { | ||
142 | return !(flags & X86_EFLAGS_IF); | ||
143 | } | ||
144 | |||
145 | static inline int raw_irqs_disabled(void) | ||
146 | { | ||
147 | unsigned long flags = __raw_local_save_flags(); | ||
148 | |||
149 | return raw_irqs_disabled_flags(flags); | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * Makes the traced hardirq state match the machine state. | ||
154 | * | ||
155 | * This should be a rarely used function, only in places where it's | ||
156 | * otherwise impossible to know the irq state, like in traps. | ||
157 | */ | ||
158 | static inline void trace_hardirqs_fixup_flags(unsigned long flags) | ||
159 | { | ||
160 | if (raw_irqs_disabled_flags(flags)) | ||
161 | trace_hardirqs_off(); | ||
162 | else | ||
163 | trace_hardirqs_on(); | ||
164 | } | ||
165 | |||
166 | static inline void trace_hardirqs_fixup(void) | ||
167 | { | ||
168 | unsigned long flags = __raw_local_save_flags(); | ||
169 | |||
170 | trace_hardirqs_fixup_flags(flags); | ||
171 | } | ||
172 | |||
3 | #else | 173 | #else |
4 | # include "irqflags_64.h" | 174 | |
175 | #ifdef CONFIG_X86_64 | ||
176 | /* | ||
177 | * Currently paravirt can't handle swapgs nicely when we | ||
178 | * don't have a stack we can rely on (such as a user space | ||
179 | * stack). So we either find a way around these or just fault | ||
180 | * and emulate if a guest tries to call swapgs directly. | ||
181 | * | ||
182 | * Either way, this is a good way to document that we don't | ||
183 | * have a reliable stack. x86_64 only. | ||
184 | */ | ||
185 | #define SWAPGS_UNSAFE_STACK swapgs | ||
186 | #define ARCH_TRACE_IRQS_ON call trace_hardirqs_on_thunk | ||
187 | #define ARCH_TRACE_IRQS_OFF call trace_hardirqs_off_thunk | ||
188 | #define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk | ||
189 | #define ARCH_LOCKDEP_SYS_EXIT_IRQ \ | ||
190 | TRACE_IRQS_ON; \ | ||
191 | sti; \ | ||
192 | SAVE_REST; \ | ||
193 | LOCKDEP_SYS_EXIT; \ | ||
194 | RESTORE_REST; \ | ||
195 | cli; \ | ||
196 | TRACE_IRQS_OFF; | ||
197 | |||
198 | #else | ||
199 | #define ARCH_TRACE_IRQS_ON \ | ||
200 | pushl %eax; \ | ||
201 | pushl %ecx; \ | ||
202 | pushl %edx; \ | ||
203 | call trace_hardirqs_on; \ | ||
204 | popl %edx; \ | ||
205 | popl %ecx; \ | ||
206 | popl %eax; | ||
207 | |||
208 | #define ARCH_TRACE_IRQS_OFF \ | ||
209 | pushl %eax; \ | ||
210 | pushl %ecx; \ | ||
211 | pushl %edx; \ | ||
212 | call trace_hardirqs_off; \ | ||
213 | popl %edx; \ | ||
214 | popl %ecx; \ | ||
215 | popl %eax; | ||
216 | |||
217 | #define ARCH_LOCKDEP_SYS_EXIT \ | ||
218 | pushl %eax; \ | ||
219 | pushl %ecx; \ | ||
220 | pushl %edx; \ | ||
221 | call lockdep_sys_exit; \ | ||
222 | popl %edx; \ | ||
223 | popl %ecx; \ | ||
224 | popl %eax; | ||
225 | |||
226 | #define ARCH_LOCKDEP_SYS_EXIT_IRQ | ||
227 | #endif | ||
228 | |||
229 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
230 | # define TRACE_IRQS_ON ARCH_TRACE_IRQS_ON | ||
231 | # define TRACE_IRQS_OFF ARCH_TRACE_IRQS_OFF | ||
232 | #else | ||
233 | # define TRACE_IRQS_ON | ||
234 | # define TRACE_IRQS_OFF | ||
235 | #endif | ||
236 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
237 | # define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT | ||
238 | # define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ | ||
239 | # else | ||
240 | # define LOCKDEP_SYS_EXIT | ||
241 | # define LOCKDEP_SYS_EXIT_IRQ | ||
242 | # endif | ||
243 | |||
244 | #endif /* __ASSEMBLY__ */ | ||
5 | #endif | 245 | #endif |
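The raw_local_irq_save()/raw_local_irq_restore() pair defined above nests safely because the whole flags word is saved; a minimal sketch:

	unsigned long flags;

	raw_local_irq_save(flags);	/* save EFLAGS, then cli */
	/* ... critical section with interrupts off ... */
	raw_local_irq_restore(flags);	/* IF goes back to whatever it was */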
diff --git a/include/asm-x86/irqflags_32.h b/include/asm-x86/irqflags_32.h deleted file mode 100644 index 4c7720089cb5..000000000000 --- a/include/asm-x86/irqflags_32.h +++ /dev/null | |||
@@ -1,197 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-i386/irqflags.h | ||
3 | * | ||
4 | * IRQ flags handling | ||
5 | * | ||
6 | * This file gets included from lowlevel asm headers too, to provide | ||
7 | * wrapped versions of the local_irq_*() APIs, based on the | ||
8 | * raw_local_irq_*() functions from the lowlevel headers. | ||
9 | */ | ||
10 | #ifndef _ASM_IRQFLAGS_H | ||
11 | #define _ASM_IRQFLAGS_H | ||
12 | #include <asm/processor-flags.h> | ||
13 | |||
14 | #ifndef __ASSEMBLY__ | ||
15 | static inline unsigned long native_save_fl(void) | ||
16 | { | ||
17 | unsigned long f; | ||
18 | asm volatile("pushfl ; popl %0":"=g" (f): /* no input */); | ||
19 | return f; | ||
20 | } | ||
21 | |||
22 | static inline void native_restore_fl(unsigned long f) | ||
23 | { | ||
24 | asm volatile("pushl %0 ; popfl": /* no output */ | ||
25 | :"g" (f) | ||
26 | :"memory", "cc"); | ||
27 | } | ||
28 | |||
29 | static inline void native_irq_disable(void) | ||
30 | { | ||
31 | asm volatile("cli": : :"memory"); | ||
32 | } | ||
33 | |||
34 | static inline void native_irq_enable(void) | ||
35 | { | ||
36 | asm volatile("sti": : :"memory"); | ||
37 | } | ||
38 | |||
39 | static inline void native_safe_halt(void) | ||
40 | { | ||
41 | asm volatile("sti; hlt": : :"memory"); | ||
42 | } | ||
43 | |||
44 | static inline void native_halt(void) | ||
45 | { | ||
46 | asm volatile("hlt": : :"memory"); | ||
47 | } | ||
48 | #endif /* __ASSEMBLY__ */ | ||
49 | |||
50 | #ifdef CONFIG_PARAVIRT | ||
51 | #include <asm/paravirt.h> | ||
52 | #else | ||
53 | #ifndef __ASSEMBLY__ | ||
54 | |||
55 | static inline unsigned long __raw_local_save_flags(void) | ||
56 | { | ||
57 | return native_save_fl(); | ||
58 | } | ||
59 | |||
60 | static inline void raw_local_irq_restore(unsigned long flags) | ||
61 | { | ||
62 | native_restore_fl(flags); | ||
63 | } | ||
64 | |||
65 | static inline void raw_local_irq_disable(void) | ||
66 | { | ||
67 | native_irq_disable(); | ||
68 | } | ||
69 | |||
70 | static inline void raw_local_irq_enable(void) | ||
71 | { | ||
72 | native_irq_enable(); | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * Used in the idle loop; sti takes one instruction cycle | ||
77 | * to complete: | ||
78 | */ | ||
79 | static inline void raw_safe_halt(void) | ||
80 | { | ||
81 | native_safe_halt(); | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Used when interrupts are already enabled or to | ||
86 | * shutdown the processor: | ||
87 | */ | ||
88 | static inline void halt(void) | ||
89 | { | ||
90 | native_halt(); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * For spinlocks, etc: | ||
95 | */ | ||
96 | static inline unsigned long __raw_local_irq_save(void) | ||
97 | { | ||
98 | unsigned long flags = __raw_local_save_flags(); | ||
99 | |||
100 | raw_local_irq_disable(); | ||
101 | |||
102 | return flags; | ||
103 | } | ||
104 | |||
105 | #else | ||
106 | #define DISABLE_INTERRUPTS(clobbers) cli | ||
107 | #define ENABLE_INTERRUPTS(clobbers) sti | ||
108 | #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit | ||
109 | #define INTERRUPT_RETURN iret | ||
110 | #define GET_CR0_INTO_EAX movl %cr0, %eax | ||
111 | #endif /* __ASSEMBLY__ */ | ||
112 | #endif /* CONFIG_PARAVIRT */ | ||
113 | |||
114 | #ifndef __ASSEMBLY__ | ||
115 | #define raw_local_save_flags(flags) \ | ||
116 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
117 | |||
118 | #define raw_local_irq_save(flags) \ | ||
119 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
120 | |||
121 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
122 | { | ||
123 | return !(flags & X86_EFLAGS_IF); | ||
124 | } | ||
125 | |||
126 | static inline int raw_irqs_disabled(void) | ||
127 | { | ||
128 | unsigned long flags = __raw_local_save_flags(); | ||
129 | |||
130 | return raw_irqs_disabled_flags(flags); | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * makes the traced hardirq state match with the machine state | ||
135 | * | ||
136 | * should be a rarely used function, only in places where its | ||
137 | * otherwise impossible to know the irq state, like in traps. | ||
138 | */ | ||
139 | static inline void trace_hardirqs_fixup_flags(unsigned long flags) | ||
140 | { | ||
141 | if (raw_irqs_disabled_flags(flags)) | ||
142 | trace_hardirqs_off(); | ||
143 | else | ||
144 | trace_hardirqs_on(); | ||
145 | } | ||
146 | |||
147 | static inline void trace_hardirqs_fixup(void) | ||
148 | { | ||
149 | unsigned long flags = __raw_local_save_flags(); | ||
150 | |||
151 | trace_hardirqs_fixup_flags(flags); | ||
152 | } | ||
153 | #endif /* __ASSEMBLY__ */ | ||
154 | |||
155 | /* | ||
156 | * Do the CPU's IRQ-state tracing from assembly code. We call a | ||
157 | * C function, so save all the C-clobbered registers: | ||
158 | */ | ||
159 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
160 | |||
161 | # define TRACE_IRQS_ON \ | ||
162 | pushl %eax; \ | ||
163 | pushl %ecx; \ | ||
164 | pushl %edx; \ | ||
165 | call trace_hardirqs_on; \ | ||
166 | popl %edx; \ | ||
167 | popl %ecx; \ | ||
168 | popl %eax; | ||
169 | |||
170 | # define TRACE_IRQS_OFF \ | ||
171 | pushl %eax; \ | ||
172 | pushl %ecx; \ | ||
173 | pushl %edx; \ | ||
174 | call trace_hardirqs_off; \ | ||
175 | popl %edx; \ | ||
176 | popl %ecx; \ | ||
177 | popl %eax; | ||
178 | |||
179 | #else | ||
180 | # define TRACE_IRQS_ON | ||
181 | # define TRACE_IRQS_OFF | ||
182 | #endif | ||
183 | |||
184 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
185 | # define LOCKDEP_SYS_EXIT \ | ||
186 | pushl %eax; \ | ||
187 | pushl %ecx; \ | ||
188 | pushl %edx; \ | ||
189 | call lockdep_sys_exit; \ | ||
190 | popl %edx; \ | ||
191 | popl %ecx; \ | ||
192 | popl %eax; | ||
193 | #else | ||
194 | # define LOCKDEP_SYS_EXIT | ||
195 | #endif | ||
196 | |||
197 | #endif | ||
diff --git a/include/asm-x86/irqflags_64.h b/include/asm-x86/irqflags_64.h deleted file mode 100644 index bb9163bb29d1..000000000000 --- a/include/asm-x86/irqflags_64.h +++ /dev/null | |||
@@ -1,176 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-x86_64/irqflags.h | ||
3 | * | ||
4 | * IRQ flags handling | ||
5 | * | ||
6 | * This file gets included from lowlevel asm headers too, to provide | ||
7 | * wrapped versions of the local_irq_*() APIs, based on the | ||
8 | * raw_local_irq_*() functions from the lowlevel headers. | ||
9 | */ | ||
10 | #ifndef _ASM_IRQFLAGS_H | ||
11 | #define _ASM_IRQFLAGS_H | ||
12 | #include <asm/processor-flags.h> | ||
13 | |||
14 | #ifndef __ASSEMBLY__ | ||
15 | /* | ||
16 | * Interrupt control: | ||
17 | */ | ||
18 | |||
19 | static inline unsigned long __raw_local_save_flags(void) | ||
20 | { | ||
21 | unsigned long flags; | ||
22 | |||
23 | __asm__ __volatile__( | ||
24 | "# __raw_save_flags\n\t" | ||
25 | "pushfq ; popq %q0" | ||
26 | : "=g" (flags) | ||
27 | : /* no input */ | ||
28 | : "memory" | ||
29 | ); | ||
30 | |||
31 | return flags; | ||
32 | } | ||
33 | |||
34 | #define raw_local_save_flags(flags) \ | ||
35 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
36 | |||
37 | static inline void raw_local_irq_restore(unsigned long flags) | ||
38 | { | ||
39 | __asm__ __volatile__( | ||
40 | "pushq %0 ; popfq" | ||
41 | : /* no output */ | ||
42 | :"g" (flags) | ||
43 | :"memory", "cc" | ||
44 | ); | ||
45 | } | ||
46 | |||
47 | #ifdef CONFIG_X86_VSMP | ||
48 | |||
49 | /* | ||
50 | * Interrupt control for the VSMP architecture: | ||
51 | */ | ||
52 | |||
53 | static inline void raw_local_irq_disable(void) | ||
54 | { | ||
55 | unsigned long flags = __raw_local_save_flags(); | ||
56 | |||
57 | raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); | ||
58 | } | ||
59 | |||
60 | static inline void raw_local_irq_enable(void) | ||
61 | { | ||
62 | unsigned long flags = __raw_local_save_flags(); | ||
63 | |||
64 | raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | ||
65 | } | ||
66 | |||
67 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
68 | { | ||
69 | return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); | ||
70 | } | ||
71 | |||
72 | #else /* CONFIG_X86_VSMP */ | ||
73 | |||
74 | static inline void raw_local_irq_disable(void) | ||
75 | { | ||
76 | __asm__ __volatile__("cli" : : : "memory"); | ||
77 | } | ||
78 | |||
79 | static inline void raw_local_irq_enable(void) | ||
80 | { | ||
81 | __asm__ __volatile__("sti" : : : "memory"); | ||
82 | } | ||
83 | |||
84 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
85 | { | ||
86 | return !(flags & X86_EFLAGS_IF); | ||
87 | } | ||
88 | |||
89 | #endif | ||
90 | |||
91 | /* | ||
92 | * For spinlocks, etc.: | ||
93 | */ | ||
94 | |||
95 | static inline unsigned long __raw_local_irq_save(void) | ||
96 | { | ||
97 | unsigned long flags = __raw_local_save_flags(); | ||
98 | |||
99 | raw_local_irq_disable(); | ||
100 | |||
101 | return flags; | ||
102 | } | ||
103 | |||
104 | #define raw_local_irq_save(flags) \ | ||
105 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
106 | |||
107 | static inline int raw_irqs_disabled(void) | ||
108 | { | ||
109 | unsigned long flags = __raw_local_save_flags(); | ||
110 | |||
111 | return raw_irqs_disabled_flags(flags); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * makes the traced hardirq state match with the machine state | ||
116 | * | ||
117 | * should be a rarely used function, only in places where its | ||
118 | * otherwise impossible to know the irq state, like in traps. | ||
119 | */ | ||
120 | static inline void trace_hardirqs_fixup_flags(unsigned long flags) | ||
121 | { | ||
122 | if (raw_irqs_disabled_flags(flags)) | ||
123 | trace_hardirqs_off(); | ||
124 | else | ||
125 | trace_hardirqs_on(); | ||
126 | } | ||
127 | |||
128 | static inline void trace_hardirqs_fixup(void) | ||
129 | { | ||
130 | unsigned long flags = __raw_local_save_flags(); | ||
131 | |||
132 | trace_hardirqs_fixup_flags(flags); | ||
133 | } | ||
134 | /* | ||
135 | * Used in the idle loop; sti takes one instruction cycle | ||
136 | * to complete: | ||
137 | */ | ||
138 | static inline void raw_safe_halt(void) | ||
139 | { | ||
140 | __asm__ __volatile__("sti; hlt" : : : "memory"); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Used when interrupts are already enabled or to | ||
145 | * shutdown the processor: | ||
146 | */ | ||
147 | static inline void halt(void) | ||
148 | { | ||
149 | __asm__ __volatile__("hlt": : :"memory"); | ||
150 | } | ||
151 | |||
152 | #else /* __ASSEMBLY__: */ | ||
153 | # ifdef CONFIG_TRACE_IRQFLAGS | ||
154 | # define TRACE_IRQS_ON call trace_hardirqs_on_thunk | ||
155 | # define TRACE_IRQS_OFF call trace_hardirqs_off_thunk | ||
156 | # else | ||
157 | # define TRACE_IRQS_ON | ||
158 | # define TRACE_IRQS_OFF | ||
159 | # endif | ||
160 | # ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
161 | # define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk | ||
162 | # define LOCKDEP_SYS_EXIT_IRQ \ | ||
163 | TRACE_IRQS_ON; \ | ||
164 | sti; \ | ||
165 | SAVE_REST; \ | ||
166 | LOCKDEP_SYS_EXIT; \ | ||
167 | RESTORE_REST; \ | ||
168 | cli; \ | ||
169 | TRACE_IRQS_OFF; | ||
170 | # else | ||
171 | # define LOCKDEP_SYS_EXIT | ||
172 | # define LOCKDEP_SYS_EXIT_IRQ | ||
173 | # endif | ||
174 | #endif | ||
175 | |||
176 | #endif | ||
diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h index 699dd6961eda..452e2b696ff4 100644 --- a/include/asm-x86/k8.h +++ b/include/asm-x86/k8.h | |||
@@ -10,5 +10,6 @@ extern struct pci_dev **k8_northbridges; | |||
10 | extern int num_k8_northbridges; | 10 | extern int num_k8_northbridges; |
11 | extern int cache_k8_northbridges(void); | 11 | extern int cache_k8_northbridges(void); |
12 | extern void k8_flush_garts(void); | 12 | extern void k8_flush_garts(void); |
13 | extern int k8_scan_nodes(unsigned long start, unsigned long end); | ||
13 | 14 | ||
14 | #endif | 15 | #endif |
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index e2f9b62e535e..dd442a1632c0 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h | |||
@@ -22,12 +22,17 @@ enum die_val { | |||
22 | DIE_PAGE_FAULT, | 22 | DIE_PAGE_FAULT, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | extern void printk_address(unsigned long address); | 25 | extern void printk_address(unsigned long address, int reliable); |
26 | extern void die(const char *,struct pt_regs *,long); | 26 | extern void die(const char *,struct pt_regs *,long); |
27 | extern void __die(const char *,struct pt_regs *,long); | 27 | extern int __must_check __die(const char *, struct pt_regs *, long); |
28 | extern void show_registers(struct pt_regs *regs); | 28 | extern void show_registers(struct pt_regs *regs); |
29 | extern void __show_registers(struct pt_regs *, int all); | ||
30 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, | ||
31 | unsigned long *sp, unsigned long bp); | ||
32 | extern void __show_regs(struct pt_regs *regs); | ||
33 | extern void show_regs(struct pt_regs *regs); | ||
29 | extern void dump_pagetable(unsigned long); | 34 | extern void dump_pagetable(unsigned long); |
30 | extern unsigned long oops_begin(void); | 35 | extern unsigned long oops_begin(void); |
31 | extern void oops_end(unsigned long); | 36 | extern void oops_end(unsigned long, struct pt_regs *, int signr); |
32 | 37 | ||
33 | #endif | 38 | #endif |
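The new oops_begin()/__die()/oops_end() signatures imply a calling sequence along these lines (a sketch; the SIGSEGV choice is illustrative):

	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))	/* __must_check: nonzero if a notifier handled it */
		sig = 0;
	oops_end(flags, regs, sig);	/* signr == 0 means do not kill the task */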
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index 718ddbfb9516..c90d3c77afc2 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h | |||
@@ -1,5 +1,170 @@ | |||
1 | #ifndef _KEXEC_H | ||
2 | #define _KEXEC_H | ||
3 | |||
1 | #ifdef CONFIG_X86_32 | 4 | #ifdef CONFIG_X86_32 |
2 | # include "kexec_32.h" | 5 | # define PA_CONTROL_PAGE 0 |
6 | # define VA_CONTROL_PAGE 1 | ||
7 | # define PA_PGD 2 | ||
8 | # define VA_PGD 3 | ||
9 | # define PA_PTE_0 4 | ||
10 | # define VA_PTE_0 5 | ||
11 | # define PA_PTE_1 6 | ||
12 | # define VA_PTE_1 7 | ||
13 | # ifdef CONFIG_X86_PAE | ||
14 | # define PA_PMD_0 8 | ||
15 | # define VA_PMD_0 9 | ||
16 | # define PA_PMD_1 10 | ||
17 | # define VA_PMD_1 11 | ||
18 | # define PAGES_NR 12 | ||
19 | # else | ||
20 | # define PAGES_NR 8 | ||
21 | # endif | ||
3 | #else | 22 | #else |
4 | # include "kexec_64.h" | 23 | # define PA_CONTROL_PAGE 0 |
24 | # define VA_CONTROL_PAGE 1 | ||
25 | # define PA_PGD 2 | ||
26 | # define VA_PGD 3 | ||
27 | # define PA_PUD_0 4 | ||
28 | # define VA_PUD_0 5 | ||
29 | # define PA_PMD_0 6 | ||
30 | # define VA_PMD_0 7 | ||
31 | # define PA_PTE_0 8 | ||
32 | # define VA_PTE_0 9 | ||
33 | # define PA_PUD_1 10 | ||
34 | # define VA_PUD_1 11 | ||
35 | # define PA_PMD_1 12 | ||
36 | # define VA_PMD_1 13 | ||
37 | # define PA_PTE_1 14 | ||
38 | # define VA_PTE_1 15 | ||
39 | # define PA_TABLE_PAGE 16 | ||
40 | # define PAGES_NR 17 | ||
5 | #endif | 41 | #endif |
42 | |||
43 | #ifndef __ASSEMBLY__ | ||
44 | |||
45 | #include <linux/string.h> | ||
46 | |||
47 | #include <asm/page.h> | ||
48 | #include <asm/ptrace.h> | ||
49 | |||
50 | /* | ||
51 | * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return, | ||
52 | * i.e. the highest page that is mapped directly into kernel memory | ||
53 | * so that kmap is not required. | ||
54 | * | ||
55 | * So far x86_64 is limited to 40 physical address bits. | ||
56 | */ | ||
57 | #ifdef CONFIG_X86_32 | ||
58 | /* Maximum physical address we can use pages from */ | ||
59 | # define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) | ||
60 | /* Maximum address we can reach in physical address mode */ | ||
61 | # define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) | ||
62 | /* Maximum address we can use for the control code buffer */ | ||
63 | # define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | ||
64 | |||
65 | # define KEXEC_CONTROL_CODE_SIZE 4096 | ||
66 | |||
67 | /* The native architecture */ | ||
68 | # define KEXEC_ARCH KEXEC_ARCH_386 | ||
69 | |||
70 | /* We can also handle crash dumps from 64 bit kernel. */ | ||
71 | # define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) | ||
72 | #else | ||
73 | /* Maximum physical address we can use pages from */ | ||
74 | # define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL) | ||
75 | /* Maximum address we can reach in physical address mode */ | ||
76 | # define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL) | ||
77 | /* Maximum address we can use for the control pages */ | ||
78 | # define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) | ||
79 | |||
80 | /* Allocate one page for the pdp and the second for the code */ | ||
81 | # define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) | ||
82 | |||
83 | /* The native architecture */ | ||
84 | # define KEXEC_ARCH KEXEC_ARCH_X86_64 | ||
85 | #endif | ||
86 | |||
87 | /* | ||
88 | * The CPU does not save ss and sp on the stack if execution is | ||
89 | * already in kernel mode when the NMI occurs. This code fixes | ||
90 | * that up. | ||
91 | */ | ||
92 | static inline void crash_fixup_ss_esp(struct pt_regs *newregs, | ||
93 | struct pt_regs *oldregs) | ||
94 | { | ||
95 | #ifdef CONFIG_X86_32 | ||
96 | newregs->sp = (unsigned long)&(oldregs->sp); | ||
97 | __asm__ __volatile__( | ||
98 | "xorl %%eax, %%eax\n\t" | ||
99 | "movw %%ss, %%ax\n\t" | ||
100 | :"=a"(newregs->ss)); | ||
101 | #endif | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * This function captures the register state if we arrive via panic; | ||
106 | * otherwise it just fixes up ss and sp if we arrive via a kernel-mode | ||
107 | * exception. | ||
108 | */ | ||
109 | static inline void crash_setup_regs(struct pt_regs *newregs, | ||
110 | struct pt_regs *oldregs) | ||
111 | { | ||
112 | if (oldregs) { | ||
113 | memcpy(newregs, oldregs, sizeof(*newregs)); | ||
114 | crash_fixup_ss_esp(newregs, oldregs); | ||
115 | } else { | ||
116 | #ifdef CONFIG_X86_32 | ||
117 | __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx)); | ||
118 | __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx)); | ||
119 | __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx)); | ||
120 | __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si)); | ||
121 | __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di)); | ||
122 | __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp)); | ||
123 | __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax)); | ||
124 | __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp)); | ||
125 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | ||
126 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | ||
127 | __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds)); | ||
128 | __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es)); | ||
129 | __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags)); | ||
130 | #else | ||
131 | __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx)); | ||
132 | __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx)); | ||
133 | __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx)); | ||
134 | __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si)); | ||
135 | __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di)); | ||
136 | __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp)); | ||
137 | __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax)); | ||
138 | __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp)); | ||
139 | __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); | ||
140 | __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); | ||
141 | __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); | ||
142 | __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); | ||
143 | __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); | ||
144 | __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); | ||
145 | __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); | ||
146 | __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); | ||
147 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | ||
148 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | ||
149 | __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags)); | ||
150 | #endif | ||
151 | newregs->ip = (unsigned long)current_text_addr(); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | #ifdef CONFIG_X86_32 | ||
156 | asmlinkage NORET_TYPE void | ||
157 | relocate_kernel(unsigned long indirection_page, | ||
158 | unsigned long control_page, | ||
159 | unsigned long start_address, | ||
160 | unsigned int has_pae) ATTRIB_NORET; | ||
161 | #else | ||
162 | NORET_TYPE void | ||
163 | relocate_kernel(unsigned long indirection_page, | ||
164 | unsigned long page_list, | ||
165 | unsigned long start_address) ATTRIB_NORET; | ||
166 | #endif | ||
167 | |||
168 | #endif /* __ASSEMBLY__ */ | ||
169 | |||
170 | #endif /* _KEXEC_H */ | ||
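crash_setup_regs() above is called from the crash shutdown path with whatever trap registers are at hand; a minimal sketch (passing NULL makes it capture the live register state instead):

	struct pt_regs fixed_regs;

	crash_setup_regs(&fixed_regs, regs);	/* regs may be NULL */
	/* fixed_regs can then be saved into the per-cpu crash notes */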
diff --git a/include/asm-x86/kexec_32.h b/include/asm-x86/kexec_32.h deleted file mode 100644 index 4b9dc9e6b701..000000000000 --- a/include/asm-x86/kexec_32.h +++ /dev/null | |||
@@ -1,99 +0,0 @@ | |||
1 | #ifndef _I386_KEXEC_H | ||
2 | #define _I386_KEXEC_H | ||
3 | |||
4 | #define PA_CONTROL_PAGE 0 | ||
5 | #define VA_CONTROL_PAGE 1 | ||
6 | #define PA_PGD 2 | ||
7 | #define VA_PGD 3 | ||
8 | #define PA_PTE_0 4 | ||
9 | #define VA_PTE_0 5 | ||
10 | #define PA_PTE_1 6 | ||
11 | #define VA_PTE_1 7 | ||
12 | #ifdef CONFIG_X86_PAE | ||
13 | #define PA_PMD_0 8 | ||
14 | #define VA_PMD_0 9 | ||
15 | #define PA_PMD_1 10 | ||
16 | #define VA_PMD_1 11 | ||
17 | #define PAGES_NR 12 | ||
18 | #else | ||
19 | #define PAGES_NR 8 | ||
20 | #endif | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/string.h> | ||
26 | |||
27 | /* | ||
28 | * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return, | ||
29 | * i.e. the highest page that is mapped directly into kernel memory, | ||
30 | * so kmap is not required. | ||
31 | */ | ||
32 | |||
33 | /* Maximum physical address we can use pages from */ | ||
34 | #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) | ||
35 | /* Maximum address we can reach in physical address mode */ | ||
36 | #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) | ||
37 | /* Maximum address we can use for the control code buffer */ | ||
38 | #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | ||
39 | |||
40 | #define KEXEC_CONTROL_CODE_SIZE 4096 | ||
41 | |||
42 | /* The native architecture */ | ||
43 | #define KEXEC_ARCH KEXEC_ARCH_386 | ||
44 | |||
45 | /* We can also handle crash dumps from a 64-bit kernel. */ | ||
46 | #define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) | ||
47 | |||
48 | /* The CPU does not save ss and esp on the stack if execution is already | ||
49 | * running in kernel mode at the time of the NMI. This code | ||
50 | * fixes that up. | ||
51 | */ | ||
52 | static inline void crash_fixup_ss_esp(struct pt_regs *newregs, | ||
53 | struct pt_regs *oldregs) | ||
54 | { | ||
55 | memcpy(newregs, oldregs, sizeof(*newregs)); | ||
56 | newregs->esp = (unsigned long)&(oldregs->esp); | ||
57 | __asm__ __volatile__( | ||
58 | "xorl %%eax, %%eax\n\t" | ||
59 | "movw %%ss, %%ax\n\t" | ||
60 | :"=a"(newregs->xss)); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * This function is responsible for capturing register state if coming | ||
65 | * via panic; otherwise it just fixes up ss and esp when coming via a | ||
66 | * kernel-mode exception. | ||
67 | */ | ||
68 | static inline void crash_setup_regs(struct pt_regs *newregs, | ||
69 | struct pt_regs *oldregs) | ||
70 | { | ||
71 | if (oldregs) | ||
72 | crash_fixup_ss_esp(newregs, oldregs); | ||
73 | else { | ||
74 | __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx)); | ||
75 | __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx)); | ||
76 | __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx)); | ||
77 | __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi)); | ||
78 | __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi)); | ||
79 | __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp)); | ||
80 | __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax)); | ||
81 | __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp)); | ||
82 | __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss)); | ||
83 | __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs)); | ||
84 | __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds)); | ||
85 | __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes)); | ||
86 | __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags)); | ||
87 | |||
88 | newregs->eip = (unsigned long)current_text_addr(); | ||
89 | } | ||
90 | } | ||
91 | asmlinkage NORET_TYPE void | ||
92 | relocate_kernel(unsigned long indirection_page, | ||
93 | unsigned long control_page, | ||
94 | unsigned long start_address, | ||
95 | unsigned int has_pae) ATTRIB_NORET; | ||
96 | |||
97 | #endif /* __ASSEMBLY__ */ | ||
98 | |||
99 | #endif /* _I386_KEXEC_H */ | ||
diff --git a/include/asm-x86/kexec_64.h b/include/asm-x86/kexec_64.h deleted file mode 100644 index 738e581b67f8..000000000000 --- a/include/asm-x86/kexec_64.h +++ /dev/null | |||
@@ -1,94 +0,0 @@ | |||
1 | #ifndef _X86_64_KEXEC_H | ||
2 | #define _X86_64_KEXEC_H | ||
3 | |||
4 | #define PA_CONTROL_PAGE 0 | ||
5 | #define VA_CONTROL_PAGE 1 | ||
6 | #define PA_PGD 2 | ||
7 | #define VA_PGD 3 | ||
8 | #define PA_PUD_0 4 | ||
9 | #define VA_PUD_0 5 | ||
10 | #define PA_PMD_0 6 | ||
11 | #define VA_PMD_0 7 | ||
12 | #define PA_PTE_0 8 | ||
13 | #define VA_PTE_0 9 | ||
14 | #define PA_PUD_1 10 | ||
15 | #define VA_PUD_1 11 | ||
16 | #define PA_PMD_1 12 | ||
17 | #define VA_PMD_1 13 | ||
18 | #define PA_PTE_1 14 | ||
19 | #define VA_PTE_1 15 | ||
20 | #define PA_TABLE_PAGE 16 | ||
21 | #define PAGES_NR 17 | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/string.h> | ||
26 | |||
27 | #include <asm/page.h> | ||
28 | #include <asm/ptrace.h> | ||
29 | |||
30 | /* | ||
31 | * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return, | ||
32 | * i.e. the highest page that is mapped directly into kernel memory, | ||
33 | * so kmap is not required. | ||
34 | * | ||
35 | * So far x86_64 is limited to 40 physical address bits. | ||
36 | */ | ||
37 | |||
38 | /* Maximum physical address we can use pages from */ | ||
39 | #define KEXEC_SOURCE_MEMORY_LIMIT (0xFFFFFFFFFFUL) | ||
40 | /* Maximum address we can reach in physical address mode */ | ||
41 | #define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL) | ||
42 | /* Maximum address we can use for the control pages */ | ||
43 | #define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) | ||
44 | |||
45 | /* Allocate one page for the pdp and the second for the code */ | ||
46 | #define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) | ||
47 | |||
48 | /* The native architecture */ | ||
49 | #define KEXEC_ARCH KEXEC_ARCH_X86_64 | ||
50 | |||
51 | /* | ||
52 | * Save the registers of the cpu on which panic occurred in | ||
53 | * crash_kexec so that a valid sp is captured. The registers of other cpus | ||
54 | * will be saved in machine_crash_shutdown while shooting them down. | ||
55 | */ | ||
56 | |||
57 | static inline void crash_setup_regs(struct pt_regs *newregs, | ||
58 | struct pt_regs *oldregs) | ||
59 | { | ||
60 | if (oldregs) | ||
61 | memcpy(newregs, oldregs, sizeof(*newregs)); | ||
62 | else { | ||
63 | __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx)); | ||
64 | __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx)); | ||
65 | __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx)); | ||
66 | __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi)); | ||
67 | __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi)); | ||
68 | __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp)); | ||
69 | __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax)); | ||
70 | __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp)); | ||
71 | __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); | ||
72 | __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); | ||
73 | __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); | ||
74 | __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); | ||
75 | __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); | ||
76 | __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); | ||
77 | __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); | ||
78 | __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); | ||
79 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | ||
80 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | ||
81 | __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags)); | ||
82 | |||
83 | newregs->rip = (unsigned long)current_text_addr(); | ||
84 | } | ||
85 | } | ||
86 | |||
87 | NORET_TYPE void | ||
88 | relocate_kernel(unsigned long indirection_page, | ||
89 | unsigned long page_list, | ||
90 | unsigned long start_address) ATTRIB_NORET; | ||
91 | |||
92 | #endif /* __ASSEMBLY__ */ | ||
93 | |||
94 | #endif /* _X86_64_KEXEC_H */ | ||
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index b7bbd25ba2a6..143476a3cb52 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h | |||
@@ -1,5 +1,98 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_KPROBES_H |
2 | # include "kprobes_32.h" | 2 | #define _ASM_KPROBES_H |
3 | #else | 3 | /* |
4 | # include "kprobes_64.h" | 4 | * Kernel Probes (KProbes) |
5 | #endif | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | * | ||
20 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
21 | * | ||
22 | * See arch/x86/kernel/kprobes.c for x86 kprobes history. | ||
23 | */ | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/ptrace.h> | ||
26 | #include <linux/percpu.h> | ||
27 | |||
28 | #define __ARCH_WANT_KPROBES_INSN_SLOT | ||
29 | |||
30 | struct pt_regs; | ||
31 | struct kprobe; | ||
32 | |||
33 | typedef u8 kprobe_opcode_t; | ||
34 | #define BREAKPOINT_INSTRUCTION 0xcc | ||
35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | ||
36 | #define MAX_INSN_SIZE 16 | ||
37 | #define MAX_STACK_SIZE 64 | ||
38 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | ||
39 | (((unsigned long)current_thread_info()) + THREAD_SIZE \ | ||
40 | - (unsigned long)(ADDR))) \ | ||
41 | ? (MAX_STACK_SIZE) \ | ||
42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE \ | ||
43 | - (unsigned long)(ADDR))) | ||
44 | |||
45 | #define ARCH_SUPPORTS_KRETPROBES | ||
46 | #define flush_insn_slot(p) do { } while (0) | ||
47 | |||
48 | extern const int kretprobe_blacklist_size; | ||
49 | |||
50 | void arch_remove_kprobe(struct kprobe *p); | ||
51 | void kretprobe_trampoline(void); | ||
52 | |||
53 | /* Architecture-specific copy of the original instruction */ | ||
54 | struct arch_specific_insn { | ||
55 | /* copy of the original instruction */ | ||
56 | kprobe_opcode_t *insn; | ||
57 | /* | ||
58 | * boostable = -1: This instruction type is not boostable. | ||
59 | * boostable = 0: This instruction type is boostable. | ||
60 | * boostable = 1: This instruction has been boosted: we have | ||
61 | * added a relative jump after the instruction copy in insn, | ||
62 | * so no single-step and fixup are needed (unless there's | ||
63 | * a post_handler or break_handler). | ||
64 | */ | ||
65 | int boostable; | ||
66 | }; | ||
67 | |||
68 | struct prev_kprobe { | ||
69 | struct kprobe *kp; | ||
70 | unsigned long status; | ||
71 | unsigned long old_flags; | ||
72 | unsigned long saved_flags; | ||
73 | }; | ||
74 | |||
75 | /* per-cpu kprobe control block */ | ||
76 | struct kprobe_ctlblk { | ||
77 | unsigned long kprobe_status; | ||
78 | unsigned long kprobe_old_flags; | ||
79 | unsigned long kprobe_saved_flags; | ||
80 | unsigned long *jprobe_saved_sp; | ||
81 | struct pt_regs jprobe_saved_regs; | ||
82 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; | ||
83 | struct prev_kprobe prev_kprobe; | ||
84 | }; | ||
85 | |||
86 | /* Traps 3 and 1 are interrupt gates for kprobes, so restore the status of | ||
87 | * IF, if necessary, before executing the original int3/int1 (trap) handler. | ||
88 | */ | ||
89 | static inline void restore_interrupts(struct pt_regs *regs) | ||
90 | { | ||
91 | if (regs->flags & X86_EFLAGS_IF) | ||
92 | local_irq_enable(); | ||
93 | } | ||
94 | |||
95 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | ||
96 | extern int kprobe_exceptions_notify(struct notifier_block *self, | ||
97 | unsigned long val, void *data); | ||
98 | #endif /* _ASM_KPROBES_H */ | ||
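
MIN_STACK_SIZE() above is a clamped distance to the end of the current thread's kernel stack: a jprobe copies at most MAX_STACK_SIZE bytes, and never past current_thread_info() + THREAD_SIZE. A plain-C restatement of that arithmetic, as a sketch (stack_top stands in for the thread-stack end the macro computes):

#include <stddef.h>

#define MAX_STACK_SIZE 64

static size_t min_stack_size(unsigned long addr, unsigned long stack_top)
{
	/* bytes remaining between addr and the end of the stack region */
	size_t remaining = stack_top - addr;

	/* copy whichever is smaller: the cap or the space actually left */
	return remaining < MAX_STACK_SIZE ? remaining : MAX_STACK_SIZE;
}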
diff --git a/include/asm-x86/kprobes_32.h b/include/asm-x86/kprobes_32.h deleted file mode 100644 index 9fe8f3bddfd5..000000000000 --- a/include/asm-x86/kprobes_32.h +++ /dev/null | |||
@@ -1,94 +0,0 @@ | |||
1 | #ifndef _ASM_KPROBES_H | ||
2 | #define _ASM_KPROBES_H | ||
3 | /* | ||
4 | * Kernel Probes (KProbes) | ||
5 | * include/asm-i386/kprobes.h | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
22 | * | ||
23 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel | ||
24 | * Probes initial implementation ( includes suggestions from | ||
25 | * Rusty Russell). | ||
26 | */ | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | |||
30 | #define __ARCH_WANT_KPROBES_INSN_SLOT | ||
31 | |||
32 | struct kprobe; | ||
33 | struct pt_regs; | ||
34 | |||
35 | typedef u8 kprobe_opcode_t; | ||
36 | #define BREAKPOINT_INSTRUCTION 0xcc | ||
37 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | ||
38 | #define MAX_INSN_SIZE 16 | ||
39 | #define MAX_STACK_SIZE 64 | ||
40 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | ||
41 | (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ | ||
42 | ? (MAX_STACK_SIZE) \ | ||
43 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) | ||
44 | |||
45 | #define ARCH_SUPPORTS_KRETPROBES | ||
46 | #define flush_insn_slot(p) do { } while (0) | ||
47 | |||
48 | extern const int kretprobe_blacklist_size; | ||
49 | |||
50 | void arch_remove_kprobe(struct kprobe *p); | ||
51 | void kretprobe_trampoline(void); | ||
52 | |||
53 | /* Architecture specific copy of original instruction*/ | ||
54 | struct arch_specific_insn { | ||
55 | /* copy of the original instruction */ | ||
56 | kprobe_opcode_t *insn; | ||
57 | /* | ||
58 | * If this flag is not 0, this kprobe can be boosted when its | ||
59 | * post_handler and break_handler are not set. | ||
60 | */ | ||
61 | int boostable; | ||
62 | }; | ||
63 | |||
64 | struct prev_kprobe { | ||
65 | struct kprobe *kp; | ||
66 | unsigned long status; | ||
67 | unsigned long old_eflags; | ||
68 | unsigned long saved_eflags; | ||
69 | }; | ||
70 | |||
71 | /* per-cpu kprobe control block */ | ||
72 | struct kprobe_ctlblk { | ||
73 | unsigned long kprobe_status; | ||
74 | unsigned long kprobe_old_eflags; | ||
75 | unsigned long kprobe_saved_eflags; | ||
76 | unsigned long *jprobe_saved_esp; | ||
77 | struct pt_regs jprobe_saved_regs; | ||
78 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; | ||
79 | struct prev_kprobe prev_kprobe; | ||
80 | }; | ||
81 | |||
82 | /* trap3/1 are intr gates for kprobes. So, restore the status of IF, | ||
83 | * if necessary, before executing the original int3/1 (trap) handler. | ||
84 | */ | ||
85 | static inline void restore_interrupts(struct pt_regs *regs) | ||
86 | { | ||
87 | if (regs->eflags & IF_MASK) | ||
88 | local_irq_enable(); | ||
89 | } | ||
90 | |||
91 | extern int kprobe_exceptions_notify(struct notifier_block *self, | ||
92 | unsigned long val, void *data); | ||
93 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | ||
94 | #endif /* _ASM_KPROBES_H */ | ||
diff --git a/include/asm-x86/kprobes_64.h b/include/asm-x86/kprobes_64.h deleted file mode 100644 index 743d76218fc9..000000000000 --- a/include/asm-x86/kprobes_64.h +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | #ifndef _ASM_KPROBES_H | ||
2 | #define _ASM_KPROBES_H | ||
3 | /* | ||
4 | * Kernel Probes (KProbes) | ||
5 | * include/asm-x86_64/kprobes.h | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
22 | * | ||
23 | * 2004-Oct Prasanna S Panchamukhi <prasanna@in.ibm.com> and Jim Keniston | ||
24 | * kenistoj@us.ibm.com adopted from i386. | ||
25 | */ | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/ptrace.h> | ||
28 | #include <linux/percpu.h> | ||
29 | |||
30 | #define __ARCH_WANT_KPROBES_INSN_SLOT | ||
31 | |||
32 | struct pt_regs; | ||
33 | struct kprobe; | ||
34 | |||
35 | typedef u8 kprobe_opcode_t; | ||
36 | #define BREAKPOINT_INSTRUCTION 0xcc | ||
37 | #define MAX_INSN_SIZE 15 | ||
38 | #define MAX_STACK_SIZE 64 | ||
39 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | ||
40 | (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ | ||
41 | ? (MAX_STACK_SIZE) \ | ||
42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) | ||
43 | |||
44 | #define ARCH_SUPPORTS_KRETPROBES | ||
45 | extern const int kretprobe_blacklist_size; | ||
46 | |||
47 | void kretprobe_trampoline(void); | ||
48 | extern void arch_remove_kprobe(struct kprobe *p); | ||
49 | #define flush_insn_slot(p) do { } while (0) | ||
50 | |||
51 | /* Architecture specific copy of original instruction*/ | ||
52 | struct arch_specific_insn { | ||
53 | /* copy of the original instruction */ | ||
54 | kprobe_opcode_t *insn; | ||
55 | }; | ||
56 | |||
57 | struct prev_kprobe { | ||
58 | struct kprobe *kp; | ||
59 | unsigned long status; | ||
60 | unsigned long old_rflags; | ||
61 | unsigned long saved_rflags; | ||
62 | }; | ||
63 | |||
64 | /* per-cpu kprobe control block */ | ||
65 | struct kprobe_ctlblk { | ||
66 | unsigned long kprobe_status; | ||
67 | unsigned long kprobe_old_rflags; | ||
68 | unsigned long kprobe_saved_rflags; | ||
69 | unsigned long *jprobe_saved_rsp; | ||
70 | struct pt_regs jprobe_saved_regs; | ||
71 | kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; | ||
72 | struct prev_kprobe prev_kprobe; | ||
73 | }; | ||
74 | |||
75 | /* trap3/1 are intr gates for kprobes. So, restore the status of IF, | ||
76 | * if necessary, before executing the original int3/1 (trap) handler. | ||
77 | */ | ||
78 | static inline void restore_interrupts(struct pt_regs *regs) | ||
79 | { | ||
80 | if (regs->eflags & IF_MASK) | ||
81 | local_irq_enable(); | ||
82 | } | ||
83 | |||
84 | extern int post_kprobe_handler(struct pt_regs *regs); | ||
85 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); | ||
86 | extern int kprobe_handler(struct pt_regs *regs); | ||
87 | |||
88 | extern int kprobe_exceptions_notify(struct notifier_block *self, | ||
89 | unsigned long val, void *data); | ||
90 | #endif /* _ASM_KPROBES_H */ | ||
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h new file mode 100644 index 000000000000..7a71120426a3 --- /dev/null +++ b/include/asm-x86/kvm.h | |||
@@ -0,0 +1,191 @@ | |||
1 | #ifndef __LINUX_KVM_X86_H | ||
2 | #define __LINUX_KVM_X86_H | ||
3 | |||
4 | /* | ||
5 | * KVM x86 specific structures and definitions | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <asm/types.h> | ||
10 | #include <linux/ioctl.h> | ||
11 | |||
12 | /* Architectural interrupt line count. */ | ||
13 | #define KVM_NR_INTERRUPTS 256 | ||
14 | |||
15 | struct kvm_memory_alias { | ||
16 | __u32 slot; /* this has a different namespace than memory slots */ | ||
17 | __u32 flags; | ||
18 | __u64 guest_phys_addr; | ||
19 | __u64 memory_size; | ||
20 | __u64 target_phys_addr; | ||
21 | }; | ||
22 | |||
23 | /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ | ||
24 | struct kvm_pic_state { | ||
25 | __u8 last_irr; /* edge detection */ | ||
26 | __u8 irr; /* interrupt request register */ | ||
27 | __u8 imr; /* interrupt mask register */ | ||
28 | __u8 isr; /* interrupt service register */ | ||
29 | __u8 priority_add; /* highest irq priority */ | ||
30 | __u8 irq_base; | ||
31 | __u8 read_reg_select; | ||
32 | __u8 poll; | ||
33 | __u8 special_mask; | ||
34 | __u8 init_state; | ||
35 | __u8 auto_eoi; | ||
36 | __u8 rotate_on_auto_eoi; | ||
37 | __u8 special_fully_nested_mode; | ||
38 | __u8 init4; /* true if 4 byte init */ | ||
39 | __u8 elcr; /* PIIX edge/trigger selection */ | ||
40 | __u8 elcr_mask; | ||
41 | }; | ||
42 | |||
43 | #define KVM_IOAPIC_NUM_PINS 24 | ||
44 | struct kvm_ioapic_state { | ||
45 | __u64 base_address; | ||
46 | __u32 ioregsel; | ||
47 | __u32 id; | ||
48 | __u32 irr; | ||
49 | __u32 pad; | ||
50 | union { | ||
51 | __u64 bits; | ||
52 | struct { | ||
53 | __u8 vector; | ||
54 | __u8 delivery_mode:3; | ||
55 | __u8 dest_mode:1; | ||
56 | __u8 delivery_status:1; | ||
57 | __u8 polarity:1; | ||
58 | __u8 remote_irr:1; | ||
59 | __u8 trig_mode:1; | ||
60 | __u8 mask:1; | ||
61 | __u8 reserve:7; | ||
62 | __u8 reserved[4]; | ||
63 | __u8 dest_id; | ||
64 | } fields; | ||
65 | } redirtbl[KVM_IOAPIC_NUM_PINS]; | ||
66 | }; | ||
67 | |||
68 | #define KVM_IRQCHIP_PIC_MASTER 0 | ||
69 | #define KVM_IRQCHIP_PIC_SLAVE 1 | ||
70 | #define KVM_IRQCHIP_IOAPIC 2 | ||
71 | |||
72 | /* for KVM_GET_REGS and KVM_SET_REGS */ | ||
73 | struct kvm_regs { | ||
74 | /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ | ||
75 | __u64 rax, rbx, rcx, rdx; | ||
76 | __u64 rsi, rdi, rsp, rbp; | ||
77 | __u64 r8, r9, r10, r11; | ||
78 | __u64 r12, r13, r14, r15; | ||
79 | __u64 rip, rflags; | ||
80 | }; | ||
81 | |||
82 | /* for KVM_GET_LAPIC and KVM_SET_LAPIC */ | ||
83 | #define KVM_APIC_REG_SIZE 0x400 | ||
84 | struct kvm_lapic_state { | ||
85 | char regs[KVM_APIC_REG_SIZE]; | ||
86 | }; | ||
87 | |||
88 | struct kvm_segment { | ||
89 | __u64 base; | ||
90 | __u32 limit; | ||
91 | __u16 selector; | ||
92 | __u8 type; | ||
93 | __u8 present, dpl, db, s, l, g, avl; | ||
94 | __u8 unusable; | ||
95 | __u8 padding; | ||
96 | }; | ||
97 | |||
98 | struct kvm_dtable { | ||
99 | __u64 base; | ||
100 | __u16 limit; | ||
101 | __u16 padding[3]; | ||
102 | }; | ||
103 | |||
104 | |||
105 | /* for KVM_GET_SREGS and KVM_SET_SREGS */ | ||
106 | struct kvm_sregs { | ||
107 | /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */ | ||
108 | struct kvm_segment cs, ds, es, fs, gs, ss; | ||
109 | struct kvm_segment tr, ldt; | ||
110 | struct kvm_dtable gdt, idt; | ||
111 | __u64 cr0, cr2, cr3, cr4, cr8; | ||
112 | __u64 efer; | ||
113 | __u64 apic_base; | ||
114 | __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; | ||
115 | }; | ||
116 | |||
117 | /* for KVM_GET_FPU and KVM_SET_FPU */ | ||
118 | struct kvm_fpu { | ||
119 | __u8 fpr[8][16]; | ||
120 | __u16 fcw; | ||
121 | __u16 fsw; | ||
122 | __u8 ftwx; /* in fxsave format */ | ||
123 | __u8 pad1; | ||
124 | __u16 last_opcode; | ||
125 | __u64 last_ip; | ||
126 | __u64 last_dp; | ||
127 | __u8 xmm[16][16]; | ||
128 | __u32 mxcsr; | ||
129 | __u32 pad2; | ||
130 | }; | ||
131 | |||
132 | struct kvm_msr_entry { | ||
133 | __u32 index; | ||
134 | __u32 reserved; | ||
135 | __u64 data; | ||
136 | }; | ||
137 | |||
138 | /* for KVM_GET_MSRS and KVM_SET_MSRS */ | ||
139 | struct kvm_msrs { | ||
140 | __u32 nmsrs; /* number of msrs in entries */ | ||
141 | __u32 pad; | ||
142 | |||
143 | struct kvm_msr_entry entries[0]; | ||
144 | }; | ||
145 | |||
146 | /* for KVM_GET_MSR_INDEX_LIST */ | ||
147 | struct kvm_msr_list { | ||
148 | __u32 nmsrs; /* number of msrs in entries */ | ||
149 | __u32 indices[0]; | ||
150 | }; | ||
151 | |||
152 | |||
153 | struct kvm_cpuid_entry { | ||
154 | __u32 function; | ||
155 | __u32 eax; | ||
156 | __u32 ebx; | ||
157 | __u32 ecx; | ||
158 | __u32 edx; | ||
159 | __u32 padding; | ||
160 | }; | ||
161 | |||
162 | /* for KVM_SET_CPUID */ | ||
163 | struct kvm_cpuid { | ||
164 | __u32 nent; | ||
165 | __u32 padding; | ||
166 | struct kvm_cpuid_entry entries[0]; | ||
167 | }; | ||
168 | |||
169 | struct kvm_cpuid_entry2 { | ||
170 | __u32 function; | ||
171 | __u32 index; | ||
172 | __u32 flags; | ||
173 | __u32 eax; | ||
174 | __u32 ebx; | ||
175 | __u32 ecx; | ||
176 | __u32 edx; | ||
177 | __u32 padding[3]; | ||
178 | }; | ||
179 | |||
180 | #define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1 | ||
181 | #define KVM_CPUID_FLAG_STATEFUL_FUNC 2 | ||
182 | #define KVM_CPUID_FLAG_STATE_READ_NEXT 4 | ||
183 | |||
184 | /* for KVM_SET_CPUID2 */ | ||
185 | struct kvm_cpuid2 { | ||
186 | __u32 nent; | ||
187 | __u32 padding; | ||
188 | struct kvm_cpuid_entry2 entries[0]; | ||
189 | }; | ||
190 | |||
191 | #endif | ||
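
The structures above are the payloads for KVM's vcpu ioctls. As a usage illustration, here is a hedged user-space sketch that reads a vCPU's general-purpose registers with KVM_GET_REGS; it assumes vcpu_fd was already obtained through the usual /dev/kvm, KVM_CREATE_VM, KVM_CREATE_VCPU sequence:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_vcpu_regs(int vcpu_fd)
{
	struct kvm_regs regs;

	/* fills in the kvm_regs layout defined above */
	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
		perror("KVM_GET_REGS");
		return -1;
	}
	printf("rip=%#llx rsp=%#llx rflags=%#llx\n",
	       (unsigned long long)regs.rip,
	       (unsigned long long)regs.rsp,
	       (unsigned long long)regs.rflags);
	return 0;
}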
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h new file mode 100644 index 000000000000..4702b04b979a --- /dev/null +++ b/include/asm-x86/kvm_host.h | |||
@@ -0,0 +1,611 @@ | |||
1 | /* | ||
2 | * Kernel-based Virtual Machine driver for Linux | ||
3 | * | ||
4 | * This header defines architecture specific interfaces, x86 version | ||
5 | * | ||
6 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
7 | * the COPYING file in the top-level directory. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #ifndef ASM_KVM_HOST_H | ||
12 | #define ASM_KVM_HOST_H | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/mm.h> | ||
16 | |||
17 | #include <linux/kvm.h> | ||
18 | #include <linux/kvm_para.h> | ||
19 | #include <linux/kvm_types.h> | ||
20 | |||
21 | #include <asm/desc.h> | ||
22 | |||
23 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) | ||
24 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) | ||
25 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL) | ||
26 | |||
27 | #define KVM_GUEST_CR0_MASK \ | ||
28 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ | ||
29 | | X86_CR0_NW | X86_CR0_CD) | ||
30 | #define KVM_VM_CR0_ALWAYS_ON \ | ||
31 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ | ||
32 | | X86_CR0_MP) | ||
33 | #define KVM_GUEST_CR4_MASK \ | ||
34 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) | ||
35 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | ||
36 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | ||
37 | |||
38 | #define INVALID_PAGE (~(hpa_t)0) | ||
39 | #define UNMAPPED_GVA (~(gpa_t)0) | ||
40 | |||
41 | #define DE_VECTOR 0 | ||
42 | #define UD_VECTOR 6 | ||
43 | #define NM_VECTOR 7 | ||
44 | #define DF_VECTOR 8 | ||
45 | #define TS_VECTOR 10 | ||
46 | #define NP_VECTOR 11 | ||
47 | #define SS_VECTOR 12 | ||
48 | #define GP_VECTOR 13 | ||
49 | #define PF_VECTOR 14 | ||
50 | |||
51 | #define SELECTOR_TI_MASK (1 << 2) | ||
52 | #define SELECTOR_RPL_MASK 0x03 | ||
53 | |||
54 | #define IOPL_SHIFT 12 | ||
55 | |||
56 | #define KVM_ALIAS_SLOTS 4 | ||
57 | |||
58 | #define KVM_PERMILLE_MMU_PAGES 20 | ||
59 | #define KVM_MIN_ALLOC_MMU_PAGES 64 | ||
60 | #define KVM_NUM_MMU_PAGES 1024 | ||
61 | #define KVM_MIN_FREE_MMU_PAGES 5 | ||
62 | #define KVM_REFILL_PAGES 25 | ||
63 | #define KVM_MAX_CPUID_ENTRIES 40 | ||
64 | |||
65 | extern spinlock_t kvm_lock; | ||
66 | extern struct list_head vm_list; | ||
67 | |||
68 | struct kvm_vcpu; | ||
69 | struct kvm; | ||
70 | |||
71 | enum { | ||
72 | VCPU_REGS_RAX = 0, | ||
73 | VCPU_REGS_RCX = 1, | ||
74 | VCPU_REGS_RDX = 2, | ||
75 | VCPU_REGS_RBX = 3, | ||
76 | VCPU_REGS_RSP = 4, | ||
77 | VCPU_REGS_RBP = 5, | ||
78 | VCPU_REGS_RSI = 6, | ||
79 | VCPU_REGS_RDI = 7, | ||
80 | #ifdef CONFIG_X86_64 | ||
81 | VCPU_REGS_R8 = 8, | ||
82 | VCPU_REGS_R9 = 9, | ||
83 | VCPU_REGS_R10 = 10, | ||
84 | VCPU_REGS_R11 = 11, | ||
85 | VCPU_REGS_R12 = 12, | ||
86 | VCPU_REGS_R13 = 13, | ||
87 | VCPU_REGS_R14 = 14, | ||
88 | VCPU_REGS_R15 = 15, | ||
89 | #endif | ||
90 | NR_VCPU_REGS | ||
91 | }; | ||
92 | |||
93 | enum { | ||
94 | VCPU_SREG_CS, | ||
95 | VCPU_SREG_DS, | ||
96 | VCPU_SREG_ES, | ||
97 | VCPU_SREG_FS, | ||
98 | VCPU_SREG_GS, | ||
99 | VCPU_SREG_SS, | ||
100 | VCPU_SREG_TR, | ||
101 | VCPU_SREG_LDTR, | ||
102 | }; | ||
103 | |||
104 | #include <asm/kvm_x86_emulate.h> | ||
105 | |||
106 | #define KVM_NR_MEM_OBJS 40 | ||
107 | |||
108 | /* | ||
109 | * We don't want allocation failures within the mmu code, so we preallocate | ||
110 | * enough memory for a single page fault in a cache. | ||
111 | */ | ||
112 | struct kvm_mmu_memory_cache { | ||
113 | int nobjs; | ||
114 | void *objects[KVM_NR_MEM_OBJS]; | ||
115 | }; | ||
116 | |||
117 | #define NR_PTE_CHAIN_ENTRIES 5 | ||
118 | |||
119 | struct kvm_pte_chain { | ||
120 | u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES]; | ||
121 | struct hlist_node link; | ||
122 | }; | ||
123 | |||
124 | /* | ||
125 | * kvm_mmu_page_role, below, is defined as: | ||
126 | * | ||
127 | * bits 0:3 - total guest paging levels (2-4, or zero for real mode) | ||
128 | * bits 4:7 - page table level for this shadow (1-4) | ||
129 | * bits 8:9 - page table quadrant for 2-level guests | ||
130 | * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode) | ||
131 | * bits 17:19 - common access permissions for all ptes in this shadow page | ||
132 | */ | ||
133 | union kvm_mmu_page_role { | ||
134 | unsigned word; | ||
135 | struct { | ||
136 | unsigned glevels : 4; | ||
137 | unsigned level : 4; | ||
138 | unsigned quadrant : 2; | ||
139 | unsigned pad_for_nice_hex_output : 6; | ||
140 | unsigned metaphysical : 1; | ||
141 | unsigned access : 3; | ||
142 | }; | ||
143 | }; | ||
144 | |||
145 | struct kvm_mmu_page { | ||
146 | struct list_head link; | ||
147 | struct hlist_node hash_link; | ||
148 | |||
149 | /* | ||
150 | * The following two entries are used to key the shadow page in the | ||
151 | * hash table. | ||
152 | */ | ||
153 | gfn_t gfn; | ||
154 | union kvm_mmu_page_role role; | ||
155 | |||
156 | u64 *spt; | ||
157 | /* hold the gfn of each spte inside spt */ | ||
158 | gfn_t *gfns; | ||
159 | unsigned long slot_bitmap; /* One bit set per slot which has memory | ||
160 | * in this shadow page. | ||
161 | */ | ||
162 | int multimapped; /* More than one parent_pte? */ | ||
163 | int root_count; /* Currently serving as active root */ | ||
164 | union { | ||
165 | u64 *parent_pte; /* !multimapped */ | ||
166 | struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ | ||
167 | }; | ||
168 | }; | ||
169 | |||
170 | /* | ||
171 | * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level | ||
172 | * 32-bit). The kvm_mmu structure abstracts the details of the current mmu | ||
173 | * mode. | ||
174 | */ | ||
175 | struct kvm_mmu { | ||
176 | void (*new_cr3)(struct kvm_vcpu *vcpu); | ||
177 | int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); | ||
178 | void (*free)(struct kvm_vcpu *vcpu); | ||
179 | gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); | ||
180 | void (*prefetch_page)(struct kvm_vcpu *vcpu, | ||
181 | struct kvm_mmu_page *page); | ||
182 | hpa_t root_hpa; | ||
183 | int root_level; | ||
184 | int shadow_root_level; | ||
185 | |||
186 | u64 *pae_root; | ||
187 | }; | ||
188 | |||
189 | struct kvm_vcpu_arch { | ||
190 | u64 host_tsc; | ||
191 | int interrupt_window_open; | ||
192 | unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ | ||
193 | DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS); | ||
194 | unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */ | ||
195 | unsigned long rip; /* needs vcpu_load_rsp_rip() */ | ||
196 | |||
197 | unsigned long cr0; | ||
198 | unsigned long cr2; | ||
199 | unsigned long cr3; | ||
200 | unsigned long cr4; | ||
201 | unsigned long cr8; | ||
202 | u64 pdptrs[4]; /* pae */ | ||
203 | u64 shadow_efer; | ||
204 | u64 apic_base; | ||
205 | struct kvm_lapic *apic; /* kernel irqchip context */ | ||
206 | #define VCPU_MP_STATE_RUNNABLE 0 | ||
207 | #define VCPU_MP_STATE_UNINITIALIZED 1 | ||
208 | #define VCPU_MP_STATE_INIT_RECEIVED 2 | ||
209 | #define VCPU_MP_STATE_SIPI_RECEIVED 3 | ||
210 | #define VCPU_MP_STATE_HALTED 4 | ||
211 | int mp_state; | ||
212 | int sipi_vector; | ||
213 | u64 ia32_misc_enable_msr; | ||
214 | bool tpr_access_reporting; | ||
215 | |||
216 | struct kvm_mmu mmu; | ||
217 | |||
218 | struct kvm_mmu_memory_cache mmu_pte_chain_cache; | ||
219 | struct kvm_mmu_memory_cache mmu_rmap_desc_cache; | ||
220 | struct kvm_mmu_memory_cache mmu_page_cache; | ||
221 | struct kvm_mmu_memory_cache mmu_page_header_cache; | ||
222 | |||
223 | gfn_t last_pt_write_gfn; | ||
224 | int last_pt_write_count; | ||
225 | u64 *last_pte_updated; | ||
226 | |||
227 | struct { | ||
228 | gfn_t gfn; /* presumed gfn during guest pte update */ | ||
229 | struct page *page; /* page corresponding to that gfn */ | ||
230 | } update_pte; | ||
231 | |||
232 | struct i387_fxsave_struct host_fx_image; | ||
233 | struct i387_fxsave_struct guest_fx_image; | ||
234 | |||
235 | gva_t mmio_fault_cr2; | ||
236 | struct kvm_pio_request pio; | ||
237 | void *pio_data; | ||
238 | |||
239 | struct kvm_queued_exception { | ||
240 | bool pending; | ||
241 | bool has_error_code; | ||
242 | u8 nr; | ||
243 | u32 error_code; | ||
244 | } exception; | ||
245 | |||
246 | struct { | ||
247 | int active; | ||
248 | u8 save_iopl; | ||
249 | struct kvm_save_segment { | ||
250 | u16 selector; | ||
251 | unsigned long base; | ||
252 | u32 limit; | ||
253 | u32 ar; | ||
254 | } tr, es, ds, fs, gs; | ||
255 | } rmode; | ||
256 | int halt_request; /* real mode on Intel only */ | ||
257 | |||
258 | int cpuid_nent; | ||
259 | struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES]; | ||
260 | /* emulate context */ | ||
261 | |||
262 | struct x86_emulate_ctxt emulate_ctxt; | ||
263 | }; | ||
264 | |||
265 | struct kvm_mem_alias { | ||
266 | gfn_t base_gfn; | ||
267 | unsigned long npages; | ||
268 | gfn_t target_gfn; | ||
269 | }; | ||
270 | |||
271 | struct kvm_arch { | ||
272 | int naliases; | ||
273 | struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; | ||
274 | |||
275 | unsigned int n_free_mmu_pages; | ||
276 | unsigned int n_requested_mmu_pages; | ||
277 | unsigned int n_alloc_mmu_pages; | ||
278 | struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; | ||
279 | /* | ||
280 | * Hash table of struct kvm_mmu_page. | ||
281 | */ | ||
282 | struct list_head active_mmu_pages; | ||
283 | struct kvm_pic *vpic; | ||
284 | struct kvm_ioapic *vioapic; | ||
285 | |||
286 | int round_robin_prev_vcpu; | ||
287 | unsigned int tss_addr; | ||
288 | struct page *apic_access_page; | ||
289 | }; | ||
290 | |||
291 | struct kvm_vm_stat { | ||
292 | u32 mmu_shadow_zapped; | ||
293 | u32 mmu_pte_write; | ||
294 | u32 mmu_pte_updated; | ||
295 | u32 mmu_pde_zapped; | ||
296 | u32 mmu_flooded; | ||
297 | u32 mmu_recycled; | ||
298 | u32 mmu_cache_miss; | ||
299 | u32 remote_tlb_flush; | ||
300 | }; | ||
301 | |||
302 | struct kvm_vcpu_stat { | ||
303 | u32 pf_fixed; | ||
304 | u32 pf_guest; | ||
305 | u32 tlb_flush; | ||
306 | u32 invlpg; | ||
307 | |||
308 | u32 exits; | ||
309 | u32 io_exits; | ||
310 | u32 mmio_exits; | ||
311 | u32 signal_exits; | ||
312 | u32 irq_window_exits; | ||
313 | u32 halt_exits; | ||
314 | u32 halt_wakeup; | ||
315 | u32 request_irq_exits; | ||
316 | u32 irq_exits; | ||
317 | u32 host_state_reload; | ||
318 | u32 efer_reload; | ||
319 | u32 fpu_reload; | ||
320 | u32 insn_emulation; | ||
321 | u32 insn_emulation_fail; | ||
322 | }; | ||
323 | |||
324 | struct descriptor_table { | ||
325 | u16 limit; | ||
326 | unsigned long base; | ||
327 | } __attribute__((packed)); | ||
328 | |||
329 | struct kvm_x86_ops { | ||
330 | int (*cpu_has_kvm_support)(void); /* __init */ | ||
331 | int (*disabled_by_bios)(void); /* __init */ | ||
332 | void (*hardware_enable)(void *dummy); /* __init */ | ||
333 | void (*hardware_disable)(void *dummy); | ||
334 | void (*check_processor_compatibility)(void *rtn); | ||
335 | int (*hardware_setup)(void); /* __init */ | ||
336 | void (*hardware_unsetup)(void); /* __exit */ | ||
337 | bool (*cpu_has_accelerated_tpr)(void); | ||
338 | |||
339 | /* Create, but do not attach this VCPU */ | ||
340 | struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id); | ||
341 | void (*vcpu_free)(struct kvm_vcpu *vcpu); | ||
342 | int (*vcpu_reset)(struct kvm_vcpu *vcpu); | ||
343 | |||
344 | void (*prepare_guest_switch)(struct kvm_vcpu *vcpu); | ||
345 | void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); | ||
346 | void (*vcpu_put)(struct kvm_vcpu *vcpu); | ||
347 | void (*vcpu_decache)(struct kvm_vcpu *vcpu); | ||
348 | |||
349 | int (*set_guest_debug)(struct kvm_vcpu *vcpu, | ||
350 | struct kvm_debug_guest *dbg); | ||
351 | void (*guest_debug_pre)(struct kvm_vcpu *vcpu); | ||
352 | int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); | ||
353 | int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | ||
354 | u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); | ||
355 | void (*get_segment)(struct kvm_vcpu *vcpu, | ||
356 | struct kvm_segment *var, int seg); | ||
357 | void (*set_segment)(struct kvm_vcpu *vcpu, | ||
358 | struct kvm_segment *var, int seg); | ||
359 | void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); | ||
360 | void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); | ||
361 | void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); | ||
362 | void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); | ||
363 | void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); | ||
364 | void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); | ||
365 | void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | ||
366 | void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | ||
367 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | ||
368 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt); | ||
369 | unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); | ||
370 | void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, | ||
371 | int *exception); | ||
372 | void (*cache_regs)(struct kvm_vcpu *vcpu); | ||
373 | void (*decache_regs)(struct kvm_vcpu *vcpu); | ||
374 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); | ||
375 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); | ||
376 | |||
377 | void (*tlb_flush)(struct kvm_vcpu *vcpu); | ||
378 | |||
379 | void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
380 | int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); | ||
381 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | ||
382 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, | ||
383 | unsigned char *hypercall_addr); | ||
384 | int (*get_irq)(struct kvm_vcpu *vcpu); | ||
385 | void (*set_irq)(struct kvm_vcpu *vcpu, int vec); | ||
386 | void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr, | ||
387 | bool has_error_code, u32 error_code); | ||
388 | bool (*exception_injected)(struct kvm_vcpu *vcpu); | ||
389 | void (*inject_pending_irq)(struct kvm_vcpu *vcpu); | ||
390 | void (*inject_pending_vectors)(struct kvm_vcpu *vcpu, | ||
391 | struct kvm_run *run); | ||
392 | |||
393 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); | ||
394 | }; | ||
395 | |||
396 | extern struct kvm_x86_ops *kvm_x86_ops; | ||
397 | |||
398 | int kvm_mmu_module_init(void); | ||
399 | void kvm_mmu_module_exit(void); | ||
400 | |||
401 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu); | ||
402 | int kvm_mmu_create(struct kvm_vcpu *vcpu); | ||
403 | int kvm_mmu_setup(struct kvm_vcpu *vcpu); | ||
404 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); | ||
405 | |||
406 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | ||
407 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); | ||
408 | void kvm_mmu_zap_all(struct kvm *kvm); | ||
409 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); | ||
410 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); | ||
411 | |||
412 | enum emulation_result { | ||
413 | EMULATE_DONE, /* no further processing */ | ||
414 | EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ | ||
415 | EMULATE_FAIL, /* can't emulate this instruction */ | ||
416 | }; | ||
417 | |||
418 | #define EMULTYPE_NO_DECODE (1 << 0) | ||
419 | #define EMULTYPE_TRAP_UD (1 << 1) | ||
420 | int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
421 | unsigned long cr2, u16 error_code, int emulation_type); | ||
422 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context); | ||
423 | void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); | ||
424 | void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address); | ||
425 | void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, | ||
426 | unsigned long *rflags); | ||
427 | |||
428 | unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr); | ||
429 | void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, | ||
430 | unsigned long *rflags); | ||
431 | int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); | ||
432 | int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); | ||
433 | |||
434 | struct x86_emulate_ctxt; | ||
435 | |||
436 | int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | ||
437 | int size, unsigned port); | ||
438 | int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | ||
439 | int size, unsigned long count, int down, | ||
440 | gva_t address, int rep, unsigned port); | ||
441 | void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); | ||
442 | int kvm_emulate_halt(struct kvm_vcpu *vcpu); | ||
443 | int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); | ||
444 | int emulate_clts(struct kvm_vcpu *vcpu); | ||
445 | int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, | ||
446 | unsigned long *dest); | ||
447 | int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, | ||
448 | unsigned long value); | ||
449 | |||
450 | void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | ||
451 | void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0); | ||
452 | void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0); | ||
453 | void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); | ||
454 | unsigned long get_cr8(struct kvm_vcpu *vcpu); | ||
455 | void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); | ||
456 | void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | ||
457 | |||
458 | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | ||
459 | int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); | ||
460 | |||
461 | void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); | ||
462 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); | ||
463 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, | ||
464 | u32 error_code); | ||
465 | |||
466 | void fx_init(struct kvm_vcpu *vcpu); | ||
467 | |||
468 | int emulator_read_std(unsigned long addr, | ||
469 | void *val, | ||
470 | unsigned int bytes, | ||
471 | struct kvm_vcpu *vcpu); | ||
472 | int emulator_write_emulated(unsigned long addr, | ||
473 | const void *val, | ||
474 | unsigned int bytes, | ||
475 | struct kvm_vcpu *vcpu); | ||
476 | |||
477 | unsigned long segment_base(u16 selector); | ||
478 | |||
479 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); | ||
480 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | ||
481 | const u8 *new, int bytes); | ||
482 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); | ||
483 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | ||
484 | int kvm_mmu_load(struct kvm_vcpu *vcpu); | ||
485 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); | ||
486 | |||
487 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | ||
488 | |||
489 | int kvm_fix_hypercall(struct kvm_vcpu *vcpu); | ||
490 | |||
491 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); | ||
492 | |||
493 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); | ||
494 | int complete_pio(struct kvm_vcpu *vcpu); | ||
495 | |||
496 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | ||
497 | { | ||
498 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); | ||
499 | |||
500 | return (struct kvm_mmu_page *)page_private(page); | ||
501 | } | ||
502 | |||
503 | static inline u16 read_fs(void) | ||
504 | { | ||
505 | u16 seg; | ||
506 | asm("mov %%fs, %0" : "=g"(seg)); | ||
507 | return seg; | ||
508 | } | ||
509 | |||
510 | static inline u16 read_gs(void) | ||
511 | { | ||
512 | u16 seg; | ||
513 | asm("mov %%gs, %0" : "=g"(seg)); | ||
514 | return seg; | ||
515 | } | ||
516 | |||
517 | static inline u16 read_ldt(void) | ||
518 | { | ||
519 | u16 ldt; | ||
520 | asm("sldt %0" : "=g"(ldt)); | ||
521 | return ldt; | ||
522 | } | ||
523 | |||
524 | static inline void load_fs(u16 sel) | ||
525 | { | ||
526 | asm("mov %0, %%fs" : : "rm"(sel)); | ||
527 | } | ||
528 | |||
529 | static inline void load_gs(u16 sel) | ||
530 | { | ||
531 | asm("mov %0, %%gs" : : "rm"(sel)); | ||
532 | } | ||
533 | |||
534 | #ifndef load_ldt | ||
535 | static inline void load_ldt(u16 sel) | ||
536 | { | ||
537 | asm("lldt %0" : : "rm"(sel)); | ||
538 | } | ||
539 | #endif | ||
540 | |||
541 | static inline void get_idt(struct descriptor_table *table) | ||
542 | { | ||
543 | asm("sidt %0" : "=m"(*table)); | ||
544 | } | ||
545 | |||
546 | static inline void get_gdt(struct descriptor_table *table) | ||
547 | { | ||
548 | asm("sgdt %0" : "=m"(*table)); | ||
549 | } | ||
550 | |||
551 | static inline unsigned long read_tr_base(void) | ||
552 | { | ||
553 | u16 tr; | ||
554 | asm("str %0" : "=g"(tr)); | ||
555 | return segment_base(tr); | ||
556 | } | ||
557 | |||
558 | #ifdef CONFIG_X86_64 | ||
559 | static inline unsigned long read_msr(unsigned long msr) | ||
560 | { | ||
561 | u64 value; | ||
562 | |||
563 | rdmsrl(msr, value); | ||
564 | return value; | ||
565 | } | ||
566 | #endif | ||
567 | |||
568 | static inline void fx_save(struct i387_fxsave_struct *image) | ||
569 | { | ||
570 | asm("fxsave (%0)":: "r" (image)); | ||
571 | } | ||
572 | |||
573 | static inline void fx_restore(struct i387_fxsave_struct *image) | ||
574 | { | ||
575 | asm("fxrstor (%0)":: "r" (image)); | ||
576 | } | ||
577 | |||
578 | static inline void fpu_init(void) | ||
579 | { | ||
580 | asm("finit"); | ||
581 | } | ||
582 | |||
583 | static inline u32 get_rdx_init_val(void) | ||
584 | { | ||
585 | return 0x600; /* P6 family */ | ||
586 | } | ||
587 | |||
588 | static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) | ||
589 | { | ||
590 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); | ||
591 | } | ||
592 | |||
593 | #define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" | ||
594 | #define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" | ||
595 | #define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" | ||
596 | #define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" | ||
597 | #define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" | ||
598 | #define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" | ||
599 | #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" | ||
600 | #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" | ||
601 | #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" | ||
602 | |||
603 | #define MSR_IA32_TIME_STAMP_COUNTER 0x010 | ||
604 | |||
605 | #define TSS_IOPB_BASE_OFFSET 0x66 | ||
606 | #define TSS_BASE_SIZE 0x68 | ||
607 | #define TSS_IOPB_SIZE (65536 / 8) | ||
608 | #define TSS_REDIRECTION_SIZE (256 / 8) | ||
609 | #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | ||
610 | |||
611 | #endif | ||
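
Because kvm_mmu_page_role packs everything that distinguishes one shadow page from another into a single word, the (gfn, role.word) pair can key mmu_page_hash directly. A stand-alone sketch of that packing (the union mirrors the one above; the field values are made up for illustration, and the anonymous struct needs GNU C or C11):

#include <stdio.h>

union mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned access : 3;
	};
};

int main(void)
{
	union mmu_page_role a = { .word = 0 }, b = { .word = 0 };

	a.glevels = 4; a.level = 2;                /* 4-level guest, level-2 shadow */
	b.glevels = 4; b.level = 2; b.quadrant = 1;

	/* different roles give different words, so the same gfn can be
	 * shadowed once per role */
	printf("a=%#x b=%#x distinct=%d\n", a.word, b.word, a.word != b.word);
	return 0;
}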
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h new file mode 100644 index 000000000000..c6f3fd8d8c53 --- /dev/null +++ b/include/asm-x86/kvm_para.h | |||
@@ -0,0 +1,105 @@ | |||
1 | #ifndef __X86_KVM_PARA_H | ||
2 | #define __X86_KVM_PARA_H | ||
3 | |||
4 | /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It | ||
5 | * should be used to determine whether a VM is running under KVM. | ||
6 | */ | ||
7 | #define KVM_CPUID_SIGNATURE 0x40000000 | ||
8 | |||
9 | /* This CPUID returns a feature bitmap in eax. Before enabling a particular | ||
10 | * paravirtualization, the appropriate feature bit should be checked. | ||
11 | */ | ||
12 | #define KVM_CPUID_FEATURES 0x40000001 | ||
13 | |||
14 | #ifdef __KERNEL__ | ||
15 | #include <asm/processor.h> | ||
16 | |||
17 | /* This instruction is vmcall. On non-VT architectures, it will generate a | ||
18 | * trap that we will then rewrite to the appropriate instruction. | ||
19 | */ | ||
20 | #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" | ||
21 | |||
22 | /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall | ||
23 | * instruction. The hypervisor may replace it with something else but only the | ||
24 | * instructions are guaranteed to be supported. | ||
25 | * | ||
26 | * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively. | ||
27 | * The hypercall number should be placed in rax and the return value will be | ||
28 | * placed in rax. No other registers will be clobbered unless explicitly | ||
29 | * noted by the particular hypercall. | ||
30 | */ | ||
31 | |||
32 | static inline long kvm_hypercall0(unsigned int nr) | ||
33 | { | ||
34 | long ret; | ||
35 | asm volatile(KVM_HYPERCALL | ||
36 | : "=a"(ret) | ||
37 | : "a"(nr)); | ||
38 | return ret; | ||
39 | } | ||
40 | |||
41 | static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) | ||
42 | { | ||
43 | long ret; | ||
44 | asm volatile(KVM_HYPERCALL | ||
45 | : "=a"(ret) | ||
46 | : "a"(nr), "b"(p1)); | ||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, | ||
51 | unsigned long p2) | ||
52 | { | ||
53 | long ret; | ||
54 | asm volatile(KVM_HYPERCALL | ||
55 | : "=a"(ret) | ||
56 | : "a"(nr), "b"(p1), "c"(p2)); | ||
57 | return ret; | ||
58 | } | ||
59 | |||
60 | static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, | ||
61 | unsigned long p2, unsigned long p3) | ||
62 | { | ||
63 | long ret; | ||
64 | asm volatile(KVM_HYPERCALL | ||
65 | : "=a"(ret) | ||
66 | : "a"(nr), "b"(p1), "c"(p2), "d"(p3)); | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | ||
71 | unsigned long p2, unsigned long p3, | ||
72 | unsigned long p4) | ||
73 | { | ||
74 | long ret; | ||
75 | asm volatile(KVM_HYPERCALL | ||
76 | : "=a"(ret) | ||
77 | : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)); | ||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | static inline int kvm_para_available(void) | ||
82 | { | ||
83 | unsigned int eax, ebx, ecx, edx; | ||
84 | char signature[13]; | ||
85 | |||
86 | cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); | ||
87 | memcpy(signature + 0, &ebx, 4); | ||
88 | memcpy(signature + 4, &ecx, 4); | ||
89 | memcpy(signature + 8, &edx, 4); | ||
90 | signature[12] = 0; | ||
91 | |||
92 | if (strcmp(signature, "KVMKVMKVM") == 0) | ||
93 | return 1; | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static inline unsigned int kvm_arch_para_features(void) | ||
99 | { | ||
100 | return cpuid_eax(KVM_CPUID_FEATURES); | ||
101 | } | ||
102 | |||
103 | #endif | ||
104 | |||
105 | #endif | ||
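
Tying the two halves of this header together, guest code is expected to probe for the KVM signature before issuing hypercalls. A hedged guest-kernel sketch (the hypercall number 0 is purely illustrative, not a defined KVM hypercall):

#include <linux/kernel.h>
#include <linux/kvm_para.h>

static void kvm_probe_example(void)
{
	long ret;

	if (!kvm_para_available())
		return;                 /* not running under KVM */

	/* nr goes in rax and the return value comes back in rax, per the
	 * calling-convention comment above */
	ret = kvm_hypercall0(0);        /* illustrative hypercall number */
	printk(KERN_INFO "hypercall returned %ld\n", ret);
}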
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h new file mode 100644 index 000000000000..7db91b9bdcd4 --- /dev/null +++ b/include/asm-x86/kvm_x86_emulate.h | |||
@@ -0,0 +1,186 @@ | |||
1 | /****************************************************************************** | ||
2 | * x86_emulate.h | ||
3 | * | ||
4 | * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. | ||
5 | * | ||
6 | * Copyright (c) 2005 Keir Fraser | ||
7 | * | ||
8 | * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 | ||
9 | */ | ||
10 | |||
11 | #ifndef __X86_EMULATE_H__ | ||
12 | #define __X86_EMULATE_H__ | ||
13 | |||
14 | struct x86_emulate_ctxt; | ||
15 | |||
16 | /* | ||
17 | * x86_emulate_ops: | ||
18 | * | ||
19 | * These operations represent the instruction emulator's interface to memory. | ||
20 | * There are two categories of operation: those that act on ordinary memory | ||
21 | * regions (*_std), and those that act on memory regions known to require | ||
22 | * special treatment or emulation (*_emulated). | ||
23 | * | ||
24 | * The emulator assumes that an instruction accesses only one 'emulated memory' | ||
25 | * location, that this location is the given linear faulting address (cr2), and | ||
26 | * that this is one of the instruction's data operands. Instruction fetches and | ||
27 | * stack operations are assumed never to access emulated memory. The emulator | ||
28 | * automatically deduces which operand of a string-move operation is accessing | ||
29 | * emulated memory, and assumes that the other operand accesses normal memory. | ||
30 | * | ||
31 | * NOTES: | ||
32 | * 1. The emulator isn't very smart about emulated vs. standard memory. | ||
33 | * 'Emulated memory' access addresses should be checked for sanity. | ||
34 | * 'Normal memory' accesses may fault, and the caller must arrange to | ||
35 | * detect and handle reentrancy into the emulator via recursive faults. | ||
36 | * Accesses may be unaligned and may cross page boundaries. | ||
37 | * 2. If the access fails (cannot emulate, or a standard access faults) then | ||
38 | * it is up to the memop to propagate the fault to the guest VM via | ||
39 | * some out-of-band mechanism, unknown to the emulator. The memop signals | ||
40 | * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will | ||
41 | * then immediately bail. | ||
42 | * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only | ||
43 | * cmpxchg8b_emulated need support 8-byte accesses. | ||
44 | * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system. | ||
45 | */ | ||
46 | /* Access completed successfully: continue emulation as normal. */ | ||
47 | #define X86EMUL_CONTINUE 0 | ||
48 | /* Access is unhandleable: bail from emulation and return error to caller. */ | ||
49 | #define X86EMUL_UNHANDLEABLE 1 | ||
50 | /* Terminate emulation but return success to the caller. */ | ||
51 | #define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */ | ||
52 | #define X86EMUL_RETRY_INSTR 2 /* retry the instruction for some reason */ | ||
53 | #define X86EMUL_CMPXCHG_FAILED 2 /* cmpxchg did not see expected value */ | ||
54 | struct x86_emulate_ops { | ||
55 | /* | ||
56 | * read_std: Read bytes of standard (non-emulated/special) memory. | ||
57 | * Used for instruction fetch, stack operations, and others. | ||
58 | * @addr: [IN ] Linear address from which to read. | ||
59 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | ||
60 | * @bytes: [IN ] Number of bytes to read from memory. | ||
61 | */ | ||
62 | int (*read_std)(unsigned long addr, void *val, | ||
63 | unsigned int bytes, struct kvm_vcpu *vcpu); | ||
64 | |||
65 | /* | ||
66 | * read_emulated: Read bytes from emulated/special memory area. | ||
67 | * @addr: [IN ] Linear address from which to read. | ||
68 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | ||
69 | * @bytes: [IN ] Number of bytes to read from memory. | ||
70 | */ | ||
71 | int (*read_emulated) (unsigned long addr, | ||
72 | void *val, | ||
73 | unsigned int bytes, | ||
74 | struct kvm_vcpu *vcpu); | ||
75 | |||
76 | /* | ||
77 | * write_emulated: Write bytes to emulated/special memory area. | ||
78 | * @addr: [IN ] Linear address to which to write. | ||
79 | * @val: [IN ] Value to write to memory (low-order bytes used as | ||
80 | * required). | ||
81 | * @bytes: [IN ] Number of bytes to write to memory. | ||
82 | */ | ||
83 | int (*write_emulated) (unsigned long addr, | ||
84 | const void *val, | ||
85 | unsigned int bytes, | ||
86 | struct kvm_vcpu *vcpu); | ||
87 | |||
88 | /* | ||
89 | * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an | ||
90 | * emulated/special memory area. | ||
91 | * @addr: [IN ] Linear address to access. | ||
92 | * @old: [IN ] Value expected to be current at @addr. | ||
93 | * @new: [IN ] Value to write to @addr. | ||
94 | * @bytes: [IN ] Number of bytes to access using CMPXCHG. | ||
95 | */ | ||
96 | int (*cmpxchg_emulated) (unsigned long addr, | ||
97 | const void *old, | ||
98 | const void *new, | ||
99 | unsigned int bytes, | ||
100 | struct kvm_vcpu *vcpu); | ||
101 | |||
102 | }; | ||
103 | |||
104 | /* Type, address-of, and value of an instruction's operand. */ | ||
105 | struct operand { | ||
106 | enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; | ||
107 | unsigned int bytes; | ||
108 | unsigned long val, orig_val, *ptr; | ||
109 | }; | ||
110 | |||
111 | struct fetch_cache { | ||
112 | u8 data[15]; | ||
113 | unsigned long start; | ||
114 | unsigned long end; | ||
115 | }; | ||
116 | |||
117 | struct decode_cache { | ||
118 | u8 twobyte; | ||
119 | u8 b; | ||
120 | u8 lock_prefix; | ||
121 | u8 rep_prefix; | ||
122 | u8 op_bytes; | ||
123 | u8 ad_bytes; | ||
124 | u8 rex_prefix; | ||
125 | struct operand src; | ||
126 | struct operand dst; | ||
127 | unsigned long *override_base; | ||
128 | unsigned int d; | ||
129 | unsigned long regs[NR_VCPU_REGS]; | ||
130 | unsigned long eip; | ||
131 | /* modrm */ | ||
132 | u8 modrm; | ||
133 | u8 modrm_mod; | ||
134 | u8 modrm_reg; | ||
135 | u8 modrm_rm; | ||
136 | u8 use_modrm_ea; | ||
137 | unsigned long modrm_ea; | ||
138 | unsigned long modrm_val; | ||
139 | struct fetch_cache fetch; | ||
140 | }; | ||
141 | |||
142 | struct x86_emulate_ctxt { | ||
143 | /* Register state before/after emulation. */ | ||
144 | struct kvm_vcpu *vcpu; | ||
145 | |||
146 | /* EFLAGS register state before/after emulation. */ | ||
147 | unsigned long eflags; | ||
148 | |||
149 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | ||
150 | int mode; | ||
151 | |||
152 | unsigned long cs_base; | ||
153 | unsigned long ds_base; | ||
154 | unsigned long es_base; | ||
155 | unsigned long ss_base; | ||
156 | unsigned long gs_base; | ||
157 | unsigned long fs_base; | ||
158 | |||
159 | /* decode cache */ | ||
160 | |||
161 | struct decode_cache decode; | ||
162 | }; | ||
163 | |||
164 | /* Repeat String Operation Prefix */ | ||
165 | #define REPE_PREFIX 1 | ||
166 | #define REPNE_PREFIX 2 | ||
167 | |||
168 | /* Execution mode, passed to the emulator. */ | ||
169 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | ||
170 | #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */ | ||
171 | #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */ | ||
172 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | ||
173 | |||
174 | /* Host execution mode. */ | ||
175 | #if defined(__i386__) | ||
176 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 | ||
177 | #elif defined(CONFIG_X86_64) | ||
178 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 | ||
179 | #endif | ||
180 | |||
181 | int x86_decode_insn(struct x86_emulate_ctxt *ctxt, | ||
182 | struct x86_emulate_ops *ops); | ||
183 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, | ||
184 | struct x86_emulate_ops *ops); | ||
185 | |||
186 | #endif /* __X86_EMULATE_H__ */ | ||
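
To make the interface above concrete, here is a minimal sketch of a backend. The helpers map_guest_linear() and mmio_read() are hypothetical stand-ins for KVM's real guest-memory walk and MMIO dispatch; neither name comes from this header.

#include <linux/string.h>	/* memcpy */

/* Sketch only: the demo_* functions and the map/mmio helpers are
 * illustrative, not part of the kernel tree. */
static int demo_read_std(unsigned long addr, void *val,
			 unsigned int bytes, struct kvm_vcpu *vcpu)
{
	void *host = map_guest_linear(vcpu, addr, bytes);	/* hypothetical */

	if (!host)
		return X86EMUL_PROPAGATE_FAULT;	/* fault goes back to the guest */
	memcpy(val, host, bytes);
	return X86EMUL_CONTINUE;
}

static int demo_read_emulated(unsigned long addr, void *val,
			      unsigned int bytes, struct kvm_vcpu *vcpu)
{
	/* Emulated memory is MMIO here: forward to the device model. */
	if (mmio_read(vcpu, addr, val, bytes))			/* hypothetical */
		return X86EMUL_UNHANDLEABLE;
	return X86EMUL_CONTINUE;
}

static struct x86_emulate_ops demo_ops = {
	.read_std	= demo_read_std,
	.read_emulated	= demo_read_emulated,
	/* .write_emulated and .cmpxchg_emulated follow the same pattern. */
};

A caller would fill a struct x86_emulate_ctxt with the vcpu, mode, and segment bases, then run x86_decode_insn() followed by x86_emulate_insn(), stopping as soon as either returns anything other than X86EMUL_CONTINUE.
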
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index ccd338460811..4d9367b72976 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h | |||
@@ -44,19 +44,19 @@ struct lguest_ro_state | |||
44 | { | 44 | { |
45 | /* Host information we need to restore when we switch back. */ | 45 | /* Host information we need to restore when we switch back. */ |
46 | u32 host_cr3; | 46 | u32 host_cr3; |
47 | struct Xgt_desc_struct host_idt_desc; | 47 | struct desc_ptr host_idt_desc; |
48 | struct Xgt_desc_struct host_gdt_desc; | 48 | struct desc_ptr host_gdt_desc; |
49 | u32 host_sp; | 49 | u32 host_sp; |
50 | 50 | ||
51 | /* Fields which are used when guest is running. */ | 51 | /* Fields which are used when guest is running. */ |
52 | struct Xgt_desc_struct guest_idt_desc; | 52 | struct desc_ptr guest_idt_desc; |
53 | struct Xgt_desc_struct guest_gdt_desc; | 53 | struct desc_ptr guest_gdt_desc; |
54 | struct i386_hw_tss guest_tss; | 54 | struct x86_hw_tss guest_tss; |
55 | struct desc_struct guest_idt[IDT_ENTRIES]; | 55 | struct desc_struct guest_idt[IDT_ENTRIES]; |
56 | struct desc_struct guest_gdt[GDT_ENTRIES]; | 56 | struct desc_struct guest_gdt[GDT_ENTRIES]; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct lguest_arch | 59 | struct lg_cpu_arch |
60 | { | 60 | { |
61 | /* The GDT entries copied into lguest_ro_state when running. */ | 61 | /* The GDT entries copied into lguest_ro_state when running. */ |
62 | struct desc_struct gdt[GDT_ENTRIES]; | 62 | struct desc_struct gdt[GDT_ENTRIES]; |
@@ -78,8 +78,8 @@ static inline void lguest_set_ts(void) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /* Full 4G segment descriptors, suitable for CS and DS. */ | 80 | /* Full 4G segment descriptors, suitable for CS and DS. */ |
81 | #define FULL_EXEC_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9b00}) | 81 | #define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } }) |
82 | #define FULL_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9300}) | 82 | #define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } }) |
83 | 83 | ||
84 | #endif /* __ASSEMBLY__ */ | 84 | #endif /* __ASSEMBLY__ */ |
85 | 85 | ||
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index 2091779e91fb..758b9a5d4539 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | #define LHCALL_FLUSH_ASYNC 0 | 5 | #define LHCALL_FLUSH_ASYNC 0 |
6 | #define LHCALL_LGUEST_INIT 1 | 6 | #define LHCALL_LGUEST_INIT 1 |
7 | #define LHCALL_CRASH 2 | 7 | #define LHCALL_SHUTDOWN 2 |
8 | #define LHCALL_LOAD_GDT 3 | 8 | #define LHCALL_LOAD_GDT 3 |
9 | #define LHCALL_NEW_PGTABLE 4 | 9 | #define LHCALL_NEW_PGTABLE 4 |
10 | #define LHCALL_FLUSH_TLB 5 | 10 | #define LHCALL_FLUSH_TLB 5 |
@@ -20,6 +20,10 @@ | |||
20 | 20 | ||
21 | #define LGUEST_TRAP_ENTRY 0x1F | 21 | #define LGUEST_TRAP_ENTRY 0x1F |
22 | 22 | ||
23 | /* Argument number 3 to LHCALL_SHUTDOWN */ | ||
24 | #define LGUEST_SHUTDOWN_POWEROFF 1 | ||
25 | #define LGUEST_SHUTDOWN_RESTART 2 | ||
26 | |||
23 | #ifndef __ASSEMBLY__ | 27 | #ifndef __ASSEMBLY__ |
24 | #include <asm/hw_irq.h> | 28 | #include <asm/hw_irq.h> |
25 | 29 | ||
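
As a usage illustration, a guest's power-off path might issue the new shutdown call as sketched below. This assumes the hcall() wrapper defined further down in this header (call number in %eax, arguments in %edx/%ebx/%ecx, trap via int $0x1f), which is not visible in this hunk.

/* Sketch: ask the host to power us off. hcall() is assumed from
 * elsewhere in lguest_hcall.h; the message string is illustrative. */
static void demo_lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
}
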
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index 94b257fa8701..31739c7d66a9 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h | |||
@@ -1,5 +1,25 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | #ifdef CONFIG_X86_64 | ||
5 | #define __ALIGN .p2align 4,,15 | ||
6 | #define __ALIGN_STR ".p2align 4,,15" | ||
7 | #endif | ||
8 | |||
1 | #ifdef CONFIG_X86_32 | 9 | #ifdef CONFIG_X86_32 |
2 | # include "linkage_32.h" | 10 | #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) |
3 | #else | 11 | #define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) |
4 | # include "linkage_64.h" | 12 | /* |
13 | * For 32-bit UML - mark functions implemented in assembly that use | ||
14 | * regparm input parameters: | ||
15 | */ | ||
16 | #define asmregparm __attribute__((regparm(3))) | ||
17 | #endif | ||
18 | |||
19 | #ifdef CONFIG_X86_ALIGNMENT_16 | ||
20 | #define __ALIGN .align 16,0x90 | ||
21 | #define __ALIGN_STR ".align 16,0x90" | ||
22 | #endif | ||
23 | |||
5 | #endif | 24 | #endif |
25 | |||
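
A brief, hypothetical example of what these annotations buy on 32-bit: asmlinkage forces all arguments onto the stack so hand-written entry assembly can push them explicitly, while asmregparm declares that an assembly routine takes its first three arguments in registers. Neither function below exists in the tree.

#include <asm/ptrace.h>		/* struct pt_regs */

/* Hypothetical: entry code pushes pt_regs on the stack, so the C
 * handler must use the stack calling convention (regparm(0)). */
asmlinkage void do_demo_trap(struct pt_regs *regs)
{
	/* 'regs' arrived on the stack, not in %eax/%edx/%ecx */
}

/* Hypothetical: implemented in .S with arguments in registers. */
asmregparm long demo_asm_helper(long a, long b, long c);
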
diff --git a/include/asm-x86/linkage_32.h b/include/asm-x86/linkage_32.h deleted file mode 100644 index f4a6ebac0247..000000000000 --- a/include/asm-x86/linkage_32.h +++ /dev/null | |||
@@ -1,15 +0,0 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) | ||
5 | #define FASTCALL(x) x __attribute__((regparm(3))) | ||
6 | #define fastcall __attribute__((regparm(3))) | ||
7 | |||
8 | #define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) | ||
9 | |||
10 | #ifdef CONFIG_X86_ALIGNMENT_16 | ||
11 | #define __ALIGN .align 16,0x90 | ||
12 | #define __ALIGN_STR ".align 16,0x90" | ||
13 | #endif | ||
14 | |||
15 | #endif | ||
diff --git a/include/asm-x86/linkage_64.h b/include/asm-x86/linkage_64.h deleted file mode 100644 index b5f39d0189ce..000000000000 --- a/include/asm-x86/linkage_64.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | #define __ALIGN .p2align 4,,15 | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index c7a1b1c66c96..f852c62b3319 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h | |||
@@ -1,5 +1,240 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ARCH_LOCAL_H |
2 | # include "local_32.h" | 2 | #define _ARCH_LOCAL_H |
3 | #else | 3 | |
4 | # include "local_64.h" | 4 | #include <linux/percpu.h> |
5 | |||
6 | #include <asm/system.h> | ||
7 | #include <asm/atomic.h> | ||
8 | #include <asm/asm.h> | ||
9 | |||
10 | typedef struct { | ||
11 | atomic_long_t a; | ||
12 | } local_t; | ||
13 | |||
14 | #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } | ||
15 | |||
16 | #define local_read(l) atomic_long_read(&(l)->a) | ||
17 | #define local_set(l, i) atomic_long_set(&(l)->a, (i)) | ||
18 | |||
19 | static inline void local_inc(local_t *l) | ||
20 | { | ||
21 | __asm__ __volatile__( | ||
22 | _ASM_INC "%0" | ||
23 | :"+m" (l->a.counter)); | ||
24 | } | ||
25 | |||
26 | static inline void local_dec(local_t *l) | ||
27 | { | ||
28 | __asm__ __volatile__( | ||
29 | _ASM_DEC "%0" | ||
30 | :"+m" (l->a.counter)); | ||
31 | } | ||
32 | |||
33 | static inline void local_add(long i, local_t *l) | ||
34 | { | ||
35 | __asm__ __volatile__( | ||
36 | _ASM_ADD "%1,%0" | ||
37 | :"+m" (l->a.counter) | ||
38 | :"ir" (i)); | ||
39 | } | ||
40 | |||
41 | static inline void local_sub(long i, local_t *l) | ||
42 | { | ||
43 | __asm__ __volatile__( | ||
44 | _ASM_SUB "%1,%0" | ||
45 | :"+m" (l->a.counter) | ||
46 | :"ir" (i)); | ||
47 | } | ||
48 | |||
49 | /** | ||
50 | * local_sub_and_test - subtract value from variable and test result | ||
51 | * @i: integer value to subtract | ||
52 | * @l: pointer to type local_t | ||
53 | * | ||
54 | * Atomically subtracts @i from @l and returns | ||
55 | * true if the result is zero, or false for all | ||
56 | * other cases. | ||
57 | */ | ||
58 | static inline int local_sub_and_test(long i, local_t *l) | ||
59 | { | ||
60 | unsigned char c; | ||
61 | |||
62 | __asm__ __volatile__( | ||
63 | _ASM_SUB "%2,%0; sete %1" | ||
64 | :"+m" (l->a.counter), "=qm" (c) | ||
65 | :"ir" (i) : "memory"); | ||
66 | return c; | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * local_dec_and_test - decrement and test | ||
71 | * @l: pointer to type local_t | ||
72 | * | ||
73 | * Atomically decrements @l by 1 and | ||
74 | * returns true if the result is 0, or false for all other | ||
75 | * cases. | ||
76 | */ | ||
77 | static inline int local_dec_and_test(local_t *l) | ||
78 | { | ||
79 | unsigned char c; | ||
80 | |||
81 | __asm__ __volatile__( | ||
82 | _ASM_DEC "%0; sete %1" | ||
83 | :"+m" (l->a.counter), "=qm" (c) | ||
84 | : : "memory"); | ||
85 | return c != 0; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * local_inc_and_test - increment and test | ||
90 | * @l: pointer to type local_t | ||
91 | * | ||
92 | * Atomically increments @l by 1 | ||
93 | * and returns true if the result is zero, or false for all | ||
94 | * other cases. | ||
95 | */ | ||
96 | static inline int local_inc_and_test(local_t *l) | ||
97 | { | ||
98 | unsigned char c; | ||
99 | |||
100 | __asm__ __volatile__( | ||
101 | _ASM_INC "%0; sete %1" | ||
102 | :"+m" (l->a.counter), "=qm" (c) | ||
103 | : : "memory"); | ||
104 | return c != 0; | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * local_add_negative - add and test if negative | ||
109 | * @i: integer value to add | ||
110 | * @l: pointer to type local_t | ||
111 | * | ||
112 | * Atomically adds @i to @l and returns true | ||
113 | * if the result is negative, or false when | ||
114 | * result is greater than or equal to zero. | ||
115 | */ | ||
116 | static inline int local_add_negative(long i, local_t *l) | ||
117 | { | ||
118 | unsigned char c; | ||
119 | |||
120 | __asm__ __volatile__( | ||
121 | _ASM_ADD "%2,%0; sets %1" | ||
122 | :"+m" (l->a.counter), "=qm" (c) | ||
123 | :"ir" (i) : "memory"); | ||
124 | return c; | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * local_add_return - add and return | ||
129 | * @i: integer value to add | ||
130 | * @l: pointer to type local_t | ||
131 | * | ||
132 | * Atomically adds @i to @l and returns @i + @l | ||
133 | */ | ||
134 | static inline long local_add_return(long i, local_t *l) | ||
135 | { | ||
136 | long __i; | ||
137 | #ifdef CONFIG_M386 | ||
138 | unsigned long flags; | ||
139 | if (unlikely(boot_cpu_data.x86 <= 3)) | ||
140 | goto no_xadd; | ||
5 | #endif | 141 | #endif |
142 | /* Modern 486+ processor */ | ||
143 | __i = i; | ||
144 | __asm__ __volatile__( | ||
145 | _ASM_XADD "%0, %1;" | ||
146 | :"+r" (i), "+m" (l->a.counter) | ||
147 | : : "memory"); | ||
148 | return i + __i; | ||
149 | |||
150 | #ifdef CONFIG_M386 | ||
151 | no_xadd: /* Legacy 386 processor */ | ||
152 | local_irq_save(flags); | ||
153 | __i = local_read(l); | ||
154 | local_set(l, i + __i); | ||
155 | local_irq_restore(flags); | ||
156 | return i + __i; | ||
157 | #endif | ||
158 | } | ||
159 | |||
160 | static inline long local_sub_return(long i, local_t *l) | ||
161 | { | ||
162 | return local_add_return(-i, l); | ||
163 | } | ||
164 | |||
165 | #define local_inc_return(l) (local_add_return(1, l)) | ||
166 | #define local_dec_return(l) (local_sub_return(1, l)) | ||
167 | |||
168 | #define local_cmpxchg(l, o, n) \ | ||
169 | (cmpxchg_local(&((l)->a.counter), (o), (n))) | ||
170 | /* Always has a lock prefix */ | ||
171 | #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) | ||
172 | |||
173 | /** | ||
174 | * local_add_unless - add unless the number is a given value | ||
175 | * @l: pointer of type local_t | ||
176 | * @a: the amount to add to l... | ||
177 | * @u: ...unless l is equal to u. | ||
178 | * | ||
179 | * Atomically adds @a to @l, so long as it was not @u. | ||
180 | * Returns non-zero if @l was not @u, and zero otherwise. | ||
181 | */ | ||
182 | #define local_add_unless(l, a, u) \ | ||
183 | ({ \ | ||
184 | long c, old; \ | ||
185 | c = local_read(l); \ | ||
186 | for (;;) { \ | ||
187 | if (unlikely(c == (u))) \ | ||
188 | break; \ | ||
189 | old = local_cmpxchg((l), c, c + (a)); \ | ||
190 | if (likely(old == c)) \ | ||
191 | break; \ | ||
192 | c = old; \ | ||
193 | } \ | ||
194 | c != (u); \ | ||
195 | }) | ||
196 | #define local_inc_not_zero(l) local_add_unless((l), 1, 0) | ||
197 | |||
198 | /* On x86_32, these are no better than the atomic variants. | ||
199 | * On x86-64, these are better than the atomic variants on SMP kernels | ||
200 | * because they don't use a lock prefix. | ||
201 | */ | ||
202 | #define __local_inc(l) local_inc(l) | ||
203 | #define __local_dec(l) local_dec(l) | ||
204 | #define __local_add(i, l) local_add((i), (l)) | ||
205 | #define __local_sub(i, l) local_sub((i), (l)) | ||
206 | |||
207 | /* Use these for per-cpu local_t variables: on some archs they are | ||
208 | * much more efficient than these naive implementations. Note they take | ||
209 | * a variable, not an address. | ||
210 | * | ||
211 | * X86_64: This could be done better if we moved the per cpu data directly | ||
212 | * after GS. | ||
213 | */ | ||
214 | |||
215 | /* Need to disable preemption for the cpu local counters; otherwise we could | ||
216 | still access a variable of a previous CPU in a non-atomic way. */ | ||
217 | #define cpu_local_wrap_v(l) \ | ||
218 | ({ local_t res__; \ | ||
219 | preempt_disable(); \ | ||
220 | res__ = (l); \ | ||
221 | preempt_enable(); \ | ||
222 | res__; }) | ||
223 | #define cpu_local_wrap(l) \ | ||
224 | ({ preempt_disable(); \ | ||
225 | l; \ | ||
226 | preempt_enable(); }) \ | ||
227 | |||
228 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | ||
229 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | ||
230 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | ||
231 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | ||
232 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | ||
233 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | ||
234 | |||
235 | #define __cpu_local_inc(l) cpu_local_inc(l) | ||
236 | #define __cpu_local_dec(l) cpu_local_dec(l) | ||
237 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | ||
238 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | ||
239 | |||
240 | #endif /* _ARCH_LOCAL_H */ | ||
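
A short usage sketch for the unified API, with an illustrative counter name: per-CPU statistics are the intended use, since only the owning CPU updates the value and no lock prefix is needed.

#include <linux/percpu.h>
#include <asm/local.h>

/* Illustrative per-CPU event counter. */
static DEFINE_PER_CPU(local_t, demo_events) = LOCAL_INIT(0);

static void demo_count_event(void)
{
	/* cpu_local_inc() wraps the update in preempt_disable()/enable()
	 * so we cannot be migrated and bump another CPU's counter. */
	cpu_local_inc(demo_events);
}

static long demo_read_events(void)
{
	return cpu_local_read(demo_events);
}
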
diff --git a/include/asm-x86/local_32.h b/include/asm-x86/local_32.h deleted file mode 100644 index 6e85975b9ed2..000000000000 --- a/include/asm-x86/local_32.h +++ /dev/null | |||
@@ -1,233 +0,0 @@ | |||
1 | #ifndef _ARCH_I386_LOCAL_H | ||
2 | #define _ARCH_I386_LOCAL_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | #include <asm/system.h> | ||
6 | #include <asm/atomic.h> | ||
7 | |||
8 | typedef struct | ||
9 | { | ||
10 | atomic_long_t a; | ||
11 | } local_t; | ||
12 | |||
13 | #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } | ||
14 | |||
15 | #define local_read(l) atomic_long_read(&(l)->a) | ||
16 | #define local_set(l,i) atomic_long_set(&(l)->a, (i)) | ||
17 | |||
18 | static __inline__ void local_inc(local_t *l) | ||
19 | { | ||
20 | __asm__ __volatile__( | ||
21 | "incl %0" | ||
22 | :"+m" (l->a.counter)); | ||
23 | } | ||
24 | |||
25 | static __inline__ void local_dec(local_t *l) | ||
26 | { | ||
27 | __asm__ __volatile__( | ||
28 | "decl %0" | ||
29 | :"+m" (l->a.counter)); | ||
30 | } | ||
31 | |||
32 | static __inline__ void local_add(long i, local_t *l) | ||
33 | { | ||
34 | __asm__ __volatile__( | ||
35 | "addl %1,%0" | ||
36 | :"+m" (l->a.counter) | ||
37 | :"ir" (i)); | ||
38 | } | ||
39 | |||
40 | static __inline__ void local_sub(long i, local_t *l) | ||
41 | { | ||
42 | __asm__ __volatile__( | ||
43 | "subl %1,%0" | ||
44 | :"+m" (l->a.counter) | ||
45 | :"ir" (i)); | ||
46 | } | ||
47 | |||
48 | /** | ||
49 | * local_sub_and_test - subtract value from variable and test result | ||
50 | * @i: integer value to subtract | ||
51 | * @l: pointer of type local_t | ||
52 | * | ||
53 | * Atomically subtracts @i from @l and returns | ||
54 | * true if the result is zero, or false for all | ||
55 | * other cases. | ||
56 | */ | ||
57 | static __inline__ int local_sub_and_test(long i, local_t *l) | ||
58 | { | ||
59 | unsigned char c; | ||
60 | |||
61 | __asm__ __volatile__( | ||
62 | "subl %2,%0; sete %1" | ||
63 | :"+m" (l->a.counter), "=qm" (c) | ||
64 | :"ir" (i) : "memory"); | ||
65 | return c; | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * local_dec_and_test - decrement and test | ||
70 | * @l: pointer of type local_t | ||
71 | * | ||
72 | * Atomically decrements @l by 1 and | ||
73 | * returns true if the result is 0, or false for all other | ||
74 | * cases. | ||
75 | */ | ||
76 | static __inline__ int local_dec_and_test(local_t *l) | ||
77 | { | ||
78 | unsigned char c; | ||
79 | |||
80 | __asm__ __volatile__( | ||
81 | "decl %0; sete %1" | ||
82 | :"+m" (l->a.counter), "=qm" (c) | ||
83 | : : "memory"); | ||
84 | return c != 0; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * local_inc_and_test - increment and test | ||
89 | * @l: pointer of type local_t | ||
90 | * | ||
91 | * Atomically increments @l by 1 | ||
92 | * and returns true if the result is zero, or false for all | ||
93 | * other cases. | ||
94 | */ | ||
95 | static __inline__ int local_inc_and_test(local_t *l) | ||
96 | { | ||
97 | unsigned char c; | ||
98 | |||
99 | __asm__ __volatile__( | ||
100 | "incl %0; sete %1" | ||
101 | :"+m" (l->a.counter), "=qm" (c) | ||
102 | : : "memory"); | ||
103 | return c != 0; | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * local_add_negative - add and test if negative | ||
108 | * @l: pointer of type local_t | ||
109 | * @i: integer value to add | ||
110 | * | ||
111 | * Atomically adds @i to @l and returns true | ||
112 | * if the result is negative, or false when | ||
113 | * result is greater than or equal to zero. | ||
114 | */ | ||
115 | static __inline__ int local_add_negative(long i, local_t *l) | ||
116 | { | ||
117 | unsigned char c; | ||
118 | |||
119 | __asm__ __volatile__( | ||
120 | "addl %2,%0; sets %1" | ||
121 | :"+m" (l->a.counter), "=qm" (c) | ||
122 | :"ir" (i) : "memory"); | ||
123 | return c; | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * local_add_return - add and return | ||
128 | * @l: pointer of type local_t | ||
129 | * @i: integer value to add | ||
130 | * | ||
131 | * Atomically adds @i to @l and returns @i + @l | ||
132 | */ | ||
133 | static __inline__ long local_add_return(long i, local_t *l) | ||
134 | { | ||
135 | long __i; | ||
136 | #ifdef CONFIG_M386 | ||
137 | unsigned long flags; | ||
138 | if(unlikely(boot_cpu_data.x86 <= 3)) | ||
139 | goto no_xadd; | ||
140 | #endif | ||
141 | /* Modern 486+ processor */ | ||
142 | __i = i; | ||
143 | __asm__ __volatile__( | ||
144 | "xaddl %0, %1;" | ||
145 | :"+r" (i), "+m" (l->a.counter) | ||
146 | : : "memory"); | ||
147 | return i + __i; | ||
148 | |||
149 | #ifdef CONFIG_M386 | ||
150 | no_xadd: /* Legacy 386 processor */ | ||
151 | local_irq_save(flags); | ||
152 | __i = local_read(l); | ||
153 | local_set(l, i + __i); | ||
154 | local_irq_restore(flags); | ||
155 | return i + __i; | ||
156 | #endif | ||
157 | } | ||
158 | |||
159 | static __inline__ long local_sub_return(long i, local_t *l) | ||
160 | { | ||
161 | return local_add_return(-i,l); | ||
162 | } | ||
163 | |||
164 | #define local_inc_return(l) (local_add_return(1,l)) | ||
165 | #define local_dec_return(l) (local_sub_return(1,l)) | ||
166 | |||
167 | #define local_cmpxchg(l, o, n) \ | ||
168 | (cmpxchg_local(&((l)->a.counter), (o), (n))) | ||
169 | /* Always has a lock prefix */ | ||
170 | #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) | ||
171 | |||
172 | /** | ||
173 | * local_add_unless - add unless the number is a given value | ||
174 | * @l: pointer of type local_t | ||
175 | * @a: the amount to add to l... | ||
176 | * @u: ...unless l is equal to u. | ||
177 | * | ||
178 | * Atomically adds @a to @l, so long as it was not @u. | ||
179 | * Returns non-zero if @l was not @u, and zero otherwise. | ||
180 | */ | ||
181 | #define local_add_unless(l, a, u) \ | ||
182 | ({ \ | ||
183 | long c, old; \ | ||
184 | c = local_read(l); \ | ||
185 | for (;;) { \ | ||
186 | if (unlikely(c == (u))) \ | ||
187 | break; \ | ||
188 | old = local_cmpxchg((l), c, c + (a)); \ | ||
189 | if (likely(old == c)) \ | ||
190 | break; \ | ||
191 | c = old; \ | ||
192 | } \ | ||
193 | c != (u); \ | ||
194 | }) | ||
195 | #define local_inc_not_zero(l) local_add_unless((l), 1, 0) | ||
196 | |||
197 | /* On x86, these are no better than the atomic variants. */ | ||
198 | #define __local_inc(l) local_inc(l) | ||
199 | #define __local_dec(l) local_dec(l) | ||
200 | #define __local_add(i,l) local_add((i),(l)) | ||
201 | #define __local_sub(i,l) local_sub((i),(l)) | ||
202 | |||
203 | /* Use these for per-cpu local_t variables: on some archs they are | ||
204 | * much more efficient than these naive implementations. Note they take | ||
205 | * a variable, not an address. | ||
206 | */ | ||
207 | |||
208 | /* Need to disable preemption for the cpu local counters otherwise we could | ||
209 | still access a variable of a previous CPU in a non atomic way. */ | ||
210 | #define cpu_local_wrap_v(l) \ | ||
211 | ({ local_t res__; \ | ||
212 | preempt_disable(); \ | ||
213 | res__ = (l); \ | ||
214 | preempt_enable(); \ | ||
215 | res__; }) | ||
216 | #define cpu_local_wrap(l) \ | ||
217 | ({ preempt_disable(); \ | ||
218 | l; \ | ||
219 | preempt_enable(); }) \ | ||
220 | |||
221 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | ||
222 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | ||
223 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | ||
224 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | ||
225 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | ||
226 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | ||
227 | |||
228 | #define __cpu_local_inc(l) cpu_local_inc(l) | ||
229 | #define __cpu_local_dec(l) cpu_local_dec(l) | ||
230 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | ||
231 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | ||
232 | |||
233 | #endif /* _ARCH_I386_LOCAL_H */ | ||
diff --git a/include/asm-x86/local_64.h b/include/asm-x86/local_64.h deleted file mode 100644 index e87492bb0693..000000000000 --- a/include/asm-x86/local_64.h +++ /dev/null | |||
@@ -1,222 +0,0 @@ | |||
1 | #ifndef _ARCH_X8664_LOCAL_H | ||
2 | #define _ARCH_X8664_LOCAL_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | typedef struct | ||
8 | { | ||
9 | atomic_long_t a; | ||
10 | } local_t; | ||
11 | |||
12 | #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } | ||
13 | |||
14 | #define local_read(l) atomic_long_read(&(l)->a) | ||
15 | #define local_set(l,i) atomic_long_set(&(l)->a, (i)) | ||
16 | |||
17 | static inline void local_inc(local_t *l) | ||
18 | { | ||
19 | __asm__ __volatile__( | ||
20 | "incq %0" | ||
21 | :"=m" (l->a.counter) | ||
22 | :"m" (l->a.counter)); | ||
23 | } | ||
24 | |||
25 | static inline void local_dec(local_t *l) | ||
26 | { | ||
27 | __asm__ __volatile__( | ||
28 | "decq %0" | ||
29 | :"=m" (l->a.counter) | ||
30 | :"m" (l->a.counter)); | ||
31 | } | ||
32 | |||
33 | static inline void local_add(long i, local_t *l) | ||
34 | { | ||
35 | __asm__ __volatile__( | ||
36 | "addq %1,%0" | ||
37 | :"=m" (l->a.counter) | ||
38 | :"ir" (i), "m" (l->a.counter)); | ||
39 | } | ||
40 | |||
41 | static inline void local_sub(long i, local_t *l) | ||
42 | { | ||
43 | __asm__ __volatile__( | ||
44 | "subq %1,%0" | ||
45 | :"=m" (l->a.counter) | ||
46 | :"ir" (i), "m" (l->a.counter)); | ||
47 | } | ||
48 | |||
49 | /** | ||
50 | * local_sub_and_test - subtract value from variable and test result | ||
51 | * @i: integer value to subtract | ||
52 | * @l: pointer to type local_t | ||
53 | * | ||
54 | * Atomically subtracts @i from @l and returns | ||
55 | * true if the result is zero, or false for all | ||
56 | * other cases. | ||
57 | */ | ||
58 | static __inline__ int local_sub_and_test(long i, local_t *l) | ||
59 | { | ||
60 | unsigned char c; | ||
61 | |||
62 | __asm__ __volatile__( | ||
63 | "subq %2,%0; sete %1" | ||
64 | :"=m" (l->a.counter), "=qm" (c) | ||
65 | :"ir" (i), "m" (l->a.counter) : "memory"); | ||
66 | return c; | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * local_dec_and_test - decrement and test | ||
71 | * @l: pointer to type local_t | ||
72 | * | ||
73 | * Atomically decrements @l by 1 and | ||
74 | * returns true if the result is 0, or false for all other | ||
75 | * cases. | ||
76 | */ | ||
77 | static __inline__ int local_dec_and_test(local_t *l) | ||
78 | { | ||
79 | unsigned char c; | ||
80 | |||
81 | __asm__ __volatile__( | ||
82 | "decq %0; sete %1" | ||
83 | :"=m" (l->a.counter), "=qm" (c) | ||
84 | :"m" (l->a.counter) : "memory"); | ||
85 | return c != 0; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * local_inc_and_test - increment and test | ||
90 | * @l: pointer to type local_t | ||
91 | * | ||
92 | * Atomically increments @l by 1 | ||
93 | * and returns true if the result is zero, or false for all | ||
94 | * other cases. | ||
95 | */ | ||
96 | static __inline__ int local_inc_and_test(local_t *l) | ||
97 | { | ||
98 | unsigned char c; | ||
99 | |||
100 | __asm__ __volatile__( | ||
101 | "incq %0; sete %1" | ||
102 | :"=m" (l->a.counter), "=qm" (c) | ||
103 | :"m" (l->a.counter) : "memory"); | ||
104 | return c != 0; | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * local_add_negative - add and test if negative | ||
109 | * @i: integer value to add | ||
110 | * @l: pointer to type local_t | ||
111 | * | ||
112 | * Atomically adds @i to @l and returns true | ||
113 | * if the result is negative, or false when | ||
114 | * result is greater than or equal to zero. | ||
115 | */ | ||
116 | static __inline__ int local_add_negative(long i, local_t *l) | ||
117 | { | ||
118 | unsigned char c; | ||
119 | |||
120 | __asm__ __volatile__( | ||
121 | "addq %2,%0; sets %1" | ||
122 | :"=m" (l->a.counter), "=qm" (c) | ||
123 | :"ir" (i), "m" (l->a.counter) : "memory"); | ||
124 | return c; | ||
125 | } | ||
126 | |||
127 | /** | ||
128 | * local_add_return - add and return | ||
129 | * @i: integer value to add | ||
130 | * @l: pointer to type local_t | ||
131 | * | ||
132 | * Atomically adds @i to @l and returns @i + @l | ||
133 | */ | ||
134 | static __inline__ long local_add_return(long i, local_t *l) | ||
135 | { | ||
136 | long __i = i; | ||
137 | __asm__ __volatile__( | ||
138 | "xaddq %0, %1;" | ||
139 | :"+r" (i), "+m" (l->a.counter) | ||
140 | : : "memory"); | ||
141 | return i + __i; | ||
142 | } | ||
143 | |||
144 | static __inline__ long local_sub_return(long i, local_t *l) | ||
145 | { | ||
146 | return local_add_return(-i,l); | ||
147 | } | ||
148 | |||
149 | #define local_inc_return(l) (local_add_return(1,l)) | ||
150 | #define local_dec_return(l) (local_sub_return(1,l)) | ||
151 | |||
152 | #define local_cmpxchg(l, o, n) \ | ||
153 | (cmpxchg_local(&((l)->a.counter), (o), (n))) | ||
154 | /* Always has a lock prefix */ | ||
155 | #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) | ||
156 | |||
157 | /** | ||
158 | * atomic_up_add_unless - add unless the number is a given value | ||
159 | * @l: pointer of type local_t | ||
160 | * @a: the amount to add to l... | ||
161 | * @u: ...unless l is equal to u. | ||
162 | * | ||
163 | * Atomically adds @a to @l, so long as it was not @u. | ||
164 | * Returns non-zero if @l was not @u, and zero otherwise. | ||
165 | */ | ||
166 | #define local_add_unless(l, a, u) \ | ||
167 | ({ \ | ||
168 | long c, old; \ | ||
169 | c = local_read(l); \ | ||
170 | for (;;) { \ | ||
171 | if (unlikely(c == (u))) \ | ||
172 | break; \ | ||
173 | old = local_cmpxchg((l), c, c + (a)); \ | ||
174 | if (likely(old == c)) \ | ||
175 | break; \ | ||
176 | c = old; \ | ||
177 | } \ | ||
178 | c != (u); \ | ||
179 | }) | ||
180 | #define local_inc_not_zero(l) local_add_unless((l), 1, 0) | ||
181 | |||
182 | /* On x86-64 these are better than the atomic variants on SMP kernels | ||
183 | because they dont use a lock prefix. */ | ||
184 | #define __local_inc(l) local_inc(l) | ||
185 | #define __local_dec(l) local_dec(l) | ||
186 | #define __local_add(i,l) local_add((i),(l)) | ||
187 | #define __local_sub(i,l) local_sub((i),(l)) | ||
188 | |||
189 | /* Use these for per-cpu local_t variables: on some archs they are | ||
190 | * much more efficient than these naive implementations. Note they take | ||
191 | * a variable, not an address. | ||
192 | * | ||
193 | * This could be done better if we moved the per cpu data directly | ||
194 | * after GS. | ||
195 | */ | ||
196 | |||
197 | /* Need to disable preemption for the cpu local counters otherwise we could | ||
198 | still access a variable of a previous CPU in a non atomic way. */ | ||
199 | #define cpu_local_wrap_v(l) \ | ||
200 | ({ local_t res__; \ | ||
201 | preempt_disable(); \ | ||
202 | res__ = (l); \ | ||
203 | preempt_enable(); \ | ||
204 | res__; }) | ||
205 | #define cpu_local_wrap(l) \ | ||
206 | ({ preempt_disable(); \ | ||
207 | l; \ | ||
208 | preempt_enable(); }) \ | ||
209 | |||
210 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | ||
211 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | ||
212 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | ||
213 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | ||
214 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | ||
215 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | ||
216 | |||
217 | #define __cpu_local_inc(l) cpu_local_inc(l) | ||
218 | #define __cpu_local_dec(l) cpu_local_dec(l) | ||
219 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | ||
220 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | ||
221 | |||
222 | #endif /* _ARCH_X8664_LOCAL_H */ | ||
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index ebd319f838ab..6df235e8ea91 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h | |||
@@ -110,13 +110,13 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
110 | } | 110 | } |
111 | 111 | ||
112 | static inline int mpc_apic_id(struct mpc_config_processor *m, | 112 | static inline int mpc_apic_id(struct mpc_config_processor *m, |
113 | struct mpc_config_translation *translation_record) | 113 | struct mpc_config_translation *translation_record) |
114 | { | 114 | { |
115 | printk("Processor #%d %ld:%ld APIC version %d\n", | 115 | printk("Processor #%d %u:%u APIC version %d\n", |
116 | m->mpc_apicid, | 116 | m->mpc_apicid, |
117 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | 117 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, |
118 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | 118 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, |
119 | m->mpc_apicver); | 119 | m->mpc_apicver); |
120 | return m->mpc_apicid; | 120 | return m->mpc_apicid; |
121 | } | 121 | } |
122 | 122 | ||
diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h index 1f730b8bd1fd..989f34c37d32 100644 --- a/include/asm-x86/mach-default/apm.h +++ b/include/asm-x86/mach-default/apm.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-i386/mach-default/apm.h | ||
3 | * | ||
4 | * Machine specific APM BIOS functions for generic. | 2 | * Machine specific APM BIOS functions for generic. |
5 | * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> | 3 | * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> |
6 | */ | 4 | */ |
diff --git a/include/asm-x86/mach-default/io_ports.h b/include/asm-x86/mach-default/io_ports.h deleted file mode 100644 index 48540ba97166..000000000000 --- a/include/asm-x86/mach-default/io_ports.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * arch/i386/mach-generic/io_ports.h | ||
3 | * | ||
4 | * Machine specific IO port address definition for generic. | ||
5 | * Written by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_IO_PORTS_H | ||
8 | #define _MACH_IO_PORTS_H | ||
9 | |||
10 | /* i8259A PIC registers */ | ||
11 | #define PIC_MASTER_CMD 0x20 | ||
12 | #define PIC_MASTER_IMR 0x21 | ||
13 | #define PIC_MASTER_ISR PIC_MASTER_CMD | ||
14 | #define PIC_MASTER_POLL PIC_MASTER_ISR | ||
15 | #define PIC_MASTER_OCW3 PIC_MASTER_ISR | ||
16 | #define PIC_SLAVE_CMD 0xa0 | ||
17 | #define PIC_SLAVE_IMR 0xa1 | ||
18 | |||
19 | /* i8259A PIC related value */ | ||
20 | #define PIC_CASCADE_IR 2 | ||
21 | #define MASTER_ICW4_DEFAULT 0x01 | ||
22 | #define SLAVE_ICW4_DEFAULT 0x01 | ||
23 | #define PIC_ICW4_AEOI 2 | ||
24 | |||
25 | #endif /* !_MACH_IO_PORTS_H */ | ||
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index 6db1c3babe9a..e3c2c1012c1c 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h | |||
@@ -89,15 +89,15 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) | |||
89 | return physid_mask_of_physid(phys_apicid); | 89 | return physid_mask_of_physid(phys_apicid); |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline int mpc_apic_id(struct mpc_config_processor *m, | 92 | static inline int mpc_apic_id(struct mpc_config_processor *m, |
93 | struct mpc_config_translation *translation_record) | 93 | struct mpc_config_translation *translation_record) |
94 | { | 94 | { |
95 | printk("Processor #%d %ld:%ld APIC version %d\n", | 95 | printk("Processor #%d %u:%u APIC version %d\n", |
96 | m->mpc_apicid, | 96 | m->mpc_apicid, |
97 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | 97 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, |
98 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | 98 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, |
99 | m->mpc_apicver); | 99 | m->mpc_apicver); |
100 | return (m->mpc_apicid); | 100 | return m->mpc_apicid; |
101 | } | 101 | } |
102 | 102 | ||
103 | static inline void setup_portio_remap(void) | 103 | static inline void setup_portio_remap(void) |
diff --git a/include/asm-x86/mach-default/mach_time.h b/include/asm-x86/mach-default/mach_time.h deleted file mode 100644 index 31eb5de6f3dc..000000000000 --- a/include/asm-x86/mach-default/mach_time.h +++ /dev/null | |||
@@ -1,111 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-i386/mach-default/mach_time.h | ||
3 | * | ||
4 | * Machine specific set RTC function for generic. | ||
5 | * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_TIME_H | ||
8 | #define _MACH_TIME_H | ||
9 | |||
10 | #include <linux/mc146818rtc.h> | ||
11 | |||
12 | /* for check timing call set_rtc_mmss() 500ms */ | ||
13 | /* used in arch/i386/time.c::do_timer_interrupt() */ | ||
14 | #define USEC_AFTER 500000 | ||
15 | #define USEC_BEFORE 500000 | ||
16 | |||
17 | /* | ||
18 | * In order to set the CMOS clock precisely, set_rtc_mmss has to be | ||
19 | * called 500 ms after the second nowtime has started, because when | ||
20 | * nowtime is written into the registers of the CMOS clock, it will | ||
21 | * jump to the next second precisely 500 ms later. Check the Motorola | ||
22 | * MC146818A or Dallas DS12887 data sheet for details. | ||
23 | * | ||
24 | * BUG: This routine does not handle hour overflow properly; it just | ||
25 | * sets the minutes. Usually you'll only notice that after reboot! | ||
26 | */ | ||
27 | static inline int mach_set_rtc_mmss(unsigned long nowtime) | ||
28 | { | ||
29 | int retval = 0; | ||
30 | int real_seconds, real_minutes, cmos_minutes; | ||
31 | unsigned char save_control, save_freq_select; | ||
32 | |||
33 | save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ | ||
34 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); | ||
35 | |||
36 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ | ||
37 | CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); | ||
38 | |||
39 | cmos_minutes = CMOS_READ(RTC_MINUTES); | ||
40 | if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
41 | BCD_TO_BIN(cmos_minutes); | ||
42 | |||
43 | /* | ||
44 | * since we're only adjusting minutes and seconds, | ||
45 | * don't interfere with hour overflow. This avoids | ||
46 | * messing with unknown time zones but requires your | ||
47 | * RTC not to be off by more than 15 minutes | ||
48 | */ | ||
49 | real_seconds = nowtime % 60; | ||
50 | real_minutes = nowtime / 60; | ||
51 | if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) | ||
52 | real_minutes += 30; /* correct for half hour time zone */ | ||
53 | real_minutes %= 60; | ||
54 | |||
55 | if (abs(real_minutes - cmos_minutes) < 30) { | ||
56 | if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
57 | BIN_TO_BCD(real_seconds); | ||
58 | BIN_TO_BCD(real_minutes); | ||
59 | } | ||
60 | CMOS_WRITE(real_seconds,RTC_SECONDS); | ||
61 | CMOS_WRITE(real_minutes,RTC_MINUTES); | ||
62 | } else { | ||
63 | printk(KERN_WARNING | ||
64 | "set_rtc_mmss: can't update from %d to %d\n", | ||
65 | cmos_minutes, real_minutes); | ||
66 | retval = -1; | ||
67 | } | ||
68 | |||
69 | /* The following flags have to be released exactly in this order, | ||
70 | * otherwise the DS12887 (popular MC146818A clone with integrated | ||
71 | * battery and quartz) will not reset the oscillator and will not | ||
72 | * update precisely 500 ms later. You won't find this mentioned in | ||
73 | * the Dallas Semiconductor data sheets, but who believes data | ||
74 | * sheets anyway ... -- Markus Kuhn | ||
75 | */ | ||
76 | CMOS_WRITE(save_control, RTC_CONTROL); | ||
77 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | ||
78 | |||
79 | return retval; | ||
80 | } | ||
81 | |||
82 | static inline unsigned long mach_get_cmos_time(void) | ||
83 | { | ||
84 | unsigned int year, mon, day, hour, min, sec; | ||
85 | |||
86 | do { | ||
87 | sec = CMOS_READ(RTC_SECONDS); | ||
88 | min = CMOS_READ(RTC_MINUTES); | ||
89 | hour = CMOS_READ(RTC_HOURS); | ||
90 | day = CMOS_READ(RTC_DAY_OF_MONTH); | ||
91 | mon = CMOS_READ(RTC_MONTH); | ||
92 | year = CMOS_READ(RTC_YEAR); | ||
93 | } while (sec != CMOS_READ(RTC_SECONDS)); | ||
94 | |||
95 | if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
96 | BCD_TO_BIN(sec); | ||
97 | BCD_TO_BIN(min); | ||
98 | BCD_TO_BIN(hour); | ||
99 | BCD_TO_BIN(day); | ||
100 | BCD_TO_BIN(mon); | ||
101 | BCD_TO_BIN(year); | ||
102 | } | ||
103 | |||
104 | year += 1900; | ||
105 | if (year < 1970) | ||
106 | year += 100; | ||
107 | |||
108 | return mktime(year, mon, day, hour, min, sec); | ||
109 | } | ||
110 | |||
111 | #endif /* !_MACH_TIME_H */ | ||
diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h index 807992fd4171..4b76e536cd98 100644 --- a/include/asm-x86/mach-default/mach_timer.h +++ b/include/asm-x86/mach-default/mach_timer.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-i386/mach-default/mach_timer.h | ||
3 | * | ||
4 | * Machine specific calibrate_tsc() for generic. | 2 | * Machine specific calibrate_tsc() for generic. |
5 | * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp> | 3 | * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp> |
6 | */ | 4 | */ |
diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h index 625438b8a6eb..2fe7705c0484 100644 --- a/include/asm-x86/mach-default/mach_traps.h +++ b/include/asm-x86/mach-default/mach_traps.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-i386/mach-default/mach_traps.h | ||
3 | * | ||
4 | * Machine specific NMI handling for generic. | 2 | * Machine specific NMI handling for generic. |
5 | * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> | 3 | * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> |
6 | */ | 4 | */ |
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index caec64be516d..d23011fdf454 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h | |||
@@ -131,11 +131,11 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
131 | 131 | ||
132 | static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) | 132 | static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) |
133 | { | 133 | { |
134 | printk("Processor #%d %ld:%ld APIC version %d\n", | 134 | printk("Processor #%d %u:%u APIC version %d\n", |
135 | m->mpc_apicid, | 135 | m->mpc_apicid, |
136 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | 136 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, |
137 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | 137 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, |
138 | m->mpc_apicver); | 138 | m->mpc_apicver); |
139 | return (m->mpc_apicid); | 139 | return (m->mpc_apicid); |
140 | } | 140 | } |
141 | 141 | ||
diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h new file mode 100644 index 000000000000..5305dcb96df2 --- /dev/null +++ b/include/asm-x86/mach-generic/gpio.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __ASM_MACH_GENERIC_GPIO_H | ||
2 | #define __ASM_MACH_GENERIC_GPIO_H | ||
3 | |||
4 | int gpio_request(unsigned gpio, const char *label); | ||
5 | void gpio_free(unsigned gpio); | ||
6 | int gpio_direction_input(unsigned gpio); | ||
7 | int gpio_direction_output(unsigned gpio, int value); | ||
8 | int gpio_get_value(unsigned gpio); | ||
9 | void gpio_set_value(unsigned gpio, int value); | ||
10 | int gpio_to_irq(unsigned gpio); | ||
11 | int irq_to_gpio(unsigned irq); | ||
12 | |||
13 | #include <asm-generic/gpio.h> /* cansleep wrappers */ | ||
14 | |||
15 | #endif /* __ASM_MACH_GENERIC_GPIO_H */ | ||
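
For reference, the expected call sequence through this arch-neutral API looks like the sketch below; GPIO number 42 and the label are placeholders, not real board data.

/* Sketch: claim a GPIO, drive it, release it. */
static int demo_pulse_gpio(void)
{
	int err = gpio_request(42, "demo");
	if (err)
		return err;

	err = gpio_direction_output(42, 1);	/* configure and drive high */
	if (!err)
		gpio_set_value(42, 0);		/* then drive low */

	gpio_free(42);
	return err;
}
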
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 5e5e7dd2692e..3b637fac890b 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h | |||
@@ -101,14 +101,16 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, | |||
101 | int quad = translation_record->trans_quad; | 101 | int quad = translation_record->trans_quad; |
102 | int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); | 102 | int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); |
103 | 103 | ||
104 | printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n", | 104 | printk("Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", |
105 | m->mpc_apicid, | 105 | m->mpc_apicid, |
106 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | 106 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, |
107 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | 107 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, |
108 | m->mpc_apicver, quad, logical_apicid); | 108 | m->mpc_apicver, quad, logical_apicid); |
109 | return logical_apicid; | 109 | return logical_apicid; |
110 | } | 110 | } |
111 | 111 | ||
112 | extern void *xquad_portio; | ||
113 | |||
112 | static inline void setup_portio_remap(void) | 114 | static inline void setup_portio_remap(void) |
113 | { | 115 | { |
114 | int num_quads = num_online_nodes(); | 116 | int num_quads = num_online_nodes(); |
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h new file mode 100644 index 000000000000..db31b929b990 --- /dev/null +++ b/include/asm-x86/mach-rdc321x/gpio.h | |||
@@ -0,0 +1,56 @@ | |||
1 | #ifndef _RDC321X_GPIO_H | ||
2 | #define _RDC321X_GPIO_H | ||
3 | |||
4 | extern int rdc_gpio_get_value(unsigned gpio); | ||
5 | extern void rdc_gpio_set_value(unsigned gpio, int value); | ||
6 | extern int rdc_gpio_direction_input(unsigned gpio); | ||
7 | extern int rdc_gpio_direction_output(unsigned gpio, int value); | ||
8 | |||
9 | |||
10 | /* Wrappers for the arch-neutral GPIO API */ | ||
11 | |||
12 | static inline int gpio_request(unsigned gpio, const char *label) | ||
13 | { | ||
14 | /* Not yet implemented */ | ||
15 | return 0; | ||
16 | } | ||
17 | |||
18 | static inline void gpio_free(unsigned gpio) | ||
19 | { | ||
20 | /* Not yet implemented */ | ||
21 | } | ||
22 | |||
23 | static inline int gpio_direction_input(unsigned gpio) | ||
24 | { | ||
25 | return rdc_gpio_direction_input(gpio); | ||
26 | } | ||
27 | |||
28 | static inline int gpio_direction_output(unsigned gpio, int value) | ||
29 | { | ||
30 | return rdc_gpio_direction_output(gpio, value); | ||
31 | } | ||
32 | |||
33 | static inline int gpio_get_value(unsigned gpio) | ||
34 | { | ||
35 | return rdc_gpio_get_value(gpio); | ||
36 | } | ||
37 | |||
38 | static inline void gpio_set_value(unsigned gpio, int value) | ||
39 | { | ||
40 | rdc_gpio_set_value(gpio, value); | ||
41 | } | ||
42 | |||
43 | static inline int gpio_to_irq(unsigned gpio) | ||
44 | { | ||
45 | return gpio; | ||
46 | } | ||
47 | |||
48 | static inline int irq_to_gpio(unsigned irq) | ||
49 | { | ||
50 | return irq; | ||
51 | } | ||
52 | |||
53 | /* For cansleep */ | ||
54 | #include <asm-generic/gpio.h> | ||
55 | |||
56 | #endif /* _RDC321X_GPIO_H */ | ||
diff --git a/include/asm-x86/mach-rdc321x/rdc321x_defs.h b/include/asm-x86/mach-rdc321x/rdc321x_defs.h new file mode 100644 index 000000000000..838ba8f64fd3 --- /dev/null +++ b/include/asm-x86/mach-rdc321x/rdc321x_defs.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #define PFX "rdc321x: " | ||
2 | |||
3 | /* General purpose configuration and data registers */ | ||
4 | #define RDC3210_CFGREG_ADDR 0x0CF8 | ||
5 | #define RDC3210_CFGREG_DATA 0x0CFC | ||
6 | #define RDC_MAX_GPIO 0x3A | ||
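
These two ports follow the classic PCI configuration mechanism #1 layout (address written to 0xCF8, data transferred via 0xCFC), so a register read presumably looks like the sketch below. The (1 << 31) enable bit and the exact device/function encoding are assumptions from that standard mechanism, not taken from this header.

#include <linux/types.h>	/* u32 */
#include <asm/io.h>		/* outl, inl */

/* Sketch: read a 32-bit RDC321x configuration register through the
 * 0xCF8/0xCFC pair; treat the address encoding as illustrative. */
static u32 demo_rdc321x_cfg_read(unsigned int reg)
{
	outl((1U << 31) | (reg & 0xfc), RDC3210_CFGREG_ADDR);
	return inl(RDC3210_CFGREG_DATA);
}
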
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 732f776aab8e..062c97f6100b 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h | |||
@@ -126,15 +126,15 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid) | |||
126 | return physid_mask_of_physid(0); | 126 | return physid_mask_of_physid(0); |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline int mpc_apic_id(struct mpc_config_processor *m, | 129 | static inline int mpc_apic_id(struct mpc_config_processor *m, |
130 | struct mpc_config_translation *translation_record) | 130 | struct mpc_config_translation *translation_record) |
131 | { | 131 | { |
132 | printk("Processor #%d %ld:%ld APIC version %d\n", | 132 | printk("Processor #%d %u:%u APIC version %d\n", |
133 | m->mpc_apicid, | 133 | m->mpc_apicid, |
134 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | 134 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, |
135 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | 135 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, |
136 | m->mpc_apicver); | 136 | m->mpc_apicver); |
137 | return (m->mpc_apicid); | 137 | return m->mpc_apicid; |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline void setup_portio_remap(void) | 140 | static inline void setup_portio_remap(void) |
diff --git a/include/asm-x86/mach-voyager/do_timer.h b/include/asm-x86/mach-voyager/do_timer.h index bc2b58926308..9e5a459fd15b 100644 --- a/include/asm-x86/mach-voyager/do_timer.h +++ b/include/asm-x86/mach-voyager/do_timer.h | |||
@@ -6,7 +6,6 @@ | |||
6 | 6 | ||
7 | /** | 7 | /** |
8 | * do_timer_interrupt_hook - hook into timer tick | 8 | * do_timer_interrupt_hook - hook into timer tick |
9 | * @regs: standard registers from interrupt | ||
10 | * | 9 | * |
11 | * Call the pit clock event handler. see asm/i8253.h | 10 | * Call the pit clock event handler. see asm/i8253.h |
12 | **/ | 11 | **/ |
diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h index a4b0aa3320e6..9bf4ae93ab10 100644 --- a/include/asm-x86/math_emu.h +++ b/include/asm-x86/math_emu.h | |||
@@ -1,11 +1,6 @@ | |||
1 | #ifndef _I386_MATH_EMU_H | 1 | #ifndef _I386_MATH_EMU_H |
2 | #define _I386_MATH_EMU_H | 2 | #define _I386_MATH_EMU_H |
3 | 3 | ||
4 | #include <asm/sigcontext.h> | ||
5 | |||
6 | int restore_i387_soft(void *s387, struct _fpstate __user *buf); | ||
7 | int save_i387_soft(void *s387, struct _fpstate __user *buf); | ||
8 | |||
9 | /* This structure matches the layout of the data saved to the stack | 4 | /* This structure matches the layout of the data saved to the stack |
10 | following a device-not-present interrupt, part of it saved | 5 | following a device-not-present interrupt, part of it saved |
11 | automatically by the 80386/80486. | 6 | automatically by the 80386/80486. |
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index 5c2bb66caf17..cdd9f965835a 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h | |||
@@ -1,5 +1,100 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | /* |
2 | # include "mc146818rtc_32.h" | 2 | * Machine dependent access functions for RTC registers. |
3 | */ | ||
4 | #ifndef _ASM_MC146818RTC_H | ||
5 | #define _ASM_MC146818RTC_H | ||
6 | |||
7 | #include <asm/io.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <linux/mc146818rtc.h> | ||
11 | |||
12 | #ifndef RTC_PORT | ||
13 | #define RTC_PORT(x) (0x70 + (x)) | ||
14 | #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ | ||
15 | #endif | ||
16 | |||
17 | #if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG) | ||
18 | /* | ||
19 | * This lock provides nmi access to the CMOS/RTC registers. It has some | ||
20 | * special properties. It is owned by a CPU and stores the index register | ||
21 | * currently being accessed (if owned). The idea here is that it works | ||
22 | * like a normal lock (normally). However, in an NMI, the NMI code will | ||
23 | * first check to see if its CPU owns the lock, meaning that the NMI | ||
24 | * interrupted a read/write of the device. If it does, it goes ahead | ||
25 | * and performs the access and then restores the index register. If it does | ||
26 | * not, it locks normally. | ||
27 | * | ||
28 | * Note that since we are working with NMIs, we need this lock even in | ||
29 | * a non-SMP machine just to mark that the lock is owned. | ||
30 | * | ||
31 | * This only works with compare-and-swap. There is no other way to | ||
32 | * atomically claim the lock and set the owner. | ||
33 | */ | ||
34 | #include <linux/smp.h> | ||
35 | extern volatile unsigned long cmos_lock; | ||
36 | |||
37 | /* | ||
38 | * All of these below must be called with interrupts off, preempt | ||
39 | * disabled, etc. | ||
40 | */ | ||
41 | |||
42 | static inline void lock_cmos(unsigned char reg) | ||
43 | { | ||
44 | unsigned long new; | ||
45 | new = ((smp_processor_id()+1) << 8) | reg; | ||
46 | for (;;) { | ||
47 | if (cmos_lock) { | ||
48 | cpu_relax(); | ||
49 | continue; | ||
50 | } | ||
51 | if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) | ||
52 | return; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static inline void unlock_cmos(void) | ||
57 | { | ||
58 | cmos_lock = 0; | ||
59 | } | ||
60 | static inline int do_i_have_lock_cmos(void) | ||
61 | { | ||
62 | return (cmos_lock >> 8) == (smp_processor_id()+1); | ||
63 | } | ||
64 | static inline unsigned char current_lock_cmos_reg(void) | ||
65 | { | ||
66 | return cmos_lock & 0xff; | ||
67 | } | ||
68 | #define lock_cmos_prefix(reg) \ | ||
69 | do { \ | ||
70 | unsigned long cmos_flags; \ | ||
71 | local_irq_save(cmos_flags); \ | ||
72 | lock_cmos(reg) | ||
73 | #define lock_cmos_suffix(reg) \ | ||
74 | unlock_cmos(); \ | ||
75 | local_irq_restore(cmos_flags); \ | ||
76 | } while (0) | ||
3 | #else | 77 | #else |
4 | # include "mc146818rtc_64.h" | 78 | #define lock_cmos_prefix(reg) do {} while (0) |
79 | #define lock_cmos_suffix(reg) do {} while (0) | ||
80 | #define lock_cmos(reg) | ||
81 | #define unlock_cmos() | ||
82 | #define do_i_have_lock_cmos() 0 | ||
83 | #define current_lock_cmos_reg() 0 | ||
5 | #endif | 84 | #endif |
85 | |||
86 | /* | ||
87 | * All machines supported so far access the RTC index register via | ||
88 | * an ISA port, but the way they access the data register differs ... | ||
89 | */ | ||
90 | #define CMOS_READ(addr) rtc_cmos_read(addr) | ||
91 | #define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) | ||
92 | unsigned char rtc_cmos_read(unsigned char addr); | ||
93 | void rtc_cmos_write(unsigned char val, unsigned char addr); | ||
94 | |||
95 | extern int mach_set_rtc_mmss(unsigned long nowtime); | ||
96 | extern unsigned long mach_get_cmos_time(void); | ||
97 | |||
98 | #define RTC_IRQ 8 | ||
99 | |||
100 | #endif /* _ASM_MC146818RTC_H */ | ||
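
Given the locking protocol above, the out-of-line accessors behind CMOS_READ()/CMOS_WRITE() can be pictured as the sketch below; it mirrors the documented protocol rather than quoting the arch implementation.

/* Sketch: select the index register, transfer the data byte, all
 * under the NMI-aware CMOS lock documented above. */
unsigned char rtc_cmos_read(unsigned char addr)
{
	unsigned char val;

	lock_cmos_prefix(addr);
	outb_p(addr, RTC_PORT(0));	/* index register */
	val = inb_p(RTC_PORT(1));	/* data register */
	lock_cmos_suffix(addr);
	return val;
}
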
diff --git a/include/asm-x86/mc146818rtc_32.h b/include/asm-x86/mc146818rtc_32.h deleted file mode 100644 index 1613b42eaf58..000000000000 --- a/include/asm-x86/mc146818rtc_32.h +++ /dev/null | |||
@@ -1,97 +0,0 @@ | |||
1 | /* | ||
2 | * Machine dependent access functions for RTC registers. | ||
3 | */ | ||
4 | #ifndef _ASM_MC146818RTC_H | ||
5 | #define _ASM_MC146818RTC_H | ||
6 | |||
7 | #include <asm/io.h> | ||
8 | #include <asm/system.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <linux/mc146818rtc.h> | ||
11 | |||
12 | #ifndef RTC_PORT | ||
13 | #define RTC_PORT(x) (0x70 + (x)) | ||
14 | #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ | ||
15 | #endif | ||
16 | |||
17 | #ifdef __HAVE_ARCH_CMPXCHG | ||
18 | /* | ||
19 | * This lock provides nmi access to the CMOS/RTC registers. It has some | ||
20 | * special properties. It is owned by a CPU and stores the index register | ||
21 | * currently being accessed (if owned). The idea here is that it works | ||
22 | * like a normal lock (normally). However, in an NMI, the NMI code will | ||
23 | * first check to see if its CPU owns the lock, meaning that the NMI | ||
24 | * interrupted during the read/write of the device. If it does, it goes ahead | ||
25 | * and performs the access and then restores the index register. If it does | ||
26 | * not, it locks normally. | ||
27 | * | ||
28 | * Note that since we are working with NMIs, we need this lock even in | ||
29 | * a non-SMP machine just to mark that the lock is owned. | ||
30 | * | ||
31 | * This only works with compare-and-swap. There is no other way to | ||
32 | * atomically claim the lock and set the owner. | ||
33 | */ | ||
34 | #include <linux/smp.h> | ||
35 | extern volatile unsigned long cmos_lock; | ||
36 | |||
37 | /* | ||
38 | * All of these below must be called with interrupts off, preempt | ||
39 | * disabled, etc. | ||
40 | */ | ||
41 | |||
42 | static inline void lock_cmos(unsigned char reg) | ||
43 | { | ||
44 | unsigned long new; | ||
45 | new = ((smp_processor_id()+1) << 8) | reg; | ||
46 | for (;;) { | ||
47 | if (cmos_lock) { | ||
48 | cpu_relax(); | ||
49 | continue; | ||
50 | } | ||
51 | if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) | ||
52 | return; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | static inline void unlock_cmos(void) | ||
57 | { | ||
58 | cmos_lock = 0; | ||
59 | } | ||
60 | static inline int do_i_have_lock_cmos(void) | ||
61 | { | ||
62 | return (cmos_lock >> 8) == (smp_processor_id()+1); | ||
63 | } | ||
64 | static inline unsigned char current_lock_cmos_reg(void) | ||
65 | { | ||
66 | return cmos_lock & 0xff; | ||
67 | } | ||
68 | #define lock_cmos_prefix(reg) \ | ||
69 | do { \ | ||
70 | unsigned long cmos_flags; \ | ||
71 | local_irq_save(cmos_flags); \ | ||
72 | lock_cmos(reg) | ||
73 | #define lock_cmos_suffix(reg) \ | ||
74 | unlock_cmos(); \ | ||
75 | local_irq_restore(cmos_flags); \ | ||
76 | } while (0) | ||
77 | #else | ||
78 | #define lock_cmos_prefix(reg) do {} while (0) | ||
79 | #define lock_cmos_suffix(reg) do {} while (0) | ||
80 | #define lock_cmos(reg) | ||
81 | #define unlock_cmos() | ||
82 | #define do_i_have_lock_cmos() 0 | ||
83 | #define current_lock_cmos_reg() 0 | ||
84 | #endif | ||
85 | |||
86 | /* | ||
87 | * All currently supported machines access the RTC index register via | ||
88 | * an ISA port, but the way they access the data register differs ... | ||
89 | */ | ||
90 | #define CMOS_READ(addr) rtc_cmos_read(addr) | ||
91 | #define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) | ||
92 | unsigned char rtc_cmos_read(unsigned char addr); | ||
93 | void rtc_cmos_write(unsigned char val, unsigned char addr); | ||
94 | |||
95 | #define RTC_IRQ 8 | ||
96 | |||
97 | #endif /* _ASM_MC146818RTC_H */ | ||
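The NMI ownership scheme described in the comment above is easiest to see from the consumer side. A hedged sketch, modelled on the 32-bit NMI-reassert logic: if this CPU already holds cmos_lock, the NMI remembers and later restores the interrupted index register; otherwise it takes the lock normally.

static void reassert_nmi_sketch(void)
{
	int old_reg = -1;

	if (do_i_have_lock_cmos())
		old_reg = current_lock_cmos_reg();
	else
		lock_cmos(0);			/* register number is irrelevant here */

	outb(0x8f, 0x70);			/* mask, then unmask NMIs via port 0x70 bit 7 */
	inb(0x71);				/* dummy read */
	outb(0x0f, 0x70);
	inb(0x71);				/* dummy read */

	if (old_reg >= 0)
		outb(old_reg, 0x70);		/* restore the interrupted index */
	else
		unlock_cmos();
}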
diff --git a/include/asm-x86/mc146818rtc_64.h b/include/asm-x86/mc146818rtc_64.h deleted file mode 100644 index d6e3009430c1..000000000000 --- a/include/asm-x86/mc146818rtc_64.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * Machine dependent access functions for RTC registers. | ||
3 | */ | ||
4 | #ifndef _ASM_MC146818RTC_H | ||
5 | #define _ASM_MC146818RTC_H | ||
6 | |||
7 | #include <asm/io.h> | ||
8 | |||
9 | #ifndef RTC_PORT | ||
10 | #define RTC_PORT(x) (0x70 + (x)) | ||
11 | #define RTC_ALWAYS_BCD 1 /* RTC operates in BCD mode */ | ||
12 | #endif | ||
13 | |||
14 | /* | ||
15 | * All currently supported machines access the RTC index register via | ||
16 | * an ISA port, but the way they access the data register differs ... | ||
17 | */ | ||
18 | #define CMOS_READ(addr) ({ \ | ||
19 | outb_p((addr),RTC_PORT(0)); \ | ||
20 | inb_p(RTC_PORT(1)); \ | ||
21 | }) | ||
22 | #define CMOS_WRITE(val, addr) ({ \ | ||
23 | outb_p((addr),RTC_PORT(0)); \ | ||
24 | outb_p((val),RTC_PORT(1)); \ | ||
25 | }) | ||
26 | |||
27 | #define RTC_IRQ 8 | ||
28 | |||
29 | #endif /* _ASM_MC146818RTC_H */ | ||
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h index df304fd89c27..94f1fd79e22a 100644 --- a/include/asm-x86/mce.h +++ b/include/asm-x86/mce.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ | 13 | #define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */ |
14 | 14 | ||
15 | #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ | 15 | #define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */ |
16 | #define MCG_STATUS_EIPV (1UL<<1) /* eip points to correct instruction */ | 16 | #define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */ |
17 | #define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */ | 17 | #define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */ |
18 | 18 | ||
19 | #define MCI_STATUS_VAL (1UL<<63) /* valid error */ | 19 | #define MCI_STATUS_VAL (1UL<<63) /* valid error */ |
@@ -30,7 +30,7 @@ struct mce { | |||
30 | __u64 misc; | 30 | __u64 misc; |
31 | __u64 addr; | 31 | __u64 addr; |
32 | __u64 mcgstatus; | 32 | __u64 mcgstatus; |
33 | __u64 rip; | 33 | __u64 ip; |
34 | __u64 tsc; /* cpu time stamp counter */ | 34 | __u64 tsc; /* cpu time stamp counter */ |
35 | __u64 res1; /* for future extension */ | 35 | __u64 res1; /* for future extension */ |
36 | __u64 res2; /* ditto */ | 36 | __u64 res2; /* ditto */
@@ -85,14 +85,7 @@ struct mce_log { | |||
85 | #ifdef __KERNEL__ | 85 | #ifdef __KERNEL__ |
86 | 86 | ||
87 | #ifdef CONFIG_X86_32 | 87 | #ifdef CONFIG_X86_32 |
88 | #ifdef CONFIG_X86_MCE | ||
89 | extern void mcheck_init(struct cpuinfo_x86 *c); | ||
90 | #else | ||
91 | #define mcheck_init(c) do {} while(0) | ||
92 | #endif | ||
93 | |||
94 | extern int mce_disabled; | 88 | extern int mce_disabled; |
95 | |||
96 | #else /* CONFIG_X86_32 */ | 89 | #else /* CONFIG_X86_32 */ |
97 | 90 | ||
98 | #include <asm/atomic.h> | 91 | #include <asm/atomic.h> |
@@ -121,6 +114,13 @@ extern int mce_notify_user(void); | |||
121 | 114 | ||
122 | #endif /* !CONFIG_X86_32 */ | 115 | #endif /* !CONFIG_X86_32 */ |
123 | 116 | ||
117 | |||
118 | |||
119 | #ifdef CONFIG_X86_MCE | ||
120 | extern void mcheck_init(struct cpuinfo_x86 *c); | ||
121 | #else | ||
122 | #define mcheck_init(c) do { } while (0) | ||
123 | #endif | ||
124 | extern void stop_mce(void); | 124 | extern void stop_mce(void); |
125 | extern void restart_mce(void); | 125 | extern void restart_mce(void); |
126 | 126 | ||
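A short illustrative consumer of the renamed field (function and message names here are assumptions, not part of the patch): the MCG_STATUS bits govern how much trust to place in 'ip' when logging a machine check.

static void mce_report_ip(const struct mce *m)
{
	if (m->mcgstatus & MCG_STATUS_EIPV)
		printk("MCE at ip %llx (points to faulting instruction)\n", m->ip);
	else if (m->mcgstatus & MCG_STATUS_RIPV)
		printk("MCE, execution can restart at ip %llx\n", m->ip);
	else
		printk("MCE, ip %llx is unreliable\n", m->ip);
}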
diff --git a/include/asm-x86/mmsegment.h b/include/asm-x86/mmsegment.h deleted file mode 100644 index d3f80c996330..000000000000 --- a/include/asm-x86/mmsegment.h +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | #ifndef _ASM_MMSEGMENT_H | ||
2 | #define _ASM_MMSEGMENT_H 1 | ||
3 | |||
4 | typedef struct { | ||
5 | unsigned long seg; | ||
6 | } mm_segment_t; | ||
7 | |||
8 | #endif | ||
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index 3f922c8e1c88..efa962c38897 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h | |||
@@ -20,4 +20,12 @@ typedef struct { | |||
20 | void *vdso; | 20 | void *vdso; |
21 | } mm_context_t; | 21 | } mm_context_t; |
22 | 22 | ||
23 | #ifdef CONFIG_SMP | ||
24 | void leave_mm(int cpu); | ||
25 | #else | ||
26 | static inline void leave_mm(int cpu) | ||
27 | { | ||
28 | } | ||
29 | #endif | ||
30 | |||
23 | #endif /* _ASM_X86_MMU_H */ | 31 | #endif /* _ASM_X86_MMU_H */ |
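A hypothetical call-site sketch for the newly hoisted declaration: the TLB-flush IPI path drops a lazy mm via leave_mm(), and the !SMP stub lets the same call compile away on uniprocessor builds.

static void flush_ipi_handler_sketch(void)
{
	leave_mm(smp_processor_id());	/* stop receiving lazy-mode flush IPIs */
}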
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 7eb0b0b1fb3c..8198d1cca1f3 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h | |||
@@ -32,8 +32,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
32 | #endif | 32 | #endif |
33 | } | 33 | } |
34 | 34 | ||
35 | void leave_mm(unsigned long cpu); | ||
36 | |||
37 | static inline void switch_mm(struct mm_struct *prev, | 35 | static inline void switch_mm(struct mm_struct *prev, |
38 | struct mm_struct *next, | 36 | struct mm_struct *next, |
39 | struct task_struct *tsk) | 37 | struct task_struct *tsk) |
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index 0cce83a78378..ad6dc821ef9e 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h | |||
@@ -7,7 +7,9 @@ | |||
7 | #include <asm/pda.h> | 7 | #include <asm/pda.h> |
8 | #include <asm/pgtable.h> | 8 | #include <asm/pgtable.h> |
9 | #include <asm/tlbflush.h> | 9 | #include <asm/tlbflush.h> |
10 | #ifndef CONFIG_PARAVIRT | ||
10 | #include <asm-generic/mm_hooks.h> | 11 | #include <asm-generic/mm_hooks.h> |
12 | #endif | ||
11 | 13 | ||
12 | /* | 14 | /* |
13 | * possibly do the LDT unload here? | 15 | * possibly do the LDT unload here? |
@@ -23,11 +25,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
23 | #endif | 25 | #endif |
24 | } | 26 | } |
25 | 27 | ||
26 | static inline void load_cr3(pgd_t *pgd) | ||
27 | { | ||
28 | asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory"); | ||
29 | } | ||
30 | |||
31 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
32 | struct task_struct *tsk) | 29 | struct task_struct *tsk) |
33 | { | 30 | { |
@@ -43,20 +40,20 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
43 | load_cr3(next->pgd); | 40 | load_cr3(next->pgd); |
44 | 41 | ||
45 | if (unlikely(next->context.ldt != prev->context.ldt)) | 42 | if (unlikely(next->context.ldt != prev->context.ldt)) |
46 | load_LDT_nolock(&next->context, cpu); | 43 | load_LDT_nolock(&next->context); |
47 | } | 44 | } |
48 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
49 | else { | 46 | else { |
50 | write_pda(mmu_state, TLBSTATE_OK); | 47 | write_pda(mmu_state, TLBSTATE_OK); |
51 | if (read_pda(active_mm) != next) | 48 | if (read_pda(active_mm) != next) |
52 | out_of_line_bug(); | 49 | BUG(); |
53 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | 50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
54 | /* We were in lazy tlb mode and leave_mm disabled | 51 | /* We were in lazy tlb mode and leave_mm disabled |
55 | * tlb flush IPI delivery. We must reload CR3 | 52 | * tlb flush IPI delivery. We must reload CR3 |
56 | * to make sure no freed page tables are used. | 53 | * to make sure no freed page tables are used.
57 | */ | 54 | */ |
58 | load_cr3(next->pgd); | 55 | load_cr3(next->pgd); |
59 | load_LDT_nolock(&next->context, cpu); | 56 | load_LDT_nolock(&next->context); |
60 | } | 57 | } |
61 | } | 58 | } |
62 | #endif | 59 | #endif |
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 118e9812778f..5d6f4ce6e6d6 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h | |||
@@ -87,9 +87,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
87 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | 87 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ |
88 | }) | 88 | }) |
89 | 89 | ||
90 | /* XXX: FIXME -- wli */ | ||
91 | #define kern_addr_valid(kaddr) (0) | ||
92 | |||
93 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ | 90 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ |
94 | #define pfn_valid(pfn) ((pfn) < num_physpages) | 91 | #define pfn_valid(pfn) ((pfn) < num_physpages) |
95 | #else | 92 | #else |
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index 19a89377b123..ebaf9663aa8a 100644 --- a/include/asm-x86/mmzone_64.h +++ b/include/asm-x86/mmzone_64.h | |||
@@ -15,9 +15,9 @@ | |||
15 | struct memnode { | 15 | struct memnode { |
16 | int shift; | 16 | int shift; |
17 | unsigned int mapsize; | 17 | unsigned int mapsize; |
18 | u8 *map; | 18 | s16 *map; |
19 | u8 embedded_map[64-16]; | 19 | s16 embedded_map[64-8]; |
20 | } ____cacheline_aligned; /* total size = 64 bytes */ | 20 | } ____cacheline_aligned; /* total size = 128 bytes */ |
21 | extern struct memnode memnode; | 21 | extern struct memnode memnode; |
22 | #define memnode_shift memnode.shift | 22 | #define memnode_shift memnode.shift |
23 | #define memnodemap memnode.map | 23 | #define memnodemap memnode.map |
@@ -41,11 +41,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | |||
41 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | 41 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ |
42 | NODE_DATA(nid)->node_spanned_pages) | 42 | NODE_DATA(nid)->node_spanned_pages) |
43 | 43 | ||
44 | #ifdef CONFIG_DISCONTIGMEM | 44 | extern int early_pfn_to_nid(unsigned long pfn); |
45 | #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) | ||
46 | |||
47 | extern int pfn_valid(unsigned long pfn); | ||
48 | #endif | ||
49 | 45 | ||
50 | #ifdef CONFIG_NUMA_EMU | 46 | #ifdef CONFIG_NUMA_EMU |
51 | #define FAKE_NODE_MIN_SIZE (64*1024*1024) | 47 | #define FAKE_NODE_MIN_SIZE (64*1024*1024) |
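The widened map leaves the node lookup itself unchanged. A sketch matching the phys_to_nid() shown in context above: physical memory is covered in (1 << shift)-byte chunks, and map[] -- now s16, wide enough to hold -1 for "no node" -- records the node of each chunk.

static int phys_to_nid_sketch(unsigned long addr)
{
	return memnode.map[addr >> memnode.shift];
}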
diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h index 2b2f18d8a531..bfedb247871c 100644 --- a/include/asm-x86/module.h +++ b/include/asm-x86/module.h | |||
@@ -1,5 +1,82 @@ | |||
1 | #ifndef _ASM_MODULE_H | ||
2 | #define _ASM_MODULE_H | ||
3 | |||
4 | /* x86_32/64 are simple */ | ||
5 | struct mod_arch_specific {}; | ||
6 | |||
1 | #ifdef CONFIG_X86_32 | 7 | #ifdef CONFIG_X86_32 |
2 | # include "module_32.h" | 8 | # define Elf_Shdr Elf32_Shdr |
9 | # define Elf_Sym Elf32_Sym | ||
10 | # define Elf_Ehdr Elf32_Ehdr | ||
3 | #else | 11 | #else |
4 | # include "module_64.h" | 12 | # define Elf_Shdr Elf64_Shdr |
13 | # define Elf_Sym Elf64_Sym | ||
14 | # define Elf_Ehdr Elf64_Ehdr | ||
5 | #endif | 15 | #endif |
16 | |||
17 | #ifdef CONFIG_X86_64 | ||
18 | /* X86_64 does not define MODULE_PROC_FAMILY */ | ||
19 | #elif defined CONFIG_M386 | ||
20 | #define MODULE_PROC_FAMILY "386 " | ||
21 | #elif defined CONFIG_M486 | ||
22 | #define MODULE_PROC_FAMILY "486 " | ||
23 | #elif defined CONFIG_M586 | ||
24 | #define MODULE_PROC_FAMILY "586 " | ||
25 | #elif defined CONFIG_M586TSC | ||
26 | #define MODULE_PROC_FAMILY "586TSC " | ||
27 | #elif defined CONFIG_M586MMX | ||
28 | #define MODULE_PROC_FAMILY "586MMX " | ||
29 | #elif defined CONFIG_MCORE2 | ||
30 | #define MODULE_PROC_FAMILY "CORE2 " | ||
31 | #elif defined CONFIG_M686 | ||
32 | #define MODULE_PROC_FAMILY "686 " | ||
33 | #elif defined CONFIG_MPENTIUMII | ||
34 | #define MODULE_PROC_FAMILY "PENTIUMII " | ||
35 | #elif defined CONFIG_MPENTIUMIII | ||
36 | #define MODULE_PROC_FAMILY "PENTIUMIII " | ||
37 | #elif defined CONFIG_MPENTIUMM | ||
38 | #define MODULE_PROC_FAMILY "PENTIUMM " | ||
39 | #elif defined CONFIG_MPENTIUM4 | ||
40 | #define MODULE_PROC_FAMILY "PENTIUM4 " | ||
41 | #elif defined CONFIG_MK6 | ||
42 | #define MODULE_PROC_FAMILY "K6 " | ||
43 | #elif defined CONFIG_MK7 | ||
44 | #define MODULE_PROC_FAMILY "K7 " | ||
45 | #elif defined CONFIG_MK8 | ||
46 | #define MODULE_PROC_FAMILY "K8 " | ||
47 | #elif defined CONFIG_X86_ELAN | ||
48 | #define MODULE_PROC_FAMILY "ELAN " | ||
49 | #elif defined CONFIG_MCRUSOE | ||
50 | #define MODULE_PROC_FAMILY "CRUSOE " | ||
51 | #elif defined CONFIG_MEFFICEON | ||
52 | #define MODULE_PROC_FAMILY "EFFICEON " | ||
53 | #elif defined CONFIG_MWINCHIPC6 | ||
54 | #define MODULE_PROC_FAMILY "WINCHIPC6 " | ||
55 | #elif defined CONFIG_MWINCHIP2 | ||
56 | #define MODULE_PROC_FAMILY "WINCHIP2 " | ||
57 | #elif defined CONFIG_MWINCHIP3D | ||
58 | #define MODULE_PROC_FAMILY "WINCHIP3D " | ||
59 | #elif defined CONFIG_MCYRIXIII | ||
60 | #define MODULE_PROC_FAMILY "CYRIXIII " | ||
61 | #elif defined CONFIG_MVIAC3_2 | ||
62 | #define MODULE_PROC_FAMILY "VIAC3-2 " | ||
63 | #elif defined CONFIG_MVIAC7 | ||
64 | #define MODULE_PROC_FAMILY "VIAC7 " | ||
65 | #elif defined CONFIG_MGEODEGX1 | ||
66 | #define MODULE_PROC_FAMILY "GEODEGX1 " | ||
67 | #elif defined CONFIG_MGEODE_LX | ||
68 | #define MODULE_PROC_FAMILY "GEODE " | ||
69 | #else | ||
70 | #error unknown processor family | ||
71 | #endif | ||
72 | |||
73 | #ifdef CONFIG_X86_32 | ||
74 | # ifdef CONFIG_4KSTACKS | ||
75 | # define MODULE_STACKSIZE "4KSTACKS " | ||
76 | # else | ||
77 | # define MODULE_STACKSIZE "" | ||
78 | # endif | ||
79 | # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE | ||
80 | #endif | ||
81 | |||
82 | #endif /* _ASM_MODULE_H */ | ||
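A worked expansion of the vermagic pieces, with configuration values assumed purely for illustration (CONFIG_M686 plus CONFIG_4KSTACKS): adjacent string literals concatenate, and the resulting fragment is embedded in each module so the loader rejects modules built for a different processor family or stack size.

#include <stdio.h>

#define MODULE_PROC_FAMILY "686 "	/* assumed: CONFIG_M686 */
#define MODULE_STACKSIZE "4KSTACKS "	/* assumed: CONFIG_4KSTACKS */
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE

int main(void)
{
	printf("%s\n", MODULE_ARCH_VERMAGIC);	/* prints "686 4KSTACKS " */
	return 0;
}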
diff --git a/include/asm-x86/module_32.h b/include/asm-x86/module_32.h deleted file mode 100644 index 7e5fda6c3976..000000000000 --- a/include/asm-x86/module_32.h +++ /dev/null | |||
@@ -1,75 +0,0 @@ | |||
1 | #ifndef _ASM_I386_MODULE_H | ||
2 | #define _ASM_I386_MODULE_H | ||
3 | |||
4 | /* x86 is simple */ | ||
5 | struct mod_arch_specific | ||
6 | { | ||
7 | }; | ||
8 | |||
9 | #define Elf_Shdr Elf32_Shdr | ||
10 | #define Elf_Sym Elf32_Sym | ||
11 | #define Elf_Ehdr Elf32_Ehdr | ||
12 | |||
13 | #ifdef CONFIG_M386 | ||
14 | #define MODULE_PROC_FAMILY "386 " | ||
15 | #elif defined CONFIG_M486 | ||
16 | #define MODULE_PROC_FAMILY "486 " | ||
17 | #elif defined CONFIG_M586 | ||
18 | #define MODULE_PROC_FAMILY "586 " | ||
19 | #elif defined CONFIG_M586TSC | ||
20 | #define MODULE_PROC_FAMILY "586TSC " | ||
21 | #elif defined CONFIG_M586MMX | ||
22 | #define MODULE_PROC_FAMILY "586MMX " | ||
23 | #elif defined CONFIG_MCORE2 | ||
24 | #define MODULE_PROC_FAMILY "CORE2 " | ||
25 | #elif defined CONFIG_M686 | ||
26 | #define MODULE_PROC_FAMILY "686 " | ||
27 | #elif defined CONFIG_MPENTIUMII | ||
28 | #define MODULE_PROC_FAMILY "PENTIUMII " | ||
29 | #elif defined CONFIG_MPENTIUMIII | ||
30 | #define MODULE_PROC_FAMILY "PENTIUMIII " | ||
31 | #elif defined CONFIG_MPENTIUMM | ||
32 | #define MODULE_PROC_FAMILY "PENTIUMM " | ||
33 | #elif defined CONFIG_MPENTIUM4 | ||
34 | #define MODULE_PROC_FAMILY "PENTIUM4 " | ||
35 | #elif defined CONFIG_MK6 | ||
36 | #define MODULE_PROC_FAMILY "K6 " | ||
37 | #elif defined CONFIG_MK7 | ||
38 | #define MODULE_PROC_FAMILY "K7 " | ||
39 | #elif defined CONFIG_MK8 | ||
40 | #define MODULE_PROC_FAMILY "K8 " | ||
41 | #elif defined CONFIG_X86_ELAN | ||
42 | #define MODULE_PROC_FAMILY "ELAN " | ||
43 | #elif defined CONFIG_MCRUSOE | ||
44 | #define MODULE_PROC_FAMILY "CRUSOE " | ||
45 | #elif defined CONFIG_MEFFICEON | ||
46 | #define MODULE_PROC_FAMILY "EFFICEON " | ||
47 | #elif defined CONFIG_MWINCHIPC6 | ||
48 | #define MODULE_PROC_FAMILY "WINCHIPC6 " | ||
49 | #elif defined CONFIG_MWINCHIP2 | ||
50 | #define MODULE_PROC_FAMILY "WINCHIP2 " | ||
51 | #elif defined CONFIG_MWINCHIP3D | ||
52 | #define MODULE_PROC_FAMILY "WINCHIP3D " | ||
53 | #elif defined CONFIG_MCYRIXIII | ||
54 | #define MODULE_PROC_FAMILY "CYRIXIII " | ||
55 | #elif defined CONFIG_MVIAC3_2 | ||
56 | #define MODULE_PROC_FAMILY "VIAC3-2 " | ||
57 | #elif defined CONFIG_MVIAC7 | ||
58 | #define MODULE_PROC_FAMILY "VIAC7 " | ||
59 | #elif defined CONFIG_MGEODEGX1 | ||
60 | #define MODULE_PROC_FAMILY "GEODEGX1 " | ||
61 | #elif defined CONFIG_MGEODE_LX | ||
62 | #define MODULE_PROC_FAMILY "GEODE " | ||
63 | #else | ||
64 | #error unknown processor family | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_4KSTACKS | ||
68 | #define MODULE_STACKSIZE "4KSTACKS " | ||
69 | #else | ||
70 | #define MODULE_STACKSIZE "" | ||
71 | #endif | ||
72 | |||
73 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE | ||
74 | |||
75 | #endif /* _ASM_I386_MODULE_H */ | ||
diff --git a/include/asm-x86/module_64.h b/include/asm-x86/module_64.h deleted file mode 100644 index 67f8f69fa7b1..000000000000 --- a/include/asm-x86/module_64.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef _ASM_X8664_MODULE_H | ||
2 | #define _ASM_X8664_MODULE_H | ||
3 | |||
4 | struct mod_arch_specific {}; | ||
5 | |||
6 | #define Elf_Shdr Elf64_Shdr | ||
7 | #define Elf_Sym Elf64_Sym | ||
8 | #define Elf_Ehdr Elf64_Ehdr | ||
9 | |||
10 | #endif | ||
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 8f268e8fd2e9..781ad74ab9e9 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h | |||
@@ -1,5 +1,117 @@ | |||
1 | #ifndef _ASM_X86_MPSPEC_H | ||
2 | #define _ASM_X86_MPSPEC_H | ||
3 | |||
4 | #include <asm/mpspec_def.h> | ||
5 | |||
1 | #ifdef CONFIG_X86_32 | 6 | #ifdef CONFIG_X86_32 |
2 | # include "mpspec_32.h" | 7 | #include <mach_mpspec.h> |
8 | |||
9 | extern int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
10 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; | ||
11 | extern int mp_bus_id_to_local[MAX_MP_BUSSES]; | ||
12 | extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; | ||
13 | |||
14 | extern unsigned int def_to_bigsmp; | ||
15 | extern int apic_version[MAX_APICS]; | ||
16 | extern u8 apicid_2_node[]; | ||
17 | extern int pic_mode; | ||
18 | |||
19 | #define MAX_APICID 256 | ||
20 | |||
3 | #else | 21 | #else |
4 | # include "mpspec_64.h" | 22 | |
23 | #define MAX_MP_BUSSES 256 | ||
24 | /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ | ||
25 | #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) | ||
26 | |||
27 | extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | ||
28 | |||
29 | #endif | ||
30 | |||
31 | extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; | ||
32 | |||
33 | extern unsigned int boot_cpu_physical_apicid; | ||
34 | extern int smp_found_config; | ||
35 | extern int nr_ioapics; | ||
36 | extern int mp_irq_entries; | ||
37 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
38 | extern int mpc_default_type; | ||
39 | extern unsigned long mp_lapic_addr; | ||
40 | |||
41 | extern void find_smp_config(void); | ||
42 | extern void get_smp_config(void); | ||
43 | |||
44 | #ifdef CONFIG_ACPI | ||
45 | extern void mp_register_lapic(u8 id, u8 enabled); | ||
46 | extern void mp_register_lapic_address(u64 address); | ||
47 | extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base); | ||
48 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | ||
49 | u32 gsi); | ||
50 | extern void mp_config_acpi_legacy_irqs(void); | ||
51 | extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); | ||
52 | #endif /* CONFIG_ACPI */ | ||
53 | |||
54 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | ||
55 | |||
56 | struct physid_mask | ||
57 | { | ||
58 | unsigned long mask[PHYSID_ARRAY_SIZE]; | ||
59 | }; | ||
60 | |||
61 | typedef struct physid_mask physid_mask_t; | ||
62 | |||
63 | #define physid_set(physid, map) set_bit(physid, (map).mask) | ||
64 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | ||
65 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | ||
66 | #define physid_test_and_set(physid, map) \ | ||
67 | test_and_set_bit(physid, (map).mask) | ||
68 | |||
69 | #define physids_and(dst, src1, src2) \ | ||
70 | bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
71 | |||
72 | #define physids_or(dst, src1, src2) \ | ||
73 | bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
74 | |||
75 | #define physids_clear(map) \ | ||
76 | bitmap_zero((map).mask, MAX_APICS) | ||
77 | |||
78 | #define physids_complement(dst, src) \ | ||
79 | bitmap_complement((dst).mask, (src).mask, MAX_APICS) | ||
80 | |||
81 | #define physids_empty(map) \ | ||
82 | bitmap_empty((map).mask, MAX_APICS) | ||
83 | |||
84 | #define physids_equal(map1, map2) \ | ||
85 | bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | ||
86 | |||
87 | #define physids_weight(map) \ | ||
88 | bitmap_weight((map).mask, MAX_APICS) | ||
89 | |||
90 | #define physids_shift_right(d, s, n) \ | ||
91 | bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | ||
92 | |||
93 | #define physids_shift_left(d, s, n) \ | ||
94 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | ||
95 | |||
96 | #define physids_coerce(map) ((map).mask[0]) | ||
97 | |||
98 | #define physids_promote(physids) \ | ||
99 | ({ \ | ||
100 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
101 | __physid_mask.mask[0] = physids; \ | ||
102 | __physid_mask; \ | ||
103 | }) | ||
104 | |||
105 | #define physid_mask_of_physid(physid) \ | ||
106 | ({ \ | ||
107 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
108 | physid_set(physid, __physid_mask); \ | ||
109 | __physid_mask; \ | ||
110 | }) | ||
111 | |||
112 | #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } | ||
113 | #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } | ||
114 | |||
115 | extern physid_mask_t phys_cpu_present_map; | ||
116 | |||
5 | #endif | 117 | #endif |
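A hedged usage sketch of the physid accessors now shared by both word sizes: build a map, mark APIC IDs present, and query it.

static int physid_example(void)
{
	physid_mask_t map = PHYSID_MASK_NONE;

	physid_set(0, map);		/* mark APIC ID 0 present */
	physid_set(3, map);		/* mark APIC ID 3 present */
	map = physid_mask_of_physid(5);	/* replace map: only ID 5 set */
	return physid_isset(5, map);	/* 1: ID 5 is in the map */
}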
diff --git a/include/asm-x86/mpspec_32.h b/include/asm-x86/mpspec_32.h deleted file mode 100644 index f21349399d14..000000000000 --- a/include/asm-x86/mpspec_32.h +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | #ifndef __ASM_MPSPEC_H | ||
2 | #define __ASM_MPSPEC_H | ||
3 | |||
4 | #include <linux/cpumask.h> | ||
5 | #include <asm/mpspec_def.h> | ||
6 | #include <mach_mpspec.h> | ||
7 | |||
8 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; | ||
9 | extern int mp_bus_id_to_node [MAX_MP_BUSSES]; | ||
10 | extern int mp_bus_id_to_local [MAX_MP_BUSSES]; | ||
11 | extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | ||
12 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | ||
13 | |||
14 | extern unsigned int def_to_bigsmp; | ||
15 | extern unsigned int boot_cpu_physical_apicid; | ||
16 | extern int smp_found_config; | ||
17 | extern void find_smp_config (void); | ||
18 | extern void get_smp_config (void); | ||
19 | extern int nr_ioapics; | ||
20 | extern int apic_version [MAX_APICS]; | ||
21 | extern int mp_irq_entries; | ||
22 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; | ||
23 | extern int mpc_default_type; | ||
24 | extern unsigned long mp_lapic_addr; | ||
25 | extern int pic_mode; | ||
26 | |||
27 | #ifdef CONFIG_ACPI | ||
28 | extern void mp_register_lapic (u8 id, u8 enabled); | ||
29 | extern void mp_register_lapic_address (u64 address); | ||
30 | extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); | ||
31 | extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); | ||
32 | extern void mp_config_acpi_legacy_irqs (void); | ||
33 | extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); | ||
34 | #endif /* CONFIG_ACPI */ | ||
35 | |||
36 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | ||
37 | |||
38 | struct physid_mask | ||
39 | { | ||
40 | unsigned long mask[PHYSID_ARRAY_SIZE]; | ||
41 | }; | ||
42 | |||
43 | typedef struct physid_mask physid_mask_t; | ||
44 | |||
45 | #define physid_set(physid, map) set_bit(physid, (map).mask) | ||
46 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | ||
47 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | ||
48 | #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) | ||
49 | |||
50 | #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
51 | #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
52 | #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) | ||
53 | #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) | ||
54 | #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) | ||
55 | #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | ||
56 | #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) | ||
57 | #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | ||
58 | #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | ||
59 | #define physids_coerce(map) ((map).mask[0]) | ||
60 | |||
61 | #define physids_promote(physids) \ | ||
62 | ({ \ | ||
63 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
64 | __physid_mask.mask[0] = physids; \ | ||
65 | __physid_mask; \ | ||
66 | }) | ||
67 | |||
68 | #define physid_mask_of_physid(physid) \ | ||
69 | ({ \ | ||
70 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
71 | physid_set(physid, __physid_mask); \ | ||
72 | __physid_mask; \ | ||
73 | }) | ||
74 | |||
75 | #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } | ||
76 | #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } | ||
77 | |||
78 | extern physid_mask_t phys_cpu_present_map; | ||
79 | |||
80 | #endif | ||
81 | |||
diff --git a/include/asm-x86/mpspec_64.h b/include/asm-x86/mpspec_64.h deleted file mode 100644 index 017fddb61dc5..000000000000 --- a/include/asm-x86/mpspec_64.h +++ /dev/null | |||
@@ -1,233 +0,0 @@ | |||
1 | #ifndef __ASM_MPSPEC_H | ||
2 | #define __ASM_MPSPEC_H | ||
3 | |||
4 | /* | ||
5 | * Structure definitions for SMP machines following the | ||
6 | * Intel Multiprocessing Specification 1.1 and 1.4. | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This tag identifies where the SMP configuration | ||
11 | * information is. | ||
12 | */ | ||
13 | |||
14 | #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') | ||
15 | |||
16 | /* | ||
17 | * A maximum of 255 APICs with the current APIC ID architecture. | ||
18 | */ | ||
19 | #define MAX_APICS 255 | ||
20 | |||
21 | struct intel_mp_floating | ||
22 | { | ||
23 | char mpf_signature[4]; /* "_MP_" */ | ||
24 | unsigned int mpf_physptr; /* Configuration table address */ | ||
25 | unsigned char mpf_length; /* Our length (paragraphs) */ | ||
26 | unsigned char mpf_specification;/* Specification version */ | ||
27 | unsigned char mpf_checksum; /* Checksum (makes sum 0) */ | ||
28 | unsigned char mpf_feature1; /* Standard or configuration ? */ | ||
29 | unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ | ||
30 | unsigned char mpf_feature3; /* Unused (0) */ | ||
31 | unsigned char mpf_feature4; /* Unused (0) */ | ||
32 | unsigned char mpf_feature5; /* Unused (0) */ | ||
33 | }; | ||
34 | |||
35 | struct mp_config_table | ||
36 | { | ||
37 | char mpc_signature[4]; | ||
38 | #define MPC_SIGNATURE "PCMP" | ||
39 | unsigned short mpc_length; /* Size of table */ | ||
40 | char mpc_spec; /* 0x01 */ | ||
41 | char mpc_checksum; | ||
42 | char mpc_oem[8]; | ||
43 | char mpc_productid[12]; | ||
44 | unsigned int mpc_oemptr; /* 0 if not present */ | ||
45 | unsigned short mpc_oemsize; /* 0 if not present */ | ||
46 | unsigned short mpc_oemcount; | ||
47 | unsigned int mpc_lapic; /* APIC address */ | ||
48 | unsigned int reserved; | ||
49 | }; | ||
50 | |||
51 | /* Followed by entries */ | ||
52 | |||
53 | #define MP_PROCESSOR 0 | ||
54 | #define MP_BUS 1 | ||
55 | #define MP_IOAPIC 2 | ||
56 | #define MP_INTSRC 3 | ||
57 | #define MP_LINTSRC 4 | ||
58 | |||
59 | struct mpc_config_processor | ||
60 | { | ||
61 | unsigned char mpc_type; | ||
62 | unsigned char mpc_apicid; /* Local APIC number */ | ||
63 | unsigned char mpc_apicver; /* Its version */ | ||
64 | unsigned char mpc_cpuflag; | ||
65 | #define CPU_ENABLED 1 /* Processor is available */ | ||
66 | #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ | ||
67 | unsigned int mpc_cpufeature; | ||
68 | #define CPU_STEPPING_MASK 0x0F | ||
69 | #define CPU_MODEL_MASK 0xF0 | ||
70 | #define CPU_FAMILY_MASK 0xF00 | ||
71 | unsigned int mpc_featureflag; /* CPUID feature value */ | ||
72 | unsigned int mpc_reserved[2]; | ||
73 | }; | ||
74 | |||
75 | struct mpc_config_bus | ||
76 | { | ||
77 | unsigned char mpc_type; | ||
78 | unsigned char mpc_busid; | ||
79 | unsigned char mpc_bustype[6]; | ||
80 | }; | ||
81 | |||
82 | /* List of Bus Type string values, Intel MP Spec. */ | ||
83 | #define BUSTYPE_EISA "EISA" | ||
84 | #define BUSTYPE_ISA "ISA" | ||
85 | #define BUSTYPE_INTERN "INTERN" /* Internal BUS */ | ||
86 | #define BUSTYPE_MCA "MCA" | ||
87 | #define BUSTYPE_VL "VL" /* Local bus */ | ||
88 | #define BUSTYPE_PCI "PCI" | ||
89 | #define BUSTYPE_PCMCIA "PCMCIA" | ||
90 | #define BUSTYPE_CBUS "CBUS" | ||
91 | #define BUSTYPE_CBUSII "CBUSII" | ||
92 | #define BUSTYPE_FUTURE "FUTURE" | ||
93 | #define BUSTYPE_MBI "MBI" | ||
94 | #define BUSTYPE_MBII "MBII" | ||
95 | #define BUSTYPE_MPI "MPI" | ||
96 | #define BUSTYPE_MPSA "MPSA" | ||
97 | #define BUSTYPE_NUBUS "NUBUS" | ||
98 | #define BUSTYPE_TC "TC" | ||
99 | #define BUSTYPE_VME "VME" | ||
100 | #define BUSTYPE_XPRESS "XPRESS" | ||
101 | |||
102 | struct mpc_config_ioapic | ||
103 | { | ||
104 | unsigned char mpc_type; | ||
105 | unsigned char mpc_apicid; | ||
106 | unsigned char mpc_apicver; | ||
107 | unsigned char mpc_flags; | ||
108 | #define MPC_APIC_USABLE 0x01 | ||
109 | unsigned int mpc_apicaddr; | ||
110 | }; | ||
111 | |||
112 | struct mpc_config_intsrc | ||
113 | { | ||
114 | unsigned char mpc_type; | ||
115 | unsigned char mpc_irqtype; | ||
116 | unsigned short mpc_irqflag; | ||
117 | unsigned char mpc_srcbus; | ||
118 | unsigned char mpc_srcbusirq; | ||
119 | unsigned char mpc_dstapic; | ||
120 | unsigned char mpc_dstirq; | ||
121 | }; | ||
122 | |||
123 | enum mp_irq_source_types { | ||
124 | mp_INT = 0, | ||
125 | mp_NMI = 1, | ||
126 | mp_SMI = 2, | ||
127 | mp_ExtINT = 3 | ||
128 | }; | ||
129 | |||
130 | #define MP_IRQDIR_DEFAULT 0 | ||
131 | #define MP_IRQDIR_HIGH 1 | ||
132 | #define MP_IRQDIR_LOW 3 | ||
133 | |||
134 | |||
135 | struct mpc_config_lintsrc | ||
136 | { | ||
137 | unsigned char mpc_type; | ||
138 | unsigned char mpc_irqtype; | ||
139 | unsigned short mpc_irqflag; | ||
140 | unsigned char mpc_srcbusid; | ||
141 | unsigned char mpc_srcbusirq; | ||
142 | unsigned char mpc_destapic; | ||
143 | #define MP_APIC_ALL 0xFF | ||
144 | unsigned char mpc_destapiclint; | ||
145 | }; | ||
146 | |||
147 | /* | ||
148 | * Default configurations | ||
149 | * | ||
150 | * 1 2 CPU ISA 82489DX | ||
151 | * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining | ||
152 | * 3 2 CPU EISA 82489DX | ||
153 | * 4 2 CPU MCA 82489DX | ||
154 | * 5 2 CPU ISA+PCI | ||
155 | * 6 2 CPU EISA+PCI | ||
156 | * 7 2 CPU MCA+PCI | ||
157 | */ | ||
158 | |||
159 | #define MAX_MP_BUSSES 256 | ||
160 | /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ | ||
161 | #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) | ||
162 | extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | ||
163 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | ||
164 | |||
165 | extern unsigned int boot_cpu_physical_apicid; | ||
166 | extern int smp_found_config; | ||
167 | extern void find_smp_config (void); | ||
168 | extern void get_smp_config (void); | ||
169 | extern int nr_ioapics; | ||
170 | extern unsigned char apic_version [MAX_APICS]; | ||
171 | extern int mp_irq_entries; | ||
172 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; | ||
173 | extern int mpc_default_type; | ||
174 | extern unsigned long mp_lapic_addr; | ||
175 | |||
176 | #ifdef CONFIG_ACPI | ||
177 | extern void mp_register_lapic (u8 id, u8 enabled); | ||
178 | extern void mp_register_lapic_address (u64 address); | ||
179 | |||
180 | extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); | ||
181 | extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); | ||
182 | extern void mp_config_acpi_legacy_irqs (void); | ||
183 | extern int mp_register_gsi (u32 gsi, int triggering, int polarity); | ||
184 | #endif | ||
185 | |||
186 | extern int using_apic_timer; | ||
187 | |||
188 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | ||
189 | |||
190 | struct physid_mask | ||
191 | { | ||
192 | unsigned long mask[PHYSID_ARRAY_SIZE]; | ||
193 | }; | ||
194 | |||
195 | typedef struct physid_mask physid_mask_t; | ||
196 | |||
197 | #define physid_set(physid, map) set_bit(physid, (map).mask) | ||
198 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | ||
199 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | ||
200 | #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) | ||
201 | |||
202 | #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
203 | #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | ||
204 | #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) | ||
205 | #define physids_complement(dst, src) bitmap_complement((dst).mask, (src).mask, MAX_APICS) | ||
206 | #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) | ||
207 | #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | ||
208 | #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) | ||
209 | #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | ||
210 | #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | ||
211 | #define physids_coerce(map) ((map).mask[0]) | ||
212 | |||
213 | #define physids_promote(physids) \ | ||
214 | ({ \ | ||
215 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
216 | __physid_mask.mask[0] = physids; \ | ||
217 | __physid_mask; \ | ||
218 | }) | ||
219 | |||
220 | #define physid_mask_of_physid(physid) \ | ||
221 | ({ \ | ||
222 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | ||
223 | physid_set(physid, __physid_mask); \ | ||
224 | __physid_mask; \ | ||
225 | }) | ||
226 | |||
227 | #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } | ||
228 | #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } | ||
229 | |||
230 | extern physid_mask_t phys_cpu_present_map; | ||
231 | |||
232 | #endif | ||
233 | |||
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 13bafb16e7af..3504617fe648 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h | |||
@@ -8,52 +8,68 @@ | |||
8 | 8 | ||
9 | /* | 9 | /* |
10 | * This tag identifies where the SMP configuration | 10 | * This tag identifies where the SMP configuration |
11 | * information is. | 11 | * information is. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') | 14 | #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') |
15 | 15 | ||
16 | #define MAX_MPC_ENTRY 1024 | 16 | #ifdef CONFIG_X86_32 |
17 | #define MAX_APICS 256 | 17 | # define MAX_MPC_ENTRY 1024 |
18 | # define MAX_APICS 256 | ||
19 | #else | ||
20 | /* | ||
21 | * A maximum of 255 APICs with the current APIC ID architecture. | ||
22 | */ | ||
23 | # define MAX_APICS 255 | ||
24 | #endif | ||
18 | 25 | ||
19 | struct intel_mp_floating | 26 | struct intel_mp_floating |
20 | { | 27 | { |
21 | char mpf_signature[4]; /* "_MP_" */ | 28 | char mpf_signature[4]; /* "_MP_" */ |
22 | unsigned long mpf_physptr; /* Configuration table address */ | 29 | unsigned int mpf_physptr; /* Configuration table address */ |
23 | unsigned char mpf_length; /* Our length (paragraphs) */ | 30 | unsigned char mpf_length; /* Our length (paragraphs) */ |
24 | unsigned char mpf_specification;/* Specification version */ | 31 | unsigned char mpf_specification;/* Specification version */ |
25 | unsigned char mpf_checksum; /* Checksum (makes sum 0) */ | 32 | unsigned char mpf_checksum; /* Checksum (makes sum 0) */ |
26 | unsigned char mpf_feature1; /* Standard or configuration ? */ | 33 | unsigned char mpf_feature1; /* Standard or configuration ? */ |
27 | unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ | 34 | unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ |
28 | unsigned char mpf_feature3; /* Unused (0) */ | 35 | unsigned char mpf_feature3; /* Unused (0) */ |
29 | unsigned char mpf_feature4; /* Unused (0) */ | 36 | unsigned char mpf_feature4; /* Unused (0) */ |
30 | unsigned char mpf_feature5; /* Unused (0) */ | 37 | unsigned char mpf_feature5; /* Unused (0) */ |
31 | }; | 38 | }; |
32 | 39 | ||
40 | #define MPC_SIGNATURE "PCMP" | ||
41 | |||
33 | struct mp_config_table | 42 | struct mp_config_table |
34 | { | 43 | { |
35 | char mpc_signature[4]; | 44 | char mpc_signature[4]; |
36 | #define MPC_SIGNATURE "PCMP" | ||
37 | unsigned short mpc_length; /* Size of table */ | 45 | unsigned short mpc_length; /* Size of table */ |
38 | char mpc_spec; /* 0x01 */ | 46 | char mpc_spec; /* 0x01 */ |
39 | char mpc_checksum; | 47 | char mpc_checksum; |
40 | char mpc_oem[8]; | 48 | char mpc_oem[8]; |
41 | char mpc_productid[12]; | 49 | char mpc_productid[12]; |
42 | unsigned long mpc_oemptr; /* 0 if not present */ | 50 | unsigned int mpc_oemptr; /* 0 if not present */ |
43 | unsigned short mpc_oemsize; /* 0 if not present */ | 51 | unsigned short mpc_oemsize; /* 0 if not present */ |
44 | unsigned short mpc_oemcount; | 52 | unsigned short mpc_oemcount; |
45 | unsigned long mpc_lapic; /* APIC address */ | 53 | unsigned int mpc_lapic; /* APIC address */ |
46 | unsigned long reserved; | 54 | unsigned int reserved; |
47 | }; | 55 | }; |
48 | 56 | ||
49 | /* Followed by entries */ | 57 | /* Followed by entries */ |
50 | 58 | ||
51 | #define MP_PROCESSOR 0 | 59 | #define MP_PROCESSOR 0 |
52 | #define MP_BUS 1 | 60 | #define MP_BUS 1 |
53 | #define MP_IOAPIC 2 | 61 | #define MP_IOAPIC 2 |
54 | #define MP_INTSRC 3 | 62 | #define MP_INTSRC 3 |
55 | #define MP_LINTSRC 4 | 63 | #define MP_LINTSRC 4 |
56 | #define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ | 64 | /* Used by IBM NUMA-Q to describe node locality */ |
65 | #define MP_TRANSLATION 192 | ||
66 | |||
67 | #define CPU_ENABLED 1 /* Processor is available */ | ||
68 | #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ | ||
69 | |||
70 | #define CPU_STEPPING_MASK 0x000F | ||
71 | #define CPU_MODEL_MASK 0x00F0 | ||
72 | #define CPU_FAMILY_MASK 0x0F00 | ||
57 | 73 | ||
58 | struct mpc_config_processor | 74 | struct mpc_config_processor |
59 | { | 75 | { |
@@ -61,14 +77,9 @@ struct mpc_config_processor | |||
61 | unsigned char mpc_apicid; /* Local APIC number */ | 77 | unsigned char mpc_apicid; /* Local APIC number */ |
62 | unsigned char mpc_apicver; /* Its version */ | 78 | unsigned char mpc_apicver; /* Its version */
63 | unsigned char mpc_cpuflag; | 79 | unsigned char mpc_cpuflag; |
64 | #define CPU_ENABLED 1 /* Processor is available */ | 80 | unsigned int mpc_cpufeature; |
65 | #define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ | 81 | unsigned int mpc_featureflag; /* CPUID feature value */ |
66 | unsigned long mpc_cpufeature; | 82 | unsigned int mpc_reserved[2]; |
67 | #define CPU_STEPPING_MASK 0x0F | ||
68 | #define CPU_MODEL_MASK 0xF0 | ||
69 | #define CPU_FAMILY_MASK 0xF00 | ||
70 | unsigned long mpc_featureflag; /* CPUID feature value */ | ||
71 | unsigned long mpc_reserved[2]; | ||
72 | }; | 83 | }; |
73 | 84 | ||
74 | struct mpc_config_bus | 85 | struct mpc_config_bus |
@@ -98,14 +109,15 @@ struct mpc_config_bus | |||
98 | #define BUSTYPE_VME "VME" | 109 | #define BUSTYPE_VME "VME" |
99 | #define BUSTYPE_XPRESS "XPRESS" | 110 | #define BUSTYPE_XPRESS "XPRESS" |
100 | 111 | ||
112 | #define MPC_APIC_USABLE 0x01 | ||
113 | |||
101 | struct mpc_config_ioapic | 114 | struct mpc_config_ioapic |
102 | { | 115 | { |
103 | unsigned char mpc_type; | 116 | unsigned char mpc_type; |
104 | unsigned char mpc_apicid; | 117 | unsigned char mpc_apicid; |
105 | unsigned char mpc_apicver; | 118 | unsigned char mpc_apicver; |
106 | unsigned char mpc_flags; | 119 | unsigned char mpc_flags; |
107 | #define MPC_APIC_USABLE 0x01 | 120 | unsigned int mpc_apicaddr; |
108 | unsigned long mpc_apicaddr; | ||
109 | }; | 121 | }; |
110 | 122 | ||
111 | struct mpc_config_intsrc | 123 | struct mpc_config_intsrc |
@@ -130,6 +142,7 @@ enum mp_irq_source_types { | |||
130 | #define MP_IRQDIR_HIGH 1 | 142 | #define MP_IRQDIR_HIGH 1 |
131 | #define MP_IRQDIR_LOW 3 | 143 | #define MP_IRQDIR_LOW 3 |
132 | 144 | ||
145 | #define MP_APIC_ALL 0xFF | ||
133 | 146 | ||
134 | struct mpc_config_lintsrc | 147 | struct mpc_config_lintsrc |
135 | { | 148 | { |
@@ -138,15 +151,15 @@ struct mpc_config_lintsrc | |||
138 | unsigned short mpc_irqflag; | 151 | unsigned short mpc_irqflag; |
139 | unsigned char mpc_srcbusid; | 152 | unsigned char mpc_srcbusid; |
140 | unsigned char mpc_srcbusirq; | 153 | unsigned char mpc_srcbusirq; |
141 | unsigned char mpc_destapic; | 154 | unsigned char mpc_destapic; |
142 | #define MP_APIC_ALL 0xFF | ||
143 | unsigned char mpc_destapiclint; | 155 | unsigned char mpc_destapiclint; |
144 | }; | 156 | }; |
145 | 157 | ||
158 | #define MPC_OEM_SIGNATURE "_OEM" | ||
159 | |||
146 | struct mp_config_oemtable | 160 | struct mp_config_oemtable |
147 | { | 161 | { |
148 | char oem_signature[4]; | 162 | char oem_signature[4]; |
149 | #define MPC_OEM_SIGNATURE "_OEM" | ||
150 | unsigned short oem_length; /* Size of table */ | 163 | unsigned short oem_length; /* Size of table */ |
151 | char oem_rev; /* 0x01 */ | 164 | char oem_rev; /* 0x01 */ |
152 | char oem_checksum; | 165 | char oem_checksum; |
@@ -155,13 +168,13 @@ struct mp_config_oemtable | |||
155 | 168 | ||
156 | struct mpc_config_translation | 169 | struct mpc_config_translation |
157 | { | 170 | { |
158 | unsigned char mpc_type; | 171 | unsigned char mpc_type; |
159 | unsigned char trans_len; | 172 | unsigned char trans_len; |
160 | unsigned char trans_type; | 173 | unsigned char trans_type; |
161 | unsigned char trans_quad; | 174 | unsigned char trans_quad; |
162 | unsigned char trans_global; | 175 | unsigned char trans_global; |
163 | unsigned char trans_local; | 176 | unsigned char trans_local; |
164 | unsigned short trans_reserved; | 177 | unsigned short trans_reserved; |
165 | }; | 178 | }; |
166 | 179 | ||
167 | /* | 180 | /* |
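A hedged sketch, not part of the patch, of how boot code typically validates an intel_mp_floating structure found in low memory: the "_MP_" signature must match, and a byte checksum over mpf_length 16-byte paragraphs must sum to zero (which is what the mpf_checksum comment above means).

#include <linux/string.h>	/* memcmp */

static int mpf_looks_valid(const struct intel_mp_floating *mpf)
{
	const unsigned char *p = (const unsigned char *)mpf;
	unsigned int len = mpf->mpf_length * 16;
	unsigned char sum = 0;

	while (len--)
		sum += *p++;
	return !memcmp(mpf->mpf_signature, "_MP_", 4) && sum == 0;
}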
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index a4944732be04..fae118a25278 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
@@ -63,6 +63,13 @@ | |||
63 | #define MSR_IA32_LASTINTFROMIP 0x000001dd | 63 | #define MSR_IA32_LASTINTFROMIP 0x000001dd |
64 | #define MSR_IA32_LASTINTTOIP 0x000001de | 64 | #define MSR_IA32_LASTINTTOIP 0x000001de |
65 | 65 | ||
66 | /* DEBUGCTLMSR bits (others vary by model): */ | ||
67 | #define _DEBUGCTLMSR_LBR 0 /* last branch recording */ | ||
68 | #define _DEBUGCTLMSR_BTF 1 /* single-step on branches */ | ||
69 | |||
70 | #define DEBUGCTLMSR_LBR (1UL << _DEBUGCTLMSR_LBR) | ||
71 | #define DEBUGCTLMSR_BTF (1UL << _DEBUGCTLMSR_BTF) | ||
72 | |||
66 | #define MSR_IA32_MC0_CTL 0x00000400 | 73 | #define MSR_IA32_MC0_CTL 0x00000400 |
67 | #define MSR_IA32_MC0_STATUS 0x00000401 | 74 | #define MSR_IA32_MC0_STATUS 0x00000401 |
68 | #define MSR_IA32_MC0_ADDR 0x00000402 | 75 | #define MSR_IA32_MC0_ADDR 0x00000402 |
@@ -88,6 +95,14 @@ | |||
88 | #define MSR_AMD64_IBSDCPHYSAD 0xc0011039 | 95 | #define MSR_AMD64_IBSDCPHYSAD 0xc0011039 |
89 | #define MSR_AMD64_IBSCTL 0xc001103a | 96 | #define MSR_AMD64_IBSCTL 0xc001103a |
90 | 97 | ||
98 | /* Fam 10h MSRs */ | ||
99 | #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 | ||
100 | #define FAM10H_MMIO_CONF_ENABLE (1<<0) | ||
101 | #define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf | ||
102 | #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 | ||
103 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff | ||
104 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 | ||
105 | |||
91 | /* K8 MSRs */ | 106 | /* K8 MSRs */ |
92 | #define MSR_K8_TOP_MEM1 0xc001001a | 107 | #define MSR_K8_TOP_MEM1 0xc001001a |
93 | #define MSR_K8_TOP_MEM2 0xc001001d | 108 | #define MSR_K8_TOP_MEM2 0xc001001d |
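A hypothetical decode of MSR_FAM10H_MMIO_CONF_BASE using the new masks and shifts (the field layout is inferred from those definitions, so treat this as a sketch):

static u64 fam10h_mmconf_base(u64 val)
{
	if (!(val & FAM10H_MMIO_CONF_ENABLE))
		return 0;		/* MMCONFIG not enabled */
	return ((val >> FAM10H_MMIO_CONF_BASE_SHIFT) &
		FAM10H_MMIO_CONF_BASE_MASK) << FAM10H_MMIO_CONF_BASE_SHIFT;
}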
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index ba4b31432120..3ca29ebebbb1 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h | |||
@@ -3,77 +3,107 @@ | |||
3 | 3 | ||
4 | #include <asm/msr-index.h> | 4 | #include <asm/msr-index.h> |
5 | 5 | ||
6 | #ifdef __i386__ | 6 | #ifndef __ASSEMBLY__ |
7 | # include <linux/types.h> | ||
8 | #endif | ||
7 | 9 | ||
8 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
9 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
10 | 12 | ||
13 | #include <asm/asm.h> | ||
11 | #include <asm/errno.h> | 14 | #include <asm/errno.h> |
12 | 15 | ||
16 | static inline unsigned long long native_read_tscp(unsigned int *aux) | ||
17 | { | ||
18 | unsigned long low, high; | ||
19 | asm volatile (".byte 0x0f,0x01,0xf9" | ||
20 | : "=a" (low), "=d" (high), "=c" (*aux)); | ||
21 | return low | ((u64)high >> 32); | ||
22 | } | ||
23 | |||
24 | /* | ||
25 | * The i386 calling convention returns a 64-bit value in edx:eax, while | ||
26 | * x86_64 returns it in rax. Also, the "A" constraint does not really | ||
27 | * mean rdx:rax on x86_64, so we need specialized behaviour for each | ||
28 | * architecture. | ||
29 | */ | ||
30 | #ifdef CONFIG_X86_64 | ||
31 | #define DECLARE_ARGS(val, low, high) unsigned low, high | ||
32 | #define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32)) | ||
33 | #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) | ||
34 | #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) | ||
35 | #else | ||
36 | #define DECLARE_ARGS(val, low, high) unsigned long long val | ||
37 | #define EAX_EDX_VAL(val, low, high) (val) | ||
38 | #define EAX_EDX_ARGS(val, low, high) "A" (val) | ||
39 | #define EAX_EDX_RET(val, low, high) "=A" (val) | ||
40 | #endif | ||
41 | |||
13 | static inline unsigned long long native_read_msr(unsigned int msr) | 42 | static inline unsigned long long native_read_msr(unsigned int msr) |
14 | { | 43 | { |
15 | unsigned long long val; | 44 | DECLARE_ARGS(val, low, high); |
16 | 45 | ||
17 | asm volatile("rdmsr" : "=A" (val) : "c" (msr)); | 46 | asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr)); |
18 | return val; | 47 | return EAX_EDX_VAL(val, low, high); |
19 | } | 48 | } |
20 | 49 | ||
21 | static inline unsigned long long native_read_msr_safe(unsigned int msr, | 50 | static inline unsigned long long native_read_msr_safe(unsigned int msr, |
22 | int *err) | 51 | int *err) |
23 | { | 52 | { |
24 | unsigned long long val; | 53 | DECLARE_ARGS(val, low, high); |
25 | 54 | ||
26 | asm volatile("2: rdmsr ; xorl %0,%0\n" | 55 | asm volatile("2: rdmsr ; xor %0,%0\n" |
27 | "1:\n\t" | 56 | "1:\n\t" |
28 | ".section .fixup,\"ax\"\n\t" | 57 | ".section .fixup,\"ax\"\n\t" |
29 | "3: movl %3,%0 ; jmp 1b\n\t" | 58 | "3: mov %3,%0 ; jmp 1b\n\t" |
30 | ".previous\n\t" | 59 | ".previous\n\t" |
31 | ".section __ex_table,\"a\"\n" | 60 | _ASM_EXTABLE(2b,3b) |
32 | " .align 4\n\t" | 61 | : "=r" (*err), EAX_EDX_RET(val, low, high) |
33 | " .long 2b,3b\n\t" | ||
34 | ".previous" | ||
35 | : "=r" (*err), "=A" (val) | ||
36 | : "c" (msr), "i" (-EFAULT)); | 62 | : "c" (msr), "i" (-EFAULT)); |
37 | 63 | return EAX_EDX_VAL(val, low, high); | |
38 | return val; | ||
39 | } | 64 | } |
40 | 65 | ||
41 | static inline void native_write_msr(unsigned int msr, unsigned long long val) | 66 | static inline void native_write_msr(unsigned int msr, |
67 | unsigned low, unsigned high) | ||
42 | { | 68 | { |
43 | asm volatile("wrmsr" : : "c" (msr), "A"(val)); | 69 | asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high)); |
44 | } | 70 | } |
45 | 71 | ||
46 | static inline int native_write_msr_safe(unsigned int msr, | 72 | static inline int native_write_msr_safe(unsigned int msr, |
47 | unsigned long long val) | 73 | unsigned low, unsigned high) |
48 | { | 74 | { |
49 | int err; | 75 | int err; |
50 | asm volatile("2: wrmsr ; xorl %0,%0\n" | 76 | asm volatile("2: wrmsr ; xor %0,%0\n" |
51 | "1:\n\t" | 77 | "1:\n\t" |
52 | ".section .fixup,\"ax\"\n\t" | 78 | ".section .fixup,\"ax\"\n\t" |
53 | "3: movl %4,%0 ; jmp 1b\n\t" | 79 | "3: mov %4,%0 ; jmp 1b\n\t" |
54 | ".previous\n\t" | 80 | ".previous\n\t" |
55 | ".section __ex_table,\"a\"\n" | 81 | _ASM_EXTABLE(2b,3b) |
56 | " .align 4\n\t" | ||
57 | " .long 2b,3b\n\t" | ||
58 | ".previous" | ||
59 | : "=a" (err) | 82 | : "=a" (err) |
60 | : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)), | 83 | : "c" (msr), "0" (low), "d" (high), |
61 | "i" (-EFAULT)); | 84 | "i" (-EFAULT)); |
62 | return err; | 85 | return err; |
63 | } | 86 | } |
64 | 87 | ||
65 | static inline unsigned long long native_read_tsc(void) | 88 | extern unsigned long long native_read_tsc(void); |
89 | |||
90 | static __always_inline unsigned long long __native_read_tsc(void) | ||
66 | { | 91 | { |
67 | unsigned long long val; | 92 | DECLARE_ARGS(val, low, high); |
68 | asm volatile("rdtsc" : "=A" (val)); | 93 | |
69 | return val; | 94 | rdtsc_barrier(); |
95 | asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); | ||
96 | rdtsc_barrier(); | ||
97 | |||
98 | return EAX_EDX_VAL(val, low, high); | ||
70 | } | 99 | } |
71 | 100 | ||
72 | static inline unsigned long long native_read_pmc(void) | 101 | static inline unsigned long long native_read_pmc(int counter) |
73 | { | 102 | { |
74 | unsigned long long val; | 103 | DECLARE_ARGS(val, low, high); |
75 | asm volatile("rdpmc" : "=A" (val)); | 104 | |
76 | return val; | 105 | asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); |
106 | return EAX_EDX_VAL(val, low, high); | ||
77 | } | 107 | } |
78 | 108 | ||
79 | #ifdef CONFIG_PARAVIRT | 109 | #ifdef CONFIG_PARAVIRT |
@@ -93,20 +123,21 @@ static inline unsigned long long native_read_pmc(void) | |||
93 | (val2) = (u32)(__val >> 32); \ | 123 | (val2) = (u32)(__val >> 32); \ |
94 | } while(0) | 124 | } while(0) |
95 | 125 | ||
96 | static inline void wrmsr(u32 __msr, u32 __low, u32 __high) | 126 | static inline void wrmsr(unsigned msr, unsigned low, unsigned high) |
97 | { | 127 | { |
98 | native_write_msr(__msr, ((u64)__high << 32) | __low); | 128 | native_write_msr(msr, low, high); |
99 | } | 129 | } |
100 | 130 | ||
101 | #define rdmsrl(msr,val) \ | 131 | #define rdmsrl(msr,val) \ |
102 | ((val) = native_read_msr(msr)) | 132 | ((val) = native_read_msr(msr)) |
103 | 133 | ||
104 | #define wrmsrl(msr,val) native_write_msr(msr, val) | 134 | #define wrmsrl(msr, val) \ |
135 | native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32)) | ||
105 | 136 | ||
106 | /* wrmsr with exception handling */ | 137 | /* wrmsr with exception handling */ |
107 | static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) | 138 | static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) |
108 | { | 139 | { |
109 | return native_write_msr_safe(__msr, ((u64)__high << 32) | __low); | 140 | return native_write_msr_safe(msr, low, high); |
110 | } | 141 | } |
111 | 142 | ||
112 | /* rdmsr with exception handling */ | 143 | /* rdmsr with exception handling */ |
@@ -125,201 +156,31 @@ static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) | |||
125 | #define rdtscll(val) \ | 156 | #define rdtscll(val) \ |
126 | ((val) = native_read_tsc()) | 157 | ((val) = native_read_tsc()) |
127 | 158 | ||
128 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | ||
129 | |||
130 | #define rdpmc(counter,low,high) \ | 159 | #define rdpmc(counter,low,high) \ |
131 | do { \ | 160 | do { \ |
132 | u64 _l = native_read_pmc(); \ | 161 | u64 _l = native_read_pmc(counter); \ |
133 | (low) = (u32)_l; \ | 162 | (low) = (u32)_l; \ |
134 | (high) = (u32)(_l >> 32); \ | 163 | (high) = (u32)(_l >> 32); \ |
135 | } while(0) | 164 | } while(0) |
136 | #endif /* !CONFIG_PARAVIRT */ | ||
137 | 165 | ||
138 | #ifdef CONFIG_SMP | 166 | #define rdtscp(low, high, aux) \ |
139 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 167 | do { \ |
140 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 168 | unsigned long long _val = native_read_tscp(&(aux)); \ |
141 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 169 | (low) = (u32)_val; \ |
142 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 170 | (high) = (u32)(_val >> 32); \ |
143 | #else /* CONFIG_SMP */ | 171 | } while (0) |
144 | static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | ||
145 | { | ||
146 | rdmsr(msr_no, *l, *h); | ||
147 | } | ||
148 | static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | ||
149 | { | ||
150 | wrmsr(msr_no, l, h); | ||
151 | } | ||
152 | static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | ||
153 | { | ||
154 | return rdmsr_safe(msr_no, l, h); | ||
155 | } | ||
156 | static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | ||
157 | { | ||
158 | return wrmsr_safe(msr_no, l, h); | ||
159 | } | ||
160 | #endif /* CONFIG_SMP */ | ||
161 | #endif /* ! __ASSEMBLY__ */ | ||
162 | #endif /* __KERNEL__ */ | ||
163 | |||
164 | #else /* __i386__ */ | ||
165 | |||
166 | #ifndef __ASSEMBLY__ | ||
167 | #include <linux/errno.h> | ||
168 | /* | ||
169 | * Access to machine-specific registers (available on 586 and better only) | ||
170 | * Note: the rd* operations modify the parameters directly (without using | ||
171 | * pointer indirection), this allows gcc to optimize better | ||
172 | */ | ||
173 | 172 | ||
174 | #define rdmsr(msr,val1,val2) \ | 173 | #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) |
175 | __asm__ __volatile__("rdmsr" \ | ||
176 | : "=a" (val1), "=d" (val2) \ | ||
177 | : "c" (msr)) | ||
178 | 174 | ||
175 | #endif /* !CONFIG_PARAVIRT */ | ||
179 | 176 | ||
180 | #define rdmsrl(msr,val) do { unsigned long a__,b__; \ | ||
181 | __asm__ __volatile__("rdmsr" \ | ||
182 | : "=a" (a__), "=d" (b__) \ | ||
183 | : "c" (msr)); \ | ||
184 | val = a__ | (b__<<32); \ | ||
185 | } while(0) | ||
186 | |||
187 | #define wrmsr(msr,val1,val2) \ | ||
188 | __asm__ __volatile__("wrmsr" \ | ||
189 | : /* no outputs */ \ | ||
190 | : "c" (msr), "a" (val1), "d" (val2)) | ||
191 | |||
192 | #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) | ||
193 | |||
194 | /* wrmsr with exception handling */ | ||
195 | #define wrmsr_safe(msr,a,b) ({ int ret__; \ | ||
196 | asm volatile("2: wrmsr ; xorl %0,%0\n" \ | ||
197 | "1:\n\t" \ | ||
198 | ".section .fixup,\"ax\"\n\t" \ | ||
199 | "3: movl %4,%0 ; jmp 1b\n\t" \ | ||
200 | ".previous\n\t" \ | ||
201 | ".section __ex_table,\"a\"\n" \ | ||
202 | " .align 8\n\t" \ | ||
203 | " .quad 2b,3b\n\t" \ | ||
204 | ".previous" \ | ||
205 | : "=a" (ret__) \ | ||
206 | : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ | ||
207 | ret__; }) | ||
208 | 177 | ||
209 | #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) | 178 | #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) |
210 | 179 | ||
211 | #define rdmsr_safe(msr,a,b) \ | ||
212 | ({ int ret__; \ | ||
213 | asm volatile ("1: rdmsr\n" \ | ||
214 | "2:\n" \ | ||
215 | ".section .fixup,\"ax\"\n" \ | ||
216 | "3: movl %4,%0\n" \ | ||
217 | " jmp 2b\n" \ | ||
218 | ".previous\n" \ | ||
219 | ".section __ex_table,\"a\"\n" \ | ||
220 | " .align 8\n" \ | ||
221 | " .quad 1b,3b\n" \ | ||
222 | ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \ | ||
223 | :"c"(msr), "i"(-EIO), "0"(0)); \ | ||
224 | ret__; }) | ||
225 | |||
226 | #define rdtsc(low,high) \ | ||
227 | __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) | ||
228 | |||
229 | #define rdtscl(low) \ | ||
230 | __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx") | ||
231 | |||
232 | #define rdtscp(low,high,aux) \ | ||
233 | asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux)) | ||
234 | |||
235 | #define rdtscll(val) do { \ | ||
236 | unsigned int __a,__d; \ | ||
237 | asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \ | ||
238 | (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \ | ||
239 | } while(0) | ||
240 | |||
241 | #define rdtscpll(val, aux) do { \ | ||
242 | unsigned long __a, __d; \ | ||
243 | asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \ | ||
244 | (val) = (__d << 32) | __a; \ | ||
245 | } while (0) | ||
246 | |||
247 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | 180 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) |
248 | 181 | ||
249 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) | 182 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) |
250 | 183 | ||
251 | #define rdpmc(counter,low,high) \ | ||
252 | __asm__ __volatile__("rdpmc" \ | ||
253 | : "=a" (low), "=d" (high) \ | ||
254 | : "c" (counter)) | ||
255 | |||
256 | static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, | ||
257 | unsigned int *ecx, unsigned int *edx) | ||
258 | { | ||
259 | __asm__("cpuid" | ||
260 | : "=a" (*eax), | ||
261 | "=b" (*ebx), | ||
262 | "=c" (*ecx), | ||
263 | "=d" (*edx) | ||
264 | : "0" (op)); | ||
265 | } | ||
266 | |||
267 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
268 | static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | ||
269 | int *edx) | ||
270 | { | ||
271 | __asm__("cpuid" | ||
272 | : "=a" (*eax), | ||
273 | "=b" (*ebx), | ||
274 | "=c" (*ecx), | ||
275 | "=d" (*edx) | ||
276 | : "0" (op), "c" (count)); | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * CPUID functions returning a single datum | ||
281 | */ | ||
282 | static inline unsigned int cpuid_eax(unsigned int op) | ||
283 | { | ||
284 | unsigned int eax; | ||
285 | |||
286 | __asm__("cpuid" | ||
287 | : "=a" (eax) | ||
288 | : "0" (op) | ||
289 | : "bx", "cx", "dx"); | ||
290 | return eax; | ||
291 | } | ||
292 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
293 | { | ||
294 | unsigned int eax, ebx; | ||
295 | |||
296 | __asm__("cpuid" | ||
297 | : "=a" (eax), "=b" (ebx) | ||
298 | : "0" (op) | ||
299 | : "cx", "dx" ); | ||
300 | return ebx; | ||
301 | } | ||
302 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
303 | { | ||
304 | unsigned int eax, ecx; | ||
305 | |||
306 | __asm__("cpuid" | ||
307 | : "=a" (eax), "=c" (ecx) | ||
308 | : "0" (op) | ||
309 | : "bx", "dx" ); | ||
310 | return ecx; | ||
311 | } | ||
312 | static inline unsigned int cpuid_edx(unsigned int op) | ||
313 | { | ||
314 | unsigned int eax, edx; | ||
315 | |||
316 | __asm__("cpuid" | ||
317 | : "=a" (eax), "=d" (edx) | ||
318 | : "0" (op) | ||
319 | : "bx", "cx"); | ||
320 | return edx; | ||
321 | } | ||
322 | |||
323 | #ifdef CONFIG_SMP | 184 | #ifdef CONFIG_SMP |
324 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 185 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
325 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 186 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
@@ -343,8 +204,8 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
343 | return wrmsr_safe(msr_no, l, h); | 204 | return wrmsr_safe(msr_no, l, h); |
344 | } | 205 | } |
345 | #endif /* CONFIG_SMP */ | 206 | #endif /* CONFIG_SMP */ |
346 | #endif /* __ASSEMBLY__ */ | 207 | #endif /* __ASSEMBLY__ */ |
208 | #endif /* __KERNEL__ */ | ||
347 | 209 | ||
348 | #endif /* !__i386__ */ | ||
349 | 210 | ||
350 | #endif | 211 | #endif |
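
The unified msr.h keeps the exception-handling variants and the *_on_cpu() wrappers with the same signatures on both architectures. As a quick illustration, here is a sketch (not part of this patch) of how a caller probes an MSR that may not exist; the function name is hypothetical, and 0x17 is MSR_IA32_PLATFORM_ID:

#include <linux/errno.h>
#include <asm/msr.h>

/* Sketch only: rdmsr_safe() returns non-zero if the rdmsr raised #GP,
 * so the caller can fall back instead of oopsing. */
static int probe_platform_id(u32 *lo, u32 *hi)
{
	if (rdmsr_safe(0x17, lo, hi))	/* 0x17 = MSR_IA32_PLATFORM_ID */
		return -EIO;
	return 0;
}
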
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index e8320e4e6ca2..319d065800be 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h | |||
@@ -89,24 +89,25 @@ struct mtrr_gentry | |||
89 | extern void mtrr_save_fixed_ranges(void *); | 89 | extern void mtrr_save_fixed_ranges(void *); |
90 | extern void mtrr_save_state(void); | 90 | extern void mtrr_save_state(void); |
91 | extern int mtrr_add (unsigned long base, unsigned long size, | 91 | extern int mtrr_add (unsigned long base, unsigned long size, |
92 | unsigned int type, char increment); | 92 | unsigned int type, bool increment); |
93 | extern int mtrr_add_page (unsigned long base, unsigned long size, | 93 | extern int mtrr_add_page (unsigned long base, unsigned long size, |
94 | unsigned int type, char increment); | 94 | unsigned int type, bool increment); |
95 | extern int mtrr_del (int reg, unsigned long base, unsigned long size); | 95 | extern int mtrr_del (int reg, unsigned long base, unsigned long size); |
96 | extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); | 96 | extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); |
97 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); | 97 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); |
98 | extern void mtrr_ap_init(void); | 98 | extern void mtrr_ap_init(void); |
99 | extern void mtrr_bp_init(void); | 99 | extern void mtrr_bp_init(void); |
100 | extern int mtrr_trim_uncached_memory(unsigned long end_pfn); | ||
100 | # else | 101 | # else |
101 | #define mtrr_save_fixed_ranges(arg) do {} while (0) | 102 | #define mtrr_save_fixed_ranges(arg) do {} while (0) |
102 | #define mtrr_save_state() do {} while (0) | 103 | #define mtrr_save_state() do {} while (0) |
103 | static __inline__ int mtrr_add (unsigned long base, unsigned long size, | 104 | static __inline__ int mtrr_add (unsigned long base, unsigned long size, |
104 | unsigned int type, char increment) | 105 | unsigned int type, bool increment) |
105 | { | 106 | { |
106 | return -ENODEV; | 107 | return -ENODEV; |
107 | } | 108 | } |
108 | static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, | 109 | static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, |
109 | unsigned int type, char increment) | 110 | unsigned int type, bool increment) |
110 | { | 111 | { |
111 | return -ENODEV; | 112 | return -ENODEV; |
112 | } | 113 | } |
@@ -120,7 +121,10 @@ static __inline__ int mtrr_del_page (int reg, unsigned long base, | |||
120 | { | 121 | { |
121 | return -ENODEV; | 122 | return -ENODEV; |
122 | } | 123 | } |
123 | 124 | static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) | |
125 | { | ||
126 | return 0; | ||
127 | } | ||
124 | static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} | 128 | static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} |
125 | 129 | ||
126 | #define mtrr_ap_init() do {} while (0) | 130 | #define mtrr_ap_init() do {} while (0) |
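
Since the increment parameter is now a bool, call sites read unambiguously at a glance. A hypothetical caller (base, size, and the function name are illustrative only):

#include <linux/kernel.h>
#include <asm/mtrr.h>

/* Sketch: map a framebuffer write-combining. mtrr_add() returns the
 * MTRR register number on success, or a negative errno. */
static void map_fb_wc(void)
{
	int reg = mtrr_add(0xd0000000, 0x1000000, MTRR_TYPE_WRCOMB, true);

	if (reg < 0)
		printk(KERN_WARNING "no free MTRR for write-combining\n");
}
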
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index 7a17d9e58ad6..bbeefb96ddfd 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h | |||
@@ -26,7 +26,7 @@ do { \ | |||
26 | unsigned int dummy; \ | 26 | unsigned int dummy; \ |
27 | \ | 27 | \ |
28 | typecheck(atomic_t *, count); \ | 28 | typecheck(atomic_t *, count); \ |
29 | typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ | 29 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
30 | \ | 30 | \ |
31 | __asm__ __volatile__( \ | 31 | __asm__ __volatile__( \ |
32 | LOCK_PREFIX " decl (%%eax) \n" \ | 32 | LOCK_PREFIX " decl (%%eax) \n" \ |
@@ -51,8 +51,7 @@ do { \ | |||
51 | * or anything the slow path function returns | 51 | * or anything the slow path function returns |
52 | */ | 52 | */ |
53 | static inline int | 53 | static inline int |
54 | __mutex_fastpath_lock_retval(atomic_t *count, | 54 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) |
55 | int fastcall (*fail_fn)(atomic_t *)) | ||
56 | { | 55 | { |
57 | if (unlikely(atomic_dec_return(count) < 0)) | 56 | if (unlikely(atomic_dec_return(count) < 0)) |
58 | return fail_fn(count); | 57 | return fail_fn(count); |
@@ -78,7 +77,7 @@ do { \ | |||
78 | unsigned int dummy; \ | 77 | unsigned int dummy; \ |
79 | \ | 78 | \ |
80 | typecheck(atomic_t *, count); \ | 79 | typecheck(atomic_t *, count); \ |
81 | typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ | 80 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
82 | \ | 81 | \ |
83 | __asm__ __volatile__( \ | 82 | __asm__ __volatile__( \ |
84 | LOCK_PREFIX " incl (%%eax) \n" \ | 83 | LOCK_PREFIX " incl (%%eax) \n" \ |
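
With fastcall gone, the slow-path handler is an ordinary regparm-default C function. A minimal sketch of what the fastpath now expects (the handler body and names are hypothetical):

#include <linux/mutex.h>

/* Sketch: ordinary C linkage, no fastcall attribute needed. */
static int demo_lock_slowpath(atomic_t *count)
{
	/* ... block until the mutex becomes available ... */
	return 0;
}

static int demo_lock(struct mutex *lock)
{
	/* 0 on fastpath success, else whatever the slow path returns */
	return __mutex_fastpath_lock_retval(&lock->count, demo_lock_slowpath);
}
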
diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h index 70a958a8e381..7206c7e8a388 100644 --- a/include/asm-x86/nmi_32.h +++ b/include/asm-x86/nmi_32.h | |||
@@ -1,6 +1,3 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/nmi.h | ||
3 | */ | ||
4 | #ifndef ASM_NMI_H | 1 | #ifndef ASM_NMI_H |
5 | #define ASM_NMI_H | 2 | #define ASM_NMI_H |
6 | 3 | ||
diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h index 65b6acf3bb59..2eeb74e5f3ff 100644 --- a/include/asm-x86/nmi_64.h +++ b/include/asm-x86/nmi_64.h | |||
@@ -1,6 +1,3 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/nmi.h | ||
3 | */ | ||
4 | #ifndef ASM_NMI_H | 1 | #ifndef ASM_NMI_H |
5 | #define ASM_NMI_H | 2 | #define ASM_NMI_H |
6 | 3 | ||
@@ -41,7 +38,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | |||
41 | 38 | ||
42 | #define get_nmi_reason() inb(0x61) | 39 | #define get_nmi_reason() inb(0x61) |
43 | 40 | ||
44 | extern int panic_on_timeout; | ||
45 | extern int unknown_nmi_panic; | 41 | extern int unknown_nmi_panic; |
46 | extern int nmi_watchdog_enabled; | 42 | extern int nmi_watchdog_enabled; |
47 | 43 | ||
@@ -60,7 +56,6 @@ extern void enable_timer_nmi_watchdog(void); | |||
60 | extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | 56 | extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); |
61 | 57 | ||
62 | extern void nmi_watchdog_default(void); | 58 | extern void nmi_watchdog_default(void); |
63 | extern int setup_nmi_watchdog(char *); | ||
64 | 59 | ||
65 | extern atomic_t nmi_active; | 60 | extern atomic_t nmi_active; |
66 | extern unsigned int nmi_watchdog; | 61 | extern unsigned int nmi_watchdog; |
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h new file mode 100644 index 000000000000..fec025c7f58c --- /dev/null +++ b/include/asm-x86/nops.h | |||
@@ -0,0 +1,90 @@ | |||
1 | #ifndef _ASM_NOPS_H | ||
2 | #define _ASM_NOPS_H 1 | ||
3 | |||
4 | /* Define nops for use with alternative() */ | ||
5 | |||
6 | /* generic versions from gas */ | ||
7 | #define GENERIC_NOP1 ".byte 0x90\n" | ||
8 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | ||
9 | #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" | ||
10 | #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" | ||
11 | #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 | ||
12 | #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | ||
13 | #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | ||
14 | #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 | ||
15 | |||
16 | /* Opteron 64bit nops */ | ||
17 | #define K8_NOP1 GENERIC_NOP1 | ||
18 | #define K8_NOP2 ".byte 0x66,0x90\n" | ||
19 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" | ||
20 | #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" | ||
21 | #define K8_NOP5 K8_NOP3 K8_NOP2 | ||
22 | #define K8_NOP6 K8_NOP3 K8_NOP3 | ||
23 | #define K8_NOP7 K8_NOP4 K8_NOP3 | ||
24 | #define K8_NOP8 K8_NOP4 K8_NOP4 | ||
25 | |||
26 | /* K7 nops */ | ||
27 | /* uses eax dependencies (arbitrary choice) */ | ||
28 | #define K7_NOP1 GENERIC_NOP1 | ||
29 | #define K7_NOP2 ".byte 0x8b,0xc0\n" | ||
30 | #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" | ||
31 | #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" | ||
32 | #define K7_NOP5 K7_NOP4 ASM_NOP1 | ||
33 | #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" | ||
34 | #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" | ||
35 | #define K7_NOP8 K7_NOP7 ASM_NOP1 | ||
36 | |||
37 | /* P6 nops */ | ||
38 | /* uses eax dependencies (Intel-recommended choice) */ | ||
39 | #define P6_NOP1 GENERIC_NOP1 | ||
40 | #define P6_NOP2 ".byte 0x66,0x90\n" | ||
41 | #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" | ||
42 | #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" | ||
43 | #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" | ||
44 | #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" | ||
45 | #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" | ||
46 | #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" | ||
47 | |||
48 | #if defined(CONFIG_MK8) | ||
49 | #define ASM_NOP1 K8_NOP1 | ||
50 | #define ASM_NOP2 K8_NOP2 | ||
51 | #define ASM_NOP3 K8_NOP3 | ||
52 | #define ASM_NOP4 K8_NOP4 | ||
53 | #define ASM_NOP5 K8_NOP5 | ||
54 | #define ASM_NOP6 K8_NOP6 | ||
55 | #define ASM_NOP7 K8_NOP7 | ||
56 | #define ASM_NOP8 K8_NOP8 | ||
57 | #elif defined(CONFIG_MK7) | ||
58 | #define ASM_NOP1 K7_NOP1 | ||
59 | #define ASM_NOP2 K7_NOP2 | ||
60 | #define ASM_NOP3 K7_NOP3 | ||
61 | #define ASM_NOP4 K7_NOP4 | ||
62 | #define ASM_NOP5 K7_NOP5 | ||
63 | #define ASM_NOP6 K7_NOP6 | ||
64 | #define ASM_NOP7 K7_NOP7 | ||
65 | #define ASM_NOP8 K7_NOP8 | ||
66 | #elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ | ||
67 | defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ | ||
68 | defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4) | ||
69 | #define ASM_NOP1 P6_NOP1 | ||
70 | #define ASM_NOP2 P6_NOP2 | ||
71 | #define ASM_NOP3 P6_NOP3 | ||
72 | #define ASM_NOP4 P6_NOP4 | ||
73 | #define ASM_NOP5 P6_NOP5 | ||
74 | #define ASM_NOP6 P6_NOP6 | ||
75 | #define ASM_NOP7 P6_NOP7 | ||
76 | #define ASM_NOP8 P6_NOP8 | ||
77 | #else | ||
78 | #define ASM_NOP1 GENERIC_NOP1 | ||
79 | #define ASM_NOP2 GENERIC_NOP2 | ||
80 | #define ASM_NOP3 GENERIC_NOP3 | ||
81 | #define ASM_NOP4 GENERIC_NOP4 | ||
82 | #define ASM_NOP5 GENERIC_NOP5 | ||
83 | #define ASM_NOP6 GENERIC_NOP6 | ||
84 | #define ASM_NOP7 GENERIC_NOP7 | ||
85 | #define ASM_NOP8 GENERIC_NOP8 | ||
86 | #endif | ||
87 | |||
88 | #define ASM_NOP_MAX 8 | ||
89 | |||
90 | #endif | ||
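
Each ASM_NOPn macro expands to a string of .byte directives, so it can be pasted directly into inline assembly or used as alternative() padding. A small usage sketch (not from this patch):

#include <asm/nops.h>

static inline void pad_five_bytes(void)
{
	/* Emits the 5-byte nop chosen above for this CPU family,
	 * e.g. "nop; lea 0x0(%esi,%eiz,1),%esi" for GENERIC_NOP5. */
	asm volatile(ASM_NOP5);
}
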
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h index 96fcb157db1d..03d0f7a9bf02 100644 --- a/include/asm-x86/numa_32.h +++ b/include/asm-x86/numa_32.h | |||
@@ -1,3 +1,15 @@ | |||
1 | #ifndef _ASM_X86_32_NUMA_H | ||
2 | #define _ASM_X86_32_NUMA_H 1 | ||
1 | 3 | ||
2 | int pxm_to_nid(int pxm); | 4 | extern int pxm_to_nid(int pxm); |
3 | 5 | ||
6 | #ifdef CONFIG_NUMA | ||
7 | extern void __init remap_numa_kva(void); | ||
8 | extern void set_highmem_pages_init(int); | ||
9 | #else | ||
10 | static inline void remap_numa_kva(void) | ||
11 | { | ||
12 | } | ||
13 | #endif | ||
14 | |||
15 | #endif /* _ASM_X86_32_NUMA_H */ | ||
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 0cc5c97a7fc9..15fe07cde586 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h | |||
@@ -20,13 +20,19 @@ extern void numa_set_node(int cpu, int node); | |||
20 | extern void srat_reserve_add_area(int nodeid); | 20 | extern void srat_reserve_add_area(int nodeid); |
21 | extern int hotadd_percent; | 21 | extern int hotadd_percent; |
22 | 22 | ||
23 | extern unsigned char apicid_to_node[MAX_LOCAL_APIC]; | 23 | extern s16 apicid_to_node[MAX_LOCAL_APIC]; |
24 | |||
25 | extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); | ||
26 | extern unsigned long numa_free_all_bootmem(void); | ||
27 | extern void setup_node_bootmem(int nodeid, unsigned long start, | ||
28 | unsigned long end); | ||
29 | |||
24 | #ifdef CONFIG_NUMA | 30 | #ifdef CONFIG_NUMA |
25 | extern void __init init_cpu_to_node(void); | 31 | extern void __init init_cpu_to_node(void); |
26 | 32 | ||
27 | static inline void clear_node_cpumask(int cpu) | 33 | static inline void clear_node_cpumask(int cpu) |
28 | { | 34 | { |
29 | clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); | 35 | clear_bit(cpu, (unsigned long *)&node_to_cpumask_map[cpu_to_node(cpu)]); |
30 | } | 36 | } |
31 | 37 | ||
32 | #else | 38 | #else |
@@ -34,6 +40,4 @@ static inline void clear_node_cpumask(int cpu) | |||
34 | #define clear_node_cpumask(cpu) do {} while (0) | 40 | #define clear_node_cpumask(cpu) do {} while (0) |
35 | #endif | 41 | #endif |
36 | 42 | ||
37 | #define NUMA_NO_NODE 0xff | ||
38 | |||
39 | #endif | 43 | #endif |
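
Widening apicid_to_node[] from unsigned char to s16 lets it hold the generic NUMA_NO_NODE sentinel (-1) instead of the private 0xff removed above. An initialization sketch under that assumption (the function name is illustrative):

#include <linux/numa.h>		/* generic NUMA_NO_NODE (-1) */
#include <asm/numa_64.h>

static void __init reset_apicid_to_node(void)
{
	int i;

	/* s16 entries can store the -1 sentinel directly */
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		apicid_to_node[i] = NUMA_NO_NODE;
}
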
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index a757eb26141d..1cb7c51bc296 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h | |||
@@ -1,13 +1,183 @@ | |||
1 | #ifndef _ASM_X86_PAGE_H | ||
2 | #define _ASM_X86_PAGE_H | ||
3 | |||
4 | #include <linux/const.h> | ||
5 | |||
6 | /* PAGE_SHIFT determines the page size */ | ||
7 | #define PAGE_SHIFT 12 | ||
8 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | ||
9 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
10 | |||
1 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 12 | |
3 | # include "page_32.h" | 13 | #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK) |
4 | # else | 14 | #define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK)) |
5 | # include "page_64.h" | 15 | |
6 | # endif | 16 | #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) |
17 | #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) | ||
18 | |||
19 | #define HPAGE_SHIFT PMD_SHIFT | ||
20 | #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) | ||
21 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
22 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
23 | |||
24 | /* to align the pointer to the (next) page boundary */ | ||
25 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) | ||
26 | |||
27 | #define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1) | ||
28 | #define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1) | ||
29 | |||
30 | #ifndef __ASSEMBLY__ | ||
31 | #include <linux/types.h> | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_X86_64 | ||
35 | #include <asm/page_64.h> | ||
36 | #define max_pfn_mapped end_pfn_map | ||
7 | #else | 37 | #else |
8 | # ifdef __i386__ | 38 | #include <asm/page_32.h> |
9 | # include "page_32.h" | 39 | #define max_pfn_mapped max_low_pfn |
10 | # else | 40 | #endif /* CONFIG_X86_64 */ |
11 | # include "page_64.h" | 41 | |
12 | # endif | 42 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
43 | |||
44 | #define VM_DATA_DEFAULT_FLAGS \ | ||
45 | (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
46 | VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
47 | |||
48 | |||
49 | #ifndef __ASSEMBLY__ | ||
50 | |||
51 | extern int page_is_ram(unsigned long pagenr); | ||
52 | |||
53 | struct page; | ||
54 | |||
55 | static inline void clear_user_page(void *page, unsigned long vaddr, | ||
56 | struct page *pg) | ||
57 | { | ||
58 | clear_page(page); | ||
59 | } | ||
60 | |||
61 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | ||
62 | struct page *topage) | ||
63 | { | ||
64 | copy_page(to, from); | ||
65 | } | ||
66 | |||
67 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ | ||
68 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
69 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | ||
70 | |||
71 | typedef struct { pgdval_t pgd; } pgd_t; | ||
72 | typedef struct { pgprotval_t pgprot; } pgprot_t; | ||
73 | |||
74 | static inline pgd_t native_make_pgd(pgdval_t val) | ||
75 | { | ||
76 | return (pgd_t) { val }; | ||
77 | } | ||
78 | |||
79 | static inline pgdval_t native_pgd_val(pgd_t pgd) | ||
80 | { | ||
81 | return pgd.pgd; | ||
82 | } | ||
83 | |||
84 | #if PAGETABLE_LEVELS >= 3 | ||
85 | #if PAGETABLE_LEVELS == 4 | ||
86 | typedef struct { pudval_t pud; } pud_t; | ||
87 | |||
88 | static inline pud_t native_make_pud(pudval_t val) | ||
89 | { | ||
90 | return (pud_t) { val }; | ||
91 | } | ||
92 | |||
93 | static inline pudval_t native_pud_val(pud_t pud) | ||
94 | { | ||
95 | return pud.pud; | ||
96 | } | ||
97 | #else /* PAGETABLE_LEVELS == 3 */ | ||
98 | #include <asm-generic/pgtable-nopud.h> | ||
99 | |||
100 | static inline pudval_t native_pud_val(pud_t pud) | ||
101 | { | ||
102 | return native_pgd_val(pud.pgd); | ||
103 | } | ||
104 | #endif /* PAGETABLE_LEVELS == 4 */ | ||
105 | |||
106 | typedef struct { pmdval_t pmd; } pmd_t; | ||
107 | |||
108 | static inline pmd_t native_make_pmd(pmdval_t val) | ||
109 | { | ||
110 | return (pmd_t) { val }; | ||
111 | } | ||
112 | |||
113 | static inline pmdval_t native_pmd_val(pmd_t pmd) | ||
114 | { | ||
115 | return pmd.pmd; | ||
116 | } | ||
117 | #else /* PAGETABLE_LEVELS == 2 */ | ||
118 | #include <asm-generic/pgtable-nopmd.h> | ||
119 | |||
120 | static inline pmdval_t native_pmd_val(pmd_t pmd) | ||
121 | { | ||
122 | return native_pgd_val(pmd.pud.pgd); | ||
123 | } | ||
124 | #endif /* PAGETABLE_LEVELS >= 3 */ | ||
125 | |||
126 | static inline pte_t native_make_pte(pteval_t val) | ||
127 | { | ||
128 | return (pte_t) { .pte = val }; | ||
129 | } | ||
130 | |||
131 | static inline pteval_t native_pte_val(pte_t pte) | ||
132 | { | ||
133 | return pte.pte; | ||
134 | } | ||
135 | |||
136 | #define pgprot_val(x) ((x).pgprot) | ||
137 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
138 | |||
139 | #ifdef CONFIG_PARAVIRT | ||
140 | #include <asm/paravirt.h> | ||
141 | #else /* !CONFIG_PARAVIRT */ | ||
142 | |||
143 | #define pgd_val(x) native_pgd_val(x) | ||
144 | #define __pgd(x) native_make_pgd(x) | ||
145 | |||
146 | #ifndef __PAGETABLE_PUD_FOLDED | ||
147 | #define pud_val(x) native_pud_val(x) | ||
148 | #define __pud(x) native_make_pud(x) | ||
149 | #endif | ||
150 | |||
151 | #ifndef __PAGETABLE_PMD_FOLDED | ||
152 | #define pmd_val(x) native_pmd_val(x) | ||
153 | #define __pmd(x) native_make_pmd(x) | ||
13 | #endif | 154 | #endif |
155 | |||
156 | #define pte_val(x) native_pte_val(x) | ||
157 | #define __pte(x) native_make_pte(x) | ||
158 | |||
159 | #endif /* CONFIG_PARAVIRT */ | ||
160 | |||
161 | #define __pa(x) __phys_addr((unsigned long)(x)) | ||
162 | /* __pa_symbol should be used for C visible symbols. | ||
163 | This seems to be the official gcc blessed way to do such arithmetic. */ | ||
164 | #define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x))) | ||
165 | |||
166 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | ||
167 | |||
168 | #define __boot_va(x) __va(x) | ||
169 | #define __boot_pa(x) __pa(x) | ||
170 | |||
171 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
172 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | ||
173 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
174 | |||
175 | #endif /* __ASSEMBLY__ */ | ||
176 | |||
177 | #include <asm-generic/memory_model.h> | ||
178 | #include <asm-generic/page.h> | ||
179 | |||
180 | #define __HAVE_ARCH_GATE_AREA 1 | ||
181 | |||
182 | #endif /* __KERNEL__ */ | ||
183 | #endif /* _ASM_X86_PAGE_H */ | ||
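
The native_make_*/native_*_val pairs above give every page-table level a typed round-trip between raw words and structs. A small sketch of the idiom (assuming only that bit 0 is _PAGE_PRESENT, which is defined elsewhere):

#include <asm/page.h>

static inline int raw_pte_present(pteval_t raw)
{
	pte_t pte = __pte(raw);	/* native_make_pte() unless CONFIG_PARAVIRT */

	return pte_val(pte) & 1;	/* bit 0: _PAGE_PRESENT */
}
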
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 80ecc66b6d86..a6fd10f230d2 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h | |||
@@ -1,206 +1,107 @@ | |||
1 | #ifndef _I386_PAGE_H | 1 | #ifndef _ASM_X86_PAGE_32_H |
2 | #define _I386_PAGE_H | 2 | #define _ASM_X86_PAGE_32_H |
3 | |||
4 | /* PAGE_SHIFT determines the page size */ | ||
5 | #define PAGE_SHIFT 12 | ||
6 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | ||
7 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
8 | |||
9 | #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) | ||
10 | #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) | ||
11 | |||
12 | #ifdef __KERNEL__ | ||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | #ifdef CONFIG_X86_USE_3DNOW | ||
16 | |||
17 | #include <asm/mmx.h> | ||
18 | |||
19 | #define clear_page(page) mmx_clear_page((void *)(page)) | ||
20 | #define copy_page(to,from) mmx_copy_page(to,from) | ||
21 | |||
22 | #else | ||
23 | 3 | ||
24 | /* | 4 | /* |
25 | * On older X86 processors it's not a win to use MMX here it seems. | 5 | * This handles the memory map. |
26 | * Maybe the K6-III ? | 6 | * |
27 | */ | 7 | * A __PAGE_OFFSET of 0xC0000000 means that the kernel has |
28 | 8 | * a virtual address space of one gigabyte, which limits the | |
29 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 9 | * amount of physical memory you can use to about 950MB. |
30 | #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) | 10 | * |
31 | 11 | * If you want more physical memory than this then see the CONFIG_HIGHMEM4G | |
32 | #endif | 12 | * and CONFIG_HIGHMEM64G options in the kernel configuration. |
33 | |||
34 | #define clear_user_page(page, vaddr, pg) clear_page(page) | ||
35 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | ||
36 | |||
37 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ | ||
38 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
39 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | ||
40 | |||
41 | /* | ||
42 | * These are used to make use of C type-checking.. | ||
43 | */ | 13 | */ |
44 | extern int nx_enabled; | 14 | #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) |
45 | 15 | ||
46 | #ifdef CONFIG_X86_PAE | 16 | #ifdef CONFIG_X86_PAE |
47 | typedef struct { unsigned long pte_low, pte_high; } pte_t; | 17 | #define __PHYSICAL_MASK_SHIFT 36 |
48 | typedef struct { unsigned long long pmd; } pmd_t; | 18 | #define __VIRTUAL_MASK_SHIFT 32 |
49 | typedef struct { unsigned long long pgd; } pgd_t; | 19 | #define PAGETABLE_LEVELS 3 |
50 | typedef struct { unsigned long long pgprot; } pgprot_t; | ||
51 | 20 | ||
52 | static inline unsigned long long native_pgd_val(pgd_t pgd) | 21 | #ifndef __ASSEMBLY__ |
53 | { | 22 | typedef u64 pteval_t; |
54 | return pgd.pgd; | 23 | typedef u64 pmdval_t; |
55 | } | 24 | typedef u64 pudval_t; |
56 | 25 | typedef u64 pgdval_t; | |
57 | static inline unsigned long long native_pmd_val(pmd_t pmd) | 26 | typedef u64 pgprotval_t; |
58 | { | 27 | typedef u64 phys_addr_t; |
59 | return pmd.pmd; | 28 | |
60 | } | 29 | typedef union { |
61 | 30 | struct { | |
62 | static inline unsigned long long native_pte_val(pte_t pte) | 31 | unsigned long pte_low, pte_high; |
63 | { | 32 | }; |
64 | return pte.pte_low | ((unsigned long long)pte.pte_high << 32); | 33 | pteval_t pte; |
65 | } | 34 | } pte_t; |
66 | 35 | #endif /* __ASSEMBLY__ */
67 | static inline pgd_t native_make_pgd(unsigned long long val) | 36 |
68 | { | ||
69 | return (pgd_t) { val }; | ||
70 | } | ||
71 | |||
72 | static inline pmd_t native_make_pmd(unsigned long long val) | ||
73 | { | ||
74 | return (pmd_t) { val }; | ||
75 | } | ||
76 | |||
77 | static inline pte_t native_make_pte(unsigned long long val) | ||
78 | { | ||
79 | return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ; | ||
80 | } | ||
81 | |||
82 | #ifndef CONFIG_PARAVIRT | ||
83 | #define pmd_val(x) native_pmd_val(x) | ||
84 | #define __pmd(x) native_make_pmd(x) | ||
85 | #endif | ||
86 | |||
87 | #define HPAGE_SHIFT 21 | ||
88 | #include <asm-generic/pgtable-nopud.h> | ||
89 | #else /* !CONFIG_X86_PAE */ | 37 | #else /* !CONFIG_X86_PAE */ |
90 | typedef struct { unsigned long pte_low; } pte_t; | 38 | #define __PHYSICAL_MASK_SHIFT 32 |
91 | typedef struct { unsigned long pgd; } pgd_t; | 39 | #define __VIRTUAL_MASK_SHIFT 32 |
92 | typedef struct { unsigned long pgprot; } pgprot_t; | 40 | #define PAGETABLE_LEVELS 2 |
93 | #define boot_pte_t pte_t /* or would you rather have a typedef */ | ||
94 | |||
95 | static inline unsigned long native_pgd_val(pgd_t pgd) | ||
96 | { | ||
97 | return pgd.pgd; | ||
98 | } | ||
99 | 41 | ||
100 | static inline unsigned long native_pte_val(pte_t pte) | 42 | #ifndef __ASSEMBLY__ |
101 | { | 43 | typedef unsigned long pteval_t; |
102 | return pte.pte_low; | 44 | typedef unsigned long pmdval_t; |
103 | } | 45 | typedef unsigned long pudval_t; |
104 | 46 | typedef unsigned long pgdval_t; | |
105 | static inline pgd_t native_make_pgd(unsigned long val) | 47 | typedef unsigned long pgprotval_t; |
106 | { | 48 | typedef unsigned long phys_addr_t; |
107 | return (pgd_t) { val }; | ||
108 | } | ||
109 | 49 | ||
110 | static inline pte_t native_make_pte(unsigned long val) | 50 | typedef union { pteval_t pte, pte_low; } pte_t; |
111 | { | 51 | typedef pte_t boot_pte_t; |
112 | return (pte_t) { .pte_low = val }; | ||
113 | } | ||
114 | 52 | ||
115 | #define HPAGE_SHIFT 22 | 53 | #endif /* __ASSEMBLY__ */ |
116 | #include <asm-generic/pgtable-nopmd.h> | ||
117 | #endif /* CONFIG_X86_PAE */ | 54 | #endif /* CONFIG_X86_PAE */ |
118 | 55 | ||
119 | #define PTE_MASK PAGE_MASK | ||
120 | |||
121 | #ifdef CONFIG_HUGETLB_PAGE | 56 | #ifdef CONFIG_HUGETLB_PAGE |
122 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | ||
123 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
124 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
125 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | 57 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA |
126 | #endif | 58 | #endif |
127 | 59 | ||
128 | #define pgprot_val(x) ((x).pgprot) | ||
129 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
130 | |||
131 | #ifndef CONFIG_PARAVIRT | ||
132 | #define pgd_val(x) native_pgd_val(x) | ||
133 | #define __pgd(x) native_make_pgd(x) | ||
134 | #define pte_val(x) native_pte_val(x) | ||
135 | #define __pte(x) native_make_pte(x) | ||
136 | #endif | ||
137 | |||
138 | #endif /* !__ASSEMBLY__ */ | ||
139 | |||
140 | /* to align the pointer to the (next) page boundary */ | ||
141 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) | ||
142 | |||
143 | /* | ||
144 | * This handles the memory map.. We could make this a config | ||
145 | * option, but too many people screw it up, and too few need | ||
146 | * it. | ||
147 | * | ||
148 | * A __PAGE_OFFSET of 0xC0000000 means that the kernel has | ||
149 | * a virtual address space of one gigabyte, which limits the | ||
150 | * amount of physical memory you can use to about 950MB. | ||
151 | * | ||
152 | * If you want more physical memory than this then see the CONFIG_HIGHMEM4G | ||
153 | * and CONFIG_HIGHMEM64G options in the kernel configuration. | ||
154 | */ | ||
155 | |||
156 | #ifndef __ASSEMBLY__ | 60 | #ifndef __ASSEMBLY__ |
61 | #define __phys_addr(x) ((x)-PAGE_OFFSET) | ||
62 | #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) | ||
63 | |||
64 | #ifdef CONFIG_FLATMEM | ||
65 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | ||
66 | #endif /* CONFIG_FLATMEM */ | ||
157 | 67 | ||
158 | struct vm_area_struct; | 68 | extern int nx_enabled; |
159 | 69 | ||
160 | /* | 70 | /* |
161 | * This much address space is reserved for vmalloc() and iomap() | 71 | * This much address space is reserved for vmalloc() and iomap() |
162 | * as well as fixmap mappings. | 72 | * as well as fixmap mappings. |
163 | */ | 73 | */ |
164 | extern unsigned int __VMALLOC_RESERVE; | 74 | extern unsigned int __VMALLOC_RESERVE; |
165 | |||
166 | extern int sysctl_legacy_va_layout; | 75 | extern int sysctl_legacy_va_layout; |
167 | 76 | ||
168 | extern int page_is_ram(unsigned long pagenr); | ||
169 | |||
170 | #endif /* __ASSEMBLY__ */ | ||
171 | |||
172 | #ifdef __ASSEMBLY__ | ||
173 | #define __PAGE_OFFSET CONFIG_PAGE_OFFSET | ||
174 | #else | ||
175 | #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) | ||
176 | #endif | ||
177 | |||
178 | |||
179 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | ||
180 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) | 77 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) |
181 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) | 78 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) |
182 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) | ||
183 | /* __pa_symbol should be used for C visible symbols. | ||
184 | This seems to be the official gcc blessed way to do such arithmetic. */ | ||
185 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) | ||
186 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | ||
187 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | ||
188 | #ifdef CONFIG_FLATMEM | ||
189 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | ||
190 | #endif /* CONFIG_FLATMEM */ | ||
191 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
192 | 79 | ||
193 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 80 | #ifdef CONFIG_X86_USE_3DNOW |
81 | #include <asm/mmx.h> | ||
82 | |||
83 | static inline void clear_page(void *page) | ||
84 | { | ||
85 | mmx_clear_page(page); | ||
86 | } | ||
194 | 87 | ||
195 | #define VM_DATA_DEFAULT_FLAGS \ | 88 | static inline void copy_page(void *to, void *from) |
196 | (VM_READ | VM_WRITE | \ | 89 | { |
197 | ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | 90 | mmx_copy_page(to, from); |
198 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 91 | } |
92 | #else /* !CONFIG_X86_USE_3DNOW */ | ||
93 | #include <linux/string.h> | ||
199 | 94 | ||
200 | #include <asm-generic/memory_model.h> | 95 | static inline void clear_page(void *page) |
201 | #include <asm-generic/page.h> | 96 | { |
97 | memset(page, 0, PAGE_SIZE); | ||
98 | } | ||
202 | 99 | ||
203 | #define __HAVE_ARCH_GATE_AREA 1 | 100 | static inline void copy_page(void *to, void *from) |
204 | #endif /* __KERNEL__ */ | 101 | { |
102 | memcpy(to, from, PAGE_SIZE); | ||
103 | } | ||
104 | #endif /* CONFIG_X86_USE_3DNOW */ | ||
105 | #endif /* !__ASSEMBLY__ */ | ||
205 | 106 | ||
206 | #endif /* _I386_PAGE_H */ | 107 | #endif /* _ASM_X86_PAGE_32_H */ |
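
On 32-bit, __phys_addr() stays simple pointer arithmetic against PAGE_OFFSET, so __pa()/__va() invert each other for any lowmem address. A sketch of the round-trip (assumes the default CONFIG_PAGE_OFFSET of 0xC0000000):

#include <asm/page.h>

/* e.g. __pa((void *)0xc0100000) == 0x00100000 with the default offset */
static inline int lowmem_roundtrip_ok(void *kaddr)
{
	return __va(__pa(kaddr)) == kaddr;
}
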
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index c3b52bcb171e..dcf0c0746075 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h | |||
@@ -1,15 +1,9 @@ | |||
1 | #ifndef _X86_64_PAGE_H | 1 | #ifndef _X86_64_PAGE_H |
2 | #define _X86_64_PAGE_H | 2 | #define _X86_64_PAGE_H |
3 | 3 | ||
4 | #include <linux/const.h> | 4 | #define PAGETABLE_LEVELS 4 |
5 | 5 | ||
6 | /* PAGE_SHIFT determines the page size */ | 6 | #define THREAD_ORDER 1 |
7 | #define PAGE_SHIFT 12 | ||
8 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | ||
9 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
10 | #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) | ||
11 | |||
12 | #define THREAD_ORDER 1 | ||
13 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | 7 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) |
14 | #define CURRENT_MASK (~(THREAD_SIZE-1)) | 8 | #define CURRENT_MASK (~(THREAD_SIZE-1)) |
15 | 9 | ||
@@ -29,54 +23,10 @@ | |||
29 | #define MCE_STACK 5 | 23 | #define MCE_STACK 5 |
30 | #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ | 24 | #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ |
31 | 25 | ||
32 | #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) | 26 | #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) |
33 | #define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT) | 27 | #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) |
34 | |||
35 | #define HPAGE_SHIFT PMD_SHIFT | ||
36 | #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) | ||
37 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
38 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
39 | |||
40 | #ifdef __KERNEL__ | ||
41 | #ifndef __ASSEMBLY__ | ||
42 | |||
43 | extern unsigned long end_pfn; | ||
44 | |||
45 | void clear_page(void *); | ||
46 | void copy_page(void *, void *); | ||
47 | |||
48 | #define clear_user_page(page, vaddr, pg) clear_page(page) | ||
49 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | ||
50 | |||
51 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ | ||
52 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
53 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | ||
54 | /* | ||
55 | * These are used to make use of C type-checking.. | ||
56 | */ | ||
57 | typedef struct { unsigned long pte; } pte_t; | ||
58 | typedef struct { unsigned long pmd; } pmd_t; | ||
59 | typedef struct { unsigned long pud; } pud_t; | ||
60 | typedef struct { unsigned long pgd; } pgd_t; | ||
61 | #define PTE_MASK PHYSICAL_PAGE_MASK | ||
62 | |||
63 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
64 | |||
65 | extern unsigned long phys_base; | ||
66 | |||
67 | #define pte_val(x) ((x).pte) | ||
68 | #define pmd_val(x) ((x).pmd) | ||
69 | #define pud_val(x) ((x).pud) | ||
70 | #define pgd_val(x) ((x).pgd) | ||
71 | #define pgprot_val(x) ((x).pgprot) | ||
72 | |||
73 | #define __pte(x) ((pte_t) { (x) } ) | ||
74 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
75 | #define __pud(x) ((pud_t) { (x) } ) | ||
76 | #define __pgd(x) ((pgd_t) { (x) } ) | ||
77 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
78 | 28 | ||
79 | #endif /* !__ASSEMBLY__ */ | 29 | #define __PAGE_OFFSET _AC(0xffff810000000000, UL) |
80 | 30 | ||
81 | #define __PHYSICAL_START CONFIG_PHYSICAL_START | 31 | #define __PHYSICAL_START CONFIG_PHYSICAL_START |
82 | #define __KERNEL_ALIGN 0x200000 | 32 | #define __KERNEL_ALIGN 0x200000 |
@@ -92,53 +42,44 @@ extern unsigned long phys_base; | |||
92 | 42 | ||
93 | #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) | 43 | #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) |
94 | #define __START_KERNEL_map _AC(0xffffffff80000000, UL) | 44 | #define __START_KERNEL_map _AC(0xffffffff80000000, UL) |
95 | #define __PAGE_OFFSET _AC(0xffff810000000000, UL) | ||
96 | |||
97 | /* to align the pointer to the (next) page boundary */ | ||
98 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) | ||
99 | 45 | ||
100 | /* See Documentation/x86_64/mm.txt for a description of the memory map. */ | 46 | /* See Documentation/x86_64/mm.txt for a description of the memory map. */ |
101 | #define __PHYSICAL_MASK_SHIFT 46 | 47 | #define __PHYSICAL_MASK_SHIFT 46 |
102 | #define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1) | ||
103 | #define __VIRTUAL_MASK_SHIFT 48 | 48 | #define __VIRTUAL_MASK_SHIFT 48 |
104 | #define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1) | ||
105 | 49 | ||
106 | #define KERNEL_TEXT_SIZE (40*1024*1024) | 50 | #define KERNEL_TEXT_SIZE (40*1024*1024) |
107 | #define KERNEL_TEXT_START _AC(0xffffffff80000000, UL) | 51 | #define KERNEL_TEXT_START _AC(0xffffffff80000000, UL) |
108 | #define PAGE_OFFSET __PAGE_OFFSET | ||
109 | 52 | ||
110 | #ifndef __ASSEMBLY__ | 53 | #ifndef __ASSEMBLY__ |
54 | void clear_page(void *page); | ||
55 | void copy_page(void *to, void *from); | ||
111 | 56 | ||
112 | #include <asm/bug.h> | 57 | extern unsigned long end_pfn; |
58 | extern unsigned long end_pfn_map; | ||
59 | extern unsigned long phys_base; | ||
113 | 60 | ||
114 | extern unsigned long __phys_addr(unsigned long); | 61 | extern unsigned long __phys_addr(unsigned long); |
62 | #define __phys_reloc_hide(x) (x) | ||
115 | 63 | ||
116 | #endif /* __ASSEMBLY__ */ | 64 | /* |
117 | 65 | * These are used to make use of C type-checking.. | |
118 | #define __pa(x) __phys_addr((unsigned long)(x)) | 66 | */ |
119 | #define __pa_symbol(x) __phys_addr((unsigned long)(x)) | 67 | typedef unsigned long pteval_t; |
120 | 68 | typedef unsigned long pmdval_t; | |
121 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 69 | typedef unsigned long pudval_t; |
122 | #define __boot_va(x) __va(x) | 70 | typedef unsigned long pgdval_t; |
123 | #define __boot_pa(x) __pa(x) | 71 | typedef unsigned long pgprotval_t; |
124 | #ifdef CONFIG_FLATMEM | 72 | typedef unsigned long phys_addr_t; |
125 | #define pfn_valid(pfn) ((pfn) < end_pfn) | ||
126 | #endif | ||
127 | |||
128 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
129 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
130 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | ||
131 | 73 | ||
132 | #define VM_DATA_DEFAULT_FLAGS \ | 74 | typedef struct { pteval_t pte; } pte_t; |
133 | (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
134 | VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
135 | 75 | ||
136 | #define __HAVE_ARCH_GATE_AREA 1 | ||
137 | #define vmemmap ((struct page *)VMEMMAP_START) | 76 | #define vmemmap ((struct page *)VMEMMAP_START) |
138 | 77 | ||
139 | #include <asm-generic/memory_model.h> | 78 | #endif /* !__ASSEMBLY__ */ |
140 | #include <asm-generic/page.h> | 79 | |
80 | #ifdef CONFIG_FLATMEM | ||
81 | #define pfn_valid(pfn) ((pfn) < end_pfn) | ||
82 | #endif | ||
141 | 83 | ||
142 | #endif /* __KERNEL__ */ | ||
143 | 84 | ||
144 | #endif /* _X86_64_PAGE_H */ | 85 | #endif /* _X86_64_PAGE_H */ |
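
Only the shift values live here now; page.h builds the masks from them, so on x86-64 PTE_MASK ends up covering bits 12 through 45. A sketch of using it to strip flag bits from a raw entry (the function name is illustrative):

#include <asm/page.h>

/* PTE_MASK = PAGE_MASK & __PHYSICAL_MASK: clears the low 12 flag bits
 * and everything above the 46-bit physical address space. */
static inline unsigned long raw_pte_frame(unsigned long raw_pte)
{
	return raw_pte & PTE_MASK;
}
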
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index f59d370c5df4..d6236eb46466 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -5,22 +5,37 @@ | |||
5 | 5 | ||
6 | #ifdef CONFIG_PARAVIRT | 6 | #ifdef CONFIG_PARAVIRT |
7 | #include <asm/page.h> | 7 | #include <asm/page.h> |
8 | #include <asm/asm.h> | ||
8 | 9 | ||
9 | /* Bitmask of what can be clobbered: usually at least eax. */ | 10 | /* Bitmask of what can be clobbered: usually at least eax. */ |
10 | #define CLBR_NONE 0x0 | 11 | #define CLBR_NONE 0 |
11 | #define CLBR_EAX 0x1 | 12 | #define CLBR_EAX (1 << 0) |
12 | #define CLBR_ECX 0x2 | 13 | #define CLBR_ECX (1 << 1) |
13 | #define CLBR_EDX 0x4 | 14 | #define CLBR_EDX (1 << 2) |
14 | #define CLBR_ANY 0x7 | 15 | |
16 | #ifdef CONFIG_X86_64 | ||
17 | #define CLBR_RSI (1 << 3) | ||
18 | #define CLBR_RDI (1 << 4) | ||
19 | #define CLBR_R8 (1 << 5) | ||
20 | #define CLBR_R9 (1 << 6) | ||
21 | #define CLBR_R10 (1 << 7) | ||
22 | #define CLBR_R11 (1 << 8) | ||
23 | #define CLBR_ANY ((1 << 9) - 1) | ||
24 | #include <asm/desc_defs.h> | ||
25 | #else | ||
26 | /* CLBR_ANY should match all the regs the platform has; for i386, that is just the three above */ | ||
27 | #define CLBR_ANY ((1 << 3) - 1) | ||
28 | #endif /* X86_64 */ | ||
15 | 29 | ||
16 | #ifndef __ASSEMBLY__ | 30 | #ifndef __ASSEMBLY__ |
17 | #include <linux/types.h> | 31 | #include <linux/types.h> |
18 | #include <linux/cpumask.h> | 32 | #include <linux/cpumask.h> |
19 | #include <asm/kmap_types.h> | 33 | #include <asm/kmap_types.h> |
34 | #include <asm/desc_defs.h> | ||
20 | 35 | ||
21 | struct page; | 36 | struct page; |
22 | struct thread_struct; | 37 | struct thread_struct; |
23 | struct Xgt_desc_struct; | 38 | struct desc_ptr; |
24 | struct tss_struct; | 39 | struct tss_struct; |
25 | struct mm_struct; | 40 | struct mm_struct; |
26 | struct desc_struct; | 41 | struct desc_struct; |
@@ -86,22 +101,27 @@ struct pv_cpu_ops { | |||
86 | unsigned long (*read_cr4)(void); | 101 | unsigned long (*read_cr4)(void); |
87 | void (*write_cr4)(unsigned long); | 102 | void (*write_cr4)(unsigned long); |
88 | 103 | ||
104 | #ifdef CONFIG_X86_64 | ||
105 | unsigned long (*read_cr8)(void); | ||
106 | void (*write_cr8)(unsigned long); | ||
107 | #endif | ||
108 | |||
89 | /* Segment descriptor handling */ | 109 | /* Segment descriptor handling */ |
90 | void (*load_tr_desc)(void); | 110 | void (*load_tr_desc)(void); |
91 | void (*load_gdt)(const struct Xgt_desc_struct *); | 111 | void (*load_gdt)(const struct desc_ptr *); |
92 | void (*load_idt)(const struct Xgt_desc_struct *); | 112 | void (*load_idt)(const struct desc_ptr *); |
93 | void (*store_gdt)(struct Xgt_desc_struct *); | 113 | void (*store_gdt)(struct desc_ptr *); |
94 | void (*store_idt)(struct Xgt_desc_struct *); | 114 | void (*store_idt)(struct desc_ptr *); |
95 | void (*set_ldt)(const void *desc, unsigned entries); | 115 | void (*set_ldt)(const void *desc, unsigned entries); |
96 | unsigned long (*store_tr)(void); | 116 | unsigned long (*store_tr)(void); |
97 | void (*load_tls)(struct thread_struct *t, unsigned int cpu); | 117 | void (*load_tls)(struct thread_struct *t, unsigned int cpu); |
98 | void (*write_ldt_entry)(struct desc_struct *, | 118 | void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, |
99 | int entrynum, u32 low, u32 high); | 119 | const void *desc); |
100 | void (*write_gdt_entry)(struct desc_struct *, | 120 | void (*write_gdt_entry)(struct desc_struct *, |
101 | int entrynum, u32 low, u32 high); | 121 | int entrynum, const void *desc, int size); |
102 | void (*write_idt_entry)(struct desc_struct *, | 122 | void (*write_idt_entry)(gate_desc *, |
103 | int entrynum, u32 low, u32 high); | 123 | int entrynum, const gate_desc *gate); |
104 | void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); | 124 | void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); |
105 | 125 | ||
106 | void (*set_iopl_mask)(unsigned mask); | 126 | void (*set_iopl_mask)(unsigned mask); |
107 | 127 | ||
@@ -115,15 +135,18 @@ struct pv_cpu_ops { | |||
115 | /* MSR, PMC and TSR operations. | 135 | /* MSR, PMC and TSR operations. |
116 | err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ | 136 | err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ |
117 | u64 (*read_msr)(unsigned int msr, int *err); | 137 | u64 (*read_msr)(unsigned int msr, int *err); |
118 | int (*write_msr)(unsigned int msr, u64 val); | 138 | int (*write_msr)(unsigned int msr, unsigned low, unsigned high); |
119 | 139 | ||
120 | u64 (*read_tsc)(void); | 140 | u64 (*read_tsc)(void); |
121 | u64 (*read_pmc)(void); | 141 | u64 (*read_pmc)(int counter); |
142 | unsigned long long (*read_tscp)(unsigned int *aux); | ||
122 | 143 | ||
123 | /* These two are jmp to, not actually called. */ | 144 | /* These two are jmp to, not actually called. */ |
124 | void (*irq_enable_sysexit)(void); | 145 | void (*irq_enable_syscall_ret)(void); |
125 | void (*iret)(void); | 146 | void (*iret)(void); |
126 | 147 | ||
148 | void (*swapgs)(void); | ||
149 | |||
127 | struct pv_lazy_ops lazy_mode; | 150 | struct pv_lazy_ops lazy_mode; |
128 | }; | 151 | }; |
129 | 152 | ||
@@ -150,9 +173,9 @@ struct pv_apic_ops { | |||
150 | * Direct APIC operations, principally for VMI. Ideally | 173 | * Direct APIC operations, principally for VMI. Ideally |
151 | * these shouldn't be in this interface. | 174 | * these shouldn't be in this interface. |
152 | */ | 175 | */ |
153 | void (*apic_write)(unsigned long reg, unsigned long v); | 176 | void (*apic_write)(unsigned long reg, u32 v); |
154 | void (*apic_write_atomic)(unsigned long reg, unsigned long v); | 177 | void (*apic_write_atomic)(unsigned long reg, u32 v); |
155 | unsigned long (*apic_read)(unsigned long reg); | 178 | u32 (*apic_read)(unsigned long reg); |
156 | void (*setup_boot_clock)(void); | 179 | void (*setup_boot_clock)(void); |
157 | void (*setup_secondary_clock)(void); | 180 | void (*setup_secondary_clock)(void); |
158 | 181 | ||
@@ -198,7 +221,7 @@ struct pv_mmu_ops { | |||
198 | 221 | ||
199 | /* Hooks for allocating/releasing pagetable pages */ | 222 | /* Hooks for allocating/releasing pagetable pages */ |
200 | void (*alloc_pt)(struct mm_struct *mm, u32 pfn); | 223 | void (*alloc_pt)(struct mm_struct *mm, u32 pfn); |
201 | void (*alloc_pd)(u32 pfn); | 224 | void (*alloc_pd)(struct mm_struct *mm, u32 pfn); |
202 | void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); | 225 | void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); |
203 | void (*release_pt)(u32 pfn); | 226 | void (*release_pt)(u32 pfn); |
204 | void (*release_pd)(u32 pfn); | 227 | void (*release_pd)(u32 pfn); |
@@ -212,28 +235,34 @@ struct pv_mmu_ops { | |||
212 | void (*pte_update_defer)(struct mm_struct *mm, | 235 | void (*pte_update_defer)(struct mm_struct *mm, |
213 | unsigned long addr, pte_t *ptep); | 236 | unsigned long addr, pte_t *ptep); |
214 | 237 | ||
238 | pteval_t (*pte_val)(pte_t); | ||
239 | pte_t (*make_pte)(pteval_t pte); | ||
240 | |||
241 | pgdval_t (*pgd_val)(pgd_t); | ||
242 | pgd_t (*make_pgd)(pgdval_t pgd); | ||
243 | |||
244 | #if PAGETABLE_LEVELS >= 3 | ||
215 | #ifdef CONFIG_X86_PAE | 245 | #ifdef CONFIG_X86_PAE |
216 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | 246 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
217 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, | 247 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, |
218 | pte_t *ptep, pte_t pte); | 248 | pte_t *ptep, pte_t pte); |
219 | void (*set_pud)(pud_t *pudp, pud_t pudval); | ||
220 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 249 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); |
221 | void (*pmd_clear)(pmd_t *pmdp); | 250 | void (*pmd_clear)(pmd_t *pmdp); |
222 | 251 | ||
223 | unsigned long long (*pte_val)(pte_t); | 252 | #endif /* CONFIG_X86_PAE */ |
224 | unsigned long long (*pmd_val)(pmd_t); | ||
225 | unsigned long long (*pgd_val)(pgd_t); | ||
226 | 253 | ||
227 | pte_t (*make_pte)(unsigned long long pte); | 254 | void (*set_pud)(pud_t *pudp, pud_t pudval); |
228 | pmd_t (*make_pmd)(unsigned long long pmd); | ||
229 | pgd_t (*make_pgd)(unsigned long long pgd); | ||
230 | #else | ||
231 | unsigned long (*pte_val)(pte_t); | ||
232 | unsigned long (*pgd_val)(pgd_t); | ||
233 | 255 | ||
234 | pte_t (*make_pte)(unsigned long pte); | 256 | pmdval_t (*pmd_val)(pmd_t); |
235 | pgd_t (*make_pgd)(unsigned long pgd); | 257 | pmd_t (*make_pmd)(pmdval_t pmd); |
236 | #endif | 258 | |
259 | #if PAGETABLE_LEVELS == 4 | ||
260 | pudval_t (*pud_val)(pud_t); | ||
261 | pud_t (*make_pud)(pudval_t pud); | ||
262 | |||
263 | void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval); | ||
264 | #endif /* PAGETABLE_LEVELS == 4 */ | ||
265 | #endif /* PAGETABLE_LEVELS >= 3 */ | ||
237 | 266 | ||
238 | #ifdef CONFIG_HIGHPTE | 267 | #ifdef CONFIG_HIGHPTE |
239 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); | 268 | void *(*kmap_atomic_pte)(struct page *page, enum km_type type); |
@@ -279,7 +308,8 @@ extern struct pv_mmu_ops pv_mmu_ops; | |||
279 | #define _paravirt_alt(insn_string, type, clobber) \ | 308 | #define _paravirt_alt(insn_string, type, clobber) \ |
280 | "771:\n\t" insn_string "\n" "772:\n" \ | 309 | "771:\n\t" insn_string "\n" "772:\n" \ |
281 | ".pushsection .parainstructions,\"a\"\n" \ | 310 | ".pushsection .parainstructions,\"a\"\n" \ |
282 | " .long 771b\n" \ | 311 | _ASM_ALIGN "\n" \ |
312 | _ASM_PTR " 771b\n" \ | ||
283 | " .byte " type "\n" \ | 313 | " .byte " type "\n" \ |
284 | " .byte 772b-771b\n" \ | 314 | " .byte 772b-771b\n" \ |
285 | " .short " clobber "\n" \ | 315 | " .short " clobber "\n" \ |
@@ -289,6 +319,11 @@ extern struct pv_mmu_ops pv_mmu_ops; | |||
289 | #define paravirt_alt(insn_string) \ | 319 | #define paravirt_alt(insn_string) \ |
290 | _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") | 320 | _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") |
291 | 321 | ||
322 | /* Simple instruction patching code. */ | ||
323 | #define DEF_NATIVE(ops, name, code) \ | ||
324 | extern const char start_##ops##_##name[], end_##ops##_##name[]; \ | ||
325 | asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") | ||
326 | |||
292 | unsigned paravirt_patch_nop(void); | 327 | unsigned paravirt_patch_nop(void); |
293 | unsigned paravirt_patch_ignore(unsigned len); | 328 | unsigned paravirt_patch_ignore(unsigned len); |
294 | unsigned paravirt_patch_call(void *insnbuf, | 329 | unsigned paravirt_patch_call(void *insnbuf, |
@@ -303,6 +338,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, | |||
303 | unsigned paravirt_patch_insns(void *insnbuf, unsigned len, | 338 | unsigned paravirt_patch_insns(void *insnbuf, unsigned len, |
304 | const char *start, const char *end); | 339 | const char *start, const char *end); |
305 | 340 | ||
341 | unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | ||
342 | unsigned long addr, unsigned len); | ||
343 | |||
306 | int paravirt_disable_iospace(void); | 344 | int paravirt_disable_iospace(void); |
307 | 345 | ||
308 | /* | 346 | /* |
@@ -319,7 +357,7 @@ int paravirt_disable_iospace(void); | |||
319 | * runtime. | 357 | * runtime. |
320 | * | 358 | * |
321 | * Normally, a call to a pv_op function is a simple indirect call: | 359 | * Normally, a call to a pv_op function is a simple indirect call: |
322 | * (paravirt_ops.operations)(args...). | 360 | * (pv_op_struct.operations)(args...). |
323 | * | 361 | * |
324 | * Unfortunately, this is a relatively slow operation for modern CPUs, | 362 | * Unfortunately, this is a relatively slow operation for modern CPUs, |
325 | * because it cannot necessarily determine what the destination | 363 | * because it cannot necessarily determine what the destination |
@@ -329,11 +367,17 @@ int paravirt_disable_iospace(void); | |||
329 | * calls are essentially free, because the call and return addresses | 367 | * calls are essentially free, because the call and return addresses |
330 | * are completely predictable.) | 368 | * are completely predictable.) |
331 | * | 369 | * |
332 | * These macros rely on the standard gcc "regparm(3)" calling | 370 | * For i386, these macros rely on the standard gcc "regparm(3)" calling |
333 | * convention, in which the first three arguments are placed in %eax, | 371 | * convention, in which the first three arguments are placed in %eax, |
334 | * %edx, %ecx (in that order), and the remaining arguments are placed | 372 | * %edx, %ecx (in that order), and the remaining arguments are placed |
335 | * on the stack. All caller-save registers (eax,edx,ecx) are expected | 373 | * on the stack. All caller-save registers (eax,edx,ecx) are expected |
336 | * to be modified (either clobbered or used for return values). | 374 | * to be modified (either clobbered or used for return values). |
375 | * x86_64, on the other hand, already specifies a register-based calling | ||
376 | * convention, returning in %rax, with parameters going in %rdi, %rsi, | ||
377 | * %rdx, and %rcx. Note that for this reason, x86_64 does not need any | ||
378 | * special handling for dealing with 4 arguments, unlike i386. | ||
379 | * However, x86_64 also has to clobber all caller-saved registers, which | ||
380 | * unfortunately are quite a few (r8 - r11). | ||
337 | * | 381 | * |
338 | * The call instruction itself is marked by placing its start address | 382 | * The call instruction itself is marked by placing its start address |
339 | * and size into the .parainstructions section, so that | 383 | * and size into the .parainstructions section, so that |
@@ -356,10 +400,12 @@ int paravirt_disable_iospace(void); | |||
356 | * the return type. The macro then uses sizeof() on that type to | 400 | * the return type. The macro then uses sizeof() on that type to |
357 | * determine whether it's a 32 or 64 bit value, and places the return | 401 | * determine whether it's a 32 or 64 bit value, and places the return
358 | * in the right register(s) (just %eax for 32-bit, and %edx:%eax for | 402 | * in the right register(s) (just %eax for 32-bit, and %edx:%eax for |
359 | * 64-bit). | 403 | * 64-bit). For x86_64 machines, it just returns in %rax regardless of
404 | * the return value size. | ||
360 | * | 405 | * |
361 | * 64-bit arguments are passed as a pair of adjacent 32-bit arguments | 406 | * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
362 | * in low,high order. | 407 | * arguments in low,high order; x86_64 passes them in a single
363 | 408 | * register
363 | * | 409 | * |
364 | * Small structures are passed and returned in registers. The macro | 410 | * Small structures are passed and returned in registers. The macro |
365 | * calling convention can't directly deal with this, so the wrapper | 411 | * calling convention can't directly deal with this, so the wrapper |
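
To make the register discipline concrete, here is roughly what a one-argument void pv_op boils down to on each architecture; the wrapper mirrors the real write_cr3() user of PVOP_VCALL1, which is defined further down:

/* i386:   the argument lands in %eax; %eax/%edx/%ecx appear as outputs.
 * x86-64: the argument lands in %rdi; %rdi/%rsi/%rdx/%rcx are outputs
 *         and "rax", "r8"-"r11" ride in via VEXTRA_CLOBBERS, since the
 *         callee may scribble on any caller-saved register. */
static inline void demo_write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}
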
@@ -369,46 +415,67 @@ int paravirt_disable_iospace(void); | |||
369 | * means that all uses must be wrapped in inline functions. This also | 415 | * means that all uses must be wrapped in inline functions. This also |
370 | * makes sure the incoming and outgoing types are always correct. | 416 | * makes sure the incoming and outgoing types are always correct. |
371 | */ | 417 | */ |
418 | #ifdef CONFIG_X86_32 | ||
419 | #define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx | ||
420 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS | ||
421 | #define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ | ||
422 | "=c" (__ecx) | ||
423 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS | ||
424 | #define EXTRA_CLOBBERS | ||
425 | #define VEXTRA_CLOBBERS | ||
426 | #else | ||
427 | #define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx | ||
428 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax | ||
429 | #define PVOP_VCALL_CLOBBERS "=D" (__edi), \ | ||
430 | "=S" (__esi), "=d" (__edx), \ | ||
431 | "=c" (__ecx) | ||
432 | |||
433 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) | ||
434 | |||
435 | #define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" | ||
436 | #define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" | ||
437 | #endif | ||
438 | |||
372 | #define __PVOP_CALL(rettype, op, pre, post, ...) \ | 439 | #define __PVOP_CALL(rettype, op, pre, post, ...) \ |
373 | ({ \ | 440 | ({ \ |
374 | rettype __ret; \ | 441 | rettype __ret; \ |
375 | unsigned long __eax, __edx, __ecx; \ | 442 | PVOP_CALL_ARGS; \ |
443 | /* This case is 32-bit specific, but is harmless on 64-bit */ \ | ||
444 | /* since there the condition can never hold */ \ | ||
376 | if (sizeof(rettype) > sizeof(unsigned long)) { \ | 445 | if (sizeof(rettype) > sizeof(unsigned long)) { \ |
377 | asm volatile(pre \ | 446 | asm volatile(pre \ |
378 | paravirt_alt(PARAVIRT_CALL) \ | 447 | paravirt_alt(PARAVIRT_CALL) \ |
379 | post \ | 448 | post \ |
380 | : "=a" (__eax), "=d" (__edx), \ | 449 | : PVOP_CALL_CLOBBERS \ |
381 | "=c" (__ecx) \ | ||
382 | : paravirt_type(op), \ | 450 | : paravirt_type(op), \ |
383 | paravirt_clobber(CLBR_ANY), \ | 451 | paravirt_clobber(CLBR_ANY), \ |
384 | ##__VA_ARGS__ \ | 452 | ##__VA_ARGS__ \ |
385 | : "memory", "cc"); \ | 453 | : "memory", "cc" EXTRA_CLOBBERS); \ |
386 | __ret = (rettype)((((u64)__edx) << 32) | __eax); \ | 454 | __ret = (rettype)((((u64)__edx) << 32) | __eax); \ |
387 | } else { \ | 455 | } else { \ |
388 | asm volatile(pre \ | 456 | asm volatile(pre \ |
389 | paravirt_alt(PARAVIRT_CALL) \ | 457 | paravirt_alt(PARAVIRT_CALL) \ |
390 | post \ | 458 | post \ |
391 | : "=a" (__eax), "=d" (__edx), \ | 459 | : PVOP_CALL_CLOBBERS \ |
392 | "=c" (__ecx) \ | ||
393 | : paravirt_type(op), \ | 460 | : paravirt_type(op), \ |
394 | paravirt_clobber(CLBR_ANY), \ | 461 | paravirt_clobber(CLBR_ANY), \ |
395 | ##__VA_ARGS__ \ | 462 | ##__VA_ARGS__ \ |
396 | : "memory", "cc"); \ | 463 | : "memory", "cc" EXTRA_CLOBBERS); \ |
397 | __ret = (rettype)__eax; \ | 464 | __ret = (rettype)__eax; \ |
398 | } \ | 465 | } \ |
399 | __ret; \ | 466 | __ret; \ |
400 | }) | 467 | }) |
401 | #define __PVOP_VCALL(op, pre, post, ...) \ | 468 | #define __PVOP_VCALL(op, pre, post, ...) \ |
402 | ({ \ | 469 | ({ \ |
403 | unsigned long __eax, __edx, __ecx; \ | 470 | PVOP_VCALL_ARGS; \ |
404 | asm volatile(pre \ | 471 | asm volatile(pre \ |
405 | paravirt_alt(PARAVIRT_CALL) \ | 472 | paravirt_alt(PARAVIRT_CALL) \ |
406 | post \ | 473 | post \ |
407 | : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \ | 474 | : PVOP_VCALL_CLOBBERS \ |
408 | : paravirt_type(op), \ | 475 | : paravirt_type(op), \ |
409 | paravirt_clobber(CLBR_ANY), \ | 476 | paravirt_clobber(CLBR_ANY), \ |
410 | ##__VA_ARGS__ \ | 477 | ##__VA_ARGS__ \ |
411 | : "memory", "cc"); \ | 478 | : "memory", "cc" VEXTRA_CLOBBERS); \ |
412 | }) | 479 | }) |
413 | 480 | ||
414 | #define PVOP_CALL0(rettype, op) \ | 481 | #define PVOP_CALL0(rettype, op) \ |
@@ -417,22 +484,26 @@ int paravirt_disable_iospace(void); | |||
417 | __PVOP_VCALL(op, "", "") | 484 | __PVOP_VCALL(op, "", "") |
418 | 485 | ||
419 | #define PVOP_CALL1(rettype, op, arg1) \ | 486 | #define PVOP_CALL1(rettype, op, arg1) \ |
420 | __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1))) | 487 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1))) |
421 | #define PVOP_VCALL1(op, arg1) \ | 488 | #define PVOP_VCALL1(op, arg1) \ |
422 | __PVOP_VCALL(op, "", "", "0" ((u32)(arg1))) | 489 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1))) |
423 | 490 | ||
424 | #define PVOP_CALL2(rettype, op, arg1, arg2) \ | 491 | #define PVOP_CALL2(rettype, op, arg1, arg2) \ |
425 | __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2))) | 492 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ |
493 | "1" ((unsigned long)(arg2))) | ||
426 | #define PVOP_VCALL2(op, arg1, arg2) \ | 494 | #define PVOP_VCALL2(op, arg1, arg2) \ |
427 | __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2))) | 495 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ |
496 | "1" ((unsigned long)(arg2))) | ||
428 | 497 | ||
429 | #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ | 498 | #define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ |
430 | __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), \ | 499 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ |
431 | "1"((u32)(arg2)), "2"((u32)(arg3))) | 500 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) |
432 | #define PVOP_VCALL3(op, arg1, arg2, arg3) \ | 501 | #define PVOP_VCALL3(op, arg1, arg2, arg3) \ |
433 | __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)), \ | 502 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ |
434 | "2"((u32)(arg3))) | 503 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3))) |
435 | 504 | ||
505 | /* This is the only case where x86_64 differs; with register args it is much simpler */ | ||
506 | #ifdef CONFIG_X86_32 | ||
436 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | 507 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ |
437 | __PVOP_CALL(rettype, op, \ | 508 | __PVOP_CALL(rettype, op, \ |
438 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | 509 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ |
@@ -443,16 +514,26 @@ int paravirt_disable_iospace(void); | |||
443 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ | 514 | "push %[_arg4];", "lea 4(%%esp),%%esp;", \ |
444 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ | 515 | "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ |
445 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) | 516 | "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) |
517 | #else | ||
518 | #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ | ||
519 | __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \ | ||
520 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ | ||
521 | "3"((unsigned long)(arg4))) | ||
522 | #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ | ||
523 | __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \ | ||
524 | "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \ | ||
525 | "3"((unsigned long)(arg4))) | ||
526 | #endif | ||
446 | 527 | ||
447 | static inline int paravirt_enabled(void) | 528 | static inline int paravirt_enabled(void) |
448 | { | 529 | { |
449 | return pv_info.paravirt_enabled; | 530 | return pv_info.paravirt_enabled; |
450 | } | 531 | } |
451 | 532 | ||
452 | static inline void load_esp0(struct tss_struct *tss, | 533 | static inline void load_sp0(struct tss_struct *tss, |
453 | struct thread_struct *thread) | 534 | struct thread_struct *thread) |
454 | { | 535 | { |
455 | PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread); | 536 | PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); |
456 | } | 537 | } |
457 | 538 | ||
458 | #define ARCH_SETUP pv_init_ops.arch_setup(); | 539 | #define ARCH_SETUP pv_init_ops.arch_setup(); |
@@ -540,6 +621,18 @@ static inline void write_cr4(unsigned long x) | |||
540 | PVOP_VCALL1(pv_cpu_ops.write_cr4, x); | 621 | PVOP_VCALL1(pv_cpu_ops.write_cr4, x); |
541 | } | 622 | } |
542 | 623 | ||
624 | #ifdef CONFIG_X86_64 | ||
625 | static inline unsigned long read_cr8(void) | ||
626 | { | ||
627 | return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8); | ||
628 | } | ||
629 | |||
630 | static inline void write_cr8(unsigned long x) | ||
631 | { | ||
632 | PVOP_VCALL1(pv_cpu_ops.write_cr8, x); | ||
633 | } | ||
634 | #endif | ||
635 | |||
543 | static inline void raw_safe_halt(void) | 636 | static inline void raw_safe_halt(void) |
544 | { | 637 | { |
545 | PVOP_VCALL0(pv_irq_ops.safe_halt); | 638 | PVOP_VCALL0(pv_irq_ops.safe_halt); |
@@ -613,8 +706,6 @@ static inline unsigned long long paravirt_sched_clock(void) | |||
613 | } | 706 | } |
614 | #define calculate_cpu_khz() (pv_time_ops.get_cpu_khz()) | 707 | #define calculate_cpu_khz() (pv_time_ops.get_cpu_khz()) |
615 | 708 | ||
616 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | ||
617 | |||
618 | static inline unsigned long long paravirt_read_pmc(int counter) | 709 | static inline unsigned long long paravirt_read_pmc(int counter) |
619 | { | 710 | { |
620 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); | 711 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); |
@@ -626,15 +717,36 @@ static inline unsigned long long paravirt_read_pmc(int counter) | |||
626 | high = _l >> 32; \ | 717 | high = _l >> 32; \ |
627 | } while(0) | 718 | } while(0) |
628 | 719 | ||
720 | static inline unsigned long long paravirt_rdtscp(unsigned int *aux) | ||
721 | { | ||
722 | return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux); | ||
723 | } | ||
724 | |||
725 | #define rdtscp(low, high, aux) \ | ||
726 | do { \ | ||
727 | unsigned int __aux; \ | ||
728 | unsigned long __val = paravirt_rdtscp(&__aux); \ | ||
729 | (low) = (u32)__val; \ | ||
730 | (high) = (u32)(__val >> 32); \ | ||
731 | (aux) = __aux; \ | ||
732 | } while (0) | ||
733 | |||
734 | #define rdtscpll(val, aux) \ | ||
735 | do { \ | ||
736 | unsigned int __aux; \ | ||
737 | val = paravirt_rdtscp(&__aux); \ | ||
738 | (aux) = __aux; \ | ||
739 | } while (0) | ||
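[Usage sketch, not part of the patch: a hypothetical caller of the rdtscp
wrappers above. Linux typically programs the TSC_AUX MSR with CPU (and
node) identification, so the aux word says which CPU the timestamp was
taken on.]

    /* Hypothetical example: read the TSC and learn which CPU supplied it */
    static unsigned long long read_tsc_and_cpu(unsigned int *cpu)
    {
            unsigned long long t;
            unsigned int aux;

            rdtscpll(t, aux);       /* t = 64-bit TSC, aux = TSC_AUX MSR */
            *cpu = aux;
            return t;
    }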
740 | |||
629 | static inline void load_TR_desc(void) | 741 | static inline void load_TR_desc(void) |
630 | { | 742 | { |
631 | PVOP_VCALL0(pv_cpu_ops.load_tr_desc); | 743 | PVOP_VCALL0(pv_cpu_ops.load_tr_desc); |
632 | } | 744 | } |
633 | static inline void load_gdt(const struct Xgt_desc_struct *dtr) | 745 | static inline void load_gdt(const struct desc_ptr *dtr) |
634 | { | 746 | { |
635 | PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr); | 747 | PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr); |
636 | } | 748 | } |
637 | static inline void load_idt(const struct Xgt_desc_struct *dtr) | 749 | static inline void load_idt(const struct desc_ptr *dtr) |
638 | { | 750 | { |
639 | PVOP_VCALL1(pv_cpu_ops.load_idt, dtr); | 751 | PVOP_VCALL1(pv_cpu_ops.load_idt, dtr); |
640 | } | 752 | } |
@@ -642,11 +754,11 @@ static inline void set_ldt(const void *addr, unsigned entries) | |||
642 | { | 754 | { |
643 | PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); | 755 | PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); |
644 | } | 756 | } |
645 | static inline void store_gdt(struct Xgt_desc_struct *dtr) | 757 | static inline void store_gdt(struct desc_ptr *dtr) |
646 | { | 758 | { |
647 | PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); | 759 | PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr); |
648 | } | 760 | } |
649 | static inline void store_idt(struct Xgt_desc_struct *dtr) | 761 | static inline void store_idt(struct desc_ptr *dtr) |
650 | { | 762 | { |
651 | PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); | 763 | PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); |
652 | } | 764 | } |
@@ -659,17 +771,22 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu) | |||
659 | { | 771 | { |
660 | PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); | 772 | PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu); |
661 | } | 773 | } |
662 | static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high) | 774 | |
775 | static inline void write_ldt_entry(struct desc_struct *dt, int entry, | ||
776 | const void *desc) | ||
663 | { | 777 | { |
664 | PVOP_VCALL4(pv_cpu_ops.write_ldt_entry, dt, entry, low, high); | 778 | PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc); |
665 | } | 779 | } |
666 | static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high) | 780 | |
781 | static inline void write_gdt_entry(struct desc_struct *dt, int entry, | ||
782 | void *desc, int type) | ||
667 | { | 783 | { |
668 | PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, low, high); | 784 | PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type); |
669 | } | 785 | } |
670 | static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high) | 786 | |
787 | static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) | ||
671 | { | 788 | { |
672 | PVOP_VCALL4(pv_cpu_ops.write_idt_entry, dt, entry, low, high); | 789 | PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g); |
673 | } | 790 | } |
674 | static inline void set_iopl_mask(unsigned mask) | 791 | static inline void set_iopl_mask(unsigned mask) |
675 | { | 792 | { |
@@ -690,17 +807,17 @@ static inline void slow_down_io(void) { | |||
690 | /* | 807 | /* |
691 | * Basic functions accessing APICs. | 808 | * Basic functions accessing APICs. |
692 | */ | 809 | */ |
693 | static inline void apic_write(unsigned long reg, unsigned long v) | 810 | static inline void apic_write(unsigned long reg, u32 v) |
694 | { | 811 | { |
695 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); | 812 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); |
696 | } | 813 | } |
697 | 814 | ||
698 | static inline void apic_write_atomic(unsigned long reg, unsigned long v) | 815 | static inline void apic_write_atomic(unsigned long reg, u32 v) |
699 | { | 816 | { |
700 | PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v); | 817 | PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v); |
701 | } | 818 | } |
702 | 819 | ||
703 | static inline unsigned long apic_read(unsigned long reg) | 820 | static inline u32 apic_read(unsigned long reg) |
704 | { | 821 | { |
705 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); | 822 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); |
706 | } | 823 | } |
@@ -786,9 +903,9 @@ static inline void paravirt_release_pt(unsigned pfn) | |||
786 | PVOP_VCALL1(pv_mmu_ops.release_pt, pfn); | 903 | PVOP_VCALL1(pv_mmu_ops.release_pt, pfn); |
787 | } | 904 | } |
788 | 905 | ||
789 | static inline void paravirt_alloc_pd(unsigned pfn) | 906 | static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn) |
790 | { | 907 | { |
791 | PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn); | 908 | PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn); |
792 | } | 909 | } |
793 | 910 | ||
794 | static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, | 911 | static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, |
@@ -822,128 +939,236 @@ static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, | |||
822 | PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); | 939 | PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); |
823 | } | 940 | } |
824 | 941 | ||
825 | #ifdef CONFIG_X86_PAE | 942 | static inline pte_t __pte(pteval_t val) |
826 | static inline pte_t __pte(unsigned long long val) | ||
827 | { | 943 | { |
828 | unsigned long long ret = PVOP_CALL2(unsigned long long, | 944 | pteval_t ret; |
829 | pv_mmu_ops.make_pte, | 945 | |
830 | val, val >> 32); | 946 | if (sizeof(pteval_t) > sizeof(long)) |
831 | return (pte_t) { ret, ret >> 32 }; | 947 | ret = PVOP_CALL2(pteval_t, |
948 | pv_mmu_ops.make_pte, | ||
949 | val, (u64)val >> 32); | ||
950 | else | ||
951 | ret = PVOP_CALL1(pteval_t, | ||
952 | pv_mmu_ops.make_pte, | ||
953 | val); | ||
954 | |||
955 | return (pte_t) { .pte = ret }; | ||
832 | } | 956 | } |
833 | 957 | ||
834 | static inline pmd_t __pmd(unsigned long long val) | 958 | static inline pteval_t pte_val(pte_t pte) |
835 | { | 959 | { |
836 | return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd, | 960 | pteval_t ret; |
837 | val, val >> 32) }; | 961 | |
962 | if (sizeof(pteval_t) > sizeof(long)) | ||
963 | ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, | ||
964 | pte.pte, (u64)pte.pte >> 32); | ||
965 | else | ||
966 | ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, | ||
967 | pte.pte); | ||
968 | |||
969 | return ret; | ||
838 | } | 970 | } |
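[Aside, not from the patch: sizeof(pteval_t) > sizeof(long) is a
compile-time constant, so the untaken branch above is discarded entirely
and no test survives to run time. A stand-alone model of the idiom, with
hypothetical names:]

    /* Toy model of the compile-time dispatch used by __pte()/pte_val():
     * under PAE the value type is 8 bytes while long is 4, so the
     * two-word branch is kept; on x86_64 both are 8 bytes and the
     * one-word branch is kept. */
    typedef unsigned long long val64_t;     /* stand-in for pteval_t */
    extern val64_t op_two_words(unsigned long lo, unsigned long hi);
    extern val64_t op_one_word(unsigned long word);

    static inline val64_t dispatch(val64_t val)
    {
            if (sizeof(val64_t) > sizeof(long))
                    return op_two_words((unsigned long)val,
                                        (unsigned long)(val >> 32));
            else
                    return op_one_word((unsigned long)val);
    }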
839 | 971 | ||
840 | static inline pgd_t __pgd(unsigned long long val) | 972 | static inline pgd_t __pgd(pgdval_t val) |
841 | { | 973 | { |
842 | return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd, | 974 | pgdval_t ret; |
843 | val, val >> 32) }; | 975 | |
976 | if (sizeof(pgdval_t) > sizeof(long)) | ||
977 | ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, | ||
978 | val, (u64)val >> 32); | ||
979 | else | ||
980 | ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, | ||
981 | val); | ||
982 | |||
983 | return (pgd_t) { ret }; | ||
844 | } | 984 | } |
845 | 985 | ||
846 | static inline unsigned long long pte_val(pte_t x) | 986 | static inline pgdval_t pgd_val(pgd_t pgd) |
847 | { | 987 | { |
848 | return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val, | 988 | pgdval_t ret; |
849 | x.pte_low, x.pte_high); | 989 | |
990 | if (sizeof(pgdval_t) > sizeof(long)) | ||
991 | ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, | ||
992 | pgd.pgd, (u64)pgd.pgd >> 32); | ||
993 | else | ||
994 | ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, | ||
995 | pgd.pgd); | ||
996 | |||
997 | return ret; | ||
850 | } | 998 | } |
851 | 999 | ||
852 | static inline unsigned long long pmd_val(pmd_t x) | 1000 | static inline void set_pte(pte_t *ptep, pte_t pte) |
853 | { | 1001 | { |
854 | return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val, | 1002 | if (sizeof(pteval_t) > sizeof(long)) |
855 | x.pmd, x.pmd >> 32); | 1003 | PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, |
1004 | pte.pte, (u64)pte.pte >> 32); | ||
1005 | else | ||
1006 | PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, | ||
1007 | pte.pte); | ||
856 | } | 1008 | } |
857 | 1009 | ||
858 | static inline unsigned long long pgd_val(pgd_t x) | 1010 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
1011 | pte_t *ptep, pte_t pte) | ||
859 | { | 1012 | { |
860 | return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val, | 1013 | if (sizeof(pteval_t) > sizeof(long)) |
861 | x.pgd, x.pgd >> 32); | 1014 | /* 5 arg words */ |
1015 | pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); | ||
1016 | else | ||
1017 | PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); | ||
862 | } | 1018 | } |
863 | 1019 | ||
864 | static inline void set_pte(pte_t *ptep, pte_t pteval) | 1020 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
865 | { | 1021 | { |
866 | PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high); | 1022 | pmdval_t val = native_pmd_val(pmd); |
1023 | |||
1024 | if (sizeof(pmdval_t) > sizeof(long)) | ||
1025 | PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32); | ||
1026 | else | ||
1027 | PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val); | ||
867 | } | 1028 | } |
868 | 1029 | ||
869 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | 1030 | #if PAGETABLE_LEVELS >= 3 |
870 | pte_t *ptep, pte_t pteval) | 1031 | static inline pmd_t __pmd(pmdval_t val) |
871 | { | 1032 | { |
872 | /* 5 arg words */ | 1033 | pmdval_t ret; |
873 | pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval); | 1034 | |
1035 | if (sizeof(pmdval_t) > sizeof(long)) | ||
1036 | ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, | ||
1037 | val, (u64)val >> 32); | ||
1038 | else | ||
1039 | ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, | ||
1040 | val); | ||
1041 | |||
1042 | return (pmd_t) { ret }; | ||
874 | } | 1043 | } |
875 | 1044 | ||
876 | static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) | 1045 | static inline pmdval_t pmd_val(pmd_t pmd) |
877 | { | 1046 | { |
878 | PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, | 1047 | pmdval_t ret; |
879 | pteval.pte_low, pteval.pte_high); | 1048 | |
1049 | if (sizeof(pmdval_t) > sizeof(long)) | ||
1050 | ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, | ||
1051 | pmd.pmd, (u64)pmd.pmd >> 32); | ||
1052 | else | ||
1053 | ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, | ||
1054 | pmd.pmd); | ||
1055 | |||
1056 | return ret; | ||
880 | } | 1057 | } |
881 | 1058 | ||
882 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, | 1059 | static inline void set_pud(pud_t *pudp, pud_t pud) |
883 | pte_t *ptep, pte_t pte) | ||
884 | { | 1060 | { |
885 | /* 5 arg words */ | 1061 | pudval_t val = native_pud_val(pud); |
886 | pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); | 1062 | |
1063 | if (sizeof(pudval_t) > sizeof(long)) | ||
1064 | PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, | ||
1065 | val, (u64)val >> 32); | ||
1066 | else | ||
1067 | PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, | ||
1068 | val); | ||
1069 | } | ||
1070 | #if PAGETABLE_LEVELS == 4 | ||
1071 | static inline pud_t __pud(pudval_t val) | ||
1072 | { | ||
1073 | pudval_t ret; | ||
1074 | |||
1075 | if (sizeof(pudval_t) > sizeof(long)) | ||
1076 | ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, | ||
1077 | val, (u64)val >> 32); | ||
1078 | else | ||
1079 | ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, | ||
1080 | val); | ||
1081 | |||
1082 | return (pud_t) { ret }; | ||
887 | } | 1083 | } |
888 | 1084 | ||
889 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | 1085 | static inline pudval_t pud_val(pud_t pud) |
890 | { | 1086 | { |
891 | PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, | 1087 | pudval_t ret; |
892 | pmdval.pmd, pmdval.pmd >> 32); | 1088 | |
1089 | if (sizeof(pudval_t) > sizeof(long)) | ||
1090 | ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val, | ||
1091 | pud.pud, (u64)pud.pud >> 32); | ||
1092 | else | ||
1093 | ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, | ||
1094 | pud.pud); | ||
1095 | |||
1096 | return ret; | ||
893 | } | 1097 | } |
894 | 1098 | ||
895 | static inline void set_pud(pud_t *pudp, pud_t pudval) | 1099 | static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) |
896 | { | 1100 | { |
897 | PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, | 1101 | pgdval_t val = native_pgd_val(pgd); |
898 | pudval.pgd.pgd, pudval.pgd.pgd >> 32); | 1102 | |
1103 | if (sizeof(pgdval_t) > sizeof(long)) | ||
1104 | PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp, | ||
1105 | val, (u64)val >> 32); | ||
1106 | else | ||
1107 | PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, | ||
1108 | val); | ||
899 | } | 1109 | } |
900 | 1110 | ||
901 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 1111 | static inline void pgd_clear(pgd_t *pgdp) |
902 | { | 1112 | { |
903 | PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); | 1113 | set_pgd(pgdp, __pgd(0)); |
904 | } | 1114 | } |
905 | 1115 | ||
906 | static inline void pmd_clear(pmd_t *pmdp) | 1116 | static inline void pud_clear(pud_t *pudp) |
907 | { | 1117 | { |
908 | PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); | 1118 | set_pud(pudp, __pud(0)); |
909 | } | 1119 | } |
910 | 1120 | ||
911 | #else /* !CONFIG_X86_PAE */ | 1121 | #endif /* PAGETABLE_LEVELS == 4 */ |
912 | 1122 | ||
913 | static inline pte_t __pte(unsigned long val) | 1123 | #endif /* PAGETABLE_LEVELS >= 3 */ |
1124 | |||
1125 | #ifdef CONFIG_X86_PAE | ||
1126 | /* Special-case pte-setting operations for PAE, which can't update a | ||
1127 | 64-bit pte atomically */ | ||
1128 | static inline void set_pte_atomic(pte_t *ptep, pte_t pte) | ||
914 | { | 1129 | { |
915 | return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) }; | 1130 | PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, |
1131 | pte.pte, pte.pte >> 32); | ||
916 | } | 1132 | } |
917 | 1133 | ||
918 | static inline pgd_t __pgd(unsigned long val) | 1134 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, |
1135 | pte_t *ptep, pte_t pte) | ||
919 | { | 1136 | { |
920 | return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) }; | 1137 | /* 5 arg words */ |
1138 | pv_mmu_ops.set_pte_present(mm, addr, ptep, pte); | ||
921 | } | 1139 | } |
922 | 1140 | ||
923 | static inline unsigned long pte_val(pte_t x) | 1141 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
1142 | pte_t *ptep) | ||
924 | { | 1143 | { |
925 | return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low); | 1144 | PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep); |
926 | } | 1145 | } |
927 | 1146 | ||
928 | static inline unsigned long pgd_val(pgd_t x) | 1147 | static inline void pmd_clear(pmd_t *pmdp) |
1148 | { | ||
1149 | PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp); | ||
1150 | } | ||
1151 | #else /* !CONFIG_X86_PAE */ | ||
1152 | static inline void set_pte_atomic(pte_t *ptep, pte_t pte) | ||
929 | { | 1153 | { |
930 | return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd); | 1154 | set_pte(ptep, pte); |
931 | } | 1155 | } |
932 | 1156 | ||
933 | static inline void set_pte(pte_t *ptep, pte_t pteval) | 1157 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, |
1158 | pte_t *ptep, pte_t pte) | ||
934 | { | 1159 | { |
935 | PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low); | 1160 | set_pte(ptep, pte); |
936 | } | 1161 | } |
937 | 1162 | ||
938 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | 1163 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
939 | pte_t *ptep, pte_t pteval) | 1164 | pte_t *ptep) |
940 | { | 1165 | { |
941 | PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low); | 1166 | set_pte_at(mm, addr, ptep, __pte(0)); |
942 | } | 1167 | } |
943 | 1168 | ||
944 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | 1169 | static inline void pmd_clear(pmd_t *pmdp) |
945 | { | 1170 | { |
946 | PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd); | 1171 | set_pmd(pmdp, __pmd(0)); |
947 | } | 1172 | } |
948 | #endif /* CONFIG_X86_PAE */ | 1173 | #endif /* CONFIG_X86_PAE */ |
949 | 1174 | ||
@@ -1014,52 +1239,68 @@ struct paravirt_patch_site { | |||
1014 | extern struct paravirt_patch_site __parainstructions[], | 1239 | extern struct paravirt_patch_site __parainstructions[], |
1015 | __parainstructions_end[]; | 1240 | __parainstructions_end[]; |
1016 | 1241 | ||
1242 | #ifdef CONFIG_X86_32 | ||
1243 | #define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;" | ||
1244 | #define PV_RESTORE_REGS "popl %%edx; popl %%ecx" | ||
1245 | #define PV_FLAGS_ARG "0" | ||
1246 | #define PV_EXTRA_CLOBBERS | ||
1247 | #define PV_VEXTRA_CLOBBERS | ||
1248 | #else | ||
1249 | /* We save some registers, but saving all of them would be too much. We | ||
1250 | * clobber all caller-saved registers except the argument parameter */ | ||
1251 | #define PV_SAVE_REGS "pushq %%rdi;" | ||
1252 | #define PV_RESTORE_REGS "popq %%rdi;" | ||
1253 | #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx" | ||
1254 | #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx" | ||
1255 | #define PV_FLAGS_ARG "D" | ||
1256 | #endif | ||
1257 | |||
1017 | static inline unsigned long __raw_local_save_flags(void) | 1258 | static inline unsigned long __raw_local_save_flags(void) |
1018 | { | 1259 | { |
1019 | unsigned long f; | 1260 | unsigned long f; |
1020 | 1261 | ||
1021 | asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" | 1262 | asm volatile(paravirt_alt(PV_SAVE_REGS |
1022 | PARAVIRT_CALL | 1263 | PARAVIRT_CALL |
1023 | "popl %%edx; popl %%ecx") | 1264 | PV_RESTORE_REGS) |
1024 | : "=a"(f) | 1265 | : "=a"(f) |
1025 | : paravirt_type(pv_irq_ops.save_fl), | 1266 | : paravirt_type(pv_irq_ops.save_fl), |
1026 | paravirt_clobber(CLBR_EAX) | 1267 | paravirt_clobber(CLBR_EAX) |
1027 | : "memory", "cc"); | 1268 | : "memory", "cc" PV_VEXTRA_CLOBBERS); |
1028 | return f; | 1269 | return f; |
1029 | } | 1270 | } |
1030 | 1271 | ||
1031 | static inline void raw_local_irq_restore(unsigned long f) | 1272 | static inline void raw_local_irq_restore(unsigned long f) |
1032 | { | 1273 | { |
1033 | asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" | 1274 | asm volatile(paravirt_alt(PV_SAVE_REGS |
1034 | PARAVIRT_CALL | 1275 | PARAVIRT_CALL |
1035 | "popl %%edx; popl %%ecx") | 1276 | PV_RESTORE_REGS) |
1036 | : "=a"(f) | 1277 | : "=a"(f) |
1037 | : "0"(f), | 1278 | : PV_FLAGS_ARG(f), |
1038 | paravirt_type(pv_irq_ops.restore_fl), | 1279 | paravirt_type(pv_irq_ops.restore_fl), |
1039 | paravirt_clobber(CLBR_EAX) | 1280 | paravirt_clobber(CLBR_EAX) |
1040 | : "memory", "cc"); | 1281 | : "memory", "cc" PV_EXTRA_CLOBBERS); |
1041 | } | 1282 | } |
1042 | 1283 | ||
1043 | static inline void raw_local_irq_disable(void) | 1284 | static inline void raw_local_irq_disable(void) |
1044 | { | 1285 | { |
1045 | asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" | 1286 | asm volatile(paravirt_alt(PV_SAVE_REGS |
1046 | PARAVIRT_CALL | 1287 | PARAVIRT_CALL |
1047 | "popl %%edx; popl %%ecx") | 1288 | PV_RESTORE_REGS) |
1048 | : | 1289 | : |
1049 | : paravirt_type(pv_irq_ops.irq_disable), | 1290 | : paravirt_type(pv_irq_ops.irq_disable), |
1050 | paravirt_clobber(CLBR_EAX) | 1291 | paravirt_clobber(CLBR_EAX) |
1051 | : "memory", "eax", "cc"); | 1292 | : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); |
1052 | } | 1293 | } |
1053 | 1294 | ||
1054 | static inline void raw_local_irq_enable(void) | 1295 | static inline void raw_local_irq_enable(void) |
1055 | { | 1296 | { |
1056 | asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" | 1297 | asm volatile(paravirt_alt(PV_SAVE_REGS |
1057 | PARAVIRT_CALL | 1298 | PARAVIRT_CALL |
1058 | "popl %%edx; popl %%ecx") | 1299 | PV_RESTORE_REGS) |
1059 | : | 1300 | : |
1060 | : paravirt_type(pv_irq_ops.irq_enable), | 1301 | : paravirt_type(pv_irq_ops.irq_enable), |
1061 | paravirt_clobber(CLBR_EAX) | 1302 | paravirt_clobber(CLBR_EAX) |
1062 | : "memory", "eax", "cc"); | 1303 | : "memory", "eax", "cc" PV_EXTRA_CLOBBERS); |
1063 | } | 1304 | } |
1064 | 1305 | ||
1065 | static inline unsigned long __raw_local_irq_save(void) | 1306 | static inline unsigned long __raw_local_irq_save(void) |
@@ -1071,27 +1312,6 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1071 | return f; | 1312 | return f; |
1072 | } | 1313 | } |
1073 | 1314 | ||
1074 | #define CLI_STRING \ | ||
1075 | _paravirt_alt("pushl %%ecx; pushl %%edx;" \ | ||
1076 | "call *%[paravirt_cli_opptr];" \ | ||
1077 | "popl %%edx; popl %%ecx", \ | ||
1078 | "%c[paravirt_cli_type]", "%c[paravirt_clobber]") | ||
1079 | |||
1080 | #define STI_STRING \ | ||
1081 | _paravirt_alt("pushl %%ecx; pushl %%edx;" \ | ||
1082 | "call *%[paravirt_sti_opptr];" \ | ||
1083 | "popl %%edx; popl %%ecx", \ | ||
1084 | "%c[paravirt_sti_type]", "%c[paravirt_clobber]") | ||
1085 | |||
1086 | #define CLI_STI_CLOBBERS , "%eax" | ||
1087 | #define CLI_STI_INPUT_ARGS \ | ||
1088 | , \ | ||
1089 | [paravirt_cli_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_disable)), \ | ||
1090 | [paravirt_cli_opptr] "m" (pv_irq_ops.irq_disable), \ | ||
1091 | [paravirt_sti_type] "i" (PARAVIRT_PATCH(pv_irq_ops.irq_enable)), \ | ||
1092 | [paravirt_sti_opptr] "m" (pv_irq_ops.irq_enable), \ | ||
1093 | paravirt_clobber(CLBR_EAX) | ||
1094 | |||
1095 | /* Make sure as little as possible of this mess escapes. */ | 1315 | /* Make sure as little as possible of this mess escapes. */ |
1096 | #undef PARAVIRT_CALL | 1316 | #undef PARAVIRT_CALL |
1097 | #undef __PVOP_CALL | 1317 | #undef __PVOP_CALL |
@@ -1109,43 +1329,72 @@ static inline unsigned long __raw_local_irq_save(void) | |||
1109 | 1329 | ||
1110 | #else /* __ASSEMBLY__ */ | 1330 | #else /* __ASSEMBLY__ */ |
1111 | 1331 | ||
1112 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) | 1332 | #define _PVSITE(ptype, clobbers, ops, word, algn) \ |
1113 | |||
1114 | #define PARA_SITE(ptype, clobbers, ops) \ | ||
1115 | 771:; \ | 1333 | 771:; \ |
1116 | ops; \ | 1334 | ops; \ |
1117 | 772:; \ | 1335 | 772:; \ |
1118 | .pushsection .parainstructions,"a"; \ | 1336 | .pushsection .parainstructions,"a"; \ |
1119 | .long 771b; \ | 1337 | .align algn; \ |
1338 | word 771b; \ | ||
1120 | .byte ptype; \ | 1339 | .byte ptype; \ |
1121 | .byte 772b-771b; \ | 1340 | .byte 772b-771b; \ |
1122 | .short clobbers; \ | 1341 | .short clobbers; \ |
1123 | .popsection | 1342 | .popsection |
1124 | 1343 | ||
1344 | |||
1345 | #ifdef CONFIG_X86_64 | ||
1346 | #define PV_SAVE_REGS pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx | ||
1347 | #define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax | ||
1348 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8) | ||
1349 | #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8) | ||
1350 | #else | ||
1351 | #define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx | ||
1352 | #define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax | ||
1353 | #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) | ||
1354 | #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) | ||
1355 | #endif | ||
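[Reference, not part of the hunk shown here: each _PVSITE record above is
laid out to match the patch-site descriptor this header declares for the
patcher, the pointer field being the .long/.quad word chosen above:]

    struct paravirt_patch_site {
            u8 *instr;              /* 771b: start of the original code  */
            u8 instrtype;           /* ptype: which pv op the site calls */
            u8 len;                 /* 772b-771b: size of the site       */
            u16 clobbers;           /* what registers you may clobber    */
    };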
1356 | |||
1125 | #define INTERRUPT_RETURN \ | 1357 | #define INTERRUPT_RETURN \ |
1126 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ | 1358 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ |
1127 | jmp *%cs:pv_cpu_ops+PV_CPU_iret) | 1359 | jmp *%cs:pv_cpu_ops+PV_CPU_iret) |
1128 | 1360 | ||
1129 | #define DISABLE_INTERRUPTS(clobbers) \ | 1361 | #define DISABLE_INTERRUPTS(clobbers) \ |
1130 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ | 1362 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ |
1131 | pushl %eax; pushl %ecx; pushl %edx; \ | 1363 | PV_SAVE_REGS; \ |
1132 | call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \ | 1364 | call *%cs:pv_irq_ops+PV_IRQ_irq_disable; \ |
1133 | popl %edx; popl %ecx; popl %eax) \ | 1365 | PV_RESTORE_REGS;) \ |
1134 | 1366 | ||
1135 | #define ENABLE_INTERRUPTS(clobbers) \ | 1367 | #define ENABLE_INTERRUPTS(clobbers) \ |
1136 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ | 1368 | PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ |
1137 | pushl %eax; pushl %ecx; pushl %edx; \ | 1369 | PV_SAVE_REGS; \ |
1138 | call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \ | 1370 | call *%cs:pv_irq_ops+PV_IRQ_irq_enable; \ |
1139 | popl %edx; popl %ecx; popl %eax) | 1371 | PV_RESTORE_REGS;) |
1372 | |||
1373 | #define ENABLE_INTERRUPTS_SYSCALL_RET \ | ||
1374 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\ | ||
1375 | CLBR_NONE, \ | ||
1376 | jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret) | ||
1140 | 1377 | ||
1141 | #define ENABLE_INTERRUPTS_SYSEXIT \ | ||
1142 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\ | ||
1143 | jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit) | ||
1144 | 1378 | ||
1379 | #ifdef CONFIG_X86_32 | ||
1145 | #define GET_CR0_INTO_EAX \ | 1380 | #define GET_CR0_INTO_EAX \ |
1146 | push %ecx; push %edx; \ | 1381 | push %ecx; push %edx; \ |
1147 | call *pv_cpu_ops+PV_CPU_read_cr0; \ | 1382 | call *pv_cpu_ops+PV_CPU_read_cr0; \ |
1148 | pop %edx; pop %ecx | 1383 | pop %edx; pop %ecx |
1384 | #else | ||
1385 | #define SWAPGS \ | ||
1386 | PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ | ||
1387 | PV_SAVE_REGS; \ | ||
1388 | call *pv_cpu_ops+PV_CPU_swapgs; \ | ||
1389 | PV_RESTORE_REGS \ | ||
1390 | ) | ||
1391 | |||
1392 | #define GET_CR2_INTO_RCX \ | ||
1393 | call *pv_mmu_ops+PV_MMU_read_cr2; \ | ||
1394 | movq %rax, %rcx; \ | ||
1395 | xorq %rax, %rax; | ||
1396 | |||
1397 | #endif | ||
1149 | 1398 | ||
1150 | #endif /* __ASSEMBLY__ */ | 1399 | #endif /* __ASSEMBLY__ */ |
1151 | #endif /* CONFIG_PARAVIRT */ | 1400 | #endif /* CONFIG_PARAVIRT */ |
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index e88361966347..c61190cb9e12 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h | |||
@@ -66,6 +66,7 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
66 | 66 | ||
67 | 67 | ||
68 | #ifdef CONFIG_PCI | 68 | #ifdef CONFIG_PCI |
69 | extern void early_quirks(void); | ||
69 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 70 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
70 | enum pci_dma_burst_strategy *strat, | 71 | enum pci_dma_burst_strategy *strat, |
71 | unsigned long *strategy_parameter) | 72 | unsigned long *strategy_parameter) |
@@ -73,9 +74,10 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
73 | *strat = PCI_DMA_BURST_INFINITY; | 74 | *strat = PCI_DMA_BURST_INFINITY; |
74 | *strategy_parameter = ~0UL; | 75 | *strategy_parameter = ~0UL; |
75 | } | 76 | } |
77 | #else | ||
78 | static inline void early_quirks(void) { } | ||
76 | #endif | 79 | #endif |
77 | 80 | ||
78 | |||
79 | #endif /* __KERNEL__ */ | 81 | #endif /* __KERNEL__ */ |
80 | 82 | ||
81 | #ifdef CONFIG_X86_32 | 83 | #ifdef CONFIG_X86_32 |
@@ -90,6 +92,19 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
90 | /* generic pci stuff */ | 92 | /* generic pci stuff */ |
91 | #include <asm-generic/pci.h> | 93 | #include <asm-generic/pci.h> |
92 | 94 | ||
95 | #ifdef CONFIG_NUMA | ||
96 | /* Returns the node based on pci bus */ | ||
97 | static inline int __pcibus_to_node(struct pci_bus *bus) | ||
98 | { | ||
99 | struct pci_sysdata *sd = bus->sysdata; | ||
93 | 100 | ||
101 | return sd->node; | ||
102 | } | ||
103 | |||
104 | static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) | ||
105 | { | ||
106 | return node_to_cpumask(__pcibus_to_node(bus)); | ||
107 | } | ||
108 | #endif | ||
94 | 109 | ||
95 | #endif | 110 | #endif |
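[Usage sketch, not part of the patch: the node lookup added above is what
allows NUMA-local allocation of per-device data; the helper below is
hypothetical and would also need linux/slab.h.]

    /* Hypothetical: allocate memory on the node closest to a PCI bus */
    static void *alloc_near_bus(struct pci_bus *bus, size_t size)
    {
            return kmalloc_node(size, GFP_KERNEL, __pcibus_to_node(bus));
    }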
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h index ef54226a9325..374690314539 100644 --- a/include/asm-x86/pci_64.h +++ b/include/asm-x86/pci_64.h | |||
@@ -26,7 +26,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int l | |||
26 | 26 | ||
27 | 27 | ||
28 | extern void pci_iommu_alloc(void); | 28 | extern void pci_iommu_alloc(void); |
29 | extern int iommu_setup(char *opt); | ||
30 | 29 | ||
31 | /* The PCI address space does equal the physical memory | 30 | /* The PCI address space does equal the physical memory |
32 | * address space. The networking and block device layers use | 31 | * address space. The networking and block device layers use |
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index 35962bbe5e72..c0305bff0f19 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h | |||
@@ -7,22 +7,22 @@ | |||
7 | #include <linux/cache.h> | 7 | #include <linux/cache.h> |
8 | #include <asm/page.h> | 8 | #include <asm/page.h> |
9 | 9 | ||
10 | /* Per-processor data structure. %gs points to it while the kernel runs */ | 10 | /* Per-processor data structure. %gs points to it while the kernel runs */ |
11 | struct x8664_pda { | 11 | struct x8664_pda { |
12 | struct task_struct *pcurrent; /* 0 Current process */ | 12 | struct task_struct *pcurrent; /* 0 Current process */ |
13 | unsigned long data_offset; /* 8 Per cpu data offset from linker | 13 | unsigned long data_offset; /* 8 Per cpu data offset from linker |
14 | address */ | 14 | address */ |
15 | unsigned long kernelstack; /* 16 top of kernel stack for current */ | 15 | unsigned long kernelstack; /* 16 top of kernel stack for current */ |
16 | unsigned long oldrsp; /* 24 user rsp for system call */ | 16 | unsigned long oldrsp; /* 24 user rsp for system call */ |
17 | int irqcount; /* 32 Irq nesting counter. Starts with -1 */ | 17 | int irqcount; /* 32 Irq nesting counter. Starts at -1 */ |
18 | int cpunumber; /* 36 Logical CPU number */ | 18 | unsigned int cpunumber; /* 36 Logical CPU number */ |
19 | #ifdef CONFIG_CC_STACKPROTECTOR | 19 | #ifdef CONFIG_CC_STACKPROTECTOR |
20 | unsigned long stack_canary; /* 40 stack canary value */ | 20 | unsigned long stack_canary; /* 40 stack canary value */ |
21 | /* gcc-ABI: this canary MUST be at | 21 | /* gcc-ABI: this canary MUST be at |
22 | offset 40!!! */ | 22 | offset 40!!! */ |
23 | #endif | 23 | #endif |
24 | char *irqstackptr; | 24 | char *irqstackptr; |
25 | int nodenumber; /* number of current node */ | 25 | unsigned int nodenumber; /* number of current node */ |
26 | unsigned int __softirq_pending; | 26 | unsigned int __softirq_pending; |
27 | unsigned int __nmi_count; /* number of NMI on this CPUs */ | 27 | unsigned int __nmi_count; /* number of NMI on this CPUs */ |
28 | short mmu_state; | 28 | short mmu_state; |
@@ -40,13 +40,14 @@ struct x8664_pda { | |||
40 | 40 | ||
41 | extern struct x8664_pda *_cpu_pda[]; | 41 | extern struct x8664_pda *_cpu_pda[]; |
42 | extern struct x8664_pda boot_cpu_pda[]; | 42 | extern struct x8664_pda boot_cpu_pda[]; |
43 | extern void pda_init(int); | ||
43 | 44 | ||
44 | #define cpu_pda(i) (_cpu_pda[i]) | 45 | #define cpu_pda(i) (_cpu_pda[i]) |
45 | 46 | ||
46 | /* | 47 | /* |
47 | * There is no fast way to get the base address of the PDA; all the accesses | 48 | * There is no fast way to get the base address of the PDA; all the accesses |
48 | * have to mention %fs/%gs. So it needs to be done this Torvaldian way. | 49 | * have to mention %fs/%gs. So it needs to be done this Torvaldian way. |
49 | */ | 50 | */ |
50 | extern void __bad_pda_field(void) __attribute__((noreturn)); | 51 | extern void __bad_pda_field(void) __attribute__((noreturn)); |
51 | 52 | ||
52 | /* | 53 | /* |
@@ -57,70 +58,70 @@ extern struct x8664_pda _proxy_pda; | |||
57 | 58 | ||
58 | #define pda_offset(field) offsetof(struct x8664_pda, field) | 59 | #define pda_offset(field) offsetof(struct x8664_pda, field) |
59 | 60 | ||
60 | #define pda_to_op(op,field,val) do { \ | 61 | #define pda_to_op(op, field, val) do { \ |
61 | typedef typeof(_proxy_pda.field) T__; \ | 62 | typedef typeof(_proxy_pda.field) T__; \ |
62 | if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ | 63 | if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ |
63 | switch (sizeof(_proxy_pda.field)) { \ | 64 | switch (sizeof(_proxy_pda.field)) { \ |
64 | case 2: \ | 65 | case 2: \ |
65 | asm(op "w %1,%%gs:%c2" : \ | 66 | asm(op "w %1,%%gs:%c2" : \ |
66 | "+m" (_proxy_pda.field) : \ | 67 | "+m" (_proxy_pda.field) : \ |
67 | "ri" ((T__)val), \ | 68 | "ri" ((T__)val), \ |
68 | "i"(pda_offset(field))); \ | 69 | "i"(pda_offset(field))); \ |
69 | break; \ | 70 | break; \ |
70 | case 4: \ | 71 | case 4: \ |
71 | asm(op "l %1,%%gs:%c2" : \ | 72 | asm(op "l %1,%%gs:%c2" : \ |
72 | "+m" (_proxy_pda.field) : \ | 73 | "+m" (_proxy_pda.field) : \ |
73 | "ri" ((T__)val), \ | 74 | "ri" ((T__)val), \ |
74 | "i" (pda_offset(field))); \ | 75 | "i" (pda_offset(field))); \ |
75 | break; \ | 76 | break; \ |
76 | case 8: \ | 77 | case 8: \ |
77 | asm(op "q %1,%%gs:%c2": \ | 78 | asm(op "q %1,%%gs:%c2": \ |
78 | "+m" (_proxy_pda.field) : \ | 79 | "+m" (_proxy_pda.field) : \ |
79 | "ri" ((T__)val), \ | 80 | "ri" ((T__)val), \ |
80 | "i"(pda_offset(field))); \ | 81 | "i"(pda_offset(field))); \ |
81 | break; \ | 82 | break; \ |
82 | default: \ | 83 | default: \ |
83 | __bad_pda_field(); \ | 84 | __bad_pda_field(); \ |
84 | } \ | 85 | } \ |
85 | } while (0) | 86 | } while (0) |
86 | 87 | ||
87 | #define pda_from_op(op,field) ({ \ | 88 | #define pda_from_op(op,field) ({ \ |
88 | typeof(_proxy_pda.field) ret__; \ | 89 | typeof(_proxy_pda.field) ret__; \ |
89 | switch (sizeof(_proxy_pda.field)) { \ | 90 | switch (sizeof(_proxy_pda.field)) { \ |
90 | case 2: \ | 91 | case 2: \ |
91 | asm(op "w %%gs:%c1,%0" : \ | 92 | asm(op "w %%gs:%c1,%0" : \ |
92 | "=r" (ret__) : \ | 93 | "=r" (ret__) : \ |
93 | "i" (pda_offset(field)), \ | 94 | "i" (pda_offset(field)), \ |
94 | "m" (_proxy_pda.field)); \ | 95 | "m" (_proxy_pda.field)); \ |
95 | break; \ | 96 | break; \ |
96 | case 4: \ | 97 | case 4: \ |
97 | asm(op "l %%gs:%c1,%0": \ | 98 | asm(op "l %%gs:%c1,%0": \ |
98 | "=r" (ret__): \ | 99 | "=r" (ret__): \ |
99 | "i" (pda_offset(field)), \ | 100 | "i" (pda_offset(field)), \ |
100 | "m" (_proxy_pda.field)); \ | 101 | "m" (_proxy_pda.field)); \ |
101 | break; \ | 102 | break; \ |
102 | case 8: \ | 103 | case 8: \ |
103 | asm(op "q %%gs:%c1,%0": \ | 104 | asm(op "q %%gs:%c1,%0": \ |
104 | "=r" (ret__) : \ | 105 | "=r" (ret__) : \ |
105 | "i" (pda_offset(field)), \ | 106 | "i" (pda_offset(field)), \ |
106 | "m" (_proxy_pda.field)); \ | 107 | "m" (_proxy_pda.field)); \ |
107 | break; \ | 108 | break; \ |
108 | default: \ | 109 | default: \ |
109 | __bad_pda_field(); \ | 110 | __bad_pda_field(); \ |
110 | } \ | 111 | } \ |
111 | ret__; }) | 112 | ret__; }) |
112 | 113 | ||
113 | #define read_pda(field) pda_from_op("mov",field) | 114 | #define read_pda(field) pda_from_op("mov", field) |
114 | #define write_pda(field,val) pda_to_op("mov",field,val) | 115 | #define write_pda(field, val) pda_to_op("mov", field, val) |
115 | #define add_pda(field,val) pda_to_op("add",field,val) | 116 | #define add_pda(field, val) pda_to_op("add", field, val) |
116 | #define sub_pda(field,val) pda_to_op("sub",field,val) | 117 | #define sub_pda(field, val) pda_to_op("sub", field, val) |
117 | #define or_pda(field,val) pda_to_op("or",field,val) | 118 | #define or_pda(field, val) pda_to_op("or", field, val) |
118 | 119 | ||
119 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 120 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
120 | #define test_and_clear_bit_pda(bit,field) ({ \ | 121 | #define test_and_clear_bit_pda(bit, field) ({ \ |
121 | int old__; \ | 122 | int old__; \ |
122 | asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ | 123 | asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ |
123 | : "=r" (old__), "+m" (_proxy_pda.field) \ | 124 | : "=r" (old__), "+m" (_proxy_pda.field) \ |
124 | : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ | 125 | : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ |
125 | old__; \ | 126 | old__; \ |
126 | }) | 127 | }) |
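[Usage sketch, not part of the patch: each PDA accessor above expands to a
single %gs-relative instruction; the function below is hypothetical, the
field names are the real ones from struct x8664_pda.]

    /* Hypothetical user of the PDA accessors */
    static unsigned int pda_example(void)
    {
            add_pda(__nmi_count, 1);        /* addl $1,%gs:<offset>  */
            write_pda(mmu_state, 0);        /* movw $0,%gs:<offset>  */
            return read_pda(cpunumber);     /* movl %gs:36,<reg>     */
    }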
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index a1aaad274cca..0dec00f27eb4 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h | |||
@@ -1,5 +1,142 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_PERCPU_H_ |
2 | # include "percpu_32.h" | 2 | #define _ASM_X86_PERCPU_H_ |
3 | #else | 3 | |
4 | # include "percpu_64.h" | 4 | #ifdef CONFIG_X86_64 |
5 | #include <linux/compiler.h> | ||
6 | |||
7 | /* Same as asm-generic/percpu.h, except that we store the per cpu offset | ||
8 | in the PDA. Longer term the PDA and every per cpu variable | ||
9 | should just be put into a single section and referenced directly | ||
10 | from %gs */ | ||
11 | |||
12 | #ifdef CONFIG_SMP | ||
13 | #include <asm/pda.h> | ||
14 | |||
15 | #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) | ||
16 | #define __my_cpu_offset read_pda(data_offset) | ||
17 | |||
18 | #define per_cpu_offset(x) (__per_cpu_offset(x)) | ||
19 | |||
5 | #endif | 20 | #endif |
21 | #include <asm-generic/percpu.h> | ||
22 | |||
23 | DECLARE_PER_CPU(struct x8664_pda, pda); | ||
24 | |||
25 | #else /* CONFIG_X86_64 */ | ||
26 | |||
27 | #ifdef __ASSEMBLY__ | ||
28 | |||
29 | /* | ||
30 | * PER_CPU finds the address of a per-cpu variable. | ||
31 | * | ||
32 | * Args: | ||
33 | * var - variable name | ||
34 | * reg - 32bit register | ||
35 | * | ||
36 | * The resulting address is stored in the "reg" argument. | ||
37 | * | ||
38 | * Example: | ||
39 | * PER_CPU(cpu_gdt_descr, %ebx) | ||
40 | */ | ||
41 | #ifdef CONFIG_SMP | ||
42 | #define PER_CPU(var, reg) \ | ||
43 | movl %fs:per_cpu__##this_cpu_off, reg; \ | ||
44 | lea per_cpu__##var(reg), reg | ||
45 | #define PER_CPU_VAR(var) %fs:per_cpu__##var | ||
46 | #else /* ! SMP */ | ||
47 | #define PER_CPU(var, reg) \ | ||
48 | movl $per_cpu__##var, reg | ||
49 | #define PER_CPU_VAR(var) per_cpu__##var | ||
50 | #endif /* SMP */ | ||
51 | |||
52 | #else /* ...!ASSEMBLY */ | ||
53 | |||
54 | /* | ||
55 | * PER_CPU finds the address of a per-cpu variable. | ||
56 | * | ||
57 | * Args: | ||
58 | * var - variable name | ||
59 | * cpu - 32bit register containing the current CPU number | ||
60 | * | ||
61 | * The resulting address is stored in the "cpu" argument. | ||
62 | * | ||
63 | * Example: | ||
64 | * PER_CPU(cpu_gdt_descr, %ebx) | ||
65 | */ | ||
66 | #ifdef CONFIG_SMP | ||
67 | |||
68 | #define __my_cpu_offset x86_read_percpu(this_cpu_off) | ||
69 | |||
70 | /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ | ||
71 | #define __percpu_seg "%%fs:" | ||
72 | |||
73 | #else /* !SMP */ | ||
74 | |||
75 | #define __percpu_seg "" | ||
76 | |||
77 | #endif /* SMP */ | ||
78 | |||
79 | #include <asm-generic/percpu.h> | ||
80 | |||
81 | /* We can use this directly for the local CPU (faster). */ | ||
82 | DECLARE_PER_CPU(unsigned long, this_cpu_off); | ||
83 | |||
84 | /* For arch-specific code, we can use direct single-insn ops (they | ||
85 | * don't give an lvalue though). */ | ||
86 | extern void __bad_percpu_size(void); | ||
87 | |||
88 | #define percpu_to_op(op,var,val) \ | ||
89 | do { \ | ||
90 | typedef typeof(var) T__; \ | ||
91 | if (0) { T__ tmp__; tmp__ = (val); } \ | ||
92 | switch (sizeof(var)) { \ | ||
93 | case 1: \ | ||
94 | asm(op "b %1,"__percpu_seg"%0" \ | ||
95 | : "+m" (var) \ | ||
96 | :"ri" ((T__)val)); \ | ||
97 | break; \ | ||
98 | case 2: \ | ||
99 | asm(op "w %1,"__percpu_seg"%0" \ | ||
100 | : "+m" (var) \ | ||
101 | :"ri" ((T__)val)); \ | ||
102 | break; \ | ||
103 | case 4: \ | ||
104 | asm(op "l %1,"__percpu_seg"%0" \ | ||
105 | : "+m" (var) \ | ||
106 | :"ri" ((T__)val)); \ | ||
107 | break; \ | ||
108 | default: __bad_percpu_size(); \ | ||
109 | } \ | ||
110 | } while (0) | ||
111 | |||
112 | #define percpu_from_op(op,var) \ | ||
113 | ({ \ | ||
114 | typeof(var) ret__; \ | ||
115 | switch (sizeof(var)) { \ | ||
116 | case 1: \ | ||
117 | asm(op "b "__percpu_seg"%1,%0" \ | ||
118 | : "=r" (ret__) \ | ||
119 | : "m" (var)); \ | ||
120 | break; \ | ||
121 | case 2: \ | ||
122 | asm(op "w "__percpu_seg"%1,%0" \ | ||
123 | : "=r" (ret__) \ | ||
124 | : "m" (var)); \ | ||
125 | break; \ | ||
126 | case 4: \ | ||
127 | asm(op "l "__percpu_seg"%1,%0" \ | ||
128 | : "=r" (ret__) \ | ||
129 | : "m" (var)); \ | ||
130 | break; \ | ||
131 | default: __bad_percpu_size(); \ | ||
132 | } \ | ||
133 | ret__; }) | ||
134 | |||
135 | #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) | ||
136 | #define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) | ||
137 | #define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) | ||
138 | #define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) | ||
139 | #define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) | ||
140 | #endif /* !__ASSEMBLY__ */ | ||
141 | #endif /* !CONFIG_X86_64 */ | ||
142 | #endif /* _ASM_X86_PERCPU_H_ */ | ||
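[Usage sketch, not part of the patch: a hypothetical per-cpu counter using
the single-instruction helpers defined above; on 32-bit SMP each access is
one %fs-relative instruction. Callers must keep preemption disabled so
they stay on one CPU across the sequence.]

    /* Hypothetical per-cpu event counter */
    DEFINE_PER_CPU(unsigned long, demo_count);

    static void demo_tick(void)
    {
            x86_add_percpu(demo_count, 1);
            if (x86_read_percpu(demo_count) >= 1000)
                    x86_write_percpu(demo_count, 0);
    }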
diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h deleted file mode 100644 index a7ebd436f3cc..000000000000 --- a/include/asm-x86/percpu_32.h +++ /dev/null | |||
@@ -1,154 +0,0 @@ | |||
1 | #ifndef __ARCH_I386_PERCPU__ | ||
2 | #define __ARCH_I386_PERCPU__ | ||
3 | |||
4 | #ifdef __ASSEMBLY__ | ||
5 | |||
6 | /* | ||
7 | * PER_CPU finds an address of a per-cpu variable. | ||
8 | * | ||
9 | * Args: | ||
10 | * var - variable name | ||
11 | * reg - 32bit register | ||
12 | * | ||
13 | * The resulting address is stored in the "reg" argument. | ||
14 | * | ||
15 | * Example: | ||
16 | * PER_CPU(cpu_gdt_descr, %ebx) | ||
17 | */ | ||
18 | #ifdef CONFIG_SMP | ||
19 | #define PER_CPU(var, reg) \ | ||
20 | movl %fs:per_cpu__##this_cpu_off, reg; \ | ||
21 | lea per_cpu__##var(reg), reg | ||
22 | #define PER_CPU_VAR(var) %fs:per_cpu__##var | ||
23 | #else /* ! SMP */ | ||
24 | #define PER_CPU(var, reg) \ | ||
25 | movl $per_cpu__##var, reg | ||
26 | #define PER_CPU_VAR(var) per_cpu__##var | ||
27 | #endif /* SMP */ | ||
28 | |||
29 | #else /* ...!ASSEMBLY */ | ||
30 | |||
31 | /* | ||
32 | * PER_CPU finds an address of a per-cpu variable. | ||
33 | * | ||
34 | * Args: | ||
35 | * var - variable name | ||
36 | * cpu - 32bit register containing the current CPU number | ||
37 | * | ||
38 | * The resulting address is stored in the "cpu" argument. | ||
39 | * | ||
40 | * Example: | ||
41 | * PER_CPU(cpu_gdt_descr, %ebx) | ||
42 | */ | ||
43 | #ifdef CONFIG_SMP | ||
44 | /* Same as generic implementation except for optimized local access. */ | ||
45 | #define __GENERIC_PER_CPU | ||
46 | |||
47 | /* This is used for other cpus to find our section. */ | ||
48 | extern unsigned long __per_cpu_offset[]; | ||
49 | |||
50 | #define per_cpu_offset(x) (__per_cpu_offset[x]) | ||
51 | |||
52 | /* Separate out the type, so (int[3], foo) works. */ | ||
53 | #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name | ||
54 | #define DEFINE_PER_CPU(type, name) \ | ||
55 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name | ||
56 | |||
57 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
58 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
59 | __typeof__(type) per_cpu__##name \ | ||
60 | ____cacheline_aligned_in_smp | ||
61 | |||
62 | /* We can use this directly for local CPU (faster). */ | ||
63 | DECLARE_PER_CPU(unsigned long, this_cpu_off); | ||
64 | |||
65 | /* var is in discarded region: offset to particular copy we want */ | ||
66 | #define per_cpu(var, cpu) (*({ \ | ||
67 | extern int simple_indentifier_##var(void); \ | ||
68 | RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); })) | ||
69 | |||
70 | #define __raw_get_cpu_var(var) (*({ \ | ||
71 | extern int simple_indentifier_##var(void); \ | ||
72 | RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \ | ||
73 | })) | ||
74 | |||
75 | #define __get_cpu_var(var) __raw_get_cpu_var(var) | ||
76 | |||
77 | /* A macro to avoid #include hell... */ | ||
78 | #define percpu_modcopy(pcpudst, src, size) \ | ||
79 | do { \ | ||
80 | unsigned int __i; \ | ||
81 | for_each_possible_cpu(__i) \ | ||
82 | memcpy((pcpudst)+__per_cpu_offset[__i], \ | ||
83 | (src), (size)); \ | ||
84 | } while (0) | ||
85 | |||
86 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | ||
87 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | ||
88 | |||
89 | /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ | ||
90 | #define __percpu_seg "%%fs:" | ||
91 | #else /* !SMP */ | ||
92 | #include <asm-generic/percpu.h> | ||
93 | #define __percpu_seg "" | ||
94 | #endif /* SMP */ | ||
95 | |||
96 | /* For arch-specific code, we can use direct single-insn ops (they | ||
97 | * don't give an lvalue though). */ | ||
98 | extern void __bad_percpu_size(void); | ||
99 | |||
100 | #define percpu_to_op(op,var,val) \ | ||
101 | do { \ | ||
102 | typedef typeof(var) T__; \ | ||
103 | if (0) { T__ tmp__; tmp__ = (val); } \ | ||
104 | switch (sizeof(var)) { \ | ||
105 | case 1: \ | ||
106 | asm(op "b %1,"__percpu_seg"%0" \ | ||
107 | : "+m" (var) \ | ||
108 | :"ri" ((T__)val)); \ | ||
109 | break; \ | ||
110 | case 2: \ | ||
111 | asm(op "w %1,"__percpu_seg"%0" \ | ||
112 | : "+m" (var) \ | ||
113 | :"ri" ((T__)val)); \ | ||
114 | break; \ | ||
115 | case 4: \ | ||
116 | asm(op "l %1,"__percpu_seg"%0" \ | ||
117 | : "+m" (var) \ | ||
118 | :"ri" ((T__)val)); \ | ||
119 | break; \ | ||
120 | default: __bad_percpu_size(); \ | ||
121 | } \ | ||
122 | } while (0) | ||
123 | |||
124 | #define percpu_from_op(op,var) \ | ||
125 | ({ \ | ||
126 | typeof(var) ret__; \ | ||
127 | switch (sizeof(var)) { \ | ||
128 | case 1: \ | ||
129 | asm(op "b "__percpu_seg"%1,%0" \ | ||
130 | : "=r" (ret__) \ | ||
131 | : "m" (var)); \ | ||
132 | break; \ | ||
133 | case 2: \ | ||
134 | asm(op "w "__percpu_seg"%1,%0" \ | ||
135 | : "=r" (ret__) \ | ||
136 | : "m" (var)); \ | ||
137 | break; \ | ||
138 | case 4: \ | ||
139 | asm(op "l "__percpu_seg"%1,%0" \ | ||
140 | : "=r" (ret__) \ | ||
141 | : "m" (var)); \ | ||
142 | break; \ | ||
143 | default: __bad_percpu_size(); \ | ||
144 | } \ | ||
145 | ret__; }) | ||
146 | |||
147 | #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) | ||
148 | #define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) | ||
149 | #define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) | ||
150 | #define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) | ||
151 | #define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) | ||
152 | #endif /* !__ASSEMBLY__ */ | ||
153 | |||
154 | #endif /* __ARCH_I386_PERCPU__ */ | ||
diff --git a/include/asm-x86/percpu_64.h b/include/asm-x86/percpu_64.h deleted file mode 100644 index 5abd48270101..000000000000 --- a/include/asm-x86/percpu_64.h +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | #ifndef _ASM_X8664_PERCPU_H_ | ||
2 | #define _ASM_X8664_PERCPU_H_ | ||
3 | #include <linux/compiler.h> | ||
4 | |||
5 | /* Same as asm-generic/percpu.h, except that we store the per cpu offset | ||
6 | in the PDA. Longer term the PDA and every per cpu variable | ||
7 | should be just put into a single section and referenced directly | ||
8 | from %gs */ | ||
9 | |||
10 | #ifdef CONFIG_SMP | ||
11 | |||
12 | #include <asm/pda.h> | ||
13 | |||
14 | #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) | ||
15 | #define __my_cpu_offset() read_pda(data_offset) | ||
16 | |||
17 | #define per_cpu_offset(x) (__per_cpu_offset(x)) | ||
18 | |||
19 | /* Separate out the type, so (int[3], foo) works. */ | ||
20 | #define DEFINE_PER_CPU(type, name) \ | ||
21 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name | ||
22 | |||
23 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
24 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
25 | __typeof__(type) per_cpu__##name \ | ||
26 | ____cacheline_internodealigned_in_smp | ||
27 | |||
28 | /* var is in discarded region: offset to particular copy we want */ | ||
29 | #define per_cpu(var, cpu) (*({ \ | ||
30 | extern int simple_identifier_##var(void); \ | ||
31 | RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)); })) | ||
32 | #define __get_cpu_var(var) (*({ \ | ||
33 | extern int simple_identifier_##var(void); \ | ||
34 | RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); })) | ||
35 | #define __raw_get_cpu_var(var) (*({ \ | ||
36 | extern int simple_identifier_##var(void); \ | ||
37 | RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); })) | ||
38 | |||
39 | /* A macro to avoid #include hell... */ | ||
40 | #define percpu_modcopy(pcpudst, src, size) \ | ||
41 | do { \ | ||
42 | unsigned int __i; \ | ||
43 | for_each_possible_cpu(__i) \ | ||
44 | memcpy((pcpudst)+__per_cpu_offset(__i), \ | ||
45 | (src), (size)); \ | ||
46 | } while (0) | ||
47 | |||
48 | extern void setup_per_cpu_areas(void); | ||
49 | |||
50 | #else /* ! SMP */ | ||
51 | |||
52 | #define DEFINE_PER_CPU(type, name) \ | ||
53 | __typeof__(type) per_cpu__##name | ||
54 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
55 | DEFINE_PER_CPU(type, name) | ||
56 | |||
57 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) | ||
58 | #define __get_cpu_var(var) per_cpu__##var | ||
59 | #define __raw_get_cpu_var(var) per_cpu__##var | ||
60 | |||
61 | #endif /* SMP */ | ||
62 | |||
63 | #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name | ||
64 | |||
65 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | ||
66 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | ||
67 | |||
68 | #endif /* _ASM_X8664_PERCPU_H_ */ | ||
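A conceptual sketch of the address arithmetic the deleted header performed, for readers comparing it with the unified percpu.h that replaces it. This is not the literal implementation (the real macros go through RELOC_HIDE() so the compiler cannot optimize the offset away), and the macro name is hypothetical:

/* cpu_pda(cpu)->data_offset holds the distance between the linked
 * address of a per-cpu variable and that CPU's private copy, so
 * resolving a variable is plain pointer arithmetic: */
#define per_cpu_sketch(var, cpu) \
	(*(__typeof__(per_cpu__##var) *) \
	 ((char *)&per_cpu__##var + cpu_pda(cpu)->data_offset))
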
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h index f2fc33ceb9f2..bab12718a913 100644 --- a/include/asm-x86/pgalloc_32.h +++ b/include/asm-x86/pgalloc_32.h | |||
@@ -3,66 +3,91 @@ | |||
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/mm.h> /* for struct page */ | 5 | #include <linux/mm.h> /* for struct page */ |
6 | #include <linux/pagemap.h> | ||
7 | #include <asm/tlb.h> | ||
8 | #include <asm-generic/tlb.h> | ||
6 | 9 | ||
7 | #ifdef CONFIG_PARAVIRT | 10 | #ifdef CONFIG_PARAVIRT |
8 | #include <asm/paravirt.h> | 11 | #include <asm/paravirt.h> |
9 | #else | 12 | #else |
10 | #define paravirt_alloc_pt(mm, pfn) do { } while (0) | 13 | #define paravirt_alloc_pt(mm, pfn) do { } while (0) |
11 | #define paravirt_alloc_pd(pfn) do { } while (0) | 14 | #define paravirt_alloc_pd(mm, pfn) do { } while (0) |
12 | #define paravirt_alloc_pd(pfn) do { } while (0) | ||
13 | #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0) | 15 | #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0) |
14 | #define paravirt_release_pt(pfn) do { } while (0) | 16 | #define paravirt_release_pt(pfn) do { } while (0) |
15 | #define paravirt_release_pd(pfn) do { } while (0) | 17 | #define paravirt_release_pd(pfn) do { } while (0) |
16 | #endif | 18 | #endif |
17 | 19 | ||
18 | #define pmd_populate_kernel(mm, pmd, pte) \ | 20 | static inline void pmd_populate_kernel(struct mm_struct *mm, |
19 | do { \ | 21 | pmd_t *pmd, pte_t *pte) |
20 | paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \ | 22 | { |
21 | set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \ | 23 | paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); |
22 | } while (0) | 24 | set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); |
25 | } | ||
23 | 26 | ||
24 | #define pmd_populate(mm, pmd, pte) \ | 27 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) |
25 | do { \ | 28 | { |
26 | paravirt_alloc_pt(mm, page_to_pfn(pte)); \ | 29 | unsigned long pfn = page_to_pfn(pte); |
27 | set_pmd(pmd, __pmd(_PAGE_TABLE + \ | 30 | |
28 | ((unsigned long long)page_to_pfn(pte) << \ | 31 | paravirt_alloc_pt(mm, pfn); |
29 | (unsigned long long) PAGE_SHIFT))); \ | 32 | set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); |
30 | } while (0) | 33 | } |
31 | 34 | ||
32 | /* | 35 | /* |
33 | * Allocate and free page tables. | 36 | * Allocate and free page tables. |
34 | */ | 37 | */ |
35 | extern pgd_t *pgd_alloc(struct mm_struct *); | 38 | extern pgd_t *pgd_alloc(struct mm_struct *); |
36 | extern void pgd_free(pgd_t *pgd); | 39 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); |
37 | 40 | ||
38 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); | 41 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); |
39 | extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); | 42 | extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); |
40 | 43 | ||
41 | static inline void pte_free_kernel(pte_t *pte) | 44 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
42 | { | 45 | { |
43 | free_page((unsigned long)pte); | 46 | free_page((unsigned long)pte); |
44 | } | 47 | } |
45 | 48 | ||
46 | static inline void pte_free(struct page *pte) | 49 | static inline void pte_free(struct mm_struct *mm, struct page *pte) |
47 | { | 50 | { |
48 | __free_page(pte); | 51 | __free_page(pte); |
49 | } | 52 | } |
50 | 53 | ||
51 | 54 | ||
52 | #define __pte_free_tlb(tlb,pte) \ | 55 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte); |
53 | do { \ | ||
54 | paravirt_release_pt(page_to_pfn(pte)); \ | ||
55 | tlb_remove_page((tlb),(pte)); \ | ||
56 | } while (0) | ||
57 | 56 | ||
58 | #ifdef CONFIG_X86_PAE | 57 | #ifdef CONFIG_X86_PAE |
59 | /* | 58 | /* |
60 | * In the PAE case we free the pmds as part of the pgd. | 59 | * In the PAE case we free the pmds as part of the pgd. |
61 | */ | 60 | */ |
62 | #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) | 61 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
63 | #define pmd_free(x) do { } while (0) | 62 | { |
64 | #define __pmd_free_tlb(tlb,x) do { } while (0) | 63 | return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
65 | #define pud_populate(mm, pmd, pte) BUG() | 64 | } |
66 | #endif | 65 | |
66 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | ||
67 | { | ||
68 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | ||
69 | free_page((unsigned long)pmd); | ||
70 | } | ||
71 | |||
72 | extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); | ||
73 | |||
74 | static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) | ||
75 | { | ||
76 | paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT); | ||
77 | |||
78 | /* Note: almost everything apart from _PAGE_PRESENT is | ||
79 | reserved at the pmd (PDPT) level. */ | ||
80 | set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); | ||
81 | |||
82 | /* | ||
83 | * According to Intel App note "TLBs, Paging-Structure Caches, | ||
84 | * and Their Invalidation", April 2007, document 317080-001, | ||
85 | * section 8.1: in PAE mode we explicitly have to flush the | ||
86 | * TLB via cr3 if the top-level pgd is changed... | ||
87 | */ | ||
88 | if (mm == current->active_mm) | ||
89 | write_cr3(read_cr3()); | ||
90 | } | ||
91 | #endif /* CONFIG_X86_PAE */ | ||
67 | 92 | ||
68 | #endif /* _I386_PGALLOC_H */ | 93 | #endif /* _I386_PGALLOC_H */ |
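A short sketch of how the PAE helpers above combine when a new pmd page is installed. The wrapper function is hypothetical, and error handling beyond the NULL check is omitted:

static pmd_t *install_pmd_sketch(struct mm_struct *mm, pud_t *pudp,
				 unsigned long addr)
{
	pmd_t *pmd = pmd_alloc_one(mm, addr);	/* zeroed page */

	if (pmd)
		/* Sets the PDPT entry and, if mm is live on this CPU,
		 * reloads cr3 so the CPU re-reads its cached PDPTEs. */
		pud_populate(mm, pudp, pmd);
	return pmd;
}
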
diff --git a/include/asm-x86/pgalloc_64.h b/include/asm-x86/pgalloc_64.h index 8bb564687860..4f6220db22b1 100644 --- a/include/asm-x86/pgalloc_64.h +++ b/include/asm-x86/pgalloc_64.h | |||
@@ -17,7 +17,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p | |||
17 | set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); | 17 | set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); |
18 | } | 18 | } |
19 | 19 | ||
20 | static inline void pmd_free(pmd_t *pmd) | 20 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
21 | { | 21 | { |
22 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | 22 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); |
23 | free_page((unsigned long)pmd); | 23 | free_page((unsigned long)pmd); |
@@ -33,7 +33,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | |||
33 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | 33 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void pud_free (pud_t *pud) | 36 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
37 | { | 37 | { |
38 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | 38 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); |
39 | free_page((unsigned long)pud); | 39 | free_page((unsigned long)pud); |
@@ -42,19 +42,21 @@ static inline void pud_free (pud_t *pud) | |||
42 | static inline void pgd_list_add(pgd_t *pgd) | 42 | static inline void pgd_list_add(pgd_t *pgd) |
43 | { | 43 | { |
44 | struct page *page = virt_to_page(pgd); | 44 | struct page *page = virt_to_page(pgd); |
45 | unsigned long flags; | ||
45 | 46 | ||
46 | spin_lock(&pgd_lock); | 47 | spin_lock_irqsave(&pgd_lock, flags); |
47 | list_add(&page->lru, &pgd_list); | 48 | list_add(&page->lru, &pgd_list); |
48 | spin_unlock(&pgd_lock); | 49 | spin_unlock_irqrestore(&pgd_lock, flags); |
49 | } | 50 | } |
50 | 51 | ||
51 | static inline void pgd_list_del(pgd_t *pgd) | 52 | static inline void pgd_list_del(pgd_t *pgd) |
52 | { | 53 | { |
53 | struct page *page = virt_to_page(pgd); | 54 | struct page *page = virt_to_page(pgd); |
55 | unsigned long flags; | ||
54 | 56 | ||
55 | spin_lock(&pgd_lock); | 57 | spin_lock_irqsave(&pgd_lock, flags); |
56 | list_del(&page->lru); | 58 | list_del(&page->lru); |
57 | spin_unlock(&pgd_lock); | 59 | spin_unlock_irqrestore(&pgd_lock, flags); |
58 | } | 60 | } |
59 | 61 | ||
60 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 62 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
@@ -77,7 +79,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
77 | return pgd; | 79 | return pgd; |
78 | } | 80 | } |
79 | 81 | ||
80 | static inline void pgd_free(pgd_t *pgd) | 82 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
81 | { | 83 | { |
82 | BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); | 84 | BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); |
83 | pgd_list_del(pgd); | 85 | pgd_list_del(pgd); |
@@ -100,13 +102,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add | |||
100 | /* Should really implement gc for free page table pages. This could be | 102 | /* Should really implement gc for free page table pages. This could be |
101 | done with a reference count in struct page. */ | 103 | done with a reference count in struct page. */ |
102 | 104 | ||
103 | static inline void pte_free_kernel(pte_t *pte) | 105 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
104 | { | 106 | { |
105 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | 107 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); |
106 | free_page((unsigned long)pte); | 108 | free_page((unsigned long)pte); |
107 | } | 109 | } |
108 | 110 | ||
109 | static inline void pte_free(struct page *pte) | 111 | static inline void pte_free(struct mm_struct *mm, struct page *pte) |
110 | { | 112 | { |
111 | __free_page(pte); | 113 | __free_page(pte); |
112 | } | 114 | } |
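The switch to the irqsave variants implies pgd_lock can now be taken from interrupt context, so any other walker of pgd_list has to follow the same discipline. A hedged sketch of such a walker (the callback-based helper is hypothetical, not part of the patch):

static void for_each_pgd_sketch(void (*fn)(pgd_t *pgd))
{
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_for_each_entry(page, &pgd_list, lru)
		fn((pgd_t *)page_address(page));
	spin_unlock_irqrestore(&pgd_lock, flags);
}
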
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h index 84b03cf56a79..701404fab308 100644 --- a/include/asm-x86/pgtable-2level.h +++ b/include/asm-x86/pgtable-2level.h | |||
@@ -15,30 +15,31 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) | |||
15 | { | 15 | { |
16 | *ptep = pte; | 16 | *ptep = pte; |
17 | } | 17 | } |
18 | static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | 18 | |
19 | pte_t *ptep , pte_t pte) | ||
20 | { | ||
21 | native_set_pte(ptep, pte); | ||
22 | } | ||
23 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) | 19 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) |
24 | { | 20 | { |
25 | *pmdp = pmd; | 21 | *pmdp = pmd; |
26 | } | 22 | } |
27 | #ifndef CONFIG_PARAVIRT | ||
28 | #define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval) | ||
29 | #define set_pte_at(mm,addr,ptep,pteval) native_set_pte_at(mm, addr, ptep, pteval) | ||
30 | #define set_pmd(pmdptr, pmdval) native_set_pmd(pmdptr, pmdval) | ||
31 | #endif | ||
32 | 23 | ||
33 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) | 24 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
34 | #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) | 25 | { |
26 | native_set_pte(ptep, pte); | ||
27 | } | ||
35 | 28 | ||
36 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | 29 | static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, |
37 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | 30 | pte_t *ptep, pte_t pte) |
31 | { | ||
32 | native_set_pte(ptep, pte); | ||
33 | } | ||
34 | |||
35 | static inline void native_pmd_clear(pmd_t *pmdp) | ||
36 | { | ||
37 | native_set_pmd(pmdp, __pmd(0)); | ||
38 | } | ||
38 | 39 | ||
39 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) | 40 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) |
40 | { | 41 | { |
41 | *xp = __pte(0); | 42 | *xp = native_make_pte(0); |
42 | } | 43 | } |
43 | 44 | ||
44 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
@@ -53,16 +54,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) | |||
53 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 54 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
54 | #define pte_none(x) (!(x).pte_low) | 55 | #define pte_none(x) (!(x).pte_low) |
55 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) | 56 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
56 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
57 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
58 | |||
59 | /* | ||
60 | * All present pages are kernel-executable: | ||
61 | */ | ||
62 | static inline int pte_exec_kernel(pte_t pte) | ||
63 | { | ||
64 | return 1; | ||
65 | } | ||
66 | 57 | ||
67 | /* | 58 | /* |
68 | * Bits 0, 6 and 7 are taken, split up the 29 bits of offset | 59 | * Bits 0, 6 and 7 are taken, split up the 29 bits of offset |
@@ -74,13 +65,13 @@ static inline int pte_exec_kernel(pte_t pte) | |||
74 | ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) | 65 | ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) |
75 | 66 | ||
76 | #define pgoff_to_pte(off) \ | 67 | #define pgoff_to_pte(off) \ |
77 | ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) | 68 | ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) |
78 | 69 | ||
79 | /* Encode and de-code a swap entry */ | 70 | /* Encode and de-code a swap entry */ |
80 | #define __swp_type(x) (((x).val >> 1) & 0x1f) | 71 | #define __swp_type(x) (((x).val >> 1) & 0x1f) |
81 | #define __swp_offset(x) ((x).val >> 8) | 72 | #define __swp_offset(x) ((x).val >> 8) |
82 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 73 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) |
83 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) | 74 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
84 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 75 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
85 | 76 | ||
86 | #endif /* _I386_PGTABLE_2LEVEL_H */ | 77 | #endif /* _I386_PGTABLE_2LEVEL_H */ |
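A worked example of the 2-level swap encoding above, using illustrative values (the demo function is hypothetical). Type 3 with offset 0x1234 packs into (3 << 1) | (0x1234 << 8) = 0x123406, which leaves bits 0, 6 and 7 clear as the comment above requires:

static void swap_encoding_demo(void)
{
	swp_entry_t e = __swp_entry(3, 0x1234);
	pte_t pte = __swp_entry_to_pte(e);	/* .pte = 0x123406 */

	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
}
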
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index 948a33414118..1d763eec740f 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h | |||
@@ -15,16 +15,18 @@ | |||
15 | #define pgd_ERROR(e) \ | 15 | #define pgd_ERROR(e) \ |
16 | printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | 16 | printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) |
17 | 17 | ||
18 | #define pud_none(pud) 0 | ||
19 | #define pud_bad(pud) 0 | ||
20 | #define pud_present(pud) 1 | ||
21 | 18 | ||
22 | /* | 19 | static inline int pud_none(pud_t pud) |
23 | * All present pages with !NX bit are kernel-executable: | 20 | { |
24 | */ | 21 | return pud_val(pud) == 0; |
25 | static inline int pte_exec_kernel(pte_t pte) | 22 | } |
23 | static inline int pud_bad(pud_t pud) | ||
24 | { | ||
25 | return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; | ||
26 | } | ||
27 | static inline int pud_present(pud_t pud) | ||
26 | { | 28 | { |
27 | return !(pte_val(pte) & _PAGE_NX); | 29 | return pud_val(pud) & _PAGE_PRESENT; |
28 | } | 30 | } |
29 | 31 | ||
30 | /* Rules for using set_pte: the pte being assigned *must* be | 32 | /* Rules for using set_pte: the pte being assigned *must* be |
@@ -39,11 +41,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) | |||
39 | smp_wmb(); | 41 | smp_wmb(); |
40 | ptep->pte_low = pte.pte_low; | 42 | ptep->pte_low = pte.pte_low; |
41 | } | 43 | } |
42 | static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
43 | pte_t *ptep , pte_t pte) | ||
44 | { | ||
45 | native_set_pte(ptep, pte); | ||
46 | } | ||
47 | 44 | ||
48 | /* | 45 | /* |
49 | * Since this is only called on user PTEs, and the page fault handler | 46 | * Since this is only called on user PTEs, and the page fault handler |
@@ -71,7 +68,7 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) | |||
71 | } | 68 | } |
72 | static inline void native_set_pud(pud_t *pudp, pud_t pud) | 69 | static inline void native_set_pud(pud_t *pudp, pud_t pud) |
73 | { | 70 | { |
74 | *pudp = pud; | 71 | set_64bit((unsigned long long *)(pudp),native_pud_val(pud)); |
75 | } | 72 | } |
76 | 73 | ||
77 | /* | 74 | /* |
@@ -94,24 +91,25 @@ static inline void native_pmd_clear(pmd_t *pmd) | |||
94 | *(tmp + 1) = 0; | 91 | *(tmp + 1) = 0; |
95 | } | 92 | } |
96 | 93 | ||
97 | #ifndef CONFIG_PARAVIRT | 94 | static inline void pud_clear(pud_t *pudp) |
98 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) | 95 | { |
99 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) | 96 | unsigned long pgd; |
100 | #define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte) | 97 | |
101 | #define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte) | 98 | set_pud(pudp, __pud(0)); |
102 | #define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) | 99 | |
103 | #define set_pud(pudp, pud) native_set_pud(pudp, pud) | 100 | /* |
104 | #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) | 101 | * According to Intel App note "TLBs, Paging-Structure Caches, |
105 | #define pmd_clear(pmd) native_pmd_clear(pmd) | 102 | * and Their Invalidation", April 2007, document 317080-001, |
106 | #endif | 103 | * section 8.1: in PAE mode we explicitly have to flush the |
107 | 104 | * TLB via cr3 if the top-level pgd is changed... | |
108 | /* | 105 | * |
109 | * Pentium-II erratum A13: in PAE mode we explicitly have to flush | 106 | * Make sure the pud entry we're updating is within the |
110 | * the TLB via cr3 if the top-level pgd is changed... | 107 | * current pgd to avoid unnecessary TLB flushes. |
111 | * We do not let the generic code free and clear pgd entries due to | 108 | */ |
112 | * this erratum. | 109 | pgd = read_cr3(); |
113 | */ | 110 | if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) |
114 | static inline void pud_clear (pud_t * pud) { } | 111 | write_cr3(pgd); |
112 | } | ||
115 | 113 | ||
116 | #define pud_page(pud) \ | 114 | #define pud_page(pud) \ |
117 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | 115 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) |
@@ -155,21 +153,7 @@ static inline int pte_none(pte_t pte) | |||
155 | 153 | ||
156 | static inline unsigned long pte_pfn(pte_t pte) | 154 | static inline unsigned long pte_pfn(pte_t pte) |
157 | { | 155 | { |
158 | return pte_val(pte) >> PAGE_SHIFT; | 156 | return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT; |
159 | } | ||
160 | |||
161 | extern unsigned long long __supported_pte_mask; | ||
162 | |||
163 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) | ||
164 | { | ||
165 | return __pte((((unsigned long long)page_nr << PAGE_SHIFT) | | ||
166 | pgprot_val(pgprot)) & __supported_pte_mask); | ||
167 | } | ||
168 | |||
169 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | ||
170 | { | ||
171 | return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | | ||
172 | pgprot_val(pgprot)) & __supported_pte_mask); | ||
173 | } | 157 | } |
174 | 158 | ||
175 | /* | 159 | /* |
@@ -177,7 +161,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | |||
177 | * put the 32 bits of offset into the high part. | 161 | * put the 32 bits of offset into the high part. |
178 | */ | 162 | */ |
179 | #define pte_to_pgoff(pte) ((pte).pte_high) | 163 | #define pte_to_pgoff(pte) ((pte).pte_high) |
180 | #define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) | 164 | #define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) |
181 | #define PTE_FILE_MAX_BITS 32 | 165 | #define PTE_FILE_MAX_BITS 32 |
182 | 166 | ||
183 | /* Encode and de-code a swap entry */ | 167 | /* Encode and de-code a swap entry */ |
@@ -185,8 +169,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | |||
185 | #define __swp_offset(x) ((x).val >> 5) | 169 | #define __swp_offset(x) ((x).val >> 5) |
186 | #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) | 170 | #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) |
187 | #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) | 171 | #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) |
188 | #define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) | 172 | #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) |
189 | |||
190 | #define __pmd_free_tlb(tlb, x) do { } while (0) | ||
191 | 173 | ||
192 | #endif /* _I386_PGTABLE_3LEVEL_H */ | 174 | #endif /* _I386_PGTABLE_3LEVEL_H */ |
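For the PAE nonlinear-file encoding above, the full 32-bit offset lives in pte_high while _PAGE_FILE marks pte_low, so the round trip is lossless. A small hedged check (the demo function is hypothetical):

static void pae_file_pte_demo(void)
{
	pte_t pte = pgoff_to_pte(0xdeadbeef);

	BUG_ON(pte_to_pgoff(pte) != 0xdeadbeef);	/* pte_high round-trips */
	BUG_ON(!(pte.pte_low & _PAGE_FILE));
}
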
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 1039140652af..44c0a4f1b1eb 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -1,5 +1,368 @@ | |||
1 | #ifndef _ASM_X86_PGTABLE_H | ||
2 | #define _ASM_X86_PGTABLE_H | ||
3 | |||
4 | #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) | ||
5 | #define FIRST_USER_ADDRESS 0 | ||
6 | |||
7 | #define _PAGE_BIT_PRESENT 0 | ||
8 | #define _PAGE_BIT_RW 1 | ||
9 | #define _PAGE_BIT_USER 2 | ||
10 | #define _PAGE_BIT_PWT 3 | ||
11 | #define _PAGE_BIT_PCD 4 | ||
12 | #define _PAGE_BIT_ACCESSED 5 | ||
13 | #define _PAGE_BIT_DIRTY 6 | ||
14 | #define _PAGE_BIT_FILE 6 | ||
15 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ | ||
16 | #define _PAGE_BIT_PAT 7 /* on 4KB pages */ | ||
17 | #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ | ||
18 | #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ | ||
19 | #define _PAGE_BIT_UNUSED2 10 | ||
20 | #define _PAGE_BIT_UNUSED3 11 | ||
21 | #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ | ||
22 | #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ | ||
23 | |||
24 | /* | ||
25 | * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a | ||
26 | * sign-extended value on 32-bit with all 1's in the upper word, | ||
27 | * which preserves the upper pte values on 64-bit ptes: | ||
28 | */ | ||
29 | #define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT) | ||
30 | #define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW) | ||
31 | #define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER) | ||
32 | #define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT) | ||
33 | #define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD) | ||
34 | #define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED) | ||
35 | #define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY) | ||
36 | #define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */ | ||
37 | #define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */ | ||
38 | #define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) | ||
39 | #define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) | ||
40 | #define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) | ||
41 | #define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT) | ||
42 | #define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE) | ||
43 | |||
44 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | ||
45 | #define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX) | ||
46 | #else | ||
47 | #define _PAGE_NX 0 | ||
48 | #endif | ||
49 | |||
50 | /* If _PAGE_PRESENT is clear, we use these: */ | ||
51 | #define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */ | ||
52 | #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; | ||
53 | pte_present gives true */ | ||
54 | |||
55 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
56 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
57 | |||
58 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
59 | |||
60 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | ||
61 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
62 | |||
63 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | ||
64 | #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
65 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
66 | #define PAGE_COPY PAGE_COPY_NOEXEC | ||
67 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
68 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
69 | |||
70 | #ifdef CONFIG_X86_32 | ||
71 | #define _PAGE_KERNEL_EXEC \ | ||
72 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
73 | #define _PAGE_KERNEL (_PAGE_KERNEL_EXEC | _PAGE_NX) | ||
74 | |||
75 | #ifndef __ASSEMBLY__ | ||
76 | extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | ||
77 | #endif /* __ASSEMBLY__ */ | ||
78 | #else | ||
79 | #define __PAGE_KERNEL_EXEC \ | ||
80 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
81 | #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) | ||
82 | #endif | ||
83 | |||
84 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | ||
85 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) | ||
86 | #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) | ||
87 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) | ||
88 | #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) | ||
89 | #define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) | ||
90 | #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) | ||
91 | #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) | ||
92 | |||
93 | #ifdef CONFIG_X86_32 | ||
94 | # define MAKE_GLOBAL(x) __pgprot((x)) | ||
95 | #else | ||
96 | # define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL) | ||
97 | #endif | ||
98 | |||
99 | #define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) | ||
100 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) | ||
101 | #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) | ||
102 | #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) | ||
103 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) | ||
104 | #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) | ||
105 | #define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE) | ||
106 | #define PAGE_KERNEL_LARGE_EXEC MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC) | ||
107 | #define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL) | ||
108 | #define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE) | ||
109 | |||
110 | /* xwr */ | ||
111 | #define __P000 PAGE_NONE | ||
112 | #define __P001 PAGE_READONLY | ||
113 | #define __P010 PAGE_COPY | ||
114 | #define __P011 PAGE_COPY | ||
115 | #define __P100 PAGE_READONLY_EXEC | ||
116 | #define __P101 PAGE_READONLY_EXEC | ||
117 | #define __P110 PAGE_COPY_EXEC | ||
118 | #define __P111 PAGE_COPY_EXEC | ||
119 | |||
120 | #define __S000 PAGE_NONE | ||
121 | #define __S001 PAGE_READONLY | ||
122 | #define __S010 PAGE_SHARED | ||
123 | #define __S011 PAGE_SHARED | ||
124 | #define __S100 PAGE_READONLY_EXEC | ||
125 | #define __S101 PAGE_READONLY_EXEC | ||
126 | #define __S110 PAGE_SHARED_EXEC | ||
127 | #define __S111 PAGE_SHARED_EXEC | ||
128 | |||
129 | #ifndef __ASSEMBLY__ | ||
130 | |||
131 | /* | ||
132 | * ZERO_PAGE is a global shared page that is always zero: used | ||
133 | * for zero-mapped memory areas etc.. | ||
134 | */ | ||
135 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
136 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
137 | |||
138 | extern spinlock_t pgd_lock; | ||
139 | extern struct list_head pgd_list; | ||
140 | |||
141 | /* | ||
142 | * The following only work if pte_present() is true. | ||
143 | * Undefined behaviour if not.. | ||
144 | */ | ||
145 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
146 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
147 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | ||
148 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
149 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } | ||
150 | static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; } | ||
151 | static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } | ||
152 | |||
153 | static inline int pmd_large(pmd_t pte) { | ||
154 | return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == | ||
155 | (_PAGE_PSE|_PAGE_PRESENT); | ||
156 | } | ||
157 | |||
158 | static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); } | ||
159 | static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); } | ||
160 | static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); } | ||
161 | static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); } | ||
162 | static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); } | ||
163 | static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); } | ||
164 | static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } | ||
165 | static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); } | ||
166 | static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); } | ||
167 | static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); } | ||
168 | static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); } | ||
169 | |||
170 | extern pteval_t __supported_pte_mask; | ||
171 | |||
172 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) | ||
173 | { | ||
174 | return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) | | ||
175 | pgprot_val(pgprot)) & __supported_pte_mask); | ||
176 | } | ||
177 | |||
178 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | ||
179 | { | ||
180 | return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) | | ||
181 | pgprot_val(pgprot)) & __supported_pte_mask); | ||
182 | } | ||
183 | |||
184 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
185 | { | ||
186 | pteval_t val = pte_val(pte); | ||
187 | |||
188 | /* | ||
189 | * Chop off the NX bit (if present), and add the NX portion of | ||
190 | * the newprot (if present): | ||
191 | */ | ||
192 | val &= _PAGE_CHG_MASK & ~_PAGE_NX; | ||
193 | val |= pgprot_val(newprot) & __supported_pte_mask; | ||
194 | |||
195 | return __pte(val); | ||
196 | } | ||
197 | |||
198 | #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX)) | ||
199 | |||
200 | #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) | ||
201 | |||
202 | #ifdef CONFIG_PARAVIRT | ||
203 | #include <asm/paravirt.h> | ||
204 | #else /* !CONFIG_PARAVIRT */ | ||
205 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) | ||
206 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) | ||
207 | |||
208 | #define set_pte_present(mm, addr, ptep, pte) \ | ||
209 | native_set_pte_present(mm, addr, ptep, pte) | ||
210 | #define set_pte_atomic(ptep, pte) \ | ||
211 | native_set_pte_atomic(ptep, pte) | ||
212 | |||
213 | #define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) | ||
214 | |||
215 | #ifndef __PAGETABLE_PUD_FOLDED | ||
216 | #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) | ||
217 | #define pgd_clear(pgd) native_pgd_clear(pgd) | ||
218 | #endif | ||
219 | |||
220 | #ifndef set_pud | ||
221 | # define set_pud(pudp, pud) native_set_pud(pudp, pud) | ||
222 | #endif | ||
223 | |||
224 | #ifndef __PAGETABLE_PMD_FOLDED | ||
225 | #define pud_clear(pud) native_pud_clear(pud) | ||
226 | #endif | ||
227 | |||
228 | #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) | ||
229 | #define pmd_clear(pmd) native_pmd_clear(pmd) | ||
230 | |||
231 | #define pte_update(mm, addr, ptep) do { } while (0) | ||
232 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | ||
233 | #endif /* CONFIG_PARAVIRT */ | ||
234 | |||
235 | #endif /* __ASSEMBLY__ */ | ||
236 | |||
1 | #ifdef CONFIG_X86_32 | 237 | #ifdef CONFIG_X86_32 |
2 | # include "pgtable_32.h" | 238 | # include "pgtable_32.h" |
3 | #else | 239 | #else |
4 | # include "pgtable_64.h" | 240 | # include "pgtable_64.h" |
5 | #endif | 241 | #endif |
242 | |||
243 | #ifndef __ASSEMBLY__ | ||
244 | |||
245 | enum { | ||
246 | PG_LEVEL_NONE, | ||
247 | PG_LEVEL_4K, | ||
248 | PG_LEVEL_2M, | ||
249 | PG_LEVEL_1G, | ||
250 | }; | ||
251 | |||
252 | /* | ||
253 | * Helper function that returns the kernel pagetable entry controlling | ||
254 | * the virtual address 'address'. NULL means no pagetable entry present. | ||
255 | * NOTE: the return type is pte_t but if the pmd is PSE then we return it | ||
256 | * as a pte too. | ||
257 | */ | ||
258 | extern pte_t *lookup_address(unsigned long address, int *level); | ||
259 | |||
260 | /* local pte updates need not use xchg for locking */ | ||
261 | static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) | ||
262 | { | ||
263 | pte_t res = *ptep; | ||
264 | |||
265 | /* Pure native function needs no input for mm, addr */ | ||
266 | native_pte_clear(NULL, 0, ptep); | ||
267 | return res; | ||
268 | } | ||
269 | |||
270 | static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
271 | pte_t *ptep , pte_t pte) | ||
272 | { | ||
273 | native_set_pte(ptep, pte); | ||
274 | } | ||
275 | |||
276 | #ifndef CONFIG_PARAVIRT | ||
277 | /* | ||
278 | * Rules for using pte_update - it must be called after any PTE update which | ||
279 | * has not been done using the set_pte / clear_pte interfaces. It is used by | ||
280 | * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE | ||
281 | * updates should either be sets, clears, or set_pte_atomic for P->P | ||
282 | * transitions, which means this hook should only be called for user PTEs. | ||
283 | * This hook implies a P->P protection or access change has taken place, which | ||
284 | * requires a subsequent TLB flush. The notification can optionally be delayed | ||
285 | * until the TLB flush event by using the pte_update_defer form of the | ||
286 | * interface, but care must be taken to assure that the flush happens while | ||
287 | * still holding the same page table lock so that the shadow and primary pages | ||
288 | * do not become out of sync on SMP. | ||
289 | */ | ||
290 | #define pte_update(mm, addr, ptep) do { } while (0) | ||
291 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | ||
292 | #endif | ||
293 | |||
294 | /* | ||
295 | * We only update the dirty/accessed state if we set | ||
296 | * the dirty bit by hand in the kernel, since the hardware | ||
297 | * will do the accessed bit for us, and we don't want to | ||
298 | * race with other CPU's that might be updating the dirty | ||
299 | * bit at the same time. | ||
300 | */ | ||
301 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
302 | #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ | ||
303 | ({ \ | ||
304 | int __changed = !pte_same(*(ptep), entry); \ | ||
305 | if (__changed && dirty) { \ | ||
306 | *ptep = entry; \ | ||
307 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ | ||
308 | flush_tlb_page(vma, address); \ | ||
309 | } \ | ||
310 | __changed; \ | ||
311 | }) | ||
312 | |||
313 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
314 | #define ptep_test_and_clear_young(vma, addr, ptep) ({ \ | ||
315 | int __ret = 0; \ | ||
316 | if (pte_young(*(ptep))) \ | ||
317 | __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \ | ||
318 | &(ptep)->pte); \ | ||
319 | if (__ret) \ | ||
320 | pte_update((vma)->vm_mm, addr, ptep); \ | ||
321 | __ret; \ | ||
322 | }) | ||
323 | |||
324 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | ||
325 | #define ptep_clear_flush_young(vma, address, ptep) \ | ||
326 | ({ \ | ||
327 | int __young; \ | ||
328 | __young = ptep_test_and_clear_young((vma), (address), (ptep)); \ | ||
329 | if (__young) \ | ||
330 | flush_tlb_page(vma, address); \ | ||
331 | __young; \ | ||
332 | }) | ||
333 | |||
334 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
335 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
336 | { | ||
337 | pte_t pte = native_ptep_get_and_clear(ptep); | ||
338 | pte_update(mm, addr, ptep); | ||
339 | return pte; | ||
340 | } | ||
341 | |||
342 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | ||
343 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | ||
344 | { | ||
345 | pte_t pte; | ||
346 | if (full) { | ||
347 | /* | ||
348 | * Full address destruction in progress; paravirt does not | ||
349 | * care about updates and native needs no locking | ||
350 | */ | ||
351 | pte = native_local_ptep_get_and_clear(ptep); | ||
352 | } else { | ||
353 | pte = ptep_get_and_clear(mm, addr, ptep); | ||
354 | } | ||
355 | return pte; | ||
356 | } | ||
357 | |||
358 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
359 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
360 | { | ||
361 | clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); | ||
362 | pte_update(mm, addr, ptep); | ||
363 | } | ||
364 | |||
365 | #include <asm-generic/pgtable.h> | ||
366 | #endif /* __ASSEMBLY__ */ | ||
367 | |||
368 | #endif /* _ASM_X86_PGTABLE_H */ | ||
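A sketch of the new two-argument lookup_address() interface declared above; the caller is hypothetical. The level out-parameter is what distinguishes a real 4K pte from a PSE pmd or pud being returned "as a pte":

static int mapping_is_large_sketch(unsigned long addr)
{
	int level;
	pte_t *ptep = lookup_address(addr, &level);

	if (!ptep)
		return 0;	/* no pagetable entry present */
	return level == PG_LEVEL_2M || level == PG_LEVEL_1G;
}
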
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index ed3e70d8d04b..80dd438642f6 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h | |||
@@ -25,20 +25,11 @@ | |||
25 | struct mm_struct; | 25 | struct mm_struct; |
26 | struct vm_area_struct; | 26 | struct vm_area_struct; |
27 | 27 | ||
28 | /* | ||
29 | * ZERO_PAGE is a global shared page that is always zero: used | ||
30 | * for zero-mapped memory areas etc.. | ||
31 | */ | ||
32 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
33 | extern unsigned long empty_zero_page[1024]; | ||
34 | extern pgd_t swapper_pg_dir[1024]; | 28 | extern pgd_t swapper_pg_dir[1024]; |
35 | extern struct kmem_cache *pmd_cache; | 29 | extern struct kmem_cache *pmd_cache; |
36 | extern spinlock_t pgd_lock; | ||
37 | extern struct page *pgd_list; | ||
38 | void check_pgt_cache(void); | 30 | void check_pgt_cache(void); |
39 | 31 | ||
40 | void pmd_ctor(struct kmem_cache *, void *); | 32 | static inline void pgtable_cache_init(void) {} |
41 | void pgtable_cache_init(void); | ||
42 | void paging_init(void); | 33 | void paging_init(void); |
43 | 34 | ||
44 | 35 | ||
@@ -58,9 +49,6 @@ void paging_init(void); | |||
58 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 49 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
59 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 50 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
60 | 51 | ||
61 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | ||
62 | #define FIRST_USER_ADDRESS 0 | ||
63 | |||
64 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | 52 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) |
65 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | 53 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) |
66 | 54 | ||
@@ -78,118 +66,19 @@ void paging_init(void); | |||
78 | #define VMALLOC_OFFSET (8*1024*1024) | 66 | #define VMALLOC_OFFSET (8*1024*1024) |
79 | #define VMALLOC_START (((unsigned long) high_memory + \ | 67 | #define VMALLOC_START (((unsigned long) high_memory + \ |
80 | 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) | 68 | 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) |
81 | #ifdef CONFIG_HIGHMEM | ||
82 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | ||
83 | #else | ||
84 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | ||
85 | #endif | ||
86 | |||
87 | /* | ||
88 | * _PAGE_PSE set in the page directory entry just means that | ||
89 | * the page directory entry points directly to a 4MB-aligned block of | ||
90 | * memory. | ||
91 | */ | ||
92 | #define _PAGE_BIT_PRESENT 0 | ||
93 | #define _PAGE_BIT_RW 1 | ||
94 | #define _PAGE_BIT_USER 2 | ||
95 | #define _PAGE_BIT_PWT 3 | ||
96 | #define _PAGE_BIT_PCD 4 | ||
97 | #define _PAGE_BIT_ACCESSED 5 | ||
98 | #define _PAGE_BIT_DIRTY 6 | ||
99 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ | ||
100 | #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ | ||
101 | #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ | ||
102 | #define _PAGE_BIT_UNUSED2 10 | ||
103 | #define _PAGE_BIT_UNUSED3 11 | ||
104 | #define _PAGE_BIT_NX 63 | ||
105 | |||
106 | #define _PAGE_PRESENT 0x001 | ||
107 | #define _PAGE_RW 0x002 | ||
108 | #define _PAGE_USER 0x004 | ||
109 | #define _PAGE_PWT 0x008 | ||
110 | #define _PAGE_PCD 0x010 | ||
111 | #define _PAGE_ACCESSED 0x020 | ||
112 | #define _PAGE_DIRTY 0x040 | ||
113 | #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ | ||
114 | #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ | ||
115 | #define _PAGE_UNUSED1 0x200 /* available for programmer */ | ||
116 | #define _PAGE_UNUSED2 0x400 | ||
117 | #define _PAGE_UNUSED3 0x800 | ||
118 | |||
119 | /* If _PAGE_PRESENT is clear, we use these: */ | ||
120 | #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ | ||
121 | #define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; | ||
122 | pte_present gives true */ | ||
123 | #ifdef CONFIG_X86_PAE | 69 | #ifdef CONFIG_X86_PAE |
124 | #define _PAGE_NX (1ULL<<_PAGE_BIT_NX) | 70 | #define LAST_PKMAP 512 |
125 | #else | 71 | #else |
126 | #define _PAGE_NX 0 | 72 | #define LAST_PKMAP 1024 |
127 | #endif | 73 | #endif |
128 | 74 | ||
129 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | 75 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) |
130 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
131 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
132 | |||
133 | #define PAGE_NONE \ | ||
134 | __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | ||
135 | #define PAGE_SHARED \ | ||
136 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | ||
137 | |||
138 | #define PAGE_SHARED_EXEC \ | ||
139 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | ||
140 | #define PAGE_COPY_NOEXEC \ | ||
141 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
142 | #define PAGE_COPY_EXEC \ | ||
143 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
144 | #define PAGE_COPY \ | ||
145 | PAGE_COPY_NOEXEC | ||
146 | #define PAGE_READONLY \ | ||
147 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
148 | #define PAGE_READONLY_EXEC \ | ||
149 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
150 | |||
151 | #define _PAGE_KERNEL \ | ||
152 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) | ||
153 | #define _PAGE_KERNEL_EXEC \ | ||
154 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
155 | |||
156 | extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | ||
157 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | ||
158 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) | ||
159 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) | ||
160 | #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) | ||
161 | #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) | ||
162 | |||
163 | #define PAGE_KERNEL __pgprot(__PAGE_KERNEL) | ||
164 | #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) | ||
165 | #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) | ||
166 | #define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) | ||
167 | #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) | ||
168 | #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) | ||
169 | #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) | ||
170 | 76 | ||
171 | /* | 77 | #ifdef CONFIG_HIGHMEM |
172 | * The i386 can't do page protection for execute, and considers that | 78 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) |
173 | * the same are read. Also, write permissions imply read permissions. | 79 | #else |
174 | * This is the closest we can get.. | 80 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) |
175 | */ | 81 | #endif |
176 | #define __P000 PAGE_NONE | ||
177 | #define __P001 PAGE_READONLY | ||
178 | #define __P010 PAGE_COPY | ||
179 | #define __P011 PAGE_COPY | ||
180 | #define __P100 PAGE_READONLY_EXEC | ||
181 | #define __P101 PAGE_READONLY_EXEC | ||
182 | #define __P110 PAGE_COPY_EXEC | ||
183 | #define __P111 PAGE_COPY_EXEC | ||
184 | |||
185 | #define __S000 PAGE_NONE | ||
186 | #define __S001 PAGE_READONLY | ||
187 | #define __S010 PAGE_SHARED | ||
188 | #define __S011 PAGE_SHARED | ||
189 | #define __S100 PAGE_READONLY_EXEC | ||
190 | #define __S101 PAGE_READONLY_EXEC | ||
191 | #define __S110 PAGE_SHARED_EXEC | ||
192 | #define __S111 PAGE_SHARED_EXEC | ||
193 | 82 | ||
194 | /* | 83 | /* |
195 | * Define this if things work differently on an i386 and an i486: | 84 | * Define this if things work differently on an i386 and an i486: |
@@ -211,133 +100,12 @@ extern unsigned long pg0[]; | |||
211 | 100 | ||
212 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | 101 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) |
213 | 102 | ||
214 | /* | ||
215 | * The following only work if pte_present() is true. | ||
216 | * Undefined behaviour if not.. | ||
217 | */ | ||
218 | static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } | ||
219 | static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } | ||
220 | static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } | ||
221 | static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } | ||
222 | |||
223 | /* | ||
224 | * The following only works if pte_present() is not true. | ||
225 | */ | ||
226 | static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } | ||
227 | |||
228 | static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } | ||
229 | static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } | ||
230 | static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } | ||
231 | static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } | ||
232 | static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } | ||
233 | static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } | ||
234 | static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } | ||
235 | |||
236 | #ifdef CONFIG_X86_PAE | 103 | #ifdef CONFIG_X86_PAE |
237 | # include <asm/pgtable-3level.h> | 104 | # include <asm/pgtable-3level.h> |
238 | #else | 105 | #else |
239 | # include <asm/pgtable-2level.h> | 106 | # include <asm/pgtable-2level.h> |
240 | #endif | 107 | #endif |
241 | 108 | ||
242 | #ifndef CONFIG_PARAVIRT | ||
243 | /* | ||
244 | * Rules for using pte_update - it must be called after any PTE update which | ||
245 | * has not been done using the set_pte / clear_pte interfaces. It is used by | ||
246 | * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE | ||
247 | * updates should either be sets, clears, or set_pte_atomic for P->P | ||
248 | * transitions, which means this hook should only be called for user PTEs. | ||
249 | * This hook implies a P->P protection or access change has taken place, which | ||
250 | * requires a subsequent TLB flush. The notification can optionally be delayed | ||
251 | * until the TLB flush event by using the pte_update_defer form of the | ||
252 | * interface, but care must be taken to assure that the flush happens while | ||
253 | * still holding the same page table lock so that the shadow and primary pages | ||
254 | * do not become out of sync on SMP. | ||
255 | */ | ||
256 | #define pte_update(mm, addr, ptep) do { } while (0) | ||
257 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | ||
258 | #endif | ||
259 | |||
260 | /* local pte updates need not use xchg for locking */ | ||
261 | static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) | ||
262 | { | ||
263 | pte_t res = *ptep; | ||
264 | |||
265 | /* Pure native function needs no input for mm, addr */ | ||
266 | native_pte_clear(NULL, 0, ptep); | ||
267 | return res; | ||
268 | } | ||
269 | |||
270 | /* | ||
271 | * We only update the dirty/accessed state if we set | ||
272 | * the dirty bit by hand in the kernel, since the hardware | ||
273 | * will do the accessed bit for us, and we don't want to | ||
274 | * race with other CPU's that might be updating the dirty | ||
275 | * bit at the same time. | ||
276 | */ | ||
277 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
278 | #define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ | ||
279 | ({ \ | ||
280 | int __changed = !pte_same(*(ptep), entry); \ | ||
281 | if (__changed && dirty) { \ | ||
282 | (ptep)->pte_low = (entry).pte_low; \ | ||
283 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ | ||
284 | flush_tlb_page(vma, address); \ | ||
285 | } \ | ||
286 | __changed; \ | ||
287 | }) | ||
288 | |||
289 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
290 | #define ptep_test_and_clear_young(vma, addr, ptep) ({ \ | ||
291 | int __ret = 0; \ | ||
292 | if (pte_young(*(ptep))) \ | ||
293 | __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \ | ||
294 | &(ptep)->pte_low); \ | ||
295 | if (__ret) \ | ||
296 | pte_update((vma)->vm_mm, addr, ptep); \ | ||
297 | __ret; \ | ||
298 | }) | ||
299 | |||
300 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | ||
301 | #define ptep_clear_flush_young(vma, address, ptep) \ | ||
302 | ({ \ | ||
303 | int __young; \ | ||
304 | __young = ptep_test_and_clear_young((vma), (address), (ptep)); \ | ||
305 | if (__young) \ | ||
306 | flush_tlb_page(vma, address); \ | ||
307 | __young; \ | ||
308 | }) | ||
309 | |||
310 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
311 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
312 | { | ||
313 | pte_t pte = native_ptep_get_and_clear(ptep); | ||
314 | pte_update(mm, addr, ptep); | ||
315 | return pte; | ||
316 | } | ||
317 | |||
318 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | ||
319 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | ||
320 | { | ||
321 | pte_t pte; | ||
322 | if (full) { | ||
323 | /* | ||
324 | * Full address destruction in progress; paravirt does not | ||
325 | * care about updates and native needs no locking | ||
326 | */ | ||
327 | pte = native_local_ptep_get_and_clear(ptep); | ||
328 | } else { | ||
329 | pte = ptep_get_and_clear(mm, addr, ptep); | ||
330 | } | ||
331 | return pte; | ||
332 | } | ||
333 | |||
334 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
335 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
336 | { | ||
337 | clear_bit(_PAGE_BIT_RW, &ptep->pte_low); | ||
338 | pte_update(mm, addr, ptep); | ||
339 | } | ||
340 | |||
341 | /* | 109 | /* |
342 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | 110 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); |
343 | * | 111 | * |
@@ -367,25 +135,6 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
367 | 135 | ||
368 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 136 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
369 | 137 | ||
370 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
371 | { | ||
372 | pte.pte_low &= _PAGE_CHG_MASK; | ||
373 | pte.pte_low |= pgprot_val(newprot); | ||
374 | #ifdef CONFIG_X86_PAE | ||
375 | /* | ||
376 | * Chop off the NX bit (if present), and add the NX portion of | ||
377 | * the newprot (if present): | ||
378 | */ | ||
379 | pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); | ||
380 | pte.pte_high |= (pgprot_val(newprot) >> 32) & \ | ||
381 | (__supported_pte_mask >> 32); | ||
382 | #endif | ||
383 | return pte; | ||
384 | } | ||
385 | |||
386 | #define pmd_large(pmd) \ | ||
387 | ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) | ||
388 | |||
389 | /* | 138 | /* |
390 | * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] | 139 | * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] |
391 | * | 140 | * |
@@ -407,6 +156,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
407 | */ | 156 | */ |
408 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 157 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
409 | 158 | ||
159 | static inline int pud_large(pud_t pud) { return 0; } | ||
160 | |||
410 | /* | 161 | /* |
411 | * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] | 162 | * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] |
412 | * | 163 | * |
@@ -432,26 +183,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
432 | #define pmd_page_vaddr(pmd) \ | 183 | #define pmd_page_vaddr(pmd) \ |
433 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | 184 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) |
434 | 185 | ||
435 | /* | ||
436 | * Helper function that returns the kernel pagetable entry controlling | ||
437 | * the virtual address 'address'. NULL means no pagetable entry present. | ||
438 | * NOTE: the return type is pte_t but if the pmd is PSE then we return it | ||
439 | * as a pte too. | ||
440 | */ | ||
441 | extern pte_t *lookup_address(unsigned long address); | ||
442 | |||
443 | /* | ||
444 | * Make a given kernel text page executable/non-executable. | ||
445 | * Returns the previous executability setting of that page (which | ||
446 | * is used to restore the previous state). Used by the SMP bootup code. | ||
447 | * NOTE: this is an __init function for security reasons. | ||
448 | */ | ||
449 | #ifdef CONFIG_X86_PAE | ||
450 | extern int set_kernel_exec(unsigned long vaddr, int enable); | ||
451 | #else | ||
452 | static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} | ||
453 | #endif | ||
454 | |||
455 | #if defined(CONFIG_HIGHPTE) | 186 | #if defined(CONFIG_HIGHPTE) |
456 | #define pte_offset_map(dir, address) \ | 187 | #define pte_offset_map(dir, address) \ |
457 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) | 188 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) |
@@ -497,13 +228,17 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base) | |||
497 | 228 | ||
498 | #endif /* !__ASSEMBLY__ */ | 229 | #endif /* !__ASSEMBLY__ */ |
499 | 230 | ||
231 | /* | ||
232 | * kern_addr_valid() is (1) for FLATMEM and (0) for | ||
233 | * SPARSEMEM and DISCONTIGMEM | ||
234 | */ | ||
500 | #ifdef CONFIG_FLATMEM | 235 | #ifdef CONFIG_FLATMEM |
501 | #define kern_addr_valid(addr) (1) | 236 | #define kern_addr_valid(addr) (1) |
502 | #endif /* CONFIG_FLATMEM */ | 237 | #else |
238 | #define kern_addr_valid(kaddr) (0) | ||
239 | #endif | ||
503 | 240 | ||
504 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 241 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
505 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 242 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
506 | 243 | ||
507 | #include <asm-generic/pgtable.h> | ||
508 | |||
509 | #endif /* _I386_PGTABLE_H */ | 244 | #endif /* _I386_PGTABLE_H */ |
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 9b0ff477b39e..bd4740a60f29 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h | |||
@@ -17,22 +17,15 @@ extern pud_t level3_kernel_pgt[512]; | |||
17 | extern pud_t level3_ident_pgt[512]; | 17 | extern pud_t level3_ident_pgt[512]; |
18 | extern pmd_t level2_kernel_pgt[512]; | 18 | extern pmd_t level2_kernel_pgt[512]; |
19 | extern pgd_t init_level4_pgt[]; | 19 | extern pgd_t init_level4_pgt[]; |
20 | extern unsigned long __supported_pte_mask; | ||
21 | 20 | ||
22 | #define swapper_pg_dir init_level4_pgt | 21 | #define swapper_pg_dir init_level4_pgt |
23 | 22 | ||
24 | extern void paging_init(void); | 23 | extern void paging_init(void); |
25 | extern void clear_kernel_mapping(unsigned long addr, unsigned long size); | ||
26 | |||
27 | /* | ||
28 | * ZERO_PAGE is a global shared page that is always zero: used | ||
29 | * for zero-mapped memory areas etc.. | ||
30 | */ | ||
31 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
32 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
33 | 24 | ||
34 | #endif /* !__ASSEMBLY__ */ | 25 | #endif /* !__ASSEMBLY__ */ |
35 | 26 | ||
27 | #define SHARED_KERNEL_PMD 1 | ||
28 | |||
36 | /* | 29 | /* |
37 | * PGDIR_SHIFT determines what a top-level page table entry can map | 30 | * PGDIR_SHIFT determines what a top-level page table entry can map |
38 | */ | 31 | */ |
@@ -71,57 +64,68 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | |||
71 | #define pgd_none(x) (!pgd_val(x)) | 64 | #define pgd_none(x) (!pgd_val(x)) |
72 | #define pud_none(x) (!pud_val(x)) | 65 | #define pud_none(x) (!pud_val(x)) |
73 | 66 | ||
74 | static inline void set_pte(pte_t *dst, pte_t val) | 67 | struct mm_struct; |
68 | |||
69 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, | ||
70 | pte_t *ptep) | ||
75 | { | 71 | { |
76 | pte_val(*dst) = pte_val(val); | 72 | *ptep = native_make_pte(0); |
77 | } | 73 | } |
78 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 74 | |
75 | static inline void native_set_pte(pte_t *ptep, pte_t pte) | ||
76 | { | ||
77 | *ptep = pte; | ||
78 | } | ||
79 | 79 | ||
80 | static inline void set_pmd(pmd_t *dst, pmd_t val) | 80 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
81 | { | 81 | { |
82 | pmd_val(*dst) = pmd_val(val); | 82 | native_set_pte(ptep, pte); |
83 | } | 83 | } |
84 | 84 | ||
85 | static inline void set_pud(pud_t *dst, pud_t val) | 85 | static inline pte_t native_ptep_get_and_clear(pte_t *xp) |
86 | { | 86 | { |
87 | pud_val(*dst) = pud_val(val); | 87 | #ifdef CONFIG_SMP |
88 | return native_make_pte(xchg(&xp->pte, 0)); | ||
89 | #else | ||
90 | /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */ | ||
91 | pte_t ret = *xp; | ||
92 | native_pte_clear(NULL, 0, xp); | ||
93 | return ret; | ||
94 | #endif | ||
88 | } | 95 | } |
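The SMP branch above deserves a note: between a plain load and the store of zero, another CPU or the page-walk hardware could set the accessed/dirty bit, and that update would be lost; xchg makes the read-and-clear one indivisible step. A minimal user-space sketch of the same pattern, assuming GCC/Clang atomic builtins (fake_pte_t and the helper name are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fake_pte_t;

static fake_pte_t fake_ptep_get_and_clear(fake_pte_t *ptep)
{
	/* One indivisible read-and-zero, like xchg(&xp->pte, 0). */
	return __atomic_exchange_n(ptep, 0, __ATOMIC_SEQ_CST);
}

int main(void)
{
	fake_pte_t pte = 0x1000 | 0x20 | 0x40;	/* pfn bits | ACCESSED | DIRTY */
	fake_pte_t old = fake_ptep_get_and_clear(&pte);
	printf("old=%#llx now=%#llx\n",
	       (unsigned long long)old, (unsigned long long)pte);
	return 0;
}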
89 | 96 | ||
90 | static inline void pud_clear (pud_t *pud) | 97 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) |
91 | { | 98 | { |
92 | set_pud(pud, __pud(0)); | 99 | *pmdp = pmd; |
93 | } | 100 | } |
94 | 101 | ||
95 | static inline void set_pgd(pgd_t *dst, pgd_t val) | 102 | static inline void native_pmd_clear(pmd_t *pmd) |
96 | { | 103 | { |
97 | pgd_val(*dst) = pgd_val(val); | 104 | native_set_pmd(pmd, native_make_pmd(0)); |
98 | } | 105 | } |
99 | 106 | ||
100 | static inline void pgd_clear (pgd_t * pgd) | 107 | static inline void native_set_pud(pud_t *pudp, pud_t pud) |
101 | { | 108 | { |
102 | set_pgd(pgd, __pgd(0)); | 109 | *pudp = pud; |
103 | } | 110 | } |
104 | 111 | ||
105 | #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) | 112 | static inline void native_pud_clear(pud_t *pud) |
113 | { | ||
114 | native_set_pud(pud, native_make_pud(0)); | ||
115 | } | ||
106 | 116 | ||
107 | struct mm_struct; | 117 | static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) |
118 | { | ||
119 | *pgdp = pgd; | ||
120 | } | ||
108 | 121 | ||
109 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | 122 | static inline void native_pgd_clear(pgd_t * pgd) |
110 | { | 123 | { |
111 | pte_t pte; | 124 | native_set_pgd(pgd, native_make_pgd(0)); |
112 | if (full) { | ||
113 | pte = *ptep; | ||
114 | *ptep = __pte(0); | ||
115 | } else { | ||
116 | pte = ptep_get_and_clear(mm, addr, ptep); | ||
117 | } | ||
118 | return pte; | ||
119 | } | 125 | } |
120 | 126 | ||
121 | #define pte_same(a, b) ((a).pte == (b).pte) | 127 | #define pte_same(a, b) ((a).pte == (b).pte) |
122 | 128 | ||
123 | #define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) | ||
124 | |||
125 | #endif /* !__ASSEMBLY__ */ | 129 | #endif /* !__ASSEMBLY__ */ |
126 | 130 | ||
127 | #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) | 131 | #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) |
@@ -131,8 +135,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long | |||
131 | #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) | 135 | #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) |
132 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 136 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
133 | 137 | ||
134 | #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) | ||
135 | #define FIRST_USER_ADDRESS 0 | ||
136 | 138 | ||
137 | #define MAXMEM _AC(0x3fffffffffff, UL) | 139 | #define MAXMEM _AC(0x3fffffffffff, UL) |
138 | #define VMALLOC_START _AC(0xffffc20000000000, UL) | 140 | #define VMALLOC_START _AC(0xffffc20000000000, UL) |
@@ -142,91 +144,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long | |||
142 | #define MODULES_END _AC(0xfffffffffff00000, UL) | 144 | #define MODULES_END _AC(0xfffffffffff00000, UL) |
143 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 145 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
144 | 146 | ||
145 | #define _PAGE_BIT_PRESENT 0 | ||
146 | #define _PAGE_BIT_RW 1 | ||
147 | #define _PAGE_BIT_USER 2 | ||
148 | #define _PAGE_BIT_PWT 3 | ||
149 | #define _PAGE_BIT_PCD 4 | ||
150 | #define _PAGE_BIT_ACCESSED 5 | ||
151 | #define _PAGE_BIT_DIRTY 6 | ||
152 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ | ||
153 | #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ | ||
154 | #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ | ||
155 | |||
156 | #define _PAGE_PRESENT 0x001 | ||
157 | #define _PAGE_RW 0x002 | ||
158 | #define _PAGE_USER 0x004 | ||
159 | #define _PAGE_PWT 0x008 | ||
160 | #define _PAGE_PCD 0x010 | ||
161 | #define _PAGE_ACCESSED 0x020 | ||
162 | #define _PAGE_DIRTY 0x040 | ||
163 | #define _PAGE_PSE 0x080 /* 2MB page */ | ||
164 | #define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ | ||
165 | #define _PAGE_GLOBAL 0x100 /* Global TLB entry */ | ||
166 | |||
167 | #define _PAGE_PROTNONE 0x080 /* If not present */ | ||
168 | #define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX) | ||
169 | |||
170 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
171 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
172 | |||
173 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
174 | |||
175 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | ||
176 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
177 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | ||
178 | #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
179 | #define PAGE_COPY PAGE_COPY_NOEXEC | ||
180 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
181 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | ||
182 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
183 | #define __PAGE_KERNEL \ | ||
184 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) | ||
185 | #define __PAGE_KERNEL_EXEC \ | ||
186 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
187 | #define __PAGE_KERNEL_NOCACHE \ | ||
188 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX) | ||
189 | #define __PAGE_KERNEL_RO \ | ||
190 | (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) | ||
191 | #define __PAGE_KERNEL_VSYSCALL \ | ||
192 | (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | ||
193 | #define __PAGE_KERNEL_VSYSCALL_NOCACHE \ | ||
194 | (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD) | ||
195 | #define __PAGE_KERNEL_LARGE \ | ||
196 | (__PAGE_KERNEL | _PAGE_PSE) | ||
197 | #define __PAGE_KERNEL_LARGE_EXEC \ | ||
198 | (__PAGE_KERNEL_EXEC | _PAGE_PSE) | ||
199 | |||
200 | #define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL) | ||
201 | |||
202 | #define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL) | ||
203 | #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) | ||
204 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) | ||
205 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) | ||
206 | #define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL) | ||
207 | #define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL) | ||
208 | #define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE) | ||
209 | #define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE) | ||
210 | |||
211 | /* xwr */ | ||
212 | #define __P000 PAGE_NONE | ||
213 | #define __P001 PAGE_READONLY | ||
214 | #define __P010 PAGE_COPY | ||
215 | #define __P011 PAGE_COPY | ||
216 | #define __P100 PAGE_READONLY_EXEC | ||
217 | #define __P101 PAGE_READONLY_EXEC | ||
218 | #define __P110 PAGE_COPY_EXEC | ||
219 | #define __P111 PAGE_COPY_EXEC | ||
220 | |||
221 | #define __S000 PAGE_NONE | ||
222 | #define __S001 PAGE_READONLY | ||
223 | #define __S010 PAGE_SHARED | ||
224 | #define __S011 PAGE_SHARED | ||
225 | #define __S100 PAGE_READONLY_EXEC | ||
226 | #define __S101 PAGE_READONLY_EXEC | ||
227 | #define __S110 PAGE_SHARED_EXEC | ||
228 | #define __S111 PAGE_SHARED_EXEC | ||
229 | |||
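The protection macros being removed here are plain OR-combinations of the flag bits at the top of the block. A user-space sketch with the bit values copied from this hunk (a 64-bit build is assumed so bit 63 fits in unsigned long) works one of them out:

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define _PAGE_ACCESSED	0x020UL
#define _PAGE_DIRTY	0x040UL
#define _PAGE_NX	(1UL << 63)	/* _PAGE_BIT_NX; 64-bit long assumed */

int main(void)
{
	unsigned long page_kernel = _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
				    _PAGE_ACCESSED | _PAGE_NX;
	printf("__PAGE_KERNEL = %#lx\n", page_kernel);	/* 0x8000000000000063 */
	return 0;
}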
230 | #ifndef __ASSEMBLY__ | 147 | #ifndef __ASSEMBLY__ |
231 | 148 | ||
232 | static inline unsigned long pgd_bad(pgd_t pgd) | 149 | static inline unsigned long pgd_bad(pgd_t pgd) |
@@ -246,66 +163,16 @@ static inline unsigned long pmd_bad(pmd_t pmd) | |||
246 | 163 | ||
247 | #define pte_none(x) (!pte_val(x)) | 164 | #define pte_none(x) (!pte_val(x)) |
248 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 165 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
249 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | ||
250 | 166 | ||
251 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this | 167 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ |
252 | right? */ | ||
253 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 168 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
254 | #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 169 | #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
255 | 170 | ||
256 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) | ||
257 | { | ||
258 | pte_t pte; | ||
259 | pte_val(pte) = (page_nr << PAGE_SHIFT); | ||
260 | pte_val(pte) |= pgprot_val(pgprot); | ||
261 | pte_val(pte) &= __supported_pte_mask; | ||
262 | return pte; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * The following only work if pte_present() is true. | ||
267 | * Undefined behaviour if not.. | ||
268 | */ | ||
269 | #define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) | ||
270 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
271 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
272 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | ||
273 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
274 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } | ||
275 | |||
276 | static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } | ||
277 | static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } | ||
278 | static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } | ||
279 | static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; } | ||
280 | static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } | ||
281 | static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } | ||
282 | static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } | ||
283 | static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; } | ||
284 | static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; } | ||
285 | |||
286 | struct vm_area_struct; | ||
287 | |||
288 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
289 | { | ||
290 | if (!pte_young(*ptep)) | ||
291 | return 0; | ||
292 | return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte); | ||
293 | } | ||
294 | |||
295 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
296 | { | ||
297 | clear_bit(_PAGE_BIT_RW, &ptep->pte); | ||
298 | } | ||
299 | |||
300 | /* | 171 | /* |
301 | * Macro to mark a page protection value as "uncacheable". | 172 | * Macro to mark a page protection value as "uncacheable". |
302 | */ | 173 | */ |
303 | #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) | 174 | #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) |
304 | 175 | ||
305 | static inline int pmd_large(pmd_t pte) { | ||
306 | return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; | ||
307 | } | ||
308 | |||
309 | 176 | ||
310 | /* | 177 | /* |
311 | * Conversion functions: convert a page and protection to a page entry, | 178 | * Conversion functions: convert a page and protection to a page entry, |
@@ -331,6 +198,12 @@ static inline int pmd_large(pmd_t pte) { | |||
331 | #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) | 198 | #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) |
332 | #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) | 199 | #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) |
333 | 200 | ||
201 | static inline int pud_large(pud_t pte) | ||
202 | { | ||
203 | return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == | ||
204 | (_PAGE_PSE|_PAGE_PRESENT); | ||
205 | } | ||
206 | |||
334 | /* PMD - Level 2 access */ | 207 | /* PMD - Level 2 access */ |
335 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) | 208 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) |
336 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 209 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) |
@@ -340,29 +213,18 @@ static inline int pmd_large(pmd_t pte) { | |||
340 | pmd_index(address)) | 213 | pmd_index(address)) |
341 | #define pmd_none(x) (!pmd_val(x)) | 214 | #define pmd_none(x) (!pmd_val(x)) |
342 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 215 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
343 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | ||
344 | #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) | 216 | #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) |
345 | #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 217 | #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
346 | 218 | ||
347 | #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) | 219 | #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) |
348 | #define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE }) | 220 | #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE }) |
349 | #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT | 221 | #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT |
350 | 222 | ||
351 | /* PTE - Level 1 access. */ | 223 | /* PTE - Level 1 access. */ |
352 | 224 | ||
353 | /* page, protection -> pte */ | 225 | /* page, protection -> pte */ |
354 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 226 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
355 | #define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE) | ||
356 | 227 | ||
357 | /* Change flags of a PTE */ | ||
358 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
359 | { | ||
360 | pte_val(pte) &= _PAGE_CHG_MASK; | ||
361 | pte_val(pte) |= pgprot_val(newprot); | ||
362 | pte_val(pte) &= __supported_pte_mask; | ||
363 | return pte; | ||
364 | } | ||
365 | |||
366 | #define pte_index(address) \ | 228 | #define pte_index(address) \ |
367 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 229 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
368 | #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ | 230 | #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ |
@@ -376,40 +238,20 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
376 | 238 | ||
377 | #define update_mmu_cache(vma,address,pte) do { } while (0) | 239 | #define update_mmu_cache(vma,address,pte) do { } while (0) |
378 | 240 | ||
379 | /* We only update the dirty/accessed state if we set | ||
380 | * the dirty bit by hand in the kernel, since the hardware | ||
381 | * will do the accessed bit for us, and we don't want to | ||
382 | * race with other CPU's that might be updating the dirty | ||
383 | * bit at the same time. */ | ||
384 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
385 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
386 | ({ \ | ||
387 | int __changed = !pte_same(*(__ptep), __entry); \ | ||
388 | if (__changed && __dirty) { \ | ||
389 | set_pte(__ptep, __entry); \ | ||
390 | flush_tlb_page(__vma, __address); \ | ||
391 | } \ | ||
392 | __changed; \ | ||
393 | }) | ||
394 | |||
395 | /* Encode and de-code a swap entry */ | 241 | /* Encode and de-code a swap entry */ |
396 | #define __swp_type(x) (((x).val >> 1) & 0x3f) | 242 | #define __swp_type(x) (((x).val >> 1) & 0x3f) |
397 | #define __swp_offset(x) ((x).val >> 8) | 243 | #define __swp_offset(x) ((x).val >> 8) |
398 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 244 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) |
399 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 245 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
400 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 246 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
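The encoding keeps bit 0 (_PAGE_PRESENT) clear so a swapped-out entry never looks present: the swap type lives in bits 1..6 and the offset starts at bit 8. A round-trip sketch in plain C (the helper name is illustrative):

#include <assert.h>
#include <stdio.h>

static unsigned long mk_swp_entry(unsigned long type, unsigned long offset)
{
	return (type << 1) | (offset << 8);	/* __swp_entry()  */
}

int main(void)
{
	unsigned long e = mk_swp_entry(5, 0x1234);
	unsigned long type = (e >> 1) & 0x3f;	/* __swp_type()   */
	unsigned long off  = e >> 8;		/* __swp_offset() */
	assert(type == 5 && off == 0x1234);
	printf("entry=%#lx type=%lu offset=%#lx\n", e, type, off);
	return 0;
}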
401 | |||
402 | extern spinlock_t pgd_lock; | ||
403 | extern struct list_head pgd_list; | ||
404 | 247 | ||
405 | extern int kern_addr_valid(unsigned long addr); | 248 | extern int kern_addr_valid(unsigned long addr); |
406 | 249 | ||
407 | pte_t *lookup_address(unsigned long addr); | ||
408 | |||
409 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 250 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
410 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 251 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
411 | 252 | ||
412 | #define HAVE_ARCH_UNMAPPED_AREA | 253 | #define HAVE_ARCH_UNMAPPED_AREA |
254 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | ||
413 | 255 | ||
414 | #define pgtable_cache_init() do { } while (0) | 256 | #define pgtable_cache_init() do { } while (0) |
415 | #define check_pgt_cache() do { } while (0) | 257 | #define check_pgt_cache() do { } while (0) |
@@ -422,12 +264,7 @@ pte_t *lookup_address(unsigned long addr); | |||
422 | #define kc_offset_to_vaddr(o) \ | 264 | #define kc_offset_to_vaddr(o) \ |
423 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) | 265 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) |
424 | 266 | ||
425 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
426 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
427 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | ||
428 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
429 | #define __HAVE_ARCH_PTE_SAME | 267 | #define __HAVE_ARCH_PTE_SAME |
430 | #include <asm-generic/pgtable.h> | ||
431 | #endif /* !__ASSEMBLY__ */ | 268 | #endif /* !__ASSEMBLY__ */ |
432 | 269 | ||
433 | #endif /* _X86_64_PGTABLE_H */ | 270 | #endif /* _X86_64_PGTABLE_H */ |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 46e1c04e309c..ab4d0c2a3f8f 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -1,5 +1,842 @@ | |||
1 | #ifndef __ASM_X86_PROCESSOR_H | ||
2 | #define __ASM_X86_PROCESSOR_H | ||
3 | |||
4 | #include <asm/processor-flags.h> | ||
5 | |||
6 | /* migration helpers, for KVM - will be removed in 2.6.25: */ | ||
7 | #include <asm/vm86.h> | ||
8 | #define Xgt_desc_struct desc_ptr | ||
9 | |||
10 | /* Forward declaration, a strange C thing */ | ||
11 | struct task_struct; | ||
12 | struct mm_struct; | ||
13 | |||
14 | #include <asm/vm86.h> | ||
15 | #include <asm/math_emu.h> | ||
16 | #include <asm/segment.h> | ||
17 | #include <asm/types.h> | ||
18 | #include <asm/sigcontext.h> | ||
19 | #include <asm/current.h> | ||
20 | #include <asm/cpufeature.h> | ||
21 | #include <asm/system.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/percpu.h> | ||
24 | #include <asm/msr.h> | ||
25 | #include <asm/desc_defs.h> | ||
26 | #include <asm/nops.h> | ||
27 | #include <linux/personality.h> | ||
28 | #include <linux/cpumask.h> | ||
29 | #include <linux/cache.h> | ||
30 | #include <linux/threads.h> | ||
31 | #include <linux/init.h> | ||
32 | |||
33 | /* | ||
34 | * Default implementation of macro that returns current | ||
35 | * instruction pointer ("program counter"). | ||
36 | */ | ||
37 | static inline void *current_text_addr(void) | ||
38 | { | ||
39 | void *pc; | ||
40 | asm volatile("mov $1f,%0\n1:":"=r" (pc)); | ||
41 | return pc; | ||
42 | } | ||
43 | |||
44 | #ifdef CONFIG_X86_VSMP | ||
45 | #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
46 | #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
47 | #else | ||
48 | #define ARCH_MIN_TASKALIGN 16 | ||
49 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | ||
50 | #endif | ||
51 | |||
52 | /* | ||
53 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
54 | * Members of this structure are referenced in head.S, so think twice | ||
55 | * before touching them. [mj] | ||
56 | */ | ||
57 | |||
58 | struct cpuinfo_x86 { | ||
59 | __u8 x86; /* CPU family */ | ||
60 | __u8 x86_vendor; /* CPU vendor */ | ||
61 | __u8 x86_model; | ||
62 | __u8 x86_mask; | ||
63 | #ifdef CONFIG_X86_32 | ||
64 | char wp_works_ok; /* It doesn't on 386's */ | ||
65 | char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ | ||
66 | char hard_math; | ||
67 | char rfu; | ||
68 | char fdiv_bug; | ||
69 | char f00f_bug; | ||
70 | char coma_bug; | ||
71 | char pad0; | ||
72 | #else | ||
73 | /* number of 4K pages in DTLB/ITLB combined */ | ||
74 | int x86_tlbsize; | ||
75 | __u8 x86_virt_bits, x86_phys_bits; | ||
76 | /* cpuid returned core id bits */ | ||
77 | __u8 x86_coreid_bits; | ||
78 | /* Max extended CPUID function supported */ | ||
79 | __u32 extended_cpuid_level; | ||
80 | #endif | ||
81 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | ||
82 | __u32 x86_capability[NCAPINTS]; | ||
83 | char x86_vendor_id[16]; | ||
84 | char x86_model_id[64]; | ||
85 | int x86_cache_size; /* in KB - valid for CPUS which support this | ||
86 | call */ | ||
87 | int x86_cache_alignment; /* In bytes */ | ||
88 | int x86_power; | ||
89 | unsigned long loops_per_jiffy; | ||
90 | #ifdef CONFIG_SMP | ||
91 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
92 | #endif | ||
93 | u16 x86_max_cores; /* cpuid returned max cores value */ | ||
94 | u16 apicid; | ||
95 | u16 x86_clflush_size; | ||
96 | #ifdef CONFIG_SMP | ||
97 | u16 booted_cores; /* number of cores as seen by OS */ | ||
98 | u16 phys_proc_id; /* Physical processor id. */ | ||
99 | u16 cpu_core_id; /* Core id */ | ||
100 | u16 cpu_index; /* index into per_cpu list */ | ||
101 | #endif | ||
102 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
103 | |||
104 | #define X86_VENDOR_INTEL 0 | ||
105 | #define X86_VENDOR_CYRIX 1 | ||
106 | #define X86_VENDOR_AMD 2 | ||
107 | #define X86_VENDOR_UMC 3 | ||
108 | #define X86_VENDOR_NEXGEN 4 | ||
109 | #define X86_VENDOR_CENTAUR 5 | ||
110 | #define X86_VENDOR_TRANSMETA 7 | ||
111 | #define X86_VENDOR_NSC 8 | ||
112 | #define X86_VENDOR_NUM 9 | ||
113 | #define X86_VENDOR_UNKNOWN 0xff | ||
114 | |||
115 | /* | ||
116 | * capabilities of CPUs | ||
117 | */ | ||
118 | extern struct cpuinfo_x86 boot_cpu_data; | ||
119 | extern struct cpuinfo_x86 new_cpu_data; | ||
120 | extern struct tss_struct doublefault_tss; | ||
121 | extern __u32 cleared_cpu_caps[NCAPINTS]; | ||
122 | |||
123 | #ifdef CONFIG_SMP | ||
124 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | ||
125 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | ||
126 | #define current_cpu_data cpu_data(smp_processor_id()) | ||
127 | #else | ||
128 | #define cpu_data(cpu) boot_cpu_data | ||
129 | #define current_cpu_data boot_cpu_data | ||
130 | #endif | ||
131 | |||
132 | void cpu_detect(struct cpuinfo_x86 *c); | ||
133 | |||
134 | extern void identify_cpu(struct cpuinfo_x86 *); | ||
135 | extern void identify_boot_cpu(void); | ||
136 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | ||
137 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
138 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
139 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
140 | extern unsigned short num_cache_leaves; | ||
141 | |||
142 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) | ||
143 | extern void detect_ht(struct cpuinfo_x86 *c); | ||
144 | #else | ||
145 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
146 | #endif | ||
147 | |||
148 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | ||
149 | unsigned int *ecx, unsigned int *edx) | ||
150 | { | ||
151 | /* ecx is often an input as well as an output. */ | ||
152 | __asm__("cpuid" | ||
153 | : "=a" (*eax), | ||
154 | "=b" (*ebx), | ||
155 | "=c" (*ecx), | ||
156 | "=d" (*edx) | ||
157 | : "0" (*eax), "2" (*ecx)); | ||
158 | } | ||
159 | |||
160 | static inline void load_cr3(pgd_t *pgdir) | ||
161 | { | ||
162 | write_cr3(__pa(pgdir)); | ||
163 | } | ||
164 | |||
165 | #ifdef CONFIG_X86_32 | ||
166 | /* This is the TSS defined by the hardware. */ | ||
167 | struct x86_hw_tss { | ||
168 | unsigned short back_link, __blh; | ||
169 | unsigned long sp0; | ||
170 | unsigned short ss0, __ss0h; | ||
171 | unsigned long sp1; | ||
172 | unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ | ||
173 | unsigned long sp2; | ||
174 | unsigned short ss2, __ss2h; | ||
175 | unsigned long __cr3; | ||
176 | unsigned long ip; | ||
177 | unsigned long flags; | ||
178 | unsigned long ax, cx, dx, bx; | ||
179 | unsigned long sp, bp, si, di; | ||
180 | unsigned short es, __esh; | ||
181 | unsigned short cs, __csh; | ||
182 | unsigned short ss, __ssh; | ||
183 | unsigned short ds, __dsh; | ||
184 | unsigned short fs, __fsh; | ||
185 | unsigned short gs, __gsh; | ||
186 | unsigned short ldt, __ldth; | ||
187 | unsigned short trace, io_bitmap_base; | ||
188 | } __attribute__((packed)); | ||
189 | #else | ||
190 | struct x86_hw_tss { | ||
191 | u32 reserved1; | ||
192 | u64 sp0; | ||
193 | u64 sp1; | ||
194 | u64 sp2; | ||
195 | u64 reserved2; | ||
196 | u64 ist[7]; | ||
197 | u32 reserved3; | ||
198 | u32 reserved4; | ||
199 | u16 reserved5; | ||
200 | u16 io_bitmap_base; | ||
201 | } __attribute__((packed)) ____cacheline_aligned; | ||
202 | #endif | ||
203 | |||
204 | /* | ||
205 | * Size of io_bitmap. | ||
206 | */ | ||
207 | #define IO_BITMAP_BITS 65536 | ||
208 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | ||
209 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | ||
210 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) | ||
211 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | ||
212 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | ||
213 | |||
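The arithmetic behind these constants is simple but worth spelling out; a trivial sketch, assuming 8-byte longs as on x86-64:

#include <stdio.h>

int main(void)
{
	unsigned long bits  = 65536;			/* IO_BITMAP_BITS  */
	unsigned long bytes = bits / 8;			/* IO_BITMAP_BYTES */
	unsigned long longs = bytes / sizeof(long);	/* IO_BITMAP_LONGS */
	printf("%lu bytes, %lu longs (+1 terminator long)\n", bytes, longs);
	return 0;
}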
214 | struct tss_struct { | ||
215 | struct x86_hw_tss x86_tss; | ||
216 | |||
217 | /* | ||
218 | * The extra 1 is there because the CPU will access an | ||
219 | * additional byte beyond the end of the IO permission | ||
220 | * bitmap. The extra byte must be all 1 bits, and must | ||
221 | * be within the limit. | ||
222 | */ | ||
223 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | ||
224 | /* | ||
225 | * Cache the current maximum and the last task that used the bitmap: | ||
226 | */ | ||
227 | unsigned long io_bitmap_max; | ||
228 | struct thread_struct *io_bitmap_owner; | ||
229 | /* | ||
230 | * pads the TSS to be cacheline-aligned (size is 0x100) | ||
231 | */ | ||
232 | unsigned long __cacheline_filler[35]; | ||
233 | /* | ||
234 | * .. and then another 0x100 bytes for emergency kernel stack | ||
235 | */ | ||
236 | unsigned long stack[64]; | ||
237 | } __attribute__((packed)); | ||
238 | |||
239 | DECLARE_PER_CPU(struct tss_struct, init_tss); | ||
240 | |||
241 | /* Save the original ist values for checking stack pointers during debugging */ | ||
242 | struct orig_ist { | ||
243 | unsigned long ist[7]; | ||
244 | }; | ||
245 | |||
246 | #define MXCSR_DEFAULT 0x1f80 | ||
247 | |||
248 | struct i387_fsave_struct { | ||
249 | u32 cwd; | ||
250 | u32 swd; | ||
251 | u32 twd; | ||
252 | u32 fip; | ||
253 | u32 fcs; | ||
254 | u32 foo; | ||
255 | u32 fos; | ||
256 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
257 | u32 status; /* software status information */ | ||
258 | }; | ||
259 | |||
260 | struct i387_fxsave_struct { | ||
261 | u16 cwd; | ||
262 | u16 swd; | ||
263 | u16 twd; | ||
264 | u16 fop; | ||
265 | union { | ||
266 | struct { | ||
267 | u64 rip; | ||
268 | u64 rdp; | ||
269 | }; | ||
270 | struct { | ||
271 | u32 fip; | ||
272 | u32 fcs; | ||
273 | u32 foo; | ||
274 | u32 fos; | ||
275 | }; | ||
276 | }; | ||
277 | u32 mxcsr; | ||
278 | u32 mxcsr_mask; | ||
279 | u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
280 | u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ | ||
281 | u32 padding[24]; | ||
282 | } __attribute__((aligned(16))); | ||
283 | |||
284 | struct i387_soft_struct { | ||
285 | u32 cwd; | ||
286 | u32 swd; | ||
287 | u32 twd; | ||
288 | u32 fip; | ||
289 | u32 fcs; | ||
290 | u32 foo; | ||
291 | u32 fos; | ||
292 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
293 | u8 ftop, changed, lookahead, no_update, rm, alimit; | ||
294 | struct info *info; | ||
295 | u32 entry_eip; | ||
296 | }; | ||
297 | |||
298 | union i387_union { | ||
299 | struct i387_fsave_struct fsave; | ||
300 | struct i387_fxsave_struct fxsave; | ||
301 | struct i387_soft_struct soft; | ||
302 | }; | ||
303 | |||
304 | #ifdef CONFIG_X86_32 | ||
305 | /* | ||
306 | * the following now lives in the per cpu area: | ||
307 | * extern int cpu_llc_id[NR_CPUS]; | ||
308 | */ | ||
309 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
310 | #else | ||
311 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | ||
312 | #endif | ||
313 | |||
314 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
315 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
316 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
317 | extern unsigned short num_cache_leaves; | ||
318 | |||
319 | struct thread_struct { | ||
320 | /* cached TLS descriptors. */ | ||
321 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
322 | unsigned long sp0; | ||
323 | unsigned long sp; | ||
324 | #ifdef CONFIG_X86_32 | ||
325 | unsigned long sysenter_cs; | ||
326 | #else | ||
327 | unsigned long usersp; /* Copy from PDA */ | ||
328 | unsigned short es, ds, fsindex, gsindex; | ||
329 | #endif | ||
330 | unsigned long ip; | ||
331 | unsigned long fs; | ||
332 | unsigned long gs; | ||
333 | /* Hardware debugging registers */ | ||
334 | unsigned long debugreg0; | ||
335 | unsigned long debugreg1; | ||
336 | unsigned long debugreg2; | ||
337 | unsigned long debugreg3; | ||
338 | unsigned long debugreg6; | ||
339 | unsigned long debugreg7; | ||
340 | /* fault info */ | ||
341 | unsigned long cr2, trap_no, error_code; | ||
342 | /* floating point info */ | ||
343 | union i387_union i387 __attribute__((aligned(16))); | ||
344 | #ifdef CONFIG_X86_32 | ||
345 | /* virtual 86 mode info */ | ||
346 | struct vm86_struct __user *vm86_info; | ||
347 | unsigned long screen_bitmap; | ||
348 | unsigned long v86flags, v86mask, saved_sp0; | ||
349 | unsigned int saved_fs, saved_gs; | ||
350 | #endif | ||
351 | /* IO permissions */ | ||
352 | unsigned long *io_bitmap_ptr; | ||
353 | unsigned long iopl; | ||
354 | /* max allowed port in the bitmap, in bytes: */ | ||
355 | unsigned io_bitmap_max; | ||
356 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ | ||
357 | unsigned long debugctlmsr; | ||
358 | /* Debug Store: if non-zero, points to a DS Save Area configuration; | ||
359 | * it goes into MSR_IA32_DS_AREA */ | ||
360 | unsigned long ds_area_msr; | ||
361 | }; | ||
362 | |||
363 | static inline unsigned long native_get_debugreg(int regno) | ||
364 | { | ||
365 | unsigned long val = 0; /* Damn you, gcc! */ | ||
366 | |||
367 | switch (regno) { | ||
368 | case 0: | ||
369 | asm("mov %%db0, %0" :"=r" (val)); break; | ||
370 | case 1: | ||
371 | asm("mov %%db1, %0" :"=r" (val)); break; | ||
372 | case 2: | ||
373 | asm("mov %%db2, %0" :"=r" (val)); break; | ||
374 | case 3: | ||
375 | asm("mov %%db3, %0" :"=r" (val)); break; | ||
376 | case 6: | ||
377 | asm("mov %%db6, %0" :"=r" (val)); break; | ||
378 | case 7: | ||
379 | asm("mov %%db7, %0" :"=r" (val)); break; | ||
380 | default: | ||
381 | BUG(); | ||
382 | } | ||
383 | return val; | ||
384 | } | ||
385 | |||
386 | static inline void native_set_debugreg(int regno, unsigned long value) | ||
387 | { | ||
388 | switch (regno) { | ||
389 | case 0: | ||
390 | asm("mov %0,%%db0" : /* no output */ :"r" (value)); | ||
391 | break; | ||
392 | case 1: | ||
393 | asm("mov %0,%%db1" : /* no output */ :"r" (value)); | ||
394 | break; | ||
395 | case 2: | ||
396 | asm("mov %0,%%db2" : /* no output */ :"r" (value)); | ||
397 | break; | ||
398 | case 3: | ||
399 | asm("mov %0,%%db3" : /* no output */ :"r" (value)); | ||
400 | break; | ||
401 | case 6: | ||
402 | asm("mov %0,%%db6" : /* no output */ :"r" (value)); | ||
403 | break; | ||
404 | case 7: | ||
405 | asm("mov %0,%%db7" : /* no output */ :"r" (value)); | ||
406 | break; | ||
407 | default: | ||
408 | BUG(); | ||
409 | } | ||
410 | } | ||
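These accessors execute privileged moves to and from db0..db7, so they only run in ring 0; a debugger in user space reaches the same registers through ptrace instead. A hedged sketch of that counterpart (standard Linux ptrace API; the control flow is illustrative, not taken from this header):

#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent inspect us */
		_exit(0);
	}
	waitpid(child, NULL, 0);
	/* Read DR7 (debug control) from the child's register save area. */
	long dr7 = ptrace(PTRACE_PEEKUSER, child,
			  offsetof(struct user, u_debugreg[7]), NULL);
	printf("child DR7 = %#lx\n", dr7);
	ptrace(PTRACE_DETACH, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}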
411 | |||
412 | /* | ||
413 | * Set IOPL bits in EFLAGS from given mask | ||
414 | */ | ||
415 | static inline void native_set_iopl_mask(unsigned mask) | ||
416 | { | ||
417 | #ifdef CONFIG_X86_32 | ||
418 | unsigned int reg; | ||
419 | __asm__ __volatile__ ("pushfl;" | ||
420 | "popl %0;" | ||
421 | "andl %1, %0;" | ||
422 | "orl %2, %0;" | ||
423 | "pushl %0;" | ||
424 | "popfl" | ||
425 | : "=&r" (reg) | ||
426 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
427 | #endif | ||
428 | } | ||
429 | |||
430 | static inline void native_load_sp0(struct tss_struct *tss, | ||
431 | struct thread_struct *thread) | ||
432 | { | ||
433 | tss->x86_tss.sp0 = thread->sp0; | ||
434 | #ifdef CONFIG_X86_32 | ||
435 | /* Only happens when SEP is enabled, no need to test "SEP"arately */ | ||
436 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | ||
437 | tss->x86_tss.ss1 = thread->sysenter_cs; | ||
438 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
439 | } | ||
440 | #endif | ||
441 | } | ||
442 | |||
443 | static inline void native_swapgs(void) | ||
444 | { | ||
445 | #ifdef CONFIG_X86_64 | ||
446 | asm volatile("swapgs" ::: "memory"); | ||
447 | #endif | ||
448 | } | ||
449 | |||
450 | #ifdef CONFIG_PARAVIRT | ||
451 | #include <asm/paravirt.h> | ||
452 | #else | ||
453 | #define __cpuid native_cpuid | ||
454 | #define paravirt_enabled() 0 | ||
455 | |||
456 | /* | ||
457 | * These special macros can be used to get or set a debugging register | ||
458 | */ | ||
459 | #define get_debugreg(var, register) \ | ||
460 | (var) = native_get_debugreg(register) | ||
461 | #define set_debugreg(value, register) \ | ||
462 | native_set_debugreg(register, value) | ||
463 | |||
464 | static inline void load_sp0(struct tss_struct *tss, | ||
465 | struct thread_struct *thread) | ||
466 | { | ||
467 | native_load_sp0(tss, thread); | ||
468 | } | ||
469 | |||
470 | #define set_iopl_mask native_set_iopl_mask | ||
471 | #define SWAPGS swapgs | ||
472 | #endif /* CONFIG_PARAVIRT */ | ||
473 | |||
474 | /* | ||
475 | * Save the cr4 feature set we're using (i.e. | ||
476 | * Pentium 4MB enable and PPro Global page | ||
477 | * enable), so that any CPUs that boot up | ||
478 | * after us can get the correct flags. | ||
479 | */ | ||
480 | extern unsigned long mmu_cr4_features; | ||
481 | |||
482 | static inline void set_in_cr4(unsigned long mask) | ||
483 | { | ||
484 | unsigned cr4; | ||
485 | mmu_cr4_features |= mask; | ||
486 | cr4 = read_cr4(); | ||
487 | cr4 |= mask; | ||
488 | write_cr4(cr4); | ||
489 | } | ||
490 | |||
491 | static inline void clear_in_cr4(unsigned long mask) | ||
492 | { | ||
493 | unsigned cr4; | ||
494 | mmu_cr4_features &= ~mask; | ||
495 | cr4 = read_cr4(); | ||
496 | cr4 &= ~mask; | ||
497 | write_cr4(cr4); | ||
498 | } | ||
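The idea is a software shadow kept alongside the hardware read-modify-write: mmu_cr4_features accumulates every bit ever set, so CPUs that boot later can replay the same cr4 state. A toy user-space analogy (all names are stand-ins):

#include <stdio.h>

static unsigned long shadow_cr4;	/* stands in for mmu_cr4_features */
static unsigned long hw_cr4;		/* stands in for the real %cr4 */

static void toy_set_in_cr4(unsigned long mask)
{
	shadow_cr4 |= mask;	/* remembered for CPUs that boot later */
	hw_cr4 |= mask;		/* read_cr4(); cr4 |= mask; write_cr4(cr4) */
}

int main(void)
{
	toy_set_in_cr4(1UL << 4);	/* e.g. CR4.PSE */
	printf("shadow=%#lx hw=%#lx\n", shadow_cr4, hw_cr4);
	return 0;
}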
499 | |||
500 | struct microcode_header { | ||
501 | unsigned int hdrver; | ||
502 | unsigned int rev; | ||
503 | unsigned int date; | ||
504 | unsigned int sig; | ||
505 | unsigned int cksum; | ||
506 | unsigned int ldrver; | ||
507 | unsigned int pf; | ||
508 | unsigned int datasize; | ||
509 | unsigned int totalsize; | ||
510 | unsigned int reserved[3]; | ||
511 | }; | ||
512 | |||
513 | struct microcode { | ||
514 | struct microcode_header hdr; | ||
515 | unsigned int bits[0]; | ||
516 | }; | ||
517 | |||
518 | typedef struct microcode microcode_t; | ||
519 | typedef struct microcode_header microcode_header_t; | ||
520 | |||
521 | /* microcode format is extended from prescott processors */ | ||
522 | struct extended_signature { | ||
523 | unsigned int sig; | ||
524 | unsigned int pf; | ||
525 | unsigned int cksum; | ||
526 | }; | ||
527 | |||
528 | struct extended_sigtable { | ||
529 | unsigned int count; | ||
530 | unsigned int cksum; | ||
531 | unsigned int reserved[3]; | ||
532 | struct extended_signature sigs[0]; | ||
533 | }; | ||
534 | |||
535 | typedef struct { | ||
536 | unsigned long seg; | ||
537 | } mm_segment_t; | ||
538 | |||
539 | |||
540 | /* | ||
541 | * create a kernel thread without removing it from tasklists | ||
542 | */ | ||
543 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
544 | |||
545 | /* Free all resources held by a thread. */ | ||
546 | extern void release_thread(struct task_struct *); | ||
547 | |||
548 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
549 | extern void prepare_to_copy(struct task_struct *tsk); | ||
550 | |||
551 | unsigned long get_wchan(struct task_struct *p); | ||
552 | |||
553 | /* | ||
554 | * Generic CPUID function | ||
555 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx, | ||
556 | * resulting in stale register contents being returned. | ||
557 | */ | ||
558 | static inline void cpuid(unsigned int op, | ||
559 | unsigned int *eax, unsigned int *ebx, | ||
560 | unsigned int *ecx, unsigned int *edx) | ||
561 | { | ||
562 | *eax = op; | ||
563 | *ecx = 0; | ||
564 | __cpuid(eax, ebx, ecx, edx); | ||
565 | } | ||
566 | |||
567 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
568 | static inline void cpuid_count(unsigned int op, int count, | ||
569 | unsigned int *eax, unsigned int *ebx, | ||
570 | unsigned int *ecx, unsigned int *edx) | ||
571 | { | ||
572 | *eax = op; | ||
573 | *ecx = count; | ||
574 | __cpuid(eax, ebx, ecx, edx); | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * CPUID functions returning a single datum | ||
579 | */ | ||
580 | static inline unsigned int cpuid_eax(unsigned int op) | ||
581 | { | ||
582 | unsigned int eax, ebx, ecx, edx; | ||
583 | |||
584 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
585 | return eax; | ||
586 | } | ||
587 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
588 | { | ||
589 | unsigned int eax, ebx, ecx, edx; | ||
590 | |||
591 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
592 | return ebx; | ||
593 | } | ||
594 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
595 | { | ||
596 | unsigned int eax, ebx, ecx, edx; | ||
597 | |||
598 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
599 | return ecx; | ||
600 | } | ||
601 | static inline unsigned int cpuid_edx(unsigned int op) | ||
602 | { | ||
603 | unsigned int eax, ebx, ecx, edx; | ||
604 | |||
605 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
606 | return edx; | ||
607 | } | ||
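As a usage sketch: leaf 0 returns the maximum standard leaf in eax and the vendor string split across ebx, edx, ecx. The user-space version below leans on GCC's <cpuid.h> helper as a stand-in for the wrappers above (an assumption; the kernel calls __cpuid/native_cpuid directly):

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	__cpuid(0, eax, ebx, ecx, edx);		/* leaf 0 */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	printf("max standard leaf %u, vendor \"%s\"\n", eax, vendor);
	return 0;
}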
608 | |||
609 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
610 | static inline void rep_nop(void) | ||
611 | { | ||
612 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
613 | } | ||
614 | |||
615 | /* Stop speculative execution */ | ||
616 | static inline void sync_core(void) | ||
617 | { | ||
618 | int tmp; | ||
619 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | ||
620 | : "ebx", "ecx", "edx", "memory"); | ||
621 | } | ||
622 | |||
623 | #define cpu_relax() rep_nop() | ||
624 | |||
625 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
626 | unsigned long edx) | ||
627 | { | ||
628 | /* "monitor %eax,%ecx,%edx;" */ | ||
629 | asm volatile( | ||
630 | ".byte 0x0f,0x01,0xc8;" | ||
631 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
632 | } | ||
633 | |||
634 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
635 | { | ||
636 | /* "mwait %eax,%ecx;" */ | ||
637 | asm volatile( | ||
638 | ".byte 0x0f,0x01,0xc9;" | ||
639 | : :"a" (eax), "c" (ecx)); | ||
640 | } | ||
641 | |||
642 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
643 | { | ||
644 | /* "mwait %eax,%ecx;" */ | ||
645 | asm volatile( | ||
646 | "sti; .byte 0x0f,0x01,0xc9;" | ||
647 | : :"a" (eax), "c" (ecx)); | ||
648 | } | ||
649 | |||
650 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | ||
651 | |||
652 | extern int force_mwait; | ||
653 | |||
654 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | ||
655 | |||
656 | extern unsigned long boot_option_idle_override; | ||
657 | |||
658 | extern void enable_sep_cpu(void); | ||
659 | extern int sysenter_setup(void); | ||
660 | |||
661 | /* Defined in head.S */ | ||
662 | extern struct desc_ptr early_gdt_descr; | ||
663 | |||
664 | extern void cpu_set_gdt(int); | ||
665 | extern void switch_to_new_gdt(void); | ||
666 | extern void cpu_init(void); | ||
667 | extern void init_gdt(int cpu); | ||
668 | |||
669 | /* from system description table in BIOS. Mostly for MCA use, but | ||
670 | * others may find it useful. */ | ||
671 | extern unsigned int machine_id; | ||
672 | extern unsigned int machine_submodel_id; | ||
673 | extern unsigned int BIOS_revision; | ||
674 | extern unsigned int mca_pentium_flag; | ||
675 | |||
676 | /* Boot loader type from the setup header */ | ||
677 | extern int bootloader_type; | ||
678 | |||
679 | extern char ignore_fpu_irq; | ||
680 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
681 | |||
682 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | ||
683 | #define ARCH_HAS_PREFETCHW | ||
684 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
685 | |||
686 | #ifdef CONFIG_X86_32 | ||
687 | #define BASE_PREFETCH ASM_NOP4 | ||
688 | #define ARCH_HAS_PREFETCH | ||
689 | #else | ||
690 | #define BASE_PREFETCH "prefetcht0 (%1)" | ||
691 | #endif | ||
692 | |||
693 | /* Prefetch instructions for Pentium III and AMD Athlon */ | ||
694 | /* It's not worth caring about 3dnow! prefetches for the K6 | ||
695 | because they are microcoded there and very slow. | ||
696 | However, we currently don't do prefetches for pre-XP Athlons; | ||
697 | that should be fixed. */ | ||
698 | static inline void prefetch(const void *x) | ||
699 | { | ||
700 | alternative_input(BASE_PREFETCH, | ||
701 | "prefetchnta (%1)", | ||
702 | X86_FEATURE_XMM, | ||
703 | "r" (x)); | ||
704 | } | ||
705 | |||
706 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | ||
707 | spinlocks to avoid one state transition in the cache coherency protocol. */ | ||
708 | static inline void prefetchw(const void *x) | ||
709 | { | ||
710 | alternative_input(BASE_PREFETCH, | ||
711 | "prefetchw (%1)", | ||
712 | X86_FEATURE_3DNOW, | ||
713 | "r" (x)); | ||
714 | } | ||
715 | |||
716 | #define spin_lock_prefetch(x) prefetchw(x) | ||
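Outside the kernel the closest analogue is the compiler builtin, whose second argument separates a read prefetch from prefetchw's write-intent hint. A sketch, assuming GCC/Clang's __builtin_prefetch:

#include <stdio.h>

int main(void)
{
	static int data[1024];

	/* rw=1 requests write intent (like prefetchw); locality=3 asks to
	 * keep the line in all cache levels. */
	__builtin_prefetch(&data[512], 1, 3);
	data[512] = 42;
	printf("%d\n", data[512]);
	return 0;
}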
1 | #ifdef CONFIG_X86_32 | 717 | #ifdef CONFIG_X86_32 |
2 | # include "processor_32.h" | 718 | /* |
719 | * User space process size: 3GB (default). | ||
720 | */ | ||
721 | #define TASK_SIZE (PAGE_OFFSET) | ||
722 | |||
723 | #define INIT_THREAD { \ | ||
724 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
725 | .vm86_info = NULL, \ | ||
726 | .sysenter_cs = __KERNEL_CS, \ | ||
727 | .io_bitmap_ptr = NULL, \ | ||
728 | .fs = __KERNEL_PERCPU, \ | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * Note that the .io_bitmap member must be extra-big. This is because | ||
733 | * the CPU will access an additional byte beyond the end of the IO | ||
734 | * permission bitmap. The extra byte must be all 1 bits, and must | ||
735 | * be within the limit. | ||
736 | */ | ||
737 | #define INIT_TSS { \ | ||
738 | .x86_tss = { \ | ||
739 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
740 | .ss0 = __KERNEL_DS, \ | ||
741 | .ss1 = __KERNEL_CS, \ | ||
742 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | ||
743 | }, \ | ||
744 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ | ||
745 | } | ||
746 | |||
747 | #define start_thread(regs, new_eip, new_esp) do { \ | ||
748 | __asm__("movl %0,%%gs": :"r" (0)); \ | ||
749 | regs->fs = 0; \ | ||
750 | set_fs(USER_DS); \ | ||
751 | regs->ds = __USER_DS; \ | ||
752 | regs->es = __USER_DS; \ | ||
753 | regs->ss = __USER_DS; \ | ||
754 | regs->cs = __USER_CS; \ | ||
755 | regs->ip = new_eip; \ | ||
756 | regs->sp = new_esp; \ | ||
757 | } while (0) | ||
758 | |||
759 | |||
760 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | ||
761 | |||
762 | #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) | ||
763 | #define KSTK_TOP(info) \ | ||
764 | ({ \ | ||
765 | unsigned long *__ptr = (unsigned long *)(info); \ | ||
766 | (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ | ||
767 | }) | ||
768 | |||
769 | /* | ||
770 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. | ||
771 | * This is necessary to guarantee that the entire "struct pt_regs" | ||
772 | * is accessible even if the CPU hasn't stored the SS/ESP registers | ||
773 | * on the stack (interrupt gate does not save these registers | ||
774 | * when switching to the same priv ring). | ||
775 | * Therefore beware: accessing the ss/esp fields of the | ||
776 | * "struct pt_regs" is possible, but they may contain the | ||
777 | * completely wrong values. | ||
778 | */ | ||
779 | #define task_pt_regs(task) \ | ||
780 | ({ \ | ||
781 | struct pt_regs *__regs__; \ | ||
782 | __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ | ||
783 | __regs__ - 1; \ | ||
784 | }) | ||
785 | |||
786 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | ||
787 | |||
3 | #else | 788 | #else |
4 | # include "processor_64.h" | 789 | /* |
790 | * User space process size. 47bits minus one guard page. | ||
791 | */ | ||
792 | #define TASK_SIZE64 (0x800000000000UL - 4096) | ||
793 | |||
794 | /* This decides where the kernel will search for a free chunk of vm | ||
795 | * space during mmap's. | ||
796 | */ | ||
797 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ | ||
798 | 0xc0000000 : 0xFFFFe000) | ||
799 | |||
800 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ | ||
801 | IA32_PAGE_OFFSET : TASK_SIZE64) | ||
802 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ | ||
803 | IA32_PAGE_OFFSET : TASK_SIZE64) | ||
804 | |||
805 | #define INIT_THREAD { \ | ||
806 | .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
807 | } | ||
808 | |||
809 | #define INIT_TSS { \ | ||
810 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
811 | } | ||
812 | |||
813 | #define start_thread(regs, new_rip, new_rsp) do { \ | ||
814 | asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ | ||
815 | load_gs_index(0); \ | ||
816 | (regs)->ip = (new_rip); \ | ||
817 | (regs)->sp = (new_rsp); \ | ||
818 | write_pda(oldrsp, (new_rsp)); \ | ||
819 | (regs)->cs = __USER_CS; \ | ||
820 | (regs)->ss = __USER_DS; \ | ||
821 | (regs)->flags = 0x200; \ | ||
822 | set_fs(USER_DS); \ | ||
823 | } while (0) | ||
824 | |||
825 | /* | ||
826 | * Return saved PC of a blocked thread. | ||
827 | * What is this good for? It will always be the scheduler or ret_from_fork. | ||
828 | */ | ||
829 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) | ||
830 | |||
831 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) | ||
832 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | ||
833 | #endif /* CONFIG_X86_64 */ | ||
834 | |||
835 | /* This decides where the kernel will search for a free chunk of vm | ||
836 | * space during mmap's. | ||
837 | */ | ||
838 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
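Worked out for the common 32-bit case (PAGE_OFFSET = 0xC0000000): TASK_SIZE / 3 = 0x40000000, which is already page aligned, so mmap searches start a third of the way up the address space. A check in plain C:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long task_size = 0xC0000000UL;	/* 32-bit TASK_SIZE */
	printf("TASK_UNMAPPED_BASE = %#lx\n",
	       PAGE_ALIGN(task_size / 3));	/* 0x40000000 */
	return 0;
}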
839 | |||
840 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | ||
841 | |||
5 | #endif | 842 | #endif |
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h deleted file mode 100644 index 13976b086837..000000000000 --- a/include/asm-x86/processor_32.h +++ /dev/null | |||
@@ -1,786 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-i386/processor.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_I386_PROCESSOR_H | ||
8 | #define __ASM_I386_PROCESSOR_H | ||
9 | |||
10 | #include <asm/vm86.h> | ||
11 | #include <asm/math_emu.h> | ||
12 | #include <asm/segment.h> | ||
13 | #include <asm/page.h> | ||
14 | #include <asm/types.h> | ||
15 | #include <asm/sigcontext.h> | ||
16 | #include <asm/cpufeature.h> | ||
17 | #include <asm/msr.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <linux/cache.h> | ||
20 | #include <linux/threads.h> | ||
21 | #include <asm/percpu.h> | ||
22 | #include <linux/cpumask.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <asm/processor-flags.h> | ||
25 | |||
26 | /* flag for disabling the tsc */ | ||
27 | extern int tsc_disable; | ||
28 | |||
29 | struct desc_struct { | ||
30 | unsigned long a,b; | ||
31 | }; | ||
32 | |||
33 | #define desc_empty(desc) \ | ||
34 | (!((desc)->a | (desc)->b)) | ||
35 | |||
36 | #define desc_equal(desc1, desc2) \ | ||
37 | (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) | ||
38 | /* | ||
39 | * Default implementation of macro that returns current | ||
40 | * instruction pointer ("program counter"). | ||
41 | */ | ||
42 | #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) | ||
43 | |||
44 | /* | ||
45 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
46 | * Members of this structure are referenced in head.S, so think twice | ||
47 | * before touching them. [mj] | ||
48 | */ | ||
49 | |||
50 | struct cpuinfo_x86 { | ||
51 | __u8 x86; /* CPU family */ | ||
52 | __u8 x86_vendor; /* CPU vendor */ | ||
53 | __u8 x86_model; | ||
54 | __u8 x86_mask; | ||
55 | char wp_works_ok; /* It doesn't on 386's */ | ||
56 | char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ | ||
57 | char hard_math; | ||
58 | char rfu; | ||
59 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | ||
60 | unsigned long x86_capability[NCAPINTS]; | ||
61 | char x86_vendor_id[16]; | ||
62 | char x86_model_id[64]; | ||
63 | int x86_cache_size; /* in KB - valid for CPUS which support this | ||
64 | call */ | ||
65 | int x86_cache_alignment; /* In bytes */ | ||
66 | char fdiv_bug; | ||
67 | char f00f_bug; | ||
68 | char coma_bug; | ||
69 | char pad0; | ||
70 | int x86_power; | ||
71 | unsigned long loops_per_jiffy; | ||
72 | #ifdef CONFIG_SMP | ||
73 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
74 | #endif | ||
75 | unsigned char x86_max_cores; /* cpuid returned max cores value */ | ||
76 | unsigned char apicid; | ||
77 | unsigned short x86_clflush_size; | ||
78 | #ifdef CONFIG_SMP | ||
79 | unsigned char booted_cores; /* number of cores as seen by OS */ | ||
80 | __u8 phys_proc_id; /* Physical processor id. */ | ||
81 | __u8 cpu_core_id; /* Core id */ | ||
82 | __u8 cpu_index; /* index into per_cpu list */ | ||
83 | #endif | ||
84 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | ||
85 | |||
86 | #define X86_VENDOR_INTEL 0 | ||
87 | #define X86_VENDOR_CYRIX 1 | ||
88 | #define X86_VENDOR_AMD 2 | ||
89 | #define X86_VENDOR_UMC 3 | ||
90 | #define X86_VENDOR_NEXGEN 4 | ||
91 | #define X86_VENDOR_CENTAUR 5 | ||
92 | #define X86_VENDOR_TRANSMETA 7 | ||
93 | #define X86_VENDOR_NSC 8 | ||
94 | #define X86_VENDOR_NUM 9 | ||
95 | #define X86_VENDOR_UNKNOWN 0xff | ||
96 | |||
97 | /* | ||
98 | * capabilities of CPUs | ||
99 | */ | ||
100 | |||
101 | extern struct cpuinfo_x86 boot_cpu_data; | ||
102 | extern struct cpuinfo_x86 new_cpu_data; | ||
103 | extern struct tss_struct doublefault_tss; | ||
104 | DECLARE_PER_CPU(struct tss_struct, init_tss); | ||
105 | |||
106 | #ifdef CONFIG_SMP | ||
107 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | ||
108 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | ||
109 | #define current_cpu_data cpu_data(smp_processor_id()) | ||
110 | #else | ||
111 | #define cpu_data(cpu) boot_cpu_data | ||
112 | #define current_cpu_data boot_cpu_data | ||
113 | #endif | ||
114 | |||
115 | /* | ||
116 | * the following now lives in the per cpu area: | ||
117 | * extern int cpu_llc_id[NR_CPUS]; | ||
118 | */ | ||
119 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
120 | extern char ignore_fpu_irq; | ||
121 | |||
122 | void __init cpu_detect(struct cpuinfo_x86 *c); | ||
123 | |||
124 | extern void identify_boot_cpu(void); | ||
125 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | ||
126 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
127 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
128 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
129 | extern unsigned short num_cache_leaves; | ||
130 | |||
131 | #ifdef CONFIG_X86_HT | ||
132 | extern void detect_ht(struct cpuinfo_x86 *c); | ||
133 | #else | ||
134 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
135 | #endif | ||
136 | |||
137 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | ||
138 | unsigned int *ecx, unsigned int *edx) | ||
139 | { | ||
140 | /* ecx is often an input as well as an output. */ | ||
141 | __asm__("cpuid" | ||
142 | : "=a" (*eax), | ||
143 | "=b" (*ebx), | ||
144 | "=c" (*ecx), | ||
145 | "=d" (*edx) | ||
146 | : "0" (*eax), "2" (*ecx)); | ||
147 | } | ||
148 | |||
149 | #define load_cr3(pgdir) write_cr3(__pa(pgdir)) | ||
150 | |||
151 | /* | ||
152 | * Save the cr4 feature set we're using (i.e. | ||
153 | * Pentium 4MB enable and PPro Global page | ||
154 | * enable), so that any CPUs that boot up | ||
155 | * after us can get the correct flags. | ||
156 | */ | ||
157 | extern unsigned long mmu_cr4_features; | ||
158 | |||
159 | static inline void set_in_cr4 (unsigned long mask) | ||
160 | { | ||
161 | unsigned cr4; | ||
162 | mmu_cr4_features |= mask; | ||
163 | cr4 = read_cr4(); | ||
164 | cr4 |= mask; | ||
165 | write_cr4(cr4); | ||
166 | } | ||
167 | |||
168 | static inline void clear_in_cr4 (unsigned long mask) | ||
169 | { | ||
170 | unsigned cr4; | ||
171 | mmu_cr4_features &= ~mask; | ||
172 | cr4 = read_cr4(); | ||
173 | cr4 &= ~mask; | ||
174 | write_cr4(cr4); | ||
175 | } | ||
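A hedged usage sketch for the two helpers above: boot code latches a CR4 feature into mmu_cr4_features so CPUs that come up later inherit it. X86_CR4_PGE and cpu_has_pge are assumed from <asm/processor-flags.h> and <asm/cpufeature.h>:

	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);	/* also recorded in mmu_cr4_features */
		__flush_tlb_all();		/* global-page behavior changed; flush */
	}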
176 | |||
177 | /* Stop speculative execution */ | ||
178 | static inline void sync_core(void) | ||
179 | { | ||
180 | int tmp; | ||
181 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); | ||
182 | } | ||
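The cpuid instruction is used here purely for its serializing side effect. A minimal sketch of the classic use after cross-modifying code; the function and its arguments are hypothetical, and it assumes the target bytes are writable:

static void patch_insn_sketch(void *addr, const void *newcode, size_t len)
{
	memcpy(addr, newcode, len);	/* rewrite the instruction bytes */
	sync_core();			/* serialize before the new bytes can run */
}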
183 | |||
184 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
185 | unsigned long edx) | ||
186 | { | ||
187 | /* "monitor %eax,%ecx,%edx;" */ | ||
188 | asm volatile( | ||
189 | ".byte 0x0f,0x01,0xc8;" | ||
190 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
191 | } | ||
192 | |||
193 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
194 | { | ||
195 | /* "mwait %eax,%ecx;" */ | ||
196 | asm volatile( | ||
197 | ".byte 0x0f,0x01,0xc9;" | ||
198 | : :"a" (eax), "c" (ecx)); | ||
199 | } | ||
200 | |||
201 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | ||
202 | |||
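For context, a minimal idle-style sketch built on the two helpers above; need_resched() and smp_mb() are assumed from the scheduler and barrier headers, and this is illustrative rather than the kernel's actual idle loop:

static void mwait_idle_sketch(void)
{
	int trigger = 0;

	__monitor(&trigger, 0, 0);	/* arm monitoring on this cache line */
	smp_mb();			/* order the arm against the check below */
	if (!need_resched())
		__mwait(0, 0);		/* sleep until the line is written or an IRQ fires */
}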
203 | /* From the system description table in the BIOS. Mostly for MCA use, but | ||
204 | others may find it useful. */ | ||
205 | extern unsigned int machine_id; | ||
206 | extern unsigned int machine_submodel_id; | ||
207 | extern unsigned int BIOS_revision; | ||
208 | extern unsigned int mca_pentium_flag; | ||
209 | |||
210 | /* Boot loader type from the setup header */ | ||
211 | extern int bootloader_type; | ||
212 | |||
213 | /* | ||
214 | * User space process size: 3GB (default). | ||
215 | */ | ||
216 | #define TASK_SIZE (PAGE_OFFSET) | ||
217 | |||
218 | /* This decides where the kernel will search for a free chunk of vm | ||
219 | * space during mmap's. | ||
220 | */ | ||
221 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
222 | |||
223 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
224 | |||
225 | extern void hard_disable_TSC(void); | ||
226 | extern void disable_TSC(void); | ||
227 | extern void hard_enable_TSC(void); | ||
228 | |||
229 | /* | ||
230 | * Size of io_bitmap. | ||
231 | */ | ||
232 | #define IO_BITMAP_BITS 65536 | ||
233 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | ||
234 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | ||
235 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) | ||
236 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | ||
237 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | ||
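Worked out for a 32-bit build (sizeof(long) == 4), these definitions come to:

	IO_BITMAP_BYTES = 65536 / 8 = 8192 bytes
	IO_BITMAP_LONGS = 8192 / 4  = 2048 longs

so the io_bitmap array in tss_struct below occupies 8 KB plus one guard long.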
238 | |||
239 | struct i387_fsave_struct { | ||
240 | long cwd; | ||
241 | long swd; | ||
242 | long twd; | ||
243 | long fip; | ||
244 | long fcs; | ||
245 | long foo; | ||
246 | long fos; | ||
247 | long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
248 | long status; /* software status information */ | ||
249 | }; | ||
250 | |||
251 | struct i387_fxsave_struct { | ||
252 | unsigned short cwd; | ||
253 | unsigned short swd; | ||
254 | unsigned short twd; | ||
255 | unsigned short fop; | ||
256 | long fip; | ||
257 | long fcs; | ||
258 | long foo; | ||
259 | long fos; | ||
260 | long mxcsr; | ||
261 | long mxcsr_mask; | ||
262 | long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
263 | long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ | ||
264 | long padding[56]; | ||
265 | } __attribute__ ((aligned (16))); | ||
266 | |||
267 | struct i387_soft_struct { | ||
268 | long cwd; | ||
269 | long swd; | ||
270 | long twd; | ||
271 | long fip; | ||
272 | long fcs; | ||
273 | long foo; | ||
274 | long fos; | ||
275 | long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | ||
276 | unsigned char ftop, changed, lookahead, no_update, rm, alimit; | ||
277 | struct info *info; | ||
278 | unsigned long entry_eip; | ||
279 | }; | ||
280 | |||
281 | union i387_union { | ||
282 | struct i387_fsave_struct fsave; | ||
283 | struct i387_fxsave_struct fxsave; | ||
284 | struct i387_soft_struct soft; | ||
285 | }; | ||
286 | |||
287 | typedef struct { | ||
288 | unsigned long seg; | ||
289 | } mm_segment_t; | ||
290 | |||
291 | struct thread_struct; | ||
292 | |||
293 | /* This is the TSS defined by the hardware. */ | ||
294 | struct i386_hw_tss { | ||
295 | unsigned short back_link,__blh; | ||
296 | unsigned long esp0; | ||
297 | unsigned short ss0,__ss0h; | ||
298 | unsigned long esp1; | ||
299 | unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ | ||
300 | unsigned long esp2; | ||
301 | unsigned short ss2,__ss2h; | ||
302 | unsigned long __cr3; | ||
303 | unsigned long eip; | ||
304 | unsigned long eflags; | ||
305 | unsigned long eax,ecx,edx,ebx; | ||
306 | unsigned long esp; | ||
307 | unsigned long ebp; | ||
308 | unsigned long esi; | ||
309 | unsigned long edi; | ||
310 | unsigned short es, __esh; | ||
311 | unsigned short cs, __csh; | ||
312 | unsigned short ss, __ssh; | ||
313 | unsigned short ds, __dsh; | ||
314 | unsigned short fs, __fsh; | ||
315 | unsigned short gs, __gsh; | ||
316 | unsigned short ldt, __ldth; | ||
317 | unsigned short trace, io_bitmap_base; | ||
318 | } __attribute__((packed)); | ||
319 | |||
320 | struct tss_struct { | ||
321 | struct i386_hw_tss x86_tss; | ||
322 | |||
323 | /* | ||
324 | * The extra 1 is there because the CPU will access an | ||
325 | * additional byte beyond the end of the IO permission | ||
326 | * bitmap. The extra byte must be all 1 bits, and must | ||
327 | * be within the limit. | ||
328 | */ | ||
329 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | ||
330 | /* | ||
331 | * Cache the current maximum and the last task that used the bitmap: | ||
332 | */ | ||
333 | unsigned long io_bitmap_max; | ||
334 | struct thread_struct *io_bitmap_owner; | ||
335 | /* | ||
336 | * pads the TSS to be cacheline-aligned (size is 0x100) | ||
337 | */ | ||
338 | unsigned long __cacheline_filler[35]; | ||
339 | /* | ||
340 | * .. and then another 0x100 bytes for emergency kernel stack | ||
341 | */ | ||
342 | unsigned long stack[64]; | ||
343 | } __attribute__((packed)); | ||
344 | |||
345 | #define ARCH_MIN_TASKALIGN 16 | ||
346 | |||
347 | struct thread_struct { | ||
348 | /* cached TLS descriptors. */ | ||
349 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
350 | unsigned long esp0; | ||
351 | unsigned long sysenter_cs; | ||
352 | unsigned long eip; | ||
353 | unsigned long esp; | ||
354 | unsigned long fs; | ||
355 | unsigned long gs; | ||
356 | /* Hardware debugging registers */ | ||
357 | unsigned long debugreg[8]; /* %%db0-7 debug registers */ | ||
358 | /* fault info */ | ||
359 | unsigned long cr2, trap_no, error_code; | ||
360 | /* floating point info */ | ||
361 | union i387_union i387; | ||
362 | /* virtual 86 mode info */ | ||
363 | struct vm86_struct __user * vm86_info; | ||
364 | unsigned long screen_bitmap; | ||
365 | unsigned long v86flags, v86mask, saved_esp0; | ||
366 | unsigned int saved_fs, saved_gs; | ||
367 | /* IO permissions */ | ||
368 | unsigned long *io_bitmap_ptr; | ||
369 | unsigned long iopl; | ||
370 | /* max allowed port in the bitmap, in bytes: */ | ||
371 | unsigned long io_bitmap_max; | ||
372 | }; | ||
373 | |||
374 | #define INIT_THREAD { \ | ||
375 | .esp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
376 | .vm86_info = NULL, \ | ||
377 | .sysenter_cs = __KERNEL_CS, \ | ||
378 | .io_bitmap_ptr = NULL, \ | ||
379 | .fs = __KERNEL_PERCPU, \ | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Note that the .io_bitmap member must be extra-big. This is because | ||
384 | * the CPU will access an additional byte beyond the end of the IO | ||
385 | * permission bitmap. The extra byte must be all 1 bits, and must | ||
386 | * be within the limit. | ||
387 | */ | ||
388 | #define INIT_TSS { \ | ||
389 | .x86_tss = { \ | ||
390 | .esp0 = sizeof(init_stack) + (long)&init_stack, \ | ||
391 | .ss0 = __KERNEL_DS, \ | ||
392 | .ss1 = __KERNEL_CS, \ | ||
393 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | ||
394 | }, \ | ||
395 | .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ | ||
396 | } | ||
397 | |||
398 | #define start_thread(regs, new_eip, new_esp) do { \ | ||
399 | __asm__("movl %0,%%gs": :"r" (0)); \ | ||
400 | regs->xfs = 0; \ | ||
401 | set_fs(USER_DS); \ | ||
402 | regs->xds = __USER_DS; \ | ||
403 | regs->xes = __USER_DS; \ | ||
404 | regs->xss = __USER_DS; \ | ||
405 | regs->xcs = __USER_CS; \ | ||
406 | regs->eip = new_eip; \ | ||
407 | regs->esp = new_esp; \ | ||
408 | } while (0) | ||
409 | |||
410 | /* Forward declaration, a strange C thing */ | ||
411 | struct task_struct; | ||
412 | struct mm_struct; | ||
413 | |||
414 | /* Free all resources held by a thread. */ | ||
415 | extern void release_thread(struct task_struct *); | ||
416 | |||
417 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
418 | extern void prepare_to_copy(struct task_struct *tsk); | ||
419 | |||
420 | /* | ||
421 | * create a kernel thread without removing it from tasklists | ||
422 | */ | ||
423 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | ||
424 | |||
425 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | ||
426 | void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); | ||
427 | |||
428 | unsigned long get_wchan(struct task_struct *p); | ||
429 | |||
430 | #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) | ||
431 | #define KSTK_TOP(info) \ | ||
432 | ({ \ | ||
433 | unsigned long *__ptr = (unsigned long *)(info); \ | ||
434 | (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ | ||
435 | }) | ||
436 | |||
437 | /* | ||
438 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. | ||
439 | * This is necessary to guarantee that the entire "struct pt_regs" | ||
440 | * is accessible even if the CPU hasn't stored the SS/ESP registers | ||
441 | * on the stack (interrupt gate does not save these registers | ||
442 | * when switching to the same priv ring). | ||
443 | * Therefore beware: accessing the xss/esp fields of the | ||
444 | * "struct pt_regs" is possible, but they may contain the | ||
445 | * completely wrong values. | ||
446 | */ | ||
447 | #define task_pt_regs(task) \ | ||
448 | ({ \ | ||
449 | struct pt_regs *__regs__; \ | ||
450 | __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ | ||
451 | __regs__ - 1; \ | ||
452 | }) | ||
453 | |||
454 | #define KSTK_EIP(task) (task_pt_regs(task)->eip) | ||
455 | #define KSTK_ESP(task) (task_pt_regs(task)->esp) | ||
456 | |||
457 | |||
458 | struct microcode_header { | ||
459 | unsigned int hdrver; | ||
460 | unsigned int rev; | ||
461 | unsigned int date; | ||
462 | unsigned int sig; | ||
463 | unsigned int cksum; | ||
464 | unsigned int ldrver; | ||
465 | unsigned int pf; | ||
466 | unsigned int datasize; | ||
467 | unsigned int totalsize; | ||
468 | unsigned int reserved[3]; | ||
469 | }; | ||
470 | |||
471 | struct microcode { | ||
472 | struct microcode_header hdr; | ||
473 | unsigned int bits[0]; | ||
474 | }; | ||
475 | |||
476 | typedef struct microcode microcode_t; | ||
477 | typedef struct microcode_header microcode_header_t; | ||
478 | |||
479 | /* microcode format is extended from prescott processors */ | ||
480 | struct extended_signature { | ||
481 | unsigned int sig; | ||
482 | unsigned int pf; | ||
483 | unsigned int cksum; | ||
484 | }; | ||
485 | |||
486 | struct extended_sigtable { | ||
487 | unsigned int count; | ||
488 | unsigned int cksum; | ||
489 | unsigned int reserved[3]; | ||
490 | struct extended_signature sigs[0]; | ||
491 | }; | ||
492 | |||
493 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
494 | static inline void rep_nop(void) | ||
495 | { | ||
496 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
497 | } | ||
498 | |||
499 | #define cpu_relax() rep_nop() | ||
500 | |||
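cpu_relax() is the idiomatic body of a busy-wait loop. A minimal sketch; the flag and its writer are hypothetical:

static void wait_for_flag_sketch(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE: saves power, yields to an SMT sibling */
}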
501 | static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
502 | { | ||
503 | tss->x86_tss.esp0 = thread->esp0; | ||
504 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
505 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | ||
506 | tss->x86_tss.ss1 = thread->sysenter_cs; | ||
507 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
508 | } | ||
509 | } | ||
510 | |||
511 | |||
512 | static inline unsigned long native_get_debugreg(int regno) | ||
513 | { | ||
514 | unsigned long val = 0; /* Damn you, gcc! */ | ||
515 | |||
516 | switch (regno) { | ||
517 | case 0: | ||
518 | asm("movl %%db0, %0" :"=r" (val)); break; | ||
519 | case 1: | ||
520 | asm("movl %%db1, %0" :"=r" (val)); break; | ||
521 | case 2: | ||
522 | asm("movl %%db2, %0" :"=r" (val)); break; | ||
523 | case 3: | ||
524 | asm("movl %%db3, %0" :"=r" (val)); break; | ||
525 | case 6: | ||
526 | asm("movl %%db6, %0" :"=r" (val)); break; | ||
527 | case 7: | ||
528 | asm("movl %%db7, %0" :"=r" (val)); break; | ||
529 | default: | ||
530 | BUG(); | ||
531 | } | ||
532 | return val; | ||
533 | } | ||
534 | |||
535 | static inline void native_set_debugreg(int regno, unsigned long value) | ||
536 | { | ||
537 | switch (regno) { | ||
538 | case 0: | ||
539 | asm("movl %0,%%db0" : /* no output */ :"r" (value)); | ||
540 | break; | ||
541 | case 1: | ||
542 | asm("movl %0,%%db1" : /* no output */ :"r" (value)); | ||
543 | break; | ||
544 | case 2: | ||
545 | asm("movl %0,%%db2" : /* no output */ :"r" (value)); | ||
546 | break; | ||
547 | case 3: | ||
548 | asm("movl %0,%%db3" : /* no output */ :"r" (value)); | ||
549 | break; | ||
550 | case 6: | ||
551 | asm("movl %0,%%db6" : /* no output */ :"r" (value)); | ||
552 | break; | ||
553 | case 7: | ||
554 | asm("movl %0,%%db7" : /* no output */ :"r" (value)); | ||
555 | break; | ||
556 | default: | ||
557 | BUG(); | ||
558 | } | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * Set IOPL bits in EFLAGS from given mask | ||
563 | */ | ||
564 | static inline void native_set_iopl_mask(unsigned mask) | ||
565 | { | ||
566 | unsigned int reg; | ||
567 | __asm__ __volatile__ ("pushfl;" | ||
568 | "popl %0;" | ||
569 | "andl %1, %0;" | ||
570 | "orl %2, %0;" | ||
571 | "pushl %0;" | ||
572 | "popfl" | ||
573 | : "=&r" (reg) | ||
574 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
575 | } | ||
576 | |||
577 | #ifdef CONFIG_PARAVIRT | ||
578 | #include <asm/paravirt.h> | ||
579 | #else | ||
580 | #define paravirt_enabled() 0 | ||
581 | #define __cpuid native_cpuid | ||
582 | |||
583 | static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
584 | { | ||
585 | native_load_esp0(tss, thread); | ||
586 | } | ||
587 | |||
588 | /* | ||
589 | * These special macros can be used to get or set a debugging register | ||
590 | */ | ||
591 | #define get_debugreg(var, register) \ | ||
592 | (var) = native_get_debugreg(register) | ||
593 | #define set_debugreg(value, register) \ | ||
594 | native_set_debugreg(register, value) | ||
595 | |||
596 | #define set_iopl_mask native_set_iopl_mask | ||
597 | #endif /* CONFIG_PARAVIRT */ | ||
598 | |||
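A small illustration of the debug-register accessors defined above; treating DR7 bit 0 (the DR0 local-enable bit) as the thing to set is an assumption about the caller's policy:

static void enable_dr0_sketch(void)
{
	unsigned long dr7;

	get_debugreg(dr7, 7);		/* fetch the debug control register */
	set_debugreg(dr7 | 0x1, 7);	/* set the DR0 local-enable bit */
}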
599 | /* | ||
600 | * Generic CPUID function | ||
601 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | ||
602 | * resulting in stale register contents being returned. | ||
603 | */ | ||
604 | static inline void cpuid(unsigned int op, | ||
605 | unsigned int *eax, unsigned int *ebx, | ||
606 | unsigned int *ecx, unsigned int *edx) | ||
607 | { | ||
608 | *eax = op; | ||
609 | *ecx = 0; | ||
610 | __cpuid(eax, ebx, ecx, edx); | ||
611 | } | ||
612 | |||
613 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
614 | static inline void cpuid_count(unsigned int op, int count, | ||
615 | unsigned int *eax, unsigned int *ebx, | ||
616 | unsigned int *ecx, unsigned int *edx) | ||
617 | { | ||
618 | *eax = op; | ||
619 | *ecx = count; | ||
620 | __cpuid(eax, ebx, ecx, edx); | ||
621 | } | ||
622 | |||
623 | /* | ||
624 | * CPUID functions returning a single datum | ||
625 | */ | ||
626 | static inline unsigned int cpuid_eax(unsigned int op) | ||
627 | { | ||
628 | unsigned int eax, ebx, ecx, edx; | ||
629 | |||
630 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
631 | return eax; | ||
632 | } | ||
633 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
634 | { | ||
635 | unsigned int eax, ebx, ecx, edx; | ||
636 | |||
637 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
638 | return ebx; | ||
639 | } | ||
640 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
641 | { | ||
642 | unsigned int eax, ebx, ecx, edx; | ||
643 | |||
644 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
645 | return ecx; | ||
646 | } | ||
647 | static inline unsigned int cpuid_edx(unsigned int op) | ||
648 | { | ||
649 | unsigned int eax, ebx, ecx, edx; | ||
650 | |||
651 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
652 | return edx; | ||
653 | } | ||
654 | |||
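Example use of the single-datum helpers: testing a well-known feature bit. Per the Intel manuals, CPUID leaf 1 reports SSE in EDX bit 25; the helper name is illustrative:

static inline int has_sse_sketch(void)
{
	return (cpuid_edx(1) >> 25) & 1;	/* CPUID.01H:EDX[25] = SSE */
}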
655 | /* generic versions from gas */ | ||
656 | #define GENERIC_NOP1 ".byte 0x90\n" | ||
657 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | ||
658 | #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" | ||
659 | #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" | ||
660 | #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 | ||
661 | #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" | ||
662 | #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" | ||
663 | #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 | ||
664 | |||
665 | /* Opteron nops */ | ||
666 | #define K8_NOP1 GENERIC_NOP1 | ||
667 | #define K8_NOP2 ".byte 0x66,0x90\n" | ||
668 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" | ||
669 | #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" | ||
670 | #define K8_NOP5 K8_NOP3 K8_NOP2 | ||
671 | #define K8_NOP6 K8_NOP3 K8_NOP3 | ||
672 | #define K8_NOP7 K8_NOP4 K8_NOP3 | ||
673 | #define K8_NOP8 K8_NOP4 K8_NOP4 | ||
674 | |||
675 | /* K7 nops */ | ||
676 | /* uses eax dependencies (arbitrary choice) */ | ||
677 | #define K7_NOP1 GENERIC_NOP1 | ||
678 | #define K7_NOP2 ".byte 0x8b,0xc0\n" | ||
679 | #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" | ||
680 | #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" | ||
681 | #define K7_NOP5 K7_NOP4 ASM_NOP1 | ||
682 | #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" | ||
683 | #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" | ||
684 | #define K7_NOP8 K7_NOP7 ASM_NOP1 | ||
685 | |||
686 | /* P6 nops */ | ||
687 | /* uses eax dependencies (Intel-recommended choice) */ | ||
688 | #define P6_NOP1 GENERIC_NOP1 | ||
689 | #define P6_NOP2 ".byte 0x66,0x90\n" | ||
690 | #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" | ||
691 | #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" | ||
692 | #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" | ||
693 | #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" | ||
694 | #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" | ||
695 | #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" | ||
696 | |||
697 | #ifdef CONFIG_MK8 | ||
698 | #define ASM_NOP1 K8_NOP1 | ||
699 | #define ASM_NOP2 K8_NOP2 | ||
700 | #define ASM_NOP3 K8_NOP3 | ||
701 | #define ASM_NOP4 K8_NOP4 | ||
702 | #define ASM_NOP5 K8_NOP5 | ||
703 | #define ASM_NOP6 K8_NOP6 | ||
704 | #define ASM_NOP7 K8_NOP7 | ||
705 | #define ASM_NOP8 K8_NOP8 | ||
706 | #elif defined(CONFIG_MK7) | ||
707 | #define ASM_NOP1 K7_NOP1 | ||
708 | #define ASM_NOP2 K7_NOP2 | ||
709 | #define ASM_NOP3 K7_NOP3 | ||
710 | #define ASM_NOP4 K7_NOP4 | ||
711 | #define ASM_NOP5 K7_NOP5 | ||
712 | #define ASM_NOP6 K7_NOP6 | ||
713 | #define ASM_NOP7 K7_NOP7 | ||
714 | #define ASM_NOP8 K7_NOP8 | ||
715 | #elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \ | ||
716 | defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \ | ||
717 | defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4) | ||
718 | #define ASM_NOP1 P6_NOP1 | ||
719 | #define ASM_NOP2 P6_NOP2 | ||
720 | #define ASM_NOP3 P6_NOP3 | ||
721 | #define ASM_NOP4 P6_NOP4 | ||
722 | #define ASM_NOP5 P6_NOP5 | ||
723 | #define ASM_NOP6 P6_NOP6 | ||
724 | #define ASM_NOP7 P6_NOP7 | ||
725 | #define ASM_NOP8 P6_NOP8 | ||
726 | #else | ||
727 | #define ASM_NOP1 GENERIC_NOP1 | ||
728 | #define ASM_NOP2 GENERIC_NOP2 | ||
729 | #define ASM_NOP3 GENERIC_NOP3 | ||
730 | #define ASM_NOP4 GENERIC_NOP4 | ||
731 | #define ASM_NOP5 GENERIC_NOP5 | ||
732 | #define ASM_NOP6 GENERIC_NOP6 | ||
733 | #define ASM_NOP7 GENERIC_NOP7 | ||
734 | #define ASM_NOP8 GENERIC_NOP8 | ||
735 | #endif | ||
736 | |||
737 | #define ASM_NOP_MAX 8 | ||
738 | |||
739 | /* Prefetch instructions for Pentium III and AMD Athlon */ | ||
740 | /* 3dnow! prefetches are not worth using on the K6 | ||
741 | because they are microcoded there and very slow. | ||
742 | However, we currently don't do prefetches for pre-XP Athlons; | ||
743 | that should be fixed. */ | ||
744 | #define ARCH_HAS_PREFETCH | ||
745 | static inline void prefetch(const void *x) | ||
746 | { | ||
747 | alternative_input(ASM_NOP4, | ||
748 | "prefetchnta (%1)", | ||
749 | X86_FEATURE_XMM, | ||
750 | "r" (x)); | ||
751 | } | ||
752 | |||
753 | #define ARCH_HAS_PREFETCH | ||
754 | #define ARCH_HAS_PREFETCHW | ||
755 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
756 | |||
757 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | ||
758 | spinlocks to avoid one state transition in the cache coherency protocol. */ | ||
759 | static inline void prefetchw(const void *x) | ||
760 | { | ||
761 | alternative_input(ASM_NOP4, | ||
762 | "prefetchw (%1)", | ||
763 | X86_FEATURE_3DNOW, | ||
764 | "r" (x)); | ||
765 | } | ||
766 | #define spin_lock_prefetch(x) prefetchw(x) | ||
767 | |||
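Sketch of the intended pattern: start the exclusive-state fetch of the lock's cache line early, overlap it with independent work, then take the lock. The struct here is hypothetical:

struct rq_sketch {
	spinlock_t lock;
	/* ... */
};

static void locked_work_sketch(struct rq_sketch *rq)
{
	spin_lock_prefetch(&rq->lock);	/* begin the exclusive fetch early */
	/* ... a few cycles of independent work overlap the fetch ... */
	spin_lock(&rq->lock);
	/* ... critical section ... */
	spin_unlock(&rq->lock);
}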
768 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | ||
769 | |||
770 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
771 | |||
772 | extern unsigned long boot_option_idle_override; | ||
773 | extern void enable_sep_cpu(void); | ||
774 | extern int sysenter_setup(void); | ||
775 | |||
776 | /* Defined in head.S */ | ||
777 | extern struct Xgt_desc_struct early_gdt_descr; | ||
778 | |||
779 | extern void cpu_set_gdt(int); | ||
780 | extern void switch_to_new_gdt(void); | ||
781 | extern void cpu_init(void); | ||
782 | extern void init_gdt(int cpu); | ||
783 | |||
784 | extern int force_mwait; | ||
785 | |||
786 | #endif /* __ASM_I386_PROCESSOR_H */ | ||
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h deleted file mode 100644 index e4f19970a82b..000000000000 --- a/include/asm-x86/processor_64.h +++ /dev/null | |||
@@ -1,452 +0,0 @@ | |||
1 | /* | ||
2 | * include/asm-x86_64/processor.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_X86_64_PROCESSOR_H | ||
8 | #define __ASM_X86_64_PROCESSOR_H | ||
9 | |||
10 | #include <asm/segment.h> | ||
11 | #include <asm/page.h> | ||
12 | #include <asm/types.h> | ||
13 | #include <asm/sigcontext.h> | ||
14 | #include <asm/cpufeature.h> | ||
15 | #include <linux/threads.h> | ||
16 | #include <asm/msr.h> | ||
17 | #include <asm/current.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <asm/mmsegment.h> | ||
20 | #include <asm/percpu.h> | ||
21 | #include <linux/personality.h> | ||
22 | #include <linux/cpumask.h> | ||
23 | #include <asm/processor-flags.h> | ||
24 | |||
25 | #define TF_MASK 0x00000100 | ||
26 | #define IF_MASK 0x00000200 | ||
27 | #define IOPL_MASK 0x00003000 | ||
28 | #define NT_MASK 0x00004000 | ||
29 | #define VM_MASK 0x00020000 | ||
30 | #define AC_MASK 0x00040000 | ||
31 | #define VIF_MASK 0x00080000 /* virtual interrupt flag */ | ||
32 | #define VIP_MASK 0x00100000 /* virtual interrupt pending */ | ||
33 | #define ID_MASK 0x00200000 | ||
34 | |||
35 | #define desc_empty(desc) \ | ||
36 | (!((desc)->a | (desc)->b)) | ||
37 | |||
38 | #define desc_equal(desc1, desc2) \ | ||
39 | (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) | ||
40 | |||
41 | /* | ||
42 | * Default implementation of macro that returns current | ||
43 | * instruction pointer ("program counter"). | ||
44 | */ | ||
45 | #define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; }) | ||
46 | |||
47 | /* | ||
48 | * CPU type and hardware bug flags. Kept separately for each CPU. | ||
49 | */ | ||
50 | |||
51 | struct cpuinfo_x86 { | ||
52 | __u8 x86; /* CPU family */ | ||
53 | __u8 x86_vendor; /* CPU vendor */ | ||
54 | __u8 x86_model; | ||
55 | __u8 x86_mask; | ||
56 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | ||
57 | __u32 x86_capability[NCAPINTS]; | ||
58 | char x86_vendor_id[16]; | ||
59 | char x86_model_id[64]; | ||
60 | int x86_cache_size; /* in KB */ | ||
61 | int x86_clflush_size; | ||
62 | int x86_cache_alignment; | ||
63 | int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined */ | ||
64 | __u8 x86_virt_bits, x86_phys_bits; | ||
65 | __u8 x86_max_cores; /* cpuid returned max cores value */ | ||
66 | __u32 x86_power; | ||
67 | __u32 extended_cpuid_level; /* Max extended CPUID function supported */ | ||
68 | unsigned long loops_per_jiffy; | ||
69 | #ifdef CONFIG_SMP | ||
70 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
71 | #endif | ||
72 | __u8 apicid; | ||
73 | #ifdef CONFIG_SMP | ||
74 | __u8 booted_cores; /* number of cores as seen by OS */ | ||
75 | __u8 phys_proc_id; /* Physical Processor id. */ | ||
76 | __u8 cpu_core_id; /* Core id. */ | ||
77 | __u8 cpu_index; /* index into per_cpu list */ | ||
78 | #endif | ||
79 | } ____cacheline_aligned; | ||
80 | |||
81 | #define X86_VENDOR_INTEL 0 | ||
82 | #define X86_VENDOR_CYRIX 1 | ||
83 | #define X86_VENDOR_AMD 2 | ||
84 | #define X86_VENDOR_UMC 3 | ||
85 | #define X86_VENDOR_NEXGEN 4 | ||
86 | #define X86_VENDOR_CENTAUR 5 | ||
87 | #define X86_VENDOR_TRANSMETA 7 | ||
88 | #define X86_VENDOR_NUM 8 | ||
89 | #define X86_VENDOR_UNKNOWN 0xff | ||
90 | |||
91 | #ifdef CONFIG_SMP | ||
92 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | ||
93 | #define cpu_data(cpu) per_cpu(cpu_info, cpu) | ||
94 | #define current_cpu_data cpu_data(smp_processor_id()) | ||
95 | #else | ||
96 | #define cpu_data(cpu) boot_cpu_data | ||
97 | #define current_cpu_data boot_cpu_data | ||
98 | #endif | ||
99 | |||
100 | extern char ignore_irq13; | ||
101 | |||
102 | extern void identify_cpu(struct cpuinfo_x86 *); | ||
103 | extern void print_cpu_info(struct cpuinfo_x86 *); | ||
104 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
105 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
106 | extern unsigned short num_cache_leaves; | ||
107 | |||
108 | /* | ||
109 | * Save the cr4 feature set we're using (i.e. | ||
110 | * Pentium 4MB enable and PPro Global page | ||
111 | * enable), so that any CPUs that boot up | ||
112 | * after us can get the correct flags. | ||
113 | */ | ||
114 | extern unsigned long mmu_cr4_features; | ||
115 | |||
116 | static inline void set_in_cr4 (unsigned long mask) | ||
117 | { | ||
118 | mmu_cr4_features |= mask; | ||
119 | __asm__("movq %%cr4,%%rax\n\t" | ||
120 | "orq %0,%%rax\n\t" | ||
121 | "movq %%rax,%%cr4\n" | ||
122 | : : "irg" (mask) | ||
123 | :"ax"); | ||
124 | } | ||
125 | |||
126 | static inline void clear_in_cr4 (unsigned long mask) | ||
127 | { | ||
128 | mmu_cr4_features &= ~mask; | ||
129 | __asm__("movq %%cr4,%%rax\n\t" | ||
130 | "andq %0,%%rax\n\t" | ||
131 | "movq %%rax,%%cr4\n" | ||
132 | : : "irg" (~mask) | ||
133 | :"ax"); | ||
134 | } | ||
135 | |||
136 | |||
137 | /* | ||
138 | * User space process size. 47 bits minus one guard page. | ||
139 | */ | ||
140 | #define TASK_SIZE64 (0x800000000000UL - 4096) | ||
141 | |||
142 | /* This decides where the kernel will search for a free chunk of vm | ||
143 | * space during mmap's. | ||
144 | */ | ||
145 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000) | ||
146 | |||
147 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64) | ||
148 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64) | ||
149 | |||
150 | #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3) | ||
151 | |||
152 | /* | ||
153 | * Size of io_bitmap. | ||
154 | */ | ||
155 | #define IO_BITMAP_BITS 65536 | ||
156 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | ||
157 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | ||
158 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) | ||
159 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | ||
160 | |||
161 | struct i387_fxsave_struct { | ||
162 | u16 cwd; | ||
163 | u16 swd; | ||
164 | u16 twd; | ||
165 | u16 fop; | ||
166 | u64 rip; | ||
167 | u64 rdp; | ||
168 | u32 mxcsr; | ||
169 | u32 mxcsr_mask; | ||
170 | u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | ||
171 | u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ | ||
172 | u32 padding[24]; | ||
173 | } __attribute__ ((aligned (16))); | ||
174 | |||
175 | union i387_union { | ||
176 | struct i387_fxsave_struct fxsave; | ||
177 | }; | ||
178 | |||
179 | struct tss_struct { | ||
180 | u32 reserved1; | ||
181 | u64 rsp0; | ||
182 | u64 rsp1; | ||
183 | u64 rsp2; | ||
184 | u64 reserved2; | ||
185 | u64 ist[7]; | ||
186 | u32 reserved3; | ||
187 | u32 reserved4; | ||
188 | u16 reserved5; | ||
189 | u16 io_bitmap_base; | ||
190 | /* | ||
191 | * The extra 1 is there because the CPU will access an | ||
192 | * additional byte beyond the end of the IO permission | ||
193 | * bitmap. The extra byte must be all 1 bits, and must | ||
194 | * be within the limit. Thus we have: | ||
195 | * | ||
196 | * 128 bytes, the bitmap itself, for ports 0..0x3ff | ||
197 | * 8 bytes, for an extra "long" of ~0UL | ||
198 | */ | ||
199 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | ||
200 | } __attribute__((packed)) ____cacheline_aligned; | ||
201 | |||
202 | |||
203 | extern struct cpuinfo_x86 boot_cpu_data; | ||
204 | DECLARE_PER_CPU(struct tss_struct,init_tss); | ||
205 | /* Save the original ist values for checking stack pointers during debugging */ | ||
206 | struct orig_ist { | ||
207 | unsigned long ist[7]; | ||
208 | }; | ||
209 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | ||
210 | |||
211 | #ifdef CONFIG_X86_VSMP | ||
212 | #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
213 | #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) | ||
214 | #else | ||
215 | #define ARCH_MIN_TASKALIGN 16 | ||
216 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | ||
217 | #endif | ||
218 | |||
219 | struct thread_struct { | ||
220 | unsigned long rsp0; | ||
221 | unsigned long rsp; | ||
222 | unsigned long userrsp; /* Copy from PDA */ | ||
223 | unsigned long fs; | ||
224 | unsigned long gs; | ||
225 | unsigned short es, ds, fsindex, gsindex; | ||
226 | /* Hardware debugging registers */ | ||
227 | unsigned long debugreg0; | ||
228 | unsigned long debugreg1; | ||
229 | unsigned long debugreg2; | ||
230 | unsigned long debugreg3; | ||
231 | unsigned long debugreg6; | ||
232 | unsigned long debugreg7; | ||
233 | /* fault info */ | ||
234 | unsigned long cr2, trap_no, error_code; | ||
235 | /* floating point info */ | ||
236 | union i387_union i387 __attribute__((aligned(16))); | ||
237 | /* IO permissions. The bitmap could be moved into the GDT, which would make | ||
238 | the switch faster for a limited number of ioperm-using tasks. -AK */ | ||
239 | int ioperm; | ||
240 | unsigned long *io_bitmap_ptr; | ||
241 | unsigned io_bitmap_max; | ||
242 | /* cached TLS descriptors. */ | ||
243 | u64 tls_array[GDT_ENTRY_TLS_ENTRIES]; | ||
244 | } __attribute__((aligned(16))); | ||
245 | |||
246 | #define INIT_THREAD { \ | ||
247 | .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
248 | } | ||
249 | |||
250 | #define INIT_TSS { \ | ||
251 | .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
252 | } | ||
253 | |||
254 | #define INIT_MMAP \ | ||
255 | { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } | ||
256 | |||
257 | #define start_thread(regs,new_rip,new_rsp) do { \ | ||
258 | asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ | ||
259 | load_gs_index(0); \ | ||
260 | (regs)->rip = (new_rip); \ | ||
261 | (regs)->rsp = (new_rsp); \ | ||
262 | write_pda(oldrsp, (new_rsp)); \ | ||
263 | (regs)->cs = __USER_CS; \ | ||
264 | (regs)->ss = __USER_DS; \ | ||
265 | (regs)->eflags = 0x200; \ | ||
266 | set_fs(USER_DS); \ | ||
267 | } while(0) | ||
268 | |||
269 | #define get_debugreg(var, register) \ | ||
270 | __asm__("movq %%db" #register ", %0" \ | ||
271 | :"=r" (var)) | ||
272 | #define set_debugreg(value, register) \ | ||
273 | __asm__("movq %0,%%db" #register \ | ||
274 | : /* no output */ \ | ||
275 | :"r" (value)) | ||
276 | |||
277 | struct task_struct; | ||
278 | struct mm_struct; | ||
279 | |||
280 | /* Free all resources held by a thread. */ | ||
281 | extern void release_thread(struct task_struct *); | ||
282 | |||
283 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
284 | extern void prepare_to_copy(struct task_struct *tsk); | ||
285 | |||
286 | /* | ||
287 | * create a kernel thread without removing it from tasklists | ||
288 | */ | ||
289 | extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | ||
290 | |||
291 | /* | ||
292 | * Return saved PC of a blocked thread. | ||
293 | * What is this good for? It will always be the scheduler or ret_from_fork. | ||
294 | */ | ||
295 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) | ||
296 | |||
297 | extern unsigned long get_wchan(struct task_struct *p); | ||
298 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1) | ||
299 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip) | ||
300 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | ||
301 | |||
302 | |||
303 | struct microcode_header { | ||
304 | unsigned int hdrver; | ||
305 | unsigned int rev; | ||
306 | unsigned int date; | ||
307 | unsigned int sig; | ||
308 | unsigned int cksum; | ||
309 | unsigned int ldrver; | ||
310 | unsigned int pf; | ||
311 | unsigned int datasize; | ||
312 | unsigned int totalsize; | ||
313 | unsigned int reserved[3]; | ||
314 | }; | ||
315 | |||
316 | struct microcode { | ||
317 | struct microcode_header hdr; | ||
318 | unsigned int bits[0]; | ||
319 | }; | ||
320 | |||
321 | typedef struct microcode microcode_t; | ||
322 | typedef struct microcode_header microcode_header_t; | ||
323 | |||
324 | /* microcode format is extended from prescott processors */ | ||
325 | struct extended_signature { | ||
326 | unsigned int sig; | ||
327 | unsigned int pf; | ||
328 | unsigned int cksum; | ||
329 | }; | ||
330 | |||
331 | struct extended_sigtable { | ||
332 | unsigned int count; | ||
333 | unsigned int cksum; | ||
334 | unsigned int reserved[3]; | ||
335 | struct extended_signature sigs[0]; | ||
336 | }; | ||
337 | |||
338 | |||
339 | #if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) | ||
340 | #define ASM_NOP1 P6_NOP1 | ||
341 | #define ASM_NOP2 P6_NOP2 | ||
342 | #define ASM_NOP3 P6_NOP3 | ||
343 | #define ASM_NOP4 P6_NOP4 | ||
344 | #define ASM_NOP5 P6_NOP5 | ||
345 | #define ASM_NOP6 P6_NOP6 | ||
346 | #define ASM_NOP7 P6_NOP7 | ||
347 | #define ASM_NOP8 P6_NOP8 | ||
348 | #else | ||
349 | #define ASM_NOP1 K8_NOP1 | ||
350 | #define ASM_NOP2 K8_NOP2 | ||
351 | #define ASM_NOP3 K8_NOP3 | ||
352 | #define ASM_NOP4 K8_NOP4 | ||
353 | #define ASM_NOP5 K8_NOP5 | ||
354 | #define ASM_NOP6 K8_NOP6 | ||
355 | #define ASM_NOP7 K8_NOP7 | ||
356 | #define ASM_NOP8 K8_NOP8 | ||
357 | #endif | ||
358 | |||
359 | /* Opteron nops */ | ||
360 | #define K8_NOP1 ".byte 0x90\n" | ||
361 | #define K8_NOP2 ".byte 0x66,0x90\n" | ||
362 | #define K8_NOP3 ".byte 0x66,0x66,0x90\n" | ||
363 | #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" | ||
364 | #define K8_NOP5 K8_NOP3 K8_NOP2 | ||
365 | #define K8_NOP6 K8_NOP3 K8_NOP3 | ||
366 | #define K8_NOP7 K8_NOP4 K8_NOP3 | ||
367 | #define K8_NOP8 K8_NOP4 K8_NOP4 | ||
368 | |||
369 | /* P6 nops */ | ||
370 | /* uses eax dependencies (Intel-recommended choice) */ | ||
371 | #define P6_NOP1 ".byte 0x90\n" | ||
372 | #define P6_NOP2 ".byte 0x66,0x90\n" | ||
373 | #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" | ||
374 | #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" | ||
375 | #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" | ||
376 | #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" | ||
377 | #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" | ||
378 | #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" | ||
379 | |||
380 | #define ASM_NOP_MAX 8 | ||
381 | |||
382 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | ||
383 | static inline void rep_nop(void) | ||
384 | { | ||
385 | __asm__ __volatile__("rep;nop": : :"memory"); | ||
386 | } | ||
387 | |||
388 | /* Stop speculative execution */ | ||
389 | static inline void sync_core(void) | ||
390 | { | ||
391 | int tmp; | ||
392 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); | ||
393 | } | ||
394 | |||
395 | #define ARCH_HAS_PREFETCHW 1 | ||
396 | static inline void prefetchw(void *x) | ||
397 | { | ||
398 | alternative_input("prefetcht0 (%1)", | ||
399 | "prefetchw (%1)", | ||
400 | X86_FEATURE_3DNOW, | ||
401 | "r" (x)); | ||
402 | } | ||
403 | |||
404 | #define ARCH_HAS_SPINLOCK_PREFETCH 1 | ||
405 | |||
406 | #define spin_lock_prefetch(x) prefetchw(x) | ||
407 | |||
408 | #define cpu_relax() rep_nop() | ||
409 | |||
410 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
411 | unsigned long edx) | ||
412 | { | ||
413 | /* "monitor %eax,%ecx,%edx;" */ | ||
414 | asm volatile( | ||
415 | ".byte 0x0f,0x01,0xc8;" | ||
416 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
417 | } | ||
418 | |||
419 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
420 | { | ||
421 | /* "mwait %eax,%ecx;" */ | ||
422 | asm volatile( | ||
423 | ".byte 0x0f,0x01,0xc9;" | ||
424 | : :"a" (eax), "c" (ecx)); | ||
425 | } | ||
426 | |||
427 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
428 | { | ||
429 | /* "mwait %eax,%ecx;" */ | ||
430 | asm volatile( | ||
431 | "sti; .byte 0x0f,0x01,0xc9;" | ||
432 | : :"a" (eax), "c" (ecx)); | ||
433 | } | ||
434 | |||
435 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | ||
436 | |||
437 | #define stack_current() \ | ||
438 | ({ \ | ||
439 | struct thread_info *ti; \ | ||
440 | asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | ||
441 | ti->task; \ | ||
442 | }) | ||
443 | |||
444 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
445 | |||
446 | extern unsigned long boot_option_idle_override; | ||
447 | /* Boot loader type from the setup header */ | ||
448 | extern int bootloader_type; | ||
449 | |||
450 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | ||
451 | |||
452 | #endif /* __ASM_X86_64_PROCESSOR_H */ | ||
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index dabba55f7ed8..68563c0709ac 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h | |||
@@ -5,87 +5,24 @@ | |||
5 | 5 | ||
6 | /* misc architecture specific prototypes */ | 6 | /* misc architecture specific prototypes */ |
7 | 7 | ||
8 | struct cpuinfo_x86; | ||
9 | struct pt_regs; | ||
10 | |||
11 | extern void start_kernel(void); | ||
12 | extern void pda_init(int); | ||
13 | |||
14 | extern void early_idt_handler(void); | 8 | extern void early_idt_handler(void); |
15 | 9 | ||
16 | extern void mcheck_init(struct cpuinfo_x86 *c); | ||
17 | extern void init_memory_mapping(unsigned long start, unsigned long end); | 10 | extern void init_memory_mapping(unsigned long start, unsigned long end); |
18 | 11 | ||
19 | extern void system_call(void); | 12 | extern void system_call(void); |
20 | extern int kernel_syscall(void); | ||
21 | extern void syscall_init(void); | 13 | extern void syscall_init(void); |
22 | 14 | ||
23 | extern void ia32_syscall(void); | 15 | extern void ia32_syscall(void); |
24 | extern void ia32_cstar_target(void); | 16 | extern void ia32_cstar_target(void); |
25 | extern void ia32_sysenter_target(void); | 17 | extern void ia32_sysenter_target(void); |
26 | |||
27 | extern void config_acpi_tables(void); | ||
28 | extern void ia32_syscall(void); | ||
29 | |||
30 | extern int pmtimer_mark_offset(void); | ||
31 | extern void pmtimer_resume(void); | ||
32 | extern void pmtimer_wait(unsigned); | ||
33 | extern unsigned int do_gettimeoffset_pm(void); | ||
34 | #ifdef CONFIG_X86_PM_TIMER | ||
35 | extern u32 pmtmr_ioport; | ||
36 | #else | ||
37 | #define pmtmr_ioport 0 | ||
38 | #endif | ||
39 | extern int nohpet; | ||
40 | |||
41 | extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); | ||
42 | |||
43 | extern void early_identify_cpu(struct cpuinfo_x86 *c); | ||
44 | |||
45 | extern int k8_scan_nodes(unsigned long start, unsigned long end); | ||
46 | |||
47 | extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); | ||
48 | extern unsigned long numa_free_all_bootmem(void); | ||
49 | 18 | ||
50 | extern void reserve_bootmem_generic(unsigned long phys, unsigned len); | 19 | extern void reserve_bootmem_generic(unsigned long phys, unsigned len); |
51 | 20 | ||
52 | extern void load_gs_index(unsigned gs); | ||
53 | |||
54 | extern unsigned long end_pfn_map; | ||
55 | |||
56 | extern void show_trace(struct task_struct *, struct pt_regs *, unsigned long * rsp); | ||
57 | extern void show_registers(struct pt_regs *regs); | ||
58 | |||
59 | extern void exception_table_check(void); | ||
60 | |||
61 | extern void acpi_reserve_bootmem(void); | ||
62 | |||
63 | extern void swap_low_mappings(void); | ||
64 | |||
65 | extern void __show_regs(struct pt_regs * regs); | ||
66 | extern void show_regs(struct pt_regs * regs); | ||
67 | |||
68 | extern void syscall32_cpu_init(void); | 21 | extern void syscall32_cpu_init(void); |
69 | 22 | ||
70 | extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); | ||
71 | |||
72 | extern void early_quirks(void); | ||
73 | extern void check_efer(void); | 23 | extern void check_efer(void); |
74 | 24 | ||
75 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | ||
76 | |||
77 | extern unsigned long table_start, table_end; | ||
78 | |||
79 | extern int exception_trace; | ||
80 | extern unsigned cpu_khz; | ||
81 | extern unsigned tsc_khz; | ||
82 | |||
83 | extern int reboot_force; | 25 | extern int reboot_force; |
84 | extern int notsc_setup(char *); | ||
85 | |||
86 | extern int gsi_irq_sharing(int gsi); | ||
87 | |||
88 | extern int force_mwait; | ||
89 | 26 | ||
90 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 27 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
91 | 28 | ||
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h index 7524e1233833..81a8ee4c55fc 100644 --- a/include/asm-x86/ptrace-abi.h +++ b/include/asm-x86/ptrace-abi.h | |||
@@ -78,4 +78,66 @@ | |||
78 | # define PTRACE_SYSEMU_SINGLESTEP 32 | 78 | # define PTRACE_SYSEMU_SINGLESTEP 32 |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | #define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ | ||
82 | |||
83 | #ifndef __ASSEMBLY__ | ||
84 | |||
85 | #include <asm/types.h> | ||
86 | |||
87 | /* configuration/status structure used in PTRACE_BTS_CONFIG and | ||
88 | PTRACE_BTS_STATUS commands. | ||
89 | */ | ||
90 | struct ptrace_bts_config { | ||
91 | /* requested or actual size of BTS buffer in bytes */ | ||
92 | u32 size; | ||
93 | /* bitmask of below flags */ | ||
94 | u32 flags; | ||
95 | /* buffer overflow signal */ | ||
96 | u32 signal; | ||
97 | /* actual size of bts_struct in bytes */ | ||
98 | u32 bts_size; | ||
99 | }; | ||
100 | #endif | ||
101 | |||
102 | #define PTRACE_BTS_O_TRACE 0x1 /* branch trace */ | ||
103 | #define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */ | ||
104 | #define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow | ||
105 | instead of wrapping around */ | ||
106 | #define PTRACE_BTS_O_CUT_SIZE 0x8 /* cut requested size to max available | ||
107 | instead of failing */ | ||
108 | |||
109 | #define PTRACE_BTS_CONFIG 40 | ||
110 | /* Configure branch trace recording. | ||
111 | ADDR points to a struct ptrace_bts_config. | ||
112 | DATA gives the size of that buffer. | ||
113 | A new buffer is allocated iff the size changes. | ||
114 | Returns the number of bytes read. | ||
115 | */ | ||
116 | #define PTRACE_BTS_STATUS 41 | ||
117 | /* Return the current configuration in a struct ptrace_bts_config | ||
118 | pointed to by ADDR; DATA gives the size of that buffer. | ||
119 | Returns the number of bytes written. | ||
120 | */ | ||
121 | #define PTRACE_BTS_SIZE 42 | ||
122 | /* Return the number of available BTS records. | ||
123 | DATA and ADDR are ignored. | ||
124 | */ | ||
125 | #define PTRACE_BTS_GET 43 | ||
126 | /* Get a single BTS record. | ||
127 | DATA defines the index into the BTS array, where 0 is the newest | ||
128 | entry, and higher indices refer to older entries. | ||
129 | ADDR is pointing to struct bts_struct (see asm/ds.h). | ||
130 | */ | ||
131 | #define PTRACE_BTS_CLEAR 44 | ||
132 | /* Clear the BTS buffer. | ||
133 | DATA and ADDR are ignored. | ||
134 | */ | ||
135 | #define PTRACE_BTS_DRAIN 45 | ||
136 | /* Read all available BTS records and clear the buffer. | ||
137 | ADDR points to an array of struct bts_struct. | ||
138 | DATA gives the size of that buffer. | ||
139 | BTS records are read from oldest to newest. | ||
140 | Returns number of BTS records drained. | ||
141 | */ | ||
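Putting the commands together, a hedged user-space sketch of enabling branch tracing on a stopped tracee; the pid, buffer size, and absence of error handling are illustrative:

#include <sys/ptrace.h>
#include <sys/types.h>

static long enable_bts_sketch(pid_t pid)
{
	struct ptrace_bts_config cfg = {
		.size  = 4096,		/* request a 4 KB BTS buffer */
		.flags = PTRACE_BTS_O_TRACE | PTRACE_BTS_O_CUT_SIZE,
	};

	/* ADDR is the config struct, DATA its size, per the comments above */
	return ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg));
}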
142 | |||
81 | #endif | 143 | #endif |
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index 51ddb2590870..d9e04b46a440 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h | |||
@@ -4,12 +4,15 @@ | |||
4 | #include <linux/compiler.h> /* For __user */ | 4 | #include <linux/compiler.h> /* For __user */ |
5 | #include <asm/ptrace-abi.h> | 5 | #include <asm/ptrace-abi.h> |
6 | 6 | ||
7 | |||
7 | #ifndef __ASSEMBLY__ | 8 | #ifndef __ASSEMBLY__ |
8 | 9 | ||
9 | #ifdef __i386__ | 10 | #ifdef __i386__ |
10 | /* this struct defines the way the registers are stored on the | 11 | /* this struct defines the way the registers are stored on the |
11 | stack during a system call. */ | 12 | stack during a system call. */ |
12 | 13 | ||
14 | #ifndef __KERNEL__ | ||
15 | |||
13 | struct pt_regs { | 16 | struct pt_regs { |
14 | long ebx; | 17 | long ebx; |
15 | long ecx; | 18 | long ecx; |
@@ -21,7 +24,7 @@ struct pt_regs { | |||
21 | int xds; | 24 | int xds; |
22 | int xes; | 25 | int xes; |
23 | int xfs; | 26 | int xfs; |
24 | /* int xgs; */ | 27 | /* int gs; */ |
25 | long orig_eax; | 28 | long orig_eax; |
26 | long eip; | 29 | long eip; |
27 | int xcs; | 30 | int xcs; |
@@ -30,44 +33,37 @@ struct pt_regs { | |||
30 | int xss; | 33 | int xss; |
31 | }; | 34 | }; |
32 | 35 | ||
33 | #ifdef __KERNEL__ | 36 | #else /* __KERNEL__ */ |
37 | |||
38 | struct pt_regs { | ||
39 | long bx; | ||
40 | long cx; | ||
41 | long dx; | ||
42 | long si; | ||
43 | long di; | ||
44 | long bp; | ||
45 | long ax; | ||
46 | int ds; | ||
47 | int es; | ||
48 | int fs; | ||
49 | /* int gs; */ | ||
50 | long orig_ax; | ||
51 | long ip; | ||
52 | int cs; | ||
53 | long flags; | ||
54 | long sp; | ||
55 | int ss; | ||
56 | }; | ||
34 | 57 | ||
35 | #include <asm/vm86.h> | 58 | #include <asm/vm86.h> |
36 | #include <asm/segment.h> | 59 | #include <asm/segment.h> |
37 | 60 | ||
38 | struct task_struct; | ||
39 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); | ||
40 | |||
41 | /* | ||
42 | * user_mode_vm(regs) determines whether a register set came from user mode. | ||
43 | * This is true if V8086 mode was enabled OR if the register set was from | ||
44 | * protected mode with RPL-3 CS value. This tricky test checks that with | ||
45 | * one comparison. Many places in the kernel can bypass this full check | ||
46 | * if they have already ruled out V8086 mode, so user_mode(regs) can be used. | ||
47 | */ | ||
48 | static inline int user_mode(struct pt_regs *regs) | ||
49 | { | ||
50 | return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; | ||
51 | } | ||
52 | static inline int user_mode_vm(struct pt_regs *regs) | ||
53 | { | ||
54 | return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; | ||
55 | } | ||
56 | static inline int v8086_mode(struct pt_regs *regs) | ||
57 | { | ||
58 | return (regs->eflags & VM_MASK); | ||
59 | } | ||
60 | |||
61 | #define instruction_pointer(regs) ((regs)->eip) | ||
62 | #define frame_pointer(regs) ((regs)->ebp) | ||
63 | #define stack_pointer(regs) ((unsigned long)(regs)) | ||
64 | #define regs_return_value(regs) ((regs)->eax) | ||
65 | |||
66 | extern unsigned long profile_pc(struct pt_regs *regs); | ||
67 | #endif /* __KERNEL__ */ | 61 | #endif /* __KERNEL__ */ |
68 | 62 | ||
69 | #else /* __i386__ */ | 63 | #else /* __i386__ */ |
70 | 64 | ||
65 | #ifndef __KERNEL__ | ||
66 | |||
71 | struct pt_regs { | 67 | struct pt_regs { |
72 | unsigned long r15; | 68 | unsigned long r15; |
73 | unsigned long r14; | 69 | unsigned long r14; |
@@ -96,47 +92,143 @@ struct pt_regs { | |||
96 | /* top of stack page */ | 92 | /* top of stack page */ |
97 | }; | 93 | }; |
98 | 94 | ||
95 | #else /* __KERNEL__ */ | ||
96 | |||
97 | struct pt_regs { | ||
98 | unsigned long r15; | ||
99 | unsigned long r14; | ||
100 | unsigned long r13; | ||
101 | unsigned long r12; | ||
102 | unsigned long bp; | ||
103 | unsigned long bx; | ||
104 | /* arguments: non-interrupt/non-tracing syscalls only save up to here */ | ||
105 | unsigned long r11; | ||
106 | unsigned long r10; | ||
107 | unsigned long r9; | ||
108 | unsigned long r8; | ||
109 | unsigned long ax; | ||
110 | unsigned long cx; | ||
111 | unsigned long dx; | ||
112 | unsigned long si; | ||
113 | unsigned long di; | ||
114 | unsigned long orig_ax; | ||
115 | /* end of arguments */ | ||
116 | /* cpu exception frame or undefined */ | ||
117 | unsigned long ip; | ||
118 | unsigned long cs; | ||
119 | unsigned long flags; | ||
120 | unsigned long sp; | ||
121 | unsigned long ss; | ||
122 | /* top of stack page */ | ||
123 | }; | ||
124 | |||
125 | #endif /* __KERNEL__ */ | ||
126 | #endif /* !__i386__ */ | ||
127 | |||
99 | #ifdef __KERNEL__ | 128 | #ifdef __KERNEL__ |
100 | 129 | ||
101 | #define user_mode(regs) (!!((regs)->cs & 3)) | 130 | /* the DS BTS struct is used for ptrace as well */ |
102 | #define user_mode_vm(regs) user_mode(regs) | 131 | #include <asm/ds.h> |
103 | #define instruction_pointer(regs) ((regs)->rip) | 132 | |
104 | #define frame_pointer(regs) ((regs)->rbp) | 133 | struct task_struct; |
105 | #define stack_pointer(regs) ((regs)->rsp) | 134 | |
106 | #define regs_return_value(regs) ((regs)->rax) | 135 | extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier); |
107 | 136 | ||
108 | extern unsigned long profile_pc(struct pt_regs *regs); | 137 | extern unsigned long profile_pc(struct pt_regs *regs); |
138 | |||
139 | extern unsigned long | ||
140 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | ||
141 | |||
142 | #ifdef CONFIG_X86_32 | ||
143 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); | ||
144 | #else | ||
109 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); | 145 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); |
146 | #endif | ||
110 | 147 | ||
111 | struct task_struct; | 148 | #define regs_return_value(regs) ((regs)->ax) |
149 | |||
150 | /* | ||
151 | * user_mode_vm(regs) determines whether a register set came from user mode. | ||
152 | * This is true if V8086 mode was enabled OR if the register set was from | ||
153 | * protected mode with RPL-3 CS value. This tricky test checks that with | ||
154 | * one comparison. Many places in the kernel can bypass this full check | ||
155 | * if they have already ruled out V8086 mode, so user_mode(regs) can be used. | ||
156 | */ | ||
157 | static inline int user_mode(struct pt_regs *regs) | ||
158 | { | ||
159 | #ifdef CONFIG_X86_32 | ||
160 | return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; | ||
161 | #else | ||
162 | return !!(regs->cs & 3); | ||
163 | #endif | ||
164 | } | ||
165 | |||
166 | static inline int user_mode_vm(struct pt_regs *regs) | ||
167 | { | ||
168 | #ifdef CONFIG_X86_32 | ||
169 | return ((regs->cs & SEGMENT_RPL_MASK) | | ||
170 | (regs->flags & VM_MASK)) >= USER_RPL; | ||
171 | #else | ||
172 | return user_mode(regs); | ||
173 | #endif | ||
174 | } | ||
175 | |||
176 | static inline int v8086_mode(struct pt_regs *regs) | ||
177 | { | ||
178 | #ifdef CONFIG_X86_32 | ||
179 | return (regs->flags & VM_MASK); | ||
180 | #else | ||
181 | return 0; /* No V86 mode support in long mode */ | ||
182 | #endif | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode | ||
187 | * when it traps. So regs will be the current sp. | ||
188 | * | ||
189 | * This is valid only for kernel mode traps. | ||
190 | */ | ||
191 | static inline unsigned long kernel_trap_sp(struct pt_regs *regs) | ||
192 | { | ||
193 | #ifdef CONFIG_X86_32 | ||
194 | return (unsigned long)regs; | ||
195 | #else | ||
196 | return regs->sp; | ||
197 | #endif | ||
198 | } | ||
199 | |||
200 | static inline unsigned long instruction_pointer(struct pt_regs *regs) | ||
201 | { | ||
202 | return regs->ip; | ||
203 | } | ||
204 | |||
205 | static inline unsigned long frame_pointer(struct pt_regs *regs) | ||
206 | { | ||
207 | return regs->bp; | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * These are defined as per linux/ptrace.h; see that file for details. | ||
212 | */ | ||
213 | #define arch_has_single_step() (1) | ||
214 | extern void user_enable_single_step(struct task_struct *); | ||
215 | extern void user_disable_single_step(struct task_struct *); | ||
216 | |||
217 | extern void user_enable_block_step(struct task_struct *); | ||
218 | #ifdef CONFIG_X86_DEBUGCTLMSR | ||
219 | #define arch_has_block_step() (1) | ||
220 | #else | ||
221 | #define arch_has_block_step() (boot_cpu_data.x86 >= 6) | ||
222 | #endif | ||
223 | |||
224 | struct user_desc; | ||
225 | extern int do_get_thread_area(struct task_struct *p, int idx, | ||
226 | struct user_desc __user *info); | ||
227 | extern int do_set_thread_area(struct task_struct *p, int idx, | ||
228 | struct user_desc __user *info, int can_allocate); | ||
112 | 229 | ||
113 | extern unsigned long | ||
114 | convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs); | ||
115 | |||
116 | enum { | ||
117 | EF_CF = 0x00000001, | ||
118 | EF_PF = 0x00000004, | ||
119 | EF_AF = 0x00000010, | ||
120 | EF_ZF = 0x00000040, | ||
121 | EF_SF = 0x00000080, | ||
122 | EF_TF = 0x00000100, | ||
123 | EF_IE = 0x00000200, | ||
124 | EF_DF = 0x00000400, | ||
125 | EF_OF = 0x00000800, | ||
126 | EF_IOPL = 0x00003000, | ||
127 | EF_IOPL_RING0 = 0x00000000, | ||
128 | EF_IOPL_RING1 = 0x00001000, | ||
129 | EF_IOPL_RING2 = 0x00002000, | ||
130 | EF_NT = 0x00004000, /* nested task */ | ||
131 | EF_RF = 0x00010000, /* resume */ | ||
132 | EF_VM = 0x00020000, /* virtual mode */ | ||
133 | EF_AC = 0x00040000, /* alignment */ | ||
134 | EF_VIF = 0x00080000, /* virtual interrupt */ | ||
135 | EF_VIP = 0x00100000, /* virtual interrupt pending */ | ||
136 | EF_ID = 0x00200000, /* id */ | ||
137 | }; | ||
138 | #endif /* __KERNEL__ */ | 230 | #endif /* __KERNEL__ */ |
139 | #endif /* !__i386__ */ | 231 | |
140 | #endif /* !__ASSEMBLY__ */ | 232 | #endif /* !__ASSEMBLY__ */ |
141 | 233 | ||
142 | #endif | 234 | #endif |
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 9b6dd093a9f7..46f725b0bc82 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h | |||
@@ -1,5 +1,20 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_RESUME_TRACE_H |
2 | # include "resume-trace_32.h" | 2 | #define _ASM_X86_RESUME_TRACE_H |
3 | #else | 3 | |
4 | # include "resume-trace_64.h" | 4 | #include <asm/asm.h> |
5 | |||
6 | #define TRACE_RESUME(user) do { \ | ||
7 | if (pm_trace_enabled) { \ | ||
8 | void *tracedata; \ | ||
9 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ | ||
10 | ".section .tracedata,\"a\"\n" \ | ||
11 | "1:\t.word %c1\n\t" \ | ||
12 | _ASM_PTR " %c2\n" \ | ||
13 | ".previous" \ | ||
14 | :"=r" (tracedata) \ | ||
15 | : "i" (__LINE__), "i" (__FILE__)); \ | ||
16 | generate_resume_trace(tracedata, user); \ | ||
17 | } \ | ||
18 | } while (0) | ||
19 | |||
5 | #endif | 20 | #endif |
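Aside (not part of the patch): the merged macro works because
_ASM_MOV_UL and _ASM_PTR from <asm/asm.h> expand to movl/.long on
32-bit and movq/.quad on 64-bit, which is what lets the two per-width
copies below be deleted. A hedged usage sketch; the resume hook is
hypothetical, but TRACE_RESUME(0) marking a kernel-initiated resume
point follows the existing pm_trace convention:

	static int my_device_resume(struct device *dev)
	{
		TRACE_RESUME(0);	/* drop a (line, file) pair into .tracedata */
		/* ... reprogram the hardware ... */
		return 0;
	}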
diff --git a/include/asm-x86/resume-trace_32.h b/include/asm-x86/resume-trace_32.h deleted file mode 100644 index ec9cfd656230..000000000000 --- a/include/asm-x86/resume-trace_32.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #define TRACE_RESUME(user) do { \ | ||
2 | if (pm_trace_enabled) { \ | ||
3 | void *tracedata; \ | ||
4 | asm volatile("movl $1f,%0\n" \ | ||
5 | ".section .tracedata,\"a\"\n" \ | ||
6 | "1:\t.word %c1\n" \ | ||
7 | "\t.long %c2\n" \ | ||
8 | ".previous" \ | ||
9 | :"=r" (tracedata) \ | ||
10 | : "i" (__LINE__), "i" (__FILE__)); \ | ||
11 | generate_resume_trace(tracedata, user); \ | ||
12 | } \ | ||
13 | } while (0) | ||
diff --git a/include/asm-x86/resume-trace_64.h b/include/asm-x86/resume-trace_64.h deleted file mode 100644 index 34bf998fdf62..000000000000 --- a/include/asm-x86/resume-trace_64.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #define TRACE_RESUME(user) do { \ | ||
2 | if (pm_trace_enabled) { \ | ||
3 | void *tracedata; \ | ||
4 | asm volatile("movq $1f,%0\n" \ | ||
5 | ".section .tracedata,\"a\"\n" \ | ||
6 | "1:\t.word %c1\n" \ | ||
7 | "\t.quad %c2\n" \ | ||
8 | ".previous" \ | ||
9 | :"=r" (tracedata) \ | ||
10 | : "i" (__LINE__), "i" (__FILE__)); \ | ||
11 | generate_resume_trace(tracedata, user); \ | ||
12 | } \ | ||
13 | } while (0) | ||
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index c7350f6d2015..97cdcc9887ba 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Derived from include/asm-i386/mach-summit/mach_mpparse.h | 2 | * Derived from include/asm-x86/mach-summit/mach_mpparse.h |
3 | * and include/asm-i386/mach-default/bios_ebda.h | 3 | * and include/asm-x86/mach-default/bios_ebda.h |
4 | * | 4 | * |
5 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> | 5 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> |
6 | */ | 6 | */ |
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h index f2b64a429e6b..6a8c0d645108 100644 --- a/include/asm-x86/rwlock.h +++ b/include/asm-x86/rwlock.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define _ASM_X86_RWLOCK_H | 2 | #define _ASM_X86_RWLOCK_H |
3 | 3 | ||
4 | #define RW_LOCK_BIAS 0x01000000 | 4 | #define RW_LOCK_BIAS 0x01000000 |
5 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
6 | 5 | ||
7 | /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ | 6 | /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ |
8 | 7 | ||
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 041906f3c6df..520a379f4b80 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * | 2 | * |
3 | * Written by David Howells (dhowells@redhat.com). | 3 | * Written by David Howells (dhowells@redhat.com). |
4 | * | 4 | * |
5 | * Derived from asm-i386/semaphore.h | 5 | * Derived from asm-x86/semaphore.h |
6 | * | 6 | * |
7 | * | 7 | * |
8 | * The MSW of the count is the negated number of active writers and waiting | 8 | * The MSW of the count is the negated number of active writers and waiting |
@@ -44,10 +44,14 @@ | |||
44 | 44 | ||
45 | struct rwsem_waiter; | 45 | struct rwsem_waiter; |
46 | 46 | ||
47 | extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); | 47 | extern asmregparm struct rw_semaphore * |
48 | extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); | 48 | rwsem_down_read_failed(struct rw_semaphore *sem); |
49 | extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); | 49 | extern asmregparm struct rw_semaphore * |
50 | extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); | 50 | rwsem_down_write_failed(struct rw_semaphore *sem); |
51 | extern asmregparm struct rw_semaphore * | ||
52 | rwsem_wake(struct rw_semaphore *); | ||
53 | extern asmregparm struct rw_semaphore * | ||
54 | rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
51 | 55 | ||
52 | /* | 56 | /* |
53 | * the semaphore definition | 57 | * the semaphore definition |
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h index 3a1e76257a27..d13c197866d6 100644 --- a/include/asm-x86/scatterlist.h +++ b/include/asm-x86/scatterlist.h | |||
@@ -1,5 +1,35 @@ | |||
1 | #ifndef _ASM_X86_SCATTERLIST_H | ||
2 | #define _ASM_X86_SCATTERLIST_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | struct scatterlist { | ||
7 | #ifdef CONFIG_DEBUG_SG | ||
8 | unsigned long sg_magic; | ||
9 | #endif | ||
10 | unsigned long page_link; | ||
11 | unsigned int offset; | ||
12 | unsigned int length; | ||
13 | dma_addr_t dma_address; | ||
14 | #ifdef CONFIG_X86_64 | ||
15 | unsigned int dma_length; | ||
16 | #endif | ||
17 | }; | ||
18 | |||
19 | #define ARCH_HAS_SG_CHAIN | ||
20 | #define ISA_DMA_THRESHOLD (0x00ffffff) | ||
21 | |||
22 | /* | ||
23 | * These macros should be used after a pci_map_sg call has been done | ||
24 | * to get bus addresses of each of the SG entries and their lengths. | ||
25 | * You should only work with the number of sg entries pci_map_sg | ||
26 | * returns. | ||
27 | */ | ||
28 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
1 | #ifdef CONFIG_X86_32 | 29 | #ifdef CONFIG_X86_32 |
2 | # include "scatterlist_32.h" | 30 | # define sg_dma_len(sg) ((sg)->length) |
3 | #else | 31 | #else |
4 | # include "scatterlist_64.h" | 32 | # define sg_dma_len(sg) ((sg)->dma_length) |
33 | #endif | ||
34 | |||
5 | #endif | 35 | #endif |
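Aside (not part of the patch): as the comment above says, dma_address
and the per-width length field are only meaningful for the entries
pci_map_sg() reports. A minimal sketch; pdev, sglist and nents are
hypothetical driver state:

	int i, mapped;

	mapped = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
	for (i = 0; i < mapped; i++) {
		dma_addr_t addr = sg_dma_address(&sglist[i]);
		unsigned int len = sg_dma_len(&sglist[i]);
		/* hand addr/len to the device's DMA engine */
	}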
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h deleted file mode 100644 index 0e7d997a34be..000000000000 --- a/include/asm-x86/scatterlist_32.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #ifndef _I386_SCATTERLIST_H | ||
2 | #define _I386_SCATTERLIST_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | struct scatterlist { | ||
7 | #ifdef CONFIG_DEBUG_SG | ||
8 | unsigned long sg_magic; | ||
9 | #endif | ||
10 | unsigned long page_link; | ||
11 | unsigned int offset; | ||
12 | dma_addr_t dma_address; | ||
13 | unsigned int length; | ||
14 | }; | ||
15 | |||
16 | #define ARCH_HAS_SG_CHAIN | ||
17 | |||
18 | /* These macros should be used after a pci_map_sg call has been done | ||
19 | * to get bus addresses of each of the SG entries and their lengths. | ||
20 | * You should only work with the number of sg entries pci_map_sg | ||
21 | * returns. | ||
22 | */ | ||
23 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
24 | #define sg_dma_len(sg) ((sg)->length) | ||
25 | |||
26 | #define ISA_DMA_THRESHOLD (0x00ffffff) | ||
27 | |||
28 | #endif /* !(_I386_SCATTERLIST_H) */ | ||
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h deleted file mode 100644 index 1847c72befeb..000000000000 --- a/include/asm-x86/scatterlist_64.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | #ifndef _X8664_SCATTERLIST_H | ||
2 | #define _X8664_SCATTERLIST_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | struct scatterlist { | ||
7 | #ifdef CONFIG_DEBUG_SG | ||
8 | unsigned long sg_magic; | ||
9 | #endif | ||
10 | unsigned long page_link; | ||
11 | unsigned int offset; | ||
12 | unsigned int length; | ||
13 | dma_addr_t dma_address; | ||
14 | unsigned int dma_length; | ||
15 | }; | ||
16 | |||
17 | #define ARCH_HAS_SG_CHAIN | ||
18 | |||
19 | #define ISA_DMA_THRESHOLD (0x00ffffff) | ||
20 | |||
21 | /* These macros should be used after a pci_map_sg call has been done | ||
22 | * to get bus addresses of each of the SG entries and their lengths. | ||
23 | * You should only work with the number of sg entries pci_map_sg | ||
24 | * returns. | ||
25 | */ | ||
26 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
27 | #define sg_dma_len(sg) ((sg)->dma_length) | ||
28 | |||
29 | #endif | ||
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index 605068280e28..23f0535fec61 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h | |||
@@ -1,5 +1,204 @@ | |||
1 | #ifndef _ASM_X86_SEGMENT_H_ | ||
2 | #define _ASM_X86_SEGMENT_H_ | ||
3 | |||
4 | /* Simple and small GDT entries for booting only */ | ||
5 | |||
6 | #define GDT_ENTRY_BOOT_CS 2 | ||
7 | #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) | ||
8 | |||
9 | #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) | ||
10 | #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) | ||
11 | |||
12 | #define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2) | ||
13 | #define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8) | ||
14 | |||
1 | #ifdef CONFIG_X86_32 | 15 | #ifdef CONFIG_X86_32 |
2 | # include "segment_32.h" | 16 | /* |
17 | * The layout of the per-CPU GDT under Linux: | ||
18 | * | ||
19 | * 0 - null | ||
20 | * 1 - reserved | ||
21 | * 2 - reserved | ||
22 | * 3 - reserved | ||
23 | * | ||
24 | * 4 - unused <==== new cacheline | ||
25 | * 5 - unused | ||
26 | * | ||
27 | * ------- start of TLS (Thread-Local Storage) segments: | ||
28 | * | ||
29 | * 6 - TLS segment #1 [ glibc's TLS segment ] | ||
30 | * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] | ||
31 | * 8 - TLS segment #3 | ||
32 | * 9 - reserved | ||
33 | * 10 - reserved | ||
34 | * 11 - reserved | ||
35 | * | ||
36 | * ------- start of kernel segments: | ||
37 | * | ||
38 | * 12 - kernel code segment <==== new cacheline | ||
39 | * 13 - kernel data segment | ||
40 | * 14 - default user CS | ||
41 | * 15 - default user DS | ||
42 | * 16 - TSS | ||
43 | * 17 - LDT | ||
44 | * 18 - PNPBIOS support (16->32 gate) | ||
45 | * 19 - PNPBIOS support | ||
46 | * 20 - PNPBIOS support | ||
47 | * 21 - PNPBIOS support | ||
48 | * 22 - PNPBIOS support | ||
49 | * 23 - APM BIOS support | ||
50 | * 24 - APM BIOS support | ||
51 | * 25 - APM BIOS support | ||
52 | * | ||
53 | * 26 - ESPFIX small SS | ||
54 | * 27 - per-cpu [ offset to per-cpu data area ] | ||
55 | * 28 - unused | ||
56 | * 29 - unused | ||
57 | * 30 - unused | ||
58 | * 31 - TSS for double fault handler | ||
59 | */ | ||
60 | #define GDT_ENTRY_TLS_MIN 6 | ||
61 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) | ||
62 | |||
63 | #define GDT_ENTRY_DEFAULT_USER_CS 14 | ||
64 | #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) | ||
65 | |||
66 | #define GDT_ENTRY_DEFAULT_USER_DS 15 | ||
67 | #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) | ||
68 | |||
69 | #define GDT_ENTRY_KERNEL_BASE 12 | ||
70 | |||
71 | #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) | ||
72 | #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) | ||
73 | |||
74 | #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) | ||
75 | #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) | ||
76 | |||
77 | #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) | ||
78 | #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) | ||
79 | |||
80 | #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) | ||
81 | #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) | ||
82 | |||
83 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) | ||
84 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) | ||
85 | |||
86 | #define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) | ||
87 | #ifdef CONFIG_SMP | ||
88 | #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) | ||
3 | #else | 89 | #else |
4 | # include "segment_64.h" | 90 | #define __KERNEL_PERCPU 0 |
91 | #endif | ||
92 | |||
93 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 | ||
94 | |||
95 | /* | ||
96 | * The GDT has 32 entries | ||
97 | */ | ||
98 | #define GDT_ENTRIES 32 | ||
99 | |||
100 | /* The PnP BIOS entries in the GDT */ | ||
101 | #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) | ||
102 | #define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) | ||
103 | #define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) | ||
104 | #define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) | ||
105 | #define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) | ||
106 | |||
107 | /* The PnP BIOS selectors */ | ||
108 | #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ | ||
109 | #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ | ||
110 | #define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ | ||
111 | #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ | ||
112 | #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ | ||
113 | |||
114 | /* Bottom two bits of selector give the ring privilege level */ | ||
115 | #define SEGMENT_RPL_MASK 0x3 | ||
116 | /* Bit 2 is table indicator (LDT/GDT) */ | ||
117 | #define SEGMENT_TI_MASK 0x4 | ||
118 | |||
119 | /* User mode is privilege level 3 */ | ||
120 | #define USER_RPL 0x3 | ||
121 | /* LDT segment has TI set, GDT has it cleared */ | ||
122 | #define SEGMENT_LDT 0x4 | ||
123 | #define SEGMENT_GDT 0x0 | ||
124 | |||
125 | /* | ||
126 | * Matching rules for certain types of segments. | ||
127 | */ | ||
128 | |||
129 | /* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */ | ||
130 | #define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8) | ||
131 | |||
132 | /* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ | ||
133 | #define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) | ||
134 | |||
135 | /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ | ||
136 | #define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) | ||
137 | |||
138 | |||
139 | #else | ||
140 | #include <asm/cache.h> | ||
141 | |||
142 | #define __KERNEL_CS 0x10 | ||
143 | #define __KERNEL_DS 0x18 | ||
144 | |||
145 | #define __KERNEL32_CS 0x08 | ||
146 | |||
147 | /* | ||
148 | * we cannot use the same code segment descriptor for user and kernel | ||
149 | * -- not even in the long flat mode, because of different DPL /kkeil | ||
150 | * The segment offset needs to contain a RPL. Grr. -AK | ||
151 | * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) | ||
152 | */ | ||
153 | |||
154 | #define __USER32_CS 0x23 /* 4*8+3 */ | ||
155 | #define __USER_DS 0x2b /* 5*8+3 */ | ||
156 | #define __USER_CS 0x33 /* 6*8+3 */ | ||
157 | #define __USER32_DS __USER_DS | ||
158 | |||
159 | #define GDT_ENTRY_TSS 8 /* needs two entries */ | ||
160 | #define GDT_ENTRY_LDT 10 /* needs two entries */ | ||
161 | #define GDT_ENTRY_TLS_MIN 12 | ||
162 | #define GDT_ENTRY_TLS_MAX 14 | ||
163 | |||
164 | #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ | ||
165 | #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) | ||
166 | |||
167 | /* TLS indexes for 64bit - hardcoded in arch_prctl */ | ||
168 | #define FS_TLS 0 | ||
169 | #define GS_TLS 1 | ||
170 | |||
171 | #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) | ||
172 | #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) | ||
173 | |||
174 | #define GDT_ENTRIES 16 | ||
175 | |||
176 | #endif | ||
177 | |||
178 | #ifndef CONFIG_PARAVIRT | ||
179 | #define get_kernel_rpl() 0 | ||
180 | #endif | ||
181 | |||
182 | /* User mode is privilege level 3 */ | ||
183 | #define USER_RPL 0x3 | ||
184 | /* LDT segment has TI set, GDT has it cleared */ | ||
185 | #define SEGMENT_LDT 0x4 | ||
186 | #define SEGMENT_GDT 0x0 | ||
187 | |||
188 | /* Bottom two bits of selector give the ring privilege level */ | ||
189 | #define SEGMENT_RPL_MASK 0x3 | ||
190 | /* Bit 2 is table indicator (LDT/GDT) */ | ||
191 | #define SEGMENT_TI_MASK 0x4 | ||
192 | |||
193 | #define IDT_ENTRIES 256 | ||
194 | #define GDT_SIZE (GDT_ENTRIES * 8) | ||
195 | #define GDT_ENTRY_TLS_ENTRIES 3 | ||
196 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | ||
197 | |||
198 | #ifdef __KERNEL__ | ||
199 | #ifndef __ASSEMBLY__ | ||
200 | extern const char early_idt_handlers[IDT_ENTRIES][10]; | ||
201 | #endif | ||
202 | #endif | ||
203 | |||
5 | #endif | 204 | #endif |
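Aside (not part of the patch): every selector above is (GDT index * 8)
with the table indicator and RPL folded into the low three bits; on
32-bit, __USER_CS = 14 * 8 + 3 = 0x73. A small sketch of taking a
selector apart with the now-shared masks (helper names hypothetical):

	static inline int selector_rpl(unsigned short sel)
	{
		return sel & SEGMENT_RPL_MASK;			/* ring 0..3 */
	}

	static inline int selector_in_ldt(unsigned short sel)
	{
		return (sel & SEGMENT_TI_MASK) == SEGMENT_LDT;	/* TI bit */
	}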
diff --git a/include/asm-x86/segment_32.h b/include/asm-x86/segment_32.h deleted file mode 100644 index 597a47c2515f..000000000000 --- a/include/asm-x86/segment_32.h +++ /dev/null | |||
@@ -1,148 +0,0 @@ | |||
1 | #ifndef _ASM_SEGMENT_H | ||
2 | #define _ASM_SEGMENT_H | ||
3 | |||
4 | /* | ||
5 | * The layout of the per-CPU GDT under Linux: | ||
6 | * | ||
7 | * 0 - null | ||
8 | * 1 - reserved | ||
9 | * 2 - reserved | ||
10 | * 3 - reserved | ||
11 | * | ||
12 | * 4 - unused <==== new cacheline | ||
13 | * 5 - unused | ||
14 | * | ||
15 | * ------- start of TLS (Thread-Local Storage) segments: | ||
16 | * | ||
17 | * 6 - TLS segment #1 [ glibc's TLS segment ] | ||
18 | * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] | ||
19 | * 8 - TLS segment #3 | ||
20 | * 9 - reserved | ||
21 | * 10 - reserved | ||
22 | * 11 - reserved | ||
23 | * | ||
24 | * ------- start of kernel segments: | ||
25 | * | ||
26 | * 12 - kernel code segment <==== new cacheline | ||
27 | * 13 - kernel data segment | ||
28 | * 14 - default user CS | ||
29 | * 15 - default user DS | ||
30 | * 16 - TSS | ||
31 | * 17 - LDT | ||
32 | * 18 - PNPBIOS support (16->32 gate) | ||
33 | * 19 - PNPBIOS support | ||
34 | * 20 - PNPBIOS support | ||
35 | * 21 - PNPBIOS support | ||
36 | * 22 - PNPBIOS support | ||
37 | * 23 - APM BIOS support | ||
38 | * 24 - APM BIOS support | ||
39 | * 25 - APM BIOS support | ||
40 | * | ||
41 | * 26 - ESPFIX small SS | ||
42 | * 27 - per-cpu [ offset to per-cpu data area ] | ||
43 | * 28 - unused | ||
44 | * 29 - unused | ||
45 | * 30 - unused | ||
46 | * 31 - TSS for double fault handler | ||
47 | */ | ||
48 | #define GDT_ENTRY_TLS_ENTRIES 3 | ||
49 | #define GDT_ENTRY_TLS_MIN 6 | ||
50 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) | ||
51 | |||
52 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | ||
53 | |||
54 | #define GDT_ENTRY_DEFAULT_USER_CS 14 | ||
55 | #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) | ||
56 | |||
57 | #define GDT_ENTRY_DEFAULT_USER_DS 15 | ||
58 | #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) | ||
59 | |||
60 | #define GDT_ENTRY_KERNEL_BASE 12 | ||
61 | |||
62 | #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) | ||
63 | #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) | ||
64 | |||
65 | #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) | ||
66 | #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) | ||
67 | |||
68 | #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) | ||
69 | #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) | ||
70 | |||
71 | #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) | ||
72 | #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) | ||
73 | |||
74 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) | ||
75 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) | ||
76 | |||
77 | #define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) | ||
78 | #ifdef CONFIG_SMP | ||
79 | #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) | ||
80 | #else | ||
81 | #define __KERNEL_PERCPU 0 | ||
82 | #endif | ||
83 | |||
84 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 | ||
85 | |||
86 | /* | ||
87 | * The GDT has 32 entries | ||
88 | */ | ||
89 | #define GDT_ENTRIES 32 | ||
90 | #define GDT_SIZE (GDT_ENTRIES * 8) | ||
91 | |||
92 | /* Simple and small GDT entries for booting only */ | ||
93 | |||
94 | #define GDT_ENTRY_BOOT_CS 2 | ||
95 | #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) | ||
96 | |||
97 | #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) | ||
98 | #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) | ||
99 | |||
100 | /* The PnP BIOS entries in the GDT */ | ||
101 | #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) | ||
102 | #define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) | ||
103 | #define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) | ||
104 | #define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) | ||
105 | #define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) | ||
106 | |||
107 | /* The PnP BIOS selectors */ | ||
108 | #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ | ||
109 | #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ | ||
110 | #define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ | ||
111 | #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ | ||
112 | #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ | ||
113 | |||
114 | /* | ||
115 | * The interrupt descriptor table has room for 256 idt's, | ||
116 | * the global descriptor table is dependent on the number | ||
117 | * of tasks we can have.. | ||
118 | */ | ||
119 | #define IDT_ENTRIES 256 | ||
120 | |||
121 | /* Bottom two bits of selector give the ring privilege level */ | ||
122 | #define SEGMENT_RPL_MASK 0x3 | ||
123 | /* Bit 2 is table indicator (LDT/GDT) */ | ||
124 | #define SEGMENT_TI_MASK 0x4 | ||
125 | |||
126 | /* User mode is privilege level 3 */ | ||
127 | #define USER_RPL 0x3 | ||
128 | /* LDT segment has TI set, GDT has it cleared */ | ||
129 | #define SEGMENT_LDT 0x4 | ||
130 | #define SEGMENT_GDT 0x0 | ||
131 | |||
132 | #ifndef CONFIG_PARAVIRT | ||
133 | #define get_kernel_rpl() 0 | ||
134 | #endif | ||
135 | /* | ||
136 | * Matching rules for certain types of segments. | ||
137 | */ | ||
138 | |||
139 | /* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */ | ||
140 | #define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8) | ||
141 | |||
142 | /* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ | ||
143 | #define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) | ||
144 | |||
145 | /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ | ||
146 | #define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) | ||
147 | |||
148 | #endif | ||
diff --git a/include/asm-x86/segment_64.h b/include/asm-x86/segment_64.h deleted file mode 100644 index 04b8ab21328f..000000000000 --- a/include/asm-x86/segment_64.h +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | #ifndef _ASM_SEGMENT_H | ||
2 | #define _ASM_SEGMENT_H | ||
3 | |||
4 | #include <asm/cache.h> | ||
5 | |||
6 | /* Simple and small GDT entries for booting only */ | ||
7 | |||
8 | #define GDT_ENTRY_BOOT_CS 2 | ||
9 | #define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) | ||
10 | |||
11 | #define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) | ||
12 | #define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) | ||
13 | |||
14 | #define __KERNEL_CS 0x10 | ||
15 | #define __KERNEL_DS 0x18 | ||
16 | |||
17 | #define __KERNEL32_CS 0x08 | ||
18 | |||
19 | /* | ||
20 | * we cannot use the same code segment descriptor for user and kernel | ||
21 | * -- not even in the long flat mode, because of different DPL /kkeil | ||
22 | * The segment offset needs to contain a RPL. Grr. -AK | ||
23 | * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) | ||
24 | */ | ||
25 | |||
26 | #define __USER32_CS 0x23 /* 4*8+3 */ | ||
27 | #define __USER_DS 0x2b /* 5*8+3 */ | ||
28 | #define __USER_CS 0x33 /* 6*8+3 */ | ||
29 | #define __USER32_DS __USER_DS | ||
30 | |||
31 | #define GDT_ENTRY_TSS 8 /* needs two entries */ | ||
32 | #define GDT_ENTRY_LDT 10 /* needs two entries */ | ||
33 | #define GDT_ENTRY_TLS_MIN 12 | ||
34 | #define GDT_ENTRY_TLS_MAX 14 | ||
35 | |||
36 | #define GDT_ENTRY_TLS_ENTRIES 3 | ||
37 | |||
38 | #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ | ||
39 | #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) | ||
40 | |||
41 | /* TLS indexes for 64bit - hardcoded in arch_prctl */ | ||
42 | #define FS_TLS 0 | ||
43 | #define GS_TLS 1 | ||
44 | |||
45 | #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) | ||
46 | #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) | ||
47 | |||
48 | #define IDT_ENTRIES 256 | ||
49 | #define GDT_ENTRIES 16 | ||
50 | #define GDT_SIZE (GDT_ENTRIES * 8) | ||
51 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | ||
52 | |||
53 | #endif | ||
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h index 835c1d751a9f..ac96d3804d0c 100644 --- a/include/asm-x86/semaphore_32.h +++ b/include/asm-x86/semaphore_32.h | |||
@@ -83,10 +83,10 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem) | |||
83 | sema_init(sem, 0); | 83 | sema_init(sem, 0); |
84 | } | 84 | } |
85 | 85 | ||
86 | fastcall void __down_failed(void /* special register calling convention */); | 86 | extern asmregparm void __down_failed(atomic_t *count_ptr); |
87 | fastcall int __down_failed_interruptible(void /* params in registers */); | 87 | extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr); |
88 | fastcall int __down_failed_trylock(void /* params in registers */); | 88 | extern asmregparm int __down_failed_trylock(atomic_t *count_ptr); |
89 | fastcall void __up_wakeup(void /* special register calling convention */); | 89 | extern asmregparm void __up_wakeup(atomic_t *count_ptr); |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * This is ugly, but we want the default case to fall through. | 92 | * This is ugly, but we want the default case to fall through. |
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 24d786e07b49..071e054abd82 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h | |||
@@ -3,6 +3,13 @@ | |||
3 | 3 | ||
4 | #define COMMAND_LINE_SIZE 2048 | 4 | #define COMMAND_LINE_SIZE 2048 |
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | ||
7 | char *machine_specific_memory_setup(void); | ||
8 | #ifndef CONFIG_PARAVIRT | ||
9 | #define paravirt_post_allocator_init() do {} while (0) | ||
10 | #endif | ||
11 | #endif /* __ASSEMBLY__ */ | ||
12 | |||
6 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
7 | 14 | ||
8 | #ifdef __i386__ | 15 | #ifdef __i386__ |
@@ -51,9 +58,7 @@ void __init add_memory_region(unsigned long long start, | |||
51 | 58 | ||
52 | extern unsigned long init_pg_tables_end; | 59 | extern unsigned long init_pg_tables_end; |
53 | 60 | ||
54 | #ifndef CONFIG_PARAVIRT | 61 | |
55 | #define paravirt_post_allocator_init() do {} while (0) | ||
56 | #endif | ||
57 | 62 | ||
58 | #endif /* __i386__ */ | 63 | #endif /* __i386__ */ |
59 | #endif /* _SETUP */ | 64 | #endif /* _SETUP */ |
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index c047f9dc3423..681deade5f00 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h | |||
@@ -63,20 +63,20 @@ struct sigcontext { | |||
63 | unsigned short fs, __fsh; | 63 | unsigned short fs, __fsh; |
64 | unsigned short es, __esh; | 64 | unsigned short es, __esh; |
65 | unsigned short ds, __dsh; | 65 | unsigned short ds, __dsh; |
66 | unsigned long edi; | 66 | unsigned long di; |
67 | unsigned long esi; | 67 | unsigned long si; |
68 | unsigned long ebp; | 68 | unsigned long bp; |
69 | unsigned long esp; | 69 | unsigned long sp; |
70 | unsigned long ebx; | 70 | unsigned long bx; |
71 | unsigned long edx; | 71 | unsigned long dx; |
72 | unsigned long ecx; | 72 | unsigned long cx; |
73 | unsigned long eax; | 73 | unsigned long ax; |
74 | unsigned long trapno; | 74 | unsigned long trapno; |
75 | unsigned long err; | 75 | unsigned long err; |
76 | unsigned long eip; | 76 | unsigned long ip; |
77 | unsigned short cs, __csh; | 77 | unsigned short cs, __csh; |
78 | unsigned long eflags; | 78 | unsigned long flags; |
79 | unsigned long esp_at_signal; | 79 | unsigned long sp_at_signal; |
80 | unsigned short ss, __ssh; | 80 | unsigned short ss, __ssh; |
81 | struct _fpstate __user * fpstate; | 81 | struct _fpstate __user * fpstate; |
82 | unsigned long oldmask; | 82 | unsigned long oldmask; |
@@ -111,16 +111,16 @@ struct sigcontext { | |||
111 | unsigned long r13; | 111 | unsigned long r13; |
112 | unsigned long r14; | 112 | unsigned long r14; |
113 | unsigned long r15; | 113 | unsigned long r15; |
114 | unsigned long rdi; | 114 | unsigned long di; |
115 | unsigned long rsi; | 115 | unsigned long si; |
116 | unsigned long rbp; | 116 | unsigned long bp; |
117 | unsigned long rbx; | 117 | unsigned long bx; |
118 | unsigned long rdx; | 118 | unsigned long dx; |
119 | unsigned long rax; | 119 | unsigned long ax; |
120 | unsigned long rcx; | 120 | unsigned long cx; |
121 | unsigned long rsp; | 121 | unsigned long sp; |
122 | unsigned long rip; | 122 | unsigned long ip; |
123 | unsigned long eflags; /* RFLAGS */ | 123 | unsigned long flags; |
124 | unsigned short cs; | 124 | unsigned short cs; |
125 | unsigned short gs; | 125 | unsigned short gs; |
126 | unsigned short fs; | 126 | unsigned short fs; |
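Aside (not part of the patch): with the e/r prefixes gone, code that
only touches sigcontext fields compiles unchanged for either width. A
hedged sketch; the helper is hypothetical:

	static void show_fault_context(struct sigcontext *sc)
	{
		/* 'ip', 'sp' and 'flags' now name eip/esp/eflags on
		   32-bit and rip/rsp/rflags on 64-bit alike */
		printk("ip=%lx sp=%lx flags=%lx\n", sc->ip, sc->sp, sc->flags);
	}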
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 3d657038ab7c..6ffab4fd593a 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h | |||
@@ -48,20 +48,20 @@ struct sigcontext_ia32 { | |||
48 | unsigned short fs, __fsh; | 48 | unsigned short fs, __fsh; |
49 | unsigned short es, __esh; | 49 | unsigned short es, __esh; |
50 | unsigned short ds, __dsh; | 50 | unsigned short ds, __dsh; |
51 | unsigned int edi; | 51 | unsigned int di; |
52 | unsigned int esi; | 52 | unsigned int si; |
53 | unsigned int ebp; | 53 | unsigned int bp; |
54 | unsigned int esp; | 54 | unsigned int sp; |
55 | unsigned int ebx; | 55 | unsigned int bx; |
56 | unsigned int edx; | 56 | unsigned int dx; |
57 | unsigned int ecx; | 57 | unsigned int cx; |
58 | unsigned int eax; | 58 | unsigned int ax; |
59 | unsigned int trapno; | 59 | unsigned int trapno; |
60 | unsigned int err; | 60 | unsigned int err; |
61 | unsigned int eip; | 61 | unsigned int ip; |
62 | unsigned short cs, __csh; | 62 | unsigned short cs, __csh; |
63 | unsigned int eflags; | 63 | unsigned int flags; |
64 | unsigned int esp_at_signal; | 64 | unsigned int sp_at_signal; |
65 | unsigned short ss, __ssh; | 65 | unsigned short ss, __ssh; |
66 | unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ | 66 | unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ |
67 | unsigned int oldmask; | 67 | unsigned int oldmask; |
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index 987a422a2c78..aee7eca585ab 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h | |||
@@ -245,21 +245,14 @@ static __inline__ int sigfindinword(unsigned long word) | |||
245 | 245 | ||
246 | struct pt_regs; | 246 | struct pt_regs; |
247 | 247 | ||
248 | #define ptrace_signal_deliver(regs, cookie) \ | ||
249 | do { \ | ||
250 | if (current->ptrace & PT_DTRACE) { \ | ||
251 | current->ptrace &= ~PT_DTRACE; \ | ||
252 | (regs)->eflags &= ~TF_MASK; \ | ||
253 | } \ | ||
254 | } while (0) | ||
255 | |||
256 | #else /* __i386__ */ | 248 | #else /* __i386__ */ |
257 | 249 | ||
258 | #undef __HAVE_ARCH_SIG_BITOPS | 250 | #undef __HAVE_ARCH_SIG_BITOPS |
259 | 251 | ||
252 | #endif /* !__i386__ */ | ||
253 | |||
260 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | 254 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) |
261 | 255 | ||
262 | #endif /* !__i386__ */ | ||
263 | #endif /* __KERNEL__ */ | 256 | #endif /* __KERNEL__ */ |
264 | #endif /* __ASSEMBLY__ */ | 257 | #endif /* __ASSEMBLY__ */ |
265 | 258 | ||
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index e10b7affdfe5..56152e312287 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h | |||
@@ -1,51 +1,41 @@ | |||
1 | #ifndef __ASM_SMP_H | 1 | #ifndef __ASM_SMP_H |
2 | #define __ASM_SMP_H | 2 | #define __ASM_SMP_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/init.h> | ||
7 | |||
4 | /* | 8 | /* |
5 | * We need the APIC definitions automatically as part of 'smp.h' | 9 | * We need the APIC definitions automatically as part of 'smp.h' |
6 | */ | 10 | */ |
7 | #ifndef __ASSEMBLY__ | 11 | #ifdef CONFIG_X86_LOCAL_APIC |
8 | #include <linux/kernel.h> | 12 | # include <asm/mpspec.h> |
9 | #include <linux/threads.h> | 13 | # include <asm/apic.h> |
10 | #include <linux/cpumask.h> | 14 | # ifdef CONFIG_X86_IO_APIC |
15 | # include <asm/io_apic.h> | ||
16 | # endif | ||
11 | #endif | 17 | #endif |
12 | 18 | ||
13 | #if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__) | 19 | extern cpumask_t cpu_callout_map; |
14 | #include <linux/bitops.h> | 20 | extern cpumask_t cpu_callin_map; |
15 | #include <asm/mpspec.h> | ||
16 | #include <asm/apic.h> | ||
17 | #ifdef CONFIG_X86_IO_APIC | ||
18 | #include <asm/io_apic.h> | ||
19 | #endif | ||
20 | #endif | ||
21 | 21 | ||
22 | #define BAD_APICID 0xFFu | 22 | extern int smp_num_siblings; |
23 | #ifdef CONFIG_SMP | 23 | extern unsigned int num_processors; |
24 | #ifndef __ASSEMBLY__ | ||
25 | 24 | ||
26 | /* | ||
27 | * Private routines/data | ||
28 | */ | ||
29 | |||
30 | extern void smp_alloc_memory(void); | 25 | extern void smp_alloc_memory(void); |
31 | extern int pic_mode; | 26 | extern void lock_ipi_call_lock(void); |
32 | extern int smp_num_siblings; | 27 | extern void unlock_ipi_call_lock(void); |
33 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
34 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
35 | 28 | ||
36 | extern void (*mtrr_hook) (void); | 29 | extern void (*mtrr_hook) (void); |
37 | extern void zap_low_mappings (void); | 30 | extern void zap_low_mappings (void); |
38 | extern void lock_ipi_call_lock(void); | ||
39 | extern void unlock_ipi_call_lock(void); | ||
40 | 31 | ||
41 | #define MAX_APICID 256 | ||
42 | extern u8 __initdata x86_cpu_to_apicid_init[]; | 32 | extern u8 __initdata x86_cpu_to_apicid_init[]; |
43 | extern void *x86_cpu_to_apicid_ptr; | 33 | extern void *x86_cpu_to_apicid_early_ptr; |
44 | DECLARE_PER_CPU(u8, x86_cpu_to_apicid); | ||
45 | |||
46 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
47 | 34 | ||
48 | extern void set_cpu_sibling_map(int cpu); | 35 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); |
36 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
37 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
38 | DECLARE_PER_CPU(u8, x86_cpu_to_apicid); | ||
49 | 39 | ||
50 | #ifdef CONFIG_HOTPLUG_CPU | 40 | #ifdef CONFIG_HOTPLUG_CPU |
51 | extern void cpu_exit_clear(void); | 41 | extern void cpu_exit_clear(void); |
@@ -53,6 +43,9 @@ extern void cpu_uninit(void); | |||
53 | extern void remove_siblinginfo(int cpu); | 43 | extern void remove_siblinginfo(int cpu); |
54 | #endif | 44 | #endif |
55 | 45 | ||
46 | /* Globals due to paravirt */ | ||
47 | extern void set_cpu_sibling_map(int cpu); | ||
48 | |||
56 | struct smp_ops | 49 | struct smp_ops |
57 | { | 50 | { |
58 | void (*smp_prepare_boot_cpu)(void); | 51 | void (*smp_prepare_boot_cpu)(void); |
@@ -67,6 +60,7 @@ struct smp_ops | |||
67 | int wait); | 60 | int wait); |
68 | }; | 61 | }; |
69 | 62 | ||
63 | #ifdef CONFIG_SMP | ||
70 | extern struct smp_ops smp_ops; | 64 | extern struct smp_ops smp_ops; |
71 | 65 | ||
72 | static inline void smp_prepare_boot_cpu(void) | 66 | static inline void smp_prepare_boot_cpu(void) |
@@ -107,10 +101,12 @@ int native_cpu_up(unsigned int cpunum); | |||
107 | void native_smp_cpus_done(unsigned int max_cpus); | 101 | void native_smp_cpus_done(unsigned int max_cpus); |
108 | 102 | ||
109 | #ifndef CONFIG_PARAVIRT | 103 | #ifndef CONFIG_PARAVIRT |
110 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) \ | 104 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) |
111 | do { } while (0) | ||
112 | #endif | 105 | #endif |
113 | 106 | ||
107 | extern int __cpu_disable(void); | ||
108 | extern void __cpu_die(unsigned int cpu); | ||
109 | |||
114 | /* | 110 | /* |
115 | * This function is needed by all SMP systems. It must _always_ be valid | 111 | * This function is needed by all SMP systems. It must _always_ be valid |
116 | * from the initial startup. We map APIC_BASE very early in page_setup(), | 112 | * from the initial startup. We map APIC_BASE very early in page_setup(), |
@@ -119,9 +115,11 @@ do { } while (0) | |||
119 | DECLARE_PER_CPU(int, cpu_number); | 115 | DECLARE_PER_CPU(int, cpu_number); |
120 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) | 116 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) |
121 | 117 | ||
122 | extern cpumask_t cpu_callout_map; | 118 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) |
123 | extern cpumask_t cpu_callin_map; | 119 | |
124 | extern cpumask_t cpu_possible_map; | 120 | extern int safe_smp_processor_id(void); |
121 | |||
122 | void __cpuinit smp_store_cpu_info(int id); | ||
125 | 123 | ||
126 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | 124 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ |
127 | static inline int num_booting_cpus(void) | 125 | static inline int num_booting_cpus(void) |
@@ -129,56 +127,39 @@ static inline int num_booting_cpus(void) | |||
129 | return cpus_weight(cpu_callout_map); | 127 | return cpus_weight(cpu_callout_map); |
130 | } | 128 | } |
131 | 129 | ||
132 | extern int safe_smp_processor_id(void); | ||
133 | extern int __cpu_disable(void); | ||
134 | extern void __cpu_die(unsigned int cpu); | ||
135 | extern unsigned int num_processors; | ||
136 | |||
137 | void __cpuinit smp_store_cpu_info(int id); | ||
138 | |||
139 | #endif /* !__ASSEMBLY__ */ | ||
140 | |||
141 | #else /* CONFIG_SMP */ | 130 | #else /* CONFIG_SMP */ |
142 | 131 | ||
143 | #define safe_smp_processor_id() 0 | 132 | #define safe_smp_processor_id() 0 |
144 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid | 133 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid |
145 | 134 | ||
146 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | 135 | #endif /* !CONFIG_SMP */ |
147 | |||
148 | #endif /* CONFIG_SMP */ | ||
149 | |||
150 | #ifndef __ASSEMBLY__ | ||
151 | 136 | ||
152 | #ifdef CONFIG_X86_LOCAL_APIC | 137 | #ifdef CONFIG_X86_LOCAL_APIC |
153 | 138 | ||
154 | #ifdef APIC_DEFINITION | 139 | static __inline int logical_smp_processor_id(void) |
140 | { | ||
141 | /* we don't want to mark this access volatile - bad code generation */ | ||
142 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
143 | } | ||
144 | |||
145 | # ifdef APIC_DEFINITION | ||
155 | extern int hard_smp_processor_id(void); | 146 | extern int hard_smp_processor_id(void); |
156 | #else | 147 | # else |
157 | #include <mach_apicdef.h> | 148 | # include <mach_apicdef.h> |
158 | static inline int hard_smp_processor_id(void) | 149 | static inline int hard_smp_processor_id(void) |
159 | { | 150 | { |
160 | /* we don't want to mark this access volatile - bad code generation */ | 151 | /* we don't want to mark this access volatile - bad code generation */ |
161 | return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); | 152 | return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); |
162 | } | 153 | } |
163 | #endif /* APIC_DEFINITION */ | 154 | # endif /* APIC_DEFINITION */ |
164 | 155 | ||
165 | #else /* CONFIG_X86_LOCAL_APIC */ | 156 | #else /* CONFIG_X86_LOCAL_APIC */ |
166 | 157 | ||
167 | #ifndef CONFIG_SMP | 158 | # ifndef CONFIG_SMP |
168 | #define hard_smp_processor_id() 0 | 159 | # define hard_smp_processor_id() 0 |
169 | #endif | 160 | # endif |
170 | 161 | ||
171 | #endif /* CONFIG_X86_LOCAL_APIC */ | 162 | #endif /* CONFIG_X86_LOCAL_APIC */ |
172 | 163 | ||
173 | extern u8 apicid_2_node[]; | 164 | #endif /* !__ASSEMBLY__ */ |
174 | |||
175 | #ifdef CONFIG_X86_LOCAL_APIC | ||
176 | static __inline int logical_smp_processor_id(void) | ||
177 | { | ||
178 | /* we don't want to mark this access volatile - bad code generation */ | ||
179 | return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); | ||
180 | } | ||
181 | #endif | ||
182 | #endif | ||
183 | |||
184 | #endif | 165 | #endif |
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h index ab612b0ff270..e0a75519ad21 100644 --- a/include/asm-x86/smp_64.h +++ b/include/asm-x86/smp_64.h | |||
@@ -1,130 +1,101 @@ | |||
1 | #ifndef __ASM_SMP_H | 1 | #ifndef __ASM_SMP_H |
2 | #define __ASM_SMP_H | 2 | #define __ASM_SMP_H |
3 | 3 | ||
4 | /* | ||
5 | * We need the APIC definitions automatically as part of 'smp.h' | ||
6 | */ | ||
7 | #include <linux/threads.h> | ||
8 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
9 | #include <linux/bitops.h> | ||
10 | #include <linux/init.h> | 5 | #include <linux/init.h> |
11 | extern int disable_apic; | ||
12 | 6 | ||
13 | #include <asm/mpspec.h> | 7 | /* |
8 | * We need the APIC definitions automatically as part of 'smp.h' | ||
9 | */ | ||
14 | #include <asm/apic.h> | 10 | #include <asm/apic.h> |
15 | #include <asm/io_apic.h> | 11 | #include <asm/io_apic.h> |
16 | #include <asm/thread_info.h> | 12 | #include <asm/mpspec.h> |
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | |||
20 | #include <asm/pda.h> | 13 | #include <asm/pda.h> |
14 | #include <asm/thread_info.h> | ||
21 | 15 | ||
22 | struct pt_regs; | ||
23 | |||
24 | extern cpumask_t cpu_present_mask; | ||
25 | extern cpumask_t cpu_possible_map; | ||
26 | extern cpumask_t cpu_online_map; | ||
27 | extern cpumask_t cpu_callout_map; | 16 | extern cpumask_t cpu_callout_map; |
28 | extern cpumask_t cpu_initialized; | 17 | extern cpumask_t cpu_initialized; |
29 | 18 | ||
30 | /* | 19 | extern int smp_num_siblings; |
31 | * Private routines/data | 20 | extern unsigned int num_processors; |
32 | */ | 21 | |
33 | |||
34 | extern void smp_alloc_memory(void); | 22 | extern void smp_alloc_memory(void); |
35 | extern volatile unsigned long smp_invalidate_needed; | ||
36 | extern void lock_ipi_call_lock(void); | 23 | extern void lock_ipi_call_lock(void); |
37 | extern void unlock_ipi_call_lock(void); | 24 | extern void unlock_ipi_call_lock(void); |
38 | extern int smp_num_siblings; | 25 | |
39 | extern void smp_send_reschedule(int cpu); | ||
40 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | 26 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), |
41 | void *info, int wait); | 27 | void *info, int wait); |
42 | 28 | ||
43 | /* | 29 | extern u16 __initdata x86_cpu_to_apicid_init[]; |
44 | * cpu_sibling_map and cpu_core_map now live | 30 | extern u16 __initdata x86_bios_cpu_apicid_init[]; |
45 | * in the per cpu area | 31 | extern void *x86_cpu_to_apicid_early_ptr; |
46 | * | 32 | extern void *x86_bios_cpu_apicid_early_ptr; |
47 | * extern cpumask_t cpu_sibling_map[NR_CPUS]; | 33 | |
48 | * extern cpumask_t cpu_core_map[NR_CPUS]; | ||
49 | */ | ||
50 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | 34 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); |
51 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | 35 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); |
52 | DECLARE_PER_CPU(u8, cpu_llc_id); | 36 | DECLARE_PER_CPU(u16, cpu_llc_id); |
53 | 37 | DECLARE_PER_CPU(u16, x86_cpu_to_apicid); | |
54 | #define SMP_TRAMPOLINE_BASE 0x6000 | 38 | DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); |
55 | |||
56 | /* | ||
57 | * On x86 all CPUs are mapped 1:1 to the APIC space. | ||
58 | * This simplifies scheduling and IPI sending and | ||
59 | * compresses data structures. | ||
60 | */ | ||
61 | 39 | ||
62 | static inline int num_booting_cpus(void) | 40 | static inline int cpu_present_to_apicid(int mps_cpu) |
63 | { | 41 | { |
64 | return cpus_weight(cpu_callout_map); | 42 | if (cpu_present(mps_cpu)) |
43 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | ||
44 | else | ||
45 | return BAD_APICID; | ||
65 | } | 46 | } |
66 | 47 | ||
67 | #define raw_smp_processor_id() read_pda(cpunumber) | 48 | #ifdef CONFIG_SMP |
49 | |||
50 | #define SMP_TRAMPOLINE_BASE 0x6000 | ||
68 | 51 | ||
69 | extern int __cpu_disable(void); | 52 | extern int __cpu_disable(void); |
70 | extern void __cpu_die(unsigned int cpu); | 53 | extern void __cpu_die(unsigned int cpu); |
71 | extern void prefill_possible_map(void); | 54 | extern void prefill_possible_map(void); |
72 | extern unsigned num_processors; | ||
73 | extern unsigned __cpuinitdata disabled_cpus; | 55 | extern unsigned __cpuinitdata disabled_cpus; |
74 | 56 | ||
75 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | 57 | #define raw_smp_processor_id() read_pda(cpunumber) |
76 | 58 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | |
77 | #endif /* CONFIG_SMP */ | ||
78 | 59 | ||
79 | #define safe_smp_processor_id() smp_processor_id() | 60 | #define stack_smp_processor_id() \ |
80 | 61 | ({ \ | |
81 | static inline int hard_smp_processor_id(void) | 62 | struct thread_info *ti; \ |
82 | { | 63 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ |
83 | /* we don't want to mark this access volatile - bad code generation */ | 64 | ti->cpu; \ |
84 | return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); | 65 | }) |
85 | } | ||
86 | 66 | ||
87 | /* | 67 | /* |
88 | * Some lowlevel functions might want to know about | 68 | * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies |
89 | * the real APIC ID <-> CPU # mapping. | 69 | * scheduling and IPI sending and compresses data structures. |
90 | */ | 70 | */ |
91 | extern u8 __initdata x86_cpu_to_apicid_init[]; | 71 | static inline int num_booting_cpus(void) |
92 | extern void *x86_cpu_to_apicid_ptr; | ||
93 | DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */ | ||
94 | extern u8 bios_cpu_apicid[]; | ||
95 | |||
96 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
97 | { | 72 | { |
98 | if (mps_cpu < NR_CPUS) | 73 | return cpus_weight(cpu_callout_map); |
99 | return (int)bios_cpu_apicid[mps_cpu]; | ||
100 | else | ||
101 | return BAD_APICID; | ||
102 | } | 74 | } |
103 | 75 | ||
104 | #ifndef CONFIG_SMP | 76 | extern void smp_send_reschedule(int cpu); |
77 | |||
78 | #else /* CONFIG_SMP */ | ||
79 | |||
80 | extern unsigned int boot_cpu_id; | ||
81 | #define cpu_physical_id(cpu) boot_cpu_id | ||
105 | #define stack_smp_processor_id() 0 | 82 | #define stack_smp_processor_id() 0 |
106 | #define cpu_logical_map(x) (x) | 83 | |
107 | #else | 84 | #endif /* !CONFIG_SMP */ |
108 | #include <asm/thread_info.h> | 85 | |
109 | #define stack_smp_processor_id() \ | 86 | #define safe_smp_processor_id() smp_processor_id() |
110 | ({ \ | ||
111 | struct thread_info *ti; \ | ||
112 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | ||
113 | ti->cpu; \ | ||
114 | }) | ||
115 | #endif | ||
116 | 87 | ||
117 | static __inline int logical_smp_processor_id(void) | 88 | static __inline int logical_smp_processor_id(void) |
118 | { | 89 | { |
119 | /* we don't want to mark this access volatile - bad code generation */ | 90 | /* we don't want to mark this access volatile - bad code generation */ |
120 | return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); | 91 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); |
92 | } | ||
93 | |||
94 | static inline int hard_smp_processor_id(void) | ||
95 | { | ||
96 | /* we don't want to mark this access volatile - bad code generation */ | ||
97 | return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); | ||
121 | } | 98 | } |
122 | 99 | ||
123 | #ifdef CONFIG_SMP | ||
124 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
125 | #else | ||
126 | extern unsigned int boot_cpu_id; | ||
127 | #define cpu_physical_id(cpu) boot_cpu_id | ||
128 | #endif /* !CONFIG_SMP */ | ||
129 | #endif | 100 | #endif |
130 | 101 | ||
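Aside (not part of the patch): stack_smp_processor_id() works because
struct thread_info sits at the base of the power-of-two-sized kernel
stack, so masking %rsp with CURRENT_MASK recovers it even before the
PDA is usable. Roughly equivalent C, for illustration only (the
function name is hypothetical):

	static inline int stack_cpu_number(void)
	{
		unsigned long sp;
		struct thread_info *ti;

		asm("movq %%rsp, %0" : "=r" (sp));
		ti = (struct thread_info *)(sp & CURRENT_MASK);
		return ti->cpu;
	}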
diff --git a/include/asm-x86/socket.h b/include/asm-x86/socket.h index 99ca648b94c5..80af9c4ccad7 100644 --- a/include/asm-x86/socket.h +++ b/include/asm-x86/socket.h | |||
@@ -52,4 +52,6 @@ | |||
52 | #define SO_TIMESTAMPNS 35 | 52 | #define SO_TIMESTAMPNS 35 |
53 | #define SCM_TIMESTAMPNS SO_TIMESTAMPNS | 53 | #define SCM_TIMESTAMPNS SO_TIMESTAMPNS |
54 | 54 | ||
55 | #define SO_MARK 36 | ||
56 | |||
55 | #endif /* _ASM_SOCKET_H */ | 57 | #endif /* _ASM_SOCKET_H */ |
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index 3f203b1d9ee8..fa58cd55411a 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h | |||
@@ -1,5 +1,34 @@ | |||
1 | #ifndef _ASM_X86_SPARSEMEM_H | ||
2 | #define _ASM_X86_SPARSEMEM_H | ||
3 | |||
4 | #ifdef CONFIG_SPARSEMEM | ||
5 | /* | ||
6 | * generic non-linear memory support: | ||
7 | * | ||
8 | * 1) we will not split memory into more chunks than will fit into the flags | ||
9 | * field of the struct page | ||
10 | * | ||
11 | * SECTION_SIZE_BITS 2^n: size of each section | ||
12 | * MAX_PHYSADDR_BITS 2^n: max size of physical address space | ||
13 | * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space | ||
14 | * | ||
15 | */ | ||
16 | |||
1 | #ifdef CONFIG_X86_32 | 17 | #ifdef CONFIG_X86_32 |
2 | # include "sparsemem_32.h" | 18 | # ifdef CONFIG_X86_PAE |
3 | #else | 19 | # define SECTION_SIZE_BITS 30 |
4 | # include "sparsemem_64.h" | 20 | # define MAX_PHYSADDR_BITS 36 |
21 | # define MAX_PHYSMEM_BITS 36 | ||
22 | # else | ||
23 | # define SECTION_SIZE_BITS 26 | ||
24 | # define MAX_PHYSADDR_BITS 32 | ||
25 | # define MAX_PHYSMEM_BITS 32 | ||
26 | # endif | ||
27 | #else /* CONFIG_X86_32 */ | ||
28 | # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ | ||
29 | # define MAX_PHYSADDR_BITS 40 | ||
30 | # define MAX_PHYSMEM_BITS 40 | ||
31 | #endif | ||
32 | |||
33 | #endif /* CONFIG_SPARSEMEM */ | ||
5 | #endif | 34 | #endif |
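Aside (not part of the patch): the constants work out to:

	/*
	 * 64-bit:       2^27 B (128 MB) sections in a 2^40 B (1 TB) space
	 *               -> at most 2^(40 - 27) = 8192 sections
	 * 32-bit PAE:   2^30 B (1 GB) sections in a 2^36 B (64 GB) space
	 *               -> at most 64 sections
	 * 32-bit !PAE:  2^26 B (64 MB) sections in a 2^32 B (4 GB) space
	 *               -> at most 64 sections
	 */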
diff --git a/include/asm-x86/sparsemem_32.h b/include/asm-x86/sparsemem_32.h deleted file mode 100644 index cfeed990585f..000000000000 --- a/include/asm-x86/sparsemem_32.h +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | #ifndef _I386_SPARSEMEM_H | ||
2 | #define _I386_SPARSEMEM_H | ||
3 | #ifdef CONFIG_SPARSEMEM | ||
4 | |||
5 | /* | ||
6 | * generic non-linear memory support: | ||
7 | * | ||
8 | * 1) we will not split memory into more chunks than will fit into the | ||
9 | * flags field of the struct page | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * SECTION_SIZE_BITS 2^N: how big each section will be | ||
14 | * MAX_PHYSADDR_BITS 2^N: how much physical address space we have | ||
15 | * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space | ||
16 | */ | ||
17 | #ifdef CONFIG_X86_PAE | ||
18 | #define SECTION_SIZE_BITS 30 | ||
19 | #define MAX_PHYSADDR_BITS 36 | ||
20 | #define MAX_PHYSMEM_BITS 36 | ||
21 | #else | ||
22 | #define SECTION_SIZE_BITS 26 | ||
23 | #define MAX_PHYSADDR_BITS 32 | ||
24 | #define MAX_PHYSMEM_BITS 32 | ||
25 | #endif | ||
26 | |||
27 | /* XXX: FIXME -- wli */ | ||
28 | #define kern_addr_valid(kaddr) (0) | ||
29 | |||
30 | #endif /* CONFIG_SPARSEMEM */ | ||
31 | #endif /* _I386_SPARSEMEM_H */ | ||
diff --git a/include/asm-x86/sparsemem_64.h b/include/asm-x86/sparsemem_64.h deleted file mode 100644 index dabb16714a71..000000000000 --- a/include/asm-x86/sparsemem_64.h +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | #ifndef _ASM_X86_64_SPARSEMEM_H | ||
2 | #define _ASM_X86_64_SPARSEMEM_H 1 | ||
3 | |||
4 | #ifdef CONFIG_SPARSEMEM | ||
5 | |||
6 | /* | ||
7 | * generic non-linear memory support: | ||
8 | * | ||
9 | * 1) we will not split memory into more chunks than will fit into the flags | ||
10 | * field of the struct page | ||
11 | * | ||
12 | * SECTION_SIZE_BITS 2^n: size of each section | ||
13 | * MAX_PHYSADDR_BITS 2^n: max size of physical address space | ||
14 | * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ | ||
19 | #define MAX_PHYSADDR_BITS 40 | ||
20 | #define MAX_PHYSMEM_BITS 40 | ||
21 | |||
22 | extern int early_pfn_to_nid(unsigned long pfn); | ||
23 | |||
24 | #endif /* CONFIG_SPARSEMEM */ | ||
25 | |||
26 | #endif /* _ASM_X86_64_SPARSEMEM_H */ | ||
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index d74d85e71dcb..23804c1890ff 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h | |||
@@ -1,5 +1,296 @@ | |||
1 | #ifndef _X86_SPINLOCK_H_ | ||
2 | #define _X86_SPINLOCK_H_ | ||
3 | |||
4 | #include <asm/atomic.h> | ||
5 | #include <asm/rwlock.h> | ||
6 | #include <asm/page.h> | ||
7 | #include <asm/processor.h> | ||
8 | #include <linux/compiler.h> | ||
9 | |||
10 | /* | ||
11 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
12 | * | ||
13 | * Simple spin lock operations. There are two variants, one clears IRQ's | ||
14 | * on the local processor, one does not. | ||
15 | * | ||
16 | * These are fair FIFO ticket locks, which are currently limited to 256 | ||
17 | * CPUs. | ||
18 | * | ||
19 | * (the type definitions are in asm/spinlock_types.h) | ||
20 | */ | ||
21 | |||
1 | #ifdef CONFIG_X86_32 | 22 | #ifdef CONFIG_X86_32 |
2 | # include "spinlock_32.h" | 23 | typedef char _slock_t; |
24 | # define LOCK_INS_DEC "decb" | ||
25 | # define LOCK_INS_XCH "xchgb" | ||
26 | # define LOCK_INS_MOV "movb" | ||
27 | # define LOCK_INS_CMP "cmpb" | ||
28 | # define LOCK_PTR_REG "a" | ||
3 | #else | 29 | #else |
4 | # include "spinlock_64.h" | 30 | typedef int _slock_t; |
31 | # define LOCK_INS_DEC "decl" | ||
32 | # define LOCK_INS_XCH "xchgl" | ||
33 | # define LOCK_INS_MOV "movl" | ||
34 | # define LOCK_INS_CMP "cmpl" | ||
35 | # define LOCK_PTR_REG "D" | ||
36 | #endif | ||
37 | |||
38 | #if defined(CONFIG_X86_32) && \ | ||
39 | (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)) | ||
40 | /* | ||
41 | * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock | ||
42 | * (PPro errata 66, 92) | ||
43 | */ | ||
44 | # define UNLOCK_LOCK_PREFIX LOCK_PREFIX | ||
45 | #else | ||
46 | # define UNLOCK_LOCK_PREFIX | ||
47 | #endif | ||
48 | |||
49 | /* | ||
50 | * Ticket locks are conceptually two parts, one indicating the current head of | ||
51 | * the queue, and the other indicating the current tail. The lock is acquired | ||
52 | * by atomically noting the tail and incrementing it by one (thus adding | ||
53 | * ourself to the queue and noting our position), then waiting until the head | ||
54 | * becomes equal to the initial value of the tail. | ||
55 | * | ||
56 | * We use an xadd covering *both* parts of the lock, to increment the tail and | ||
57 | * also load the position of the head, which takes care of memory ordering | ||
58 | * issues and should be optimal for the uncontended case. Note the tail must be | ||
59 | * in the high part, because a wide xadd increment of the low part would carry | ||
60 | * up and contaminate the high part. | ||
61 | * | ||
62 | * With fewer than 2^8 possible CPUs, we can use x86's partial registers to | ||
63 | * save some instructions and make the code more elegant. There really isn't | ||
64 | * much between them in performance though, especially as locks are out of line. | ||
65 | */ | ||
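/*
 * Editorial sketch (not part of the patch): the NR_CPUS < 256 fastpath
 * below as pseudo-C. The load of the old tail and the increment are a
 * single atomic xaddw in the real code; written out in steps:
 *
 *	unsigned char me = lock->slock >> 8;	take a ticket (old tail)
 *	lock->slock += 0x0100;			advance the tail
 *	while ((lock->slock & 0xff) != me)	wait until head == ticket
 *		cpu_relax();
 */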
66 | #if (NR_CPUS < 256) | ||
67 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | ||
68 | { | ||
69 | int tmp = *(volatile signed int *)(&(lock)->slock); | ||
70 | |||
71 | return (((tmp >> 8) & 0xff) != (tmp & 0xff)); | ||
72 | } | ||
73 | |||
74 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | ||
75 | { | ||
76 | int tmp = *(volatile signed int *)(&(lock)->slock); | ||
77 | |||
78 | return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; | ||
79 | } | ||
80 | |||
81 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
82 | { | ||
83 | short inc = 0x0100; | ||
84 | |||
85 | __asm__ __volatile__ ( | ||
86 | LOCK_PREFIX "xaddw %w0, %1\n" | ||
87 | "1:\t" | ||
88 | "cmpb %h0, %b0\n\t" | ||
89 | "je 2f\n\t" | ||
90 | "rep ; nop\n\t" | ||
91 | "movb %1, %b0\n\t" | ||
92 | /* don't need lfence here, because loads are in-order */ | ||
93 | "jmp 1b\n" | ||
94 | "2:" | ||
95 | :"+Q" (inc), "+m" (lock->slock) | ||
96 | : | ||
97 | :"memory", "cc"); | ||
98 | } | ||
99 | |||
100 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | ||
101 | |||
102 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
103 | { | ||
104 | int tmp; | ||
105 | short new; | ||
106 | |||
107 | asm volatile( | ||
108 | "movw %2,%w0\n\t" | ||
109 | "cmpb %h0,%b0\n\t" | ||
110 | "jne 1f\n\t" | ||
111 | "movw %w0,%w1\n\t" | ||
112 | "incb %h1\n\t" | ||
113 | "lock ; cmpxchgw %w1,%2\n\t" | ||
114 | "1:" | ||
115 | "sete %b1\n\t" | ||
116 | "movzbl %b1,%0\n\t" | ||
117 | :"=&a" (tmp), "=Q" (new), "+m" (lock->slock) | ||
118 | : | ||
119 | : "memory", "cc"); | ||
120 | |||
121 | return tmp; | ||
122 | } | ||
123 | |||
124 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
125 | { | ||
126 | __asm__ __volatile__( | ||
127 | UNLOCK_LOCK_PREFIX "incb %0" | ||
128 | :"+m" (lock->slock) | ||
129 | : | ||
130 | :"memory", "cc"); | ||
131 | } | ||
132 | #else | ||
133 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | ||
134 | { | ||
135 | int tmp = *(volatile signed int *)(&(lock)->slock); | ||
136 | |||
137 | return (((tmp >> 16) & 0xffff) != (tmp & 0xffff)); | ||
138 | } | ||
139 | |||
140 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | ||
141 | { | ||
142 | int tmp = *(volatile signed int *)(&(lock)->slock); | ||
143 | |||
144 | return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; | ||
145 | } | ||
146 | |||
147 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
148 | { | ||
149 | int inc = 0x00010000; | ||
150 | int tmp; | ||
151 | |||
152 | __asm__ __volatile__ ( | ||
153 | "lock ; xaddl %0, %1\n" | ||
154 | "movzwl %w0, %2\n\t" | ||
155 | "shrl $16, %0\n\t" | ||
156 | "1:\t" | ||
157 | "cmpl %0, %2\n\t" | ||
158 | "je 2f\n\t" | ||
159 | "rep ; nop\n\t" | ||
160 | "movzwl %1, %2\n\t" | ||
161 | /* don't need lfence here, because loads are in-order */ | ||
162 | "jmp 1b\n" | ||
163 | "2:" | ||
164 | :"+Q" (inc), "+m" (lock->slock), "=r" (tmp) | ||
165 | : | ||
166 | :"memory", "cc"); | ||
167 | } | ||
168 | |||
169 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | ||
170 | |||
171 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
172 | { | ||
173 | int tmp; | ||
174 | int new; | ||
175 | |||
176 | asm volatile( | ||
177 | "movl %2,%0\n\t" | ||
178 | "movl %0,%1\n\t" | ||
179 | "roll $16, %0\n\t" | ||
180 | "cmpl %0,%1\n\t" | ||
181 | "jne 1f\n\t" | ||
182 | "addl $0x00010000, %1\n\t" | ||
183 | "lock ; cmpxchgl %1,%2\n\t" | ||
184 | "1:" | ||
185 | "sete %b1\n\t" | ||
186 | "movzbl %b1,%0\n\t" | ||
187 | :"=&a" (tmp), "=r" (new), "+m" (lock->slock) | ||
188 | : | ||
189 | : "memory", "cc"); | ||
190 | |||
191 | return tmp; | ||
192 | } | ||
193 | |||
194 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
195 | { | ||
196 | __asm__ __volatile__( | ||
197 | UNLOCK_LOCK_PREFIX "incw %0" | ||
198 | :"+m" (lock->slock) | ||
199 | : | ||
200 | :"memory", "cc"); | ||
201 | } | ||
202 | #endif | ||
203 | |||
204 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | ||
205 | { | ||
206 | while (__raw_spin_is_locked(lock)) | ||
207 | cpu_relax(); | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Read-write spinlocks, allowing multiple readers | ||
212 | * but only one writer. | ||
213 | * | ||
214 | * NOTE! it is quite common to have readers in interrupts | ||
215 | * but no interrupt writers. For those circumstances we | ||
216 | * can "mix" irq-safe locks - any writer needs to get a | ||
217 | * irq-safe write-lock, but readers can get non-irqsafe | ||
218 | * read-locks. | ||
219 | * | ||
220 | * On x86, we implement read-write locks as a 32-bit counter | ||
221 | * with the high bit (sign) being the "contended" bit. | ||
222 | */ | ||
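Concretely, assuming RW_LOCK_BIAS is 0x01000000 (its value in <asm-x86/rwlock.h> in this tree), the counter states decode as below; these helpers are only an illustration of the encoding, not kernel API:

	#define RW_LOCK_BIAS 0x01000000		/* assumed, from asm/rwlock.h */

	static int rw_unlocked(int count)  { return count == RW_LOCK_BIAS; }
	static int rw_nreaders(int count)  { return RW_LOCK_BIAS - count; }	/* valid while count > 0 */
	static int rw_writer(int count)    { return count == 0; }
	static int rw_contended(int count) { return count < 0; }	/* sign bit = "contended" */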
223 | |||
224 | /** | ||
225 | * read_can_lock - would read_trylock() succeed? | ||
226 | * @lock: the rwlock in question. | ||
227 | */ | ||
228 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) | ||
229 | { | ||
230 | return (int)(lock)->lock > 0; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * write_can_lock - would write_trylock() succeed? | ||
235 | * @lock: the rwlock in question. | ||
236 | */ | ||
237 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | ||
238 | { | ||
239 | return (lock)->lock == RW_LOCK_BIAS; | ||
240 | } | ||
241 | |||
242 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
243 | { | ||
244 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" | ||
245 | "jns 1f\n" | ||
246 | "call __read_lock_failed\n\t" | ||
247 | "1:\n" | ||
248 | ::LOCK_PTR_REG (rw) : "memory"); | ||
249 | } | ||
250 | |||
251 | static inline void __raw_write_lock(raw_rwlock_t *rw) | ||
252 | { | ||
253 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" | ||
254 | "jz 1f\n" | ||
255 | "call __write_lock_failed\n\t" | ||
256 | "1:\n" | ||
257 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); | ||
258 | } | ||
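__read_lock_failed and __write_lock_failed are out-of-line asm slow paths (under arch/x86/lib/); in spirit, the read side undoes its failed decrement, waits for the writer to go away, and retries. A hedged C approximation only -- the real helper tests the flags of its own atomic decrement rather than re-reading the counter, so this is not race-for-race identical:

	static void read_lock_failed(atomic_t *count)
	{
		do {
			atomic_inc(count);		/* undo the failed dec  */
			while (atomic_read(count) < 1)
				cpu_relax();		/* writer still present */
			atomic_dec(count);		/* try again            */
		} while (atomic_read(count) < 0);	/* lost another race    */
	}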
259 | |||
260 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | ||
261 | { | ||
262 | atomic_t *count = (atomic_t *)lock; | ||
263 | |||
264 | atomic_dec(count); | ||
265 | if (atomic_read(count) >= 0) | ||
266 | return 1; | ||
267 | atomic_inc(count); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | ||
272 | { | ||
273 | atomic_t *count = (atomic_t *)lock; | ||
274 | |||
275 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | ||
276 | return 1; | ||
277 | atomic_add(RW_LOCK_BIAS, count); | ||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
282 | { | ||
283 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); | ||
284 | } | ||
285 | |||
286 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
287 | { | ||
288 | asm volatile(LOCK_PREFIX "addl %1, %0" | ||
289 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); | ||
290 | } | ||
291 | |||
292 | #define _raw_spin_relax(lock) cpu_relax() | ||
293 | #define _raw_read_relax(lock) cpu_relax() | ||
294 | #define _raw_write_relax(lock) cpu_relax() | ||
295 | |||
5 | #endif | 296 | #endif |
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h deleted file mode 100644 index d3bcebed60ca..000000000000 --- a/include/asm-x86/spinlock_32.h +++ /dev/null | |||
@@ -1,221 +0,0 @@ | |||
1 | #ifndef __ASM_SPINLOCK_H | ||
2 | #define __ASM_SPINLOCK_H | ||
3 | |||
4 | #include <asm/atomic.h> | ||
5 | #include <asm/rwlock.h> | ||
6 | #include <asm/page.h> | ||
7 | #include <asm/processor.h> | ||
8 | #include <linux/compiler.h> | ||
9 | |||
10 | #ifdef CONFIG_PARAVIRT | ||
11 | #include <asm/paravirt.h> | ||
12 | #else | ||
13 | #define CLI_STRING "cli" | ||
14 | #define STI_STRING "sti" | ||
15 | #define CLI_STI_CLOBBERS | ||
16 | #define CLI_STI_INPUT_ARGS | ||
17 | #endif /* CONFIG_PARAVIRT */ | ||
18 | |||
19 | /* | ||
20 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
21 | * | ||
22 | * Simple spin lock operations. There are two variants, one clears IRQs | ||
23 | * on the local processor, one does not. | ||
24 | * | ||
25 | * We make no fairness assumptions. They have a cost. | ||
26 | * | ||
27 | * (the type definitions are in asm/spinlock_types.h) | ||
28 | */ | ||
29 | |||
30 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) | ||
31 | { | ||
32 | return *(volatile signed char *)(&(x)->slock) <= 0; | ||
33 | } | ||
34 | |||
35 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
36 | { | ||
37 | asm volatile("\n1:\t" | ||
38 | LOCK_PREFIX " ; decb %0\n\t" | ||
39 | "jns 3f\n" | ||
40 | "2:\t" | ||
41 | "rep;nop\n\t" | ||
42 | "cmpb $0,%0\n\t" | ||
43 | "jle 2b\n\t" | ||
44 | "jmp 1b\n" | ||
45 | "3:\n\t" | ||
46 | : "+m" (lock->slock) : : "memory"); | ||
47 | } | ||
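The byte lock this (now deleted) file implemented decodes naturally into C: slock starts at 1, a locked decrement claims the lock when the old value was positive, and waiters spin until unlock stores 1 again. A sketch, with an _Atomic stand-in for lock->slock:

	#include <stdatomic.h>

	static void byte_lock(_Atomic signed char *slock)	/* 1 == unlocked */
	{
		for (;;) {
			if (atomic_fetch_sub(slock, 1) > 0)
				return;			/* was 1: lock is ours */
			while (atomic_load(slock) <= 0)
				;			/* rep; nop in the asm */
			/* went positive again: retry the decrement */
		}
	}

	static void byte_unlock(_Atomic signed char *slock)
	{
		atomic_store(slock, 1);	/* movb $1: also resets the waiters' decrements */
	}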
48 | |||
49 | /* | ||
50 | * It is easier for the lock validator if interrupts are not re-enabled | ||
51 | * in the middle of a lock-acquire. This is a performance feature anyway | ||
52 | * so we turn it off: | ||
53 | * | ||
54 | * NOTE: there's an irqs-on section here, which normally would have to be | ||
55 | * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant. | ||
56 | */ | ||
57 | #ifndef CONFIG_PROVE_LOCKING | ||
58 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
59 | { | ||
60 | asm volatile( | ||
61 | "\n1:\t" | ||
62 | LOCK_PREFIX " ; decb %[slock]\n\t" | ||
63 | "jns 5f\n" | ||
64 | "2:\t" | ||
65 | "testl $0x200, %[flags]\n\t" | ||
66 | "jz 4f\n\t" | ||
67 | STI_STRING "\n" | ||
68 | "3:\t" | ||
69 | "rep;nop\n\t" | ||
70 | "cmpb $0, %[slock]\n\t" | ||
71 | "jle 3b\n\t" | ||
72 | CLI_STRING "\n\t" | ||
73 | "jmp 1b\n" | ||
74 | "4:\t" | ||
75 | "rep;nop\n\t" | ||
76 | "cmpb $0, %[slock]\n\t" | ||
77 | "jg 1b\n\t" | ||
78 | "jmp 4b\n" | ||
79 | "5:\n\t" | ||
80 | : [slock] "+m" (lock->slock) | ||
81 | : [flags] "r" (flags) | ||
82 | CLI_STI_INPUT_ARGS | ||
83 | : "memory" CLI_STI_CLOBBERS); | ||
84 | } | ||
85 | #endif | ||
86 | |||
87 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
88 | { | ||
89 | char oldval; | ||
90 | asm volatile( | ||
91 | "xchgb %b0,%1" | ||
92 | :"=q" (oldval), "+m" (lock->slock) | ||
93 | :"0" (0) : "memory"); | ||
94 | return oldval > 0; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * __raw_spin_unlock based on writing $1 to the low byte. | ||
99 | * This method works, despite all the confusion. | ||
100 | * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) | ||
101 | * (PPro errata 66, 92) | ||
102 | */ | ||
103 | |||
104 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | ||
105 | |||
106 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
107 | { | ||
108 | asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory"); | ||
109 | } | ||
110 | |||
111 | #else | ||
112 | |||
113 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
114 | { | ||
115 | char oldval = 1; | ||
116 | |||
117 | asm volatile("xchgb %b0, %1" | ||
118 | : "=q" (oldval), "+m" (lock->slock) | ||
119 | : "0" (oldval) : "memory"); | ||
120 | } | ||
121 | |||
122 | #endif | ||
123 | |||
124 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | ||
125 | { | ||
126 | while (__raw_spin_is_locked(lock)) | ||
127 | cpu_relax(); | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Read-write spinlocks, allowing multiple readers | ||
132 | * but only one writer. | ||
133 | * | ||
134 | * NOTE! it is quite common to have readers in interrupts | ||
135 | * but no interrupt writers. For those circumstances we | ||
136 | * can "mix" irq-safe locks - any writer needs to get a | ||
137 | * irq-safe write-lock, but readers can get non-irqsafe | ||
138 | * read-locks. | ||
139 | * | ||
140 | * On x86, we implement read-write locks as a 32-bit counter | ||
141 | * with the high bit (sign) being the "contended" bit. | ||
142 | * | ||
143 | * The inline assembly is non-obvious. Think about it. | ||
144 | * | ||
145 | * Changed to use the same technique as rw semaphores. See | ||
146 | * semaphore.h for details. -ben | ||
147 | * | ||
148 | * the helpers are in arch/i386/kernel/semaphore.c | ||
149 | */ | ||
150 | |||
151 | /** | ||
152 | * read_can_lock - would read_trylock() succeed? | ||
153 | * @lock: the rwlock in question. | ||
154 | */ | ||
155 | static inline int __raw_read_can_lock(raw_rwlock_t *x) | ||
156 | { | ||
157 | return (int)(x)->lock > 0; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * write_can_lock - would write_trylock() succeed? | ||
162 | * @lock: the rwlock in question. | ||
163 | */ | ||
164 | static inline int __raw_write_can_lock(raw_rwlock_t *x) | ||
165 | { | ||
166 | return (x)->lock == RW_LOCK_BIAS; | ||
167 | } | ||
168 | |||
169 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
170 | { | ||
171 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" | ||
172 | "jns 1f\n" | ||
173 | "call __read_lock_failed\n\t" | ||
174 | "1:\n" | ||
175 | ::"a" (rw) : "memory"); | ||
176 | } | ||
177 | |||
178 | static inline void __raw_write_lock(raw_rwlock_t *rw) | ||
179 | { | ||
180 | asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" | ||
181 | "jz 1f\n" | ||
182 | "call __write_lock_failed\n\t" | ||
183 | "1:\n" | ||
184 | ::"a" (rw) : "memory"); | ||
185 | } | ||
186 | |||
187 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | ||
188 | { | ||
189 | atomic_t *count = (atomic_t *)lock; | ||
190 | atomic_dec(count); | ||
191 | if (atomic_read(count) >= 0) | ||
192 | return 1; | ||
193 | atomic_inc(count); | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | ||
198 | { | ||
199 | atomic_t *count = (atomic_t *)lock; | ||
200 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | ||
201 | return 1; | ||
202 | atomic_add(RW_LOCK_BIAS, count); | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
207 | { | ||
208 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); | ||
209 | } | ||
210 | |||
211 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
212 | { | ||
213 | asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" | ||
214 | : "+m" (rw->lock) : : "memory"); | ||
215 | } | ||
216 | |||
217 | #define _raw_spin_relax(lock) cpu_relax() | ||
218 | #define _raw_read_relax(lock) cpu_relax() | ||
219 | #define _raw_write_relax(lock) cpu_relax() | ||
220 | |||
221 | #endif /* __ASM_SPINLOCK_H */ | ||
diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h deleted file mode 100644 index 88bf981e73cf..000000000000 --- a/include/asm-x86/spinlock_64.h +++ /dev/null | |||
@@ -1,167 +0,0 @@ | |||
1 | #ifndef __ASM_SPINLOCK_H | ||
2 | #define __ASM_SPINLOCK_H | ||
3 | |||
4 | #include <asm/atomic.h> | ||
5 | #include <asm/rwlock.h> | ||
6 | #include <asm/page.h> | ||
7 | #include <asm/processor.h> | ||
8 | |||
9 | /* | ||
10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
11 | * | ||
12 | * Simple spin lock operations. There are two variants, one clears IRQs | ||
13 | * on the local processor, one does not. | ||
14 | * | ||
15 | * We make no fairness assumptions. They have a cost. | ||
16 | * | ||
17 | * (the type definitions are in asm/spinlock_types.h) | ||
18 | */ | ||
19 | |||
20 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | ||
21 | { | ||
22 | return *(volatile signed int *)(&(lock)->slock) <= 0; | ||
23 | } | ||
24 | |||
25 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
26 | { | ||
27 | asm volatile( | ||
28 | "\n1:\t" | ||
29 | LOCK_PREFIX " ; decl %0\n\t" | ||
30 | "jns 2f\n" | ||
31 | "3:\n" | ||
32 | "rep;nop\n\t" | ||
33 | "cmpl $0,%0\n\t" | ||
34 | "jle 3b\n\t" | ||
35 | "jmp 1b\n" | ||
36 | "2:\t" : "=m" (lock->slock) : : "memory"); | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Same as __raw_spin_lock, but reenable interrupts during spinning. | ||
41 | */ | ||
42 | #ifndef CONFIG_PROVE_LOCKING | ||
43 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
44 | { | ||
45 | asm volatile( | ||
46 | "\n1:\t" | ||
47 | LOCK_PREFIX " ; decl %0\n\t" | ||
48 | "jns 5f\n" | ||
49 | "testl $0x200, %1\n\t" /* interrupts were disabled? */ | ||
50 | "jz 4f\n\t" | ||
51 | "sti\n" | ||
52 | "3:\t" | ||
53 | "rep;nop\n\t" | ||
54 | "cmpl $0, %0\n\t" | ||
55 | "jle 3b\n\t" | ||
56 | "cli\n\t" | ||
57 | "jmp 1b\n" | ||
58 | "4:\t" | ||
59 | "rep;nop\n\t" | ||
60 | "cmpl $0, %0\n\t" | ||
61 | "jg 1b\n\t" | ||
62 | "jmp 4b\n" | ||
63 | "5:\n\t" | ||
64 | : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory"); | ||
65 | } | ||
66 | #endif | ||
67 | |||
68 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
69 | { | ||
70 | int oldval; | ||
71 | |||
72 | asm volatile( | ||
73 | "xchgl %0,%1" | ||
74 | :"=q" (oldval), "=m" (lock->slock) | ||
75 | :"0" (0) : "memory"); | ||
76 | |||
77 | return oldval > 0; | ||
78 | } | ||
79 | |||
80 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
81 | { | ||
82 | asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory"); | ||
83 | } | ||
84 | |||
85 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | ||
86 | { | ||
87 | while (__raw_spin_is_locked(lock)) | ||
88 | cpu_relax(); | ||
89 | } | ||
90 | |||
91 | /* | ||
92 | * Read-write spinlocks, allowing multiple readers | ||
93 | * but only one writer. | ||
94 | * | ||
95 | * NOTE! it is quite common to have readers in interrupts | ||
96 | * but no interrupt writers. For those circumstances we | ||
97 | * can "mix" irq-safe locks - any writer needs to get a | ||
98 | * irq-safe write-lock, but readers can get non-irqsafe | ||
99 | * read-locks. | ||
100 | * | ||
101 | * On x86, we implement read-write locks as a 32-bit counter | ||
102 | * with the high bit (sign) being the "contended" bit. | ||
103 | */ | ||
104 | |||
105 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) | ||
106 | { | ||
107 | return (int)(lock)->lock > 0; | ||
108 | } | ||
109 | |||
110 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | ||
111 | { | ||
112 | return (lock)->lock == RW_LOCK_BIAS; | ||
113 | } | ||
114 | |||
115 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
116 | { | ||
117 | asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" | ||
118 | "jns 1f\n" | ||
119 | "call __read_lock_failed\n" | ||
120 | "1:\n" | ||
121 | ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); | ||
122 | } | ||
123 | |||
124 | static inline void __raw_write_lock(raw_rwlock_t *rw) | ||
125 | { | ||
126 | asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" | ||
127 | "jz 1f\n" | ||
128 | "\tcall __write_lock_failed\n\t" | ||
129 | "1:\n" | ||
130 | ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); | ||
131 | } | ||
132 | |||
133 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | ||
134 | { | ||
135 | atomic_t *count = (atomic_t *)lock; | ||
136 | atomic_dec(count); | ||
137 | if (atomic_read(count) >= 0) | ||
138 | return 1; | ||
139 | atomic_inc(count); | ||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | ||
144 | { | ||
145 | atomic_t *count = (atomic_t *)lock; | ||
146 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | ||
147 | return 1; | ||
148 | atomic_add(RW_LOCK_BIAS, count); | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
153 | { | ||
154 | asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
155 | } | ||
156 | |||
157 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
158 | { | ||
159 | asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0" | ||
160 | : "=m" (rw->lock) : : "memory"); | ||
161 | } | ||
162 | |||
163 | #define _raw_spin_relax(lock) cpu_relax() | ||
164 | #define _raw_read_relax(lock) cpu_relax() | ||
165 | #define _raw_write_relax(lock) cpu_relax() | ||
166 | |||
167 | #endif /* __ASM_SPINLOCK_H */ | ||
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h index 4da9345c1500..9029cf78cf5d 100644 --- a/include/asm-x86/spinlock_types.h +++ b/include/asm-x86/spinlock_types.h | |||
@@ -9,7 +9,7 @@ typedef struct { | |||
9 | unsigned int slock; | 9 | unsigned int slock; |
10 | } raw_spinlock_t; | 10 | } raw_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | unsigned int lock; | 15 | unsigned int lock; |
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h index 70dd5bae3235..30f82526a8e2 100644 --- a/include/asm-x86/stacktrace.h +++ b/include/asm-x86/stacktrace.h | |||
@@ -9,12 +9,13 @@ struct stacktrace_ops { | |||
9 | void (*warning)(void *data, char *msg); | 9 | void (*warning)(void *data, char *msg); |
10 | /* msg must contain %s for the symbol */ | 10 | /* msg must contain %s for the symbol */ |
11 | void (*warning_symbol)(void *data, char *msg, unsigned long symbol); | 11 | void (*warning_symbol)(void *data, char *msg, unsigned long symbol); |
12 | void (*address)(void *data, unsigned long address); | 12 | void (*address)(void *data, unsigned long address, int reliable); |
13 | /* On negative return stop dumping */ | 13 | /* On negative return stop dumping */ |
14 | int (*stack)(void *data, char *name); | 14 | int (*stack)(void *data, char *name); |
15 | }; | 15 | }; |
16 | 16 | ||
17 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack, | 17 | void dump_trace(struct task_struct *tsk, struct pt_regs *regs, |
18 | unsigned long *stack, unsigned long bp, | ||
18 | const struct stacktrace_ops *ops, void *data); | 19 | const struct stacktrace_ops *ops, void *data); |
19 | 20 | ||
20 | #endif | 21 | #endif |
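A hypothetical ops instance against the updated interface -- address() now receives a reliability flag and dump_trace() takes the frame pointer explicitly. All my_* names are inventions for the example; print_symbol() is from <linux/kallsyms.h>:

	static void my_warning(void *data, char *msg)
	{
		printk("%s\n", msg);
	}

	static void my_warning_symbol(void *data, char *msg, unsigned long symbol)
	{
		print_symbol(msg, symbol);	/* msg carries the %s, per above */
	}

	static void my_address(void *data, unsigned long addr, int reliable)
	{
		/* Mark addresses the unwinder is unsure about, oops-style. */
		printk(" %s[<%08lx>]\n", reliable ? "" : "? ", addr);
	}

	static int my_stack(void *data, char *name)
	{
		return 0;			/* non-negative: keep dumping */
	}

	static const struct stacktrace_ops my_ops = {
		.warning	= my_warning,
		.warning_symbol	= my_warning_symbol,
		.address	= my_address,
		.stack		= my_stack,
	};

	/* dump_trace(current, NULL, NULL, 0, &my_ops, NULL);
	 * bp == 0 lets the unwinder discover the frame pointer itself. */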
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index 55bfa308f900..c5d13a86dea7 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h | |||
@@ -213,14 +213,14 @@ static __always_inline void * __constant_c_and_count_memset(void * s, unsigned l | |||
213 | case 0: | 213 | case 0: |
214 | return s; | 214 | return s; |
215 | case 1: | 215 | case 1: |
216 | *(unsigned char *)s = pattern; | 216 | *(unsigned char *)s = pattern & 0xff; |
217 | return s; | 217 | return s; |
218 | case 2: | 218 | case 2: |
219 | *(unsigned short *)s = pattern; | 219 | *(unsigned short *)s = pattern & 0xffff; |
220 | return s; | 220 | return s; |
221 | case 3: | 221 | case 3: |
222 | *(unsigned short *)s = pattern; | 222 | *(unsigned short *)s = pattern & 0xffff; |
223 | *(2+(unsigned char *)s) = pattern; | 223 | *(2+(unsigned char *)s) = pattern & 0xff; |
224 | return s; | 224 | return s; |
225 | case 4: | 225 | case 4: |
226 | *(unsigned long *)s = pattern; | 226 | *(unsigned long *)s = pattern; |
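For reference, the pattern these cases store is the fill byte replicated across a long (0x01010101UL * c); the new & 0xff / & 0xffff masks do not change the stored value, they just make the narrowing stores explicit. A stand-alone sketch of the small-count cases:

	static void *small_const_memset(void *s, unsigned char c, unsigned long count)
	{
		unsigned long pattern = 0x01010101UL * c;	/* c replicated 4x */

		switch (count) {
		case 1:
			*(unsigned char *)s = pattern & 0xff;
			return s;
		case 2:
			*(unsigned short *)s = pattern & 0xffff;
			return s;
		case 3:
			*(unsigned short *)s = pattern & 0xffff;
			*(2 + (unsigned char *)s) = pattern & 0xff;
			return s;
		case 4:
			*(unsigned long *)s = pattern;
			return s;
		}
		return s;
	}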
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index a2520732ffd6..1bbda3ad7796 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h | |||
@@ -12,8 +12,8 @@ static inline int arch_prepare_suspend(void) { return 0; } | |||
12 | struct saved_context { | 12 | struct saved_context { |
13 | u16 es, fs, gs, ss; | 13 | u16 es, fs, gs, ss; |
14 | unsigned long cr0, cr2, cr3, cr4; | 14 | unsigned long cr0, cr2, cr3, cr4; |
15 | struct Xgt_desc_struct gdt; | 15 | struct desc_ptr gdt; |
16 | struct Xgt_desc_struct idt; | 16 | struct desc_ptr idt; |
17 | u16 ldt; | 17 | u16 ldt; |
18 | u16 tss; | 18 | u16 tss; |
19 | unsigned long tr; | 19 | unsigned long tr; |
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index c505a76bcf6e..2eb92cb81a0d 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h | |||
@@ -15,7 +15,14 @@ arch_prepare_suspend(void) | |||
15 | return 0; | 15 | return 0; |
16 | } | 16 | } |
17 | 17 | ||
18 | /* Image of the saved processor state. If you touch this, fix acpi/wakeup.S. */ | 18 | /* |
19 | * Image of the saved processor state, used by the low level ACPI suspend to | ||
20 | * RAM code and by the low level hibernation code. | ||
21 | * | ||
22 | * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that | ||
23 | * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c, | ||
24 | * still work as required. | ||
25 | */ | ||
19 | struct saved_context { | 26 | struct saved_context { |
20 | struct pt_regs regs; | 27 | struct pt_regs regs; |
21 | u16 ds, es, fs, gs, ss; | 28 | u16 ds, es, fs, gs, ss; |
@@ -38,8 +45,6 @@ struct saved_context { | |||
38 | #define loaddebug(thread,register) \ | 45 | #define loaddebug(thread,register) \ |
39 | set_debugreg((thread)->debugreg##register, register) | 46 | set_debugreg((thread)->debugreg##register, register) |
40 | 47 | ||
41 | extern void fix_processor_context(void); | ||
42 | |||
43 | /* routines for saving/restoring kernel state */ | 48 | /* routines for saving/restoring kernel state */ |
44 | extern int acpi_save_state_mem(void); | 49 | extern int acpi_save_state_mem(void); |
45 | extern char core_restore_code; | 50 | extern char core_restore_code; |
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 692562b48f2a..9cff02ffe6c2 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h | |||
@@ -1,5 +1,409 @@ | |||
1 | #ifndef _ASM_X86_SYSTEM_H_ | ||
2 | #define _ASM_X86_SYSTEM_H_ | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* entries in ARCH_DLINFO: */ | ||
14 | #ifdef CONFIG_IA32_EMULATION | ||
15 | # define AT_VECTOR_SIZE_ARCH 2 | ||
16 | #else | ||
17 | # define AT_VECTOR_SIZE_ARCH 1 | ||
18 | #endif | ||
19 | |||
20 | #ifdef CONFIG_X86_32 | ||
21 | |||
22 | struct task_struct; /* one of the stranger aspects of C forward declarations */ | ||
23 | struct task_struct *__switch_to(struct task_struct *prev, | ||
24 | struct task_struct *next); | ||
25 | |||
26 | /* | ||
27 | * Saving eflags is important. It not only switches IOPL between tasks, | ||
28 | * it also protects other tasks from NT leaking through sysenter etc. | ||
29 | */ | ||
30 | #define switch_to(prev, next, last) do { \ | ||
31 | unsigned long esi, edi; \ | ||
32 | asm volatile("pushfl\n\t" /* Save flags */ \ | ||
33 | "pushl %%ebp\n\t" \ | ||
34 | "movl %%esp,%0\n\t" /* save ESP */ \ | ||
35 | "movl %5,%%esp\n\t" /* restore ESP */ \ | ||
36 | "movl $1f,%1\n\t" /* save EIP */ \ | ||
37 | "pushl %6\n\t" /* restore EIP */ \ | ||
38 | "jmp __switch_to\n" \ | ||
39 | "1:\t" \ | ||
40 | "popl %%ebp\n\t" \ | ||
41 | "popfl" \ | ||
42 | :"=m" (prev->thread.sp), "=m" (prev->thread.ip), \ | ||
43 | "=a" (last), "=S" (esi), "=D" (edi) \ | ||
44 | :"m" (next->thread.sp), "m" (next->thread.ip), \ | ||
45 | "2" (prev), "d" (next)); \ | ||
46 | } while (0) | ||
47 | |||
48 | /* | ||
49 | * disable hlt during certain critical i/o operations | ||
50 | */ | ||
51 | #define HAVE_DISABLE_HLT | ||
52 | #else | ||
53 | #define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" | ||
54 | #define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" | ||
55 | |||
56 | /* frame pointer must be last for get_wchan */ | ||
57 | #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" | ||
58 | #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" | ||
59 | |||
60 | #define __EXTRA_CLOBBER \ | ||
61 | , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ | ||
62 | "r12", "r13", "r14", "r15" | ||
63 | |||
64 | /* Save and restore flags, to keep the NT flag from leaking across tasks */ | ||
65 | #define switch_to(prev, next, last) \ | ||
66 | asm volatile(SAVE_CONTEXT \ | ||
67 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | ||
68 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | ||
69 | "call __switch_to\n\t" \ | ||
70 | ".globl thread_return\n" \ | ||
71 | "thread_return:\n\t" \ | ||
72 | "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ | ||
73 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | ||
74 | LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ | ||
75 | "movq %%rax,%%rdi\n\t" \ | ||
76 | "jc ret_from_fork\n\t" \ | ||
77 | RESTORE_CONTEXT \ | ||
78 | : "=a" (last) \ | ||
79 | : [next] "S" (next), [prev] "D" (prev), \ | ||
80 | [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ | ||
81 | [ti_flags] "i" (offsetof(struct thread_info, flags)), \ | ||
82 | [tif_fork] "i" (TIF_FORK), \ | ||
83 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ | ||
84 | [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ | ||
85 | : "memory", "cc" __EXTRA_CLOBBER) | ||
86 | #endif | ||
87 | |||
88 | #ifdef __KERNEL__ | ||
89 | #define _set_base(addr, base) do { unsigned long __pr; \ | ||
90 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
91 | "rorl $16,%%edx\n\t" \ | ||
92 | "movb %%dl,%2\n\t" \ | ||
93 | "movb %%dh,%3" \ | ||
94 | :"=&d" (__pr) \ | ||
95 | :"m" (*((addr)+2)), \ | ||
96 | "m" (*((addr)+4)), \ | ||
97 | "m" (*((addr)+7)), \ | ||
98 | "0" (base) \ | ||
99 | ); } while (0) | ||
100 | |||
101 | #define _set_limit(addr, limit) do { unsigned long __lr; \ | ||
102 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
103 | "rorl $16,%%edx\n\t" \ | ||
104 | "movb %2,%%dh\n\t" \ | ||
105 | "andb $0xf0,%%dh\n\t" \ | ||
106 | "orb %%dh,%%dl\n\t" \ | ||
107 | "movb %%dl,%2" \ | ||
108 | :"=&d" (__lr) \ | ||
109 | :"m" (*(addr)), \ | ||
110 | "m" (*((addr)+6)), \ | ||
111 | "0" (limit) \ | ||
112 | ); } while (0) | ||
113 | |||
114 | #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) | ||
115 | #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) | ||
116 | |||
117 | extern void load_gs_index(unsigned); | ||
118 | |||
119 | /* | ||
120 | * Load a segment. Fall back on loading the zero | ||
121 | * segment if something goes wrong. | ||
122 | */ | ||
123 | #define loadsegment(seg, value) \ | ||
124 | asm volatile("\n" \ | ||
125 | "1:\t" \ | ||
126 | "movl %k0,%%" #seg "\n" \ | ||
127 | "2:\n" \ | ||
128 | ".section .fixup,\"ax\"\n" \ | ||
129 | "3:\t" \ | ||
130 | "movl %k1, %%" #seg "\n\t" \ | ||
131 | "jmp 2b\n" \ | ||
132 | ".previous\n" \ | ||
133 | _ASM_EXTABLE(1b,3b) \ | ||
134 | : :"r" (value), "r" (0)) | ||
135 | |||
136 | |||
137 | /* | ||
138 | * Save a segment register away | ||
139 | */ | ||
140 | #define savesegment(seg, value) \ | ||
141 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) | ||
142 | |||
143 | static inline unsigned long get_limit(unsigned long segment) | ||
144 | { | ||
145 | unsigned long __limit; | ||
146 | __asm__("lsll %1,%0" | ||
147 | :"=r" (__limit):"r" (segment)); | ||
148 | return __limit+1; | ||
149 | } | ||
150 | |||
151 | static inline void native_clts(void) | ||
152 | { | ||
153 | asm volatile ("clts"); | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Volatile isn't enough to prevent the compiler from reordering the | ||
158 | * read/write functions for the control registers and messing everything up. | ||
159 | * A memory clobber would solve the problem, but would prevent reordering of | ||
160 | * all loads and stores around it, which can hurt performance. The solution is | ||
161 | * to use a variable and mimic reads and writes to it to enforce serialization. | ||
162 | */ | ||
163 | static unsigned long __force_order; | ||
164 | |||
165 | static inline unsigned long native_read_cr0(void) | ||
166 | { | ||
167 | unsigned long val; | ||
168 | asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order)); | ||
169 | return val; | ||
170 | } | ||
171 | |||
172 | static inline void native_write_cr0(unsigned long val) | ||
173 | { | ||
174 | asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order)); | ||
175 | } | ||
176 | |||
177 | static inline unsigned long native_read_cr2(void) | ||
178 | { | ||
179 | unsigned long val; | ||
180 | asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order)); | ||
181 | return val; | ||
182 | } | ||
183 | |||
184 | static inline void native_write_cr2(unsigned long val) | ||
185 | { | ||
186 | asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order)); | ||
187 | } | ||
188 | |||
189 | static inline unsigned long native_read_cr3(void) | ||
190 | { | ||
191 | unsigned long val; | ||
192 | asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order)); | ||
193 | return val; | ||
194 | } | ||
195 | |||
196 | static inline void native_write_cr3(unsigned long val) | ||
197 | { | ||
198 | asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order)); | ||
199 | } | ||
200 | |||
201 | static inline unsigned long native_read_cr4(void) | ||
202 | { | ||
203 | unsigned long val; | ||
204 | asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order)); | ||
205 | return val; | ||
206 | } | ||
207 | |||
208 | static inline unsigned long native_read_cr4_safe(void) | ||
209 | { | ||
210 | unsigned long val; | ||
211 | /* This could fault if %cr4 does not exist. On x86_64, cr4 always | ||
212 | * exists, so it will never fail. */ | ||
213 | #ifdef CONFIG_X86_32 | ||
214 | asm volatile("1: mov %%cr4, %0\n" | ||
215 | "2:\n" | ||
216 | _ASM_EXTABLE(1b,2b) | ||
217 | : "=r" (val), "=m" (__force_order) : "0" (0)); | ||
218 | #else | ||
219 | val = native_read_cr4(); | ||
220 | #endif | ||
221 | return val; | ||
222 | } | ||
223 | |||
224 | static inline void native_write_cr4(unsigned long val) | ||
225 | { | ||
226 | asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order)); | ||
227 | } | ||
228 | |||
229 | #ifdef CONFIG_X86_64 | ||
230 | static inline unsigned long native_read_cr8(void) | ||
231 | { | ||
232 | unsigned long cr8; | ||
233 | asm volatile("movq %%cr8,%0" : "=r" (cr8)); | ||
234 | return cr8; | ||
235 | } | ||
236 | |||
237 | static inline void native_write_cr8(unsigned long val) | ||
238 | { | ||
239 | asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); | ||
240 | } | ||
241 | #endif | ||
242 | |||
243 | static inline void native_wbinvd(void) | ||
244 | { | ||
245 | asm volatile("wbinvd": : :"memory"); | ||
246 | } | ||
247 | #ifdef CONFIG_PARAVIRT | ||
248 | #include <asm/paravirt.h> | ||
249 | #else | ||
250 | #define read_cr0() (native_read_cr0()) | ||
251 | #define write_cr0(x) (native_write_cr0(x)) | ||
252 | #define read_cr2() (native_read_cr2()) | ||
253 | #define write_cr2(x) (native_write_cr2(x)) | ||
254 | #define read_cr3() (native_read_cr3()) | ||
255 | #define write_cr3(x) (native_write_cr3(x)) | ||
256 | #define read_cr4() (native_read_cr4()) | ||
257 | #define read_cr4_safe() (native_read_cr4_safe()) | ||
258 | #define write_cr4(x) (native_write_cr4(x)) | ||
259 | #define wbinvd() (native_wbinvd()) | ||
260 | #ifdef CONFIG_X86_64 | ||
261 | #define read_cr8() (native_read_cr8()) | ||
262 | #define write_cr8(x) (native_write_cr8(x)) | ||
263 | #endif | ||
264 | |||
265 | /* Clear the 'TS' bit */ | ||
266 | #define clts() (native_clts()) | ||
267 | |||
268 | #endif/* CONFIG_PARAVIRT */ | ||
269 | |||
270 | #define stts() write_cr0(8 | read_cr0()) | ||
271 | |||
272 | #endif /* __KERNEL__ */ | ||
273 | |||
274 | static inline void clflush(volatile void *__p) | ||
275 | { | ||
276 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); | ||
277 | } | ||
278 | |||
279 | #define nop() __asm__ __volatile__ ("nop") | ||
280 | |||
281 | void disable_hlt(void); | ||
282 | void enable_hlt(void); | ||
283 | |||
284 | extern int es7000_plat; | ||
285 | void cpu_idle_wait(void); | ||
286 | |||
287 | extern unsigned long arch_align_stack(unsigned long sp); | ||
288 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | ||
289 | |||
290 | void default_idle(void); | ||
291 | |||
292 | /* | ||
293 | * Force strict CPU ordering. | ||
294 | * And yes, this is required on UP too when we're talking | ||
295 | * to devices. | ||
296 | */ | ||
1 | #ifdef CONFIG_X86_32 | 297 | #ifdef CONFIG_X86_32 |
2 | # include "system_32.h" | 298 | /* |
299 | * For now, "wmb()" doesn't actually do anything, as all | ||
300 | * Intel CPUs follow what Intel calls a *Processor Order*, | ||
301 | * in which all writes are seen in the program order even | ||
302 | * outside the CPU. | ||
303 | * | ||
304 | * I expect future Intel CPUs to have a weaker ordering, | ||
305 | * but I'd also expect them to finally get their act together | ||
306 | * and add some real memory barriers if so. | ||
307 | * | ||
308 | * Some non-Intel clones support out-of-order stores. wmb() ceases to be a | ||
309 | * nop for these. | ||
310 | */ | ||
311 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
312 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
313 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
3 | #else | 314 | #else |
4 | # include "system_64.h" | 315 | #define mb() asm volatile("mfence":::"memory") |
316 | #define rmb() asm volatile("lfence":::"memory") | ||
317 | #define wmb() asm volatile("sfence" ::: "memory") | ||
318 | #endif | ||
319 | |||
320 | /** | ||
321 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
322 | * depend on. | ||
323 | * | ||
324 | * No data-dependent reads from memory-like regions are ever reordered | ||
325 | * over this barrier. All reads preceding this primitive are guaranteed | ||
326 | * to access memory (but not necessarily other CPUs' caches) before any | ||
327 | * reads following this primitive that depend on the data returned by | ||
328 | * any of the preceding reads. This primitive is much lighter weight than | ||
329 | * rmb() on most CPUs, and is never heavier weight than is | ||
330 | * rmb(). | ||
331 | * | ||
332 | * These ordering constraints are respected by both the local CPU | ||
333 | * and the compiler. | ||
334 | * | ||
335 | * Ordering is not guaranteed by anything other than these primitives, | ||
336 | * not even by data dependencies. See the documentation for | ||
337 | * memory_barrier() for examples and URLs to more information. | ||
338 | * | ||
339 | * For example, the following code would force ordering (the initial | ||
340 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
341 | * | ||
342 | * <programlisting> | ||
343 | * CPU 0 CPU 1 | ||
344 | * | ||
345 | * b = 2; | ||
346 | * memory_barrier(); | ||
347 | * p = &b; q = p; | ||
348 | * read_barrier_depends(); | ||
349 | * d = *q; | ||
350 | * </programlisting> | ||
351 | * | ||
352 | * because the read of "*q" depends on the read of "p" and these | ||
353 | * two reads are separated by a read_barrier_depends(). However, | ||
354 | * the following code, with the same initial values for "a" and "b": | ||
355 | * | ||
356 | * <programlisting> | ||
357 | * CPU 0 CPU 1 | ||
358 | * | ||
359 | * a = 2; | ||
360 | * memory_barrier(); | ||
361 | * b = 3; y = b; | ||
362 | * read_barrier_depends(); | ||
363 | * x = a; | ||
364 | * </programlisting> | ||
365 | * | ||
366 | * does not enforce ordering, since there is no data dependency between | ||
367 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
368 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
369 | * in cases like this where there are no data dependencies. | ||
370 | **/ | ||
371 | |||
372 | #define read_barrier_depends() do { } while (0) | ||
373 | |||
374 | #ifdef CONFIG_SMP | ||
375 | #define smp_mb() mb() | ||
376 | #ifdef CONFIG_X86_PPRO_FENCE | ||
377 | # define smp_rmb() rmb() | ||
378 | #else | ||
379 | # define smp_rmb() barrier() | ||
380 | #endif | ||
381 | #ifdef CONFIG_X86_OOSTORE | ||
382 | # define smp_wmb() wmb() | ||
383 | #else | ||
384 | # define smp_wmb() barrier() | ||
385 | #endif | ||
386 | #define smp_read_barrier_depends() read_barrier_depends() | ||
387 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | ||
388 | #else | ||
389 | #define smp_mb() barrier() | ||
390 | #define smp_rmb() barrier() | ||
391 | #define smp_wmb() barrier() | ||
392 | #define smp_read_barrier_depends() do { } while (0) | ||
393 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
394 | #endif | ||
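The classic pairing of the SMP barriers defined above: the producer orders its data store before the flag with smp_wmb(), the consumer orders the flag check before the data read with smp_rmb(); on UP both compile down to barrier(). Variable names are illustrative:

	static int payload;
	static int ready;

	static void producer(void)
	{
		payload = 42;
		smp_wmb();		/* publish payload before ready */
		ready = 1;
	}

	static int consumer(void)
	{
		if (!ready)
			return -1;	/* nothing published yet */
		smp_rmb();		/* read ready before payload */
		return payload;
	}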
395 | |||
396 | /* | ||
397 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
398 | * (or get_cycles() or vread, which may access the TSC) in a defined | ||
399 | * code region. | ||
400 | * | ||
401 | * (Could use a three-way alternative() for this if there were one.) | ||
402 | */ | ||
403 | static inline void rdtsc_barrier(void) | ||
404 | { | ||
405 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
406 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
407 | } | ||
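Typical use is to fence a timed region so the TSC reads can be neither hoisted above nor sunk below it; get_cycles() is this tree's TSC accessor, while the wrapper itself is invented for the example:

	static inline unsigned long long cycles_for(void (*fn)(void))
	{
		unsigned long long t0, t1;

		rdtsc_barrier();
		t0 = get_cycles();	/* cannot float above the barrier */
		fn();
		rdtsc_barrier();
		t1 = get_cycles();	/* nor be speculated into fn()    */

		return t1 - t0;
	}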
408 | |||
5 | #endif | 409 | #endif |
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h deleted file mode 100644 index ef8468883bac..000000000000 --- a/include/asm-x86/system_32.h +++ /dev/null | |||
@@ -1,320 +0,0 @@ | |||
1 | #ifndef __ASM_SYSTEM_H | ||
2 | #define __ASM_SYSTEM_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | #define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ | ||
11 | |||
12 | struct task_struct; /* one of the stranger aspects of C forward declarations */ | ||
13 | extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); | ||
14 | |||
15 | /* | ||
16 | * Saving eflags is important. It not only switches IOPL between tasks, | ||
17 | * it also protects other tasks from NT leaking through sysenter etc. | ||
18 | */ | ||
19 | #define switch_to(prev,next,last) do { \ | ||
20 | unsigned long esi,edi; \ | ||
21 | asm volatile("pushfl\n\t" /* Save flags */ \ | ||
22 | "pushl %%ebp\n\t" \ | ||
23 | "movl %%esp,%0\n\t" /* save ESP */ \ | ||
24 | "movl %5,%%esp\n\t" /* restore ESP */ \ | ||
25 | "movl $1f,%1\n\t" /* save EIP */ \ | ||
26 | "pushl %6\n\t" /* restore EIP */ \ | ||
27 | "jmp __switch_to\n" \ | ||
28 | "1:\t" \ | ||
29 | "popl %%ebp\n\t" \ | ||
30 | "popfl" \ | ||
31 | :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ | ||
32 | "=a" (last),"=S" (esi),"=D" (edi) \ | ||
33 | :"m" (next->thread.esp),"m" (next->thread.eip), \ | ||
34 | "2" (prev), "d" (next)); \ | ||
35 | } while (0) | ||
36 | |||
37 | #define _set_base(addr,base) do { unsigned long __pr; \ | ||
38 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
39 | "rorl $16,%%edx\n\t" \ | ||
40 | "movb %%dl,%2\n\t" \ | ||
41 | "movb %%dh,%3" \ | ||
42 | :"=&d" (__pr) \ | ||
43 | :"m" (*((addr)+2)), \ | ||
44 | "m" (*((addr)+4)), \ | ||
45 | "m" (*((addr)+7)), \ | ||
46 | "0" (base) \ | ||
47 | ); } while(0) | ||
48 | |||
49 | #define _set_limit(addr,limit) do { unsigned long __lr; \ | ||
50 | __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | ||
51 | "rorl $16,%%edx\n\t" \ | ||
52 | "movb %2,%%dh\n\t" \ | ||
53 | "andb $0xf0,%%dh\n\t" \ | ||
54 | "orb %%dh,%%dl\n\t" \ | ||
55 | "movb %%dl,%2" \ | ||
56 | :"=&d" (__lr) \ | ||
57 | :"m" (*(addr)), \ | ||
58 | "m" (*((addr)+6)), \ | ||
59 | "0" (limit) \ | ||
60 | ); } while(0) | ||
61 | |||
62 | #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) | ||
63 | #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) | ||
64 | |||
65 | /* | ||
66 | * Load a segment. Fall back on loading the zero | ||
67 | * segment if something goes wrong.. | ||
68 | */ | ||
69 | #define loadsegment(seg,value) \ | ||
70 | asm volatile("\n" \ | ||
71 | "1:\t" \ | ||
72 | "mov %0,%%" #seg "\n" \ | ||
73 | "2:\n" \ | ||
74 | ".section .fixup,\"ax\"\n" \ | ||
75 | "3:\t" \ | ||
76 | "pushl $0\n\t" \ | ||
77 | "popl %%" #seg "\n\t" \ | ||
78 | "jmp 2b\n" \ | ||
79 | ".previous\n" \ | ||
80 | ".section __ex_table,\"a\"\n\t" \ | ||
81 | ".align 4\n\t" \ | ||
82 | ".long 1b,3b\n" \ | ||
83 | ".previous" \ | ||
84 | : :"rm" (value)) | ||
85 | |||
86 | /* | ||
87 | * Save a segment register away | ||
88 | */ | ||
89 | #define savesegment(seg, value) \ | ||
90 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) | ||
91 | |||
92 | |||
93 | static inline void native_clts(void) | ||
94 | { | ||
95 | asm volatile ("clts"); | ||
96 | } | ||
97 | |||
98 | static inline unsigned long native_read_cr0(void) | ||
99 | { | ||
100 | unsigned long val; | ||
101 | asm volatile("movl %%cr0,%0\n\t" :"=r" (val)); | ||
102 | return val; | ||
103 | } | ||
104 | |||
105 | static inline void native_write_cr0(unsigned long val) | ||
106 | { | ||
107 | asm volatile("movl %0,%%cr0": :"r" (val)); | ||
108 | } | ||
109 | |||
110 | static inline unsigned long native_read_cr2(void) | ||
111 | { | ||
112 | unsigned long val; | ||
113 | asm volatile("movl %%cr2,%0\n\t" :"=r" (val)); | ||
114 | return val; | ||
115 | } | ||
116 | |||
117 | static inline void native_write_cr2(unsigned long val) | ||
118 | { | ||
119 | asm volatile("movl %0,%%cr2": :"r" (val)); | ||
120 | } | ||
121 | |||
122 | static inline unsigned long native_read_cr3(void) | ||
123 | { | ||
124 | unsigned long val; | ||
125 | asm volatile("movl %%cr3,%0\n\t" :"=r" (val)); | ||
126 | return val; | ||
127 | } | ||
128 | |||
129 | static inline void native_write_cr3(unsigned long val) | ||
130 | { | ||
131 | asm volatile("movl %0,%%cr3": :"r" (val)); | ||
132 | } | ||
133 | |||
134 | static inline unsigned long native_read_cr4(void) | ||
135 | { | ||
136 | unsigned long val; | ||
137 | asm volatile("movl %%cr4,%0\n\t" :"=r" (val)); | ||
138 | return val; | ||
139 | } | ||
140 | |||
141 | static inline unsigned long native_read_cr4_safe(void) | ||
142 | { | ||
143 | unsigned long val; | ||
144 | /* This could fault if %cr4 does not exist */ | ||
145 | asm volatile("1: movl %%cr4, %0 \n" | ||
146 | "2: \n" | ||
147 | ".section __ex_table,\"a\" \n" | ||
148 | ".long 1b,2b \n" | ||
149 | ".previous \n" | ||
150 | : "=r" (val): "0" (0)); | ||
151 | return val; | ||
152 | } | ||
153 | |||
154 | static inline void native_write_cr4(unsigned long val) | ||
155 | { | ||
156 | asm volatile("movl %0,%%cr4": :"r" (val)); | ||
157 | } | ||
158 | |||
159 | static inline void native_wbinvd(void) | ||
160 | { | ||
161 | asm volatile("wbinvd": : :"memory"); | ||
162 | } | ||
163 | |||
164 | static inline void clflush(volatile void *__p) | ||
165 | { | ||
166 | asm volatile("clflush %0" : "+m" (*(char __force *)__p)); | ||
167 | } | ||
168 | |||
169 | #ifdef CONFIG_PARAVIRT | ||
170 | #include <asm/paravirt.h> | ||
171 | #else | ||
172 | #define read_cr0() (native_read_cr0()) | ||
173 | #define write_cr0(x) (native_write_cr0(x)) | ||
174 | #define read_cr2() (native_read_cr2()) | ||
175 | #define write_cr2(x) (native_write_cr2(x)) | ||
176 | #define read_cr3() (native_read_cr3()) | ||
177 | #define write_cr3(x) (native_write_cr3(x)) | ||
178 | #define read_cr4() (native_read_cr4()) | ||
179 | #define read_cr4_safe() (native_read_cr4_safe()) | ||
180 | #define write_cr4(x) (native_write_cr4(x)) | ||
181 | #define wbinvd() (native_wbinvd()) | ||
182 | |||
183 | /* Clear the 'TS' bit */ | ||
184 | #define clts() (native_clts()) | ||
185 | |||
186 | #endif/* CONFIG_PARAVIRT */ | ||
187 | |||
188 | /* Set the 'TS' bit */ | ||
189 | #define stts() write_cr0(8 | read_cr0()) | ||
190 | |||
191 | #endif /* __KERNEL__ */ | ||
192 | |||
193 | static inline unsigned long get_limit(unsigned long segment) | ||
194 | { | ||
195 | unsigned long __limit; | ||
196 | __asm__("lsll %1,%0" | ||
197 | :"=r" (__limit):"r" (segment)); | ||
198 | return __limit+1; | ||
199 | } | ||
200 | |||
201 | #define nop() __asm__ __volatile__ ("nop") | ||
202 | |||
203 | /* | ||
204 | * Force strict CPU ordering. | ||
205 | * And yes, this is required on UP too when we're talking | ||
206 | * to devices. | ||
207 | * | ||
208 | * For now, "wmb()" doesn't actually do anything, as all | ||
209 | * Intel CPUs follow what Intel calls a *Processor Order*, | ||
210 | * in which all writes are seen in the program order even | ||
211 | * outside the CPU. | ||
212 | * | ||
213 | * I expect future Intel CPUs to have a weaker ordering, | ||
214 | * but I'd also expect them to finally get their act together | ||
215 | * and add some real memory barriers if so. | ||
216 | * | ||
217 | * Some non-Intel clones support out-of-order stores. wmb() ceases to be a | ||
218 | * nop for these. | ||
219 | */ | ||
220 | |||
221 | |||
222 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
223 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
224 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
225 | |||
226 | /** | ||
227 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
228 | * depend on. | ||
229 | * | ||
230 | * No data-dependent reads from memory-like regions are ever reordered | ||
231 | * over this barrier. All reads preceding this primitive are guaranteed | ||
232 | * to access memory (but not necessarily other CPUs' caches) before any | ||
233 | * reads following this primitive that depend on the data returned by | ||
234 | * any of the preceding reads. This primitive is much lighter weight than | ||
235 | * rmb() on most CPUs, and is never heavier weight than is | ||
236 | * rmb(). | ||
237 | * | ||
238 | * These ordering constraints are respected by both the local CPU | ||
239 | * and the compiler. | ||
240 | * | ||
241 | * Ordering is not guaranteed by anything other than these primitives, | ||
242 | * not even by data dependencies. See the documentation for | ||
243 | * memory_barrier() for examples and URLs to more information. | ||
244 | * | ||
245 | * For example, the following code would force ordering (the initial | ||
246 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
247 | * | ||
248 | * <programlisting> | ||
249 | * CPU 0 CPU 1 | ||
250 | * | ||
251 | * b = 2; | ||
252 | * memory_barrier(); | ||
253 | * p = &b; q = p; | ||
254 | * read_barrier_depends(); | ||
255 | * d = *q; | ||
256 | * </programlisting> | ||
257 | * | ||
258 | * because the read of "*q" depends on the read of "p" and these | ||
259 | * two reads are separated by a read_barrier_depends(). However, | ||
260 | * the following code, with the same initial values for "a" and "b": | ||
261 | * | ||
262 | * <programlisting> | ||
263 | * CPU 0 CPU 1 | ||
264 | * | ||
265 | * a = 2; | ||
266 | * memory_barrier(); | ||
267 | * b = 3; y = b; | ||
268 | * read_barrier_depends(); | ||
269 | * x = a; | ||
270 | * </programlisting> | ||
271 | * | ||
272 | * does not enforce ordering, since there is no data dependency between | ||
273 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
274 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
275 | * in cases like this where there are no data dependencies. | ||
276 | **/ | ||
277 | |||
278 | #define read_barrier_depends() do { } while(0) | ||
279 | |||
280 | #ifdef CONFIG_SMP | ||
281 | #define smp_mb() mb() | ||
282 | #ifdef CONFIG_X86_PPRO_FENCE | ||
283 | # define smp_rmb() rmb() | ||
284 | #else | ||
285 | # define smp_rmb() barrier() | ||
286 | #endif | ||
287 | #ifdef CONFIG_X86_OOSTORE | ||
288 | # define smp_wmb() wmb() | ||
289 | #else | ||
290 | # define smp_wmb() barrier() | ||
291 | #endif | ||
292 | #define smp_read_barrier_depends() read_barrier_depends() | ||
293 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | ||
294 | #else | ||
295 | #define smp_mb() barrier() | ||
296 | #define smp_rmb() barrier() | ||
297 | #define smp_wmb() barrier() | ||
298 | #define smp_read_barrier_depends() do { } while(0) | ||
299 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
300 | #endif | ||
301 | |||
302 | #include <linux/irqflags.h> | ||
303 | |||
304 | /* | ||
305 | * disable hlt during certain critical i/o operations | ||
306 | */ | ||
307 | #define HAVE_DISABLE_HLT | ||
308 | void disable_hlt(void); | ||
309 | void enable_hlt(void); | ||
310 | |||
311 | extern int es7000_plat; | ||
312 | void cpu_idle_wait(void); | ||
313 | |||
314 | extern unsigned long arch_align_stack(unsigned long sp); | ||
315 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | ||
316 | |||
317 | void default_idle(void); | ||
318 | void __show_registers(struct pt_regs *, int all); | ||
319 | |||
320 | #endif | ||
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h index 6e9e4841a2da..97fa251ccb2b 100644 --- a/include/asm-x86/system_64.h +++ b/include/asm-x86/system_64.h | |||
@@ -1,126 +1,9 @@ | |||
1 | #ifndef __ASM_SYSTEM_H | 1 | #ifndef __ASM_SYSTEM_H |
2 | #define __ASM_SYSTEM_H | 2 | #define __ASM_SYSTEM_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | ||
5 | #include <asm/segment.h> | 4 | #include <asm/segment.h> |
6 | #include <asm/cmpxchg.h> | 5 | #include <asm/cmpxchg.h> |
7 | 6 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | /* entries in ARCH_DLINFO: */ | ||
11 | #ifdef CONFIG_IA32_EMULATION | ||
12 | # define AT_VECTOR_SIZE_ARCH 2 | ||
13 | #else | ||
14 | # define AT_VECTOR_SIZE_ARCH 1 | ||
15 | #endif | ||
16 | |||
17 | #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t" | ||
18 | #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t" | ||
19 | |||
20 | /* frame pointer must be last for get_wchan */ | ||
21 | #define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" | ||
22 | #define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" | ||
23 | |||
24 | #define __EXTRA_CLOBBER \ | ||
25 | ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15" | ||
26 | |||
27 | /* Save and restore flags, to keep the NT flag from leaking across tasks */ | ||
28 | #define switch_to(prev,next,last) \ | ||
29 | asm volatile(SAVE_CONTEXT \ | ||
30 | "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ | ||
31 | "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ | ||
32 | "call __switch_to\n\t" \ | ||
33 | ".globl thread_return\n" \ | ||
34 | "thread_return:\n\t" \ | ||
35 | "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \ | ||
36 | "movq %P[thread_info](%%rsi),%%r8\n\t" \ | ||
37 | LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \ | ||
38 | "movq %%rax,%%rdi\n\t" \ | ||
39 | "jc ret_from_fork\n\t" \ | ||
40 | RESTORE_CONTEXT \ | ||
41 | : "=a" (last) \ | ||
42 | : [next] "S" (next), [prev] "D" (prev), \ | ||
43 | [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \ | ||
44 | [ti_flags] "i" (offsetof(struct thread_info, flags)),\ | ||
45 | [tif_fork] "i" (TIF_FORK), \ | ||
46 | [thread_info] "i" (offsetof(struct task_struct, stack)), \ | ||
47 | [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \ | ||
48 | : "memory", "cc" __EXTRA_CLOBBER) | ||
49 | |||
50 | extern void load_gs_index(unsigned); | ||
51 | |||
52 | /* | ||
53 | * Load a segment. Fall back on loading the zero | ||
54 | * segment if something goes wrong. | ||
55 | */ | ||
56 | #define loadsegment(seg,value) \ | ||
57 | asm volatile("\n" \ | ||
58 | "1:\t" \ | ||
59 | "movl %k0,%%" #seg "\n" \ | ||
60 | "2:\n" \ | ||
61 | ".section .fixup,\"ax\"\n" \ | ||
62 | "3:\t" \ | ||
63 | "movl %1,%%" #seg "\n\t" \ | ||
64 | "jmp 2b\n" \ | ||
65 | ".previous\n" \ | ||
66 | ".section __ex_table,\"a\"\n\t" \ | ||
67 | ".align 8\n\t" \ | ||
68 | ".quad 1b,3b\n" \ | ||
69 | ".previous" \ | ||
70 | : :"r" (value), "r" (0)) | ||
71 | |||
72 | /* | ||
73 | * Clear and set 'TS' bit respectively | ||
74 | */ | ||
75 | #define clts() __asm__ __volatile__ ("clts") | ||
76 | |||
77 | static inline unsigned long read_cr0(void) | ||
78 | { | ||
79 | unsigned long cr0; | ||
80 | asm volatile("movq %%cr0,%0" : "=r" (cr0)); | ||
81 | return cr0; | ||
82 | } | ||
83 | |||
84 | static inline void write_cr0(unsigned long val) | ||
85 | { | ||
86 | asm volatile("movq %0,%%cr0" :: "r" (val)); | ||
87 | } | ||
88 | |||
89 | static inline unsigned long read_cr2(void) | ||
90 | { | ||
91 | unsigned long cr2; | ||
92 | asm volatile("movq %%cr2,%0" : "=r" (cr2)); | ||
93 | return cr2; | ||
94 | } | ||
95 | |||
96 | static inline void write_cr2(unsigned long val) | ||
97 | { | ||
98 | asm volatile("movq %0,%%cr2" :: "r" (val)); | ||
99 | } | ||
100 | |||
101 | static inline unsigned long read_cr3(void) | ||
102 | { | ||
103 | unsigned long cr3; | ||
104 | asm volatile("movq %%cr3,%0" : "=r" (cr3)); | ||
105 | return cr3; | ||
106 | } | ||
107 | |||
108 | static inline void write_cr3(unsigned long val) | ||
109 | { | ||
110 | asm volatile("movq %0,%%cr3" :: "r" (val) : "memory"); | ||
111 | } | ||
112 | |||
113 | static inline unsigned long read_cr4(void) | ||
114 | { | ||
115 | unsigned long cr4; | ||
116 | asm volatile("movq %%cr4,%0" : "=r" (cr4)); | ||
117 | return cr4; | ||
118 | } | ||
119 | |||
120 | static inline void write_cr4(unsigned long val) | ||
121 | { | ||
122 | asm volatile("movq %0,%%cr4" :: "r" (val) : "memory"); | ||
123 | } | ||
124 | 7 | ||
125 | static inline unsigned long read_cr8(void) | 8 | static inline unsigned long read_cr8(void) |
126 | { | 9 | { |
@@ -134,52 +17,6 @@ static inline void write_cr8(unsigned long val) | |||
134 | asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); | 17 | asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); |
135 | } | 18 | } |
136 | 19 | ||
137 | #define stts() write_cr0(8 | read_cr0()) | ||
138 | |||
139 | #define wbinvd() \ | ||
140 | __asm__ __volatile__ ("wbinvd": : :"memory") | ||
141 | |||
142 | #endif /* __KERNEL__ */ | ||
143 | |||
144 | static inline void clflush(volatile void *__p) | ||
145 | { | ||
146 | asm volatile("clflush %0" : "+m" (*(char __force *)__p)); | ||
147 | } | ||
148 | |||
149 | #define nop() __asm__ __volatile__ ("nop") | ||
150 | |||
151 | #ifdef CONFIG_SMP | ||
152 | #define smp_mb() mb() | ||
153 | #define smp_rmb() barrier() | ||
154 | #define smp_wmb() barrier() | ||
155 | #define smp_read_barrier_depends() do {} while(0) | ||
156 | #else | ||
157 | #define smp_mb() barrier() | ||
158 | #define smp_rmb() barrier() | ||
159 | #define smp_wmb() barrier() | ||
160 | #define smp_read_barrier_depends() do {} while(0) | ||
161 | #endif | ||
162 | |||
163 | |||
164 | /* | ||
165 | * Force strict CPU ordering. | ||
166 | * And yes, this is required on UP too when we're talking | ||
167 | * to devices. | ||
168 | */ | ||
169 | #define mb() asm volatile("mfence":::"memory") | ||
170 | #define rmb() asm volatile("lfence":::"memory") | ||
171 | #define wmb() asm volatile("sfence" ::: "memory") | ||
172 | |||
173 | #define read_barrier_depends() do {} while(0) | ||
174 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | ||
175 | |||
176 | #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) | ||
177 | |||
178 | #include <linux/irqflags.h> | 20 | #include <linux/irqflags.h> |
179 | 21 | ||
180 | void cpu_idle_wait(void); | ||
181 | |||
182 | extern unsigned long arch_align_stack(unsigned long sp); | ||
183 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | ||
184 | |||
185 | #endif | 22 | #endif |
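The 64-bit control-register accessors above are the raw primitives behind stts(): CR0 bit 3 (TS, the literal 8 in the stts() define) is set so that the next FPU instruction traps with #NM, and clts() clears it again. A minimal sketch of that lazy-FPU pattern, using only the helpers from this hunk (the function names and the X86_CR0_TS spelling are illustrative; the constant matches the 8 used by stts()):

    #define X86_CR0_TS 0x8                            /* CR0.TS, bit 3 */

    static inline void fpu_lazy_disable(void)
    {
            write_cr0(read_cr0() | X86_CR0_TS);       /* equivalent to stts() */
    }

    static inline void fpu_lazy_enable(void)
    {
            clts();                                   /* clear TS, FPU usable again */
    }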
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h index 22a8cbcd35e2..5bd508260ffb 100644 --- a/include/asm-x86/thread_info_32.h +++ b/include/asm-x86/thread_info_32.h | |||
@@ -85,7 +85,7 @@ struct thread_info { | |||
85 | 85 | ||
86 | 86 | ||
87 | /* how to get the current stack pointer from C */ | 87 | /* how to get the current stack pointer from C */ |
88 | register unsigned long current_stack_pointer asm("esp") __attribute_used__; | 88 | register unsigned long current_stack_pointer asm("esp") __used; |
89 | 89 | ||
90 | /* how to get the thread information struct from C */ | 90 | /* how to get the thread information struct from C */ |
91 | static inline struct thread_info *current_thread_info(void) | 91 | static inline struct thread_info *current_thread_info(void) |
@@ -132,11 +132,16 @@ static inline struct thread_info *current_thread_info(void) | |||
132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ | 132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ |
133 | #define TIF_SECCOMP 7 /* secure computing */ | 133 | #define TIF_SECCOMP 7 /* secure computing */ |
134 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ | 134 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ |
135 | #define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */ | ||
135 | #define TIF_MEMDIE 16 | 136 | #define TIF_MEMDIE 16 |
136 | #define TIF_DEBUG 17 /* uses debug registers */ | 137 | #define TIF_DEBUG 17 /* uses debug registers */ |
137 | #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ | 138 | #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ |
138 | #define TIF_FREEZE 19 /* is freezing for suspend */ | 139 | #define TIF_FREEZE 19 /* is freezing for suspend */ |
139 | #define TIF_NOTSC 20 /* TSC is not accessible in userland */ | 140 | #define TIF_NOTSC 20 /* TSC is not accessible in userland */ |
141 | #define TIF_FORCED_TF 21 /* true if TF in eflags artificially */ | ||
142 | #define TIF_DEBUGCTLMSR 22 /* uses thread_struct.debugctlmsr */ | ||
143 | #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ | ||
144 | #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ | ||
140 | 145 | ||
141 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 146 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
142 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 147 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) |
@@ -147,10 +152,15 @@ static inline struct thread_info *current_thread_info(void) | |||
147 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 152 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
148 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 153 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
149 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 154 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
155 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | ||
150 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 156 | #define _TIF_DEBUG (1<<TIF_DEBUG) |
151 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 157 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) |
152 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 158 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
153 | #define _TIF_NOTSC (1<<TIF_NOTSC) | 159 | #define _TIF_NOTSC (1<<TIF_NOTSC) |
160 | #define _TIF_FORCED_TF (1<<TIF_FORCED_TF) | ||
161 | #define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) | ||
162 | #define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) | ||
163 | #define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) | ||
154 | 164 | ||
155 | /* work to do on interrupt/exception return */ | 165 | /* work to do on interrupt/exception return */ |
156 | #define _TIF_WORK_MASK \ | 166 | #define _TIF_WORK_MASK \ |
@@ -160,8 +170,12 @@ static inline struct thread_info *current_thread_info(void) | |||
160 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 170 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
161 | 171 | ||
162 | /* flags to check in __switch_to() */ | 172 | /* flags to check in __switch_to() */ |
163 | #define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG) | 173 | #define _TIF_WORK_CTXSW \ |
164 | #define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC) | 174 | (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ |
175 | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) | ||
176 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | ||
177 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) | ||
178 | |||
165 | 179 | ||
166 | /* | 180 | /* |
167 | * Thread-synchronous status. | 181 | * Thread-synchronous status. |
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h index beae2bfb62ca..6c9b214b8fc3 100644 --- a/include/asm-x86/thread_info_64.h +++ b/include/asm-x86/thread_info_64.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | struct task_struct; | 22 | struct task_struct; |
23 | struct exec_domain; | 23 | struct exec_domain; |
24 | #include <asm/mmsegment.h> | 24 | #include <asm/processor.h> |
25 | 25 | ||
26 | struct thread_info { | 26 | struct thread_info { |
27 | struct task_struct *task; /* main task structure */ | 27 | struct task_struct *task; /* main task structure */ |
@@ -33,6 +33,9 @@ struct thread_info { | |||
33 | 33 | ||
34 | mm_segment_t addr_limit; | 34 | mm_segment_t addr_limit; |
35 | struct restart_block restart_block; | 35 | struct restart_block restart_block; |
36 | #ifdef CONFIG_IA32_EMULATION | ||
37 | void __user *sysenter_return; | ||
38 | #endif | ||
36 | }; | 39 | }; |
37 | #endif | 40 | #endif |
38 | 41 | ||
@@ -74,20 +77,14 @@ static inline struct thread_info *stack_thread_info(void) | |||
74 | 77 | ||
75 | /* thread information allocation */ | 78 | /* thread information allocation */ |
76 | #ifdef CONFIG_DEBUG_STACK_USAGE | 79 | #ifdef CONFIG_DEBUG_STACK_USAGE |
77 | #define alloc_thread_info(tsk) \ | 80 | #define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) |
78 | ({ \ | ||
79 | struct thread_info *ret; \ | ||
80 | \ | ||
81 | ret = ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)); \ | ||
82 | if (ret) \ | ||
83 | memset(ret, 0, THREAD_SIZE); \ | ||
84 | ret; \ | ||
85 | }) | ||
86 | #else | 81 | #else |
87 | #define alloc_thread_info(tsk) \ | 82 | #define THREAD_FLAGS GFP_KERNEL |
88 | ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) | ||
89 | #endif | 83 | #endif |
90 | 84 | ||
85 | #define alloc_thread_info(tsk) \ | ||
86 | ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER)) | ||
87 | |||
91 | #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) | 88 | #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) |
92 | 89 | ||
93 | #else /* !__ASSEMBLY__ */ | 90 | #else /* !__ASSEMBLY__ */ |
@@ -115,6 +112,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
115 | #define TIF_SECCOMP 8 /* secure computing */ | 112 | #define TIF_SECCOMP 8 /* secure computing */ |
116 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ | 113 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ |
117 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 114 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
115 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | ||
118 | /* 16 free */ | 116 | /* 16 free */ |
119 | #define TIF_IA32 17 /* 32bit process */ | 117 | #define TIF_IA32 17 /* 32bit process */ |
120 | #define TIF_FORK 18 /* ret_from_fork */ | 118 | #define TIF_FORK 18 /* ret_from_fork */ |
@@ -123,6 +121,10 @@ static inline struct thread_info *stack_thread_info(void) | |||
123 | #define TIF_DEBUG 21 /* uses debug registers */ | 121 | #define TIF_DEBUG 21 /* uses debug registers */ |
124 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | 122 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ |
125 | #define TIF_FREEZE 23 /* is freezing for suspend */ | 123 | #define TIF_FREEZE 23 /* is freezing for suspend */ |
124 | #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ | ||
125 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | ||
126 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | ||
127 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ | ||
126 | 128 | ||
127 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 129 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
128 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 130 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) |
@@ -133,12 +135,17 @@ static inline struct thread_info *stack_thread_info(void) | |||
133 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 135 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
134 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 136 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
135 | #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) | 137 | #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) |
138 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | ||
136 | #define _TIF_IA32 (1<<TIF_IA32) | 139 | #define _TIF_IA32 (1<<TIF_IA32) |
137 | #define _TIF_FORK (1<<TIF_FORK) | 140 | #define _TIF_FORK (1<<TIF_FORK) |
138 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | 141 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) |
139 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 142 | #define _TIF_DEBUG (1<<TIF_DEBUG) |
140 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 143 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) |
141 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 144 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
145 | #define _TIF_FORCED_TF (1<<TIF_FORCED_TF) | ||
146 | #define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) | ||
147 | #define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) | ||
148 | #define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) | ||
142 | 149 | ||
143 | /* work to do on interrupt/exception return */ | 150 | /* work to do on interrupt/exception return */ |
144 | #define _TIF_WORK_MASK \ | 151 | #define _TIF_WORK_MASK \ |
@@ -146,8 +153,14 @@ static inline struct thread_info *stack_thread_info(void) | |||
146 | /* work to do on any return to user space */ | 153 | /* work to do on any return to user space */ |
147 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 154 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
148 | 155 | ||
156 | #define _TIF_DO_NOTIFY_MASK \ | ||
157 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | ||
158 | |||
149 | /* flags to check in __switch_to() */ | 159 | /* flags to check in __switch_to() */ |
150 | #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP) | 160 | #define _TIF_WORK_CTXSW \ |
161 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS) | ||
162 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | ||
163 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | ||
151 | 164 | ||
152 | #define PREEMPT_ACTIVE 0x10000000 | 165 | #define PREEMPT_ACTIVE 0x10000000 |
153 | 166 | ||
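The new _TIF_WORK_CTXSW_PREV/_TIF_WORK_CTXSW_NEXT masks (mirrored in the 32-bit header above) let the context-switch path test every slow-path condition with two bit-ANDs instead of one branch per flag. A hedged sketch of how such masks are consumed (the __switch_to_xtra() helper name matches the arch code of this series, but treat the body as illustrative):

    /* Illustrative consumer of the masks defined above, in __switch_to(). */
    if (unlikely(task_thread_info(prev)->flags & _TIF_WORK_CTXSW_PREV ||
                 task_thread_info(next)->flags & _TIF_WORK_CTXSW_NEXT))
            __switch_to_xtra(prev, next, tss);  /* I/O bitmap, debugctl MSRs, ... */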
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h index eac011366dc2..68779b048a3e 100644 --- a/include/asm-x86/time.h +++ b/include/asm-x86/time.h | |||
@@ -1,8 +1,12 @@ | |||
1 | #ifndef _ASMi386_TIME_H | 1 | #ifndef _ASMX86_TIME_H |
2 | #define _ASMi386_TIME_H | 2 | #define _ASMX86_TIME_H |
3 | 3 | ||
4 | extern void (*late_time_init)(void); | ||
5 | extern void hpet_time_init(void); | ||
6 | |||
7 | #include <asm/mc146818rtc.h> | ||
8 | #ifdef CONFIG_X86_32 | ||
4 | #include <linux/efi.h> | 9 | #include <linux/efi.h> |
5 | #include "mach_time.h" | ||
6 | 10 | ||
7 | static inline unsigned long native_get_wallclock(void) | 11 | static inline unsigned long native_get_wallclock(void) |
8 | { | 12 | { |
@@ -28,8 +32,20 @@ static inline int native_set_wallclock(unsigned long nowtime) | |||
28 | return retval; | 32 | return retval; |
29 | } | 33 | } |
30 | 34 | ||
31 | extern void (*late_time_init)(void); | 35 | #else |
32 | extern void hpet_time_init(void); | 36 | extern void native_time_init_hook(void); |
37 | |||
38 | static inline unsigned long native_get_wallclock(void) | ||
39 | { | ||
40 | return mach_get_cmos_time(); | ||
41 | } | ||
42 | |||
43 | static inline int native_set_wallclock(unsigned long nowtime) | ||
44 | { | ||
45 | return mach_set_rtc_mmss(nowtime); | ||
46 | } | ||
47 | |||
48 | #endif | ||
33 | 49 | ||
34 | #ifdef CONFIG_PARAVIRT | 50 | #ifdef CONFIG_PARAVIRT |
35 | #include <asm/paravirt.h> | 51 | #include <asm/paravirt.h> |
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h index 0db7e994fb8b..4f6fcb050c11 100644 --- a/include/asm-x86/timer.h +++ b/include/asm-x86/timer.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASMi386_TIMER_H | 2 | #define _ASMi386_TIMER_H |
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/pm.h> | 4 | #include <linux/pm.h> |
5 | #include <linux/percpu.h> | ||
5 | 6 | ||
6 | #define TICK_SIZE (tick_nsec / 1000) | 7 | #define TICK_SIZE (tick_nsec / 1000) |
7 | 8 | ||
@@ -16,7 +17,7 @@ extern int recalibrate_cpu_khz(void); | |||
16 | #define calculate_cpu_khz() native_calculate_cpu_khz() | 17 | #define calculate_cpu_khz() native_calculate_cpu_khz() |
17 | #endif | 18 | #endif |
18 | 19 | ||
19 | /* Accellerators for sched_clock() | 20 | /* Accelerators for sched_clock() |
20 | * convert from cycles(64bits) => nanoseconds (64bits) | 21 | * convert from cycles(64bits) => nanoseconds (64bits) |
21 | * basic equation: | 22 | * basic equation: |
22 | * ns = cycles / (freq / ns_per_sec) | 23 | * ns = cycles / (freq / ns_per_sec) |
@@ -31,20 +32,32 @@ extern int recalibrate_cpu_khz(void); | |||
31 | * And since SC is a constant power of two, we can convert the div | 32 | * And since SC is a constant power of two, we can convert the div |
32 | * into a shift. | 33 | * into a shift. |
33 | * | 34 | * |
34 | * We can use khz divisor instead of mhz to keep a better percision, since | 35 | * We can use khz divisor instead of mhz to keep a better precision, since |
35 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | 36 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. |
36 | * (mathieu.desnoyers@polymtl.ca) | 37 | * (mathieu.desnoyers@polymtl.ca) |
37 | * | 38 | * |
38 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | 39 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" |
39 | */ | 40 | */ |
40 | extern unsigned long cyc2ns_scale __read_mostly; | 41 | |
42 | DECLARE_PER_CPU(unsigned long, cyc2ns); | ||
41 | 43 | ||
42 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 44 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
43 | 45 | ||
44 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | 46 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) |
45 | { | 47 | { |
46 | return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; | 48 | return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR; |
47 | } | 49 | } |
48 | 50 | ||
51 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | ||
52 | { | ||
53 | unsigned long long ns; | ||
54 | unsigned long flags; | ||
55 | |||
56 | local_irq_save(flags); | ||
57 | ns = __cycles_2_ns(cyc); | ||
58 | local_irq_restore(flags); | ||
59 | |||
60 | return ns; | ||
61 | } | ||
49 | 62 | ||
50 | #endif | 63 | #endif |
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h index 39a21ab030f0..43e5a78500c5 100644 --- a/include/asm-x86/timex.h +++ b/include/asm-x86/timex.h | |||
@@ -7,12 +7,13 @@ | |||
7 | 7 | ||
8 | #ifdef CONFIG_X86_ELAN | 8 | #ifdef CONFIG_X86_ELAN |
9 | # define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */ | 9 | # define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */ |
10 | #elif defined(CONFIG_X86_RDC321X) | ||
11 | # define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */ | ||
10 | #else | 12 | #else |
11 | # define PIT_TICK_RATE 1193182 /* Underlying HZ */ | 13 | # define PIT_TICK_RATE 1193182 /* Underlying HZ */ |
12 | #endif | 14 | #endif |
13 | #define CLOCK_TICK_RATE PIT_TICK_RATE | 15 | #define CLOCK_TICK_RATE PIT_TICK_RATE |
14 | 16 | ||
15 | extern int read_current_timer(unsigned long *timer_value); | 17 | #define ARCH_HAS_READ_CURRENT_TIMER |
16 | #define ARCH_HAS_READ_CURRENT_TIMER 1 | ||
17 | 18 | ||
18 | #endif | 19 | #endif |
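PIT_TICK_RATE is the i8253 input-clock frequency; boards like the AMD Elan and the RDC R8610 feed the counter a different clock, hence the per-platform values. The generic LATCH definition in <linux/jiffies.h> consumes CLOCK_TICK_RATE by dividing it by HZ with rounding:

    /* Mirrors the generic LATCH definition; HZ = 250 here is an assumption. */
    #define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ)   /* (1193182 + 125) / 250 = 4773 */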
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 9af4cc83a1af..3998709ed637 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h | |||
@@ -1,5 +1,158 @@ | |||
1 | #ifndef _ASM_X86_TLBFLUSH_H | ||
2 | #define _ASM_X86_TLBFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <linux/sched.h> | ||
6 | |||
7 | #include <asm/processor.h> | ||
8 | #include <asm/system.h> | ||
9 | |||
10 | #ifdef CONFIG_PARAVIRT | ||
11 | #include <asm/paravirt.h> | ||
12 | #else | ||
13 | #define __flush_tlb() __native_flush_tlb() | ||
14 | #define __flush_tlb_global() __native_flush_tlb_global() | ||
15 | #define __flush_tlb_single(addr) __native_flush_tlb_single(addr) | ||
16 | #endif | ||
17 | |||
18 | static inline void __native_flush_tlb(void) | ||
19 | { | ||
20 | write_cr3(read_cr3()); | ||
21 | } | ||
22 | |||
23 | static inline void __native_flush_tlb_global(void) | ||
24 | { | ||
25 | unsigned long cr4 = read_cr4(); | ||
26 | |||
27 | /* clear PGE */ | ||
28 | write_cr4(cr4 & ~X86_CR4_PGE); | ||
29 | /* write old PGE again and flush TLBs */ | ||
30 | write_cr4(cr4); | ||
31 | } | ||
32 | |||
33 | static inline void __native_flush_tlb_single(unsigned long addr) | ||
34 | { | ||
35 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory"); | ||
36 | } | ||
37 | |||
38 | static inline void __flush_tlb_all(void) | ||
39 | { | ||
40 | if (cpu_has_pge) | ||
41 | __flush_tlb_global(); | ||
42 | else | ||
43 | __flush_tlb(); | ||
44 | } | ||
45 | |||
46 | static inline void __flush_tlb_one(unsigned long addr) | ||
47 | { | ||
48 | if (cpu_has_invlpg) | ||
49 | __flush_tlb_single(addr); | ||
50 | else | ||
51 | __flush_tlb(); | ||
52 | } | ||
53 | |||
1 | #ifdef CONFIG_X86_32 | 54 | #ifdef CONFIG_X86_32 |
2 | # include "tlbflush_32.h" | 55 | # define TLB_FLUSH_ALL 0xffffffff |
3 | #else | 56 | #else |
4 | # include "tlbflush_64.h" | 57 | # define TLB_FLUSH_ALL -1ULL |
58 | #endif | ||
59 | |||
60 | /* | ||
61 | * TLB flushing: | ||
62 | * | ||
63 | * - flush_tlb() flushes the current mm struct TLBs | ||
64 | * - flush_tlb_all() flushes all processes TLBs | ||
65 | * - flush_tlb_mm(mm) flushes the specified mm context TLBs | ||
66 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
67 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
68 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
69 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | ||
70 | * | ||
71 | * ..but the i386 has somewhat limited tlb flushing capabilities, | ||
72 | * and page-granular flushes are available only on i486 and up. | ||
73 | * | ||
74 | * x86-64 can only flush individual pages or full VMs. For a range flush | ||
75 | * we always do the full VM. Might be worth trying if for a small | ||
76 | * range a few INVLPGs in a row are a win. | ||
77 | */ | ||
78 | |||
79 | #ifndef CONFIG_SMP | ||
80 | |||
81 | #define flush_tlb() __flush_tlb() | ||
82 | #define flush_tlb_all() __flush_tlb_all() | ||
83 | #define local_flush_tlb() __flush_tlb() | ||
84 | |||
85 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
86 | { | ||
87 | if (mm == current->active_mm) | ||
88 | __flush_tlb(); | ||
89 | } | ||
90 | |||
91 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
92 | unsigned long addr) | ||
93 | { | ||
94 | if (vma->vm_mm == current->active_mm) | ||
95 | __flush_tlb_one(addr); | ||
96 | } | ||
97 | |||
98 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
99 | unsigned long start, unsigned long end) | ||
100 | { | ||
101 | if (vma->vm_mm == current->active_mm) | ||
102 | __flush_tlb(); | ||
103 | } | ||
104 | |||
105 | static inline void native_flush_tlb_others(const cpumask_t *cpumask, | ||
106 | struct mm_struct *mm, | ||
107 | unsigned long va) | ||
108 | { | ||
109 | } | ||
110 | |||
111 | #else /* SMP */ | ||
112 | |||
113 | #include <asm/smp.h> | ||
114 | |||
115 | #define local_flush_tlb() __flush_tlb() | ||
116 | |||
117 | extern void flush_tlb_all(void); | ||
118 | extern void flush_tlb_current_task(void); | ||
119 | extern void flush_tlb_mm(struct mm_struct *); | ||
120 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
121 | |||
122 | #define flush_tlb() flush_tlb_current_task() | ||
123 | |||
124 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
125 | unsigned long start, unsigned long end) | ||
126 | { | ||
127 | flush_tlb_mm(vma->vm_mm); | ||
128 | } | ||
129 | |||
130 | void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, | ||
131 | unsigned long va); | ||
132 | |||
133 | #define TLBSTATE_OK 1 | ||
134 | #define TLBSTATE_LAZY 2 | ||
135 | |||
136 | #ifdef CONFIG_X86_32 | ||
137 | struct tlb_state | ||
138 | { | ||
139 | struct mm_struct *active_mm; | ||
140 | int state; | ||
141 | char __cacheline_padding[L1_CACHE_BYTES-8]; | ||
142 | }; | ||
143 | DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); | ||
144 | #endif | ||
145 | |||
146 | #endif /* SMP */ | ||
147 | |||
148 | #ifndef CONFIG_PARAVIRT | ||
149 | #define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va) | ||
5 | #endif | 150 | #endif |
151 | |||
152 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
153 | unsigned long end) | ||
154 | { | ||
155 | flush_tlb_all(); | ||
156 | } | ||
157 | |||
158 | #endif /* _ASM_X86_TLBFLUSH_H */ | ||
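With the 32- and 64-bit variants merged, the UP cases stay inline while the SMP cases go through the extern IPI-based implementations. A typical caller after editing a single user PTE looks roughly like this (an illustrative sketch, not code from this patch):

    /* Sketch: after changing one user-space PTE, flush only that page. */
    static void update_one_pte(struct vm_area_struct *vma, unsigned long addr,
                               pte_t *ptep, pte_t newpte)
    {
            set_pte(ptep, newpte);       /* install the new mapping */
            flush_tlb_page(vma, addr);   /* one INVLPG, or a full flush pre-i486 */
    }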
diff --git a/include/asm-x86/tlbflush_32.h b/include/asm-x86/tlbflush_32.h deleted file mode 100644 index 2bd5b95e2048..000000000000 --- a/include/asm-x86/tlbflush_32.h +++ /dev/null | |||
@@ -1,168 +0,0 @@ | |||
1 | #ifndef _I386_TLBFLUSH_H | ||
2 | #define _I386_TLBFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <asm/processor.h> | ||
6 | |||
7 | #ifdef CONFIG_PARAVIRT | ||
8 | #include <asm/paravirt.h> | ||
9 | #else | ||
10 | #define __flush_tlb() __native_flush_tlb() | ||
11 | #define __flush_tlb_global() __native_flush_tlb_global() | ||
12 | #define __flush_tlb_single(addr) __native_flush_tlb_single(addr) | ||
13 | #endif | ||
14 | |||
15 | #define __native_flush_tlb() \ | ||
16 | do { \ | ||
17 | unsigned int tmpreg; \ | ||
18 | \ | ||
19 | __asm__ __volatile__( \ | ||
20 | "movl %%cr3, %0; \n" \ | ||
21 | "movl %0, %%cr3; # flush TLB \n" \ | ||
22 | : "=r" (tmpreg) \ | ||
23 | :: "memory"); \ | ||
24 | } while (0) | ||
25 | |||
26 | /* | ||
27 | * Global pages have to be flushed a bit differently. Not a real | ||
28 | * performance problem because this does not happen often. | ||
29 | */ | ||
30 | #define __native_flush_tlb_global() \ | ||
31 | do { \ | ||
32 | unsigned int tmpreg, cr4, cr4_orig; \ | ||
33 | \ | ||
34 | __asm__ __volatile__( \ | ||
35 | "movl %%cr4, %2; # turn off PGE \n" \ | ||
36 | "movl %2, %1; \n" \ | ||
37 | "andl %3, %1; \n" \ | ||
38 | "movl %1, %%cr4; \n" \ | ||
39 | "movl %%cr3, %0; \n" \ | ||
40 | "movl %0, %%cr3; # flush TLB \n" \ | ||
41 | "movl %2, %%cr4; # turn PGE back on \n" \ | ||
42 | : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ | ||
43 | : "i" (~X86_CR4_PGE) \ | ||
44 | : "memory"); \ | ||
45 | } while (0) | ||
46 | |||
47 | #define __native_flush_tlb_single(addr) \ | ||
48 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") | ||
49 | |||
50 | # define __flush_tlb_all() \ | ||
51 | do { \ | ||
52 | if (cpu_has_pge) \ | ||
53 | __flush_tlb_global(); \ | ||
54 | else \ | ||
55 | __flush_tlb(); \ | ||
56 | } while (0) | ||
57 | |||
58 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) | ||
59 | |||
60 | #ifdef CONFIG_X86_INVLPG | ||
61 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) | ||
62 | #else | ||
63 | # define __flush_tlb_one(addr) \ | ||
64 | do { \ | ||
65 | if (cpu_has_invlpg) \ | ||
66 | __flush_tlb_single(addr); \ | ||
67 | else \ | ||
68 | __flush_tlb(); \ | ||
69 | } while (0) | ||
70 | #endif | ||
71 | |||
72 | /* | ||
73 | * TLB flushing: | ||
74 | * | ||
75 | * - flush_tlb() flushes the current mm struct TLBs | ||
76 | * - flush_tlb_all() flushes all processes TLBs | ||
77 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
78 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
79 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
80 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
81 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | ||
82 | * | ||
83 | * ..but the i386 has somewhat limited tlb flushing capabilities, | ||
84 | * and page-granular flushes are available only on i486 and up. | ||
85 | */ | ||
86 | |||
87 | #define TLB_FLUSH_ALL 0xffffffff | ||
88 | |||
89 | |||
90 | #ifndef CONFIG_SMP | ||
91 | |||
92 | #include <linux/sched.h> | ||
93 | |||
94 | #define flush_tlb() __flush_tlb() | ||
95 | #define flush_tlb_all() __flush_tlb_all() | ||
96 | #define local_flush_tlb() __flush_tlb() | ||
97 | |||
98 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
99 | { | ||
100 | if (mm == current->active_mm) | ||
101 | __flush_tlb(); | ||
102 | } | ||
103 | |||
104 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
105 | unsigned long addr) | ||
106 | { | ||
107 | if (vma->vm_mm == current->active_mm) | ||
108 | __flush_tlb_one(addr); | ||
109 | } | ||
110 | |||
111 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
112 | unsigned long start, unsigned long end) | ||
113 | { | ||
114 | if (vma->vm_mm == current->active_mm) | ||
115 | __flush_tlb(); | ||
116 | } | ||
117 | |||
118 | static inline void native_flush_tlb_others(const cpumask_t *cpumask, | ||
119 | struct mm_struct *mm, unsigned long va) | ||
120 | { | ||
121 | } | ||
122 | |||
123 | #else /* SMP */ | ||
124 | |||
125 | #include <asm/smp.h> | ||
126 | |||
127 | #define local_flush_tlb() \ | ||
128 | __flush_tlb() | ||
129 | |||
130 | extern void flush_tlb_all(void); | ||
131 | extern void flush_tlb_current_task(void); | ||
132 | extern void flush_tlb_mm(struct mm_struct *); | ||
133 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
134 | |||
135 | #define flush_tlb() flush_tlb_current_task() | ||
136 | |||
137 | static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) | ||
138 | { | ||
139 | flush_tlb_mm(vma->vm_mm); | ||
140 | } | ||
141 | |||
142 | void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, | ||
143 | unsigned long va); | ||
144 | |||
145 | #define TLBSTATE_OK 1 | ||
146 | #define TLBSTATE_LAZY 2 | ||
147 | |||
148 | struct tlb_state | ||
149 | { | ||
150 | struct mm_struct *active_mm; | ||
151 | int state; | ||
152 | char __cacheline_padding[L1_CACHE_BYTES-8]; | ||
153 | }; | ||
154 | DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); | ||
155 | #endif /* SMP */ | ||
156 | |||
157 | #ifndef CONFIG_PARAVIRT | ||
158 | #define flush_tlb_others(mask, mm, va) \ | ||
159 | native_flush_tlb_others(&mask, mm, va) | ||
160 | #endif | ||
161 | |||
162 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
163 | unsigned long end) | ||
164 | { | ||
165 | flush_tlb_all(); | ||
166 | } | ||
167 | |||
168 | #endif /* _I386_TLBFLUSH_H */ | ||
diff --git a/include/asm-x86/tlbflush_64.h b/include/asm-x86/tlbflush_64.h deleted file mode 100644 index 7731fd23d572..000000000000 --- a/include/asm-x86/tlbflush_64.h +++ /dev/null | |||
@@ -1,100 +0,0 @@ | |||
1 | #ifndef _X8664_TLBFLUSH_H | ||
2 | #define _X8664_TLBFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <asm/processor.h> | ||
7 | #include <asm/system.h> | ||
8 | |||
9 | static inline void __flush_tlb(void) | ||
10 | { | ||
11 | write_cr3(read_cr3()); | ||
12 | } | ||
13 | |||
14 | static inline void __flush_tlb_all(void) | ||
15 | { | ||
16 | unsigned long cr4 = read_cr4(); | ||
17 | write_cr4(cr4 & ~X86_CR4_PGE); /* clear PGE */ | ||
18 | write_cr4(cr4); /* write old PGE again and flush TLBs */ | ||
19 | } | ||
20 | |||
21 | #define __flush_tlb_one(addr) \ | ||
22 | __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory") | ||
23 | |||
24 | |||
25 | /* | ||
26 | * TLB flushing: | ||
27 | * | ||
28 | * - flush_tlb() flushes the current mm struct TLBs | ||
29 | * - flush_tlb_all() flushes all processes TLBs | ||
30 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
31 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
32 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
33 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
34 | * | ||
35 | * x86-64 can only flush individual pages or full VMs. For a range flush | ||
36 | * we always do the full VM. Might be worth trying if for a small | ||
37 | * range a few INVLPGs in a row are a win. | ||
38 | */ | ||
39 | |||
40 | #ifndef CONFIG_SMP | ||
41 | |||
42 | #define flush_tlb() __flush_tlb() | ||
43 | #define flush_tlb_all() __flush_tlb_all() | ||
44 | #define local_flush_tlb() __flush_tlb() | ||
45 | |||
46 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
47 | { | ||
48 | if (mm == current->active_mm) | ||
49 | __flush_tlb(); | ||
50 | } | ||
51 | |||
52 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
53 | unsigned long addr) | ||
54 | { | ||
55 | if (vma->vm_mm == current->active_mm) | ||
56 | __flush_tlb_one(addr); | ||
57 | } | ||
58 | |||
59 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
60 | unsigned long start, unsigned long end) | ||
61 | { | ||
62 | if (vma->vm_mm == current->active_mm) | ||
63 | __flush_tlb(); | ||
64 | } | ||
65 | |||
66 | #else | ||
67 | |||
68 | #include <asm/smp.h> | ||
69 | |||
70 | #define local_flush_tlb() \ | ||
71 | __flush_tlb() | ||
72 | |||
73 | extern void flush_tlb_all(void); | ||
74 | extern void flush_tlb_current_task(void); | ||
75 | extern void flush_tlb_mm(struct mm_struct *); | ||
76 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
77 | |||
78 | #define flush_tlb() flush_tlb_current_task() | ||
79 | |||
80 | static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) | ||
81 | { | ||
82 | flush_tlb_mm(vma->vm_mm); | ||
83 | } | ||
84 | |||
85 | #define TLBSTATE_OK 1 | ||
86 | #define TLBSTATE_LAZY 2 | ||
87 | |||
88 | /* Roughly an IPI every 20MB with 4k pages for freeing page table | ||
89 | ranges. Cost is about 42k of memory for each CPU. */ | ||
90 | #define ARCH_FREE_PTE_NR 5350 | ||
91 | |||
92 | #endif | ||
93 | |||
94 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
95 | unsigned long end) | ||
96 | { | ||
97 | flush_tlb_all(); | ||
98 | } | ||
99 | |||
100 | #endif /* _X8664_TLBFLUSH_H */ | ||
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index b10fde9798ea..8af05a93f097 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h | |||
@@ -1,5 +1,188 @@ | |||
1 | /* | ||
2 | * Written by: Matthew Dobson, IBM Corporation | ||
3 | * | ||
4 | * Copyright (C) 2002, IBM Corp. | ||
5 | * | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
16 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
17 | * details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | * | ||
23 | * Send feedback to <colpatch@us.ibm.com> | ||
24 | */ | ||
25 | #ifndef _ASM_X86_TOPOLOGY_H | ||
26 | #define _ASM_X86_TOPOLOGY_H | ||
27 | |||
28 | #ifdef CONFIG_NUMA | ||
29 | #include <linux/cpumask.h> | ||
30 | #include <asm/mpspec.h> | ||
31 | |||
32 | /* Mappings between logical cpu number and node number */ | ||
1 | #ifdef CONFIG_X86_32 | 33 | #ifdef CONFIG_X86_32 |
2 | # include "topology_32.h" | 34 | extern int cpu_to_node_map[]; |
35 | |||
3 | #else | 36 | #else |
4 | # include "topology_64.h" | 37 | DECLARE_PER_CPU(int, x86_cpu_to_node_map); |
38 | extern int x86_cpu_to_node_map_init[]; | ||
39 | extern void *x86_cpu_to_node_map_early_ptr; | ||
40 | /* Returns the number of the current Node. */ | ||
41 | #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) | ||
42 | #endif | ||
43 | |||
44 | extern cpumask_t node_to_cpumask_map[]; | ||
45 | |||
46 | #define NUMA_NO_NODE (-1) | ||
47 | |||
48 | /* Returns the number of the node containing CPU 'cpu' */ | ||
49 | #ifdef CONFIG_X86_32 | ||
50 | #define early_cpu_to_node(cpu) cpu_to_node(cpu) | ||
51 | static inline int cpu_to_node(int cpu) | ||
52 | { | ||
53 | return cpu_to_node_map[cpu]; | ||
54 | } | ||
55 | |||
56 | #else /* CONFIG_X86_64 */ | ||
57 | static inline int early_cpu_to_node(int cpu) | ||
58 | { | ||
59 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; | ||
60 | |||
61 | if (cpu_to_node_map) | ||
62 | return cpu_to_node_map[cpu]; | ||
63 | else if (per_cpu_offset(cpu)) | ||
64 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
65 | else | ||
66 | return NUMA_NO_NODE; | ||
67 | } | ||
68 | |||
69 | static inline int cpu_to_node(int cpu) | ||
70 | { | ||
71 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
72 | if (x86_cpu_to_node_map_early_ptr) { | ||
73 | printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n", | ||
74 | (int)cpu); | ||
75 | dump_stack(); | ||
76 | return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; | ||
77 | } | ||
78 | #endif | ||
79 | if (per_cpu_offset(cpu)) | ||
80 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
81 | else | ||
82 | return NUMA_NO_NODE; | ||
83 | } | ||
84 | #endif /* CONFIG_X86_64 */ | ||
85 | |||
86 | /* | ||
87 | * Returns the number of the node containing Node 'node'. This | ||
88 | * architecture is flat, so it is a pretty simple function! | ||
89 | */ | ||
90 | #define parent_node(node) (node) | ||
91 | |||
92 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
93 | static inline cpumask_t node_to_cpumask(int node) | ||
94 | { | ||
95 | return node_to_cpumask_map[node]; | ||
96 | } | ||
97 | |||
98 | /* Returns the number of the first CPU on Node 'node'. */ | ||
99 | static inline int node_to_first_cpu(int node) | ||
100 | { | ||
101 | cpumask_t mask = node_to_cpumask(node); | ||
102 | |||
103 | return first_cpu(mask); | ||
104 | } | ||
105 | |||
106 | #define pcibus_to_node(bus) __pcibus_to_node(bus) | ||
107 | #define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus) | ||
108 | |||
109 | #ifdef CONFIG_X86_32 | ||
110 | extern unsigned long node_start_pfn[]; | ||
111 | extern unsigned long node_end_pfn[]; | ||
112 | extern unsigned long node_remap_size[]; | ||
113 | #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) | ||
114 | |||
115 | # ifdef CONFIG_X86_HT | ||
116 | # define ENABLE_TOPO_DEFINES | ||
117 | # endif | ||
118 | |||
119 | # define SD_CACHE_NICE_TRIES 1 | ||
120 | # define SD_IDLE_IDX 1 | ||
121 | # define SD_NEWIDLE_IDX 2 | ||
122 | # define SD_FORKEXEC_IDX 0 | ||
123 | |||
124 | #else | ||
125 | |||
126 | # ifdef CONFIG_SMP | ||
127 | # define ENABLE_TOPO_DEFINES | ||
128 | # endif | ||
129 | |||
130 | # define SD_CACHE_NICE_TRIES 2 | ||
131 | # define SD_IDLE_IDX 2 | ||
132 | # define SD_NEWIDLE_IDX 0 | ||
133 | # define SD_FORKEXEC_IDX 1 | ||
134 | |||
135 | #endif | ||
136 | |||
137 | /* sched_domains SD_NODE_INIT for NUMAQ machines */ | ||
138 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
139 | .span = CPU_MASK_NONE, \ | ||
140 | .parent = NULL, \ | ||
141 | .child = NULL, \ | ||
142 | .groups = NULL, \ | ||
143 | .min_interval = 8, \ | ||
144 | .max_interval = 32, \ | ||
145 | .busy_factor = 32, \ | ||
146 | .imbalance_pct = 125, \ | ||
147 | .cache_nice_tries = SD_CACHE_NICE_TRIES, \ | ||
148 | .busy_idx = 3, \ | ||
149 | .idle_idx = SD_IDLE_IDX, \ | ||
150 | .newidle_idx = SD_NEWIDLE_IDX, \ | ||
151 | .wake_idx = 1, \ | ||
152 | .forkexec_idx = SD_FORKEXEC_IDX, \ | ||
153 | .flags = SD_LOAD_BALANCE \ | ||
154 | | SD_BALANCE_EXEC \ | ||
155 | | SD_BALANCE_FORK \ | ||
156 | | SD_SERIALIZE \ | ||
157 | | SD_WAKE_BALANCE, \ | ||
158 | .last_balance = jiffies, \ | ||
159 | .balance_interval = 1, \ | ||
160 | .nr_balance_failed = 0, \ | ||
161 | } | ||
162 | |||
163 | #ifdef CONFIG_X86_64_ACPI_NUMA | ||
164 | extern int __node_distance(int, int); | ||
165 | #define node_distance(a, b) __node_distance(a, b) | ||
166 | #endif | ||
167 | |||
168 | #else /* CONFIG_NUMA */ | ||
169 | |||
170 | #include <asm-generic/topology.h> | ||
171 | |||
172 | #endif | ||
173 | |||
174 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
175 | |||
176 | #ifdef ENABLE_TOPO_DEFINES | ||
177 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) | ||
178 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | ||
179 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | ||
180 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | ||
181 | #endif | ||
182 | |||
183 | #ifdef CONFIG_SMP | ||
184 | #define mc_capable() (boot_cpu_data.x86_max_cores > 1) | ||
185 | #define smt_capable() (smp_num_siblings > 1) | ||
186 | #endif | ||
187 | |||
5 | #endif | 188 | #endif |
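On 64-bit the node lookup now degrades gracefully: the early boot table while it is still live, the per-CPU map once per_cpu_offset() is set up, and NUMA_NO_NODE otherwise. Callers should therefore be prepared for the sentinel; a hedged usage sketch (struct foo is hypothetical):

    /* Allocate per-CPU data on the CPU's home node when it is known. */
    int nid = cpu_to_node(cpu);
    struct foo *p = kmalloc_node(sizeof(*p), GFP_KERNEL,
                                 nid == NUMA_NO_NODE ? numa_node_id() : nid);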
diff --git a/include/asm-x86/topology_32.h b/include/asm-x86/topology_32.h deleted file mode 100644 index 9040f5a61278..000000000000 --- a/include/asm-x86/topology_32.h +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/asm-i386/topology.h | ||
3 | * | ||
4 | * Written by: Matthew Dobson, IBM Corporation | ||
5 | * | ||
6 | * Copyright (C) 2002, IBM Corp. | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Send feedback to <colpatch@us.ibm.com> | ||
26 | */ | ||
27 | #ifndef _ASM_I386_TOPOLOGY_H | ||
28 | #define _ASM_I386_TOPOLOGY_H | ||
29 | |||
30 | #ifdef CONFIG_X86_HT | ||
31 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) | ||
32 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | ||
33 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | ||
34 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | ||
35 | #endif | ||
36 | |||
37 | #ifdef CONFIG_NUMA | ||
38 | |||
39 | #include <asm/mpspec.h> | ||
40 | |||
41 | #include <linux/cpumask.h> | ||
42 | |||
43 | /* Mappings between logical cpu number and node number */ | ||
44 | extern cpumask_t node_2_cpu_mask[]; | ||
45 | extern int cpu_2_node[]; | ||
46 | |||
47 | /* Returns the number of the node containing CPU 'cpu' */ | ||
48 | static inline int cpu_to_node(int cpu) | ||
49 | { | ||
50 | return cpu_2_node[cpu]; | ||
51 | } | ||
52 | |||
53 | /* Returns the number of the node containing Node 'node'. This architecture is flat, | ||
54 | so it is a pretty simple function! */ | ||
55 | #define parent_node(node) (node) | ||
56 | |||
57 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
58 | static inline cpumask_t node_to_cpumask(int node) | ||
59 | { | ||
60 | return node_2_cpu_mask[node]; | ||
61 | } | ||
62 | |||
63 | /* Returns the number of the first CPU on Node 'node'. */ | ||
64 | static inline int node_to_first_cpu(int node) | ||
65 | { | ||
66 | cpumask_t mask = node_to_cpumask(node); | ||
67 | return first_cpu(mask); | ||
68 | } | ||
69 | |||
70 | #define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node | ||
71 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) | ||
72 | |||
73 | /* sched_domains SD_NODE_INIT for NUMAQ machines */ | ||
74 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
75 | .span = CPU_MASK_NONE, \ | ||
76 | .parent = NULL, \ | ||
77 | .child = NULL, \ | ||
78 | .groups = NULL, \ | ||
79 | .min_interval = 8, \ | ||
80 | .max_interval = 32, \ | ||
81 | .busy_factor = 32, \ | ||
82 | .imbalance_pct = 125, \ | ||
83 | .cache_nice_tries = 1, \ | ||
84 | .busy_idx = 3, \ | ||
85 | .idle_idx = 1, \ | ||
86 | .newidle_idx = 2, \ | ||
87 | .wake_idx = 1, \ | ||
88 | .flags = SD_LOAD_BALANCE \ | ||
89 | | SD_BALANCE_EXEC \ | ||
90 | | SD_BALANCE_FORK \ | ||
91 | | SD_SERIALIZE \ | ||
92 | | SD_WAKE_BALANCE, \ | ||
93 | .last_balance = jiffies, \ | ||
94 | .balance_interval = 1, \ | ||
95 | .nr_balance_failed = 0, \ | ||
96 | } | ||
97 | |||
98 | extern unsigned long node_start_pfn[]; | ||
99 | extern unsigned long node_end_pfn[]; | ||
100 | extern unsigned long node_remap_size[]; | ||
101 | |||
102 | #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) | ||
103 | |||
104 | #else /* !CONFIG_NUMA */ | ||
105 | /* | ||
106 | * Other i386 platforms should define their own version of the | ||
107 | * above macros here. | ||
108 | */ | ||
109 | |||
110 | #include <asm-generic/topology.h> | ||
111 | |||
112 | #endif /* CONFIG_NUMA */ | ||
113 | |||
114 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
115 | |||
116 | #ifdef CONFIG_SMP | ||
117 | #define mc_capable() (boot_cpu_data.x86_max_cores > 1) | ||
118 | #define smt_capable() (smp_num_siblings > 1) | ||
119 | #endif | ||
120 | |||
121 | #endif /* _ASM_I386_TOPOLOGY_H */ | ||
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h deleted file mode 100644 index a718dda037e0..000000000000 --- a/include/asm-x86/topology_64.h +++ /dev/null | |||
@@ -1,71 +0,0 @@ | |||
1 | #ifndef _ASM_X86_64_TOPOLOGY_H | ||
2 | #define _ASM_X86_64_TOPOLOGY_H | ||
3 | |||
4 | |||
5 | #ifdef CONFIG_NUMA | ||
6 | |||
7 | #include <asm/mpspec.h> | ||
8 | #include <linux/bitops.h> | ||
9 | |||
10 | extern cpumask_t cpu_online_map; | ||
11 | |||
12 | extern unsigned char cpu_to_node[]; | ||
13 | extern cpumask_t node_to_cpumask[]; | ||
14 | |||
15 | #ifdef CONFIG_ACPI_NUMA | ||
16 | extern int __node_distance(int, int); | ||
17 | #define node_distance(a,b) __node_distance(a,b) | ||
18 | /* #else fallback version */ | ||
19 | #endif | ||
20 | |||
21 | #define cpu_to_node(cpu) (cpu_to_node[cpu]) | ||
22 | #define parent_node(node) (node) | ||
23 | #define node_to_first_cpu(node) (first_cpu(node_to_cpumask[node])) | ||
24 | #define node_to_cpumask(node) (node_to_cpumask[node]) | ||
25 | #define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node | ||
26 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); | ||
27 | |||
28 | #define numa_node_id() read_pda(nodenumber) | ||
29 | |||
30 | /* sched_domains SD_NODE_INIT for x86_64 machines */ | ||
31 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
32 | .span = CPU_MASK_NONE, \ | ||
33 | .parent = NULL, \ | ||
34 | .child = NULL, \ | ||
35 | .groups = NULL, \ | ||
36 | .min_interval = 8, \ | ||
37 | .max_interval = 32, \ | ||
38 | .busy_factor = 32, \ | ||
39 | .imbalance_pct = 125, \ | ||
40 | .cache_nice_tries = 2, \ | ||
41 | .busy_idx = 3, \ | ||
42 | .idle_idx = 2, \ | ||
43 | .newidle_idx = 0, \ | ||
44 | .wake_idx = 1, \ | ||
45 | .forkexec_idx = 1, \ | ||
46 | .flags = SD_LOAD_BALANCE \ | ||
47 | | SD_BALANCE_FORK \ | ||
48 | | SD_BALANCE_EXEC \ | ||
49 | | SD_SERIALIZE \ | ||
50 | | SD_WAKE_BALANCE, \ | ||
51 | .last_balance = jiffies, \ | ||
52 | .balance_interval = 1, \ | ||
53 | .nr_balance_failed = 0, \ | ||
54 | } | ||
55 | |||
56 | #endif | ||
57 | |||
58 | #ifdef CONFIG_SMP | ||
59 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) | ||
60 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | ||
61 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | ||
62 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | ||
63 | #define mc_capable() (boot_cpu_data.x86_max_cores > 1) | ||
64 | #define smt_capable() (smp_num_siblings > 1) | ||
65 | #endif | ||
66 | |||
67 | #include <asm-generic/topology.h> | ||
68 | |||
69 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
70 | |||
71 | #endif | ||
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 6baab30dc2c8..7d3e27f7d484 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h | |||
@@ -17,6 +17,8 @@ typedef unsigned long long cycles_t; | |||
17 | extern unsigned int cpu_khz; | 17 | extern unsigned int cpu_khz; |
18 | extern unsigned int tsc_khz; | 18 | extern unsigned int tsc_khz; |
19 | 19 | ||
20 | extern void disable_TSC(void); | ||
21 | |||
20 | static inline cycles_t get_cycles(void) | 22 | static inline cycles_t get_cycles(void) |
21 | { | 23 | { |
22 | unsigned long long ret = 0; | 24 | unsigned long long ret = 0; |
@@ -25,39 +27,22 @@ static inline cycles_t get_cycles(void) | |||
25 | if (!cpu_has_tsc) | 27 | if (!cpu_has_tsc) |
26 | return 0; | 28 | return 0; |
27 | #endif | 29 | #endif |
28 | |||
29 | #if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) | ||
30 | rdtscll(ret); | 30 | rdtscll(ret); |
31 | #endif | 31 | |
32 | return ret; | 32 | return ret; |
33 | } | 33 | } |
34 | 34 | ||
35 | /* Like get_cycles, but make sure the CPU is synchronized. */ | 35 | static inline cycles_t vget_cycles(void) |
36 | static __always_inline cycles_t get_cycles_sync(void) | ||
37 | { | 36 | { |
38 | unsigned long long ret; | ||
39 | unsigned eax, edx; | ||
40 | |||
41 | /* | ||
42 | * Use RDTSCP if possible; it is guaranteed to be synchronous | ||
43 | * and doesn't cause a VMEXIT on Hypervisors | ||
44 | */ | ||
45 | alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP, | ||
46 | ASM_OUTPUT2("=a" (eax), "=d" (edx)), | ||
47 | "a" (0U), "d" (0U) : "ecx", "memory"); | ||
48 | ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax); | ||
49 | if (ret) | ||
50 | return ret; | ||
51 | |||
52 | /* | 37 | /* |
53 | * Don't do an additional sync on CPUs where we know | 38 | * We only do VDSOs on TSC-capable CPUs, so this shouldn't |
54 | * RDTSC is already synchronous: | 39 | * access boot_cpu_data (which is not VDSO-safe): |
55 | */ | 40 | */ |
56 | alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, | 41 | #ifndef CONFIG_X86_TSC |
57 | "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); | 42 | if (!cpu_has_tsc) |
58 | rdtscll(ret); | 43 | return 0; |
59 | 44 | #endif | |
60 | return ret; | 45 | return (cycles_t) __native_read_tsc(); |
61 | } | 46 | } |
62 | 47 | ||
63 | extern void tsc_init(void); | 48 | extern void tsc_init(void); |
@@ -73,8 +58,7 @@ int check_tsc_unstable(void); | |||
73 | extern void check_tsc_sync_source(int cpu); | 58 | extern void check_tsc_sync_source(int cpu); |
74 | extern void check_tsc_sync_target(void); | 59 | extern void check_tsc_sync_target(void); |
75 | 60 | ||
76 | #ifdef CONFIG_X86_64 | ||
77 | extern void tsc_calibrate(void); | 61 | extern void tsc_calibrate(void); |
78 | #endif | 62 | extern int notsc_setup(char *); |
79 | 63 | ||
80 | #endif | 64 | #endif |
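get_cycles() now always executes RDTSC (the cpu_has_tsc guard remains only for non-TSC configs), while vget_cycles() is the VDSO-safe variant that reads the counter without touching boot_cpu_data. A quick latency-measurement sketch (do_something() is hypothetical, and raw TSC deltas are only meaningful on a constant-rate TSC):

    cycles_t t0 = get_cycles();
    do_something();
    cycles_t delta = get_cycles() - t0;   /* elapsed cycles, not yet nanoseconds */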
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index d2a4f7be9c2c..fcc570ec4fee 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/thread_info.h> | 8 | #include <linux/thread_info.h> |
9 | #include <linux/prefetch.h> | 9 | #include <linux/prefetch.h> |
10 | #include <linux/string.h> | 10 | #include <linux/string.h> |
11 | #include <asm/asm.h> | ||
11 | #include <asm/page.h> | 12 | #include <asm/page.h> |
12 | 13 | ||
13 | #define VERIFY_READ 0 | 14 | #define VERIFY_READ 0 |
@@ -287,11 +288,8 @@ extern void __put_user_8(void); | |||
287 | "4: movl %3,%0\n" \ | 288 | "4: movl %3,%0\n" \ |
288 | " jmp 3b\n" \ | 289 | " jmp 3b\n" \ |
289 | ".previous\n" \ | 290 | ".previous\n" \ |
290 | ".section __ex_table,\"a\"\n" \ | 291 | _ASM_EXTABLE(1b,4b) \ |
291 | " .align 4\n" \ | 292 | _ASM_EXTABLE(2b,4b) \ |
292 | " .long 1b,4b\n" \ | ||
293 | " .long 2b,4b\n" \ | ||
294 | ".previous" \ | ||
295 | : "=r"(err) \ | 293 | : "=r"(err) \ |
296 | : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) | 294 | : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) |
297 | 295 | ||
@@ -338,10 +336,7 @@ struct __large_struct { unsigned long buf[100]; }; | |||
338 | "3: movl %3,%0\n" \ | 336 | "3: movl %3,%0\n" \ |
339 | " jmp 2b\n" \ | 337 | " jmp 2b\n" \ |
340 | ".previous\n" \ | 338 | ".previous\n" \ |
341 | ".section __ex_table,\"a\"\n" \ | 339 | _ASM_EXTABLE(1b,3b) \ |
342 | " .align 4\n" \ | ||
343 | " .long 1b,3b\n" \ | ||
344 | ".previous" \ | ||
345 | : "=r"(err) \ | 340 | : "=r"(err) \ |
346 | : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) | 341 | : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) |
347 | 342 | ||
@@ -378,10 +373,7 @@ do { \ | |||
378 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 373 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
379 | " jmp 2b\n" \ | 374 | " jmp 2b\n" \ |
380 | ".previous\n" \ | 375 | ".previous\n" \ |
381 | ".section __ex_table,\"a\"\n" \ | 376 | _ASM_EXTABLE(1b,3b) \ |
382 | " .align 4\n" \ | ||
383 | " .long 1b,3b\n" \ | ||
384 | ".previous" \ | ||
385 | : "=r"(err), ltype (x) \ | 377 | : "=r"(err), ltype (x) \ |
386 | : "m"(__m(addr)), "i"(errret), "0"(err)) | 378 | : "m"(__m(addr)), "i"(errret), "0"(err)) |
387 | 379 | ||
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index f4ce8768ad44..b87eb4ba8f9d 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h | |||
@@ -65,6 +65,8 @@ struct exception_table_entry | |||
65 | unsigned long insn, fixup; | 65 | unsigned long insn, fixup; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | extern int fixup_exception(struct pt_regs *regs); | ||
69 | |||
68 | #define ARCH_HAS_SEARCH_EXTABLE | 70 | #define ARCH_HAS_SEARCH_EXTABLE |
69 | 71 | ||
70 | /* | 72 | /* |
@@ -179,10 +181,7 @@ struct __large_struct { unsigned long buf[100]; }; | |||
179 | "3: mov %3,%0\n" \ | 181 | "3: mov %3,%0\n" \ |
180 | " jmp 2b\n" \ | 182 | " jmp 2b\n" \ |
181 | ".previous\n" \ | 183 | ".previous\n" \ |
182 | ".section __ex_table,\"a\"\n" \ | 184 | _ASM_EXTABLE(1b,3b) \ |
183 | " .align 8\n" \ | ||
184 | " .quad 1b,3b\n" \ | ||
185 | ".previous" \ | ||
186 | : "=r"(err) \ | 185 | : "=r"(err) \ |
187 | : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) | 186 | : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) |
188 | 187 | ||
@@ -224,10 +223,7 @@ do { \ | |||
224 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 223 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
225 | " jmp 2b\n" \ | 224 | " jmp 2b\n" \ |
226 | ".previous\n" \ | 225 | ".previous\n" \ |
227 | ".section __ex_table,\"a\"\n" \ | 226 | _ASM_EXTABLE(1b,3b) \ |
228 | " .align 8\n" \ | ||
229 | " .quad 1b,3b\n" \ | ||
230 | ".previous" \ | ||
231 | : "=r"(err), ltype (x) \ | 227 | : "=r"(err), ltype (x) \ |
232 | : "m"(__m(addr)), "i"(errno), "0"(err)) | 228 | : "m"(__m(addr)), "i"(errno), "0"(err)) |
233 | 229 | ||
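The open-coded __ex_table fragments that _ASM_EXTABLE replaces differed only in entry width: .align 4/.long on 32-bit, .align 8/.quad on 64-bit. The macro hides that behind _ASM_ALIGN and _ASM_PTR; a rough sketch of its shape (the real definition lives in <asm/asm.h>, so treat this as approximate):

    /* Approximate expansion of the macro used in the hunks above. */
    #define _ASM_EXTABLE(from, to)                  \
            " .section __ex_table,\"a\"\n"          \
            _ASM_ALIGN "\n"                         \
            _ASM_PTR " " #from "," #to "\n"         \
            " .previous\n"

Each entry pairs a possibly-faulting instruction address with its fixup label, which fixup_exception() walks at fault time.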
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 9b15545eb9b5..984123a68f7c 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h | |||
@@ -327,14 +327,14 @@ | |||
327 | #define __NR_epoll_pwait 319 | 327 | #define __NR_epoll_pwait 319 |
328 | #define __NR_utimensat 320 | 328 | #define __NR_utimensat 320 |
329 | #define __NR_signalfd 321 | 329 | #define __NR_signalfd 321 |
330 | #define __NR_timerfd 322 | 330 | #define __NR_timerfd_create 322 |
331 | #define __NR_eventfd 323 | 331 | #define __NR_eventfd 323 |
332 | #define __NR_fallocate 324 | 332 | #define __NR_fallocate 324 |
333 | #define __NR_timerfd_settime 325 | ||
334 | #define __NR_timerfd_gettime 326 | ||
333 | 335 | ||
334 | #ifdef __KERNEL__ | 336 | #ifdef __KERNEL__ |
335 | 337 | ||
336 | #define NR_syscalls 325 | ||
337 | |||
338 | #define __ARCH_WANT_IPC_PARSE_VERSION | 338 | #define __ARCH_WANT_IPC_PARSE_VERSION |
339 | #define __ARCH_WANT_OLD_READDIR | 339 | #define __ARCH_WANT_OLD_READDIR |
340 | #define __ARCH_WANT_OLD_STAT | 340 | #define __ARCH_WANT_OLD_STAT |
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index 5ff4d3e24c34..3883ceb54ef5 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h | |||
@@ -629,12 +629,17 @@ __SYSCALL(__NR_utimensat, sys_utimensat) | |||
629 | __SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) | 629 | __SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) |
630 | #define __NR_signalfd 282 | 630 | #define __NR_signalfd 282 |
631 | __SYSCALL(__NR_signalfd, sys_signalfd) | 631 | __SYSCALL(__NR_signalfd, sys_signalfd) |
632 | #define __NR_timerfd 283 | 632 | #define __NR_timerfd_create 283 |
633 | __SYSCALL(__NR_timerfd, sys_timerfd) | 633 | __SYSCALL(__NR_timerfd_create, sys_timerfd_create) |
634 | #define __NR_eventfd 284 | 634 | #define __NR_eventfd 284 |
635 | __SYSCALL(__NR_eventfd, sys_eventfd) | 635 | __SYSCALL(__NR_eventfd, sys_eventfd) |
636 | #define __NR_fallocate 285 | 636 | #define __NR_fallocate 285 |
637 | __SYSCALL(__NR_fallocate, sys_fallocate) | 637 | __SYSCALL(__NR_fallocate, sys_fallocate) |
638 | #define __NR_timerfd_settime 286 | ||
639 | __SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) | ||
640 | #define __NR_timerfd_gettime 287 | ||
641 | __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) | ||
642 | |||
638 | 643 | ||
639 | #ifndef __NO_STUBS | 644 | #ifndef __NO_STUBS |
640 | #define __ARCH_WANT_OLD_READDIR | 645 | #define __ARCH_WANT_OLD_READDIR |
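The multiplexed timerfd syscall is retired: its slot is reused for timerfd_create, and timerfd_settime/timerfd_gettime get fresh numbers on both ABIs. From userspace the resulting API is three calls; a sketch using the glibc wrappers (available once the C library catches up with these numbers):

    #include <time.h>
    #include <sys/timerfd.h>

    int fd = timerfd_create(CLOCK_MONOTONIC, 0);
    struct itimerspec its = { .it_value = { .tv_sec = 1 } };  /* fire once in 1s */
    timerfd_settime(fd, 0, &its, NULL);   /* read(fd, ...) then reports expirations */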
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index 0e85d2a5e33a..ed8b8fc6906c 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h | |||
@@ -75,13 +75,23 @@ struct user_fxsr_struct { | |||
75 | * doesn't use the extra segment registers) | 75 | * doesn't use the extra segment registers) |
76 | */ | 76 | */ |
77 | struct user_regs_struct { | 77 | struct user_regs_struct { |
78 | long ebx, ecx, edx, esi, edi, ebp, eax; | 78 | unsigned long bx; |
79 | unsigned short ds, __ds, es, __es; | 79 | unsigned long cx; |
80 | unsigned short fs, __fs, gs, __gs; | 80 | unsigned long dx; |
81 | long orig_eax, eip; | 81 | unsigned long si; |
82 | unsigned short cs, __cs; | 82 | unsigned long di; |
83 | long eflags, esp; | 83 | unsigned long bp; |
84 | unsigned short ss, __ss; | 84 | unsigned long ax; |
85 | unsigned long ds; | ||
86 | unsigned long es; | ||
87 | unsigned long fs; | ||
88 | unsigned long gs; | ||
89 | unsigned long orig_ax; | ||
90 | unsigned long ip; | ||
91 | unsigned long cs; | ||
92 | unsigned long flags; | ||
93 | unsigned long sp; | ||
94 | unsigned long ss; | ||
85 | }; | 95 | }; |
86 | 96 | ||
87 | /* When the kernel dumps core, it starts by dumping the user struct - | 97 | /* When the kernel dumps core, it starts by dumping the user struct - |
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 12785c649ac5..a5449d456cc0 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h | |||
@@ -40,13 +40,13 @@ | |||
40 | * and both the standard and SIMD floating point data can be accessed via | 40 | * and both the standard and SIMD floating point data can be accessed via |
41 | * the new ptrace requests. In either case, changes to the FPU environment | 41 | * the new ptrace requests. In either case, changes to the FPU environment |
42 | * will be reflected in the task's state as expected. | 42 | * will be reflected in the task's state as expected. |
43 | * | 43 | * |
44 | * x86-64 support by Andi Kleen. | 44 | * x86-64 support by Andi Kleen. |
45 | */ | 45 | */ |
46 | 46 | ||
47 | /* This matches the 64bit FXSAVE format as defined by AMD. It is the same | 47 | /* This matches the 64bit FXSAVE format as defined by AMD. It is the same |
48 | as the 32bit format defined by Intel, except that the selector:offset pairs for | 48 | as the 32bit format defined by Intel, except that the selector:offset pairs for |
49 | data and eip are replaced with flat 64bit pointers. */ | 49 | data and eip are replaced with flat 64bit pointers. */ |
50 | struct user_i387_struct { | 50 | struct user_i387_struct { |
51 | unsigned short cwd; | 51 | unsigned short cwd; |
52 | unsigned short swd; | 52 | unsigned short swd; |
@@ -65,13 +65,34 @@ struct user_i387_struct { | |||
65 | * Segment register layout in coredumps. | 65 | * Segment register layout in coredumps. |
66 | */ | 66 | */ |
67 | struct user_regs_struct { | 67 | struct user_regs_struct { |
68 | unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; | 68 | unsigned long r15; |
69 | unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; | 69 | unsigned long r14; |
70 | unsigned long rip,cs,eflags; | 70 | unsigned long r13; |
71 | unsigned long rsp,ss; | 71 | unsigned long r12; |
72 | unsigned long fs_base, gs_base; | 72 | unsigned long bp; |
73 | unsigned long ds,es,fs,gs; | 73 | unsigned long bx; |
74 | }; | 74 | unsigned long r11; |
75 | unsigned long r10; | ||
76 | unsigned long r9; | ||
77 | unsigned long r8; | ||
78 | unsigned long ax; | ||
79 | unsigned long cx; | ||
80 | unsigned long dx; | ||
81 | unsigned long si; | ||
82 | unsigned long di; | ||
83 | unsigned long orig_ax; | ||
84 | unsigned long ip; | ||
85 | unsigned long cs; | ||
86 | unsigned long flags; | ||
87 | unsigned long sp; | ||
88 | unsigned long ss; | ||
89 | unsigned long fs_base; | ||
90 | unsigned long gs_base; | ||
91 | unsigned long ds; | ||
92 | unsigned long es; | ||
93 | unsigned long fs; | ||
94 | unsigned long gs; | ||
95 | }; | ||
75 | 96 | ||
76 | /* When the kernel dumps core, it starts by dumping the user struct - | 97 | /* When the kernel dumps core, it starts by dumping the user struct - |
77 | this will be used by gdb to figure out where the data and stack segments | 98 | this will be used by gdb to figure out where the data and stack segments |
@@ -94,7 +115,7 @@ struct user{ | |||
94 | This is actually the bottom of the stack, | 115 | This is actually the bottom of the stack, |
95 | the top of the stack is always found in the | 116 | the top of the stack is always found in the |
96 | esp register. */ | 117 | esp register. */ |
97 | long int signal; /* Signal that caused the core dump. */ | 118 | long int signal; /* Signal that caused the core dump. */ |
98 | int reserved; /* No longer used */ | 119 | int reserved; /* No longer used */ |
99 | int pad1; | 120 | int pad1; |
100 | struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ | 121 | struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ |
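[editor's note] The x86-64 struct gets the same treatment: one register per line and the r-prefix dropped from the legacy registers (rax becomes ax, rip becomes ip, eflags becomes flags), leaving the same 27 unsigned longs in the same order, now with names shared with the 32-bit struct. Because struct user places the registers at offset 0 of the user area, PTRACE_PEEKUSER offsets can be computed with offsetof; a sketch that reads the renamed ip/sp of a freshly exec'd child, declaring the layout locally (copied from this hunk) rather than relying on whichever names the installed libc headers use:

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

struct regs64 {				/* right-hand side of the hunk */
	unsigned long r15, r14, r13, r12, bp, bx, r11, r10;
	unsigned long r9, r8, ax, cx, dx, si, di;
	unsigned long orig_ax, ip, cs, flags, sp, ss;
	unsigned long fs_base, gs_base;
	unsigned long ds, es, fs, gs;
};

int main(void)
{
	pid_t child = fork();
	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("true", "true", NULL);
		_exit(1);
	}
	waitpid(child, NULL, 0);	/* SIGTRAP stop at exec */

	long ip = ptrace(PTRACE_PEEKUSER, child,
			 (void *)offsetof(struct regs64, ip), NULL);
	long sp = ptrace(PTRACE_PEEKUSER, child,
			 (void *)offsetof(struct regs64, sp), NULL);
	printf("entry ip=%#lx sp=%#lx\n", ip, sp);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}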
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h new file mode 100644 index 000000000000..629bcb6e8e45 --- /dev/null +++ b/include/asm-x86/vdso.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef _ASM_X86_VDSO_H | ||
2 | #define _ASM_X86_VDSO_H 1 | ||
3 | |||
4 | #ifdef CONFIG_X86_64 | ||
5 | extern const char VDSO64_PRELINK[]; | ||
6 | |||
7 | /* | ||
8 | * Given a pointer to the vDSO image, find the pointer to VDSO64_name | ||
9 | * as that symbol is defined in the vDSO sources or linker script. | ||
10 | */ | ||
11 | #define VDSO64_SYMBOL(base, name) ({ \ | ||
12 | extern const char VDSO64_##name[]; \ | ||
13 | (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); }) | ||
14 | #endif | ||
15 | |||
16 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT | ||
17 | extern const char VDSO32_PRELINK[]; | ||
18 | |||
19 | /* | ||
20 | * Given a pointer to the vDSO image, find the pointer to VDSO32_name | ||
21 | * as that symbol is defined in the vDSO sources or linker script. | ||
22 | */ | ||
23 | #define VDSO32_SYMBOL(base, name) ({ \ | ||
24 | extern const char VDSO32_##name[]; \ | ||
25 | (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) | ||
26 | #endif | ||
27 | |||
28 | #endif /* asm-x86/vdso.h */ | ||
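[editor's note] VDSO64_SYMBOL/VDSO32_SYMBOL translate a symbol's link-time address into its runtime address: the image is linked at VDSO*_PRELINK, so subtracting that gives the symbol's offset into the image, which is then rebased onto wherever the vDSO actually got mapped. A self-contained illustration of the same arithmetic with stand-in names (LINK_BASE and the offsets below are invented for the example, not taken from the patch):

#include <stdio.h>

#define LINK_BASE 0xffffffffff700000UL	/* assumed prelink address */

/* runtime = link-time address - link base + runtime base */
static unsigned long vdso_symbol(unsigned long link_addr,
				 unsigned long runtime_base)
{
	return link_addr - LINK_BASE + runtime_base;
}

int main(void)
{
	unsigned long link_addr = LINK_BASE + 0x850;	/* symbol at offset 0x850 */
	unsigned long base = 0x7fff12340000UL;		/* hypothetical map address */
	printf("runtime address: %#lx\n", vdso_symbol(link_addr, base));
	return 0;
}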
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index a5edf517b992..c92fe4af52e8 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h | |||
@@ -195,6 +195,7 @@ struct kernel_vm86_struct { | |||
195 | 195 | ||
196 | void handle_vm86_fault(struct kernel_vm86_regs *, long); | 196 | void handle_vm86_fault(struct kernel_vm86_regs *, long); |
197 | int handle_vm86_trap(struct kernel_vm86_regs *, long, int); | 197 | int handle_vm86_trap(struct kernel_vm86_regs *, long, int); |
198 | struct pt_regs *save_v86_state(struct kernel_vm86_regs *); | ||
198 | 199 | ||
199 | struct task_struct; | 200 | struct task_struct; |
200 | void release_vm86_irqs(struct task_struct *); | 201 | void release_vm86_irqs(struct task_struct *); |
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h index f01c49f5d108..17b3700949bf 100644 --- a/include/asm-x86/vsyscall.h +++ b/include/asm-x86/vsyscall.h | |||
@@ -36,6 +36,8 @@ extern volatile unsigned long __jiffies; | |||
36 | extern int vgetcpu_mode; | 36 | extern int vgetcpu_mode; |
37 | extern struct timezone sys_tz; | 37 | extern struct timezone sys_tz; |
38 | 38 | ||
39 | extern void map_vsyscall(void); | ||
40 | |||
39 | #endif /* __KERNEL__ */ | 41 | #endif /* __KERNEL__ */ |
40 | 42 | ||
41 | #endif /* _ASM_X86_64_VSYSCALL_H_ */ | 43 | #endif /* _ASM_X86_64_VSYSCALL_H_ */ |
diff --git a/include/asm-x86/vsyscall32.h b/include/asm-x86/vsyscall32.h deleted file mode 100644 index c631c082f8f7..000000000000 --- a/include/asm-x86/vsyscall32.h +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | #ifndef _ASM_VSYSCALL32_H | ||
2 | #define _ASM_VSYSCALL32_H 1 | ||
3 | |||
4 | /* Values need to match arch/x86_64/ia32/vsyscall.lds */ | ||
5 | |||
6 | #ifdef __ASSEMBLY__ | ||
7 | #define VSYSCALL32_BASE 0xffffe000 | ||
8 | #define VSYSCALL32_SYSEXIT (VSYSCALL32_BASE + 0x410) | ||
9 | #else | ||
10 | #define VSYSCALL32_BASE 0xffffe000UL | ||
11 | #define VSYSCALL32_END (VSYSCALL32_BASE + PAGE_SIZE) | ||
12 | #define VSYSCALL32_EHDR ((const struct elf32_hdr *) VSYSCALL32_BASE) | ||
13 | |||
14 | #define VSYSCALL32_VSYSCALL ((void *)VSYSCALL32_BASE + 0x400) | ||
15 | #define VSYSCALL32_SYSEXIT ((void *)VSYSCALL32_BASE + 0x410) | ||
16 | #define VSYSCALL32_SIGRETURN ((void __user *)VSYSCALL32_BASE + 0x500) | ||
17 | #define VSYSCALL32_RTSIGRETURN ((void __user *)VSYSCALL32_BASE + 0x600) | ||
18 | #endif | ||
19 | |||
20 | #endif | ||
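[editor's note] The deleted header is the other half of this change: it pinned the 32-bit vsyscall page at a fixed address with entry points at fixed offsets, a layout that cannot coexist with mapping the vDSO at an arbitrary base the way the VDSO32_SYMBOL() scheme above allows. For reference, the fixed entry points it encoded (constants copied from the deleted file):

#include <stdio.h>

#define VSYSCALL32_BASE 0xffffe000UL	/* from the deleted header */

int main(void)
{
	printf("vsyscall  entry: %#lx\n", VSYSCALL32_BASE + 0x400);
	printf("sysexit   entry: %#lx\n", VSYSCALL32_BASE + 0x410);
	printf("sigreturn entry: %#lx\n", VSYSCALL32_BASE + 0x500);
	printf("rt sigret entry: %#lx\n", VSYSCALL32_BASE + 0x600);
	return 0;
}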
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index 23c86cef3b25..a41ef1bdd424 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-i386/xor.h | ||
3 | * | ||
4 | * Optimized RAID-5 checksumming functions for MMX and SSE. | 2 | * Optimized RAID-5 checksumming functions for MMX and SSE. |
5 | * | 3 | * |
6 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index f942fcc21831..1eee7fcb2420 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-x86_64/xor.h | ||
3 | * | ||
4 | * Optimized RAID-5 checksumming functions for MMX and SSE. | 2 | * Optimized RAID-5 checksumming functions for MMX and SSE. |
5 | * | 3 | * |
6 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |