Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/acpi.h | 5
-rw-r--r--  include/asm-x86/alternative.h | 2
-rw-r--r--  include/asm-x86/amd_iommu.h | 32
-rw-r--r--  include/asm-x86/amd_iommu_types.h | 244
-rw-r--r--  include/asm-x86/apic.h | 14
-rw-r--r--  include/asm-x86/asm.h | 55
-rw-r--r--  include/asm-x86/atomic_64.h | 32
-rw-r--r--  include/asm-x86/bios_ebda.h | 2
-rw-r--r--  include/asm-x86/bitops.h | 68
-rw-r--r--  include/asm-x86/bootparam.h | 2
-rw-r--r--  include/asm-x86/cmpxchg_64.h | 37
-rw-r--r--  include/asm-x86/cpufeature.h | 9
-rw-r--r--  include/asm-x86/current.h | 42
-rw-r--r--  include/asm-x86/current_32.h | 17
-rw-r--r--  include/asm-x86/current_64.h | 27
-rw-r--r--  include/asm-x86/desc.h | 50
-rw-r--r--  include/asm-x86/desc_defs.h | 4
-rw-r--r--  include/asm-x86/dmi.h | 8
-rw-r--r--  include/asm-x86/dwarf2.h | 62
-rw-r--r--  include/asm-x86/dwarf2_32.h | 61
-rw-r--r--  include/asm-x86/dwarf2_64.h | 56
-rw-r--r--  include/asm-x86/e820.h | 107
-rw-r--r--  include/asm-x86/e820_32.h | 50
-rw-r--r--  include/asm-x86/e820_64.h | 56
-rw-r--r--  include/asm-x86/efi.h | 2
-rw-r--r--  include/asm-x86/elf.h | 2
-rw-r--r--  include/asm-x86/fixmap.h | 55
-rw-r--r--  include/asm-x86/fixmap_32.h | 50
-rw-r--r--  include/asm-x86/fixmap_64.h | 59
-rw-r--r--  include/asm-x86/ftrace.h | 14
-rw-r--r--  include/asm-x86/gart.h | 84
-rw-r--r--  include/asm-x86/genapic_64.h | 2
-rw-r--r--  include/asm-x86/hardirq.h | 6
-rw-r--r--  include/asm-x86/highmem.h | 3
-rw-r--r--  include/asm-x86/hpet.h | 2
-rw-r--r--  include/asm-x86/hw_irq.h | 106
-rw-r--r--  include/asm-x86/hw_irq_32.h | 66
-rw-r--r--  include/asm-x86/hw_irq_64.h | 173
-rw-r--r--  include/asm-x86/i8259.h | 2
-rw-r--r--  include/asm-x86/io.h | 83
-rw-r--r--  include/asm-x86/io_32.h | 61
-rw-r--r--  include/asm-x86/io_64.h | 71
-rw-r--r--  include/asm-x86/io_apic.h | 39
-rw-r--r--  include/asm-x86/iommu.h | 31
-rw-r--r--  include/asm-x86/ipi.h | 1
-rw-r--r--  include/asm-x86/irq.h | 51
-rw-r--r--  include/asm-x86/irq_32.h | 51
-rw-r--r--  include/asm-x86/irq_64.h | 51
-rw-r--r--  include/asm-x86/irq_vectors.h | 173
-rw-r--r--  include/asm-x86/irqflags.h | 65
-rw-r--r--  include/asm-x86/kvm_para.h | 15
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_apic.h | 2
-rw-r--r--  include/asm-x86/mach-bigsmp/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-default/entry_arch.h | 1
-rw-r--r--  include/asm-x86/mach-default/irq_vectors.h | 96
-rw-r--r--  include/asm-x86/mach-default/irq_vectors_limits.h | 16
-rw-r--r--  include/asm-x86/mach-default/mach_apic.h | 4
-rw-r--r--  include/asm-x86/mach-default/setup_arch.h | 4
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 10
-rw-r--r--  include/asm-x86/mach-es7000/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-generic/mach_mpparse.h | 7
-rw-r--r--  include/asm-x86/mach-numaq/mach_apic.h | 39
-rw-r--r--  include/asm-x86/mach-numaq/mach_mpparse.h | 11
-rw-r--r--  include/asm-x86/mach-numaq/mach_mpspec.h | 8
-rw-r--r--  include/asm-x86/mach-summit/mach_mpspec.h | 9
-rw-r--r--  include/asm-x86/mach-visws/entry_arch.h | 22
-rw-r--r--  include/asm-x86/mach-visws/irq_vectors.h | 62
-rw-r--r--  include/asm-x86/mach-visws/mach_apic.h | 104
-rw-r--r--  include/asm-x86/mach-visws/mach_apicdef.h | 13
-rw-r--r--  include/asm-x86/mach-visws/setup_arch.h | 9
-rw-r--r--  include/asm-x86/mach-visws/smpboot_hooks.h | 29
-rw-r--r--  include/asm-x86/mach-voyager/entry_arch.h | 2
-rw-r--r--  include/asm-x86/mach-voyager/irq_vectors.h | 79
-rw-r--r--  include/asm-x86/mmconfig.h | 12
-rw-r--r--  include/asm-x86/mmu_context.h | 32
-rw-r--r--  include/asm-x86/mmu_context_32.h | 28
-rw-r--r--  include/asm-x86/mmu_context_64.h | 18
-rw-r--r--  include/asm-x86/mmzone_32.h | 26
-rw-r--r--  include/asm-x86/mpspec.h | 36
-rw-r--r--  include/asm-x86/mpspec_def.h | 9
-rw-r--r--  include/asm-x86/msr-index.h | 4
-rw-r--r--  include/asm-x86/msr.h | 5
-rw-r--r--  include/asm-x86/nmi.h | 47
-rw-r--r--  include/asm-x86/numa_32.h | 8
-rw-r--r--  include/asm-x86/numa_64.h | 20
-rw-r--r--  include/asm-x86/numaq.h | 8
-rw-r--r--  include/asm-x86/page.h | 12
-rw-r--r--  include/asm-x86/page_32.h | 15
-rw-r--r--  include/asm-x86/page_64.h | 18
-rw-r--r--  include/asm-x86/paravirt.h | 197
-rw-r--r--  include/asm-x86/pat.h | 8
-rw-r--r--  include/asm-x86/pci.h | 2
-rw-r--r--  include/asm-x86/pci_32.h | 14
-rw-r--r--  include/asm-x86/pda.h | 5
-rw-r--r--  include/asm-x86/percpu.h | 46
-rw-r--r--  include/asm-x86/pgalloc.h | 4
-rw-r--r--  include/asm-x86/pgtable.h | 141
-rw-r--r--  include/asm-x86/pgtable_32.h | 20
-rw-r--r--  include/asm-x86/pgtable_64.h | 8
-rw-r--r--  include/asm-x86/processor-flags.h | 6
-rw-r--r--  include/asm-x86/processor.h | 9
-rw-r--r--  include/asm-x86/proto.h | 2
-rw-r--r--  include/asm-x86/ptrace.h | 8
-rw-r--r--  include/asm-x86/reboot.h | 2
-rw-r--r--  include/asm-x86/required-features.h | 8
-rw-r--r--  include/asm-x86/resume-trace.h | 2
-rw-r--r--  include/asm-x86/seccomp_32.h | 1
-rw-r--r--  include/asm-x86/seccomp_64.h | 1
-rw-r--r--  include/asm-x86/segment.h | 23
-rw-r--r--  include/asm-x86/setup.h | 37
-rw-r--r--  include/asm-x86/smp.h | 48
-rw-r--r--  include/asm-x86/srat.h | 12
-rw-r--r--  include/asm-x86/string_32.h | 323
-rw-r--r--  include/asm-x86/suspend_32.h | 5
-rw-r--r--  include/asm-x86/system.h | 10
-rw-r--r--  include/asm-x86/thread_info.h | 248
-rw-r--r--  include/asm-x86/thread_info_32.h | 205
-rw-r--r--  include/asm-x86/thread_info_64.h | 195
-rw-r--r--  include/asm-x86/time.h | 2
-rw-r--r--  include/asm-x86/timer.h | 4
-rw-r--r--  include/asm-x86/topology.h | 157
-rw-r--r--  include/asm-x86/tsc.h | 2
-rw-r--r--  include/asm-x86/uaccess.h | 448
-rw-r--r--  include/asm-x86/uaccess_32.h | 422
-rw-r--r--  include/asm-x86/uaccess_64.h | 263
-rw-r--r--  include/asm-x86/unistd_64.h | 2
-rw-r--r--  include/asm-x86/uv/uv_bau.h | 337
-rw-r--r--  include/asm-x86/uv/uv_hub.h | 190
-rw-r--r--  include/asm-x86/uv/uv_mmrs.h | 954
-rw-r--r--  include/asm-x86/visws/cobalt.h (renamed from include/asm-x86/mach-visws/cobalt.h) | 0
-rw-r--r--  include/asm-x86/visws/lithium.h (renamed from include/asm-x86/mach-visws/lithium.h) | 0
-rw-r--r--  include/asm-x86/visws/piix4.h (renamed from include/asm-x86/mach-visws/piix4.h) | 0
-rw-r--r--  include/asm-x86/visws/sgivw.h | 5
-rw-r--r--  include/asm-x86/vm86.h | 11
-rw-r--r--  include/asm-x86/vmi_time.h | 2
-rw-r--r--  include/asm-x86/vsyscall.h | 3
-rw-r--r--  include/asm-x86/xen/events.h | 1
-rw-r--r--  include/asm-x86/xen/hypercall.h | 11
-rw-r--r--  include/asm-x86/xen/page.h | 25
-rw-r--r--  include/asm-x86/xor_32.h | 5
-rw-r--r--  include/asm-x86/xor_64.h | 5
141 files changed, 4400 insertions, 3240 deletions
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 14411c9de46f..635d764dc13e 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -28,6 +28,7 @@
 #include <asm/numa.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
+#include <asm/mpspec.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -160,9 +161,7 @@ struct bootnode;
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#ifdef CONFIG_X86_64
-# define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-#endif
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 				   int num_nodes);
 #else
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index 1f6a9ca10126..f6aa18eadf71 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -72,6 +72,8 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif /* CONFIG_SMP */
 
+const unsigned char *const *find_nop_table(void);
+
 /*
  * Alternative instructions for different CPU types or capabilities.
  *
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
new file mode 100644
index 000000000000..30a12049353b
--- /dev/null
+++ b/include/asm-x86/amd_iommu.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *         Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_H
+#define _ASM_X86_AMD_IOMMU_H
+
+#ifdef CONFIG_AMD_IOMMU
+extern int amd_iommu_init(void);
+extern int amd_iommu_init_dma_ops(void);
+extern void amd_iommu_detect(void);
+#else
+static inline int amd_iommu_init(void) { return -ENODEV; }
+static inline void amd_iommu_detect(void) { }
+#endif
+
+#endif
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
new file mode 100644
index 000000000000..7bfcb47cc452
--- /dev/null
+++ b/include/asm-x86/amd_iommu_types.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *         Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __AMD_IOMMU_TYPES_H__
+#define __AMD_IOMMU_TYPES_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * some size calculation constants
+ */
+#define DEV_TABLE_ENTRY_SIZE		256
+#define ALIAS_TABLE_ENTRY_SIZE		2
+#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))
+
+/* helper macros */
+#define LOW_U32(x) ((x) & ((1ULL << 32)-1))
+#define HIGH_U32(x) (LOW_U32((x) >> 32))
+
+/* Length of the MMIO region for the AMD IOMMU */
+#define MMIO_REGION_LENGTH	0x4000
+
+/* Capability offsets used by the driver */
+#define MMIO_CAP_HDR_OFFSET	0x00
+#define MMIO_RANGE_OFFSET	0x0c
+
+/* Masks, shifts and macros to parse the device range capability */
+#define MMIO_RANGE_LD_MASK	0xff000000
+#define MMIO_RANGE_FD_MASK	0x00ff0000
+#define MMIO_RANGE_BUS_MASK	0x0000ff00
+#define MMIO_RANGE_LD_SHIFT	24
+#define MMIO_RANGE_FD_SHIFT	16
+#define MMIO_RANGE_BUS_SHIFT	8
+#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
+#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
+#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+
+/* Flag masks for the AMD IOMMU exclusion range */
+#define MMIO_EXCL_ENABLE_MASK	0x01ULL
+#define MMIO_EXCL_ALLOW_MASK	0x02ULL
+
+/* Used offsets into the MMIO space */
+#define MMIO_DEV_TABLE_OFFSET	0x0000
+#define MMIO_CMD_BUF_OFFSET	0x0008
+#define MMIO_EVT_BUF_OFFSET	0x0010
+#define MMIO_CONTROL_OFFSET	0x0018
+#define MMIO_EXCL_BASE_OFFSET	0x0020
+#define MMIO_EXCL_LIMIT_OFFSET	0x0028
+#define MMIO_CMD_HEAD_OFFSET	0x2000
+#define MMIO_CMD_TAIL_OFFSET	0x2008
+#define MMIO_EVT_HEAD_OFFSET	0x2010
+#define MMIO_EVT_TAIL_OFFSET	0x2018
+#define MMIO_STATUS_OFFSET	0x2020
+
+/* feature control bits */
+#define CONTROL_IOMMU_EN	0x00ULL
+#define CONTROL_HT_TUN_EN	0x01ULL
+#define CONTROL_EVT_LOG_EN	0x02ULL
+#define CONTROL_EVT_INT_EN	0x03ULL
+#define CONTROL_COMWAIT_EN	0x04ULL
+#define CONTROL_PASSPW_EN	0x08ULL
+#define CONTROL_RESPASSPW_EN	0x09ULL
+#define CONTROL_COHERENT_EN	0x0aULL
+#define CONTROL_ISOC_EN		0x0bULL
+#define CONTROL_CMDBUF_EN	0x0cULL
+#define CONTROL_PPFLOG_EN	0x0dULL
+#define CONTROL_PPFINT_EN	0x0eULL
+
+/* command specific defines */
+#define CMD_COMPL_WAIT		0x01
+#define CMD_INV_DEV_ENTRY	0x02
+#define CMD_INV_IOMMU_PAGES	0x03
+
+#define CMD_COMPL_WAIT_STORE_MASK	0x01
+#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
+#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
+
+#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
+
+/* macros and definitions for device table entries */
+#define DEV_ENTRY_VALID		0x00
+#define DEV_ENTRY_TRANSLATION	0x01
+#define DEV_ENTRY_IR		0x3d
+#define DEV_ENTRY_IW		0x3e
+#define DEV_ENTRY_EX		0x67
+#define DEV_ENTRY_SYSMGT1	0x68
+#define DEV_ENTRY_SYSMGT2	0x69
+#define DEV_ENTRY_INIT_PASS	0xb8
+#define DEV_ENTRY_EINT_PASS	0xb9
+#define DEV_ENTRY_NMI_PASS	0xba
+#define DEV_ENTRY_LINT0_PASS	0xbe
+#define DEV_ENTRY_LINT1_PASS	0xbf
+
+/* constants to configure the command buffer */
+#define CMD_BUFFER_SIZE		8192
+#define CMD_BUFFER_ENTRIES	512
+#define MMIO_CMD_SIZE_SHIFT	56
+#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)
+
+#define PAGE_MODE_1_LEVEL 0x01
+#define PAGE_MODE_2_LEVEL 0x02
+#define PAGE_MODE_3_LEVEL 0x03
+
+#define IOMMU_PDE_NL_0	0x000ULL
+#define IOMMU_PDE_NL_1	0x200ULL
+#define IOMMU_PDE_NL_2	0x400ULL
+#define IOMMU_PDE_NL_3	0x600ULL
+
+#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
+#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
+#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
+
+#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
+#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
+#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
+
+#define IOMMU_PTE_P  (1ULL << 0)
+#define IOMMU_PTE_U  (1ULL << 59)
+#define IOMMU_PTE_FC (1ULL << 60)
+#define IOMMU_PTE_IR (1ULL << 61)
+#define IOMMU_PTE_IW (1ULL << 62)
+
+#define IOMMU_L1_PDE(address) \
+	((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define IOMMU_L2_PDE(address) \
+	((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+
+#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
+#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
+#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
+
+#define IOMMU_PROT_MASK 0x03
+#define IOMMU_PROT_IR 0x01
+#define IOMMU_PROT_IW 0x02
+
+/* IOMMU capabilities */
+#define IOMMU_CAP_IOTLB   24
+#define IOMMU_CAP_NPCACHE 26
+
+#define MAX_DOMAIN_ID 65536
+
+struct protection_domain {
+	spinlock_t lock;
+	u16 id;
+	int mode;
+	u64 *pt_root;
+	void *priv;
+};
+
+struct dma_ops_domain {
+	struct list_head list;
+	struct protection_domain domain;
+	unsigned long aperture_size;
+	unsigned long next_bit;
+	unsigned long *bitmap;
+	u64 **pte_pages;
+};
+
+struct amd_iommu {
+	struct list_head list;
+	spinlock_t lock;
+
+	u16 devid;
+	u16 cap_ptr;
+
+	u64 mmio_phys;
+	u8 *mmio_base;
+	u32 cap;
+	u16 first_device;
+	u16 last_device;
+	u64 exclusion_start;
+	u64 exclusion_length;
+
+	u8 *cmd_buf;
+	u32 cmd_buf_size;
+
+	int need_sync;
+
+	struct dma_ops_domain *default_dom;
+};
+
+extern struct list_head amd_iommu_list;
+
+struct dev_table_entry {
+	u32 data[8];
+};
+
+struct unity_map_entry {
+	struct list_head list;
+	u16 devid_start;
+	u16 devid_end;
+	u64 address_start;
+	u64 address_end;
+	int prot;
+};
+
+extern struct list_head amd_iommu_unity_map;
+
+/* data structures for device handling */
+extern struct dev_table_entry *amd_iommu_dev_table;
+extern u16 *amd_iommu_alias_table;
+extern struct amd_iommu **amd_iommu_rlookup_table;
+
+extern unsigned amd_iommu_aperture_order;
+
+extern u16 amd_iommu_last_bdf;
+
+/* data structures for protection domain handling */
+extern struct protection_domain **amd_iommu_pd_table;
+extern unsigned long *amd_iommu_pd_alloc_bitmap;
+
+extern int amd_iommu_isolate;
+
+static inline void print_devid(u16 devid, int nl)
+{
+	int bus = devid >> 8;
+	int dev = devid >> 3 & 0x1f;
+	int fn  = devid & 0x07;
+
+	printk("%02x:%02x.%x", bus, dev, fn);
+	if (nl)
+		printk("\n");
+}
+
+#endif
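
For reference, the 16-bit devid used to index the device, alias and rlookup tables above is the PCI bus/device/function triple packed as bus[15:8], device[7:3], function[2:0] — exactly what print_devid() unpacks. A minimal standalone sketch of the same decoding (decode_devid() and the sample value are illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* same field extraction as print_devid() in amd_iommu_types.h */
static void decode_devid(uint16_t devid)
{
	unsigned bus = devid >> 8;
	unsigned dev = (devid >> 3) & 0x1f;
	unsigned fn  = devid & 0x07;

	printf("%02x:%02x.%x\n", bus, dev, fn);
}

int main(void)
{
	decode_devid(0x0508);	/* prints 05:01.0 */
	return 0;
}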
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index be9639a9a186..4e2c1e517f06 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -36,15 +36,11 @@ extern void generic_apic_probe(void);
 #ifdef CONFIG_X86_LOCAL_APIC
 
 extern int apic_verbosity;
-extern int timer_over_8254;
 extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
 
-extern int apic_runs_main_timer;
 extern int ioapic_force;
-extern int disable_apic;
-extern int disable_apic_timer;
 
+extern int disable_apic;
 /*
  * Basic functions accessing APICs.
  */
@@ -125,16 +121,22 @@ extern void enable_NMI_through_LVT0(void);
  */
 #ifdef CONFIG_X86_64
 extern void early_init_lapic_mapping(void);
+extern int apic_is_clustered_box(void);
+#else
+static inline int apic_is_clustered_box(void)
+{
+	return 0;
+}
 #endif
 
 extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
 extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
 
-extern int apic_is_clustered_box(void);
 
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok		1
+static inline void init_apic_mappings(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 90dec0c23646..97220321f39d 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,37 +1,40 @@
 #ifndef _ASM_X86_ASM_H
 #define _ASM_X86_ASM_H
 
-#ifdef CONFIG_X86_32
-/* 32 bits */
-
-# define _ASM_PTR	" .long "
-# define _ASM_ALIGN	" .balign 4 "
-# define _ASM_MOV_UL	" movl "
-
-# define _ASM_INC	" incl "
-# define _ASM_DEC	" decl "
-# define _ASM_ADD	" addl "
-# define _ASM_SUB	" subl "
-# define _ASM_XADD	" xaddl "
-
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x)	x
+# define __ASM_EX_SEC	.section __ex_table
 #else
-/* 64 bits */
-
-# define _ASM_PTR	" .quad "
-# define _ASM_ALIGN	" .balign 8 "
-# define _ASM_MOV_UL	" movq "
-
-# define _ASM_INC	" incq "
-# define _ASM_DEC	" decq "
-# define _ASM_ADD	" addq "
-# define _ASM_SUB	" subq "
-# define _ASM_XADD	" xaddq "
-
-#endif /* CONFIG_X86_32 */
+# define __ASM_FORM(x)	" " #x " "
+# define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
+#endif
+
+#ifdef CONFIG_X86_32
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+#else
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+#endif
+
+#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
+#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)
+
+#define _ASM_PTR	__ASM_SEL(.long, .quad)
+#define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)
+#define _ASM_MOV_UL	__ASM_SIZE(mov)
+
+#define _ASM_INC	__ASM_SIZE(inc)
+#define _ASM_DEC	__ASM_SIZE(dec)
+#define _ASM_ADD	__ASM_SIZE(add)
+#define _ASM_SUB	__ASM_SIZE(sub)
+#define _ASM_XADD	__ASM_SIZE(xadd)
+#define _ASM_AX		__ASM_REG(ax)
+#define _ASM_BX		__ASM_REG(bx)
+#define _ASM_CX		__ASM_REG(cx)
+#define _ASM_DX		__ASM_REG(dx)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
-	" .section __ex_table,\"a\"\n" \
+	__ASM_EX_SEC \
 	_ASM_ALIGN "\n" \
 	_ASM_PTR #from "," #to "\n" \
 	" .previous\n"
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 3e0cd7d38335..a0095191c02e 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -11,12 +11,6 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -431,6 +425,32 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 	return c != (u);
 }
 
+/**
+ * atomic_inc_short - increment of a short integer
+ * @v: pointer to type int
+ *
+ * Atomically adds 1 to @v
+ * Returns the new value of @u
+ */
+static inline short int atomic_inc_short(short int *v)
+{
+	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+	return *v;
+}
+
+/**
+ * atomic_or_long - OR of two long integers
+ * @v1: pointer to type unsigned long
+ * @v2: pointer to type unsigned long
+ *
+ * Atomically ORs @v1 and @v2
+ * Returns the result of the OR
+ */
+static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+{
+	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
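
For reference, atomic_or_long() is a thin wrapper around a locked orq, so several CPUs can publish flag bits into one word without a spinlock. A user-space sketch of the same idiom (or_long() is ours; in the kernel LOCK_PREFIX supplies the lock prefix on SMP builds; compile for x86-64):

#include <stdio.h>

/* same "lock orq" idiom as atomic_or_long() above */
static void or_long(unsigned long *v1, unsigned long v2)
{
	asm volatile("lock; orq %1, %0" : "+m" (*v1) : "r" (v2));
}

int main(void)
{
	unsigned long flags = 0x1;

	or_long(&flags, 0x4);
	printf("flags = 0x%lx\n", flags);	/* 0x5 */
	return 0;
}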
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index b4a46b7be794..0033e50c13b2 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -14,4 +14,6 @@ static inline unsigned int get_bios_ebda(void)
 	return address;	/* 0 means none */
 }
 
+void reserve_ebda_region(void);
+
 #endif /* _MACH_BIOS_EBDA_H */
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index ee4b3ead6a43..96b1829cea15 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,11 +23,21 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc
    versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
 #else
-#define ADDR "+m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
 #endif
 
+#define ADDR				BITOP_ADDR(addr)
+
+/*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
+#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK(nr)			(1 << ((nr) & 7))
+
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -43,9 +53,17 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "orb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)CONST_MASK(nr))
+			: "memory");
+	} else {
+		asm volatile(LOCK_PREFIX "bts %1,%0"
+			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+	}
 }
 
 /**
@@ -57,7 +75,7 @@ static inline void set_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -72,9 +90,17 @@ static inline void __set_bit(int nr, volatile void *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "andb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)~CONST_MASK(nr)));
+	} else {
+		asm volatile(LOCK_PREFIX "btr %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
 }
 
 /*
@@ -85,13 +111,13 @@ static inline void clear_bit(int nr, volatile void *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -108,7 +134,7 @@ static inline void __clear_bit(int nr, volatile void *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	__clear_bit(nr, addr);
@@ -126,7 +152,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -140,7 +166,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile void *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -153,7 +179,7 @@ static inline void change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -170,7 +196,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -184,7 +210,7 @@ static inline int test_and_set_bit_lock(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -203,7 +229,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
 */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -223,7 +249,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -235,7 +261,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -255,7 +281,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
 */
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -266,13 +292,13 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(int nr, const volatile void *addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
 }
 
-static inline int variable_test_bit(int nr, volatile const void *addr)
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
 {
 	int oldbit;
 
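
For reference, the new IS_IMMEDIATE()/CONST_MASK path rewrites a constant-nr set_bit() as a byte-wide lock orb on addr + (nr >> 3) with mask 1 << (nr & 7), rather than a lock bts. A tiny standalone check of that arithmetic (illustrative, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned nr = 13;

	/* for nr = 13: byte offset 13 >> 3 = 1, mask 1 << (13 & 7) = 0x20 */
	printf("byte offset = %u, mask = 0x%02x\n", nr >> 3, 1u << (nr & 7));
	return 0;
}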
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index f62f4733606b..ae22bdf0ab14 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -11,6 +11,7 @@
 
 /* setup data types */
 #define SETUP_NONE			0
+#define SETUP_E820_EXT			1
 
 /* extensible setup data list node */
 struct setup_data {
@@ -40,6 +41,7 @@ struct setup_header {
 	__u8	type_of_loader;
 	__u8	loadflags;
 #define LOADED_HIGH	(1<<0)
+#define QUIET_FLAG	(1<<5)
 #define KEEP_SEGMENTS	(1<<6)
 #define CAN_USE_HEAP	(1<<7)
 	__u16	setup_move_size;
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index d9b26b9a28cf..17463ccf8166 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -93,6 +93,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
93 return old; 93 return old;
94} 94}
95 95
96/*
97 * Always use locked operations when touching memory shared with a
98 * hypervisor, since the system may be SMP even if the guest kernel
99 * isn't.
100 */
101static inline unsigned long __sync_cmpxchg(volatile void *ptr,
102 unsigned long old,
103 unsigned long new, int size)
104{
105 unsigned long prev;
106 switch (size) {
107 case 1:
108 asm volatile("lock; cmpxchgb %b1,%2"
109 : "=a"(prev)
110 : "q"(new), "m"(*__xg(ptr)), "0"(old)
111 : "memory");
112 return prev;
113 case 2:
114 asm volatile("lock; cmpxchgw %w1,%2"
115 : "=a"(prev)
116 : "r"(new), "m"(*__xg(ptr)), "0"(old)
117 : "memory");
118 return prev;
119 case 4:
120 asm volatile("lock; cmpxchgl %1,%2"
121 : "=a"(prev)
122 : "r"(new), "m"(*__xg(ptr)), "0"(old)
123 : "memory");
124 return prev;
125 }
126 return old;
127}
128
96static inline unsigned long __cmpxchg_local(volatile void *ptr, 129static inline unsigned long __cmpxchg_local(volatile void *ptr,
97 unsigned long old, 130 unsigned long old,
98 unsigned long new, int size) 131 unsigned long new, int size)
@@ -139,6 +172,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
139 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ 172 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
140 (unsigned long)(n), \ 173 (unsigned long)(n), \
141 sizeof(*(ptr)))) 174 sizeof(*(ptr))))
175#define sync_cmpxchg(ptr, o, n) \
176 ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
177 (unsigned long)(n), \
178 sizeof(*(ptr))))
142#define cmpxchg64_local(ptr, o, n) \ 179#define cmpxchg64_local(ptr, o, n) \
143({ \ 180({ \
144 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 181 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
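
For reference, sync_cmpxchg() is meant for the usual compare-and-swap retry loop on memory shared with a hypervisor, where the lock prefix must stay even on UP guests. A sketch of the intended pattern (shared_add() is ours, not from the patch):

/* hedged sketch: retry until our compare-and-swap wins */
static void shared_add(unsigned long *shared, unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *shared;
		new = old + delta;
	} while (sync_cmpxchg(shared, old, new) != old);
}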
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 0d609c837a41..75ef959db329 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -74,8 +74,8 @@
74#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ 74#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
75#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ 75#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
76#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ 76#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
77/* 14 free */ 77#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
78/* 15 free */ 78#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
79#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ 79#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
80#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ 80#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
81#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ 81#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
@@ -106,6 +106,7 @@
106/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 106/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
107#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ 107#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
108#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ 108#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
109#define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */
109 110
110/* 111/*
111 * Auxiliary flags: Linux defined - For features scattered in various 112 * Auxiliary flags: Linux defined - For features scattered in various
@@ -142,11 +143,11 @@ extern const char * const x86_power_flags[32];
142#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) 143#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
143#define setup_clear_cpu_cap(bit) do { \ 144#define setup_clear_cpu_cap(bit) do { \
144 clear_cpu_cap(&boot_cpu_data, bit); \ 145 clear_cpu_cap(&boot_cpu_data, bit); \
145 set_bit(bit, cleared_cpu_caps); \ 146 set_bit(bit, (unsigned long *)cleared_cpu_caps); \
146} while (0) 147} while (0)
147#define setup_force_cpu_cap(bit) do { \ 148#define setup_force_cpu_cap(bit) do { \
148 set_cpu_cap(&boot_cpu_data, bit); \ 149 set_cpu_cap(&boot_cpu_data, bit); \
149 clear_bit(bit, cleared_cpu_caps); \ 150 clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
150} while (0) 151} while (0)
151 152
152#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) 153#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index d2526d3f7346..7515c19d4988 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,39 @@
+#ifndef _X86_CURRENT_H
+#define _X86_CURRENT_H
+
 #ifdef CONFIG_X86_32
-# include "current_32.h"
-#else
-# include "current_64.h"
-#endif
+#include <linux/compiler.h>
+#include <asm/percpu.h>
+
+struct task_struct;
+
+DECLARE_PER_CPU(struct task_struct *, current_task);
+static __always_inline struct task_struct *get_current(void)
+{
+	return x86_read_percpu(current_task);
+}
+
+#else /* X86_32 */
+
+#ifndef __ASSEMBLY__
+#include <asm/pda.h>
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return read_pda(pcurrent);
+}
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* X86_32 */
+
+#define current get_current()
+
+#endif /* X86_CURRENT_H */
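
Caller-visible behaviour is unchanged by the merge: current still expands to get_current(), which reads the per-cpu current_task on 32-bit and pda->pcurrent on 64-bit, both resolved at compile time, e.g. (sketch):

struct task_struct *tsk = current;	/* == get_current() */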
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
deleted file mode 100644
index 5af9bdb97a16..000000000000
--- a/include/asm-x86/current_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _I386_CURRENT_H
-#define _I386_CURRENT_H
-
-#include <linux/compiler.h>
-#include <asm/percpu.h>
-
-struct task_struct;
-
-DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-	return x86_read_percpu(current_task);
-}
-
-#define current get_current()
-
-#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
deleted file mode 100644
index 2d368ede2fc1..000000000000
--- a/include/asm-x86/current_64.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _X86_64_CURRENT_H
-#define _X86_64_CURRENT_H
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
-
-#include <asm/pda.h>
-
-static inline struct task_struct *get_current(void)
-{
-	struct task_struct *t = read_pda(pcurrent);
-	return t;
-}
-
-#define current get_current()
-
-#else
-
-#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
-#endif
-
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
-
-#endif
-
-#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 268a012bcd79..a44c4dc70590 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -29,11 +29,17 @@ static inline void fill_ldt(struct desc_struct *desc,
 extern struct desc_ptr idt_descr;
 extern gate_desc idt_table[];
 
+struct gdt_page {
+	struct desc_struct gdt[GDT_ENTRIES];
+} __attribute__((aligned(PAGE_SIZE)));
+DECLARE_PER_CPU(struct gdt_page, gdt_page);
+
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+	return per_cpu(gdt_page, cpu).gdt;
+}
+
 #ifdef CONFIG_X86_64
-extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-extern struct desc_ptr cpu_gdt_descr[];
-/* the cpu gdt accessor */
-#define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address)
 
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
 			     unsigned dpl, unsigned ist, unsigned seg)
@@ -51,16 +57,6 @@ static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
 }
 
 #else
-struct gdt_page {
-	struct desc_struct gdt[GDT_ENTRIES];
-} __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
-
-static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
-{
-	return per_cpu(gdt_page, cpu).gdt;
-}
-
 static inline void pack_gate(gate_desc *gate, unsigned char type,
 			     unsigned long base, unsigned dpl, unsigned flags,
 			     unsigned short seg)
@@ -192,8 +188,8 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
 		unsigned cpu = smp_processor_id();
 		ldt_desc ldt;
 
-		set_tssldt_descriptor(&ldt, (unsigned long)addr,
-				      DESC_LDT, entries * sizeof(ldt) - 1);
+		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
+				      entries * LDT_ENTRY_SIZE - 1);
 		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
 				&ldt, DESC_LDT);
 		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
@@ -311,6 +307,28 @@ static inline void set_intr_gate(unsigned int n, void *addr)
 	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
 }
 
+#define SYS_VECTOR_FREE		0
+#define SYS_VECTOR_ALLOCED	1
+
+extern int first_system_vector;
+extern char system_vectors[];
+
+static inline void alloc_system_vector(int vector)
+{
+	if (system_vectors[vector] == SYS_VECTOR_FREE) {
+		system_vectors[vector] = SYS_VECTOR_ALLOCED;
+		if (first_system_vector > vector)
+			first_system_vector = vector;
+	} else
+		BUG();
+}
+
+static inline void alloc_intr_gate(unsigned int n, void *addr)
+{
+	alloc_system_vector(n);
+	set_intr_gate(n, addr);
+}
+
 /*
  * This routine sets up an interrupt gate at directory privilege level 3.
  */
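
For reference, the new helpers pair vector bookkeeping with gate setup in one call. A hypothetical caller (the vector number and handler name are illustrative, not from the patch):

extern void my_ipi_interrupt(void);	/* illustrative handler */

static void __init setup_my_ipi(void)
{
	/* claims the vector in system_vectors[]; BUG()s if already taken */
	alloc_intr_gate(0xf2, my_ipi_interrupt);
}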
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index eccb4ea1f918..f7bacf357dac 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -75,10 +75,14 @@ struct ldttss_desc64 {
 typedef struct gate_struct64 gate_desc;
 typedef struct ldttss_desc64 ldt_desc;
 typedef struct ldttss_desc64 tss_desc;
+#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
+#define gate_segment(g) ((g).segment)
 #else
 typedef struct desc_struct gate_desc;
 typedef struct desc_struct ldt_desc;
 typedef struct desc_struct tss_desc;
+#define gate_offset(g)		(((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
+#define gate_segment(g)		((g).a >> 16)
 #endif
 
 struct desc_ptr {
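
For reference, the 64-bit gate_offset() splices a handler address back together from the three split fields of the gate descriptor. A standalone sketch of the same bit-splicing (gate_offset_of() and the sample value are ours):

#include <stdint.h>
#include <stdio.h>

/* same splicing as the 64-bit gate_offset() macro above */
static uint64_t gate_offset_of(uint16_t low, uint16_t middle, uint32_t high)
{
	return (uint64_t)low
	     | ((uint64_t)middle << 16)
	     | ((uint64_t)high << 32);
}

int main(void)
{
	/* 0xffffffff81234567 split into 0x4567, 0x8123, 0xffffffff */
	printf("%#llx\n",
	       (unsigned long long)gate_offset_of(0x4567, 0x8123, 0xffffffff));
	return 0;
}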
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
index 4edf7514a750..58a86571fe0f 100644
--- a/include/asm-x86/dmi.h
+++ b/include/asm-x86/dmi.h
@@ -3,12 +3,6 @@
 
 #include <asm/io.h>
 
-#ifdef CONFIG_X86_32
-
-#define dmi_alloc alloc_bootmem
-
-#else /* CONFIG_X86_32 */
-
 #define DMI_MAX_DATA 2048
 
 extern int dmi_alloc_index;
@@ -25,8 +19,6 @@ static inline void *dmi_alloc(unsigned len)
 	return dmi_alloc_data + idx;
 }
 
-#endif
-
 /* Use early IO mappings for DMI because it's initialized early */
 #define dmi_ioremap early_ioremap
 #define dmi_iounmap early_iounmap
diff --git a/include/asm-x86/dwarf2.h b/include/asm-x86/dwarf2.h
index b3cbb0ccae18..738bb9fb3e53 100644
--- a/include/asm-x86/dwarf2.h
+++ b/include/asm-x86/dwarf2.h
@@ -1,5 +1,61 @@
-#ifdef CONFIG_X86_32
-# include "dwarf2_32.h"
+#ifndef _DWARF2_H
+#define _DWARF2_H
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should be only included in pure assembly files"
+#endif
+
+/*
+   Macros for dwarf2 CFI unwind table entries.
+   See "as.info" for details on these pseudo ops. Unfortunately
+   they are only supported in very new binutils, so define them
+   away for older version.
+ */
+
+#ifdef CONFIG_AS_CFI
+
+#define CFI_STARTPROC		.cfi_startproc
+#define CFI_ENDPROC		.cfi_endproc
+#define CFI_DEF_CFA		.cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
+#define CFI_OFFSET		.cfi_offset
+#define CFI_REL_OFFSET		.cfi_rel_offset
+#define CFI_REGISTER		.cfi_register
+#define CFI_RESTORE		.cfi_restore
+#define CFI_REMEMBER_STATE	.cfi_remember_state
+#define CFI_RESTORE_STATE	.cfi_restore_state
+#define CFI_UNDEFINED		.cfi_undefined
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME	.cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
+
 #else
-# include "dwarf2_64.h"
+
+/* Due to the structure of pre-exisiting code, don't use assembler line
+   comment character # to ignore the arguments. Instead, use a dummy macro. */
+.macro cfi_ignore a=0, b=0, c=0, d=0
+.endm
+
+#define CFI_STARTPROC		cfi_ignore
+#define CFI_ENDPROC		cfi_ignore
+#define CFI_DEF_CFA		cfi_ignore
+#define CFI_DEF_CFA_REGISTER	cfi_ignore
+#define CFI_DEF_CFA_OFFSET	cfi_ignore
+#define CFI_ADJUST_CFA_OFFSET	cfi_ignore
+#define CFI_OFFSET		cfi_ignore
+#define CFI_REL_OFFSET		cfi_ignore
+#define CFI_REGISTER		cfi_ignore
+#define CFI_RESTORE		cfi_ignore
+#define CFI_REMEMBER_STATE	cfi_ignore
+#define CFI_RESTORE_STATE	cfi_ignore
+#define CFI_UNDEFINED		cfi_ignore
+#define CFI_SIGNAL_FRAME	cfi_ignore
+
+#endif
+
 #endif
diff --git a/include/asm-x86/dwarf2_32.h b/include/asm-x86/dwarf2_32.h
deleted file mode 100644
index 6d66398a307d..000000000000
--- a/include/asm-x86/dwarf2_32.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _DWARF2_H
-#define _DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
-   Macros for dwarf2 CFI unwind table entries.
-   See "as.info" for details on these pseudo ops. Unfortunately
-   they are only supported in very new binutils, so define them
-   away for older version.
- */
-
-#ifdef CONFIG_UNWIND_INFO
-
-#define CFI_STARTPROC		.cfi_startproc
-#define CFI_ENDPROC		.cfi_endproc
-#define CFI_DEF_CFA		.cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
-#define CFI_OFFSET		.cfi_offset
-#define CFI_REL_OFFSET		.cfi_rel_offset
-#define CFI_REGISTER		.cfi_register
-#define CFI_RESTORE		.cfi_restore
-#define CFI_REMEMBER_STATE	.cfi_remember_state
-#define CFI_RESTORE_STATE	.cfi_restore_state
-#define CFI_UNDEFINED		.cfi_undefined
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME	.cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#else
-
-/* Due to the structure of pre-exisiting code, don't use assembler line
-   comment character # to ignore the arguments. Instead, use a dummy macro. */
-.macro ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC		ignore
-#define CFI_ENDPROC		ignore
-#define CFI_DEF_CFA		ignore
-#define CFI_DEF_CFA_REGISTER	ignore
-#define CFI_DEF_CFA_OFFSET	ignore
-#define CFI_ADJUST_CFA_OFFSET	ignore
-#define CFI_OFFSET		ignore
-#define CFI_REL_OFFSET		ignore
-#define CFI_REGISTER		ignore
-#define CFI_RESTORE		ignore
-#define CFI_REMEMBER_STATE	ignore
-#define CFI_RESTORE_STATE	ignore
-#define CFI_UNDEFINED		ignore
-#define CFI_SIGNAL_FRAME	ignore
-
-#endif
-
-#endif
diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h
deleted file mode 100644
index c950519a264d..000000000000
--- a/include/asm-x86/dwarf2_64.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef _DWARF2_H
-#define _DWARF2_H 1
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
-   Macros for dwarf2 CFI unwind table entries.
-   See "as.info" for details on these pseudo ops. Unfortunately
-   they are only supported in very new binutils, so define them
-   away for older version.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC		.cfi_startproc
-#define CFI_ENDPROC		.cfi_endproc
-#define CFI_DEF_CFA		.cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
-#define CFI_OFFSET		.cfi_offset
-#define CFI_REL_OFFSET		.cfi_rel_offset
-#define CFI_REGISTER		.cfi_register
-#define CFI_RESTORE		.cfi_restore
-#define CFI_REMEMBER_STATE	.cfi_remember_state
-#define CFI_RESTORE_STATE	.cfi_restore_state
-#define CFI_UNDEFINED		.cfi_undefined
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME	.cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#else
-
-/* use assembler line comment character # to ignore the arguments. */
-#define CFI_STARTPROC		#
-#define CFI_ENDPROC		#
-#define CFI_DEF_CFA		#
-#define CFI_DEF_CFA_REGISTER	#
-#define CFI_DEF_CFA_OFFSET	#
-#define CFI_ADJUST_CFA_OFFSET	#
-#define CFI_OFFSET		#
-#define CFI_REL_OFFSET		#
-#define CFI_REGISTER		#
-#define CFI_RESTORE		#
-#define CFI_REMEMBER_STATE	#
-#define CFI_RESTORE_STATE	#
-#define CFI_UNDEFINED		#
-#define CFI_SIGNAL_FRAME	#
-
-#endif
-
-#endif
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 7004251fc66b..33e793e991d0 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -2,6 +2,41 @@
2#define __ASM_E820_H 2#define __ASM_E820_H
3#define E820MAP 0x2d0 /* our map */ 3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */ 4#define E820MAX 128 /* number of entries in E820MAP */
5
6/*
7 * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
8 * constrained space in the zeropage. If we have more nodes than
9 * that, and if we've booted off EFI firmware, then the EFI tables
10 * passed us from the EFI firmware can list more nodes. Size our
11 * internal memory map tables to have room for these additional
12 * nodes, based on up to three entries per node for which the
13 * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
14 * plus E820MAX, allowing space for the possible duplicate E820
15 * entries that might need room in the same arrays, prior to the
16 * call to sanitize_e820_map() to remove duplicates. The allowance
17 * of three memory map entries per node is "enough" entries for
18 * the initial hardware platform motivating this mechanism to make
19 * use of additional EFI map entries. Future platforms may want
20 * to allow more than three entries per node or otherwise refine
21 * this size.
22 */
23
24/*
25 * Odd: 'make headers_check' complains about numa.h if I try
26 * to collapse the next two #ifdef lines to a single line:
27 * #if defined(__KERNEL__) && defined(CONFIG_EFI)
28 */
29#ifdef __KERNEL__
30#ifdef CONFIG_EFI
31#include <linux/numa.h>
32#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
33#else /* ! CONFIG_EFI */
34#define E820_X_MAX E820MAX
35#endif
36#else /* ! __KERNEL__ */
37#define E820_X_MAX E820MAX
38#endif
39
5#define E820NR 0x1e8 /* # entries in E820MAP */ 40#define E820NR 0x1e8 /* # entries in E820MAP */
6 41
7#define E820_RAM 1 42#define E820_RAM 1
@@ -9,6 +44,9 @@
9#define E820_ACPI 3 44#define E820_ACPI 3
10#define E820_NVS 4 45#define E820_NVS 4
11 46
47/* reserved RAM used by kernel itself */
48#define E820_RESERVED_KERN 128
49
12#ifndef __ASSEMBLY__ 50#ifndef __ASSEMBLY__
13struct e820entry { 51struct e820entry {
14 __u64 addr; /* start of memory segment */ 52 __u64 addr; /* start of memory segment */
@@ -18,22 +56,79 @@ struct e820entry {
18 56
19struct e820map { 57struct e820map {
20 __u32 nr_map; 58 __u32 nr_map;
21 struct e820entry map[E820MAX]; 59 struct e820entry map[E820_X_MAX];
22}; 60};
61
62/* see comment in arch/x86/kernel/e820.c */
63extern struct e820map e820;
64extern struct e820map e820_saved;
65
66extern int e820_any_mapped(u64 start, u64 end, unsigned type);
67extern int e820_all_mapped(u64 start, u64 end, unsigned type);
68extern void e820_add_region(u64 start, u64 size, int type);
69extern void e820_print_map(char *who);
70extern int
71sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map);
72extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
73 unsigned new_type);
74extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
75 int checktype);
76extern void update_e820(void);
77extern void e820_setup_gap(void);
78extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
79 unsigned long start_addr, unsigned long long end_addr);
80struct setup_data;
81extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
82
83#if defined(CONFIG_X86_64) || \
84 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
85extern void e820_mark_nosave_regions(unsigned long limit_pfn);
86#else
87static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
88{
89}
90#endif
91
92extern unsigned long end_user_pfn;
93
94extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
95extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
96extern void reserve_early(u64 start, u64 end, char *name);
97extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
98extern void free_early(u64 start, u64 end);
99extern void early_res_to_bootmem(u64 start, u64 end);
100extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
101
102extern unsigned long e820_end_of_ram_pfn(void);
103extern unsigned long e820_end_of_low_ram_pfn(void);
104extern int e820_find_active_region(const struct e820entry *ei,
105 unsigned long start_pfn,
106 unsigned long last_pfn,
107 unsigned long *ei_startpfn,
108 unsigned long *ei_endpfn);
109extern void e820_register_active_regions(int nid, unsigned long start_pfn,
110 unsigned long end_pfn);
111extern u64 e820_hole_size(u64 start, u64 end);
112extern void finish_e820_parsing(void);
113extern void e820_reserve_resources(void);
114extern void setup_memory_map(void);
115extern char *default_machine_specific_memory_setup(void);
116extern char *machine_specific_memory_setup(void);
117extern char *memory_setup(void);
118
23#endif /* __ASSEMBLY__ */ 119#endif /* __ASSEMBLY__ */
24 120
25#define ISA_START_ADDRESS 0xa0000 121#define ISA_START_ADDRESS 0xa0000
26#define ISA_END_ADDRESS 0x100000 122#define ISA_END_ADDRESS 0x100000
123#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
27 124
28#define BIOS_BEGIN 0x000a0000 125#define BIOS_BEGIN 0x000a0000
29#define BIOS_END 0x00100000 126#define BIOS_END 0x00100000
30 127
31#ifdef __KERNEL__ 128#ifdef __KERNEL__
32#ifdef CONFIG_X86_32 129#include <linux/ioport.h>
33# include "e820_32.h" 130
34#else 131#define HIGH_MEMORY (1024*1024)
35# include "e820_64.h"
36#endif
37#endif /* __KERNEL__ */ 132#endif /* __KERNEL__ */
38 133
39#endif /* __ASM_E820_H */ 134#endif /* __ASM_E820_H */
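
A rough sketch of how the unified declarations above combine in an early
setup path (hypothetical example: the function name, the sizes and the
-1ULL "not found" return of find_e820_area() are assumptions, not part of
this patch):

	static void __init example_grab_early_buffer(void)
	{
		u64 addr;

		/* look for 64 KB of free RAM between 1 MB and 16 MB, 4 KB aligned */
		addr = find_e820_area(1024*1024, 16*1024*1024, 64*1024, 4096);
		if (addr == -1ULL)		/* assumed failure value */
			return;

		/* keep the early allocator away from it */
		reserve_early(addr, addr + 64*1024, "example buf");

		/* the whole range should be plain RAM */
		WARN_ON(!e820_all_mapped(addr, addr + 64*1024, E820_RAM));
	}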
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h
deleted file mode 100644
index a9f7c6ec32bf..000000000000
--- a/include/asm-x86/e820_32.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, arch/i386/boot/setup.S populates a scratch table
6 * in the empty_zero_block that contains a list of usable address/size
 7 * pairs. In arch/i386/kernel/setup.c, this information is
8 * transferred into the e820map, and in arch/i386/mm/init.c, that
9 * new information is used to mark pages reserved or not.
10 *
11 */
12#ifndef __E820_HEADER
13#define __E820_HEADER
14
15#include <linux/ioport.h>
16
17#define HIGH_MEMORY (1024*1024)
18
19#ifndef __ASSEMBLY__
20
21extern struct e820map e820;
22extern void update_e820(void);
23
24extern int e820_all_mapped(unsigned long start, unsigned long end,
25 unsigned type);
26extern int e820_any_mapped(u64 start, u64 end, unsigned type);
27extern void propagate_e820_map(void);
28extern void register_bootmem_low_pages(unsigned long max_low_pfn);
29extern void add_memory_region(unsigned long long start,
30 unsigned long long size, int type);
31extern void update_memory_range(u64 start, u64 size, unsigned old_type,
32 unsigned new_type);
33extern void e820_register_memory(void);
34extern void limit_regions(unsigned long long size);
35extern void print_memory_map(char *who);
36extern void init_iomem_resources(struct resource *code_resource,
37 struct resource *data_resource,
38 struct resource *bss_resource);
39
40#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
41extern void e820_mark_nosave_regions(void);
42#else
43static inline void e820_mark_nosave_regions(void)
44{
45}
46#endif
47
48
49#endif/*!__ASSEMBLY__*/
50#endif/*__E820_HEADER*/
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
deleted file mode 100644
index 71c4d685d30d..000000000000
--- a/include/asm-x86/e820_64.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * structures and definitions for the int 15, ax=e820 memory map
3 * scheme.
4 *
5 * In a nutshell, setup.S populates a scratch table in the
6 * empty_zero_block that contains a list of usable address/size
 7 * pairs. In setup.c, this information is transferred into the e820map,
8 * and in init.c/numa.c, that new information is used to mark pages
9 * reserved or not.
10 */
11#ifndef __E820_HEADER
12#define __E820_HEADER
13
14#include <linux/ioport.h>
15
16#ifndef __ASSEMBLY__
17extern unsigned long find_e820_area(unsigned long start, unsigned long end,
18 unsigned long size, unsigned long align);
19extern unsigned long find_e820_area_size(unsigned long start,
20 unsigned long *sizep,
21 unsigned long align);
22extern void add_memory_region(unsigned long start, unsigned long size,
23 int type);
24extern void update_memory_range(u64 start, u64 size, unsigned old_type,
25 unsigned new_type);
26extern void setup_memory_region(void);
27extern void contig_e820_setup(void);
28extern unsigned long e820_end_of_ram(void);
29extern void e820_reserve_resources(void);
30extern void e820_mark_nosave_regions(void);
31extern int e820_any_mapped(unsigned long start, unsigned long end,
32 unsigned type);
33extern int e820_all_mapped(unsigned long start, unsigned long end,
34 unsigned type);
35extern int e820_any_non_reserved(unsigned long start, unsigned long end);
36extern int is_memory_any_valid(unsigned long start, unsigned long end);
37extern int e820_all_non_reserved(unsigned long start, unsigned long end);
38extern int is_memory_all_valid(unsigned long start, unsigned long end);
39extern unsigned long e820_hole_size(unsigned long start, unsigned long end);
40
41extern void e820_setup_gap(void);
42extern void e820_register_active_regions(int nid, unsigned long start_pfn,
43 unsigned long end_pfn);
44
45extern void finish_e820_parsing(void);
46
47extern struct e820map e820;
48extern void update_e820(void);
49
50extern void reserve_early(unsigned long start, unsigned long end, char *name);
51extern void free_early(unsigned long start, unsigned long end);
52extern void early_res_to_bootmem(unsigned long start, unsigned long end);
53
54#endif/*!__ASSEMBLY__*/
55
56#endif/*__E820_HEADER*/
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index d53004b855cc..7ed2bd7a7f51 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -90,7 +90,7 @@ extern void *efi_ioremap(unsigned long addr, unsigned long size);
90 90
91#endif /* CONFIG_X86_32 */ 91#endif /* CONFIG_X86_32 */
92 92
93extern void efi_reserve_bootmem(void); 93extern void efi_reserve_early(void);
94extern void efi_call_phys_prelog(void); 94extern void efi_call_phys_prelog(void);
95extern void efi_call_phys_epilog(void); 95extern void efi_call_phys_epilog(void);
96 96
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 8f232dc5b5fe..7be4733c793e 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -83,9 +83,9 @@ extern unsigned int vdso_enabled;
83 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) 83 (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
84 84
85#include <asm/processor.h> 85#include <asm/processor.h>
86#include <asm/system.h>
86 87
87#ifdef CONFIG_X86_32 88#ifdef CONFIG_X86_32
88#include <asm/system.h> /* for savesegment */
89#include <asm/desc.h> 89#include <asm/desc.h>
90 90
91#define elf_check_arch(x) elf_check_arch_ia32(x) 91#define elf_check_arch(x) elf_check_arch_ia32(x)
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h
index 5bd206973dca..44d4f8217349 100644
--- a/include/asm-x86/fixmap.h
+++ b/include/asm-x86/fixmap.h
@@ -7,7 +7,62 @@
7# include "fixmap_64.h" 7# include "fixmap_64.h"
8#endif 8#endif
9 9
10extern int fixmaps_set;
11
12void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
13void native_set_fixmap(enum fixed_addresses idx,
14 unsigned long phys, pgprot_t flags);
15
16#ifndef CONFIG_PARAVIRT
17static inline void __set_fixmap(enum fixed_addresses idx,
18 unsigned long phys, pgprot_t flags)
19{
20 native_set_fixmap(idx, phys, flags);
21}
22#endif
23
24#define set_fixmap(idx, phys) \
25 __set_fixmap(idx, phys, PAGE_KERNEL)
26
27/*
28 * Some hardware wants to get fixmapped without caching.
29 */
30#define set_fixmap_nocache(idx, phys) \
31 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
32
10#define clear_fixmap(idx) \ 33#define clear_fixmap(idx) \
11 __set_fixmap(idx, 0, __pgprot(0)) 34 __set_fixmap(idx, 0, __pgprot(0))
12 35
36#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
37#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
38
39extern void __this_fixmap_does_not_exist(void);
40
41/*
42 * 'index to address' translation. If anyone tries to use the idx
 43 * directly without translation, we catch the bug with a NULL-dereference
44 * kernel oops. Illegal ranges of incoming indices are caught too.
45 */
46static __always_inline unsigned long fix_to_virt(const unsigned int idx)
47{
48 /*
49 * this branch gets completely eliminated after inlining,
50 * except when someone tries to use fixaddr indices in an
51 * illegal way. (such as mixing up address types or using
52 * out-of-range indices).
53 *
54 * If it doesn't get removed, the linker will complain
55 * loudly with a reasonably clear error message..
56 */
57 if (idx >= __end_of_fixed_addresses)
58 __this_fixmap_does_not_exist();
59
60 return __fix_to_virt(idx);
61}
62
63static inline unsigned long virt_to_fix(const unsigned long vaddr)
64{
65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
66 return __virt_to_fix(vaddr);
67}
13#endif 68#endif
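
With __set_fixmap() and the index/address helpers now shared between 32
and 64 bit, using a fixmap slot looks roughly like the sketch below.
FIX_EXAMPLE stands in for a real enum fixed_addresses entry (such as
FIX_APIC_BASE); the helper itself is hypothetical:

	static void __iomem *example_map_regs(unsigned long phys)
	{
		/* install an uncached mapping at the fixed slot... */
		set_fixmap_nocache(FIX_EXAMPLE, phys & PAGE_MASK);

		/* ...and return its constant virtual address; an out-of-range
		   index here triggers the link-time error described above */
		return (void __iomem *)fix_to_virt(FIX_EXAMPLE);
	}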
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 4b96148e90c1..aae2f0501a40 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -79,10 +79,6 @@ enum fixed_addresses {
79 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 79 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
80 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 80 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
81#endif 81#endif
82#ifdef CONFIG_ACPI
83 FIX_ACPI_BEGIN,
84 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
85#endif
86#ifdef CONFIG_PCI_MMCONFIG 82#ifdef CONFIG_PCI_MMCONFIG
87 FIX_PCIE_MCFG, 83 FIX_PCIE_MCFG,
88#endif 84#endif
@@ -103,23 +99,18 @@ enum fixed_addresses {
103 (__end_of_permanent_fixed_addresses & 511), 99 (__end_of_permanent_fixed_addresses & 511),
104 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, 100 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
105 FIX_WP_TEST, 101 FIX_WP_TEST,
102#ifdef CONFIG_ACPI
103 FIX_ACPI_BEGIN,
104 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
105#endif
106#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 106#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
107 FIX_OHCI1394_BASE, 107 FIX_OHCI1394_BASE,
108#endif 108#endif
109 __end_of_fixed_addresses 109 __end_of_fixed_addresses
110}; 110};
111 111
112extern void __set_fixmap(enum fixed_addresses idx,
113 unsigned long phys, pgprot_t flags);
114extern void reserve_top_address(unsigned long reserve); 112extern void reserve_top_address(unsigned long reserve);
115 113
116#define set_fixmap(idx, phys) \
117 __set_fixmap(idx, phys, PAGE_KERNEL)
118/*
119 * Some hardware wants to get fixmapped without caching.
120 */
121#define set_fixmap_nocache(idx, phys) \
122 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
123 114
124#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) 115#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
125 116
@@ -128,38 +119,5 @@ extern void reserve_top_address(unsigned long reserve);
128#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) 119#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
129#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) 120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
130 121
131#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
132#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
133
134extern void __this_fixmap_does_not_exist(void);
135
136/*
137 * 'index to address' translation. If anyone tries to use the idx
 138 * directly without translation, we catch the bug with a NULL-dereference
139 * kernel oops. Illegal ranges of incoming indices are caught too.
140 */
141static __always_inline unsigned long fix_to_virt(const unsigned int idx)
142{
143 /*
144 * this branch gets completely eliminated after inlining,
145 * except when someone tries to use fixaddr indices in an
146 * illegal way. (such as mixing up address types or using
147 * out-of-range indices).
148 *
149 * If it doesn't get removed, the linker will complain
150 * loudly with a reasonably clear error message..
151 */
152 if (idx >= __end_of_fixed_addresses)
153 __this_fixmap_does_not_exist();
154
155 return __fix_to_virt(idx);
156}
157
158static inline unsigned long virt_to_fix(const unsigned long vaddr)
159{
160 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
161 return __virt_to_fix(vaddr);
162}
163
164#endif /* !__ASSEMBLY__ */ 122#endif /* !__ASSEMBLY__ */
165#endif 123#endif
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 355d26a75a82..00f3d74a0524 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -12,6 +12,7 @@
12#define _ASM_FIXMAP_64_H 12#define _ASM_FIXMAP_64_H
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <asm/acpi.h>
15#include <asm/apicdef.h> 16#include <asm/apicdef.h>
16#include <asm/page.h> 17#include <asm/page.h>
17#include <asm/vsyscall.h> 18#include <asm/vsyscall.h>
@@ -39,30 +40,38 @@ enum fixed_addresses {
39 VSYSCALL_HPET, 40 VSYSCALL_HPET,
40 FIX_DBGP_BASE, 41 FIX_DBGP_BASE,
41 FIX_EARLYCON_MEM_BASE, 42 FIX_EARLYCON_MEM_BASE,
42 FIX_HPET_BASE,
43 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ 43 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
44 FIX_IO_APIC_BASE_0, 44 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, 45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
46 FIX_EFI_IO_MAP_LAST_PAGE, 46 FIX_EFI_IO_MAP_LAST_PAGE,
47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE 47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
48 + MAX_EFI_IO_PAGES - 1, 48 + MAX_EFI_IO_PAGES - 1,
49#ifdef CONFIG_PARAVIRT
50 FIX_PARAVIRT_BOOTMAP,
51#endif
52#ifdef CONFIG_ACPI
53 FIX_ACPI_BEGIN,
54 FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
55#endif
49#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT 56#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
50 FIX_OHCI1394_BASE, 57 FIX_OHCI1394_BASE,
51#endif 58#endif
59 __end_of_permanent_fixed_addresses,
60 /*
61 * 256 temporary boot-time mappings, used by early_ioremap(),
62 * before ioremap() is functional.
63 *
64 * We round it up to the next 512 pages boundary so that we
65 * can have a single pgd entry and a single pte table:
66 */
67#define NR_FIX_BTMAPS 64
68#define FIX_BTMAPS_NESTING 4
69 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 -
70 (__end_of_permanent_fixed_addresses & 511),
71 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
52 __end_of_fixed_addresses 72 __end_of_fixed_addresses
53}; 73};
54 74
55extern void __set_fixmap(enum fixed_addresses idx,
56 unsigned long phys, pgprot_t flags);
57
58#define set_fixmap(idx, phys) \
59 __set_fixmap(idx, phys, PAGE_KERNEL)
60/*
61 * Some hardware wants to get fixmapped without caching.
62 */
63#define set_fixmap_nocache(idx, phys) \
64 __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
65
66#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) 75#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
67#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 76#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
68#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 77#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
@@ -71,30 +80,4 @@ extern void __set_fixmap(enum fixed_addresses idx,
71#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) 80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
72#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) 81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
73 82
74#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
75
76extern void __this_fixmap_does_not_exist(void);
77
78/*
79 * 'index to address' translation. If anyone tries to use the idx
 80 * directly without translation, we catch the bug with a NULL-dereference
81 * kernel oops. Illegal ranges of incoming indices are caught too.
82 */
83static __always_inline unsigned long fix_to_virt(const unsigned int idx)
84{
85 /*
86 * this branch gets completely eliminated after inlining,
87 * except when someone tries to use fixaddr indices in an
88 * illegal way. (such as mixing up address types or using
89 * out-of-range indices).
90 *
91 * If it doesn't get removed, the linker will complain
92 * loudly with a reasonably clear error message..
93 */
94 if (idx >= __end_of_fixed_addresses)
95 __this_fixmap_does_not_exist();
96
97 return __fix_to_virt(idx);
98}
99
100#endif 83#endif
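
The arithmetic behind the boot-time mapping block added above:
NR_FIX_BTMAPS * FIX_BTMAPS_NESTING = 64 * 4 = 256 early_ioremap() slots,
and FIX_BTMAP_END is rounded up to the next multiple of 512 pages so the
whole range sits under a single pgd entry / pte table. Restated as
compile-time checks (illustrative; BUILD_BUG_ON has to live inside some
init function):

	BUILD_BUG_ON(NR_FIX_BTMAPS * FIX_BTMAPS_NESTING != 256);
	BUILD_BUG_ON(FIX_BTMAP_END & 511);	/* 512-page aligned */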
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
new file mode 100644
index 000000000000..c184441133f2
--- /dev/null
+++ b/include/asm-x86/ftrace.h
@@ -0,0 +1,14 @@
1#ifndef _ASM_X86_FTRACE
 2#define _ASM_X86_FTRACE
3
4#ifdef CONFIG_FTRACE
5#define MCOUNT_ADDR ((long)(mcount))
6#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
7
8#ifndef __ASSEMBLY__
9extern void mcount(void);
10#endif
11
12#endif /* CONFIG_FTRACE */
13
14#endif /* _ASM_X86_FTRACE */
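
For orientation: with CONFIG_FTRACE the compiler's -pg option places a
5-byte "call mcount" (opcode e8 plus a rel32) at each function entry,
which is what MCOUNT_INSN_SIZE measures. The displacement is counted
from the end of the instruction, so a patching routine would derive it
along these lines (hypothetical helper, not part of the patch):

	static inline long example_call_offset(unsigned long ip)
	{
		/* rel32 in "e8 xx xx xx xx" is relative to ip + 5 */
		return (long)(MCOUNT_ADDR - (ip + MCOUNT_INSN_SIZE));
	}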
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 90958ed993fa..33b9aeeb35a2 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,34 +1,72 @@
1#ifndef _ASM_X8664_IOMMU_H 1#ifndef _ASM_X8664_GART_H
2#define _ASM_X8664_IOMMU_H 1 2#define _ASM_X8664_GART_H 1
3 3
4extern void pci_iommu_shutdown(void); 4#include <asm/e820.h>
5extern void no_iommu_init(void); 5#include <asm/iommu.h>
6extern int force_iommu, no_iommu; 6
7extern int iommu_detected; 7extern void set_up_gart_resume(u32, u32);
8#ifdef CONFIG_GART_IOMMU 8
9extern void gart_iommu_init(void);
10extern void gart_iommu_shutdown(void);
11extern void __init gart_parse_options(char *);
12extern void early_gart_iommu_check(void);
13extern void gart_iommu_hole_init(void);
14extern int fallback_aper_order; 9extern int fallback_aper_order;
15extern int fallback_aper_force; 10extern int fallback_aper_force;
16extern int gart_iommu_aperture;
17extern int gart_iommu_aperture_allowed;
18extern int gart_iommu_aperture_disabled;
19extern int fix_aperture; 11extern int fix_aperture;
20#else
21#define gart_iommu_aperture 0
22#define gart_iommu_aperture_allowed 0
23 12
24static inline void early_gart_iommu_check(void) 13/* PTE bits. */
14#define GPTE_VALID 1
15#define GPTE_COHERENT 2
16
17/* Aperture control register bits. */
18#define GARTEN (1<<0)
19#define DISGARTCPU (1<<4)
20#define DISGARTIO (1<<5)
21
22/* GART cache control register bits. */
23#define INVGART (1<<0)
24#define GARTPTEERR (1<<1)
25
26/* K8 On-cpu GART registers */
27#define AMD64_GARTAPERTURECTL 0x90
28#define AMD64_GARTAPERTUREBASE 0x94
29#define AMD64_GARTTABLEBASE 0x98
30#define AMD64_GARTCACHECTL 0x9c
31#define AMD64_GARTEN (1<<0)
32
33static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
25{ 34{
35 u32 tmp, ctl;
36
37 /* address of the mappings table */
38 addr >>= 12;
39 tmp = (u32) addr<<4;
40 tmp &= ~0xf;
41 pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
42
43 /* Enable GART translation for this hammer. */
44 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
45 ctl |= GARTEN;
46 ctl &= ~(DISGARTCPU | DISGARTIO);
47 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
26} 48}
27 49
28static inline void gart_iommu_shutdown(void) 50static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
29{ 51{
30} 52 if (!aper_base)
53 return 0;
31 54
32#endif 55 if (aper_base + aper_size > 0x100000000ULL) {
56 printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
57 return 0;
58 }
59 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
60 printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
61 return 0;
62 }
63 if (aper_size < min_size) {
 64 printk(KERN_ERR "Aperture too small (%d MB), minimum %d MB\n",
65 aper_size>>20, min_size>>20);
66 return 0;
67 }
68
69 return 1;
70}
33 71
34#endif 72#endif
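
A hedged sketch of how the two new inlines compose for one K8
northbridge; dev, the aperture values and the 32 MB minimum are
illustrative (real callers live in the aperture/GART setup and resume
paths):

	static void example_gart_enable(struct pci_dev *dev, u64 aper_base,
					u32 aper_size, u64 table_phys)
	{
		if (!aperture_valid(aper_base, aper_size, 32U << 20))
			return;

		/* points AMD64_GARTTABLEBASE at table_phys and sets GARTEN */
		enable_gart_translation(dev, table_phys);
	}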
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 1de931b263ce..0f8504627c41 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -44,4 +44,6 @@ DECLARE_PER_CPU(int, x2apic_extra_bits);
44extern void uv_cpu_init(void); 44extern void uv_cpu_init(void);
45extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); 45extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
46 46
47extern void setup_apic_routing(void);
48
47#endif 49#endif
diff --git a/include/asm-x86/hardirq.h b/include/asm-x86/hardirq.h
index 314434d664e7..000787df66e6 100644
--- a/include/asm-x86/hardirq.h
+++ b/include/asm-x86/hardirq.h
@@ -3,3 +3,9 @@
3#else 3#else
4# include "hardirq_64.h" 4# include "hardirq_64.h"
5#endif 5#endif
6
7extern u64 arch_irq_stat_cpu(unsigned int cpu);
8#define arch_irq_stat_cpu arch_irq_stat_cpu
9
10extern u64 arch_irq_stat(void);
11#define arch_irq_stat arch_irq_stat
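
These hooks follow the generic pattern where the consumer tests for the
#define and, when it is present, folds the architecture's counters into
the interrupt total, roughly (a sketch of the consumer side, not code
from this patch):

	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += arch_irq_stat_cpu(cpu);	/* per-CPU APIC/SMP counts */
	sum += arch_irq_stat();			/* global counts, e.g. errors */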
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index e153f3b44774..4514b16cc723 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -74,6 +74,9 @@ struct page *kmap_atomic_to_page(void *ptr);
74 74
75#define flush_cache_kmaps() do { } while (0) 75#define flush_cache_kmaps() do { } while (0)
76 76
77extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
78 unsigned long end_pfn);
79
77#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
78 81
79#endif /* _ASM_HIGHMEM_H */ 82#endif /* _ASM_HIGHMEM_H */
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
index 6a9b4ac59bf7..82f1ac641bd7 100644
--- a/include/asm-x86/hpet.h
+++ b/include/asm-x86/hpet.h
@@ -86,8 +86,8 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
86#else /* CONFIG_HPET_TIMER */ 86#else /* CONFIG_HPET_TIMER */
87 87
88static inline int hpet_enable(void) { return 0; } 88static inline int hpet_enable(void) { return 0; }
89static inline unsigned long hpet_readl(unsigned long a) { return 0; }
90static inline int is_hpet_enabled(void) { return 0; } 89static inline int is_hpet_enabled(void) { return 0; }
90#define hpet_readl(a) 0
91 91
92#endif 92#endif
93#endif /* ASM_X86_HPET_H */ 93#endif /* ASM_X86_HPET_H */
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index bf025399d939..77ba51df5668 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -1,5 +1,107 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
6 *
7 * moved some of the old arch/i386/kernel/irq.h to here. VY
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 *
12 * hacked by Andi Kleen for x86-64.
13 * unified by tglx
14 */
15
16#include <asm/irq_vectors.h>
17
18#ifndef __ASSEMBLY__
19
20#include <linux/percpu.h>
21#include <linux/profile.h>
22#include <linux/smp.h>
23
24#include <asm/atomic.h>
25#include <asm/irq.h>
26#include <asm/sections.h>
27
28#define platform_legacy_irq(irq) ((irq) < 16)
29
30/* Interrupt handlers registered during init_IRQ */
31extern void apic_timer_interrupt(void);
32extern void error_interrupt(void);
33extern void spurious_interrupt(void);
34extern void thermal_interrupt(void);
35extern void reschedule_interrupt(void);
36
37extern void invalidate_interrupt(void);
38extern void invalidate_interrupt0(void);
39extern void invalidate_interrupt1(void);
40extern void invalidate_interrupt2(void);
41extern void invalidate_interrupt3(void);
42extern void invalidate_interrupt4(void);
43extern void invalidate_interrupt5(void);
44extern void invalidate_interrupt6(void);
45extern void invalidate_interrupt7(void);
46
47extern void irq_move_cleanup_interrupt(void);
48extern void threshold_interrupt(void);
49
50extern void call_function_interrupt(void);
51extern void call_function_single_interrupt(void);
52
53/* PIC specific functions */
54extern void disable_8259A_irq(unsigned int irq);
55extern void enable_8259A_irq(unsigned int irq);
56extern int i8259A_irq_pending(unsigned int irq);
57extern void make_8259A_irq(unsigned int irq);
58extern void init_8259A(int aeoi);
59
60/* IOAPIC */
61#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
62extern unsigned long io_apic_irqs;
63
64extern void init_VISWS_APIC_irqs(void);
65extern void setup_IO_APIC(void);
66extern void disable_IO_APIC(void);
67extern void print_IO_APIC(void);
68extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
69extern void setup_ioapic_dest(void);
70
71#ifdef CONFIG_X86_64
72extern void enable_IO_APIC(void);
73#endif
74
75/* IPI functions */
76extern void send_IPI_self(int vector);
77extern void send_IPI(int dest, int vector);
78
79/* Statistics */
80extern atomic_t irq_err_count;
81extern atomic_t irq_mis_count;
82
83/* EISA */
84extern void eisa_set_level_irq(unsigned int irq);
85
86/* Voyager functions */
87extern asmlinkage void vic_cpi_interrupt(void);
88extern asmlinkage void vic_sys_interrupt(void);
89extern asmlinkage void vic_cmn_interrupt(void);
90extern asmlinkage void qic_timer_interrupt(void);
91extern asmlinkage void qic_invalidate_interrupt(void);
92extern asmlinkage void qic_reschedule_interrupt(void);
93extern asmlinkage void qic_enable_irq_interrupt(void);
94extern asmlinkage void qic_call_function_interrupt(void);
95
1#ifdef CONFIG_X86_32 96#ifdef CONFIG_X86_32
2# include "hw_irq_32.h" 97extern void (*const interrupt[NR_IRQS])(void);
3#else 98#else
4# include "hw_irq_64.h" 99typedef int vector_irq_t[NR_VECTORS];
100DECLARE_PER_CPU(vector_irq_t, vector_irq);
101extern spinlock_t vector_lock;
102#endif
103extern void setup_vector_irq(int cpu);
104
 105#endif /* !__ASSEMBLY__ */
106
5#endif 107#endif
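
On 64 bit the per-CPU vector_irq table declared above is what the
interrupt entry path uses to map an IDT vector back to a Linux IRQ;
simplified, the lookup amounts to the following (a sketch, assuming the
table was filled by setup_vector_irq() and that the entry stub pushed
the negated vector into orig_ax):

	unsigned vector = ~regs->orig_ax;
	int irq = __get_cpu_var(vector_irq)[vector];

	if (irq >= 0)
		generic_handle_irq(irq);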
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
deleted file mode 100644
index ea88054e03f3..000000000000
--- a/include/asm-x86/hw_irq_32.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 */
14
15#include <linux/profile.h>
16#include <asm/atomic.h>
17#include <asm/irq.h>
18#include <asm/sections.h>
19
20#define NMI_VECTOR 0x02
21
22/*
23 * Various low-level irq details needed by irq.c, process.c,
24 * time.c, io_apic.c and smp.c
25 *
26 * Interrupt entry/exit code at both C and assembly level
27 */
28
29extern void (*const interrupt[NR_IRQS])(void);
30
31#ifdef CONFIG_SMP
32void reschedule_interrupt(void);
33void invalidate_interrupt(void);
34void call_function_interrupt(void);
35#endif
36
37#ifdef CONFIG_X86_LOCAL_APIC
38void apic_timer_interrupt(void);
39void error_interrupt(void);
40void spurious_interrupt(void);
41void thermal_interrupt(void);
42#define platform_legacy_irq(irq) ((irq) < 16)
43#endif
44
45void disable_8259A_irq(unsigned int irq);
46void enable_8259A_irq(unsigned int irq);
47int i8259A_irq_pending(unsigned int irq);
48void make_8259A_irq(unsigned int irq);
49void init_8259A(int aeoi);
50void send_IPI_self(int vector);
51void init_VISWS_APIC_irqs(void);
52void setup_IO_APIC(void);
53void disable_IO_APIC(void);
54void print_IO_APIC(void);
55int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
56void send_IPI(int dest, int vector);
57void setup_ioapic_dest(void);
58
59extern unsigned long io_apic_irqs;
60
61extern atomic_t irq_err_count;
62extern atomic_t irq_mis_count;
63
64#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
65
66#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h
deleted file mode 100644
index 0062ef390f67..000000000000
--- a/include/asm-x86/hw_irq_64.h
+++ /dev/null
@@ -1,173 +0,0 @@
1#ifndef _ASM_HW_IRQ_H
2#define _ASM_HW_IRQ_H
3
4/*
5 * linux/include/asm/hw_irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * moved some of the old arch/i386/kernel/irq.h to here. VY
10 *
11 * IRQ/IPI changes taken from work by Thomas Radke
12 * <tomsoft@informatik.tu-chemnitz.de>
13 *
14 * hacked by Andi Kleen for x86-64.
15 */
16
17#ifndef __ASSEMBLY__
18#include <asm/atomic.h>
19#include <asm/irq.h>
20#include <linux/profile.h>
21#include <linux/smp.h>
22#include <linux/percpu.h>
23#endif
24
25#define NMI_VECTOR 0x02
26/*
27 * IDT vectors usable for external interrupt sources start
28 * at 0x20:
29 */
30#define FIRST_EXTERNAL_VECTOR 0x20
31
32#define IA32_SYSCALL_VECTOR 0x80
33
34
35/* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
36 * cleanup after irq migration.
37 */
38#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
39
40/*
41 * Vectors 0x30-0x3f are used for ISA interrupts.
42 */
43#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
44#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
45#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
46#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
47#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
48#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
49#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
50#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
51#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
52#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
53#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
54#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
55#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
56#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
57#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
58#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
59
60/*
61 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
62 *
63 * some of the following vectors are 'rare', they are merged
64 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
65 * TLB, reschedule and local APIC vectors are performance-critical.
66 */
67#define SPURIOUS_APIC_VECTOR 0xff
68#define ERROR_APIC_VECTOR 0xfe
69#define RESCHEDULE_VECTOR 0xfd
70#define CALL_FUNCTION_VECTOR 0xfc
71/* fb free - please don't readd KDB here because it's useless
72 (hint - think what a NMI bit does to a vector) */
73#define THERMAL_APIC_VECTOR 0xfa
74#define THRESHOLD_APIC_VECTOR 0xf9
75/* f8 free */
76#define INVALIDATE_TLB_VECTOR_END 0xf7
77#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
78
79#define NUM_INVALIDATE_TLB_VECTORS 8
80
81/*
82 * Local APIC timer IRQ vector is on a different priority level,
83 * to work around the 'lost local interrupt if more than 2 IRQ
84 * sources per level' errata.
85 */
86#define LOCAL_TIMER_VECTOR 0xef
87
88/*
89 * First APIC vector available to drivers: (vectors 0x30-0xee)
90 * we start at 0x41 to spread out vectors evenly between priority
91 * levels. (0x80 is the syscall vector)
92 */
93#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
94#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in irq.h */
95
96
97#ifndef __ASSEMBLY__
98
99/* Interrupt handlers registered during init_IRQ */
100void apic_timer_interrupt(void);
101void spurious_interrupt(void);
102void error_interrupt(void);
103void reschedule_interrupt(void);
104void call_function_interrupt(void);
105void irq_move_cleanup_interrupt(void);
106void invalidate_interrupt0(void);
107void invalidate_interrupt1(void);
108void invalidate_interrupt2(void);
109void invalidate_interrupt3(void);
110void invalidate_interrupt4(void);
111void invalidate_interrupt5(void);
112void invalidate_interrupt6(void);
113void invalidate_interrupt7(void);
114void thermal_interrupt(void);
115void threshold_interrupt(void);
116void i8254_timer_resume(void);
117
118typedef int vector_irq_t[NR_VECTORS];
119DECLARE_PER_CPU(vector_irq_t, vector_irq);
120extern void __setup_vector_irq(int cpu);
121extern spinlock_t vector_lock;
122
123/*
124 * Various low-level irq details needed by irq.c, process.c,
125 * time.c, io_apic.c and smp.c
126 *
127 * Interrupt entry/exit code at both C and assembly level
128 */
129
130extern void disable_8259A_irq(unsigned int irq);
131extern void enable_8259A_irq(unsigned int irq);
132extern int i8259A_irq_pending(unsigned int irq);
133extern void make_8259A_irq(unsigned int irq);
134extern void init_8259A(int aeoi);
135extern void send_IPI_self(int vector);
136extern void init_VISWS_APIC_irqs(void);
137extern void setup_IO_APIC(void);
138extern void enable_IO_APIC(void);
139extern void disable_IO_APIC(void);
140extern void print_IO_APIC(void);
141extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
142extern void send_IPI(int dest, int vector);
143extern void setup_ioapic_dest(void);
144extern void native_init_IRQ(void);
145
146extern unsigned long io_apic_irqs;
147
148extern atomic_t irq_err_count;
149extern atomic_t irq_mis_count;
150
151#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
152
153#include <asm/ptrace.h>
154
155#define IRQ_NAME2(nr) nr##_interrupt(void)
156#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
157
158/*
159 * SMP has a few special interrupts for IPI messages
160 */
161
162#define BUILD_IRQ(nr) \
163 asmlinkage void IRQ_NAME(nr); \
164 asm("\n.p2align\n" \
165 "IRQ" #nr "_interrupt:\n\t" \
166 "push $~(" #nr ") ; " \
167 "jmp common_interrupt");
168
169#define platform_legacy_irq(irq) ((irq) < 16)
170
171#endif
172
173#endif /* _ASM_HW_IRQ_H */
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 45d4df3e51e6..2f98df91f1f2 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -55,4 +55,6 @@ static inline void outb_pic(unsigned char value, unsigned int port)
55 udelay(2); 55 udelay(2);
56} 56}
57 57
58extern struct irq_chip i8259A_chip;
59
58#endif /* __ASM_I8259_H__ */ 60#endif /* __ASM_I8259_H__ */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index d5b11f60dbd0..bf5d629b3a39 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -3,6 +3,76 @@
3 3
4#define ARCH_HAS_IOREMAP_WC 4#define ARCH_HAS_IOREMAP_WC
5 5
6#include <linux/compiler.h>
7
8/*
9 * early_ioremap() and early_iounmap() are for temporary early boot-time
10 * mappings, before the real ioremap() is functional.
11 * A boot-time mapping is currently limited to at most 16 pages.
12 */
13#ifndef __ASSEMBLY__
14extern void early_ioremap_init(void);
15extern void early_ioremap_clear(void);
16extern void early_ioremap_reset(void);
17extern void *early_ioremap(unsigned long offset, unsigned long size);
18extern void early_iounmap(void *addr, unsigned long size);
19extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
20#endif
21
22#define build_mmio_read(name, size, type, reg, barrier) \
23static inline type name(const volatile void __iomem *addr) \
24{ type ret; asm volatile("mov" size " %1,%0":"=" reg (ret) \
25:"m" (*(volatile type __force *)addr) barrier); return ret; }
26
27#define build_mmio_write(name, size, type, reg, barrier) \
28static inline void name(type val, volatile void __iomem *addr) \
29{ asm volatile("mov" size " %0,%1": :reg (val), \
30"m" (*(volatile type __force *)addr) barrier); }
31
32build_mmio_read(readb, "b", unsigned char, "q", :"memory")
33build_mmio_read(readw, "w", unsigned short, "r", :"memory")
34build_mmio_read(readl, "l", unsigned int, "r", :"memory")
35
36build_mmio_read(__readb, "b", unsigned char, "q", )
37build_mmio_read(__readw, "w", unsigned short, "r", )
38build_mmio_read(__readl, "l", unsigned int, "r", )
39
40build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
41build_mmio_write(writew, "w", unsigned short, "r", :"memory")
42build_mmio_write(writel, "l", unsigned int, "r", :"memory")
43
44build_mmio_write(__writeb, "b", unsigned char, "q", )
45build_mmio_write(__writew, "w", unsigned short, "r", )
46build_mmio_write(__writel, "l", unsigned int, "r", )
47
48#define readb_relaxed(a) __readb(a)
49#define readw_relaxed(a) __readw(a)
50#define readl_relaxed(a) __readl(a)
51#define __raw_readb __readb
52#define __raw_readw __readw
53#define __raw_readl __readl
54
55#define __raw_writeb __writeb
56#define __raw_writew __writew
57#define __raw_writel __writel
58
59#define mmiowb() barrier()
60
61#ifdef CONFIG_X86_64
62build_mmio_read(readq, "q", unsigned long, "r", :"memory")
63build_mmio_read(__readq, "q", unsigned long, "r", )
64build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
65build_mmio_write(__writeq, "q", unsigned long, "r", )
66
67#define readq_relaxed(a) __readq(a)
68#define __raw_readq __readq
69#define __raw_writeq writeq
70
71/* Let people know we have them */
72#define readq readq
73#define writeq writeq
74#endif
75
6#ifdef CONFIG_X86_32 76#ifdef CONFIG_X86_32
7# include "io_32.h" 77# include "io_32.h"
8#else 78#else
@@ -16,4 +86,17 @@ extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
16 unsigned long prot_val); 86 unsigned long prot_val);
17extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); 87extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
18 88
19#endif /* _ASM_X86_IO_H */ 102#endif /* _ASM_X86_IO_H */
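
The build_mmio_* macros paste the size suffix onto "mov" and splice in
the register constraint and the optional "memory" clobber; for instance
build_mmio_read(readl, "l", unsigned int, "r", :"memory") expands to
approximately:

	static inline unsigned int readl(const volatile void __iomem *addr)
	{
		unsigned int ret;
		asm volatile("movl %1,%0"
			     : "=r" (ret)
			     : "m" (*(volatile unsigned int __force *)addr)
			     : "memory");	/* keeps the access ordered */
		return ret;
	}

The "q" constraint on the byte variants restricts the operand to a
register with an addressable low byte, which matters for 32-bit code.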
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 049e81e797a0..4df44ed54077 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -122,18 +122,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
122extern void iounmap(volatile void __iomem *addr); 122extern void iounmap(volatile void __iomem *addr);
123 123
124/* 124/*
125 * early_ioremap() and early_iounmap() are for temporary early boot-time
126 * mappings, before the real ioremap() is functional.
127 * A boot-time mapping is currently limited to at most 16 pages.
128 */
129extern void early_ioremap_init(void);
130extern void early_ioremap_clear(void);
131extern void early_ioremap_reset(void);
132extern void *early_ioremap(unsigned long offset, unsigned long size);
133extern void early_iounmap(void *addr, unsigned long size);
134extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
135
136/*
137 * ISA I/O bus memory addresses are 1:1 with the physical address. 125 * ISA I/O bus memory addresses are 1:1 with the physical address.
138 */ 126 */
139#define isa_virt_to_bus virt_to_phys 127#define isa_virt_to_bus virt_to_phys
@@ -149,55 +137,6 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
149#define virt_to_bus virt_to_phys 137#define virt_to_bus virt_to_phys
150#define bus_to_virt phys_to_virt 138#define bus_to_virt phys_to_virt
151 139
152/*
153 * readX/writeX() are used to access memory mapped devices. On some
154 * architectures the memory mapped IO stuff needs to be accessed
155 * differently. On the x86 architecture, we just read/write the
156 * memory location directly.
157 */
158
159static inline unsigned char readb(const volatile void __iomem *addr)
160{
161 return *(volatile unsigned char __force *)addr;
162}
163
164static inline unsigned short readw(const volatile void __iomem *addr)
165{
166 return *(volatile unsigned short __force *)addr;
167}
168
169static inline unsigned int readl(const volatile void __iomem *addr)
170{
171 return *(volatile unsigned int __force *) addr;
172}
173
174#define readb_relaxed(addr) readb(addr)
175#define readw_relaxed(addr) readw(addr)
176#define readl_relaxed(addr) readl(addr)
177#define __raw_readb readb
178#define __raw_readw readw
179#define __raw_readl readl
180
181static inline void writeb(unsigned char b, volatile void __iomem *addr)
182{
183 *(volatile unsigned char __force *)addr = b;
184}
185
186static inline void writew(unsigned short b, volatile void __iomem *addr)
187{
188 *(volatile unsigned short __force *)addr = b;
189}
190
191static inline void writel(unsigned int b, volatile void __iomem *addr)
192{
193 *(volatile unsigned int __force *)addr = b;
194}
195#define __raw_writeb writeb
196#define __raw_writew writew
197#define __raw_writel writel
198
199#define mmiowb()
200
201static inline void 140static inline void
202memset_io(volatile void __iomem *addr, unsigned char val, int count) 141memset_io(volatile void __iomem *addr, unsigned char val, int count)
203{ 142{
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index 0930bedf9e4d..ddd8058a5026 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -204,77 +204,6 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
204#define virt_to_bus virt_to_phys 204#define virt_to_bus virt_to_phys
205#define bus_to_virt phys_to_virt 205#define bus_to_virt phys_to_virt
206 206
207/*
208 * readX/writeX() are used to access memory mapped devices. On some
209 * architectures the memory mapped IO stuff needs to be accessed
210 * differently. On the x86 architecture, we just read/write the
211 * memory location directly.
212 */
213
214static inline __u8 __readb(const volatile void __iomem *addr)
215{
216 return *(__force volatile __u8 *)addr;
217}
218
219static inline __u16 __readw(const volatile void __iomem *addr)
220{
221 return *(__force volatile __u16 *)addr;
222}
223
224static __always_inline __u32 __readl(const volatile void __iomem *addr)
225{
226 return *(__force volatile __u32 *)addr;
227}
228
229static inline __u64 __readq(const volatile void __iomem *addr)
230{
231 return *(__force volatile __u64 *)addr;
232}
233
234#define readb(x) __readb(x)
235#define readw(x) __readw(x)
236#define readl(x) __readl(x)
237#define readq(x) __readq(x)
238#define readb_relaxed(a) readb(a)
239#define readw_relaxed(a) readw(a)
240#define readl_relaxed(a) readl(a)
241#define readq_relaxed(a) readq(a)
242#define __raw_readb readb
243#define __raw_readw readw
244#define __raw_readl readl
245#define __raw_readq readq
246
247#define mmiowb()
248
249static inline void __writel(__u32 b, volatile void __iomem *addr)
250{
251 *(__force volatile __u32 *)addr = b;
252}
253
254static inline void __writeq(__u64 b, volatile void __iomem *addr)
255{
256 *(__force volatile __u64 *)addr = b;
257}
258
259static inline void __writeb(__u8 b, volatile void __iomem *addr)
260{
261 *(__force volatile __u8 *)addr = b;
262}
263
264static inline void __writew(__u16 b, volatile void __iomem *addr)
265{
266 *(__force volatile __u16 *)addr = b;
267}
268
269#define writeq(val, addr) __writeq((val), (addr))
270#define writel(val, addr) __writel((val), (addr))
271#define writew(val, addr) __writew((val), (addr))
272#define writeb(val, addr) __writeb((val), (addr))
273#define __raw_writeb writeb
274#define __raw_writew writew
275#define __raw_writel writel
276#define __raw_writeq writeq
277
278void __memcpy_fromio(void *, unsigned long, unsigned); 207void __memcpy_fromio(void *, unsigned long, unsigned);
279void __memcpy_toio(unsigned long, const void *, unsigned); 208void __memcpy_toio(unsigned long, const void *, unsigned);
280 209
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index d593e14f0341..14f82bbcb5fd 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -11,6 +11,15 @@
11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar 11 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
12 */ 12 */
13 13
14/* I/O Unit Redirection Table */
15#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
16#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
17#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
18#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
19#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
20#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
21#define IO_APIC_REDIR_MASKED (1 << 16)
22
14/* 23/*
15 * The structure of the IO-APIC: 24 * The structure of the IO-APIC:
16 */ 25 */
@@ -112,21 +121,32 @@ extern int nr_ioapic_registers[MAX_IO_APICS];
112 121
113#define MP_MAX_IOAPIC_PIN 127 122#define MP_MAX_IOAPIC_PIN 127
114 123
115struct mp_ioapic_routing { 124struct mp_config_ioapic {
116 int apic_id; 125 unsigned long mp_apicaddr;
117 int gsi_base; 126 unsigned int mp_apicid;
118 int gsi_end; 127 unsigned char mp_type;
119 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); 128 unsigned char mp_apicver;
129 unsigned char mp_flags;
130};
131
132struct mp_config_intsrc {
133 unsigned int mp_dstapic;
134 unsigned char mp_type;
135 unsigned char mp_irqtype;
136 unsigned short mp_irqflag;
137 unsigned char mp_srcbus;
138 unsigned char mp_srcbusirq;
139 unsigned char mp_dstirq;
120}; 140};
121 141
122/* I/O APIC entries */ 142/* I/O APIC entries */
123extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; 143extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
124 144
125/* # of MP IRQ source entries */ 145/* # of MP IRQ source entries */
126extern int mp_irq_entries; 146extern int mp_irq_entries;
127 147
128/* MP IRQ source entries */ 148/* MP IRQ source entries */
129extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; 149extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
130 150
131/* non-0 if default (table-less) MP configuration */ 151/* non-0 if default (table-less) MP configuration */
132extern int mpc_default_type; 152extern int mpc_default_type;
@@ -137,6 +157,9 @@ extern int sis_apic_bug;
137/* 1 if "noapic" boot option passed */ 157/* 1 if "noapic" boot option passed */
138extern int skip_ioapic_setup; 158extern int skip_ioapic_setup;
139 159
160/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
161extern int timer_through_8259;
162
140static inline void disable_ioapic_setup(void) 163static inline void disable_ioapic_setup(void)
141{ 164{
142 skip_ioapic_setup = 1; 165 skip_ioapic_setup = 1;
@@ -162,6 +185,8 @@ extern void ioapic_init_mappings(void);
162 185
163#else /* !CONFIG_X86_IO_APIC */ 186#else /* !CONFIG_X86_IO_APIC */
164#define io_apic_assign_pci_irqs 0 187#define io_apic_assign_pci_irqs 0
188static const int timer_through_8259 = 0;
189static inline void ioapic_init_mappings(void) { }
165#endif 190#endif
166 191
167#endif 192#endif
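
A short illustration of the new redirection-table bits: composing the
low word of a masked, level-triggered, logically addressed entry
(hypothetical snippet; real entries are built in io_apic.c):

	u32 low = vector & IO_APIC_REDIR_VECTOR_MASK;

	low |= IO_APIC_REDIR_DEST_LOGICAL;	/* logical destination mode */
	low |= IO_APIC_REDIR_LEVEL_TRIGGER;	/* level, not edge */
	low |= IO_APIC_REDIR_MASKED;		/* delivery disabled for now */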
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 07862fdd23c0..068c9a40aa5b 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -1,29 +1,34 @@
1#ifndef _ASM_X8664_GART_H 1#ifndef _ASM_X8664_IOMMU_H
2#define _ASM_X8664_GART_H 1 2#define _ASM_X8664_IOMMU_H 1
3 3
4extern void pci_iommu_shutdown(void); 4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void); 5extern void no_iommu_init(void);
6extern int force_iommu, no_iommu; 6extern int force_iommu, no_iommu;
7extern int iommu_detected; 7extern int iommu_detected;
8#ifdef CONFIG_IOMMU 8
9#ifdef CONFIG_GART_IOMMU
10extern int gart_iommu_aperture;
11extern int gart_iommu_aperture_allowed;
12extern int gart_iommu_aperture_disabled;
13
14extern void early_gart_iommu_check(void);
9extern void gart_iommu_init(void); 15extern void gart_iommu_init(void);
10extern void gart_iommu_shutdown(void); 16extern void gart_iommu_shutdown(void);
11extern void __init gart_parse_options(char *); 17extern void __init gart_parse_options(char *);
12extern void iommu_hole_init(void); 18extern void gart_iommu_hole_init(void);
13extern int fallback_aper_order; 19
14extern int fallback_aper_force;
15extern int iommu_aperture;
16extern int iommu_aperture_allowed;
17extern int iommu_aperture_disabled;
18extern int fix_aperture;
19#else 20#else
20#define iommu_aperture 0 21#define gart_iommu_aperture 0
21#define iommu_aperture_allowed 0 22#define gart_iommu_aperture_allowed 0
23#define gart_iommu_aperture_disabled 1
22 24
23static inline void gart_iommu_shutdown(void) 25static inline void early_gart_iommu_check(void)
24{ 26{
25} 27}
26 28
29static inline void gart_iommu_shutdown(void)
30{
31}
27#endif 32#endif
28 33
29#endif 34#endif
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index 5f7310aa3efd..bb1c09f7a76c 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -20,6 +20,7 @@
20 20
21#include <asm/hw_irq.h> 21#include <asm/hw_irq.h>
22#include <asm/apic.h> 22#include <asm/apic.h>
23#include <asm/smp.h>
23 24
24/* 25/*
25 * the following functions deal with sending IPIs between CPUs. 26 * the following functions deal with sending IPIs between CPUs.
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h
index 7ba905465a53..1a2925757317 100644
--- a/include/asm-x86/irq.h
+++ b/include/asm-x86/irq.h
@@ -1,5 +1,50 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_IRQ_H
2# include "irq_32.h" 2#define _ASM_IRQ_H
3/*
4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
5 *
6 * IRQ/IPI changes taken from work by Thomas Radke
7 * <tomsoft@informatik.tu-chemnitz.de>
8 */
9
10#include <asm/apicdef.h>
11#include <asm/irq_vectors.h>
12
13static inline int irq_canonicalize(int irq)
14{
15 return ((irq == 2) ? 9 : irq);
16}
17
18#ifdef CONFIG_X86_LOCAL_APIC
19# define ARCH_HAS_NMI_WATCHDOG
20#endif
21
22#ifdef CONFIG_4KSTACKS
23 extern void irq_ctx_init(int cpu);
24 extern void irq_ctx_exit(int cpu);
25# define __ARCH_HAS_DO_SOFTIRQ
3#else 26#else
4# include "irq_64.h" 27# define irq_ctx_init(cpu) do { } while (0)
28# define irq_ctx_exit(cpu) do { } while (0)
29# ifdef CONFIG_X86_64
30# define __ARCH_HAS_DO_SOFTIRQ
31# endif
32#endif
33
34#ifdef CONFIG_IRQBALANCE
35extern int irqbalance_disable(char *str);
36#endif
37
38#ifdef CONFIG_HOTPLUG_CPU
39#include <linux/cpumask.h>
40extern void fixup_irqs(cpumask_t map);
5#endif 41#endif
42
43extern unsigned int do_IRQ(struct pt_regs *regs);
44extern void init_IRQ(void);
45extern void native_init_IRQ(void);
46
47/* Interrupt vector management */
48extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
49
50#endif /* _ASM_IRQ_H */
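
The used_vectors bitmap declared above lets the vector allocator skip
vectors claimed by system interrupts; marking one is a plain bitmap
operation, along the lines of this sketch:

	/* reserve a system vector so it is never handed to a device */
	set_bit(SPURIOUS_APIC_VECTOR, used_vectors);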
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
deleted file mode 100644
index 0b79f3185243..000000000000
--- a/include/asm-x86/irq_32.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#include <linux/sched.h>
14/* include comes from machine specific directory */
15#include "irq_vectors.h"
16#include <asm/thread_info.h>
17
18static inline int irq_canonicalize(int irq)
19{
20 return ((irq == 2) ? 9 : irq);
21}
22
23#ifdef CONFIG_X86_LOCAL_APIC
24# define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
25#endif
26
27#ifdef CONFIG_4KSTACKS
28 extern void irq_ctx_init(int cpu);
29 extern void irq_ctx_exit(int cpu);
30# define __ARCH_HAS_DO_SOFTIRQ
31#else
32# define irq_ctx_init(cpu) do { } while (0)
33# define irq_ctx_exit(cpu) do { } while (0)
34#endif
35
36#ifdef CONFIG_IRQBALANCE
37extern int irqbalance_disable(char *str);
38#endif
39
40#ifdef CONFIG_HOTPLUG_CPU
41extern void fixup_irqs(cpumask_t map);
42#endif
43
44unsigned int do_IRQ(struct pt_regs *regs);
45void init_IRQ(void);
46void __init native_init_IRQ(void);
47
48/* Interrupt vector management */
49extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
50
51#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h
deleted file mode 100644
index 083d35a62c94..000000000000
--- a/include/asm-x86/irq_64.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H
3
4/*
5 * linux/include/asm/irq.h
6 *
7 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
8 *
9 * IRQ/IPI changes taken from work by Thomas Radke
10 * <tomsoft@informatik.tu-chemnitz.de>
11 */
12
13#define TIMER_IRQ 0
14
15/*
16 * 16 8259A IRQ's, 208 potential APIC interrupt sources.
17 * Right now the APIC is mostly only used for SMP.
18 * 256 vectors is an architectural limit. (we can have
19 * more than 256 devices theoretically, but they will
20 * have to use shared interrupts)
21 * Since vectors 0x00-0x1f are used/reserved for the CPU,
22 * the usable vector space is 0x20-0xff (224 vectors)
23 */
24
25/*
26 * The maximum number of vectors supported by x86_64 processors
27 * is limited to 256. For processors other than x86_64, NR_VECTORS
28 * should be changed accordingly.
29 */
30#define NR_VECTORS 256
31
32#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
33
34#define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
35#define NR_IRQ_VECTORS NR_IRQS
36
37static inline int irq_canonicalize(int irq)
38{
39 return ((irq == 2) ? 9 : irq);
40}
41
42#define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
43
44#ifdef CONFIG_HOTPLUG_CPU
45#include <linux/cpumask.h>
46extern void fixup_irqs(cpumask_t map);
47#endif
48
49#define __ARCH_HAS_DO_SOFTIRQ 1
50
51#endif /* _ASM_IRQ_H */
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
new file mode 100644
index 000000000000..90b1d1f12f08
--- /dev/null
+++ b/include/asm-x86/irq_vectors.h
@@ -0,0 +1,173 @@
1#ifndef _ASM_IRQ_VECTORS_H
2#define _ASM_IRQ_VECTORS_H
3
4#include <linux/threads.h>
5
6#define NMI_VECTOR 0x02
7
8/*
9 * IDT vectors usable for external interrupt sources start
10 * at 0x20:
11 */
12#define FIRST_EXTERNAL_VECTOR 0x20
13
14#ifdef CONFIG_X86_32
15# define SYSCALL_VECTOR 0x80
16#else
17# define IA32_SYSCALL_VECTOR 0x80
18#endif
19
20/*
21 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
22 * cleanup after irq migration on 64 bit.
23 */
24#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
25
26/*
27 * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
28 * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
29 */
30#ifdef CONFIG_X86_32
31#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR)
32#else
33#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
34#endif
35#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
36#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
37#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
38#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
39#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
40#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
41#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
42#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
43#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
44#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
45#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
46#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
47#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
48#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
49#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
50
51/*
52 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
53 *
54 * some of the following vectors are 'rare', they are merged
55 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
56 * TLB, reschedule and local APIC vectors are performance-critical.
57 *
58 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
59 */
60#ifdef CONFIG_X86_32
61
62# define SPURIOUS_APIC_VECTOR 0xff
63# define ERROR_APIC_VECTOR 0xfe
64# define INVALIDATE_TLB_VECTOR 0xfd
65# define RESCHEDULE_VECTOR 0xfc
66# define CALL_FUNCTION_VECTOR 0xfb
67# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
68# define THERMAL_APIC_VECTOR 0xf0
69
70#else
71
72#define SPURIOUS_APIC_VECTOR 0xff
73#define ERROR_APIC_VECTOR 0xfe
74#define RESCHEDULE_VECTOR 0xfd
75#define CALL_FUNCTION_VECTOR 0xfc
76#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
77#define THERMAL_APIC_VECTOR 0xfa
78#define THRESHOLD_APIC_VECTOR 0xf9
79#define INVALIDATE_TLB_VECTOR_END 0xf7
80#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
81
82#define NUM_INVALIDATE_TLB_VECTORS 8
83
84#endif
85
86/*
87 * Local APIC timer IRQ vector is on a different priority level,
88 * to work around the 'lost local interrupt if more than 2 IRQ
89 * sources per level' errata.
90 */
91#define LOCAL_TIMER_VECTOR 0xef
92
93/*
94 * First APIC vector available to drivers: (vectors 0x30-0xee) we
 95 * start at 0x31 (0x41 on 64 bit) to spread out vectors evenly between priority
96 * levels. (0x80 is the syscall vector)
97 */
98#ifdef CONFIG_X86_32
99# define FIRST_DEVICE_VECTOR 0x31
100#else
101# define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
102#endif
103
104#define NR_VECTORS 256
105
106#define FPU_IRQ 13
107
108#define FIRST_VM86_IRQ 3
109#define LAST_VM86_IRQ 15
110#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
111
112#if !defined(CONFIG_X86_VOYAGER)
113
114# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
115
116# define NR_IRQS 224
117
118# if (224 >= 32 * NR_CPUS)
119# define NR_IRQ_VECTORS NR_IRQS
120# else
121# define NR_IRQ_VECTORS (32 * NR_CPUS)
122# endif
123
124# else /* IO_APIC || PARAVIRT */
125
126# define NR_IRQS 16
127# define NR_IRQ_VECTORS NR_IRQS
128
129# endif
130
 131#else /* CONFIG_X86_VOYAGER */
132
133# define NR_IRQS 224
134# define NR_IRQ_VECTORS NR_IRQS
135
 136#endif /* CONFIG_X86_VOYAGER */
137
138/* Voyager specific defines */
139/* These define the CPIs we use in linux */
140#define VIC_CPI_LEVEL0 0
141#define VIC_CPI_LEVEL1 1
142/* now the fake CPIs */
143#define VIC_TIMER_CPI 2
144#define VIC_INVALIDATE_CPI 3
145#define VIC_RESCHEDULE_CPI 4
146#define VIC_ENABLE_IRQ_CPI 5
147#define VIC_CALL_FUNCTION_CPI 6
148#define VIC_CALL_FUNCTION_SINGLE_CPI 7
149
150/* Now the QIC CPIs: Since we don't need the two initial levels,
151 * these are 2 less than the VIC CPIs */
152#define QIC_CPI_OFFSET 1
153#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
154#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
155#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
156#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
157#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
158#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
159
160#define VIC_START_FAKE_CPI VIC_TIMER_CPI
161#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
162
163/* this is the SYS_INT CPI. */
164#define VIC_SYS_INT 8
165#define VIC_CMN_INT 15
166
167/* This is the boot CPI for alternate processors. It gets overwritten
168 * by the above once the system has activated all available processors */
169#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
170#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
171
172
173#endif /* _ASM_IRQ_VECTORS_H */
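
The spreading mentioned in the FIRST_DEVICE_VECTOR comment follows from how the local APIC prioritises vectors: bits 7:4 of a vector select its priority class, so vectors sharing a high nibble compete with each other. A minimal, stand-alone sketch of that arithmetic (the helper function is hypothetical, not part of this header):

#include <stdio.h>

/* Hypothetical illustration: the APIC treats bits 7:4 of a vector as its
 * priority class, which is why device vectors are spread out starting at
 * 0x31 rather than packed upward from 0x30. */
static unsigned int vector_priority_class(unsigned int vector)
{
	return vector >> 4;
}

int main(void)
{
	printf("0x31 (first device)  -> class %u\n", vector_priority_class(0x31)); /* 3  */
	printf("0x80 (syscall)       -> class %u\n", vector_priority_class(0x80)); /* 8  */
	printf("0xef (local timer)   -> class %u\n", vector_priority_class(0xef)); /* 14 */
	return 0;
}
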
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index c242527f970e..424acb48cd61 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -111,14 +111,35 @@ static inline unsigned long __raw_local_irq_save(void)
 #define DISABLE_INTERRUPTS(x)	cli
 
 #ifdef CONFIG_X86_64
+#define SWAPGS	swapgs
+/*
+ * Currently paravirt can't handle swapgs nicely when we
+ * don't have a stack we can rely on (such as a user space
+ * stack). So we either find a way around these or just fault
+ * and emulate if a guest tries to call swapgs directly.
+ *
+ * Either way, this is a good way to document that we don't
+ * have a reliable stack. x86_64 only.
+ */
+#define SWAPGS_UNSAFE_STACK	swapgs
+
+#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */
+
 #define INTERRUPT_RETURN	iretq
-#define ENABLE_INTERRUPTS_SYSCALL_RET			\
-			movq	%gs:pda_oldrsp, %rsp;	\
-			swapgs;				\
-			sysretq;
+#define USERGS_SYSRET64				\
+	swapgs;					\
+	sysretq;
+#define USERGS_SYSRET32				\
+	swapgs;					\
+	sysretl
+#define ENABLE_INTERRUPTS_SYSEXIT32		\
+	swapgs;					\
+	sti;					\
+	sysexit
+
 #else
 #define INTERRUPT_RETURN		iret
-#define ENABLE_INTERRUPTS_SYSCALL_RET	sti; sysexit
+#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
 #define GET_CR0_INTO_EAX		movl %cr0, %eax
 #endif
 
@@ -169,18 +190,6 @@ static inline void trace_hardirqs_fixup(void)
 #else
 
 #ifdef CONFIG_X86_64
-/*
- * Currently paravirt can't handle swapgs nicely when we
- * don't have a stack we can rely on (such as a user space
- * stack). So we either find a way around these or just fault
- * and emulate if a guest tries to call swapgs directly.
- *
- * Either way, this is a good way to document that we don't
- * have a reliable stack. x86_64 only.
- */
-#define SWAPGS_UNSAFE_STACK	swapgs
-#define ARCH_TRACE_IRQS_ON	call trace_hardirqs_on_thunk
-#define ARCH_TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
 #define ARCH_LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
 #define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
 	TRACE_IRQS_ON; \
@@ -192,24 +201,6 @@ static inline void trace_hardirqs_fixup(void)
 	TRACE_IRQS_OFF;
 
 #else
-#define ARCH_TRACE_IRQS_ON			\
-	pushl %eax;				\
-	pushl %ecx;				\
-	pushl %edx;				\
-	call trace_hardirqs_on;			\
-	popl %edx;				\
-	popl %ecx;				\
-	popl %eax;
-
-#define ARCH_TRACE_IRQS_OFF			\
-	pushl %eax;				\
-	pushl %ecx;				\
-	pushl %edx;				\
-	call trace_hardirqs_off;		\
-	popl %edx;				\
-	popl %ecx;				\
-	popl %eax;
-
 #define ARCH_LOCKDEP_SYS_EXIT			\
 	pushl %eax;				\
 	pushl %ecx;				\
@@ -223,8 +214,8 @@ static inline void trace_hardirqs_fixup(void)
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON	ARCH_TRACE_IRQS_ON
-# define TRACE_IRQS_OFF	ARCH_TRACE_IRQS_OFF
+# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
+# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
 #else
 # define TRACE_IRQS_ON
 # define TRACE_IRQS_OFF
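
At the C level, the primitives declared in this header back the usual save/restore idiom for short critical sections; a hedged sketch of that canonical usage (the surrounding function is invented for illustration):

/* Canonical usage of the irqflags primitives this header backs;
 * update_shared_state() is illustrative only. */
static void update_shared_state(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable IRQs, remember old IF state */
	/* ... touch per-CPU or interrupt-shared data ... */
	local_irq_restore(flags);	/* put IF back exactly as it was       */
}
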
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index bfd9900742bf..76f392146daa 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -71,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr)
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr));
+		     : "a"(nr)
+		     : "memory");
 	return ret;
 }
 
@@ -80,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1));
+		     : "a"(nr), "b"(p1)
+		     : "memory");
 	return ret;
 }
 
@@ -90,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1), "c"(p2));
+		     : "a"(nr), "b"(p1), "c"(p2)
+		     : "memory");
 	return ret;
 }
 
@@ -100,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
+		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+		     : "memory");
 	return ret;
 }
 
@@ -111,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	long ret;
 	asm volatile(KVM_HYPERCALL
 		     : "=a"(ret)
-		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
+		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
+		     : "memory");
 	return ret;
 }
 
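
The new "memory" clobber tells GCC the hypercall may read or write memory it cannot see through the operands, so values cached in registers must be assumed stale afterwards. A sketch of the failure mode this prevents (hypercall number 42 and the shared status page are made up):

/* Hypothetical: assume hypercall nr 42 updates 'status' via a shared page. */
static int poll_status(volatile int *status)
{
	int before = *status;

	kvm_hypercall1(42, (unsigned long)status);
	/*
	 * With the "memory" clobber the compiler must reload *status here;
	 * without it, it could legally hand back 'before' again.
	 */
	return *status - before;
}
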
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h
index 8327907c79bf..017c8c19ad8f 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h
@@ -81,7 +81,7 @@ static inline int multi_timer_check(int apic, int irq)
 
 static inline int apicid_to_node(int logical_apicid)
 {
-	return (0);
+	return apicid_2_node[hard_smp_processor_id()];
 }
 
 static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-x86/mach-bigsmp/mach_mpspec.h b/include/asm-x86/mach-bigsmp/mach_mpspec.h
deleted file mode 100644
index 6b5dadcf1d0e..000000000000
--- a/include/asm-x86/mach-bigsmp/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#define MAX_MP_BUSSES 32
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc861469bdba..9283b60a1dd2 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif
 
 /*
diff --git a/include/asm-x86/mach-default/irq_vectors.h b/include/asm-x86/mach-default/irq_vectors.h
deleted file mode 100644
index 881c63ca61ad..000000000000
--- a/include/asm-x86/mach-default/irq_vectors.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * This file should contain #defines for all of the interrupt vector
3 * numbers used by this architecture.
4 *
5 * In addition, there are some standard defines:
6 *
7 * FIRST_EXTERNAL_VECTOR:
8 * The first free place for external interrupts
9 *
10 * SYSCALL_VECTOR:
11 * The IRQ vector a syscall makes the user to kernel transition
12 * under.
13 *
14 * TIMER_IRQ:
15 * The IRQ number the timer interrupt comes in at.
16 *
17 * NR_IRQS:
18 * The total number of interrupt vectors (including all the
19 * architecture specific interrupts) needed.
20 *
21 */
22#ifndef _ASM_IRQ_VECTORS_H
23#define _ASM_IRQ_VECTORS_H
24
25/*
26 * IDT vectors usable for external interrupt sources start
27 * at 0x20:
28 */
29#define FIRST_EXTERNAL_VECTOR 0x20
30
31#define SYSCALL_VECTOR 0x80
32
33/*
34 * Vectors 0x20-0x2f are used for ISA interrupts.
35 */
36
37/*
38 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
39 *
40 * some of the following vectors are 'rare', they are merged
41 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
42 * TLB, reschedule and local APIC vectors are performance-critical.
43 *
44 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
45 */
46#define SPURIOUS_APIC_VECTOR 0xff
47#define ERROR_APIC_VECTOR 0xfe
48#define INVALIDATE_TLB_VECTOR 0xfd
49#define RESCHEDULE_VECTOR 0xfc
50#define CALL_FUNCTION_VECTOR 0xfb
51
52#define THERMAL_APIC_VECTOR 0xf0
53/*
54 * Local APIC timer IRQ vector is on a different priority level,
55 * to work around the 'lost local interrupt if more than 2 IRQ
56 * sources per level' errata.
57 */
58#define LOCAL_TIMER_VECTOR 0xef
59
60/*
61 * First APIC vector available to drivers: (vectors 0x30-0xee)
62 * we start at 0x31 to spread out vectors evenly between priority
63 * levels. (0x80 is the syscall vector)
64 */
65#define FIRST_DEVICE_VECTOR 0x31
66#define FIRST_SYSTEM_VECTOR 0xef
67
68#define TIMER_IRQ 0
69
70/*
71 * 16 8259A IRQ's, 208 potential APIC interrupt sources.
72 * Right now the APIC is mostly only used for SMP.
73 * 256 vectors is an architectural limit. (we can have
74 * more than 256 devices theoretically, but they will
75 * have to use shared interrupts)
76 * Since vectors 0x00-0x1f are used/reserved for the CPU,
77 * the usable vector space is 0x20-0xff (224 vectors)
78 */
79
80/*
81 * The maximum number of vectors supported by i386 processors
82 * is limited to 256. For processors other than i386, NR_VECTORS
83 * should be changed accordingly.
84 */
85#define NR_VECTORS 256
86
87#include "irq_vectors_limits.h"
88
89#define FPU_IRQ 13
90
91#define FIRST_VM86_IRQ 3
92#define LAST_VM86_IRQ 15
93#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
94
95
96#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mach-default/irq_vectors_limits.h b/include/asm-x86/mach-default/irq_vectors_limits.h
deleted file mode 100644
index a90c7a60109f..000000000000
--- a/include/asm-x86/mach-default/irq_vectors_limits.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H
3
4#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
5#define NR_IRQS 224
6# if (224 >= 32 * NR_CPUS)
7# define NR_IRQ_VECTORS NR_IRQS
8# else
9# define NR_IRQ_VECTORS (32 * NR_CPUS)
10# endif
11#else
12#define NR_IRQS 16
13#define NR_IRQ_VECTORS NR_IRQS
14#endif
15
16#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index 21003b56ae95..0b2cde5e1b74 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -77,7 +77,11 @@ static inline void setup_apic_routing(void)
 
 static inline int apicid_to_node(int logical_apicid)
 {
+#ifdef CONFIG_SMP
+	return apicid_2_node[hard_smp_processor_id()];
+#else
 	return 0;
+#endif
 }
 #endif
 
diff --git a/include/asm-x86/mach-default/setup_arch.h b/include/asm-x86/mach-default/setup_arch.h
index 605e3ccb991b..38846208b548 100644
--- a/include/asm-x86/mach-default/setup_arch.h
+++ b/include/asm-x86/mach-default/setup_arch.h
@@ -1,7 +1,3 @@
 /* Hook to call BIOS initialisation function */
 
 /* no action for generic */
-
-#ifndef ARCH_SETUP
-#define ARCH_SETUP
-#endif
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 56d0e1fa0258..56d001b9dce4 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -3,7 +3,9 @@
 
 static inline void smpboot_clear_io_apic_irqs(void)
 {
+#ifdef CONFIG_X86_IO_APIC
 	io_apic_irqs = 0;
+#endif
 }
 
 static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
@@ -35,17 +37,23 @@ static inline void smpboot_restore_warm_reset_vector(void)
 
 static inline void __init smpboot_setup_io_apic(void)
 {
+#ifdef CONFIG_X86_IO_APIC
 	/*
 	 * Here we can be sure that there is an IO-APIC in the system. Let's
 	 * go and set it up:
 	 */
 	if (!skip_ioapic_setup && nr_ioapics)
 		setup_IO_APIC();
-	else
+	else {
 		nr_ioapics = 0;
+		localise_nmi_watchdog();
+	}
+#endif
 }
 
 static inline void smpboot_clear_io_apic(void)
 {
+#ifdef CONFIG_X86_IO_APIC
 	nr_ioapics = 0;
+#endif
 }
diff --git a/include/asm-x86/mach-es7000/mach_mpspec.h b/include/asm-x86/mach-es7000/mach_mpspec.h
deleted file mode 100644
index b1f5039d4506..000000000000
--- a/include/asm-x86/mach-es7000/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6#define MAX_MP_BUSSES 256
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
index 0d0b5ba2e9d1..586cadbf3787 100644
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ b/include/asm-x86/mach-generic/mach_mpparse.h
@@ -1,7 +1,10 @@
 #ifndef _MACH_MPPARSE_H
 #define _MACH_MPPARSE_H 1
 
-int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid);
-int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
+
+extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
+			 char *productid);
+
+extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
 
 #endif
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h
index 75a56e5afbe7..d802465e026a 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/mach-numaq/mach_apic.h
@@ -20,8 +20,14 @@ static inline cpumask_t target_cpus(void)
 #define INT_DELIVERY_MODE dest_LowestPrio
 #define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */
 
-#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap)
-#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map)
+static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+	return physid_isset(apicid, bitmap);
+}
+static inline unsigned long check_apicid_present(int bit)
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
 #define apicid_cluster(apicid) (apicid & 0xF0)
 
 static inline int apic_id_registered(void)
@@ -77,11 +83,6 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 		return BAD_APICID;
 }
 
-static inline int generate_logical_apicid(int quad, int phys_apicid)
-{
-	return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
-}
-
 static inline int apicid_to_node(int logical_apicid)
 {
 	return logical_apicid >> 4;
@@ -95,30 +96,6 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
 	return physid_mask_of_physid(cpu + 4*node);
 }
 
-struct mpc_config_translation {
-	unsigned char mpc_type;
-	unsigned char trans_len;
-	unsigned char trans_type;
-	unsigned char trans_quad;
-	unsigned char trans_global;
-	unsigned char trans_local;
-	unsigned short trans_reserved;
-};
-
-static inline int mpc_apic_id(struct mpc_config_processor *m,
-			struct mpc_config_translation *translation_record)
-{
-	int quad = translation_record->trans_quad;
-	int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
-
-	printk("Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver, quad, logical_apicid);
-	return logical_apicid;
-}
-
 extern void *xquad_portio;
 
 static inline void setup_portio_remap(void)
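
The quad routing above hinges on the NUMA-Q logical APIC ID layout: the quad (node) number sits in the high nibble, which is exactly what apicid_to_node() and apicid_cluster() extract. A small hypothetical decoder mirroring those helpers:

/* Hypothetical decoder mirroring apicid_to_node()/apicid_cluster() above. */
static void decode_numaq_apicid(int logical_apicid, int *node, int *cluster)
{
	*node = logical_apicid >> 4;		/* e.g. 0x23 -> quad 2  */
	*cluster = logical_apicid & 0xF0;	/* e.g. 0x23 -> 0x20    */
}
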
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
index 459b12401187..626aef6b155f 100644
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ b/include/asm-x86/mach-numaq/mach_mpparse.h
@@ -1,14 +1,7 @@
 #ifndef __ASM_MACH_MPPARSE_H
 #define __ASM_MACH_MPPARSE_H
 
-extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
-			     struct mpc_config_translation *translation);
-extern void mpc_oem_pci_bus(struct mpc_config_bus *m,
-			    struct mpc_config_translation *translation);
-
-/* Hook from generic ACPI tables.c */
-static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-}
+extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
+				char *productid);
 
 #endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-numaq/mach_mpspec.h b/include/asm-x86/mach-numaq/mach_mpspec.h
deleted file mode 100644
index dffb09856f8f..000000000000
--- a/include/asm-x86/mach-numaq/mach_mpspec.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 512
5
6#define MAX_MP_BUSSES 32
7
8#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-summit/mach_mpspec.h b/include/asm-x86/mach-summit/mach_mpspec.h
deleted file mode 100644
index bd765523511a..000000000000
--- a/include/asm-x86/mach-summit/mach_mpspec.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H
3
4#define MAX_IRQ_SOURCES 256
5
6/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
7#define MAX_MP_BUSSES 260
8
9#endif /* __ASM_MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
index b183fa6d83d9..86be554342d4 100644
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ b/include/asm-x86/mach-visws/entry_arch.h
@@ -1,23 +1,5 @@
 /*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
+ * VISWS uses the standard Linux entry points:
  */
-#ifdef CONFIG_X86_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-#endif
 
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#endif
+#include "../mach-default/entry_arch.h"
diff --git a/include/asm-x86/mach-visws/irq_vectors.h b/include/asm-x86/mach-visws/irq_vectors.h
deleted file mode 100644
index cb572d8db505..000000000000
--- a/include/asm-x86/mach-visws/irq_vectors.h
+++ /dev/null
@@ -1,62 +0,0 @@
1#ifndef _ASM_IRQ_VECTORS_H
2#define _ASM_IRQ_VECTORS_H
3
4/*
5 * IDT vectors usable for external interrupt sources start
6 * at 0x20:
7 */
8#define FIRST_EXTERNAL_VECTOR 0x20
9
10#define SYSCALL_VECTOR 0x80
11
12/*
13 * Vectors 0x20-0x2f are used for ISA interrupts.
14 */
15
16/*
17 * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
18 *
19 * some of the following vectors are 'rare', they are merged
20 * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
21 * TLB, reschedule and local APIC vectors are performance-critical.
22 *
23 * Vectors 0xf0-0xfa are free (reserved for future Linux use).
24 */
25#define SPURIOUS_APIC_VECTOR 0xff
26#define ERROR_APIC_VECTOR 0xfe
27#define INVALIDATE_TLB_VECTOR 0xfd
28#define RESCHEDULE_VECTOR 0xfc
29#define CALL_FUNCTION_VECTOR 0xfb
30
31#define THERMAL_APIC_VECTOR 0xf0
32/*
33 * Local APIC timer IRQ vector is on a different priority level,
34 * to work around the 'lost local interrupt if more than 2 IRQ
35 * sources per level' errata.
36 */
37#define LOCAL_TIMER_VECTOR 0xef
38
39/*
40 * First APIC vector available to drivers: (vectors 0x30-0xee)
41 * we start at 0x31 to spread out vectors evenly between priority
42 * levels. (0x80 is the syscall vector)
43 */
44#define FIRST_DEVICE_VECTOR 0x31
45#define FIRST_SYSTEM_VECTOR 0xef
46
47#define TIMER_IRQ 0
48
49/*
50 * IRQ definitions
51 */
52#define NR_VECTORS 256
53#define NR_IRQS 224
54#define NR_IRQ_VECTORS NR_IRQS
55
56#define FPU_IRQ 13
57
58#define FIRST_VM86_IRQ 3
59#define LAST_VM86_IRQ 15
60#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
61
62#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h
index a9ef33a8a995..6943e7a1d0e6 100644
--- a/include/asm-x86/mach-visws/mach_apic.h
+++ b/include/asm-x86/mach-visws/mach_apic.h
@@ -1,103 +1 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#include <mach_apicdef.h>
-#include <asm/smp.h>
-
-#define APIC_DFR_VALUE	(APIC_DFR_FLAT)
-
-#define no_balance_irq (0)
-#define esr_disable (0)
-
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
-
-#ifdef CONFIG_SMP
- #define TARGET_CPUS cpu_online_map
-#else
- #define TARGET_CPUS cpumask_of_cpu(0)
-#endif
-
-#define check_apicid_used(bitmap, apicid)	physid_isset(apicid, bitmap)
-#define check_apicid_present(bit)		physid_isset(bit, phys_cpu_present_map)
-
-static inline int apic_id_registered(void)
-{
-	return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map);
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-	unsigned long val;
-
-	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
-	apic_write_around(APIC_LDR, val);
-}
-
-static inline void summit_check(char *oem, char *productid)
-{
-}
-
-static inline void setup_apic_routing(void)
-{
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-	return 0;
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-	return 1 << cpu;
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-	if (mps_cpu < get_physical_broadcast())
-		return mps_cpu;
-	else
-		return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int apicid)
-{
-	return physid_mask_of_physid(apicid);
-}
-
-#define WAKE_SECONDARY_VIA_INIT
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
-	return cpus_addr(cpumask)[0];
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-	return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
+#include "../mach-default/mach_apic.h"
diff --git a/include/asm-x86/mach-visws/mach_apicdef.h b/include/asm-x86/mach-visws/mach_apicdef.h
index 826cfa97d778..42711d152a93 100644
--- a/include/asm-x86/mach-visws/mach_apicdef.h
+++ b/include/asm-x86/mach-visws/mach_apicdef.h
@@ -1,12 +1 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define		APIC_ID_MASK		(0xF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-	return (((x)>>24)&0xF);
-}
-#define		GET_APIC_ID(x)	get_apic_id(x)
-
-#endif
+#include "../mach-default/mach_apicdef.h"
diff --git a/include/asm-x86/mach-visws/setup_arch.h b/include/asm-x86/mach-visws/setup_arch.h
index 33f700ef6831..fa4766ca2d10 100644
--- a/include/asm-x86/mach-visws/setup_arch.h
+++ b/include/asm-x86/mach-visws/setup_arch.h
@@ -1,8 +1 @@
-/* Hook to call BIOS initialisation function */
-
-extern unsigned long sgivwfb_mem_phys;
-extern unsigned long sgivwfb_mem_size;
-
-/* no action for visws */
-
-#define ARCH_SETUP
+#include "../mach-default/setup_arch.h"
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h
index c9b83e395a2e..e4433ca88715 100644
--- a/include/asm-x86/mach-visws/smpboot_hooks.h
+++ b/include/asm-x86/mach-visws/smpboot_hooks.h
@@ -1,28 +1 @@
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
-	CMOS_WRITE(0xa, 0xf);
-	local_flush_tlb();
-	Dprintk("1.\n");
-	*((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
-	Dprintk("2.\n");
-	*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
-	Dprintk("3.\n");
-}
-
-/* for visws do nothing for any of these */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
-}
-
-static inline void smpboot_setup_io_apic(void)
-{
-}
-
-static inline void smpboot_clear_io_apic(void)
-{
-}
+#include "../mach-default/smpboot_hooks.h"
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8c10b6..ae52624b5937 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
 BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
 BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
 BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/mach-voyager/irq_vectors.h b/include/asm-x86/mach-voyager/irq_vectors.h
deleted file mode 100644
index 165421f5821c..000000000000
--- a/include/asm-x86/mach-voyager/irq_vectors.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/* -*- mode: c; c-basic-offset: 8 -*- */
2
3/* Copyright (C) 2002
4 *
5 * Author: James.Bottomley@HansenPartnership.com
6 *
7 * linux/arch/i386/voyager/irq_vectors.h
8 *
9 * This file provides definitions for the VIC and QIC CPIs
10 */
11
12#ifndef _ASM_IRQ_VECTORS_H
13#define _ASM_IRQ_VECTORS_H
14
15/*
16 * IDT vectors usable for external interrupt sources start
17 * at 0x20:
18 */
19#define FIRST_EXTERNAL_VECTOR 0x20
20
21#define SYSCALL_VECTOR 0x80
22
23/*
24 * Vectors 0x20-0x2f are used for ISA interrupts.
25 */
26
27/* These define the CPIs we use in linux */
28#define VIC_CPI_LEVEL0 0
29#define VIC_CPI_LEVEL1 1
30/* now the fake CPIs */
31#define VIC_TIMER_CPI 2
32#define VIC_INVALIDATE_CPI 3
33#define VIC_RESCHEDULE_CPI 4
34#define VIC_ENABLE_IRQ_CPI 5
35#define VIC_CALL_FUNCTION_CPI 6
36
37/* Now the QIC CPIs: Since we don't need the two initial levels,
38 * these are 2 less than the VIC CPIs */
39#define QIC_CPI_OFFSET 1
40#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
41#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
42#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
43#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
44#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
45
46#define VIC_START_FAKE_CPI VIC_TIMER_CPI
47#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
48
49/* this is the SYS_INT CPI. */
50#define VIC_SYS_INT 8
51#define VIC_CMN_INT 15
52
53/* This is the boot CPI for alternate processors. It gets overwritten
54 * by the above once the system has activated all available processors */
55#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
56#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
57
58#define NR_VECTORS 256
59#define NR_IRQS 224
60#define NR_IRQ_VECTORS NR_IRQS
61
62#define FPU_IRQ 13
63
64#define FIRST_VM86_IRQ 3
65#define LAST_VM86_IRQ 15
66#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
67
68#ifndef __ASSEMBLY__
69extern asmlinkage void vic_cpi_interrupt(void);
70extern asmlinkage void vic_sys_interrupt(void);
71extern asmlinkage void vic_cmn_interrupt(void);
72extern asmlinkage void qic_timer_interrupt(void);
73extern asmlinkage void qic_invalidate_interrupt(void);
74extern asmlinkage void qic_reschedule_interrupt(void);
75extern asmlinkage void qic_enable_irq_interrupt(void);
76extern asmlinkage void qic_call_function_interrupt(void);
77#endif /* !__ASSEMBLY__ */
78
79#endif /* _ASM_IRQ_VECTORS_H */
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
new file mode 100644
index 000000000000..95beda07c6fa
--- /dev/null
+++ b/include/asm-x86/mmconfig.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_MMCONFIG_H
2#define _ASM_MMCONFIG_H
3
4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void);
6extern void __init check_enable_amd_mmconf_dmi(void);
7#else
8static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { }
10#endif
11
12#endif
diff --git a/include/asm-x86/mmu_context.h b/include/asm-x86/mmu_context.h
index 6598450da6c6..fac57014e7c6 100644
--- a/include/asm-x86/mmu_context.h
+++ b/include/asm-x86/mmu_context.h
@@ -1,5 +1,37 @@
+#ifndef __ASM_X86_MMU_CONTEXT_H
+#define __ASM_X86_MMU_CONTEXT_H
+
+#include <asm/desc.h>
+#include <asm/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/paravirt.h>
+#ifndef CONFIG_PARAVIRT
+#include <asm-generic/mm_hooks.h>
+
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+					struct mm_struct *next)
+{
+}
+#endif	/* !CONFIG_PARAVIRT */
+
+/*
+ * Used for LDT copy/destruction.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
 #ifdef CONFIG_X86_32
 # include "mmu_context_32.h"
 #else
 # include "mmu_context_64.h"
 #endif
+
+#define activate_mm(prev, next)			\
+do {						\
+	paravirt_activate_mm((prev), (next));	\
+	switch_mm((prev), (next), NULL);	\
+} while (0);
+
+
+#endif /* __ASM_X86_MMU_CONTEXT_H */
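
With CONFIG_PARAVIRT disabled, paravirt_activate_mm() is the empty inline above, so activate_mm() collapses to a bare switch_mm() after inlining; a sketch of the expansion (illustrative fragment, not new code in the header):

/* activate_mm(prev, next) expands to roughly this: */
do {
	paravirt_activate_mm(prev, next);	/* no-op stub when !CONFIG_PARAVIRT */
	switch_mm(prev, next, NULL);		/* the actual address-space switch  */
} while (0);
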
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 9756ae0f1dd3..824fc575c6d8 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -1,28 +1,6 @@
 #ifndef __I386_SCHED_H
 #define __I386_SCHED_H
 
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/paravirt.h>
-#ifndef CONFIG_PARAVIRT
-#include <asm-generic/mm_hooks.h>
-
-static inline void paravirt_activate_mm(struct mm_struct *prev,
-					struct mm_struct *next)
-{
-}
-#endif	/* !CONFIG_PARAVIRT */
-
-
-/*
- * Used for LDT copy/destruction.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
@@ -75,10 +53,4 @@ static inline void switch_mm(struct mm_struct *prev,
 #define deactivate_mm(tsk, mm)			\
 	asm("movl %0,%%gs": :"r" (0));
 
-#define activate_mm(prev, next)			\
-do {						\
-	paravirt_activate_mm((prev), (next));	\
-	switch_mm((prev), (next), NULL);	\
-} while (0);
-
 #endif
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index ca44c71e7fb3..c7000634ccae 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -1,21 +1,7 @@
 #ifndef __X86_64_MMU_CONTEXT_H
 #define __X86_64_MMU_CONTEXT_H
 
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
 #include <asm/pda.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#ifndef CONFIG_PARAVIRT
-#include <asm-generic/mm_hooks.h>
-#endif
-
-/*
- * possibly do the LDT unload here?
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
@@ -65,8 +51,4 @@ do { \
 	asm volatile("movl %0,%%fs"::"r"(0));	\
 } while (0)
 
-#define activate_mm(prev, next) \
-	switch_mm((prev), (next), NULL)
-
-
 #endif
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index cb2cad0b65a7..b2298a227567 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -12,11 +12,9 @@
 extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)	(node_data[nid])
 
-#ifdef CONFIG_X86_NUMAQ
-	#include <asm/numaq.h>
-#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */
-	#include <asm/srat.h>
-#endif
+#include <asm/numaq.h>
+/* summit or generic arch */
+#include <asm/srat.h>
 
 extern int get_memcfg_numa_flat(void);
 /*
@@ -26,28 +24,20 @@ extern int get_memcfg_numa_flat(void);
  */
 static inline void get_memcfg_numa(void)
 {
-#ifdef CONFIG_X86_NUMAQ
+
 	if (get_memcfg_numaq())
 		return;
-#elif defined(CONFIG_ACPI_SRAT)
 	if (get_memcfg_from_srat())
 		return;
-#endif
-
 	get_memcfg_numa_flat();
 }
 
 extern int early_pfn_to_nid(unsigned long pfn);
-extern void numa_kva_reserve(void);
 
 #else /* !CONFIG_NUMA */
 
 #define get_memcfg_numa get_memcfg_numa_flat
-#define get_zholes_size(n) (0)
 
-static inline void numa_kva_reserve(void)
-{
-}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
@@ -55,14 +45,14 @@ static inline void numa_kva_reserve(void)
 /*
  * generic node memory support, the following assumptions apply:
  *
- * 1) memory comes in 256Mb contigious chunks which are either present or not
+ * 1) memory comes in 64Mb contigious chunks which are either present or not
  * 2) we will not have more than 64Gb in total
  *
  * for now assume that 64Gb is max amount of RAM for whole system
  *    64Gb / 4096bytes/page = 16777216 pages
  */
 #define MAX_NR_PAGES 16777216
-#define MAX_ELEMENTS 256
+#define MAX_ELEMENTS 1024
 #define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)
 
 extern s8 physnode_map[];
@@ -87,9 +77,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;	\
 })
 
-#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */
-#define pfn_valid(pfn)          ((pfn) < num_physpages)
-#else
 static inline int pfn_valid(int pfn)
 {
 	int nid = pfn_to_nid(pfn);
@@ -98,7 +85,6 @@ static inline int pfn_valid(int pfn)
 	return (pfn < node_end_pfn(nid));
 	return 0;
 }
-#endif /* CONFIG_X86_NUMAQ */
 
 #endif /* CONFIG_DISCONTIGMEM */
 
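
The 64Mb figure in the updated comment is exactly what MAX_ELEMENTS = 1024 buys: 16777216 pages / 1024 slots = 16384 pages, i.e. 64MB of physical address space per physnode_map entry. A stand-alone sketch of the lookup pfn_to_nid() performs (the map contents here are invented):

#include <stdio.h>

#define MAX_NR_PAGES		16777216			/* 64GB / 4KB    */
#define MAX_ELEMENTS		1024
#define PAGES_PER_ELEMENT	(MAX_NR_PAGES / MAX_ELEMENTS)	/* 16384 = 64MB  */

int main(void)
{
	/* Hypothetical map: first two 64MB chunks on node 0, third on node 1. */
	signed char physnode_map[MAX_ELEMENTS] = { [0] = 0, [1] = 0, [2] = 1 };
	unsigned long pfn = 0x9000;	/* a pfn inside the third 64MB chunk */

	printf("pfn %#lx -> node %d\n", pfn,
	       physnode_map[pfn / PAGES_PER_ELEMENT]);	/* prints node 1 */
	return 0;
}
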
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 57a991b9c053..b6995e567fcc 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -13,6 +13,12 @@ extern int apic_version[MAX_APICS];
 extern u8 apicid_2_node[];
 extern int pic_mode;
 
+#ifdef CONFIG_X86_NUMAQ
+extern int mp_bus_id_to_node[MAX_MP_BUSSES];
+extern int mp_bus_id_to_local[MAX_MP_BUSSES];
+extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
+#endif
+
 #define MAX_APICID 256
 
 #else
@@ -21,26 +27,30 @@ extern int pic_mode;
 /* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
 #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
 
+#endif
+
 extern void early_find_smp_config(void);
 extern void early_get_smp_config(void);
 
-#endif
-
 #if defined(CONFIG_MCA) || defined(CONFIG_EISA)
 extern int mp_bus_id_to_type[MAX_MP_BUSSES];
 #endif
 
 extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
 
-extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES];
-
 extern unsigned int boot_cpu_physical_apicid;
+extern unsigned int max_physical_apicid;
 extern int smp_found_config;
 extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 
 extern void find_smp_config(void);
 extern void get_smp_config(void);
+#ifdef CONFIG_X86_MPPARSE
+extern void early_reserve_e820_mpc_new(void);
+#else
+static inline void early_reserve_e820_mpc_new(void) { }
+#endif
 
 void __cpuinit generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
@@ -49,6 +59,17 @@ extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 				   u32 gsi);
 extern void mp_config_acpi_legacy_irqs(void);
 extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
+#ifdef CONFIG_X86_IO_APIC
+extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
+			      u32 gsi, int triggering, int polarity);
+#else
+static inline int
+mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
+		   u32 gsi, int triggering, int polarity)
+{
+	return 0;
+}
+#endif
 #endif /* CONFIG_ACPI */
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
@@ -101,6 +122,7 @@ typedef struct physid_mask physid_mask_t;
 		__physid_mask;						\
 	})
 
+/* Note: will create very large stack frames if physid_mask_t is big */
 #define physid_mask_of_physid(physid)					\
 	({								\
 		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
@@ -108,6 +130,12 @@ typedef struct physid_mask physid_mask_t;
 		__physid_mask;						\
 	})
 
+static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
+{
+	physids_clear(*map);
+	physid_set(physid, *map);
+}
+
 #define PHYSID_MASK_ALL		{ {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
 #define PHYSID_MASK_NONE	{ {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
 
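
physid_set_mask_of_physid() exists because the macro version, as its new comment warns, materialises an entire physid_mask_t temporary on the stack, which gets expensive once MAX_APICS can be 32768 bits wide (see mpspec_def.h below); the helper fills a caller-provided mask through a pointer instead. A usage sketch:

/* Sketch: fill a caller-provided mask in place... */
physid_mask_t present;

physid_set_mask_of_physid(boot_cpu_physical_apicid, &present);

/* ...instead of copying a whole mask through the stack: */
/* present = physid_mask_of_physid(boot_cpu_physical_apicid); */
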
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index dc6ef85e3624..38d1e73b49e4 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -17,10 +17,11 @@
 # define MAX_MPC_ENTRY 1024
 # define MAX_APICS     256
 #else
-/*
- * A maximum of 255 APICs with the current APIC ID architecture.
- */
-# define MAX_APICS 255
+# if NR_CPUS <= 255
+#  define MAX_APICS  255
+# else
+#  define MAX_APICS 32768
+# endif
 #endif
 
 struct intel_mp_floating {
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 09413ad39d3c..44bce773012e 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -111,7 +111,9 @@
 #define MSR_K8_TOP_MEM2			0xc001001d
 #define MSR_K8_SYSCFG			0xc0010010
 #define MSR_K8_HWCR			0xc0010015
-#define MSR_K8_ENABLE_C1E		0xc0010055
+#define MSR_K8_INT_PENDING_MSG		0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK		0x18000000
 #define MSR_K8_TSEG_ADDR		0xc0010112
 #define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2b5f2c91db25..ca110ee73f07 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -66,7 +66,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
-	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high));
+	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
 }
 
 static inline int native_write_msr_safe(unsigned int msr,
@@ -81,7 +81,8 @@ static inline int native_write_msr_safe(unsigned int msr,
 		     _ASM_EXTABLE(2b, 3b)
 		     : "=a" (err)
 		     : "c" (msr), "0" (low), "d" (high),
-		       "i" (-EFAULT));
+		       "i" (-EFAULT)
+		     : "memory");
 	return err;
 }
 
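
Here the "memory" clobber acts as a compiler barrier: stores issued in C before native_write_msr() can no longer be reordered or sunk past the wrmsr. A hedged sketch of the ordering this buys (the structure and MSR index are invented):

struct area { int ready; };

/* Hypothetical: the data must be fully written before the MSR write
 * points hardware at it. */
static void publish_area(struct area *a, unsigned long long phys)
{
	a->ready = 1;				/* must reach memory first */
	native_write_msr(0xc0000042,		/* invented MSR index      */
			 (unsigned)phys, (unsigned)(phys >> 32));
}
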
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 1e363021e72f..21f8d0202a82 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -15,38 +15,13 @@
  */
 int do_nmi_callback(struct pt_regs *regs, int cpu);
 
-#ifdef CONFIG_PM
-
-/** Replace the PM callback routine for NMI. */
-struct pm_dev *set_nmi_pm_callback(pm_callback callback);
-
-/** Unset the PM callback routine back to the default. */
-void unset_nmi_pm_callback(struct pm_dev *dev);
-
-#else
-
-static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback)
-{
-	return 0;
-}
-
-static inline void unset_nmi_pm_callback(struct pm_dev *dev)
-{
-}
-
-#endif /* CONFIG_PM */
-
 #ifdef CONFIG_X86_64
 extern void default_do_nmi(struct pt_regs *);
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-extern void nmi_watchdog_default(void);
-#else
-#define nmi_watchdog_default() do {} while (0)
 #endif
 
+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
 extern int check_nmi_watchdog(void);
 extern int nmi_watchdog_enabled;
-extern int unknown_nmi_panic;
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int avail_to_resrv_perfctr_nmi(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
@@ -62,12 +37,10 @@ extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
 
 extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
-#define NMI_DISABLED    -1
 #define NMI_NONE	0
 #define NMI_IO_APIC	1
 #define NMI_LOCAL_APIC	2
 #define NMI_INVALID	3
-#define NMI_DEFAULT	NMI_DISABLED
 
 struct ctl_table;
 struct file;
@@ -78,6 +51,24 @@ extern int unknown_nmi_panic;
 void __trigger_all_cpu_backtrace(void);
 #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
 
+static inline void localise_nmi_watchdog(void)
+{
+	if (nmi_watchdog == NMI_IO_APIC)
+		nmi_watchdog = NMI_LOCAL_APIC;
+}
+
+/* check if nmi_watchdog is active (ie was specified at boot) */
+static inline int nmi_watchdog_active(void)
+{
+	/*
+	 * actually it should be:
+	 *	return (nmi_watchdog == NMI_LOCAL_APIC ||
+	 *		nmi_watchdog == NMI_IO_APIC)
+	 * but since they are power of two we could use a
+	 * cheaper way --cvg
+	 */
+	return nmi_watchdog & 0x3;
+}
 #endif
 
 void lapic_watchdog_stop(void);
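
nmi_watchdog_active()'s `& 0x3` shortcut is sound because NMI_IO_APIC (1) and NMI_LOCAL_APIC (2) occupy distinct low bits, and nmi_watchdog only ever holds one of the three configured states. A quick stand-alone check of the truth table:

#include <stdio.h>

#define NMI_NONE	0
#define NMI_IO_APIC	1
#define NMI_LOCAL_APIC	2

int main(void)
{
	int modes[] = { NMI_NONE, NMI_IO_APIC, NMI_LOCAL_APIC };

	/* 0 -> inactive; 1 and 2 -> nonzero, i.e. active */
	for (int i = 0; i < 3; i++)
		printf("nmi_watchdog=%d -> active=%d\n",
		       modes[i], modes[i] & 0x3);
	return 0;
}
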
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h
index 03d0f7a9bf02..220d7b7707a0 100644
--- a/include/asm-x86/numa_32.h
+++ b/include/asm-x86/numa_32.h
@@ -2,14 +2,10 @@
 #define _ASM_X86_32_NUMA_H 1
 
 extern int pxm_to_nid(int pxm);
+extern void numa_remove_cpu(int cpu);
 
 #ifdef CONFIG_NUMA
-extern void __init remap_numa_kva(void);
-extern void set_highmem_pages_init(int);
-#else
-static inline void remap_numa_kva(void)
-{
-}
+extern void set_highmem_pages_init(void);
 #endif
 
 #endif /* _ASM_X86_32_NUMA_H */
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 22e87c9f6a80..3830094434a9 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -14,32 +14,30 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks,
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
-extern void numa_add_cpu(int cpu);
 extern void numa_init_array(void);
 extern int numa_off;
 
-extern void numa_set_node(int cpu, int node);
 extern void srat_reserve_add_area(int nodeid);
 extern int hotadd_percent;
 
 extern s16 apicid_to_node[MAX_LOCAL_APIC];
 
-extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
 extern unsigned long numa_free_all_bootmem(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start,
 			       unsigned long end);
 
 #ifdef CONFIG_NUMA
 extern void __init init_cpu_to_node(void);
-
-static inline void clear_node_cpumask(int cpu)
-{
-	clear_bit(cpu, (unsigned long *)&node_to_cpumask_map[cpu_to_node(cpu)]);
-}
-
+extern void __cpuinit numa_set_node(int cpu, int node);
+extern void __cpuinit numa_clear_node(int cpu);
+extern void __cpuinit numa_add_cpu(int cpu);
+extern void __cpuinit numa_remove_cpu(int cpu);
 #else
-#define init_cpu_to_node() do {} while (0)
-#define clear_node_cpumask(cpu) do {} while (0)
+static inline void init_cpu_to_node(void)		{ }
+static inline void numa_set_node(int cpu, int node)	{ }
+static inline void numa_clear_node(int cpu)		{ }
+static inline void numa_add_cpu(int cpu)		{ }
+static inline void numa_remove_cpu(int cpu)		{ }
 #endif
 
 #endif
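
Turning the old do-while macros into empty static inlines keeps full argument type-checking in !CONFIG_NUMA builds while still compiling to nothing. A sketch of the difference (stub name invented to avoid clashing with the header):

/* With an inline stub, a misuse is diagnosed even when CONFIG_NUMA is off;
 * a do {} while (0) macro would have discarded the arguments silently. */
static inline void numa_set_node_stub(int cpu, int node) { }

void example(void)
{
	/* numa_set_node_stub("cpu0", 1); -- would now warn/error */
	numa_set_node_stub(0, 1);	/* type-checked call, compiles to nothing */
}
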
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 94b86c31239a..34b92d581fa3 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -28,6 +28,7 @@
 
 #ifdef CONFIG_X86_NUMAQ
 
+extern int found_numaq;
 extern int get_memcfg_numaq(void);
 
 /*
@@ -156,9 +157,12 @@ struct sys_cfg_data {
 	struct eachquadmem eq[MAX_NUMNODES];	/* indexed by quad id */
 };
 
-static inline unsigned long *get_zholes_size(int nid)
+void numaq_tsc_disable(void);
+
+#else
+static inline int get_memcfg_numaq(void)
 {
-	return NULL;
+	return 0;
 }
 #endif /* CONFIG_X86_NUMAQ */
 #endif /* NUMAQ_H */
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index dc936dddf161..28d7b4533b1a 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -51,9 +51,17 @@
 
 #ifndef __ASSEMBLY__
 
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pgprotval_t pgprot; } pgprot_t;
+
 extern int page_is_ram(unsigned long pagenr);
 extern int devmem_is_allowed(unsigned long pagenr);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+		       pgprot_t vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+			 pgprot_t vma_prot);
 
+extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
 struct page;
@@ -74,9 +82,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
 static inline pgd_t native_make_pgd(pgdval_t val)
 {
 	return (pgd_t) { val };
@@ -160,6 +165,7 @@ static inline pteval_t native_pte_val(pte_t pte)
 #endif
 
 #define pte_val(x)	native_pte_val(x)
+#define pte_flags(x)	native_pte_val(x)
 #define __pte(x)	native_make_pte(x)
 
 #endif	/* CONFIG_PARAVIRT */
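
Moving the pgd_t/pgprot_t typedefs up lets the new map_devmem()/unmap_devmem() prototypes take a pgprot_t, and the one-member struct wrappers keep raw values and page-table types from mixing silently. A sketch of the explicit round-trip, using the pte accessors defined in this header (the bit value is arbitrary):

pte_t pte = __pte(0x1000 | 0x1);	/* wrap raw bits explicitly   */
pteval_t raw = pte_val(pte);		/* and unwrap them explicitly */
/* pte = 0x1001;  -- would not compile: pte_t is not an integer   */
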
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index ccf0ba3c3aba..ab8528793f08 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -13,6 +13,14 @@
  */
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 
+#ifdef CONFIG_4KSTACKS
+#define THREAD_ORDER	0
+#else
+#define THREAD_ORDER	1
+#endif
+#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
+
+
 #ifdef CONFIG_X86_PAE
 /* 44=32+12, the limit we can fit into an unsigned long pfn */
 #define __PHYSICAL_MASK_SHIFT	44
@@ -84,6 +92,13 @@ extern int sysctl_legacy_va_layout;
 #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
 #define MAXMEM			(-__PAGE_OFFSET - __VMALLOC_RESERVE)
 
+extern void find_low_pfn_range(void);
+extern unsigned long init_memory_mapping(unsigned long start,
+					 unsigned long end);
+extern void initmem_init(unsigned long, unsigned long);
+extern void setup_bootmem_allocator(void);
+
+
 #ifdef CONFIG_X86_USE_3DNOW
 #include <asm/mmx.h>
 
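
THREAD_ORDER/THREAD_SIZE encode the 4K-versus-8K kernel stack choice directly: PAGE_SIZE << 0 = 4096 bytes with CONFIG_4KSTACKS, PAGE_SIZE << 1 = 8192 bytes otherwise. The arithmetic, as a stand-alone check:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* CONFIG_4KSTACKS=y -> THREAD_ORDER 0; otherwise THREAD_ORDER 1 */
	for (int order = 0; order <= 1; order++)
		printf("THREAD_ORDER %d -> THREAD_SIZE %lu\n",
		       order, PAGE_SIZE << order);
	return 0;
}
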
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 6ea72859c491..c6916c83e6b1 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -26,7 +26,13 @@
26#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 26#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
27#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 27#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
28 28
29#define __PAGE_OFFSET _AC(0xffff810000000000, UL) 29/*
30 * Set __PAGE_OFFSET to the most negative possible address +
31 * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
32 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
33 * what Xen requires.
34 */
35#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
30 36
31#define __PHYSICAL_START CONFIG_PHYSICAL_START 37#define __PHYSICAL_START CONFIG_PHYSICAL_START
32#define __KERNEL_ALIGN 0x200000 38#define __KERNEL_ALIGN 0x200000
@@ -58,7 +64,8 @@
58void clear_page(void *page); 64void clear_page(void *page);
59void copy_page(void *to, void *from); 65void copy_page(void *to, void *from);
60 66
61extern unsigned long end_pfn; 67/* duplicated to the one in bootmem.h */
68extern unsigned long max_pfn;
62extern unsigned long phys_base; 69extern unsigned long phys_base;
63 70
64extern unsigned long __phys_addr(unsigned long); 71extern unsigned long __phys_addr(unsigned long);
@@ -83,10 +90,15 @@ typedef struct { pteval_t pte; } pte_t;
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
+extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
 #endif	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn)          ((pfn) < end_pfn)
+#define pfn_valid(pfn)          ((pfn) < max_pfn)
 #endif
 
 
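A quick numeric cross-check of the new __PAGE_OFFSET comment above (standalone editorial example, not part of the patch): the most negative sign-extended 48-bit address is 0xffff800000000000 (pgd slot 256), and each x86_64 pgd slot maps PGDIR_SIZE = 2^39 bytes, so slot 272 = 256 + 16:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t most_negative = 0xffff800000000000ULL; /* pgd slot 256 */
		uint64_t pgdir_size    = 1ULL << 39;            /* bytes per pgd slot */
		uint64_t page_offset   = most_negative + 16 * pgdir_size;

		/* prints ffff880000000000, pgd slot 272 */
		printf("%llx, pgd slot %llu\n",
		       (unsigned long long)page_offset,
		       256 + (unsigned long long)((page_offset - most_negative) / pgdir_size));
		return 0;
	}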
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0f13b945e240..ef5e8ec6a6ab 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -84,7 +84,7 @@ struct pv_time_ops {
 	int (*set_wallclock)(unsigned long);
 
 	unsigned long long (*sched_clock)(void);
-	unsigned long (*get_cpu_khz)(void);
+	unsigned long (*get_tsc_khz)(void);
 };
 
 struct pv_cpu_ops {
@@ -115,6 +115,9 @@ struct pv_cpu_ops {
 	void (*set_ldt)(const void *desc, unsigned entries);
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+#ifdef CONFIG_X86_64
+	void (*load_gs_index)(unsigned int idx);
+#endif
 	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
 				const void *desc);
 	void (*write_gdt_entry)(struct desc_struct *,
@@ -141,8 +144,32 @@ struct pv_cpu_ops {
 	u64 (*read_pmc)(int counter);
 	unsigned long long (*read_tscp)(unsigned int *aux);
 
-	/* These two are jmp to, not actually called. */
-	void (*irq_enable_syscall_ret)(void);
+	/*
+	 * Atomically enable interrupts and return to userspace.  This
+	 * is only ever used to return to 32-bit processes; in a
+	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
+	 * never native 64-bit processes.  (Jump, not call.)
+	 */
+	void (*irq_enable_sysexit)(void);
+
+	/*
+	 * Switch to usermode gs and return to 64-bit usermode using
+	 * sysret.  Only used in 64-bit kernels to return to 64-bit
+	 * processes.  Usermode register state, including %rsp, must
+	 * already be restored.
+	 */
+	void (*usergs_sysret64)(void);
+
+	/*
+	 * Switch to usermode gs and return to 32-bit usermode using
+	 * sysret.  Used to return to 32-on-64 compat processes.
+	 * Other usermode register state, including %esp, must already
+	 * be restored.
+	 */
+	void (*usergs_sysret32)(void);
+
+	/* Normal iret.  Jump to this with the standard iret stack
+	   frame set up. */
 	void (*iret)(void);
 
 	void (*swapgs)(void);
@@ -165,6 +192,10 @@ struct pv_irq_ops {
 	void (*irq_enable)(void);
 	void (*safe_halt)(void);
 	void (*halt)(void);
+
+#ifdef CONFIG_X86_64
+	void (*adjust_exception_frame)(void);
+#endif
 };
 
 struct pv_apic_ops {
@@ -219,7 +250,14 @@ struct pv_mmu_ops {
 	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
 				 unsigned long va);
 
-	/* Hooks for allocating/releasing pagetable pages */
+	/* Hooks for allocating and freeing a pagetable top-level */
+	int  (*pgd_alloc)(struct mm_struct *mm);
+	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
+
+	/*
+	 * Hooks for allocating/releasing pagetable pages when they're
+	 * attached to a pagetable
+	 */
 	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
 	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
@@ -238,7 +276,13 @@ struct pv_mmu_ops {
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
 
+	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+					pte_t *ptep);
+	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+					pte_t *ptep, pte_t pte);
+
 	pteval_t (*pte_val)(pte_t);
+	pteval_t (*pte_flags)(pte_t);
 	pte_t (*make_pte)(pteval_t pte);
 
 	pgdval_t (*pgd_val)(pgd_t);
@@ -273,6 +317,13 @@ struct pv_mmu_ops {
 #endif
 
 	struct pv_lazy_ops lazy_mode;
+
+	/* dom0 ops */
+
+	/* Sometimes the physical address is a pfn, and sometimes its
+	   an mfn.  We can tell which is which from the index. */
+	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+			   unsigned long phys, pgprot_t flags);
 };
 
 /* This contains all the paravirt structures: we get a convenient
@@ -439,10 +490,17 @@ int paravirt_disable_iospace(void);
 #define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
 #endif
 
+#ifdef CONFIG_PARAVIRT_DEBUG
+#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
+#else
+#define PVOP_TEST_NULL(op)	((void)op)
+#endif
+
 #define __PVOP_CALL(rettype, op, pre, post, ...)			\
 	({								\
 		rettype __ret;						\
 		PVOP_CALL_ARGS;						\
+		PVOP_TEST_NULL(op);					\
 		/* This is 32-bit specific, but is okay in 64-bit */	\
 		/* since this condition will never hold */		\
 		if (sizeof(rettype) > sizeof(unsigned long)) {		\
@@ -471,6 +529,7 @@ int paravirt_disable_iospace(void);
 #define __PVOP_VCALL(op, pre, post, ...)				\
 	({								\
 		PVOP_VCALL_ARGS;					\
+		PVOP_TEST_NULL(op);					\
 		asm volatile(pre					\
 			     paravirt_alt(PARAVIRT_CALL)		\
 			     post					\
@@ -720,7 +779,7 @@ static inline unsigned long long paravirt_sched_clock(void)
 {
 	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
-#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())
+#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
 
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
@@ -789,6 +848,13 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
 }
 
+#ifdef CONFIG_X86_64
+static inline void load_gs_index(unsigned int gs)
+{
+	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
+}
+#endif
+
 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
 				   const void *desc)
 {
@@ -912,6 +978,16 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 }
 
+static inline int paravirt_pgd_alloc(struct mm_struct *mm)
+{
+	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
+}
+
+static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
+}
+
 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
@@ -996,6 +1072,20 @@ static inline pteval_t pte_val(pte_t pte)
 	return ret;
 }
 
+static inline pteval_t pte_flags(pte_t pte)
+{
+	pteval_t ret;
+
+	if (sizeof(pteval_t) > sizeof(long))
+		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
+				 pte.pte, (u64)pte.pte >> 32);
+	else
+		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
+				 pte.pte);
+
+	return ret;
+}
+
 static inline pgd_t __pgd(pgdval_t val)
 {
 	pgdval_t ret;
@@ -1024,6 +1114,29 @@ static inline pgdval_t pgd_val(pgd_t pgd)
 	return ret;
 }
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+					   pte_t *ptep)
+{
+	pteval_t ret;
+
+	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
+			 mm, addr, ptep);
+
+	return (pte_t) { .pte = ret };
+}
+
+static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+					   pte_t *ptep, pte_t pte)
+{
+	if (sizeof(pteval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
+	else
+		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
+			    mm, addr, ptep, pte.pte);
+}
+
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
 	if (sizeof(pteval_t) > sizeof(long))
@@ -1252,6 +1365,12 @@ static inline void arch_flush_lazy_mmu_mode(void)
 	}
 }
 
+static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+				unsigned long phys, pgprot_t flags)
+{
+	pv_mmu_ops.set_fixmap(idx, phys, flags);
+}
+
 void _paravirt_nop(void);
 #define paravirt_nop	((void *)_paravirt_nop)
 
@@ -1374,54 +1493,86 @@ static inline unsigned long __raw_local_irq_save(void)
 #define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+#define PARA_INDIRECT(addr)	*addr(%rip)
 #else
 #define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
 #define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+#define PARA_INDIRECT(addr)	*%cs:addr
 #endif
 
 #define INTERRUPT_RETURN						\
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
-		  jmp *%cs:pv_cpu_ops+PV_CPU_iret)
+		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
 
 #define DISABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
 		  PV_SAVE_REGS;						\
-		  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;		\
+		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
 		  PV_RESTORE_REGS;)					\
 
 #define ENABLE_INTERRUPTS(clobbers)					\
 	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
 		  PV_SAVE_REGS;						\
-		  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;		\
+		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
 		  PV_RESTORE_REGS;)
 
-#define ENABLE_INTERRUPTS_SYSCALL_RET					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
-		  CLBR_NONE,						\
-		  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
-
+#define USERGS_SYSRET32							\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
+		  CLBR_NONE,						\
+		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
 
 #ifdef CONFIG_X86_32
 #define GET_CR0_INTO_EAX				\
 	push %ecx; push %edx;				\
-	call *pv_cpu_ops+PV_CPU_read_cr0;		\
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
 	pop %edx; pop %ecx
-#else
+
+#define ENABLE_INTERRUPTS_SYSEXIT					\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
+		  CLBR_NONE,						\
+		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+
+
+#else	/* !CONFIG_X86_32 */
+
+/*
+ * If swapgs is used while the userspace stack is still current,
+ * there's no way to call a pvop.  The PV replacement *must* be
+ * inlined, or the swapgs instruction must be trapped and emulated.
+ */
+#define SWAPGS_UNSAFE_STACK						\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
+		  swapgs)
+
 #define SWAPGS								\
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
 		  PV_SAVE_REGS;						\
-		  call *pv_cpu_ops+PV_CPU_swapgs;			\
+		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
 		  PV_RESTORE_REGS					\
 		 )
 
 #define GET_CR2_INTO_RCX				\
-	call *pv_mmu_ops+PV_MMU_read_cr2;		\
+	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
 	movq %rax, %rcx;				\
 	xorq %rax, %rax;
 
-#endif
+#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
+	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
+		  CLBR_NONE,						\
+		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
+
+#define USERGS_SYSRET64							\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
+		  CLBR_NONE,						\
+		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#define ENABLE_INTERRUPTS_SYSEXIT32					\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
+		  CLBR_NONE,						\
+		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+#endif	/* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PARAVIRT */
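To make the new ptep_modify_prot hooks concrete, here is a hedged sketch of the transaction pattern a caller in the generic MM code follows: start logically reads (and may clear) the pte, the caller edits the flags locally, and commit installs the result, which lets a hypervisor batch or trap the update. The function name sketch_change_prot is illustrative; pte_modify() is the existing generic helper, and the usual page-table locking is assumed to be held:

	/* Sketch only: one ptep_modify_prot transaction. */
	static void sketch_change_prot(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, pgprot_t newprot)
	{
		pte_t pte;

		pte = ptep_modify_prot_start(mm, addr, ptep);	/* read + logical clear */
		pte = pte_modify(pte, newprot);			/* edit flags locally */
		ptep_modify_prot_commit(mm, addr, ptep, pte);	/* install atomically */
	}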
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 88f60cc6a227..7edc47307217 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -1,14 +1,13 @@
-
 #ifndef _ASM_PAT_H
-#define _ASM_PAT_H 1
+#define _ASM_PAT_H
 
 #include <linux/types.h>
 
 #ifdef CONFIG_X86_PAT
-extern int pat_wc_enabled;
+extern int pat_enabled;
 extern void validate_pat_support(struct cpuinfo_x86 *c);
 #else
-static const int pat_wc_enabled = 0;
+static const int pat_enabled;
 static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
 #endif
 
@@ -21,4 +20,3 @@ extern int free_memtype(u64 start, u64 end);
 extern void pat_disable(char *reason);
 
 #endif
-
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 30bbde0cb34b..2db14cf17db8 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -18,6 +18,8 @@ struct pci_sysdata {
 #endif
 };
 
+extern int pci_routeirq;
+
 /* scan a bus after allocating a pci_sysdata for it */
 extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
 					    int node);
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
index 8c4c3a0368e2..a50d46851285 100644
--- a/include/asm-x86/pci_32.h
+++ b/include/asm-x86/pci_32.h
@@ -18,12 +18,14 @@ struct pci_dev;
 #define PCI_DMA_BUS_IS_PHYS	(1)
 
 /* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME)		(0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME[0];
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		unsigned LEN_NAME[0];
+#define pci_unmap_addr(PTR, ADDR_NAME)		sizeof((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
+	do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
+#define pci_unmap_len(PTR, LEN_NAME)		sizeof((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
+	do { break; } while (pci_unmap_len(PTR, LEN_NAME))
 
 
 #endif /* __KERNEL__ */
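The zero-length-array trick above keeps the no-op semantics while restoring type checking: the declared members occupy no space, and sizeof on them evaluates to 0 at compile time, so the accessors still yield 0 but misspelled field names now fail to compile. A standalone illustration (editorial example using the gcc zero-length-array extension; struct and field names are invented):

	#include <stdio.h>

	/* Zero-length array member: type-checked by the compiler, zero bytes big. */
	struct mapping {
		void *cpu_addr;
		unsigned long mapaddr[0];	/* like DECLARE_PCI_UNMAP_ADDR */
	};

	int main(void)
	{
		struct mapping m = { 0 };

		/* sizeof a zero-length member is 0, so the "read" is 0 ... */
		printf("addr = %zu, struct size = %zu\n",
		       sizeof(m.mapaddr), sizeof(struct mapping));
		/* ... but sizeof(m.mapadr) would be a compile error (typo caught). */
		return 0;
	}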
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index 101fb9e11954..b34e9a7cc80b 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -22,6 +22,8 @@ struct x8664_pda {
 				  offset 40!!! */
 #endif
 	char *irqstackptr;
+	short nodenumber;		/* number of current node (32k max) */
+	short in_bootmem;		/* pda lives in bootmem */
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* number of NMI on this CPUs */
 	short mmu_state;
@@ -37,8 +39,7 @@ struct x8664_pda {
 	unsigned irq_spurious_count;
 } ____cacheline_aligned_in_smp;
 
-extern struct x8664_pda *_cpu_pda[];
-extern struct x8664_pda boot_cpu_pda[];
+extern struct x8664_pda **_cpu_pda;
 extern void pda_init(int);
 
 #define cpu_pda(i) (_cpu_pda[i])
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index 736fc3bb8e1e..912a3a17b9db 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -143,4 +143,50 @@ do { \
 #define x86_or_percpu(var, val)	percpu_to_op("or", per_cpu__##var, val)
 #endif /* !__ASSEMBLY__ */
 #endif /* !CONFIG_X86_64 */
+
+#ifdef CONFIG_SMP
+
+/*
+ * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
+ * variables that are initialized and accessed before there are per_cpu
+ * areas allocated.
+ */
+
+#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
+	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
+	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
+				{ [0 ... NR_CPUS-1] = _initvalue };	\
+	__typeof__(_type) *_name##_early_ptr = _name##_early_map
+
+#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)		\
+	EXPORT_PER_CPU_SYMBOL(_name)
+
+#define DECLARE_EARLY_PER_CPU(_type, _name)		\
+	DECLARE_PER_CPU(_type, _name);			\
+	extern __typeof__(_type) *_name##_early_ptr;	\
+	extern __typeof__(_type)  _name##_early_map[]
+
+#define early_per_cpu_ptr(_name) (_name##_early_ptr)
+#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
+#define early_per_cpu(_name, _cpu)			\
+	(early_per_cpu_ptr(_name) ?			\
+		early_per_cpu_ptr(_name)[_cpu] :	\
+		per_cpu(_name, _cpu))
+
+#else	/* !CONFIG_SMP */
+#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)	\
+	DEFINE_PER_CPU(_type, _name) = _initvalue
+
+#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)		\
+	EXPORT_PER_CPU_SYMBOL(_name)
+
+#define DECLARE_EARLY_PER_CPU(_type, _name)		\
+	DECLARE_PER_CPU(_type, _name)
+
+#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
+#define early_per_cpu_ptr(_name) NULL
+/* no early_per_cpu_map() */
+
+#endif	/* !CONFIG_SMP */
+
 #endif /* _ASM_X86_PERCPU_H_ */
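Usage of the EARLY_PER_CPU machinery follows a fixed pattern; the sketch below is editorial (the variable matches the x86_cpu_to_apicid use in smp.h further down, but the two helper functions are invented). Define with an init value, read through early_per_cpu() while only the __initdata map exists, then copy the map into the real per-cpu areas and NULL the early pointer:

	/* Sketch only, kernel-style fragment. */
	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);

	static u16 sketch_read_apicid(int cpu)
	{
		/* Falls back to the __initdata map until per-cpu areas exist. */
		return early_per_cpu(x86_cpu_to_apicid, cpu);
	}

	static void __init sketch_switch_to_percpu(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		/* From here on, early_per_cpu() reads the real per-cpu area. */
		early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	}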
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 91e4641f3f31..d63ea431cb3b 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -5,9 +5,13 @@
 #include <linux/mm.h>		/* for struct page */
 #include <linux/pagemap.h>
 
+static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
+#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
+static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
 static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 97c271b2910b..49cbd76b9547 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -20,30 +20,25 @@
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
-/*
- * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
- * sign-extended value on 32-bit with all 1's in the upper word,
- * which preserves the upper pte values on 64-bit ptes:
- */
-#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
-#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
-#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
-#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
-#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
-#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
-#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
-#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
-#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
-#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)
-#define _PAGE_PAT	(_AC(1, L)<<_PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE	(_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
+#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_UNUSED2	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
+#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
+#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
-#define _PAGE_NX	0
+#define _PAGE_NX	(_AT(pteval_t, 0))
 #endif
 
 /* If _PAGE_PRESENT is clear, we use these: */
@@ -83,19 +78,9 @@
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
 					 _PAGE_ACCESSED)
 
-#ifdef CONFIG_X86_32
-#define _PAGE_KERNEL_EXEC \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define _PAGE_KERNEL (_PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#ifndef __ASSEMBLY__
-extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-#endif	/* __ASSEMBLY__ */
-#else
 #define __PAGE_KERNEL_EXEC						\
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
 #define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
-#endif
 
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
@@ -106,26 +91,22 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
-#ifdef CONFIG_X86_32
-# define MAKE_GLOBAL(x)			__pgprot((x))
-#else
-# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
-#endif
-
-#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
+#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
+#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
+#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
+#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
 
 /* xwr */
 #define __P000	PAGE_NONE
@@ -164,37 +145,37 @@ extern struct list_head pgd_list;
  */
 static inline int pte_dirty(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_DIRTY;
+	return pte_flags(pte) & _PAGE_DIRTY;
 }
 
 static inline int pte_young(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_ACCESSED;
+	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
 static inline int pte_write(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_RW;
+	return pte_flags(pte) & _PAGE_RW;
 }
 
 static inline int pte_file(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_FILE;
+	return pte_flags(pte) & _PAGE_FILE;
 }
 
 static inline int pte_huge(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_PSE;
+	return pte_flags(pte) & _PAGE_PSE;
 }
 
 static inline int pte_global(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_GLOBAL;
+	return pte_flags(pte) & _PAGE_GLOBAL;
 }
 
 static inline int pte_exec(pte_t pte)
 {
-	return !(pte_val(pte) & _PAGE_NX);
+	return !(pte_flags(pte) & _PAGE_NX);
 }
 
 static inline int pte_special(pte_t pte)
@@ -210,22 +191,22 @@ static inline int pmd_large(pmd_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+	return __pte(pte_val(pte) & ~_PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+	return __pte(pte_val(pte) & ~_PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -250,7 +231,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+	return __pte(pte_val(pte) & ~_PAGE_PSE);
 }
 
 static inline pte_t pte_mkglobal(pte_t pte)
@@ -260,7 +241,7 @@ static inline pte_t pte_mkglobal(pte_t pte)
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
@@ -305,7 +286,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 	return __pgprot(preservebits | addbits);
 }
 
-#define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK)
+#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK)
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
@@ -318,6 +299,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot);
 #endif
 
+/* Install a pte for a particular vaddr in kernel space. */
+void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
@@ -359,6 +343,26 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 # include "pgtable_64.h"
 #endif
 
+/*
+ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+
 #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
 #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
 
@@ -369,8 +373,15 @@ enum {
 	PG_LEVEL_4K,
 	PG_LEVEL_2M,
 	PG_LEVEL_1G,
+	PG_LEVEL_NUM
 };
 
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
 /*
  * Helper function that returns the kernel pagetable entry controlling
  * the virtual address 'address'. NULL means no pagetable entry present.
@@ -420,6 +431,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * race with other CPU's that might be updating the dirty
 	 * bit at the same time.
 	 */
+struct vm_area_struct;
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,
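The pgd_index() arithmetic just unified above is plain shift-and-mask; a standalone check with 64-bit constants (editorial example, using PGDIR_SHIFT = 39 and PTRS_PER_PGD = 512 as on x86_64) confirms the new __PAGE_OFFSET lands in pgd slot 272, matching the page_64.h comment earlier:

	#include <stdio.h>
	#include <stdint.h>

	#define PGDIR_SHIFT	39	/* x86_64: each pgd entry maps 2^39 bytes */
	#define PTRS_PER_PGD	512

	static unsigned long pgd_index(uint64_t address)
	{
		return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	}

	int main(void)
	{
		/* prints: pgd_index(0xffff880000000000) = 272 */
		printf("pgd_index(0xffff880000000000) = %lu\n",
		       pgd_index(0xffff880000000000ULL));
		return 0;
	}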
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 32ca03109a4c..ec871c420d7e 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -113,26 +113,6 @@ extern unsigned long pg0[];
  */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_index_k(addr) pgd_index((addr))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
 
 static inline int pud_large(pud_t pud) { return 0; }
 
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 1cc50d22d735..fa7208b483ca 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -70,6 +70,9 @@ extern void paging_init(void);
 
 struct mm_struct;
 
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
+
+
 static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
 				    pte_t *ptep)
 {
@@ -190,12 +193,9 @@ static inline int pmd_bad(pmd_t pmd)
 #define pgd_page_vaddr(pgd)						\
 	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK))
 #define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
-#define pgd_offset_k(address) (init_level4_pgt + pgd_index((address)))
 #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
-#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 
 /* PUD - Level3 access */
 /* to find an entry in a page-table-directory. */
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 199cab107d85..092b39b3a7e6 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -88,4 +88,10 @@
 #define CX86_ARR_BASE	0xc4
 #define CX86_RCR_BASE	0xdc
 
+#ifdef CONFIG_VM86
+#define X86_VM_MASK	X86_EFLAGS_VM
+#else
+#define X86_VM_MASK	0 /* No VM86 support */
+#endif
+
 #endif /* __ASM_I386_PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 559105220a47..7f7382704592 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -153,7 +153,7 @@ static inline int hlt_works(int cpu)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
-extern void identify_cpu(struct cpuinfo_x86 *);
+extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
@@ -263,15 +263,11 @@ struct tss_struct {
 	struct thread_struct	*io_bitmap_owner;
 
 	/*
-	 * Pad the TSS to be cacheline-aligned (size is 0x100):
-	 */
-	unsigned long		__cacheline_filler[35];
-	/*
 	 * .. and then another 0x100 bytes for the emergency kernel stack:
 	 */
 	unsigned long		stack[64];
 
-} __attribute__((packed));
+} ____cacheline_aligned;
 
 DECLARE_PER_CPU(struct tss_struct, init_tss);
 
@@ -535,7 +531,6 @@ static inline void load_sp0(struct tss_struct *tss,
 }
 
 #define set_iopl_mask native_set_iopl_mask
-#define SWAPGS	swapgs
 #endif /* CONFIG_PARAVIRT */
 
 /*
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 6c8b41b03f6d..3dd458c385c0 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -14,8 +14,6 @@ extern void ia32_syscall(void);
14extern void ia32_cstar_target(void); 14extern void ia32_cstar_target(void);
15extern void ia32_sysenter_target(void); 15extern void ia32_sysenter_target(void);
16 16
17extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
18
19extern void syscall32_cpu_init(void); 17extern void syscall32_cpu_init(void);
20 18
21extern void check_efer(void); 19extern void check_efer(void);
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 9f922b0b95d6..8a71db803da6 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -3,7 +3,12 @@
 
 #include <linux/compiler.h>	/* For __user */
 #include <asm/ptrace-abi.h>
+#include <asm/processor-flags.h>
 
+#ifdef __KERNEL__
+#include <asm/ds.h>		/* the DS BTS struct is used for ptrace too */
+#include <asm/segment.h>
+#endif
 
 #ifndef __ASSEMBLY__
 
@@ -55,9 +60,6 @@ struct pt_regs {
 	unsigned long ss;
 };
 
-#include <asm/vm86.h>
-#include <asm/segment.h>
-
 #endif /* __KERNEL__ */
 
 #else /* __i386__ */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e63741f19392..206f355786dc 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -14,8 +14,8 @@ struct machine_ops {
 
 extern struct machine_ops machine_ops;
 
-void machine_real_restart(unsigned char *code, int length);
 void native_machine_crash_shutdown(struct pt_regs *regs);
 void native_machine_shutdown(void);
+void machine_real_restart(const unsigned char *code, int length);
 
 #endif /* _ASM_REBOOT_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 7400d3ad75c6..adec887dd7cd 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -19,9 +19,13 @@
 
 #if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
 # define NEED_PAE	(1<<(X86_FEATURE_PAE & 31))
-# define NEED_CX8	(1<<(X86_FEATURE_CX8 & 31))
 #else
 # define NEED_PAE	0
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+# define NEED_CX8	(1<<(X86_FEATURE_CX8 & 31))
+#else
 # define NEED_CX8	0
 #endif
 
@@ -38,7 +42,7 @@
 #endif
 
 #ifdef CONFIG_X86_64
-#define NEED_PSE	(1<<(X86_FEATURE_PSE & 31))
+#define NEED_PSE	0
 #define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
 #define NEED_PGE	(1<<(X86_FEATURE_PGE & 31))
 #define NEED_FXSR	(1<<(X86_FEATURE_FXSR & 31))
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 2557514d7ef6..8d9f0b41ee86 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -6,7 +6,7 @@
 #define TRACE_RESUME(user)					\
 do {								\
 	if (pm_trace_enabled) {					\
-		void *tracedata;				\
+		const void *tracedata;				\
 		asm volatile(_ASM_MOV_UL " $1f,%0\n"		\
 			     ".section .tracedata,\"a\"\n"	\
 			     "1:\t.word %c1\n\t"		\
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
index 18da19e89bff..36e71c5f306f 100644
--- a/include/asm-x86/seccomp_32.h
+++ b/include/asm-x86/seccomp_32.h
@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/thread_info.h>
 
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a2287..76cfe69aa63c 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/thread_info.h>
 
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index ed5131dd7d92..dfc8601c0892 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -61,18 +61,14 @@
 #define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
 
 #define GDT_ENTRY_DEFAULT_USER_CS	14
-#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
 
 #define GDT_ENTRY_DEFAULT_USER_DS	15
-#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
 
 #define GDT_ENTRY_KERNEL_BASE	12
 
 #define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
-#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
 
 #define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
-#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
 
 #define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
 #define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
@@ -139,10 +135,11 @@
 #else
 #include <asm/cache.h>
 
-#define __KERNEL_CS	0x10
-#define __KERNEL_DS	0x18
+#define GDT_ENTRY_KERNEL32_CS 1
+#define GDT_ENTRY_KERNEL_CS 2
+#define GDT_ENTRY_KERNEL_DS 3
 
-#define __KERNEL32_CS   0x08
+#define __KERNEL32_CS   (GDT_ENTRY_KERNEL32_CS * 8)
 
 /*
  * we cannot use the same code segment descriptor for user and kernel
@@ -150,10 +147,10 @@
  * The segment offset needs to contain a RPL. Grr. -AK
  * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
  */
-
-#define __USER32_CS   0x23   /* 4*8+3 */
-#define __USER_DS     0x2b   /* 5*8+3 */
-#define __USER_CS     0x33   /* 6*8+3 */
+#define GDT_ENTRY_DEFAULT_USER32_CS 4
+#define GDT_ENTRY_DEFAULT_USER_DS 5
+#define GDT_ENTRY_DEFAULT_USER_CS 6
+#define __USER32_CS   (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
 #define __USER32_DS	__USER_DS
 
 #define GDT_ENTRY_TSS 8	/* needs two entries */
@@ -175,6 +172,10 @@
 
 #endif
 
+#define __KERNEL_CS	(GDT_ENTRY_KERNEL_CS * 8)
+#define __KERNEL_DS	(GDT_ENTRY_KERNEL_DS * 8)
+#define __USER_DS	(GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
+#define __USER_CS	(GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
 #ifndef CONFIG_PARAVIRT
 #define get_kernel_rpl()  0
 #endif
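The selector values are unchanged by this rework, only derived instead of hardcoded: an x86 selector is (GDT index << 3) | TI | RPL, with TI = 0 for GDT entries. A standalone check (editorial example) reproduces the old 64-bit literals that the patch removes:

	#include <stdio.h>

	/* selector = index*8 (TI=0 for GDT) + RPL; indices from segment.h above */
	int main(void)
	{
		printf("__KERNEL_CS = %#x\n", 2 * 8);		/* 0x10, ring 0 */
		printf("__KERNEL_DS = %#x\n", 3 * 8);		/* 0x18, ring 0 */
		printf("__USER32_CS = %#x\n", 4 * 8 + 3);	/* 0x23, ring 3 */
		printf("__USER_DS   = %#x\n", 5 * 8 + 3);	/* 0x2b, ring 3 */
		printf("__USER_CS   = %#x\n", 6 * 8 + 3);	/* 0x33, ring 3 */
		return 0;
	}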
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index fa6763af8d26..90ab2225e71b 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -8,7 +8,25 @@
 /* Interrupt control for vSMPowered x86_64 systems */
 void vsmp_init(void);
 
-char *machine_specific_memory_setup(void);
+#ifdef CONFIG_X86_VISWS
+extern void visws_early_detect(void);
+extern int is_visws_box(void);
+#else
+static inline void visws_early_detect(void) { }
+static inline int is_visws_box(void) { return 0; }
+#endif
+
+/*
+ * Any setup quirks to be performed?
+ */
+extern int (*arch_time_init_quirk)(void);
+extern int (*arch_pre_intr_init_quirk)(void);
+extern int (*arch_intr_init_quirk)(void);
+extern int (*arch_trap_init_quirk)(void);
+extern char * (*arch_memory_setup_quirk)(void);
+extern int (*mach_get_smp_config_quirk)(unsigned int early);
+extern int (*mach_find_smp_config_quirk)(unsigned int reserve);
+
 #ifndef CONFIG_PARAVIRT
 #define paravirt_post_allocator_init()	do {} while (0)
 #endif
@@ -43,26 +61,23 @@ char *machine_specific_memory_setup(void);
  */
 extern struct boot_params boot_params;
 
-#ifdef __i386__
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.
  */
 #define LOWMEMSIZE()	(0x9f000)
 
-struct e820entry;
-
-char * __init machine_specific_memory_setup(void);
-char *memory_setup(void);
+#ifdef __i386__
 
-int __init copy_e820_map(struct e820entry *biosmap, int nr_map);
-int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map);
-void __init add_memory_region(unsigned long long start,
-			      unsigned long long size, int type);
+void __init i386_start_kernel(void);
+extern void probe_roms(void);
 
+extern unsigned long init_pg_tables_start;
 extern unsigned long init_pg_tables_end;
 
-
+#else
+void __init x86_64_start_kernel(char *real_mode);
+void __init x86_64_start_reservations(char *real_mode_data);
 
 #endif /* __i386__ */
 #endif /* _SETUP */
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 1ebaa5cd3112..c2784b3e0b77 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -29,21 +29,12 @@ extern int smp_num_siblings;
 extern unsigned int num_processors;
 extern cpumask_t cpu_initialized;
 
-#ifdef CONFIG_SMP
-extern u16 x86_cpu_to_apicid_init[];
-extern u16 x86_bios_cpu_apicid_init[];
-extern void *x86_cpu_to_apicid_early_ptr;
-extern void *x86_bios_cpu_apicid_early_ptr;
-#else
-#define x86_cpu_to_apicid_early_ptr NULL
-#define x86_bios_cpu_apicid_early_ptr NULL
-#endif
-
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
-DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
+
+DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
 extern struct {
@@ -59,9 +50,9 @@ struct smp_ops {
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
-	int (*smp_call_function_mask)(cpumask_t mask,
-				      void (*func)(void *info), void *info,
-				      int wait);
+
+	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_single_ipi)(int cpu);
 };
 
 /* Globals due to paravirt */
@@ -103,23 +94,26 @@ static inline void smp_send_reschedule(int cpu)
 	smp_ops.smp_send_reschedule(cpu);
 }
 
-static inline int smp_call_function_mask(cpumask_t mask,
-					 void (*func) (void *info), void *info,
-					 int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_ops.smp_call_function_mask(mask, func, info, wait);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 
-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -130,6 +124,14 @@ static inline int num_booting_cpus(void)
 }
 #endif /* CONFIG_SMP */
 
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_CPU)
+extern void prefill_possible_map(void);
+#else
+static inline void prefill_possible_map(void)
+{
+}
+#endif
+
 extern unsigned disabled_cpus __cpuinitdata;
 
 #ifdef CONFIG_X86_32_SMP
@@ -197,12 +199,8 @@ static inline int hard_smp_processor_id(void)
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
 #endif
 
-extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
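The smp_ops change above narrows the hook surface: instead of delegating the whole smp_call_function_mask() loop to the backend, the backend now supplies only the two IPI senders and the generic code owns the queueing. A hedged sketch of how a backend might wire this up (illustrative only; the initializer layout is an assumption, though the native_* senders are declared in this header):

	/* Sketch: a backend fills in only the IPI primitives. */
	struct smp_ops smp_ops = {
		.smp_send_reschedule	   = native_smp_send_reschedule,
		.send_call_func_ipi	   = native_send_call_func_ipi,
		.send_call_func_single_ipi = native_send_call_func_single_ipi,
		/* ... remaining boot/shutdown hooks ... */
	};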
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
index f4bba131d068..774c919dc232 100644
--- a/include/asm-x86/srat.h
+++ b/include/asm-x86/srat.h
@@ -27,11 +27,13 @@
 #ifndef _ASM_SRAT_H_
 #define _ASM_SRAT_H_
 
-#ifndef CONFIG_ACPI_SRAT
-#error CONFIG_ACPI_SRAT not defined, and srat.h header has been included
-#endif
-
+#ifdef CONFIG_ACPI_NUMA
 extern int get_memcfg_from_srat(void);
-extern unsigned long *get_zholes_size(int);
+#else
+static inline int get_memcfg_from_srat(void)
+{
+	return 0;
+}
+#endif
 
 #endif /* _ASM_SRAT_H_ */
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index b49369ad9a61..193578cd1fd9 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -29,81 +29,116 @@ extern char *strchr(const char *s, int c);
 #define __HAVE_ARCH_STRLEN
 extern size_t strlen(const char *s);
 
-static __always_inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void *__memcpy(void *to, const void *from, size_t n)
 {
-int d0, d1, d2;
-__asm__ __volatile__(
-	"rep ; movsl\n\t"
-	"movl %4,%%ecx\n\t"
-	"andl $3,%%ecx\n\t"
-	"jz 1f\n\t"
-	"rep ; movsb\n\t"
-	"1:"
-	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
-	: "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from)
-	: "memory");
-return (to);
+	int d0, d1, d2;
+	asm volatile("rep ; movsl\n\t"
+		     "movl %4,%%ecx\n\t"
+		     "andl $3,%%ecx\n\t"
+		     "jz 1f\n\t"
+		     "rep ; movsb\n\t"
+		     "1:"
+		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
+		     : "memory");
+	return to;
 }
 
 /*
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void *__constant_memcpy(void *to, const void *from,
+					       size_t n)
 {
 	long esi, edi;
-	if (!n) return to;
-#if 1	/* want to do small copies with non-string ops? */
+	if (!n)
+		return to;
+
 	switch (n) {
-	case 1: *(char*)to = *(char*)from; return to;
-	case 2: *(short*)to = *(short*)from; return to;
-	case 4: *(int*)to = *(int*)from; return to;
-#if 1	/* including those doable with two moves? */
-	case 3: *(short*)to = *(short*)from;
-		*((char*)to+2) = *((char*)from+2); return to;
-	case 5: *(int*)to = *(int*)from;
-		*((char*)to+4) = *((char*)from+4); return to;
-	case 6: *(int*)to = *(int*)from;
-		*((short*)to+2) = *((short*)from+2); return to;
-	case 8: *(int*)to = *(int*)from;
-		*((int*)to+1) = *((int*)from+1); return to;
-#endif
+	case 1:
+		*(char *)to = *(char *)from;
+		return to;
+	case 2:
+		*(short *)to = *(short *)from;
+		return to;
+	case 4:
+		*(int *)to = *(int *)from;
+		return to;
+
+	case 3:
+		*(short *)to = *(short *)from;
+		*((char *)to + 2) = *((char *)from + 2);
+		return to;
+	case 5:
+		*(int *)to = *(int *)from;
+		*((char *)to + 4) = *((char *)from + 4);
+		return to;
+	case 6:
+		*(int *)to = *(int *)from;
+		*((short *)to + 2) = *((short *)from + 2);
+		return to;
+	case 8:
+		*(int *)to = *(int *)from;
+		*((int *)to + 1) = *((int *)from + 1);
+		return to;
 	}
-#endif
-	esi = (long) from;
-	edi = (long) to;
-	if (n >= 5*4) {
+
+	esi = (long)from;
+	edi = (long)to;
+	if (n >= 5 * 4) {
 		/* large block: use rep prefix */
 		int ecx;
-		__asm__ __volatile__(
-			"rep ; movsl"
-			: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
-			: "0" (n/4), "1" (edi),"2" (esi)
-			: "memory"
+		asm volatile("rep ; movsl"
+			     : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
+			     : "0" (n / 4), "1" (edi), "2" (esi)
+			     : "memory"
 		);
 	} else {
 		/* small block: don't clobber ecx + smaller code */
-		if (n >= 4*4) __asm__ __volatile__("movsl"
-			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
-		if (n >= 3*4) __asm__ __volatile__("movsl"
-			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
-		if (n >= 2*4) __asm__ __volatile__("movsl"
-			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
-		if (n >= 1*4) __asm__ __volatile__("movsl"
-			:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+		if (n >= 4 * 4)
+			asm volatile("movsl"
+				     : "=&D"(edi), "=&S"(esi)
+				     : "0"(edi), "1"(esi)
+				     : "memory");
+		if (n >= 3 * 4)
+			asm volatile("movsl"
+				     : "=&D"(edi), "=&S"(esi)
+				     : "0"(edi), "1"(esi)
+				     : "memory");
+		if (n >= 2 * 4)
+			asm volatile("movsl"
+				     : "=&D"(edi), "=&S"(esi)
+				     : "0"(edi), "1"(esi)
+				     : "memory");
+		if (n >= 1 * 4)
+			asm volatile("movsl"
+				     : "=&D"(edi), "=&S"(esi)
+				     : "0"(edi), "1"(esi)
+				     : "memory");
 	}
 	switch (n % 4) {
 		/* tail */
-	case 0: return to;
-	case 1: __asm__ __volatile__("movsb"
-		:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
-		return to;
-	case 2: __asm__ __volatile__("movsw"
-		:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
-		return to;
-	default: __asm__ __volatile__("movsw\n\tmovsb"
-		:"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+	case 0:
+		return to;
+	case 1:
+		asm volatile("movsb"
+			     : "=&D"(edi), "=&S"(esi)
+			     : "0"(edi), "1"(esi)
+			     : "memory");
+		return to;
+	case 2:
105 :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); 130 case 2:
106 return to; 131 asm volatile("movsw"
132 : "=&D"(edi), "=&S"(esi)
133 : "0"(edi), "1"(esi)
134 : "memory");
135 return to;
136 default:
137 asm volatile("movsw\n\tmovsb"
138 : "=&D"(edi), "=&S"(esi)
139 : "0"(edi), "1"(esi)
140 : "memory");
141 return to;
107 } 142 }
108} 143}
109 144
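
Note on __constant_memcpy() above: as its comment says, the switch looks
ugly but vanishes at compile time; with a constant n only the matching
case survives, typically as one or two plain move instructions. A
userspace sketch of the same constant-dispatch trick (my_memcpy and
copy4 are illustrative names; only the n == 4 case is shown):

    #include <stdio.h>
    #include <string.h>

    /* Hand-unrolled 4-byte copy: one 32-bit move, no loop. */
    static inline void *copy4(void *to, const void *from)
    {
            *(int *)to = *(const int *)from;
            return to;
    }

    /* Dispatch on a compile-time-constant size, like the memcpy()
     * macro in string_32.h does via __builtin_constant_p(). */
    #define my_memcpy(t, f, n)                                     \
            (__builtin_constant_p(n) && (n) == 4 ? copy4((t), (f)) \
                                                 : memcpy((t), (f), (n)))

    int main(void)
    {
            int src = 42, dst = 0;

            my_memcpy(&dst, &src, 4);       /* constant size: takes copy4() */
            printf("%d\n", dst);
            return 0;
    }
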
@@ -117,87 +152,86 @@ static __always_inline void * __constant_memcpy(void * to, const void * from, si
117 * This CPU favours 3DNow strongly (eg AMD Athlon) 152 * This CPU favours 3DNow strongly (eg AMD Athlon)
118 */ 153 */
119 154
120static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) 155static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
121{ 156{
122 if (len < 512) 157 if (len < 512)
123 return __constant_memcpy(to, from, len); 158 return __constant_memcpy(to, from, len);
124 return _mmx_memcpy(to, from, len); 159 return _mmx_memcpy(to, from, len);
125} 160}
126 161
127static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) 162static inline void *__memcpy3d(void *to, const void *from, size_t len)
128{ 163{
129 if (len < 512) 164 if (len < 512)
130 return __memcpy(to, from, len); 165 return __memcpy(to, from, len);
131 return _mmx_memcpy(to, from, len); 166 return _mmx_memcpy(to, from, len);
132} 167}
133 168
134#define memcpy(t, f, n) \ 169#define memcpy(t, f, n) \
135(__builtin_constant_p(n) ? \ 170 (__builtin_constant_p((n)) \
136 __constant_memcpy3d((t),(f),(n)) : \ 171 ? __constant_memcpy3d((t), (f), (n)) \
137 __memcpy3d((t),(f),(n))) 172 : __memcpy3d((t), (f), (n)))
138 173
139#else 174#else
140 175
141/* 176/*
142 * No 3D Now! 177 * No 3D Now!
143 */ 178 */
144 179
145#define memcpy(t, f, n) \ 180#define memcpy(t, f, n) \
146(__builtin_constant_p(n) ? \ 181 (__builtin_constant_p((n)) \
147 __constant_memcpy((t),(f),(n)) : \ 182 ? __constant_memcpy((t), (f), (n)) \
148 __memcpy((t),(f),(n))) 183 : __memcpy((t), (f), (n)))
149 184
150#endif 185#endif
151 186
152#define __HAVE_ARCH_MEMMOVE 187#define __HAVE_ARCH_MEMMOVE
153void *memmove(void * dest,const void * src, size_t n); 188void *memmove(void *dest, const void *src, size_t n);
154 189
155#define memcmp __builtin_memcmp 190#define memcmp __builtin_memcmp
156 191
157#define __HAVE_ARCH_MEMCHR 192#define __HAVE_ARCH_MEMCHR
158extern void *memchr(const void * cs,int c,size_t count); 193extern void *memchr(const void *cs, int c, size_t count);
159 194
160static inline void * __memset_generic(void * s, char c,size_t count) 195static inline void *__memset_generic(void *s, char c, size_t count)
161{ 196{
162int d0, d1; 197 int d0, d1;
163__asm__ __volatile__( 198 asm volatile("rep\n\t"
164 "rep\n\t" 199 "stosb"
165 "stosb" 200 : "=&c" (d0), "=&D" (d1)
166 : "=&c" (d0), "=&D" (d1) 201 : "a" (c), "1" (s), "0" (count)
167 :"a" (c),"1" (s),"0" (count) 202 : "memory");
168 :"memory"); 203 return s;
169return s;
170} 204}
171 205
172/* we might want to write optimized versions of these later */ 206/* we might want to write optimized versions of these later */
173#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) 207#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
174 208
175/* 209/*
176 * memset(x,0,y) is a reasonably common thing to do, so we want to fill 210 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
177 * things 32 bits at a time even when we don't know the size of the 211 * things 32 bits at a time even when we don't know the size of the
178 * area at compile-time.. 212 * area at compile-time..
179 */ 213 */
180static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) 214static __always_inline
215void *__constant_c_memset(void *s, unsigned long c, size_t count)
181{ 216{
182int d0, d1; 217 int d0, d1;
183__asm__ __volatile__( 218 asm volatile("rep ; stosl\n\t"
184 "rep ; stosl\n\t" 219 "testb $2,%b3\n\t"
185 "testb $2,%b3\n\t" 220 "je 1f\n\t"
186 "je 1f\n\t" 221 "stosw\n"
187 "stosw\n" 222 "1:\ttestb $1,%b3\n\t"
188 "1:\ttestb $1,%b3\n\t" 223 "je 2f\n\t"
189 "je 2f\n\t" 224 "stosb\n"
190 "stosb\n" 225 "2:"
191 "2:" 226 : "=&c" (d0), "=&D" (d1)
192 :"=&c" (d0), "=&D" (d1) 227 : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
193 :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) 228 : "memory");
194 :"memory"); 229 return s;
195return (s);
196} 230}
197 231
198/* Added by Gertjan van Wingerde to make minix and sysv module work */ 232/* Added by Gertjan van Wingerde to make minix and sysv module work */
199#define __HAVE_ARCH_STRNLEN 233#define __HAVE_ARCH_STRNLEN
200extern size_t strnlen(const char * s, size_t count); 234extern size_t strnlen(const char *s, size_t count);
201/* end of additional stuff */ 235/* end of additional stuff */
202 236
203#define __HAVE_ARCH_STRSTR 237#define __HAVE_ARCH_STRSTR
@@ -207,66 +241,85 @@ extern char *strstr(const char *cs, const char *ct);
207 * This looks horribly ugly, but the compiler can optimize it totally, 241 * This looks horribly ugly, but the compiler can optimize it totally,
208 * as by now we know that both pattern and count are constant. 242 * as by now we know that both pattern and count are constant.
209 */ 243 */
210static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) 244static __always_inline
245void *__constant_c_and_count_memset(void *s, unsigned long pattern,
246 size_t count)
211{ 247{
212 switch (count) { 248 switch (count) {
249 case 0:
250 return s;
251 case 1:
252 *(unsigned char *)s = pattern & 0xff;
253 return s;
254 case 2:
255 *(unsigned short *)s = pattern & 0xffff;
256 return s;
257 case 3:
258 *(unsigned short *)s = pattern & 0xffff;
259 *((unsigned char *)s + 2) = pattern & 0xff;
260 return s;
261 case 4:
262 *(unsigned long *)s = pattern;
263 return s;
264 }
265
266#define COMMON(x) \
267 asm volatile("rep ; stosl" \
268 x \
269 : "=&c" (d0), "=&D" (d1) \
270 : "a" (eax), "0" (count/4), "1" ((long)s) \
271 : "memory")
272
273 {
274 int d0, d1;
275#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
276 /* Workaround for broken gcc 4.0 */
277 register unsigned long eax asm("%eax") = pattern;
278#else
279 unsigned long eax = pattern;
280#endif
281
282 switch (count % 4) {
213 case 0: 283 case 0:
284 COMMON("");
214 return s; 285 return s;
215 case 1: 286 case 1:
216 *(unsigned char *)s = pattern & 0xff; 287 COMMON("\n\tstosb");
217 return s; 288 return s;
218 case 2: 289 case 2:
219 *(unsigned short *)s = pattern & 0xffff; 290 COMMON("\n\tstosw");
220 return s; 291 return s;
221 case 3: 292 default:
222 *(unsigned short *)s = pattern & 0xffff; 293 COMMON("\n\tstosw\n\tstosb");
223 *(2+(unsigned char *)s) = pattern & 0xff;
224 return s;
225 case 4:
226 *(unsigned long *)s = pattern;
227 return s; 294 return s;
295 }
228 } 296 }
229#define COMMON(x) \ 297
230__asm__ __volatile__( \
231 "rep ; stosl" \
232 x \
233 : "=&c" (d0), "=&D" (d1) \
234 : "a" (pattern),"0" (count/4),"1" ((long) s) \
235 : "memory")
236{
237 int d0, d1;
238 switch (count % 4) {
239 case 0: COMMON(""); return s;
240 case 1: COMMON("\n\tstosb"); return s;
241 case 2: COMMON("\n\tstosw"); return s;
242 default: COMMON("\n\tstosw\n\tstosb"); return s;
243 }
244}
245
246#undef COMMON 298#undef COMMON
247} 299}
248 300
249#define __constant_c_x_memset(s, c, count) \ 301#define __constant_c_x_memset(s, c, count) \
250(__builtin_constant_p(count) ? \ 302 (__builtin_constant_p(count) \
251 __constant_c_and_count_memset((s),(c),(count)) : \ 303 ? __constant_c_and_count_memset((s), (c), (count)) \
252 __constant_c_memset((s),(c),(count))) 304 : __constant_c_memset((s), (c), (count)))
253 305
254#define __memset(s, c, count) \ 306#define __memset(s, c, count) \
255(__builtin_constant_p(count) ? \ 307 (__builtin_constant_p(count) \
256 __constant_count_memset((s),(c),(count)) : \ 308 ? __constant_count_memset((s), (c), (count)) \
257 __memset_generic((s),(c),(count))) 309 : __memset_generic((s), (c), (count)))
258 310
259#define __HAVE_ARCH_MEMSET 311#define __HAVE_ARCH_MEMSET
260#define memset(s, c, count) \ 312#define memset(s, c, count) \
261(__builtin_constant_p(c) ? \ 313 (__builtin_constant_p(c) \
262 __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ 314 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
263 __memset((s),(c),(count))) 315 (count)) \
316 : __memset((s), (c), (count)))
264 317
265/* 318/*
266 * find the first occurrence of byte 'c', or 1 past the area if none 319 * find the first occurrence of byte 'c', or 1 past the area if none
267 */ 320 */
268#define __HAVE_ARCH_MEMSCAN 321#define __HAVE_ARCH_MEMSCAN
269extern void *memscan(void * addr, int c, size_t size); 322extern void *memscan(void *addr, int c, size_t size);
270 323
271#endif /* __KERNEL__ */ 324#endif /* __KERNEL__ */
272 325
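
One detail of the memset() macro above worth spelling out: multiplying a
byte by 0x01010101UL replicates it into all four byte lanes, which is
what lets __constant_c_memset() store the pattern a full 32 bits at a
time with rep;stosl. A small demonstration of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned char c = 0xAB;
            unsigned long pattern = 0x01010101UL * c;

            /* 0xAB * 0x01010101 = 0xAB000000 + 0xAB0000 + 0xAB00 + 0xAB */
            printf("pattern = %#lx\n", pattern & 0xffffffffUL); /* 0xabababab */
            return 0;
    }
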
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8a..8675c6782a7d 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_32_SUSPEND_H
7#define __ASM_X86_32_SUSPEND_H
8
6#include <asm/desc.h> 9#include <asm/desc.h>
7#include <asm/i387.h> 10#include <asm/i387.h>
8 11
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
44/* routines for saving/restoring kernel state */ 47/* routines for saving/restoring kernel state */
45extern int acpi_save_state_mem(void); 48extern int acpi_save_state_mem(void);
46#endif 49#endif
50
51#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index a2f04cd79b29..983ce37c491f 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -136,7 +136,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
136#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base)) 136#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
137#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1)) 137#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
138 138
139extern void load_gs_index(unsigned); 139extern void native_load_gs_index(unsigned);
140 140
141/* 141/*
142 * Load a segment. Fall back on loading the zero 142 * Load a segment. Fall back on loading the zero
@@ -153,14 +153,14 @@ extern void load_gs_index(unsigned);
153 "jmp 2b\n" \ 153 "jmp 2b\n" \
154 ".previous\n" \ 154 ".previous\n" \
155 _ASM_EXTABLE(1b,3b) \ 155 _ASM_EXTABLE(1b,3b) \
156 : :"r" (value), "r" (0)) 156 : :"r" (value), "r" (0) : "memory")
157 157
158 158
159/* 159/*
160 * Save a segment register away 160 * Save a segment register away
161 */ 161 */
162#define savesegment(seg, value) \ 162#define savesegment(seg, value) \
163 asm volatile("mov %%" #seg ",%0":"=rm" (value)) 163 asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
164 164
165static inline unsigned long get_limit(unsigned long segment) 165static inline unsigned long get_limit(unsigned long segment)
166{ 166{
@@ -282,6 +282,7 @@ static inline void native_wbinvd(void)
282#ifdef CONFIG_X86_64 282#ifdef CONFIG_X86_64
283#define read_cr8() (native_read_cr8()) 283#define read_cr8() (native_read_cr8())
284#define write_cr8(x) (native_write_cr8(x)) 284#define write_cr8(x) (native_write_cr8(x))
285#define load_gs_index native_load_gs_index
285#endif 286#endif
286 287
287/* Clear the 'TS' bit */ 288/* Clear the 'TS' bit */
@@ -289,7 +290,7 @@ static inline void native_wbinvd(void)
289 290
290#endif/* CONFIG_PARAVIRT */ 291#endif/* CONFIG_PARAVIRT */
291 292
292#define stts() write_cr0(8 | read_cr0()) 293#define stts() write_cr0(read_cr0() | X86_CR0_TS)
293 294
294#endif /* __KERNEL__ */ 295#endif /* __KERNEL__ */
295 296
@@ -303,7 +304,6 @@ static inline void clflush(volatile void *__p)
303void disable_hlt(void); 304void disable_hlt(void);
304void enable_hlt(void); 305void enable_hlt(void);
305 306
306extern int es7000_plat;
307void cpu_idle_wait(void); 307void cpu_idle_wait(void);
308 308
309extern unsigned long arch_align_stack(unsigned long sp); 309extern unsigned long arch_align_stack(unsigned long sp);
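
Two notes on the system.h hunk: stts() now uses the named X86_CR0_TS bit
instead of the magic constant 8, and the segment helpers gain a
"memory" clobber. The clobber is what makes gcc treat the asm as a
compiler barrier: it may not cache values in registers across the
statement or reorder memory accesses around it. The kernel's barrier()
macro is built from exactly this; a standalone sketch:

    #include <stdio.h>

    /* Compiler-only barrier: empty asm with a "memory" clobber, the
     * same shape as the kernel's barrier() macro. */
    #define barrier() asm volatile("" ::: "memory")

    static int flag;

    int main(void)
    {
            flag = 1;
            barrier();                  /* the store may not sink below this */
            printf("flag=%d\n", flag);  /* flag is reloaded from memory */
            return 0;
    }
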
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 77244f17993f..895339d2bc0b 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -1,9 +1,253 @@
1/* thread_info.h: low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
1#ifndef _ASM_X86_THREAD_INFO_H 7#ifndef _ASM_X86_THREAD_INFO_H
8#define _ASM_X86_THREAD_INFO_H
9
10#include <linux/compiler.h>
11#include <asm/page.h>
12#include <asm/types.h>
13
14/*
15 * low level task data that entry.S needs immediate access to
16 * - this struct should fit entirely inside of one cache line
17 * - this struct shares the supervisor stack pages
18 */
19#ifndef __ASSEMBLY__
20struct task_struct;
21struct exec_domain;
22#include <asm/processor.h>
23
24struct thread_info {
25 struct task_struct *task; /* main task structure */
26 struct exec_domain *exec_domain; /* execution domain */
27 unsigned long flags; /* low level flags */
28 __u32 status; /* thread synchronous flags */
29 __u32 cpu; /* current CPU */
30 int preempt_count; /* 0 => preemptable,
31 <0 => BUG */
32 mm_segment_t addr_limit;
33 struct restart_block restart_block;
34 void __user *sysenter_return;
35#ifdef CONFIG_X86_32
36 unsigned long previous_esp; /* ESP of the previous stack in
37 case of nested (IRQ) stacks
38 */
39 __u8 supervisor_stack[0];
40#endif
41};
42
43#define INIT_THREAD_INFO(tsk) \
44{ \
45 .task = &tsk, \
46 .exec_domain = &default_exec_domain, \
47 .flags = 0, \
48 .cpu = 0, \
49 .preempt_count = 1, \
50 .addr_limit = KERNEL_DS, \
51 .restart_block = { \
52 .fn = do_no_restart_syscall, \
53 }, \
54}
55
56#define init_thread_info (init_thread_union.thread_info)
57#define init_stack (init_thread_union.stack)
58
59#else /* !__ASSEMBLY__ */
60
61#include <asm/asm-offsets.h>
62
63#endif
64
65/*
66 * thread information flags
67 * - these are process state flags that various assembly files
68 * may need to access
69 * - pending work-to-be-done flags are in LSW
70 * - other flags in MSW
71 * Warning: layout of LSW is hardcoded in entry.S
72 */
73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
74#define TIF_SIGPENDING 2 /* signal pending */
75#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
76#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
77#define TIF_IRET 5 /* force IRET */
2#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
3# include "thread_info_32.h" 79#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
80#endif
81#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
82#define TIF_SECCOMP 8 /* secure computing */
83#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
84#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
85#define TIF_NOTSC 16 /* TSC is not accessible in userland */
86#define TIF_IA32 17 /* 32bit process */
87#define TIF_FORK 18 /* ret_from_fork */
88#define TIF_ABI_PENDING 19
89#define TIF_MEMDIE 20
90#define TIF_DEBUG 21 /* uses debug registers */
91#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
92#define TIF_FREEZE 23 /* is freezing for suspend */
93#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
94#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
95#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
96#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
97
98#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
99#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
100#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
101#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
102#define _TIF_IRET (1 << TIF_IRET)
103#ifdef CONFIG_X86_32
104#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
4#else 105#else
5# include "thread_info_64.h" 106#define _TIF_SYSCALL_EMU 0
6#endif 107#endif
108#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
109#define _TIF_SECCOMP (1 << TIF_SECCOMP)
110#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
111#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
112#define _TIF_NOTSC (1 << TIF_NOTSC)
113#define _TIF_IA32 (1 << TIF_IA32)
114#define _TIF_FORK (1 << TIF_FORK)
115#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
116#define _TIF_DEBUG (1 << TIF_DEBUG)
117#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
118#define _TIF_FREEZE (1 << TIF_FREEZE)
119#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
120#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
121#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
122#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
123
124/* work to do on interrupt/exception return */
125#define _TIF_WORK_MASK \
126 (0x0000FFFF & \
127 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP| \
128 _TIF_SECCOMP|_TIF_SYSCALL_EMU))
129
130/* work to do on any return to user space */
131#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
132
133/* Only used for 64 bit */
134#define _TIF_DO_NOTIFY_MASK \
135 (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
136
137/* flags to check in __switch_to() */
138#define _TIF_WORK_CTXSW \
139 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
140 _TIF_NOTSC)
141
142#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
143#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
144
145#define PREEMPT_ACTIVE 0x10000000
146
147/* thread information allocation */
148#ifdef CONFIG_DEBUG_STACK_USAGE
149#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
150#else
151#define THREAD_FLAGS GFP_KERNEL
152#endif
153
154#define alloc_thread_info(tsk) \
155 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
156
157#ifdef CONFIG_X86_32
158
159#define STACK_WARN (THREAD_SIZE/8)
160/*
161 * macros/functions for gaining access to the thread information structure
162 *
163 * preempt_count needs to be 1 initially, until the scheduler is functional.
164 */
165#ifndef __ASSEMBLY__
166
167
168/* how to get the current stack pointer from C */
169register unsigned long current_stack_pointer asm("esp") __used;
170
171/* how to get the thread information struct from C */
172static inline struct thread_info *current_thread_info(void)
173{
174 return (struct thread_info *)
175 (current_stack_pointer & ~(THREAD_SIZE - 1));
176}
177
178#else /* !__ASSEMBLY__ */
179
180/* how to get the thread information struct from ASM */
181#define GET_THREAD_INFO(reg) \
182 movl $-THREAD_SIZE, reg; \
183 andl %esp, reg
184
185/* use this one if reg already contains %esp */
186#define GET_THREAD_INFO_WITH_ESP(reg) \
187 andl $-THREAD_SIZE, reg
188
189#endif
190
191#else /* X86_32 */
192
193#include <asm/pda.h>
194
195/*
196 * macros/functions for gaining access to the thread information structure
197 * preempt_count needs to be 1 initially, until the scheduler is functional.
198 */
199#ifndef __ASSEMBLY__
200static inline struct thread_info *current_thread_info(void)
201{
202 struct thread_info *ti;
203 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
204 return ti;
205}
206
207/* do not use in interrupt context */
208static inline struct thread_info *stack_thread_info(void)
209{
210 struct thread_info *ti;
211 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
212 return ti;
213}
214
215#else /* !__ASSEMBLY__ */
216
217/* how to get the thread information struct from ASM */
218#define GET_THREAD_INFO(reg) \
219 movq %gs:pda_kernelstack,reg ; \
220 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
221
222#endif
223
224#endif /* !X86_32 */
225
226/*
227 * Thread-synchronous status.
228 *
229 * This is different from the flags in that nobody else
230 * ever touches our thread-synchronous status, so we don't
231 * have to worry about atomic accesses.
232 */
233#define TS_USEDFPU 0x0001 /* FPU was used by this task
234 this quantum (SMP) */
235#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
236#define TS_POLLING 0x0004 /* true if in idle loop
237 and not sleeping */
238#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
239
240#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
241
242#ifndef __ASSEMBLY__
243#define HAVE_SET_RESTORE_SIGMASK 1
244static inline void set_restore_sigmask(void)
245{
246 struct thread_info *ti = current_thread_info();
247 ti->status |= TS_RESTORE_SIGMASK;
248 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
249}
250#endif /* !__ASSEMBLY__ */
7 251
8#ifndef __ASSEMBLY__ 252#ifndef __ASSEMBLY__
9extern void arch_task_cache_init(void); 253extern void arch_task_cache_init(void);
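
Note on the unified thread_info.h above: the 32-bit
current_thread_info() works because each kernel stack is a
power-of-two-sized, size-aligned block with the thread_info at its base,
so masking off the low bits of %esp recovers that base. The arithmetic
in a standalone sketch (the THREAD_SIZE value and sample stack pointer
are assumed for illustration):

    #include <stdio.h>

    #define THREAD_SIZE 8192UL      /* assumed: 8 KiB, size-aligned stacks */

    int main(void)
    {
            /* Pretend this is %esp somewhere inside the current stack. */
            unsigned long sp   = 0xc1234e70UL;
            unsigned long base = sp & ~(THREAD_SIZE - 1);   /* round down */

            printf("sp=%#lx -> thread_info at %#lx\n", sp, base);
            return 0;       /* prints ... thread_info at 0xc1234000 */
    }
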
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
deleted file mode 100644
index b6338829d1a8..000000000000
--- a/include/asm-x86/thread_info_32.h
+++ /dev/null
@@ -1,205 +0,0 @@
1/* thread_info.h: i386 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
13#include <asm/page.h>
14
15#ifndef __ASSEMBLY__
16#include <asm/processor.h>
17#endif
18
19/*
20 * low level task data that entry.S needs immediate access to
21 * - this struct should fit entirely inside of one cache line
22 * - this struct shares the supervisor stack pages
23 * - if the contents of this structure are changed,
24 * the assembly constants must also be changed
25 */
26#ifndef __ASSEMBLY__
27
28struct thread_info {
29 struct task_struct *task; /* main task structure */
30 struct exec_domain *exec_domain; /* execution domain */
31 unsigned long flags; /* low level flags */
32 unsigned long status; /* thread-synchronous flags */
33 __u32 cpu; /* current CPU */
34 int preempt_count; /* 0 => preemptable,
35 <0 => BUG */
36 mm_segment_t addr_limit; /* thread address space:
37 0-0xBFFFFFFF user-thread
38 0-0xFFFFFFFF kernel-thread
39 */
40 void *sysenter_return;
41 struct restart_block restart_block;
42 unsigned long previous_esp; /* ESP of the previous stack in
43 case of nested (IRQ) stacks
44 */
45 __u8 supervisor_stack[0];
46};
47
48#else /* !__ASSEMBLY__ */
49
50#include <asm/asm-offsets.h>
51
52#endif
53
54#define PREEMPT_ACTIVE 0x10000000
55#ifdef CONFIG_4KSTACKS
56#define THREAD_SIZE (4096)
57#else
58#define THREAD_SIZE (8192)
59#endif
60
61#define STACK_WARN (THREAD_SIZE/8)
62/*
63 * macros/functions for gaining access to the thread information structure
64 *
65 * preempt_count needs to be 1 initially, until the scheduler is functional.
66 */
67#ifndef __ASSEMBLY__
68
69#define INIT_THREAD_INFO(tsk) \
70{ \
71 .task = &tsk, \
72 .exec_domain = &default_exec_domain, \
73 .flags = 0, \
74 .cpu = 0, \
75 .preempt_count = 1, \
76 .addr_limit = KERNEL_DS, \
77 .restart_block = { \
78 .fn = do_no_restart_syscall, \
79 }, \
80}
81
82#define init_thread_info (init_thread_union.thread_info)
83#define init_stack (init_thread_union.stack)
84
85
86/* how to get the current stack pointer from C */
87register unsigned long current_stack_pointer asm("esp") __used;
88
89/* how to get the thread information struct from C */
90static inline struct thread_info *current_thread_info(void)
91{
92 return (struct thread_info *)
93 (current_stack_pointer & ~(THREAD_SIZE - 1));
94}
95
96/* thread information allocation */
97#ifdef CONFIG_DEBUG_STACK_USAGE
98#define alloc_thread_info(tsk) ((struct thread_info *) \
99 __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
100#else
101#define alloc_thread_info(tsk) ((struct thread_info *) \
102 __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
103#endif
104
105#else /* !__ASSEMBLY__ */
106
107/* how to get the thread information struct from ASM */
108#define GET_THREAD_INFO(reg) \
109 movl $-THREAD_SIZE, reg; \
110 andl %esp, reg
111
112/* use this one if reg already contains %esp */
113#define GET_THREAD_INFO_WITH_ESP(reg) \
114 andl $-THREAD_SIZE, reg
115
116#endif
117
118/*
119 * thread information flags
120 * - these are process state flags that various
121 * assembly files may need to access
122 * - pending work-to-be-done flags are in LSW
123 * - other flags in MSW
124 */
125#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
126#define TIF_SIGPENDING 1 /* signal pending */
127#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
128#define TIF_SINGLESTEP 3 /* restore singlestep on return to
129 user mode */
130#define TIF_IRET 4 /* return with iret */
131#define TIF_SYSCALL_EMU 5 /* syscall emulation active */
132#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
133#define TIF_SECCOMP 7 /* secure computing */
134#define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */
135#define TIF_MEMDIE 16
136#define TIF_DEBUG 17 /* uses debug registers */
137#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
138#define TIF_FREEZE 19 /* is freezing for suspend */
139#define TIF_NOTSC 20 /* TSC is not accessible in userland */
140#define TIF_FORCED_TF 21 /* true if TF in eflags artificially */
141#define TIF_DEBUGCTLMSR 22 /* uses thread_struct.debugctlmsr */
142#define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */
143#define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */
144
145#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
146#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
147#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
148#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
149#define _TIF_IRET (1 << TIF_IRET)
150#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
151#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
152#define _TIF_SECCOMP (1 << TIF_SECCOMP)
153#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
154#define _TIF_DEBUG (1 << TIF_DEBUG)
155#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
156#define _TIF_FREEZE (1 << TIF_FREEZE)
157#define _TIF_NOTSC (1 << TIF_NOTSC)
158#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
159#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
160#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
161#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
162
163/* work to do on interrupt/exception return */
164#define _TIF_WORK_MASK \
165 (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
166 _TIF_SECCOMP | _TIF_SYSCALL_EMU))
167/* work to do on any return to u-space */
168#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
169
170/* flags to check in __switch_to() */
171#define _TIF_WORK_CTXSW \
172 (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
173 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
174#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
175#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)
176
177
178/*
179 * Thread-synchronous status.
180 *
181 * This is different from the flags in that nobody else
182 * ever touches our thread-synchronous status, so we don't
183 * have to worry about atomic accesses.
184 */
185#define TS_USEDFPU 0x0001 /* FPU was used by this task
186 this quantum (SMP) */
187#define TS_POLLING 0x0002 /* True if in idle loop
188 and not sleeping */
189#define TS_RESTORE_SIGMASK 0x0004 /* restore signal mask in do_signal() */
190
191#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
192
193#ifndef __ASSEMBLY__
194#define HAVE_SET_RESTORE_SIGMASK 1
195static inline void set_restore_sigmask(void)
196{
197 struct thread_info *ti = current_thread_info();
198 ti->status |= TS_RESTORE_SIGMASK;
199 set_bit(TIF_SIGPENDING, &ti->flags);
200}
201#endif /* !__ASSEMBLY__ */
202
203#endif /* __KERNEL__ */
204
205#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
deleted file mode 100644
index cb69f70abba1..000000000000
--- a/include/asm-x86/thread_info_64.h
+++ /dev/null
@@ -1,195 +0,0 @@
1/* thread_info.h: x86_64 low-level thread information
2 *
3 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */
6
7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H
9
10#ifdef __KERNEL__
11
12#include <asm/page.h>
13#include <asm/types.h>
14#include <asm/pda.h>
15
16/*
17 * low level task data that entry.S needs immediate access to
18 * - this struct should fit entirely inside of one cache line
19 * - this struct shares the supervisor stack pages
20 */
21#ifndef __ASSEMBLY__
22struct task_struct;
23struct exec_domain;
24#include <asm/processor.h>
25
26struct thread_info {
27 struct task_struct *task; /* main task structure */
28 struct exec_domain *exec_domain; /* execution domain */
29 __u32 flags; /* low level flags */
30 __u32 status; /* thread synchronous flags */
31 __u32 cpu; /* current CPU */
32 int preempt_count; /* 0 => preemptable,
33 <0 => BUG */
34 mm_segment_t addr_limit;
35 struct restart_block restart_block;
36#ifdef CONFIG_IA32_EMULATION
37 void __user *sysenter_return;
38#endif
39};
40#endif
41
42/*
43 * macros/functions for gaining access to the thread information structure
44 * preempt_count needs to be 1 initially, until the scheduler is functional.
45 */
46#ifndef __ASSEMBLY__
47#define INIT_THREAD_INFO(tsk) \
48{ \
49 .task = &tsk, \
50 .exec_domain = &default_exec_domain, \
51 .flags = 0, \
52 .cpu = 0, \
53 .preempt_count = 1, \
54 .addr_limit = KERNEL_DS, \
55 .restart_block = { \
56 .fn = do_no_restart_syscall, \
57 }, \
58}
59
60#define init_thread_info (init_thread_union.thread_info)
61#define init_stack (init_thread_union.stack)
62
63static inline struct thread_info *current_thread_info(void)
64{
65 struct thread_info *ti;
66 ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
67 return ti;
68}
69
70/* do not use in interrupt context */
71static inline struct thread_info *stack_thread_info(void)
72{
73 struct thread_info *ti;
74 asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
75 return ti;
76}
77
78/* thread information allocation */
79#ifdef CONFIG_DEBUG_STACK_USAGE
80#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
81#else
82#define THREAD_FLAGS GFP_KERNEL
83#endif
84
85#define alloc_thread_info(tsk) \
86 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
87
88#else /* !__ASSEMBLY__ */
89
90/* how to get the thread information struct from ASM */
91#define GET_THREAD_INFO(reg) \
92 movq %gs:pda_kernelstack,reg ; \
93 subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
94
95#endif
96
97/*
98 * thread information flags
99 * - these are process state flags that various assembly files
100 * may need to access
101 * - pending work-to-be-done flags are in LSW
102 * - other flags in MSW
103 * Warning: layout of LSW is hardcoded in entry.S
104 */
105#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
106#define TIF_SIGPENDING 2 /* signal pending */
107#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
108#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
109#define TIF_IRET 5 /* force IRET */
110#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
111#define TIF_SECCOMP 8 /* secure computing */
112#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
113#define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */
114/* 16 free */
115#define TIF_IA32 17 /* 32bit process */
116#define TIF_FORK 18 /* ret_from_fork */
117#define TIF_ABI_PENDING 19
118#define TIF_MEMDIE 20
119#define TIF_DEBUG 21 /* uses debug registers */
120#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
121#define TIF_FREEZE 23 /* is freezing for suspend */
122#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
123#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
124#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
125#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
126#define TIF_NOTSC 28 /* TSC is not accessible in userland */
127
128#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
129#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
130#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
131#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
132#define _TIF_IRET (1 << TIF_IRET)
133#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
134#define _TIF_SECCOMP (1 << TIF_SECCOMP)
135#define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
136#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
137#define _TIF_IA32 (1 << TIF_IA32)
138#define _TIF_FORK (1 << TIF_FORK)
139#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
140#define _TIF_DEBUG (1 << TIF_DEBUG)
141#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
142#define _TIF_FREEZE (1 << TIF_FREEZE)
143#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
144#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
145#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
146#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)
147#define _TIF_NOTSC (1 << TIF_NOTSC)
148
149/* work to do on interrupt/exception return */
150#define _TIF_WORK_MASK \
151 (0x0000FFFF & \
152 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP))
153/* work to do on any return to user space */
154#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
155
156#define _TIF_DO_NOTIFY_MASK \
157 (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
158
159/* flags to check in __switch_to() */
160#define _TIF_WORK_CTXSW \
161 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|_TIF_NOTSC)
162#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
163#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
164
165#define PREEMPT_ACTIVE 0x10000000
166
167/*
168 * Thread-synchronous status.
169 *
170 * This is different from the flags in that nobody else
171 * ever touches our thread-synchronous status, so we don't
172 * have to worry about atomic accesses.
173 */
174#define TS_USEDFPU 0x0001 /* FPU was used by this task
175 this quantum (SMP) */
176#define TS_COMPAT 0x0002 /* 32bit syscall active */
177#define TS_POLLING 0x0004 /* true if in idle loop
178 and not sleeping */
179#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
180
181#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
182
183#ifndef __ASSEMBLY__
184#define HAVE_SET_RESTORE_SIGMASK 1
185static inline void set_restore_sigmask(void)
186{
187 struct thread_info *ti = current_thread_info();
188 ti->status |= TS_RESTORE_SIGMASK;
189 set_bit(TIF_SIGPENDING, &ti->flags);
190}
191#endif /* !__ASSEMBLY__ */
192
193#endif /* __KERNEL__ */
194
195#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
index bce72d7a958c..a17fa473e91d 100644
--- a/include/asm-x86/time.h
+++ b/include/asm-x86/time.h
@@ -56,4 +56,6 @@ static inline int native_set_wallclock(unsigned long nowtime)
56 56
57#endif /* CONFIG_PARAVIRT */ 57#endif /* CONFIG_PARAVIRT */
58 58
59extern unsigned long __init calibrate_cpu(void);
60
59#endif 61#endif
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h
index 4f6fcb050c11..fb2a4ddddf3d 100644
--- a/include/asm-x86/timer.h
+++ b/include/asm-x86/timer.h
@@ -7,14 +7,14 @@
7#define TICK_SIZE (tick_nsec / 1000) 7#define TICK_SIZE (tick_nsec / 1000)
8 8
9unsigned long long native_sched_clock(void); 9unsigned long long native_sched_clock(void);
10unsigned long native_calculate_cpu_khz(void); 10unsigned long native_calibrate_tsc(void);
11 11
12extern int timer_ack; 12extern int timer_ack;
13extern int no_timer_check; 13extern int no_timer_check;
14extern int recalibrate_cpu_khz(void); 14extern int recalibrate_cpu_khz(void);
15 15
16#ifndef CONFIG_PARAVIRT 16#ifndef CONFIG_PARAVIRT
17#define calculate_cpu_khz() native_calculate_cpu_khz() 17#define calibrate_tsc() native_calibrate_tsc()
18#endif 18#endif
19 19
20/* Accelerators for sched_clock() 20/* Accelerators for sched_clock()
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index dcf3f8131d6b..90ac7718469a 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -35,79 +35,93 @@
35# endif 35# endif
36#endif 36#endif
37 37
38/* Node not present */
39#define NUMA_NO_NODE (-1)
40
38#ifdef CONFIG_NUMA 41#ifdef CONFIG_NUMA
39#include <linux/cpumask.h> 42#include <linux/cpumask.h>
40#include <asm/mpspec.h> 43#include <asm/mpspec.h>
41 44
42/* Mappings between logical cpu number and node number */
43#ifdef CONFIG_X86_32 45#ifdef CONFIG_X86_32
44extern int cpu_to_node_map[];
45#else
46/* Returns the number of the current Node. */
47#define numa_node_id() (early_cpu_to_node(raw_smp_processor_id()))
48#endif
49
50DECLARE_PER_CPU(int, x86_cpu_to_node_map);
51
52#ifdef CONFIG_SMP
53extern int x86_cpu_to_node_map_init[];
54extern void *x86_cpu_to_node_map_early_ptr;
55#else
56#define x86_cpu_to_node_map_early_ptr NULL
57#endif
58 46
47/* Mappings between node number and cpus on that node. */
59extern cpumask_t node_to_cpumask_map[]; 48extern cpumask_t node_to_cpumask_map[];
60 49
61#define NUMA_NO_NODE (-1) 50/* Mappings between logical cpu number and node number */
51extern int cpu_to_node_map[];
62 52
63/* Returns the number of the node containing CPU 'cpu' */ 53/* Returns the number of the node containing CPU 'cpu' */
64#ifdef CONFIG_X86_32
65#define early_cpu_to_node(cpu) cpu_to_node(cpu)
66static inline int cpu_to_node(int cpu) 54static inline int cpu_to_node(int cpu)
67{ 55{
68 return cpu_to_node_map[cpu]; 56 return cpu_to_node_map[cpu];
69} 57}
58#define early_cpu_to_node(cpu) cpu_to_node(cpu)
70 59
71#else /* CONFIG_X86_64 */ 60/* Returns a bitmask of CPUs on Node 'node'.
72 61 *
73#ifdef CONFIG_SMP 62 * Side note: this function creates the returned cpumask on the stack
74static inline int early_cpu_to_node(int cpu) 63 * so with a high NR_CPUS count, excessive stack space is used. The
64 * node_to_cpumask_ptr function should be used whenever possible.
65 */
66static inline cpumask_t node_to_cpumask(int node)
75{ 67{
76 int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; 68 return node_to_cpumask_map[node];
77
78 if (cpu_to_node_map)
79 return cpu_to_node_map[cpu];
80 else if (per_cpu_offset(cpu))
81 return per_cpu(x86_cpu_to_node_map, cpu);
82 else
83 return NUMA_NO_NODE;
84} 69}
85#else
86#define early_cpu_to_node(cpu) cpu_to_node(cpu)
87#endif
88 70
71#else /* CONFIG_X86_64 */
72
73/* Mappings between node number and cpus on that node. */
74extern cpumask_t *node_to_cpumask_map;
75
76/* Mappings between logical cpu number and node number */
77DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
78
79/* Returns the number of the current Node. */
80#define numa_node_id() read_pda(nodenumber)
81
82#ifdef CONFIG_DEBUG_PER_CPU_MAPS
83extern int cpu_to_node(int cpu);
84extern int early_cpu_to_node(int cpu);
85extern const cpumask_t *_node_to_cpumask_ptr(int node);
86extern cpumask_t node_to_cpumask(int node);
87
88#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
89
90/* Returns the number of the node containing CPU 'cpu' */
89static inline int cpu_to_node(int cpu) 91static inline int cpu_to_node(int cpu)
90{ 92{
91#ifdef CONFIG_DEBUG_PER_CPU_MAPS
92 if (x86_cpu_to_node_map_early_ptr) {
93 printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n",
94 (int)cpu);
95 dump_stack();
96 return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
97 }
98#endif
99 return per_cpu(x86_cpu_to_node_map, cpu); 93 return per_cpu(x86_cpu_to_node_map, cpu);
100} 94}
101 95
102#ifdef CONFIG_NUMA 96/* Same function but used if called before per_cpu areas are setup */
97static inline int early_cpu_to_node(int cpu)
98{
99 if (early_per_cpu_ptr(x86_cpu_to_node_map))
100 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
101
102 return per_cpu(x86_cpu_to_node_map, cpu);
103}
103 104
104/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ 105/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
106static inline const cpumask_t *_node_to_cpumask_ptr(int node)
107{
108 return &node_to_cpumask_map[node];
109}
110
111/* Returns a bitmask of CPUs on Node 'node'. */
112static inline cpumask_t node_to_cpumask(int node)
113{
114 return node_to_cpumask_map[node];
115}
116
117#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
118
119/* Replace default node_to_cpumask_ptr with optimized version */
105#define node_to_cpumask_ptr(v, node) \ 120#define node_to_cpumask_ptr(v, node) \
106 cpumask_t *v = &(node_to_cpumask_map[node]) 121 const cpumask_t *v = _node_to_cpumask_ptr(node)
107 122
108#define node_to_cpumask_ptr_next(v, node) \ 123#define node_to_cpumask_ptr_next(v, node) \
109 v = &(node_to_cpumask_map[node]) 124 v = _node_to_cpumask_ptr(node)
110#endif
111 125
112#endif /* CONFIG_X86_64 */ 126#endif /* CONFIG_X86_64 */
113 127
@@ -117,20 +131,6 @@ static inline int cpu_to_node(int cpu)
117 */ 131 */
118#define parent_node(node) (node) 132#define parent_node(node) (node)
119 133
120/* Returns a bitmask of CPUs on Node 'node'. */
121static inline cpumask_t node_to_cpumask(int node)
122{
123 return node_to_cpumask_map[node];
124}
125
126/* Returns the number of the first CPU on Node 'node'. */
127static inline int node_to_first_cpu(int node)
128{
129 cpumask_t mask = node_to_cpumask(node);
130
131 return first_cpu(mask);
132}
133
134#define pcibus_to_node(bus) __pcibus_to_node(bus) 134#define pcibus_to_node(bus) __pcibus_to_node(bus)
135#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus) 135#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
136 136
@@ -180,12 +180,44 @@ extern int __node_distance(int, int);
180#define node_distance(a, b) __node_distance(a, b) 180#define node_distance(a, b) __node_distance(a, b)
181#endif 181#endif
182 182
183#else /* CONFIG_NUMA */ 183#else /* !CONFIG_NUMA */
184 184
185#define numa_node_id() 0
186#define cpu_to_node(cpu) 0
187#define early_cpu_to_node(cpu) 0
188
189static inline const cpumask_t *_node_to_cpumask_ptr(int node)
190{
191 return &cpu_online_map;
192}
193static inline cpumask_t node_to_cpumask(int node)
194{
195 return cpu_online_map;
196}
197static inline int node_to_first_cpu(int node)
198{
199 return first_cpu(cpu_online_map);
200}
201
202/* Replace default node_to_cpumask_ptr with optimized version */
203#define node_to_cpumask_ptr(v, node) \
204 const cpumask_t *v = _node_to_cpumask_ptr(node)
205
206#define node_to_cpumask_ptr_next(v, node) \
207 v = _node_to_cpumask_ptr(node)
185#endif 208#endif
186 209
187#include <asm-generic/topology.h> 210#include <asm-generic/topology.h>
188 211
212#ifdef CONFIG_NUMA
213/* Returns the number of the first CPU on Node 'node'. */
214static inline int node_to_first_cpu(int node)
215{
216 node_to_cpumask_ptr(mask, node);
217 return first_cpu(*mask);
218}
219#endif
220
189extern cpumask_t cpu_coregroup_map(int cpu); 221extern cpumask_t cpu_coregroup_map(int cpu);
190 222
191#ifdef ENABLE_TOPO_DEFINES 223#ifdef ENABLE_TOPO_DEFINES
@@ -193,6 +225,9 @@ extern cpumask_t cpu_coregroup_map(int cpu);
193#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) 225#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
194#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) 226#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
195#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) 227#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
228
229/* indicates that pointers to the topology cpumask_t maps are valid */
230#define arch_provides_topology_pointers yes
196#endif 231#endif
197 232
198static inline void arch_fix_phys_package_id(int num, u32 slot) 233static inline void arch_fix_phys_package_id(int num, u32 slot)
@@ -220,4 +255,4 @@ static inline void set_mp_bus_to_node(int busnum, int node)
220} 255}
221#endif 256#endif
222 257
223#endif 258#endif /* _ASM_X86_TOPOLOGY_H */
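
Note on the topology.h comment above about node_to_cpumask(): returning
a cpumask_t by value copies the entire mask through the caller's stack,
which is why the patch routes users toward the pointer form. The size
difference, with an illustrative NR_CPUS (the struct below is a
simplified stand-in, not the kernel's layout):

    #include <stdio.h>

    #define NR_CPUS 4096    /* illustrative high-end config */

    typedef struct {
            unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
    } cpumask_t;

    static cpumask_t node_masks[2];

    static cpumask_t node_to_cpumask(int node)      /* 512-byte copy */
    {
            return node_masks[node];
    }

    static const cpumask_t *node_to_cpumask_ptr_of(int node)  /* one pointer */
    {
            return &node_masks[node];
    }

    int main(void)
    {
            cpumask_t m = node_to_cpumask(0);           /* whole-struct copy */
            const cpumask_t *p = node_to_cpumask_ptr_of(0);

            printf("by value: %zu bytes; by pointer: %zu bytes\n",
                   sizeof(m), sizeof(p));
            return 0;
    }
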
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index 548873ab5fc1..cb6f6ee45b8f 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -48,7 +48,6 @@ static __always_inline cycles_t vget_cycles(void)
48extern void tsc_init(void); 48extern void tsc_init(void);
49extern void mark_tsc_unstable(char *reason); 49extern void mark_tsc_unstable(char *reason);
50extern int unsynchronized_tsc(void); 50extern int unsynchronized_tsc(void);
51extern void init_tsc_clocksource(void);
52int check_tsc_unstable(void); 51int check_tsc_unstable(void);
53 52
54/* 53/*
@@ -58,7 +57,6 @@ int check_tsc_unstable(void);
58extern void check_tsc_sync_source(int cpu); 57extern void check_tsc_sync_source(int cpu);
59extern void check_tsc_sync_target(void); 58extern void check_tsc_sync_target(void);
60 59
61extern void tsc_calibrate(void);
62extern int notsc_setup(char *); 60extern int notsc_setup(char *);
63 61
64#endif 62#endif
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 9fefd2947e78..f6fa4d841bbc 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,453 @@
1#ifndef _ASM_UACCESS_H_
2#define _ASM_UACCESS_H_
3/*
4 * User space memory access functions
5 */
6#include <linux/errno.h>
7#include <linux/compiler.h>
8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h>
11#include <asm/asm.h>
12#include <asm/page.h>
13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27#define KERNEL_DS MAKE_MM_SEG(-1UL)
28#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
29
30#define get_ds() (KERNEL_DS)
31#define get_fs() (current_thread_info()->addr_limit)
32#define set_fs(x) (current_thread_info()->addr_limit = (x))
33
34#define segment_eq(a, b) ((a).seg == (b).seg)
35
36#define __addr_ok(addr) \
37 ((unsigned long __force)(addr) < \
38 (current_thread_info()->addr_limit.seg))
39
40/*
41 * Test whether a block of memory is a valid user space address.
42 * Returns 0 if the range is valid, nonzero otherwise.
43 *
44 * This is equivalent to the following test:
45 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
46 *
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */
49
50#define __range_not_ok(addr, size) \
51({ \
52 unsigned long flag, roksum; \
53 __chk_user_ptr(addr); \
54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55 : "=&r" (flag), "=r" (roksum) \
56 : "1" (addr), "g" ((long)(size)), \
57 "rm" (current_thread_info()->addr_limit.seg)); \
58 flag; \
59})
60
61/**
62 * access_ok: - Checks if a user space pointer is valid
63 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
64 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
65 * to write to a block, it is always safe to read from it.
66 * @addr: User space pointer to start of block to check
67 * @size: Size of block to check
68 *
69 * Context: User context only. This function may sleep.
70 *
71 * Checks if a pointer to a block of memory in user space is valid.
72 *
73 * Returns true (nonzero) if the memory block may be valid, false (zero)
74 * if it is definitely invalid.
75 *
76 * Note that, depending on architecture, this function probably just
77 * checks that the pointer is in the user space range - after calling
78 * this function, memory access functions may still return -EFAULT.
79 */
80#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
81
82/*
83 * The exception table consists of pairs of addresses: the first is the
84 * address of an instruction that is allowed to fault, and the second is
85 * the address at which the program should continue. No registers are
86 * modified, so it is entirely up to the continuation code to figure out
87 * what to do.
88 *
89 * All the routines below use bits of fixup code that are out of line
90 * with the main instruction path. This means when everything is well,
91 * we don't even have to jump over them. Further, they do not intrude
92 * on our cache or tlb entries.
93 */
94
95struct exception_table_entry {
96 unsigned long insn, fixup;
97};
98
99extern int fixup_exception(struct pt_regs *regs);
100
101/*
102 * These are the main single-value transfer routines. They automatically
103 * use the right size if we just have the right pointer type.
104 *
105 * This gets kind of ugly. We want to return _two_ values in "get_user()"
106 * and yet we don't want to do any pointers, because that is too much
107 * of a performance impact. Thus we have a few rather ugly macros here,
108 * and hide all the ugliness from the user.
109 *
110 * The "__xxx" versions of the user access functions are versions that
111 * do not verify the address space, that must have been done previously
112 * with a separate "access_ok()" call (this is used when we do multiple
113 * accesses to the same area of user memory).
114 */
115
116extern int __get_user_1(void);
117extern int __get_user_2(void);
118extern int __get_user_4(void);
119extern int __get_user_8(void);
120extern int __get_user_bad(void);
121
122#define __get_user_x(size, ret, x, ptr) \
123 asm volatile("call __get_user_" #size \
124 : "=a" (ret),"=d" (x) \
125 : "0" (ptr)) \
126
127/* Careful: we have to cast the result to the type of the pointer
128 * for sign reasons */
129
130/**
131 * get_user: - Get a simple variable from user space.
132 * @x: Variable to store result.
133 * @ptr: Source address, in user space.
134 *
135 * Context: User context only. This function may sleep.
136 *
137 * This macro copies a single simple variable from user space to kernel
138 * space. It supports simple types like char and int, but not larger
139 * data types like structures or arrays.
140 *
141 * @ptr must have pointer-to-simple-variable type, and the result of
142 * dereferencing @ptr must be assignable to @x without a cast.
143 *
144 * Returns zero on success, or -EFAULT on error.
145 * On error, the variable @x is set to zero.
146 */
147#ifdef CONFIG_X86_32
148#define __get_user_8(__ret_gu, __val_gu, ptr) \
149 __get_user_x(X, __ret_gu, __val_gu, ptr)
150#else
151#define __get_user_8(__ret_gu, __val_gu, ptr) \
152 __get_user_x(8, __ret_gu, __val_gu, ptr)
153#endif
154
155#define get_user(x, ptr) \
156({ \
157 int __ret_gu; \
158 unsigned long __val_gu; \
159 __chk_user_ptr(ptr); \
160 switch (sizeof(*(ptr))) { \
161 case 1: \
162 __get_user_x(1, __ret_gu, __val_gu, ptr); \
163 break; \
164 case 2: \
165 __get_user_x(2, __ret_gu, __val_gu, ptr); \
166 break; \
167 case 4: \
168 __get_user_x(4, __ret_gu, __val_gu, ptr); \
169 break; \
170 case 8: \
171 __get_user_8(__ret_gu, __val_gu, ptr); \
172 break; \
173 default: \
174 __get_user_x(X, __ret_gu, __val_gu, ptr); \
175 break; \
176 } \
177 (x) = (__typeof__(*(ptr)))__val_gu; \
178 __ret_gu; \
179})
180
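
As the comment before get_user() explains, the macro has to produce two
results, the error code and the fetched value, without routing the value
through a pointer; it does that as a gcc statement expression. A
userspace stand-in showing the same shape (get_user_sim is an
illustrative mock; the real macro dispatches to the __get_user_N asm
helpers):

    #include <stdio.h>

    /* Mock: returns 0 and assigns *ptr to x, or -14 (-EFAULT) for a
     * NULL pointer, never dereferencing it. */
    #define get_user_sim(x, ptr)                    \
    ({                                              \
            int __ret = (ptr) ? 0 : -14;            \
            if (!__ret)                             \
                    (x) = *(ptr);                   \
            __ret;                                  \
    })

    int main(void)
    {
            int src = 7, val = 0;
            int *bad = NULL;

            if (get_user_sim(val, &src) == 0)
                    printf("fetched %d\n", val);
            printf("NULL pointer -> %d\n", get_user_sim(val, bad));
            return 0;
    }
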
181#define __put_user_x(size, x, ptr, __ret_pu) \
182 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
183 :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
184
185
186
187#ifdef CONFIG_X86_32
188#define __put_user_u64(x, addr, err) \
189 asm volatile("1: movl %%eax,0(%2)\n" \
190 "2: movl %%edx,4(%2)\n" \
191 "3:\n" \
192 ".section .fixup,\"ax\"\n" \
193 "4: movl %3,%0\n" \
194 " jmp 3b\n" \
195 ".previous\n" \
196 _ASM_EXTABLE(1b, 4b) \
197 _ASM_EXTABLE(2b, 4b) \
198 : "=r" (err) \
199 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
200
201#define __put_user_x8(x, ptr, __ret_pu) \
202 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
203 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
204#else
205#define __put_user_u64(x, ptr, retval) \
206 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
207#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
208#endif
209
210extern void __put_user_bad(void);
211
212/*
213 * Strange magic calling convention: pointer in %ecx,
214 * value in %eax(:%edx), return value in %eax. clobbers %rbx
215 */
216extern void __put_user_1(void);
217extern void __put_user_2(void);
218extern void __put_user_4(void);
219extern void __put_user_8(void);
220
221#ifdef CONFIG_X86_WP_WORKS_OK
222
223/**
224 * put_user: - Write a simple value into user space.
225 * @x: Value to copy to user space.
226 * @ptr: Destination address, in user space.
227 *
228 * Context: User context only. This function may sleep.
229 *
230 * This macro copies a single simple value from kernel space to user
231 * space. It supports simple types like char and int, but not larger
232 * data types like structures or arrays.
233 *
234 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
235 * to the result of dereferencing @ptr.
236 *
237 * Returns zero on success, or -EFAULT on error.
238 */
239#define put_user(x, ptr) \
240({ \
241 int __ret_pu; \
242 __typeof__(*(ptr)) __pu_val; \
243 __chk_user_ptr(ptr); \
244 __pu_val = x; \
245 switch (sizeof(*(ptr))) { \
246 case 1: \
247 __put_user_x(1, __pu_val, ptr, __ret_pu); \
248 break; \
249 case 2: \
250 __put_user_x(2, __pu_val, ptr, __ret_pu); \
251 break; \
252 case 4: \
253 __put_user_x(4, __pu_val, ptr, __ret_pu); \
254 break; \
255 case 8: \
256 __put_user_x8(__pu_val, ptr, __ret_pu); \
257 break; \
258 default: \
259 __put_user_x(X, __pu_val, ptr, __ret_pu); \
260 break; \
261 } \
262 __ret_pu; \
263})
264
265#define __put_user_size(x, ptr, size, retval, errret) \
266do { \
267 retval = 0; \
268 __chk_user_ptr(ptr); \
269 switch (size) { \
270 case 1: \
271 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
272 break; \
273 case 2: \
274 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
275 break; \
276 case 4: \
277 __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
278 break; \
279 case 8: \
280 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
281 break; \
282 default: \
283 __put_user_bad(); \
284 } \
285} while (0)
286
287#else
288
289#define __put_user_size(x, ptr, size, retval, errret) \
290do { \
291 __typeof__(*(ptr))__pus_tmp = x; \
292 retval = 0; \
293 \
294 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
295 retval = errret; \
296} while (0)
297
298#define put_user(x, ptr) \
299({ \
300 int __ret_pu; \
301 __typeof__(*(ptr))__pus_tmp = x; \
302 __ret_pu = 0; \
303 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
304 sizeof(*(ptr))) != 0)) \
305 __ret_pu = -EFAULT; \
306 __ret_pu; \
307})
308#endif
309
310#ifdef CONFIG_X86_32
311#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
312#else
313#define __get_user_asm_u64(x, ptr, retval, errret) \
314 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
315#endif
316
317#define __get_user_size(x, ptr, size, retval, errret) \
318do { \
319 retval = 0; \
320 __chk_user_ptr(ptr); \
321 switch (size) { \
322 case 1: \
323 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
324 break; \
325 case 2: \
326 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
327 break; \
328 case 4: \
329 __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
330 break; \
331 case 8: \
332 __get_user_asm_u64(x, ptr, retval, errret); \
333 break; \
334 default: \
335 (x) = __get_user_bad(); \
336 } \
337} while (0)
338
339#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
340 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
341 "2:\n" \
342 ".section .fixup,\"ax\"\n" \
343 "3: mov %3,%0\n" \
344 " xor"itype" %"rtype"1,%"rtype"1\n" \
345 " jmp 2b\n" \
346 ".previous\n" \
347 _ASM_EXTABLE(1b, 3b) \
348 : "=r" (err), ltype(x) \
349 : "m" (__m(addr)), "i" (errret), "0" (err))
350
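[Editor's note: for reference, hand-expanding the 2-byte case of the macro above — __get_user_asm(x, ptr, err, "w", "w", "=r", -EFAULT) — yields roughly the following; a sketch only, assuming _ASM_EXTABLE emits its usual insn/fixup address pair:]

	asm volatile("1:	movw %2,%w1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	mov %3,%0\n"
		     "	xorw %w1,%w1\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "=r" (err), "=r" (x)	/* ltype "=r": any register */
		     : "m" (__m(ptr)), "i" (-EFAULT), "0" (err));
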
351#define __put_user_nocheck(x, ptr, size) \
352({ \
353 long __pu_err; \
354 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
355 __pu_err; \
356})
357
358#define __get_user_nocheck(x, ptr, size) \
359({ \
360 long __gu_err; \
361 unsigned long __gu_val; \
362 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
363 (x) = (__force __typeof__(*(ptr)))__gu_val; \
364 __gu_err; \
365})
366
367/* FIXME: this hack is definitely wrong -AK */
368struct __large_struct { unsigned long buf[100]; };
369#define __m(x) (*(struct __large_struct __user *)(x))
370
371/*
372 * Tell gcc we read from memory instead of writing: this is because
373 * we do not write to any memory gcc knows about, so there are no
374 * aliasing issues.
375 */
376#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
377 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
378 "2:\n" \
379 ".section .fixup,\"ax\"\n" \
380 "3: mov %3,%0\n" \
381 " jmp 2b\n" \
382 ".previous\n" \
383 _ASM_EXTABLE(1b, 3b) \
384 : "=r"(err) \
385 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
386/**
387 * __get_user: - Get a simple variable from user space, with less checking.
388 * @x: Variable to store result.
389 * @ptr: Source address, in user space.
390 *
391 * Context: User context only. This function may sleep.
392 *
393 * This macro copies a single simple variable from user space to kernel
394 * space. It supports simple types like char and int, but not larger
395 * data types like structures or arrays.
396 *
397 * @ptr must have pointer-to-simple-variable type, and the result of
398 * dereferencing @ptr must be assignable to @x without a cast.
399 *
400 * Caller must check the pointer with access_ok() before calling this
401 * function.
402 *
403 * Returns zero on success, or -EFAULT on error.
404 * On error, the variable @x is set to zero.
405 */
406
407#define __get_user(x, ptr) \
408 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
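[Editor's note: a hypothetical pattern for the checked/unchecked split described above — validate the whole user range once with access_ok(), then use the cheaper unchecked accessor for each element:]

	/* Sketch: read two adjacent ints after a single access_ok() check. */
	static int example_read_pair(int __user *uptr, int *a, int *b)
	{
		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
			return -EFAULT;
		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
			return -EFAULT;
		return 0;
	}
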
409/**
410 * __put_user: - Write a simple value into user space, with less checking.
411 * @x: Value to copy to user space.
412 * @ptr: Destination address, in user space.
413 *
414 * Context: User context only. This function may sleep.
415 *
416 * This macro copies a single simple value from kernel space to user
417 * space. It supports simple types like char and int, but not larger
418 * data types like structures or arrays.
419 *
420 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
421 * to the result of dereferencing @ptr.
422 *
423 * Caller must check the pointer with access_ok() before calling this
424 * function.
425 *
426 * Returns zero on success, or -EFAULT on error.
427 */
428
429#define __put_user(x, ptr) \
430 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
431
432#define __get_user_unaligned __get_user
433#define __put_user_unaligned __put_user
434
435/*
436 * movsl can be slow when source and dest are not both 8-byte aligned
437 */
438#ifdef CONFIG_X86_INTEL_USERCOPY
439extern struct movsl_mask {
440 int mask;
441} ____cacheline_aligned_in_smp movsl_mask;
442#endif
443
444#define ARCH_HAS_NOCACHE_UACCESS 1
445
1#ifdef CONFIG_X86_32 446#ifdef CONFIG_X86_32
2# include "uaccess_32.h" 447# include "uaccess_32.h"
3#else 448#else
449# define ARCH_HAS_SEARCH_EXTABLE
4# include "uaccess_64.h" 450# include "uaccess_64.h"
5#endif 451#endif
452
453#endif
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 8e7595c1f34e..6fdef39a0bcb 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -11,426 +11,6 @@
11#include <asm/asm.h> 11#include <asm/asm.h>
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14#define VERIFY_READ 0
15#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
25#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
26
27
28#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
29#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
30
31#define get_ds() (KERNEL_DS)
32#define get_fs() (current_thread_info()->addr_limit)
33#define set_fs(x) (current_thread_info()->addr_limit = (x))
34
35#define segment_eq(a, b) ((a).seg == (b).seg)
36
37/*
38 * movsl can be slow when source and dest are not both 8-byte aligned
39 */
40#ifdef CONFIG_X86_INTEL_USERCOPY
41extern struct movsl_mask {
42 int mask;
43} ____cacheline_aligned_in_smp movsl_mask;
44#endif
45
46#define __addr_ok(addr) \
47 ((unsigned long __force)(addr) < \
48 (current_thread_info()->addr_limit.seg))
49
50/*
51 * Test whether a block of memory is a valid user space address.
52 * Returns 0 if the range is valid, nonzero otherwise.
53 *
54 * This is equivalent to the following test:
55 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
56 *
57 * This needs 33-bit arithmetic. We have a carry...
58 */
59#define __range_ok(addr, size) \
60({ \
61 unsigned long flag, roksum; \
62 __chk_user_ptr(addr); \
63 asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
64 :"=&r" (flag), "=r" (roksum) \
65 :"1" (addr), "g" ((int)(size)), \
66 "rm" (current_thread_info()->addr_limit.seg)); \
67 flag; \
68})
69
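[Editor's note: in plain C, the add/sbb sequence above is equivalent to the following overflow-safe test — a sketch only, using a 64-bit temporary where the asm uses the carry flag:]

	/* Sketch: nonzero when addr + size overflows or escapes the limit. */
	static inline int example_range_bad(unsigned long addr, unsigned long size,
					    unsigned long limit)
	{
		unsigned long long sum = (unsigned long long)addr + size;

		return sum > limit;
	}
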
70/**
71 * access_ok: - Checks if a user space pointer is valid
72 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
73 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
74 * to write to a block, it is always safe to read from it.
75 * @addr: User space pointer to start of block to check
76 * @size: Size of block to check
77 *
78 * Context: User context only. This function may sleep.
79 *
80 * Checks if a pointer to a block of memory in user space is valid.
81 *
82 * Returns true (nonzero) if the memory block may be valid, false (zero)
83 * if it is definitely invalid.
84 *
85 * Note that, depending on architecture, this function probably just
86 * checks that the pointer is in the user space range - after calling
87 * this function, memory access functions may still return -EFAULT.
88 */
89#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
90
91/*
92 * The exception table consists of pairs of addresses: the first is the
93 * address of an instruction that is allowed to fault, and the second is
94 * the address at which the program should continue. No registers are
95 * modified, so it is entirely up to the continuation code to figure out
96 * what to do.
97 *
98 * All the routines below use bits of fixup code that are out of line
99 * with the main instruction path. This means when everything is well,
100 * we don't even have to jump over them. Further, they do not intrude
101 * on our cache or tlb entries.
102 */
103
104struct exception_table_entry {
105 unsigned long insn, fixup;
106};
107
108extern int fixup_exception(struct pt_regs *regs);
109
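[Editor's note: the recovery step the comment above describes can be sketched as follows; the real fixup_exception() uses a sorted table and binary search, so the linear scan here is purely for illustration:]

	/* Sketch: on a fault, resume at the fixup address, if one is registered. */
	static int example_fixup(struct pt_regs *regs,
				 const struct exception_table_entry *tbl, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (tbl[i].insn == regs->ip) {	/* faulting instruction? */
				regs->ip = tbl[i].fixup;	/* resume out of line */
				return 1;
			}
		return 0;	/* no entry: a genuine kernel fault */
	}
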
110/*
111 * These are the main single-value transfer routines. They automatically
112 * use the right size if we just have the right pointer type.
113 *
114 * This gets kind of ugly. We want to return _two_ values in "get_user()"
115 * and yet we don't want to do any pointers, because that is too much
116 * of a performance impact. Thus we have a few rather ugly macros here,
117 * and hide all the ugliness from the user.
118 *
119 * The "__xxx" versions of the user access functions are versions that
120 * do not verify the address space, that must have been done previously
121 * with a separate "access_ok()" call (this is used when we do multiple
122 * accesses to the same area of user memory).
123 */
124
125extern void __get_user_1(void);
126extern void __get_user_2(void);
127extern void __get_user_4(void);
128
129#define __get_user_x(size, ret, x, ptr) \
130 asm volatile("call __get_user_" #size \
131 :"=a" (ret),"=d" (x) \
132 :"0" (ptr))
133
134
135/* Careful: we have to cast the result to the type of the pointer
136 * for sign reasons */
137
138/**
139 * get_user: - Get a simple variable from user space.
140 * @x: Variable to store result.
141 * @ptr: Source address, in user space.
142 *
143 * Context: User context only. This function may sleep.
144 *
145 * This macro copies a single simple variable from user space to kernel
146 * space. It supports simple types like char and int, but not larger
147 * data types like structures or arrays.
148 *
149 * @ptr must have pointer-to-simple-variable type, and the result of
150 * dereferencing @ptr must be assignable to @x without a cast.
151 *
152 * Returns zero on success, or -EFAULT on error.
153 * On error, the variable @x is set to zero.
154 */
155#define get_user(x, ptr) \
156({ \
157 int __ret_gu; \
158 unsigned long __val_gu; \
159 __chk_user_ptr(ptr); \
160 switch (sizeof(*(ptr))) { \
161 case 1: \
162 __get_user_x(1, __ret_gu, __val_gu, ptr); \
163 break; \
164 case 2: \
165 __get_user_x(2, __ret_gu, __val_gu, ptr); \
166 break; \
167 case 4: \
168 __get_user_x(4, __ret_gu, __val_gu, ptr); \
169 break; \
170 default: \
171 __get_user_x(X, __ret_gu, __val_gu, ptr); \
172 break; \
173 } \
174 (x) = (__typeof__(*(ptr)))__val_gu; \
175 __ret_gu; \
176})
177
178extern void __put_user_bad(void);
179
180/*
181 * Strange magic calling convention: pointer in %ecx,
182 * value in %eax(:%edx), return value in %eax, no clobbers.
183 */
184extern void __put_user_1(void);
185extern void __put_user_2(void);
186extern void __put_user_4(void);
187extern void __put_user_8(void);
188
189#define __put_user_1(x, ptr) \
190 asm volatile("call __put_user_1" : "=a" (__ret_pu) \
191 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
192
193#define __put_user_2(x, ptr) \
194 asm volatile("call __put_user_2" : "=a" (__ret_pu) \
195 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
196
197#define __put_user_4(x, ptr) \
198 asm volatile("call __put_user_4" : "=a" (__ret_pu) \
199 : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
200
201#define __put_user_8(x, ptr) \
202 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
203 : "A" ((typeof(*(ptr)))(x)), "c" (ptr))
204
205#define __put_user_X(x, ptr) \
206 asm volatile("call __put_user_X" : "=a" (__ret_pu) \
207 : "c" (ptr))
208
209/**
210 * put_user: - Write a simple value into user space.
211 * @x: Value to copy to user space.
212 * @ptr: Destination address, in user space.
213 *
214 * Context: User context only. This function may sleep.
215 *
216 * This macro copies a single simple value from kernel space to user
217 * space. It supports simple types like char and int, but not larger
218 * data types like structures or arrays.
219 *
220 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
221 * to the result of dereferencing @ptr.
222 *
223 * Returns zero on success, or -EFAULT on error.
224 */
225#ifdef CONFIG_X86_WP_WORKS_OK
226
227#define put_user(x, ptr) \
228({ \
229 int __ret_pu; \
230 __typeof__(*(ptr)) __pu_val; \
231 __chk_user_ptr(ptr); \
232 __pu_val = x; \
233 switch (sizeof(*(ptr))) { \
234 case 1: \
235 __put_user_1(__pu_val, ptr); \
236 break; \
237 case 2: \
238 __put_user_2(__pu_val, ptr); \
239 break; \
240 case 4: \
241 __put_user_4(__pu_val, ptr); \
242 break; \
243 case 8: \
244 __put_user_8(__pu_val, ptr); \
245 break; \
246 default: \
247 __put_user_X(__pu_val, ptr); \
248 break; \
249 } \
250 __ret_pu; \
251})
252
253#else
254#define put_user(x, ptr) \
255({ \
256 int __ret_pu; \
257 __typeof__(*(ptr))__pus_tmp = x; \
258 __ret_pu = 0; \
259 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
260 sizeof(*(ptr))) != 0)) \
261 __ret_pu = -EFAULT; \
262 __ret_pu; \
263})
264
265
266#endif
267
268/**
269 * __get_user: - Get a simple variable from user space, with less checking.
270 * @x: Variable to store result.
271 * @ptr: Source address, in user space.
272 *
273 * Context: User context only. This function may sleep.
274 *
275 * This macro copies a single simple variable from user space to kernel
276 * space. It supports simple types like char and int, but not larger
277 * data types like structures or arrays.
278 *
279 * @ptr must have pointer-to-simple-variable type, and the result of
280 * dereferencing @ptr must be assignable to @x without a cast.
281 *
282 * Caller must check the pointer with access_ok() before calling this
283 * function.
284 *
285 * Returns zero on success, or -EFAULT on error.
286 * On error, the variable @x is set to zero.
287 */
288#define __get_user(x, ptr) \
289 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
290
291
292/**
293 * __put_user: - Write a simple value into user space, with less checking.
294 * @x: Value to copy to user space.
295 * @ptr: Destination address, in user space.
296 *
297 * Context: User context only. This function may sleep.
298 *
299 * This macro copies a single simple value from kernel space to user
300 * space. It supports simple types like char and int, but not larger
301 * data types like structures or arrays.
302 *
303 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
304 * to the result of dereferencing @ptr.
305 *
306 * Caller must check the pointer with access_ok() before calling this
307 * function.
308 *
309 * Returns zero on success, or -EFAULT on error.
310 */
311#define __put_user(x, ptr) \
312 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
313
314#define __put_user_nocheck(x, ptr, size) \
315({ \
316 long __pu_err; \
317 __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
318 __pu_err; \
319})
320
321
322#define __put_user_u64(x, addr, err) \
323 asm volatile("1: movl %%eax,0(%2)\n" \
324 "2: movl %%edx,4(%2)\n" \
325 "3:\n" \
326 ".section .fixup,\"ax\"\n" \
327 "4: movl %3,%0\n" \
328 " jmp 3b\n" \
329 ".previous\n" \
330 _ASM_EXTABLE(1b, 4b) \
331 _ASM_EXTABLE(2b, 4b) \
332 : "=r" (err) \
333 : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
334
335#ifdef CONFIG_X86_WP_WORKS_OK
336
337#define __put_user_size(x, ptr, size, retval, errret) \
338do { \
339 retval = 0; \
340 __chk_user_ptr(ptr); \
341 switch (size) { \
342 case 1: \
343 __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
344 break; \
345 case 2: \
346 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
347 break; \
348 case 4: \
349 __put_user_asm(x, ptr, retval, "l", "", "ir", errret); \
350 break; \
351 case 8: \
352 __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
353 break; \
354 default: \
355 __put_user_bad(); \
356 } \
357} while (0)
358
359#else
360
361#define __put_user_size(x, ptr, size, retval, errret) \
362do { \
363 __typeof__(*(ptr))__pus_tmp = x; \
364 retval = 0; \
365 \
366 if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
367 retval = errret; \
368} while (0)
369
370#endif
371struct __large_struct { unsigned long buf[100]; };
372#define __m(x) (*(struct __large_struct __user *)(x))
373
374/*
375 * Tell gcc we read from memory instead of writing: this is because
376 * we do not write to any memory gcc knows about, so there are no
377 * aliasing issues.
378 */
379#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
380 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
381 "2:\n" \
382 ".section .fixup,\"ax\"\n" \
383 "3: movl %3,%0\n" \
384 " jmp 2b\n" \
385 ".previous\n" \
386 _ASM_EXTABLE(1b, 3b) \
387 : "=r"(err) \
388 : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
389
390
391#define __get_user_nocheck(x, ptr, size) \
392({ \
393 long __gu_err; \
394 unsigned long __gu_val; \
395 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
396 (x) = (__typeof__(*(ptr)))__gu_val; \
397 __gu_err; \
398})
399
400extern long __get_user_bad(void);
401
402#define __get_user_size(x, ptr, size, retval, errret) \
403do { \
404 retval = 0; \
405 __chk_user_ptr(ptr); \
406 switch (size) { \
407 case 1: \
408 __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
409 break; \
410 case 2: \
411 __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
412 break; \
413 case 4: \
414 __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \
415 break; \
416 default: \
417 (x) = __get_user_bad(); \
418 } \
419} while (0)
420
421#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
422 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
423 "2:\n" \
424 ".section .fixup,\"ax\"\n" \
425 "3: movl %3,%0\n" \
426 " xor"itype" %"rtype"1,%"rtype"1\n" \
427 " jmp 2b\n" \
428 ".previous\n" \
429 _ASM_EXTABLE(1b, 3b) \
430 : "=r" (err), ltype (x) \
431 : "m" (__m(addr)), "i" (errret), "0" (err))
432
433
434unsigned long __must_check __copy_to_user_ll 14unsigned long __must_check __copy_to_user_ll
435 (void __user *to, const void *from, unsigned long n); 15 (void __user *to, const void *from, unsigned long n);
436unsigned long __must_check __copy_from_user_ll 16unsigned long __must_check __copy_from_user_ll
@@ -576,8 +156,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
576 return __copy_from_user_ll(to, from, n); 156 return __copy_from_user_ll(to, from, n);
577} 157}
578 158
579#define ARCH_HAS_NOCACHE_UACCESS
580
581static __always_inline unsigned long __copy_from_user_nocache(void *to, 159static __always_inline unsigned long __copy_from_user_nocache(void *to,
582 const void __user *from, unsigned long n) 160 const void __user *from, unsigned long n)
583{ 161{
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b8a2f4339903..515d4dce96b5 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -9,265 +9,6 @@
9#include <linux/prefetch.h> 9#include <linux/prefetch.h>
10#include <asm/page.h> 10#include <asm/page.h>
11 11
12#define VERIFY_READ 0
13#define VERIFY_WRITE 1
14
15/*
16 * The fs value determines whether argument validity checking should be
17 * performed or not. If get_fs() == USER_DS, checking is performed, with
18 * get_fs() == KERNEL_DS, checking is bypassed.
19 *
20 * For historical reasons, these macros are grossly misnamed.
21 */
22
23#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
24
25#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
26#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
27
28#define get_ds() (KERNEL_DS)
29#define get_fs() (current_thread_info()->addr_limit)
30#define set_fs(x) (current_thread_info()->addr_limit = (x))
31
32#define segment_eq(a, b) ((a).seg == (b).seg)
33
34#define __addr_ok(addr) (!((unsigned long)(addr) & \
35 (current_thread_info()->addr_limit.seg)))
36
37/*
 38 * Uhhuh, this needs 65-bit arithmetic. We have a carry...
39 */
40#define __range_not_ok(addr, size) \
41({ \
42 unsigned long flag, roksum; \
43 __chk_user_ptr(addr); \
 44 asm("# range_ok\n\t" \
45 "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
46 : "=&r" (flag), "=r" (roksum) \
47 : "1" (addr), "g" ((long)(size)), \
48 "g" (current_thread_info()->addr_limit.seg)); \
49 flag; \
50})
51
52#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
53
54/*
55 * The exception table consists of pairs of addresses: the first is the
56 * address of an instruction that is allowed to fault, and the second is
57 * the address at which the program should continue. No registers are
58 * modified, so it is entirely up to the continuation code to figure out
59 * what to do.
60 *
61 * All the routines below use bits of fixup code that are out of line
62 * with the main instruction path. This means when everything is well,
63 * we don't even have to jump over them. Further, they do not intrude
64 * on our cache or tlb entries.
65 */
66
67struct exception_table_entry {
68 unsigned long insn, fixup;
69};
70
71extern int fixup_exception(struct pt_regs *regs);
72
73#define ARCH_HAS_SEARCH_EXTABLE
74
75/*
76 * These are the main single-value transfer routines. They automatically
77 * use the right size if we just have the right pointer type.
78 *
79 * This gets kind of ugly. We want to return _two_ values in "get_user()"
80 * and yet we don't want to do any pointers, because that is too much
81 * of a performance impact. Thus we have a few rather ugly macros here,
82 * and hide all the ugliness from the user.
83 *
84 * The "__xxx" versions of the user access functions are versions that
85 * do not verify the address space, that must have been done previously
86 * with a separate "access_ok()" call (this is used when we do multiple
87 * accesses to the same area of user memory).
88 */
89
90#define __get_user_x(size, ret, x, ptr) \
91 asm volatile("call __get_user_" #size \
92 : "=a" (ret),"=d" (x) \
93 : "c" (ptr) \
94 : "r8")
95
96/* Careful: we have to cast the result to the type of the pointer
97 * for sign reasons */
98
99#define get_user(x, ptr) \
100({ \
101 unsigned long __val_gu; \
102 int __ret_gu; \
103 __chk_user_ptr(ptr); \
104 switch (sizeof(*(ptr))) { \
105 case 1: \
106 __get_user_x(1, __ret_gu, __val_gu, ptr); \
107 break; \
108 case 2: \
109 __get_user_x(2, __ret_gu, __val_gu, ptr); \
110 break; \
111 case 4: \
112 __get_user_x(4, __ret_gu, __val_gu, ptr); \
113 break; \
114 case 8: \
115 __get_user_x(8, __ret_gu, __val_gu, ptr); \
116 break; \
117 default: \
118 __get_user_bad(); \
119 break; \
120 } \
121 (x) = (__force typeof(*(ptr)))__val_gu; \
122 __ret_gu; \
123})
124
125extern void __put_user_1(void);
126extern void __put_user_2(void);
127extern void __put_user_4(void);
128extern void __put_user_8(void);
129extern void __put_user_bad(void);
130
131#define __put_user_x(size, ret, x, ptr) \
132 asm volatile("call __put_user_" #size \
133 :"=a" (ret) \
134 :"c" (ptr),"d" (x) \
135 :"r8")
136
137#define put_user(x, ptr) \
138 __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
139
140#define __get_user(x, ptr) \
141 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
142#define __put_user(x, ptr) \
143 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
144
145#define __get_user_unaligned __get_user
146#define __put_user_unaligned __put_user
147
148#define __put_user_nocheck(x, ptr, size) \
149({ \
150 int __pu_err; \
151 __put_user_size((x), (ptr), (size), __pu_err); \
152 __pu_err; \
153})
154
155
156#define __put_user_check(x, ptr, size) \
157({ \
158 int __pu_err; \
159 typeof(*(ptr)) __user *__pu_addr = (ptr); \
160 switch (size) { \
161 case 1: \
162 __put_user_x(1, __pu_err, x, __pu_addr); \
163 break; \
164 case 2: \
165 __put_user_x(2, __pu_err, x, __pu_addr); \
166 break; \
167 case 4: \
168 __put_user_x(4, __pu_err, x, __pu_addr); \
169 break; \
170 case 8: \
171 __put_user_x(8, __pu_err, x, __pu_addr); \
172 break; \
173 default: \
174 __put_user_bad(); \
175 } \
176 __pu_err; \
177})
178
179#define __put_user_size(x, ptr, size, retval) \
180do { \
181 retval = 0; \
182 __chk_user_ptr(ptr); \
183 switch (size) { \
184 case 1: \
185 __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
186 break; \
187 case 2: \
188 __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
189 break; \
190 case 4: \
191 __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
192 break; \
193 case 8: \
194 __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
195 break; \
196 default: \
197 __put_user_bad(); \
198 } \
199} while (0)
200
201/* FIXME: this hack is definitely wrong -AK */
202struct __large_struct { unsigned long buf[100]; };
203#define __m(x) (*(struct __large_struct __user *)(x))
204
205/*
206 * Tell gcc we read from memory instead of writing: this is because
207 * we do not write to any memory gcc knows about, so there are no
208 * aliasing issues.
209 */
210#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
211 asm volatile("1: mov"itype" %"rtype"1,%2\n" \
212 "2:\n" \
213 ".section .fixup, \"ax\"\n" \
214 "3: mov %3,%0\n" \
215 " jmp 2b\n" \
216 ".previous\n" \
217 _ASM_EXTABLE(1b, 3b) \
218 : "=r"(err) \
219 : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
220
221
222#define __get_user_nocheck(x, ptr, size) \
223({ \
224 int __gu_err; \
225 unsigned long __gu_val; \
226 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
227 (x) = (__force typeof(*(ptr)))__gu_val; \
228 __gu_err; \
229})
230
231extern int __get_user_1(void);
232extern int __get_user_2(void);
233extern int __get_user_4(void);
234extern int __get_user_8(void);
235extern int __get_user_bad(void);
236
237#define __get_user_size(x, ptr, size, retval) \
238do { \
239 retval = 0; \
240 __chk_user_ptr(ptr); \
241 switch (size) { \
242 case 1: \
243 __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
244 break; \
245 case 2: \
246 __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
247 break; \
248 case 4: \
249 __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
250 break; \
251 case 8: \
252 __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
253 break; \
254 default: \
255 (x) = __get_user_bad(); \
256 } \
257} while (0)
258
259#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
260 asm volatile("1: mov"itype" %2,%"rtype"1\n" \
261 "2:\n" \
262 ".section .fixup, \"ax\"\n" \
263 "3: mov %3,%0\n" \
264 " xor"itype" %"rtype"1,%"rtype"1\n" \
265 " jmp 2b\n" \
266 ".previous\n" \
267 _ASM_EXTABLE(1b, 3b) \
268 : "=r" (err), ltype (x) \
269 : "m" (__m(addr)), "i"(errno), "0"(err))
270
271/* 12/*
272 * Copy To/From Userspace 13 * Copy To/From Userspace
273 */ 14 */
@@ -437,7 +178,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
437 return copy_user_generic((__force void *)dst, src, size); 178 return copy_user_generic((__force void *)dst, src, size);
438} 179}
439 180
440#define ARCH_HAS_NOCACHE_UACCESS 1
441extern long __copy_user_nocache(void *dst, const void __user *src, 181extern long __copy_user_nocache(void *dst, const void __user *src,
442 unsigned size, int zerorest); 182 unsigned size, int zerorest);
443 183
@@ -455,4 +195,7 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
455 return __copy_user_nocache(dst, src, size, 0); 195 return __copy_user_nocache(dst, src, size, 0);
456} 196}
457 197
198unsigned long
199copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
200
458#endif /* __X86_64_UACCESS_H */ 201#endif /* __X86_64_UACCESS_H */
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index fe26e36d0f51..9c1a4a3470d9 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -290,7 +290,7 @@ __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
290#define __NR_rt_sigqueueinfo 129 290#define __NR_rt_sigqueueinfo 129
291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 291__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
292#define __NR_rt_sigsuspend 130 292#define __NR_rt_sigsuspend 130
293__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend) 293__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
294#define __NR_sigaltstack 131 294#define __NR_sigaltstack 131
295__SYSCALL(__NR_sigaltstack, stub_sigaltstack) 295__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
296#define __NR_utime 132 296#define __NR_utime 132
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
new file mode 100644
index 000000000000..91ac0dfb7588
--- /dev/null
+++ b/include/asm-x86/uv/uv_bau.h
@@ -0,0 +1,337 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * SGI UV Broadcast Assist Unit definitions
7 *
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
9 */
10
11#ifndef __ASM_X86_UV_BAU__
12#define __ASM_X86_UV_BAU__
13
14#include <linux/bitmap.h>
15#define BITSPERBYTE 8
16
17/*
18 * Broadcast Assist Unit messaging structures
19 *
20 * Selective Broadcast activations are induced by software action
21 * specifying a particular 8-descriptor "set" via a 6-bit index written
22 * to an MMR.
23 * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
24 * each 6-bit index value. These descriptor sets are mapped in sequence
25 * starting with set 0 located at the address specified in the
26 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
28 *
29 * We will use 31 sets, one for sending BAU messages from each of the 32
30 * cpu's on the node.
31 *
32 * TLB shootdown will use the first of the 8 descriptors of each set.
33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
34 */
35
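[Editor's note: a worked example of the layout just described; UV_SB_DESC_SET_SIZE and the helper name are invented for illustration, with the 512-byte stride and 64-byte descriptor size taken from the text above:]

	#define UV_SB_DESC_SET_SIZE (8 * 64)	/* 8 descriptors of 64 bytes */

	/* Sketch: address of descriptor 'entry' inside 6-bit 'set_index'. */
	static inline unsigned long example_sb_desc_addr(unsigned long base,
							 unsigned int set_index,
							 unsigned int entry)
	{
		return base + set_index * UV_SB_DESC_SET_SIZE + entry * 64;
	}
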
36#define UV_ITEMS_PER_DESCRIPTOR 8
37#define UV_CPUS_PER_ACT_STATUS 32
38#define UV_ACT_STATUS_MASK 0x3
39#define UV_ACT_STATUS_SIZE 2
40#define UV_ACTIVATION_DESCRIPTOR_SIZE 32
41#define UV_DISTRIBUTION_SIZE 256
42#define UV_SW_ACK_NPENDING 8
43#define UV_BAU_MESSAGE 200
44/*
45 * Messaging irq; see irq_64.h and include/asm-x86/hw_irq_64.h
46 * To be dynamically allocated in the future
47 */
48#define UV_NET_ENDPOINT_INTD 0x38
49#define UV_DESC_BASE_PNODE_SHIFT 49
50#define UV_PAYLOADQ_PNODE_SHIFT 49
51#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
52#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
53
54/*
55 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
56 */
57#define DESC_STATUS_IDLE 0
58#define DESC_STATUS_ACTIVE 1
59#define DESC_STATUS_DESTINATION_TIMEOUT 2
60#define DESC_STATUS_SOURCE_TIMEOUT 3
61
62/*
 63 * source side thresholds at which message retries print a warning
64 */
65#define SOURCE_TIMEOUT_LIMIT 20
66#define DESTINATION_TIMEOUT_LIMIT 20
67
68/*
69 * number of entries in the destination side payload queue
70 */
71#define DEST_Q_SIZE 17
72/*
73 * number of destination side software ack resources
74 */
75#define DEST_NUM_RESOURCES 8
76#define MAX_CPUS_PER_NODE 32
77/*
78 * completion statuses for sending a TLB flush message
79 */
80#define FLUSH_RETRY 1
81#define FLUSH_GIVEUP 2
82#define FLUSH_COMPLETE 3
83
84/*
85 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
86 * If the 'multilevel' flag in the header portion of the descriptor
87 * has been set to 0, then endpoint multi-unicast mode is selected.
88 * The distribution specification (32 bytes) is interpreted as a 256-bit
89 * distribution vector. Adjacent bits correspond to consecutive even numbered
90 * nodeIDs. The result of adding the index of a given bit to the 15-bit
91 * 'base_dest_nodeid' field of the header corresponds to the
92 * destination nodeID associated with that specified bit.
93 */
94struct bau_target_nodemask {
95 unsigned long bits[BITS_TO_LONGS(256)];
96};
97
98/*
99 * mask of cpu's on a node
100 * (during initialization we need to check that unsigned long has
101 * enough bits for max. cpu's per node)
102 */
103struct bau_local_cpumask {
104 unsigned long bits;
105};
106
107/*
108 * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
109 * only 12 bytes (96 bits) of the payload area are usable.
110 * An additional 3 bytes (bits 27:4) of the header address are carried
111 * to the next bytes of the destination payload queue.
112 * And an additional 2 bytes of the header Suppl_A field are also
113 * carried to the destination payload queue.
114 * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
115 * of the destination payload queue, which is written by the hardware
116 * with the s/w ack resource bit vector.
117 * [ effective message contents (16 bytes (128 bits) maximum), not counting
118 * the s/w ack bit vector ]
119 */
120
121/*
122 * The payload is software-defined for INTD transactions
123 */
124struct bau_msg_payload {
125 unsigned long address; /* signifies a page or all TLB's
126 of the cpu */
127 /* 64 bits */
128 unsigned short sending_cpu; /* filled in by sender */
129 /* 16 bits */
130 unsigned short acknowledge_count;/* filled in by destination */
131 /* 16 bits */
132 unsigned int reserved1:32; /* not usable */
133};
134
135
136/*
137 * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
 138 * see table 4.2.3.0.1 in broadcast_assist spec.
139 */
140struct bau_msg_header {
141 int dest_subnodeid:6; /* must be zero */
142 /* bits 5:0 */
143 int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */
144 /* bits 20:6 */
145 int command:8; /* message type */
146 /* bits 28:21 */
147 /* 0x38: SN3net EndPoint Message */
148 int rsvd_1:3; /* must be zero */
149 /* bits 31:29 */
150 /* int will align on 32 bits */
151 int rsvd_2:9; /* must be zero */
152 /* bits 40:32 */
153 /* Suppl_A is 56-41 */
154 int payload_2a:8; /* becomes byte 16 of msg */
155 /* bits 48:41 */ /* not currently using */
156 int payload_2b:8; /* becomes byte 17 of msg */
157 /* bits 56:49 */ /* not currently using */
158 /* Address field (96:57) is never used as an
159 address (these are address bits 42:3) */
160 int rsvd_3:1; /* must be zero */
161 /* bit 57 */
162 /* address bits 27:4 are payload */
163 /* these 24 bits become bytes 12-14 of msg */
164 int replied_to:1; /* sent as 0 by the source to byte 12 */
165 /* bit 58 */
166
167 int payload_1a:5; /* not currently used */
168 /* bits 63:59 */
169 int payload_1b:8; /* not currently used */
170 /* bits 71:64 */
171 int payload_1c:8; /* not currently used */
172 /* bits 79:72 */
173 int payload_1d:2; /* not currently used */
174 /* bits 81:80 */
175
176 int rsvd_4:7; /* must be zero */
177 /* bits 88:82 */
178 int sw_ack_flag:1; /* software acknowledge flag */
179 /* bit 89 */
 180 /* INTD transactions at destination are to
181 wait for software acknowledge */
182 int rsvd_5:6; /* must be zero */
183 /* bits 95:90 */
184 int rsvd_6:5; /* must be zero */
185 /* bits 100:96 */
186 int int_both:1; /* if 1, interrupt both sockets on the blade */
187 /* bit 101*/
188 int fairness:3; /* usually zero */
189 /* bits 104:102 */
190 int multilevel:1; /* multi-level multicast format */
191 /* bit 105 */
192 /* 0 for TLB: endpoint multi-unicast messages */
193 int chaining:1; /* next descriptor is part of this activation*/
194 /* bit 106 */
195 int rsvd_7:21; /* must be zero */
196 /* bits 127:107 */
197};
198
199/*
200 * The activation descriptor:
201 * The format of the message to send, plus all accompanying control
202 * Should be 64 bytes
203 */
204struct bau_desc {
205 struct bau_target_nodemask distribution;
206 /*
207 * message template, consisting of header and payload:
208 */
209 struct bau_msg_header header;
210 struct bau_msg_payload payload;
211};
212/*
213 * -payload-- ---------header------
214 * bytes 0-11 bits 41-56 bits 58-81
215 * A B (2) C (3)
216 *
217 * A/B/C are moved to:
218 * A C B
219 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
220 * ------------payload queue-----------
221 */
222
223/*
224 * The payload queue on the destination side is an array of these.
225 * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
226 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
227 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
228 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
229 * sw_ack_vector and payload_2)
230 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
231 * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
232 * operation."
233 */
234struct bau_payload_queue_entry {
235 unsigned long address; /* signifies a page or all TLB's
236 of the cpu */
237 /* 64 bits, bytes 0-7 */
238
239 unsigned short sending_cpu; /* cpu that sent the message */
240 /* 16 bits, bytes 8-9 */
241
242 unsigned short acknowledge_count; /* filled in by destination */
243 /* 16 bits, bytes 10-11 */
244
245 unsigned short replied_to:1; /* sent as 0 by the source */
246 /* 1 bit */
247 unsigned short unused1:7; /* not currently using */
248 /* 7 bits: byte 12) */
249
250 unsigned char unused2[2]; /* not currently using */
251 /* bytes 13-14 */
252
253 unsigned char sw_ack_vector; /* filled in by the hardware */
254 /* byte 15 (bits 127:120) */
255
256 unsigned char unused4[3]; /* not currently using bytes 17-19 */
257 /* bytes 17-19 */
258
259 int number_of_cpus; /* filled in at destination */
260 /* 32 bits, bytes 20-23 (aligned) */
261
262 unsigned char unused5[8]; /* not using */
263 /* bytes 24-31 */
264};
265
266/*
267 * one for every slot in the destination payload queue
268 */
269struct bau_msg_status {
270 struct bau_local_cpumask seen_by; /* map of cpu's */
271};
272
273/*
274 * one for every slot in the destination software ack resources
275 */
276struct bau_sw_ack_status {
277 struct bau_payload_queue_entry *msg; /* associated message */
278 int watcher; /* cpu monitoring, or -1 */
279};
280
281/*
282 * one on every node and per-cpu; to locate the software tables
283 */
284struct bau_control {
285 struct bau_desc *descriptor_base;
286 struct bau_payload_queue_entry *bau_msg_head;
287 struct bau_payload_queue_entry *va_queue_first;
288 struct bau_payload_queue_entry *va_queue_last;
289 struct bau_msg_status *msg_statuses;
290 int *watching; /* pointer to array */
291};
292
293/*
294 * This structure is allocated per_cpu for UV TLB shootdown statistics.
295 */
296struct ptc_stats {
297 unsigned long ptc_i; /* number of IPI-style flushes */
298 unsigned long requestor; /* number of nodes this cpu sent to */
299 unsigned long requestee; /* times cpu was remotely requested */
300 unsigned long alltlb; /* times all tlb's on this cpu were flushed */
301 unsigned long onetlb; /* times just one tlb on this cpu was flushed */
302 unsigned long s_retry; /* retries on source side timeouts */
303 unsigned long d_retry; /* retries on destination side timeouts */
304 unsigned long sflush; /* cycles spent in uv_flush_tlb_others */
305 unsigned long dflush; /* cycles spent on destination side */
306 unsigned long retriesok; /* successes on retries */
307 unsigned long nomsg; /* interrupts with no message */
308 unsigned long multmsg; /* interrupts with multiple messages */
309 unsigned long ntargeted;/* nodes targeted */
310};
311
312static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
313{
314 return constant_test_bit(node, &dstp->bits[0]);
315}
316static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
317{
318 __set_bit(node, &dstp->bits[0]);
319}
320static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
321{
322 bitmap_zero(&dstp->bits[0], nbits);
323}
324
325static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
326{
327 bitmap_zero(&dstp->bits, nbits);
328}
329
330#define cpubit_isset(cpu, bau_local_cpumask) \
331 test_bit((cpu), (bau_local_cpumask).bits)
332
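[Editor's note: a short, hypothetical sketch of the nodemask helpers above in combination — clear the 256-bit distribution, mark two target nodes, and test membership:]

	/* Sketch: build a distribution vector targeting nodes 3 and 5. */
	static void example_build_distribution(struct bau_target_nodemask *dist)
	{
		bau_nodes_clear(dist, 256);	/* 256 bits, per the spec text above */
		bau_node_set(3, dist);
		bau_node_set(5, dist);
		BUG_ON(!bau_node_isset(3, dist));	/* sanity check */
	}
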
333extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
334extern void uv_bau_message_intr1(void);
335extern void uv_bau_timeout_intr1(void);
336
337#endif /* __ASM_X86_UV_BAU__ */
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
index 26b9240d1e23..a4ef26e5850b 100644
--- a/include/asm-x86/uv/uv_hub.h
+++ b/include/asm-x86/uv/uv_hub.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV architectural definitions 6 * SGI UV architectural definitions
7 * 7 *
8 * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef __ASM_X86_UV_HUB_H__ 11#ifndef __ASM_X86_UV_HUB_H__
@@ -20,26 +20,49 @@
20/* 20/*
21 * Addressing Terminology 21 * Addressing Terminology
22 * 22 *
23 * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of 23 * M - The low M bits of a physical address represent the offset
24 * routers always have low bit of 1, C/MBricks have low bit 24 * into the blade local memory. RAM memory on a blade is physically
25 * equal to 0. Most addressing macros that target UV hub chips 25 * contiguous (although various IO spaces may punch holes in
26 * right shift the NASID by 1 to exclude the always-zero bit. 26 * it)..
27 * 27 *
28 * SNASID - NASID right shifted by 1 bit. 28 * N - Number of bits in the node portion of a socket physical
29 * address.
30 *
31 * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
32 * routers always have low bit of 1, C/MBricks have low bit
33 * equal to 0. Most addressing macros that target UV hub chips
34 * right shift the NASID by 1 to exclude the always-zero bit.
35 * NASIDs contain up to 15 bits.
36 *
37 * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
38 * of nasids.
39 *
40 * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
41 * of the nasid for socket usage.
42 *
43 *
44 * NumaLink Global Physical Address Format:
45 * +--------------------------------+---------------------+
46 * |00..000| GNODE | NodeOffset |
47 * +--------------------------------+---------------------+
48 * |<-------53 - M bits --->|<--------M bits ----->
49 *
50 * M - number of node offset bits (35 .. 40)
29 * 51 *
30 * 52 *
31 * Memory/UV-HUB Processor Socket Address Format: 53 * Memory/UV-HUB Processor Socket Address Format:
32 * +--------+---------------+---------------------+ 54 * +----------------+---------------+---------------------+
33 * |00..0000| SNASID | NodeOffset | 55 * |00..000000000000| PNODE | NodeOffset |
34 * +--------+---------------+---------------------+ 56 * +----------------+---------------+---------------------+
35 * <--- N bits --->|<--------M bits -----> 57 * <--- N bits --->|<--------M bits ----->
36 * 58 *
37 * M number of node offset bits (35 .. 40) 59 * M - number of node offset bits (35 .. 40)
38 * N number of SNASID bits (0 .. 10) 60 * N - number of PNODE bits (0 .. 10)
39 * 61 *
40 * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). 62 * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
41 * The actual values are configuration dependent and are set at 63 * The actual values are configuration dependent and are set at
42 * boot time 64 * boot time. M & N values are set by the hardware/BIOS at boot.
65 *
43 * 66 *
44 * APICID format 67 * APICID format
45 * NOTE!!!!!! This is the current format of the APICID. However, code 68 * NOTE!!!!!! This is the current format of the APICID. However, code
@@ -48,14 +71,14 @@
48 * 71 *
49 * 1111110000000000 72 * 1111110000000000
50 * 5432109876543210 73 * 5432109876543210
51 * nnnnnnnnnnlc0cch 74 * pppppppppplc0cch
52 * sssssssssss 75 * sssssssssss
53 * 76 *
54 * n = snasid bits 77 * p = pnode bits
55 * l = socket number on board 78 * l = socket number on board
56 * c = core 79 * c = core
57 * h = hyperthread 80 * h = hyperthread
58 * s = bits that are in the socket CSR 81 * s = bits that are in the SOCKET_ID CSR
59 * 82 *
60 * Note: Processor only supports 12 bits in the APICID register. The ACPI 83 * Note: Processor only supports 12 bits in the APICID register. The ACPI
61 * tables hold all 16 bits. Software needs to be aware of this. 84 * tables hold all 16 bits. Software needs to be aware of this.
@@ -74,7 +97,7 @@
74 * This value is also the value of the maximum number of non-router NASIDs 97 * This value is also the value of the maximum number of non-router NASIDs
75 * in the numalink fabric. 98 * in the numalink fabric.
76 * 99 *
77 * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. 100 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
78 */ 101 */
79#define UV_MAX_NUMALINK_BLADES 16384 102#define UV_MAX_NUMALINK_BLADES 16384
80 103
@@ -96,8 +119,12 @@
96 */ 119 */
97struct uv_hub_info_s { 120struct uv_hub_info_s {
98 unsigned long global_mmr_base; 121 unsigned long global_mmr_base;
99 unsigned short local_nasid; 122 unsigned long gpa_mask;
100 unsigned short gnode_upper; 123 unsigned long gnode_upper;
124 unsigned long lowmem_remap_top;
125 unsigned long lowmem_remap_base;
126 unsigned short pnode;
127 unsigned short pnode_mask;
101 unsigned short coherency_domain_number; 128 unsigned short coherency_domain_number;
102 unsigned short numa_blade_id; 129 unsigned short numa_blade_id;
103 unsigned char blade_processor_id; 130 unsigned char blade_processor_id;
@@ -112,83 +139,126 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
112 * Local & Global MMR space macros. 139 * Local & Global MMR space macros.
113 * Note: macros are intended to be used ONLY by inline functions 140 * Note: macros are intended to be used ONLY by inline functions
114 * in this file - not by other kernel code. 141 * in this file - not by other kernel code.
142 * n - NASID (full 15-bit global nasid)
143 * g - GNODE (full 15-bit global nasid, right shifted 1)
 144 * p - PNODE (local part of nasids, right shifted 1)
115 */ 145 */
116#define UV_SNASID(n) ((n) >> 1) 146#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
117#define UV_NASID(n) ((n) << 1) 147#define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper)
118 148
119#define UV_LOCAL_MMR_BASE 0xf4000000UL 149#define UV_LOCAL_MMR_BASE 0xf4000000UL
120#define UV_GLOBAL_MMR32_BASE 0xf8000000UL 150#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
121#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) 151#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
152#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
153#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
122 154
123#define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff 155#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
124#define UV_GLOBAL_MMR32_SNASID_SHIFT 15 156#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
125#define UV_GLOBAL_MMR64_SNASID_SHIFT 26
126 157
127#define UV_GLOBAL_MMR32_NASID_BITS(n) \ 158#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
128 (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \
129 (UV_GLOBAL_MMR32_SNASID_SHIFT))
130 159
131#define UV_GLOBAL_MMR64_NASID_BITS(n) \ 160#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
132 ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) 161 ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
162
163#define UV_APIC_PNODE_SHIFT 6
164
165/*
166 * Macros for converting between kernel virtual addresses, socket local physical
167 * addresses, and UV global physical addresses.
168 * Note: use the standard __pa() & __va() macros for converting
169 * between socket virtual and socket physical addresses.
170 */
171
172/* socket phys RAM --> UV global physical address */
173static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
174{
175 if (paddr < uv_hub_info->lowmem_remap_top)
176 paddr += uv_hub_info->lowmem_remap_base;
177 return paddr | uv_hub_info->gnode_upper;
178}
179
180
181/* socket virtual --> UV global physical address */
182static inline unsigned long uv_gpa(void *v)
183{
184 return __pa(v) | uv_hub_info->gnode_upper;
185}
186
187/* socket virtual --> UV global physical address */
188static inline void *uv_vgpa(void *v)
189{
190 return (void *)uv_gpa(v);
191}
192
193/* UV global physical address --> socket virtual */
194static inline void *uv_va(unsigned long gpa)
195{
196 return __va(gpa & uv_hub_info->gpa_mask);
197}
198
199/* pnode, offset --> socket virtual */
200static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
201{
202 return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
203}
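[Editor's note: a hypothetical round trip through the helpers above, for a socket-local buffer — assuming, as the conversion macros do, that gnode_upper sits above the bits kept by gpa_mask:]

	/* Sketch: kernel virtual -> UV global physical -> back again. */
	static void example_gpa_roundtrip(void *buf)
	{
		unsigned long gpa = uv_gpa(buf);	/* __pa() plus the node bits */

		BUG_ON(uv_va(gpa) != buf);	/* uv_va() masks the node bits off */
	}
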
133 204
134#define UV_APIC_NASID_SHIFT 6
135 205
136/* 206/*
137 * Extract a NASID from an APICID (full apicid, not processor subset) 207 * Extract a PNODE from an APICID (full apicid, not processor subset)
138 */ 208 */
139static inline int uv_apicid_to_nasid(int apicid) 209static inline int uv_apicid_to_pnode(int apicid)
140{ 210{
141 return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT)); 211 return (apicid >> UV_APIC_PNODE_SHIFT);
142} 212}
143 213
144/* 214/*
145 * Access global MMRs using the low memory MMR32 space. This region supports 215 * Access global MMRs using the low memory MMR32 space. This region supports
146 * faster MMR access but not all MMRs are accessible in this space. 216 * faster MMR access but not all MMRs are accessible in this space.
147 */ 217 */
148static inline unsigned long *uv_global_mmr32_address(int nasid, 218static inline unsigned long *uv_global_mmr32_address(int pnode,
149 unsigned long offset) 219 unsigned long offset)
150{ 220{
151 return __va(UV_GLOBAL_MMR32_BASE | 221 return __va(UV_GLOBAL_MMR32_BASE |
152 UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset); 222 UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
153} 223}
154 224
155static inline void uv_write_global_mmr32(int nasid, unsigned long offset, 225static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
156 unsigned long val) 226 unsigned long val)
157{ 227{
158 *uv_global_mmr32_address(nasid, offset) = val; 228 *uv_global_mmr32_address(pnode, offset) = val;
159} 229}
160 230
161static inline unsigned long uv_read_global_mmr32(int nasid, 231static inline unsigned long uv_read_global_mmr32(int pnode,
162 unsigned long offset) 232 unsigned long offset)
163{ 233{
164 return *uv_global_mmr32_address(nasid, offset); 234 return *uv_global_mmr32_address(pnode, offset);
165} 235}
166 236
167/* 237/*
168 * Access Global MMR space using the MMR space located at the top of physical 238 * Access Global MMR space using the MMR space located at the top of physical
169 * memory. 239 * memory.
170 */ 240 */
171static inline unsigned long *uv_global_mmr64_address(int nasid, 241static inline unsigned long *uv_global_mmr64_address(int pnode,
172 unsigned long offset) 242 unsigned long offset)
173{ 243{
174 return __va(UV_GLOBAL_MMR64_BASE | 244 return __va(UV_GLOBAL_MMR64_BASE |
175 UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset); 245 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
176} 246}
177 247
178static inline void uv_write_global_mmr64(int nasid, unsigned long offset, 248static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
179 unsigned long val) 249 unsigned long val)
180{ 250{
181 *uv_global_mmr64_address(nasid, offset) = val; 251 *uv_global_mmr64_address(pnode, offset) = val;
182} 252}
183 253
184static inline unsigned long uv_read_global_mmr64(int nasid, 254static inline unsigned long uv_read_global_mmr64(int pnode,
185 unsigned long offset) 255 unsigned long offset)
186{ 256{
187 return *uv_global_mmr64_address(nasid, offset); 257 return *uv_global_mmr64_address(pnode, offset);
188} 258}
189 259
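[Editor's note: a hypothetical read-modify-write through the 64-bit accessors; UV_MMR_ENABLE is defined in uv_mmrs.h further below, and mmr_offset stands for any RW register:]

	/* Sketch: set the enable bit of an MMR on a given pnode. */
	static void example_enable_mmr(int pnode, unsigned long mmr_offset)
	{
		unsigned long v = uv_read_global_mmr64(pnode, mmr_offset);

		uv_write_global_mmr64(pnode, mmr_offset, v | UV_MMR_ENABLE);
	}
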
190/* 260/*
191 * Access node local MMRs. Faster than using global space but only local MMRs 261 * Access hub local MMRs. Faster than using global space but only local MMRs
192 * are accessible. 262 * are accessible.
193 */ 263 */
194static inline unsigned long *uv_local_mmr_address(unsigned long offset) 264static inline unsigned long *uv_local_mmr_address(unsigned long offset)
@@ -207,15 +277,15 @@ static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
207} 277}
208 278
209/* 279/*
210 * Structures and definitions for converting between cpu, node, and blade 280 * Structures and definitions for converting between cpu, node, pnode, and blade
211 * numbers. 281 * numbers.
212 */ 282 */
213struct uv_blade_info { 283struct uv_blade_info {
214 unsigned short nr_posible_cpus; 284 unsigned short nr_possible_cpus;
215 unsigned short nr_online_cpus; 285 unsigned short nr_online_cpus;
216 unsigned short nasid; 286 unsigned short pnode;
217}; 287};
218struct uv_blade_info *uv_blade_info; 288extern struct uv_blade_info *uv_blade_info;
219extern short *uv_node_to_blade; 289extern short *uv_node_to_blade;
220extern short *uv_cpu_to_blade; 290extern short *uv_cpu_to_blade;
221extern short uv_possible_blades; 291extern short uv_possible_blades;
@@ -244,16 +314,16 @@ static inline int uv_node_to_blade_id(int nid)
244 return uv_node_to_blade[nid]; 314 return uv_node_to_blade[nid];
245} 315}
246 316
247/* Convert a blade id to the NASID of the blade */ 317/* Convert a blade id to the PNODE of the blade */
248static inline int uv_blade_to_nasid(int bid) 318static inline int uv_blade_to_pnode(int bid)
249{ 319{
250 return uv_blade_info[bid].nasid; 320 return uv_blade_info[bid].pnode;
251} 321}
252 322
253/* Determine the number of possible cpus on a blade */ 323/* Determine the number of possible cpus on a blade */
254static inline int uv_blade_nr_possible_cpus(int bid) 324static inline int uv_blade_nr_possible_cpus(int bid)
255{ 325{
256 return uv_blade_info[bid].nr_posible_cpus; 326 return uv_blade_info[bid].nr_possible_cpus;
257} 327}
258 328
259/* Determine the number of online cpus on a blade */ 329/* Determine the number of online cpus on a blade */
@@ -262,16 +332,16 @@ static inline int uv_blade_nr_online_cpus(int bid)
262 return uv_blade_info[bid].nr_online_cpus; 332 return uv_blade_info[bid].nr_online_cpus;
263} 333}
264 334
265/* Convert a cpu id to the NASID of the blade containing the cpu */ 335/* Convert a cpu id to the PNODE of the blade containing the cpu */
266static inline int uv_cpu_to_nasid(int cpu) 336static inline int uv_cpu_to_pnode(int cpu)
267{ 337{
268 return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid; 338 return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
269} 339}
270 340
271/* Convert a node number to the NASID of the blade */ 341/* Convert a linux node number to the PNODE of the blade */
272static inline int uv_node_to_nasid(int nid) 342static inline int uv_node_to_pnode(int nid)
273{ 343{
274 return uv_blade_info[uv_node_to_blade_id(nid)].nasid; 344 return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
275} 345}
276 346
277/* Maximum possible number of blades */ 347/* Maximum possible number of blades */
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h
index 3b69fe6b6376..151fd7fcb809 100644
--- a/include/asm-x86/uv/uv_mmrs.h
+++ b/include/asm-x86/uv/uv_mmrs.h
@@ -11,17 +11,290 @@
11#ifndef __ASM_X86_UV_MMRS__ 11#ifndef __ASM_X86_UV_MMRS__
12#define __ASM_X86_UV_MMRS__ 12#define __ASM_X86_UV_MMRS__
13 13
14/* 14#define UV_MMR_ENABLE (1UL << 63)
15 * AUTO GENERATED - Do not edit 15
16 */ 16/* ========================================================================= */
17/* UVH_BAU_DATA_CONFIG */
18/* ========================================================================= */
19#define UVH_BAU_DATA_CONFIG 0x61680UL
20#define UVH_BAU_DATA_CONFIG_32 0x0438
21
22#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
23#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
24#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
25#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
26#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
27#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
28#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
29#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
30#define UVH_BAU_DATA_CONFIG_P_SHFT 13
31#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
32#define UVH_BAU_DATA_CONFIG_T_SHFT 15
33#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
34#define UVH_BAU_DATA_CONFIG_M_SHFT 16
35#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
36#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
37#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
38
39union uvh_bau_data_config_u {
40 unsigned long v;
41 struct uvh_bau_data_config_s {
42 unsigned long vector_ : 8; /* RW */
43 unsigned long dm : 3; /* RW */
44 unsigned long destmode : 1; /* RW */
45 unsigned long status : 1; /* RO */
46 unsigned long p : 1; /* RO */
47 unsigned long rsvd_14 : 1; /* */
48 unsigned long t : 1; /* RO */
49 unsigned long m : 1; /* RW */
50 unsigned long rsvd_17_31: 15; /* */
51 unsigned long apic_id : 32; /* RW */
52 } s;
53};
54
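Every MMR below repeats one pattern: a 64-bit MMR offset, sometimes a 32-bit (_32) offset, per-field _SHFT/_MASK macros, and a union overlaying named bitfields on the raw 64-bit value. A sketch of the two equivalent field-access styles follows; it assumes the uv_read_local_mmr()/uv_write_local_mmr() accessors from uv_hub.h, and set_bau_vector() itself is hypothetical.

/* Hypothetical: program the BAU interrupt vector, two equivalent ways. */
static void set_bau_vector(unsigned char vector)
{
        union uvh_bau_data_config_u cfg;
        unsigned long v;

        /* Style 1: named bitfields through the generated union. */
        cfg.v = uv_read_local_mmr(UVH_BAU_DATA_CONFIG);
        cfg.s.vector_ = vector;
        uv_write_local_mmr(UVH_BAU_DATA_CONFIG, cfg.v);

        /* Style 2: raw shift-and-mask on the 64-bit value. */
        v = uv_read_local_mmr(UVH_BAU_DATA_CONFIG);
        v = (v & ~UVH_BAU_DATA_CONFIG_VECTOR_MASK) |
            ((unsigned long)vector << UVH_BAU_DATA_CONFIG_VECTOR_SHFT);
        uv_write_local_mmr(UVH_BAU_DATA_CONFIG, v);
}

Both styles touch the same bits; the union form is the more readable one for multi-field updates.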
55/* ========================================================================= */
56/* UVH_EVENT_OCCURRED0 */
57/* ========================================================================= */
58#define UVH_EVENT_OCCURRED0 0x70000UL
59#define UVH_EVENT_OCCURRED0_32 0x005e8
60
61#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
62#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
63#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
64#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
65#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
66#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
67#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
68#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
69#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
70#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
71#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
72#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
73#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
74#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
75#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
76#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
77#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
78#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
79#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
80#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
81#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
82#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
83#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
84#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
85#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
86#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
87#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
88#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
89#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
90#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
91#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
92#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
93#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
94#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
95#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
96#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
97#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
98#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
99#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
100#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
101#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
102#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
103#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
104#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
105#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
106#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
107#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
108#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
109#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
110#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
111#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
112#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
113#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
114#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
115#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
116#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
117#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
118#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
119#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
120#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
121#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
122#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
123#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
124#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
125#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
126#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
127#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
128#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
129#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
130#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
131#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
132#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
133#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
134#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
135#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
136#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
137#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
138#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
139#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
140#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
141#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
142#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
143#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
144#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
145#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
146#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
147#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
148#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
149#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
150#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
151#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
152#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
153#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
154#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
155#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
156#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
157#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
158#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
159#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
160#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
161#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
162#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
163#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
164#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
165#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
166#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
167#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
168#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
169#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
170#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
171#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
172#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
173#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
174#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
175union uvh_event_occurred0_u {
176 unsigned long v;
177 struct uvh_event_occurred0_s {
178 unsigned long lb_hcerr : 1; /* RW, W1C */
179 unsigned long gr0_hcerr : 1; /* RW, W1C */
180 unsigned long gr1_hcerr : 1; /* RW, W1C */
181 unsigned long lh_hcerr : 1; /* RW, W1C */
182 unsigned long rh_hcerr : 1; /* RW, W1C */
183 unsigned long xn_hcerr : 1; /* RW, W1C */
184 unsigned long si_hcerr : 1; /* RW, W1C */
185 unsigned long lb_aoerr0 : 1; /* RW, W1C */
186 unsigned long gr0_aoerr0 : 1; /* RW, W1C */
187 unsigned long gr1_aoerr0 : 1; /* RW, W1C */
188 unsigned long lh_aoerr0 : 1; /* RW, W1C */
189 unsigned long rh_aoerr0 : 1; /* RW, W1C */
190 unsigned long xn_aoerr0 : 1; /* RW, W1C */
191 unsigned long si_aoerr0 : 1; /* RW, W1C */
192 unsigned long lb_aoerr1 : 1; /* RW, W1C */
193 unsigned long gr0_aoerr1 : 1; /* RW, W1C */
194 unsigned long gr1_aoerr1 : 1; /* RW, W1C */
195 unsigned long lh_aoerr1 : 1; /* RW, W1C */
196 unsigned long rh_aoerr1 : 1; /* RW, W1C */
197 unsigned long xn_aoerr1 : 1; /* RW, W1C */
198 unsigned long si_aoerr1 : 1; /* RW, W1C */
199 unsigned long rh_vpi_int : 1; /* RW, W1C */
200 unsigned long system_shutdown_int : 1; /* RW, W1C */
201 unsigned long lb_irq_int_0 : 1; /* RW, W1C */
202 unsigned long lb_irq_int_1 : 1; /* RW, W1C */
203 unsigned long lb_irq_int_2 : 1; /* RW, W1C */
204 unsigned long lb_irq_int_3 : 1; /* RW, W1C */
205 unsigned long lb_irq_int_4 : 1; /* RW, W1C */
206 unsigned long lb_irq_int_5 : 1; /* RW, W1C */
207 unsigned long lb_irq_int_6 : 1; /* RW, W1C */
208 unsigned long lb_irq_int_7 : 1; /* RW, W1C */
209 unsigned long lb_irq_int_8 : 1; /* RW, W1C */
210 unsigned long lb_irq_int_9 : 1; /* RW, W1C */
211 unsigned long lb_irq_int_10 : 1; /* RW, W1C */
212 unsigned long lb_irq_int_11 : 1; /* RW, W1C */
213 unsigned long lb_irq_int_12 : 1; /* RW, W1C */
214 unsigned long lb_irq_int_13 : 1; /* RW, W1C */
215 unsigned long lb_irq_int_14 : 1; /* RW, W1C */
216 unsigned long lb_irq_int_15 : 1; /* RW, W1C */
217 unsigned long l1_nmi_int : 1; /* RW, W1C */
218 unsigned long stop_clock : 1; /* RW, W1C */
219 unsigned long asic_to_l1 : 1; /* RW, W1C */
220 unsigned long l1_to_asic : 1; /* RW, W1C */
221 unsigned long ltc_int : 1; /* RW, W1C */
222 unsigned long la_seq_trigger : 1; /* RW, W1C */
223 unsigned long ipi_int : 1; /* RW, W1C */
224 unsigned long extio_int0 : 1; /* RW, W1C */
225 unsigned long extio_int1 : 1; /* RW, W1C */
226 unsigned long extio_int2 : 1; /* RW, W1C */
227 unsigned long extio_int3 : 1; /* RW, W1C */
228 unsigned long profile_int : 1; /* RW, W1C */
229 unsigned long rtc0 : 1; /* RW, W1C */
230 unsigned long rtc1 : 1; /* RW, W1C */
231 unsigned long rtc2 : 1; /* RW, W1C */
232 unsigned long rtc3 : 1; /* RW, W1C */
233 unsigned long bau_data : 1; /* RW, W1C */
234 unsigned long power_management_req : 1; /* RW, W1C */
235 unsigned long rsvd_57_63 : 7; /* */
236 } s;
237};
238
239/* ========================================================================= */
240/* UVH_EVENT_OCCURRED0_ALIAS */
241/* ========================================================================= */
242#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
243#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
244
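The event bits above are marked RW, W1C (write 1 to clear), and the _ALIAS register at 0x70008 is read here as the usual write-1-to-clear window onto the same bits. A sketch of acknowledging one event under that assumption, again leaning on the uv_write_local_mmr() accessor from uv_hub.h:

/* Hypothetical: acknowledge the profile interrupt via the W1C alias. */
static void uv_ack_profile_int(void)
{
        uv_write_local_mmr(UVH_EVENT_OCCURRED0_ALIAS,
                           UVH_EVENT_OCCURRED0_PROFILE_INT_MASK);
}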
245/* ========================================================================= */
246/* UVH_INT_CMPB */
247/* ========================================================================= */
248#define UVH_INT_CMPB 0x22080UL
249
250#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
251#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
252
253union uvh_int_cmpb_u {
254 unsigned long v;
255 struct uvh_int_cmpb_s {
256 unsigned long real_time_cmpb : 56; /* RW */
257 unsigned long rsvd_56_63 : 8; /* */
258 } s;
259};
260
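UVH_INT_CMPB, and CMPC/CMPD after it, hold 56-bit compare values for the free-running RTC (whose register this patch moves to 0x340000 further down). A sketch of arming a comparator, on the assumption that the field takes the absolute RTC tick at which the interrupt should fire:

/* Hypothetical: arm comparator B to fire @delta RTC ticks from now. */
static void uv_arm_cmpb(unsigned long delta)
{
        union uvh_int_cmpb_u cmp;

        cmp.v = 0;
        cmp.s.real_time_cmpb = (uv_read_local_mmr(UVH_RTC) + delta) &
                               UVH_INT_CMPB_REAL_TIME_CMPB_MASK;
        uv_write_local_mmr(UVH_INT_CMPB, cmp.v);
}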
261/* ========================================================================= */
262/* UVH_INT_CMPC */
263/* ========================================================================= */
264#define UVH_INT_CMPC 0x22100UL
265
266#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
267#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
268
269union uvh_int_cmpc_u {
270 unsigned long v;
271 struct uvh_int_cmpc_s {
272 unsigned long real_time_cmpc : 56; /* RW */
273 unsigned long rsvd_56_63 : 8; /* */
274 } s;
275};
276
277/* ========================================================================= */
278/* UVH_INT_CMPD */
279/* ========================================================================= */
280#define UVH_INT_CMPD 0x22180UL
17 281
18 #define UV_MMR_ENABLE (1UL << 63) 282#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
283#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
284
285union uvh_int_cmpd_u {
286 unsigned long v;
287 struct uvh_int_cmpd_s {
288 unsigned long real_time_cmpd : 56; /* RW */
289 unsigned long rsvd_56_63 : 8; /* */
290 } s;
291};
19 292
20/* ========================================================================= */ 293/* ========================================================================= */
21/* UVH_IPI_INT */ 294/* UVH_IPI_INT */
22/* ========================================================================= */ 295/* ========================================================================= */
23#define UVH_IPI_INT 0x60500UL 296#define UVH_IPI_INT 0x60500UL
24#define UVH_IPI_INT_32 0x0360 297#define UVH_IPI_INT_32 0x0348
25 298
26#define UVH_IPI_INT_VECTOR_SHFT 0 299#define UVH_IPI_INT_VECTOR_SHFT 0
27#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL 300#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
@@ -51,7 +324,7 @@ union uvh_ipi_int_u {
51/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ 324/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
52/* ========================================================================= */ 325/* ========================================================================= */
53#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL 326#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
54#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0 327#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009c0
55 328
56#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 329#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
57#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL 330#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -73,7 +346,7 @@ union uvh_lb_bau_intd_payload_queue_first_u {
73/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ 346/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
74/* ========================================================================= */ 347/* ========================================================================= */
75#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL 348#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
76#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8 349#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009c8
77 350
78#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 351#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
79#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL 352#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -91,7 +364,7 @@ union uvh_lb_bau_intd_payload_queue_last_u {
91/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ 364/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
92/* ========================================================================= */ 365/* ========================================================================= */
93#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL 366#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
94#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00 367#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x009d0
95 368
96#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 369#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
97#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL 370#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -109,6 +382,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
109/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ 382/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
110/* ========================================================================= */ 383/* ========================================================================= */
111#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL 384#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
385#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0a68
112 386
113#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 387#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
114#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL 388#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
@@ -169,12 +443,13 @@ union uvh_lb_bau_intd_software_acknowledge_u {
169/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ 443/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
170/* ========================================================================= */ 444/* ========================================================================= */
171#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL 445#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
446#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
172 447
173/* ========================================================================= */ 448/* ========================================================================= */
174/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 449/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
175/* ========================================================================= */ 450/* ========================================================================= */
176#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 451#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
177#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8 452#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009a8
178 453
179#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 454#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
180#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL 455#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
@@ -197,7 +472,7 @@ union uvh_lb_bau_sb_activation_control_u {
197/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ 472/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
198/* ========================================================================= */ 473/* ========================================================================= */
199#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL 474#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
200#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0 475#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009b0
201 476
202#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 477#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
203#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL 478#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -213,7 +488,7 @@ union uvh_lb_bau_sb_activation_status_0_u {
213/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ 488/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
214/* ========================================================================= */ 489/* ========================================================================= */
215#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL 490#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
216#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8 491#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009b8
217 492
218#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 493#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
219#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL 494#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -229,7 +504,7 @@ union uvh_lb_bau_sb_activation_status_1_u {
229/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ 504/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
230/* ========================================================================= */ 505/* ========================================================================= */
231#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL 506#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
232#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0 507#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009a0
233 508
234#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 509#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
235#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL 510#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
@@ -248,6 +523,334 @@ union uvh_lb_bau_sb_descriptor_base_u {
248}; 523};
249 524
250/* ========================================================================= */ 525/* ========================================================================= */
526/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
527/* ========================================================================= */
528#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
529
530#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
531#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
532#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
533#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
534#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
535#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
536#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
537#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
538#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
539#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
540#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
541#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
542#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
543#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
544#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
545#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
546#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
547#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
548#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
549#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
550#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
551#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
552#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
553#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
554#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
555#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
556#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
557#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
558#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
559#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
560#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
561#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
562#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
563#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
564#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
565#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
566#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
567#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
568#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
569#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
570#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
571#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
572#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
573#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
574#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
575#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
576#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
577#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
578#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
579#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
580#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
581#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
582#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
583#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
584#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
585#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
586#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
587#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
588#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
589#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
590#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
591#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
592#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
593#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
594#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
595#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
596#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
597#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
598#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
599#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
600#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
601#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
602#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
603#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
604#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
605#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
606#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
607#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
608#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
609#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
610#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
611#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
612#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
613#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
614#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
615#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
616
617union uvh_lb_mcast_aoerr0_rpt_enable_u {
618 unsigned long v;
619 struct uvh_lb_mcast_aoerr0_rpt_enable_s {
620 unsigned long mcast_obese_msg : 1; /* RW */
621 unsigned long mcast_data_sb_err : 1; /* RW */
622 unsigned long mcast_nack_buff_parity : 1; /* RW */
623 unsigned long mcast_timeout : 1; /* RW */
624 unsigned long mcast_inactive_reply : 1; /* RW */
625 unsigned long mcast_upgrade_error : 1; /* RW */
626 unsigned long mcast_reg_count_underflow : 1; /* RW */
627 unsigned long mcast_rep_obese_msg : 1; /* RW */
628 unsigned long ucache_req_runt_msg : 1; /* RW */
629 unsigned long ucache_req_obese_msg : 1; /* RW */
630 unsigned long ucache_req_data_sb_err : 1; /* RW */
631 unsigned long ucache_rep_runt_msg : 1; /* RW */
632 unsigned long ucache_rep_obese_msg : 1; /* RW */
633 unsigned long ucache_rep_data_sb_err : 1; /* RW */
634 unsigned long ucache_rep_command_err : 1; /* RW */
635 unsigned long ucache_pend_timeout : 1; /* RW */
636 unsigned long macc_req_runt_msg : 1; /* RW */
637 unsigned long macc_req_obese_msg : 1; /* RW */
638 unsigned long macc_req_data_sb_err : 1; /* RW */
639 unsigned long macc_rep_runt_msg : 1; /* RW */
640 unsigned long macc_rep_obese_msg : 1; /* RW */
641 unsigned long macc_rep_data_sb_err : 1; /* RW */
642 unsigned long macc_amo_timeout : 1; /* RW */
643 unsigned long macc_put_timeout : 1; /* RW */
644 unsigned long macc_spurious_event : 1; /* RW */
645 unsigned long ioh_destination_table_parity : 1; /* RW */
646 unsigned long get_had_error_reply : 1; /* RW */
647 unsigned long get_timeout : 1; /* RW */
648 unsigned long lock_manager_had_error_reply : 1; /* RW */
649 unsigned long put_had_error_reply : 1; /* RW */
650 unsigned long put_timeout : 1; /* RW */
651 unsigned long sb_activation_overrun : 1; /* RW */
652 unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
653 unsigned long completed_gb_activation_timeout : 1; /* RW */
654 unsigned long descriptor_buffer_0_parity : 1; /* RW */
655 unsigned long descriptor_buffer_1_parity : 1; /* RW */
656 unsigned long socket_destination_table_parity : 1; /* RW */
657 unsigned long bau_reply_payload_corruption : 1; /* RW */
658 unsigned long io_port_destination_table_parity : 1; /* RW */
659 unsigned long intd_soft_ack_timeout : 1; /* RW */
660 unsigned long int_rep_obese_msg : 1; /* RW */
661 unsigned long int_rep_command_err : 1; /* RW */
662 unsigned long int_timeout : 1; /* RW */
663 unsigned long rsvd_43_63 : 21; /* */
664 } s;
665};
666
667/* ========================================================================= */
668/* UVH_LOCAL_INT0_CONFIG */
669/* ========================================================================= */
670#define UVH_LOCAL_INT0_CONFIG 0x61000UL
671
672#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
673#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
674#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
675#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
676#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
677#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
678#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
679#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
680#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
681#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
682#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
683#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
684#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
685#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
686#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
687#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
688
689union uvh_local_int0_config_u {
690 unsigned long v;
691 struct uvh_local_int0_config_s {
692 unsigned long vector_ : 8; /* RW */
693 unsigned long dm : 3; /* RW */
694 unsigned long destmode : 1; /* RW */
695 unsigned long status : 1; /* RO */
696 unsigned long p : 1; /* RO */
697 unsigned long rsvd_14 : 1; /* */
698 unsigned long t : 1; /* RO */
699 unsigned long m : 1; /* RW */
700 unsigned long rsvd_17_31: 15; /* */
701 unsigned long apic_id : 32; /* RW */
702 } s;
703};
704
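UVH_LOCAL_INT0_CONFIG follows the familiar APIC redirection-entry layout: vector, delivery mode, destination mode, target APIC ID. A sketch of routing local interrupt 0 follows; the dm and destmode encodings (0 for fixed delivery, 0 for physical destination) are assumed from APIC convention rather than documented in this header.

/* Hypothetical: route local int 0 as a fixed interrupt to @apicid. */
static void uv_route_local_int0(unsigned char vector, unsigned int apicid)
{
        union uvh_local_int0_config_u cfg;

        cfg.v = 0;
        cfg.s.vector_ = vector;
        cfg.s.dm = 0;           /* fixed delivery (assumed encoding) */
        cfg.s.destmode = 0;     /* physical destination (assumed) */
        cfg.s.apic_id = apicid;
        uv_write_local_mmr(UVH_LOCAL_INT0_CONFIG, cfg.v);
}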
705/* ========================================================================= */
706/* UVH_LOCAL_INT0_ENABLE */
707/* ========================================================================= */
708#define UVH_LOCAL_INT0_ENABLE 0x65000UL
709
710#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
711#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
712#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
713#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
714#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
715#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
716#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
717#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
718#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
719#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
720#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
721#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
722#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
723#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
724#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
725#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
726#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
727#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
728#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
729#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
730#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
731#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
732#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
733#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
734#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
735#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
736#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
737#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
738#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
739#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
740#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
741#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
742#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
743#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
744#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
745#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
746#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
747#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
748#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
749#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
750#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
751#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
752#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
753#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
754#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
755#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
756#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
757#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
758#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
759#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
760#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
761#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
762#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
763#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
764#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
765#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
766#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
767#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
768#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
769#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
770#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
771#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
772#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
773#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
774#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
775#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
776#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
777#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
778#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
779#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
780#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
781#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
782#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
783#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
784#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
785#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
786#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
787#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
788#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
789#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
790#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
791#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
792#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
793#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
794#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
795#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
796#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
797#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
798#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
799#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
800
801union uvh_local_int0_enable_u {
802 unsigned long v;
803 struct uvh_local_int0_enable_s {
804 unsigned long lb_hcerr : 1; /* RW */
805 unsigned long gr0_hcerr : 1; /* RW */
806 unsigned long gr1_hcerr : 1; /* RW */
807 unsigned long lh_hcerr : 1; /* RW */
808 unsigned long rh_hcerr : 1; /* RW */
809 unsigned long xn_hcerr : 1; /* RW */
810 unsigned long si_hcerr : 1; /* RW */
811 unsigned long lb_aoerr0 : 1; /* RW */
812 unsigned long gr0_aoerr0 : 1; /* RW */
813 unsigned long gr1_aoerr0 : 1; /* RW */
814 unsigned long lh_aoerr0 : 1; /* RW */
815 unsigned long rh_aoerr0 : 1; /* RW */
816 unsigned long xn_aoerr0 : 1; /* RW */
817 unsigned long si_aoerr0 : 1; /* RW */
818 unsigned long lb_aoerr1 : 1; /* RW */
819 unsigned long gr0_aoerr1 : 1; /* RW */
820 unsigned long gr1_aoerr1 : 1; /* RW */
821 unsigned long lh_aoerr1 : 1; /* RW */
822 unsigned long rh_aoerr1 : 1; /* RW */
823 unsigned long xn_aoerr1 : 1; /* RW */
824 unsigned long si_aoerr1 : 1; /* RW */
825 unsigned long rh_vpi_int : 1; /* RW */
826 unsigned long system_shutdown_int : 1; /* RW */
827 unsigned long lb_irq_int_0 : 1; /* RW */
828 unsigned long lb_irq_int_1 : 1; /* RW */
829 unsigned long lb_irq_int_2 : 1; /* RW */
830 unsigned long lb_irq_int_3 : 1; /* RW */
831 unsigned long lb_irq_int_4 : 1; /* RW */
832 unsigned long lb_irq_int_5 : 1; /* RW */
833 unsigned long lb_irq_int_6 : 1; /* RW */
834 unsigned long lb_irq_int_7 : 1; /* RW */
835 unsigned long lb_irq_int_8 : 1; /* RW */
836 unsigned long lb_irq_int_9 : 1; /* RW */
837 unsigned long lb_irq_int_10 : 1; /* RW */
838 unsigned long lb_irq_int_11 : 1; /* RW */
839 unsigned long lb_irq_int_12 : 1; /* RW */
840 unsigned long lb_irq_int_13 : 1; /* RW */
841 unsigned long lb_irq_int_14 : 1; /* RW */
842 unsigned long lb_irq_int_15 : 1; /* RW */
843 unsigned long l1_nmi_int : 1; /* RW */
844 unsigned long stop_clock : 1; /* RW */
845 unsigned long asic_to_l1 : 1; /* RW */
846 unsigned long l1_to_asic : 1; /* RW */
847 unsigned long ltc_int : 1; /* RW */
848 unsigned long la_seq_trigger : 1; /* RW */
849 unsigned long rsvd_45_63 : 19; /* */
850 } s;
851};
852
853/* ========================================================================= */
251/* UVH_NODE_ID */ 854/* UVH_NODE_ID */
252/* ========================================================================= */ 855/* ========================================================================= */
253#define UVH_NODE_ID 0x0UL 856#define UVH_NODE_ID 0x0UL
@@ -284,14 +887,101 @@ union uvh_node_id_u {
284}; 887};
285 888
286/* ========================================================================= */ 889/* ========================================================================= */
890/* UVH_NODE_PRESENT_TABLE */
891/* ========================================================================= */
892#define UVH_NODE_PRESENT_TABLE 0x1400UL
893#define UVH_NODE_PRESENT_TABLE_DEPTH 16
894
895#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
896#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
897
898union uvh_node_present_table_u {
899 unsigned long v;
900 struct uvh_node_present_table_s {
901 unsigned long nodes : 64; /* RW */
902 } s;
903};
904
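UVH_NODE_PRESENT_TABLE is a 16-entry array (UVH_NODE_PRESENT_TABLE_DEPTH) of 64-bit bitmaps, one bit per node. Assuming consecutive entries sit at consecutive 8-byte offsets from 0x1400, counting the present nodes is a short loop:

/* Hypothetical: count set bits across the node-present bitmap. */
static int uv_present_node_count(void)
{
        int i, n = 0;

        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                n += hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE +
                                                 i * 8));
        return n;
}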
905/* ========================================================================= */
906/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
907/* ========================================================================= */
908#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
909
910#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
911#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
912
913union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
914 unsigned long v;
915 struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
916 unsigned long rsvd_0_23 : 24; /* */
917 unsigned long dest_base : 22; /* RW */
918 unsigned long rsvd_46_63: 18; /* */
919 } s;
920};
921
922/* ========================================================================= */
923/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
924/* ========================================================================= */
925#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
926
927#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
928#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
929
930union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
931 unsigned long v;
932 struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
933 unsigned long rsvd_0_23 : 24; /* */
934 unsigned long dest_base : 22; /* RW */
935 unsigned long rsvd_46_63: 18; /* */
936 } s;
937};
938
939/* ========================================================================= */
940/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
941/* ========================================================================= */
942#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
943
944#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
945#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
946
947union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
948 unsigned long v;
949 struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
950 unsigned long rsvd_0_23 : 24; /* */
951 unsigned long dest_base : 22; /* RW */
952 unsigned long rsvd_46_63: 18; /* */
953 } s;
954};
955
956/* ========================================================================= */
957/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
958/* ========================================================================= */
959#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
960
961#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
962#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
963#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
964#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
965
966union uvh_rh_gam_cfg_overlay_config_mmr_u {
967 unsigned long v;
968 struct uvh_rh_gam_cfg_overlay_config_mmr_s {
969 unsigned long rsvd_0_25: 26; /* */
970 unsigned long base : 20; /* RW */
971 unsigned long rsvd_46_62: 17; /* */
972 unsigned long enable : 1; /* RW */
973 } s;
974};
975
976/* ========================================================================= */
287/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 977/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
288/* ========================================================================= */ 978/* ========================================================================= */
289#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 979#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
290 980
291#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 981#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
292#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 982#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
293#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46 983#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
294#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL 984#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
295#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 985#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
296#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 986#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
297#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 987#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -302,8 +992,9 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
302 struct uvh_rh_gam_gru_overlay_config_mmr_s { 992 struct uvh_rh_gam_gru_overlay_config_mmr_s {
303 unsigned long rsvd_0_27: 28; /* */ 993 unsigned long rsvd_0_27: 28; /* */
304 unsigned long base : 18; /* RW */ 994 unsigned long base : 18; /* RW */
995 unsigned long rsvd_46_47: 2; /* */
305 unsigned long gr4 : 1; /* RW */ 996 unsigned long gr4 : 1; /* RW */
306 unsigned long rsvd_47_51: 5; /* */ 997 unsigned long rsvd_49_51: 3; /* */
307 unsigned long n_gru : 4; /* RW */ 998 unsigned long n_gru : 4; /* RW */
308 unsigned long rsvd_56_62: 7; /* */ 999 unsigned long rsvd_56_62: 7; /* */
309 unsigned long enable : 1; /* RW */ 1000 unsigned long enable : 1; /* RW */
@@ -311,6 +1002,32 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
311}; 1002};
312 1003
313/* ========================================================================= */ 1004/* ========================================================================= */
1005/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
1006/* ========================================================================= */
1007#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
1008
1009#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
1010#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
1011#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
1012#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1013#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1014#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1015#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1016#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1017
1018union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1019 unsigned long v;
1020 struct uvh_rh_gam_mmioh_overlay_config_mmr_s {
1021 unsigned long rsvd_0_29: 30; /* */
1022 unsigned long base : 16; /* RW */
1023 unsigned long m_io : 6; /* RW */
1024 unsigned long n_io : 4; /* RW */
1025 unsigned long rsvd_56_62: 7; /* */
1026 unsigned long enable : 1; /* RW */
1027 } s;
1028};
1029
1030/* ========================================================================= */
314/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ 1031/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
315/* ========================================================================= */ 1032/* ========================================================================= */
316#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL 1033#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
@@ -336,7 +1053,7 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
336/* ========================================================================= */ 1053/* ========================================================================= */
337/* UVH_RTC */ 1054/* UVH_RTC */
338/* ========================================================================= */ 1055/* ========================================================================= */
339#define UVH_RTC 0x28000UL 1056#define UVH_RTC 0x340000UL
340 1057
341#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 1058#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
342#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL 1059#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -350,6 +1067,139 @@ union uvh_rtc_u {
350}; 1067};
351 1068
352/* ========================================================================= */ 1069/* ========================================================================= */
1070/* UVH_RTC1_INT_CONFIG */
1071/* ========================================================================= */
1072#define UVH_RTC1_INT_CONFIG 0x615c0UL
1073
1074#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
1075#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1076#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
1077#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
1078#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
1079#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1080#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
1081#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1082#define UVH_RTC1_INT_CONFIG_P_SHFT 13
1083#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
1084#define UVH_RTC1_INT_CONFIG_T_SHFT 15
1085#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
1086#define UVH_RTC1_INT_CONFIG_M_SHFT 16
1087#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
1088#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
1089#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1090
1091union uvh_rtc1_int_config_u {
1092 unsigned long v;
1093 struct uvh_rtc1_int_config_s {
1094 unsigned long vector_ : 8; /* RW */
1095 unsigned long dm : 3; /* RW */
1096 unsigned long destmode : 1; /* RW */
1097 unsigned long status : 1; /* RO */
1098 unsigned long p : 1; /* RO */
1099 unsigned long rsvd_14 : 1; /* */
1100 unsigned long t : 1; /* RO */
1101 unsigned long m : 1; /* RW */
1102 unsigned long rsvd_17_31: 15; /* */
1103 unsigned long apic_id : 32; /* RW */
1104 } s;
1105};
1106
1107/* ========================================================================= */
1108/* UVH_RTC2_INT_CONFIG */
1109/* ========================================================================= */
1110#define UVH_RTC2_INT_CONFIG 0x61600UL
1111
1112#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
1113#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1114#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
1115#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
1116#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
1117#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1118#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
1119#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1120#define UVH_RTC2_INT_CONFIG_P_SHFT 13
1121#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
1122#define UVH_RTC2_INT_CONFIG_T_SHFT 15
1123#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
1124#define UVH_RTC2_INT_CONFIG_M_SHFT 16
1125#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
1126#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
1127#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1128
1129union uvh_rtc2_int_config_u {
1130 unsigned long v;
1131 struct uvh_rtc2_int_config_s {
1132 unsigned long vector_ : 8; /* RW */
1133 unsigned long dm : 3; /* RW */
1134 unsigned long destmode : 1; /* RW */
1135 unsigned long status : 1; /* RO */
1136 unsigned long p : 1; /* RO */
1137 unsigned long rsvd_14 : 1; /* */
1138 unsigned long t : 1; /* RO */
1139 unsigned long m : 1; /* RW */
1140 unsigned long rsvd_17_31: 15; /* */
1141 unsigned long apic_id : 32; /* RW */
1142 } s;
1143};
1144
1145/* ========================================================================= */
1146/* UVH_RTC3_INT_CONFIG */
1147/* ========================================================================= */
1148#define UVH_RTC3_INT_CONFIG 0x61640UL
1149
1150#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
1151#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
1152#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
1153#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
1154#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
1155#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
1156#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
1157#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
1158#define UVH_RTC3_INT_CONFIG_P_SHFT 13
1159#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
1160#define UVH_RTC3_INT_CONFIG_T_SHFT 15
1161#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
1162#define UVH_RTC3_INT_CONFIG_M_SHFT 16
1163#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
1164#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
1165#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
1166
1167union uvh_rtc3_int_config_u {
1168 unsigned long v;
1169 struct uvh_rtc3_int_config_s {
1170 unsigned long vector_ : 8; /* RW */
1171 unsigned long dm : 3; /* RW */
1172 unsigned long destmode : 1; /* RW */
1173 unsigned long status : 1; /* RO */
1174 unsigned long p : 1; /* RO */
1175 unsigned long rsvd_14 : 1; /* */
1176 unsigned long t : 1; /* RO */
1177 unsigned long m : 1; /* RW */
1178 unsigned long rsvd_17_31: 15; /* */
1179 unsigned long apic_id : 32; /* RW */
1180 } s;
1181};
1182
1183/* ========================================================================= */
1184/* UVH_RTC_INC_RATIO */
1185/* ========================================================================= */
1186#define UVH_RTC_INC_RATIO 0x350000UL
1187
1188#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
1189#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
1190#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
1191#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
1192
1193union uvh_rtc_inc_ratio_u {
1194 unsigned long v;
1195 struct uvh_rtc_inc_ratio_s {
1196 unsigned long fraction : 20; /* RW */
1197 unsigned long ratio : 3; /* RW */
1198 unsigned long rsvd_23_63: 41; /* */
1199 } s;
1200};
1201
1202/* ========================================================================= */
353/* UVH_SI_ADDR_MAP_CONFIG */ 1203/* UVH_SI_ADDR_MAP_CONFIG */
354/* ========================================================================= */ 1204/* ========================================================================= */
355#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL 1205#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
@@ -369,5 +1219,77 @@ union uvh_si_addr_map_config_u {
369 } s; 1219 } s;
370}; 1220};
371 1221
1222/* ========================================================================= */
1223/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
1224/* ========================================================================= */
1225#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
1226
1227#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
1228#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1229#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1230#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1231#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
1232#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1233
1234union uvh_si_alias0_overlay_config_u {
1235 unsigned long v;
1236 struct uvh_si_alias0_overlay_config_s {
1237 unsigned long rsvd_0_23: 24; /* */
1238 unsigned long base : 8; /* RW */
1239 unsigned long rsvd_32_47: 16; /* */
1240 unsigned long m_alias : 5; /* RW */
1241 unsigned long rsvd_53_62: 10; /* */
1242 unsigned long enable : 1; /* RW */
1243 } s;
1244};
1245
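The three SI alias overlay registers share one layout, and their ENABLE bit is bit 63, the same bit UV_MMR_ENABLE names at the top of this file. A sketch of composing a value from the raw macros; treating @base as the 8-bit field value (address bits 31:24, judging by the field width and shift) is an assumption.

/* Hypothetical: build an alias-0 overlay value for @base and @m_alias. */
static unsigned long uv_alias0_config(unsigned long base, unsigned long m_alias)
{
        unsigned long v = 0;

        v |= (base << UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT) &
             UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK;
        v |= (m_alias << UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT) &
             UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK;
        return v | UV_MMR_ENABLE;       /* same as the _ENABLE_MASK bit */
}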
1246/* ========================================================================= */
1247/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
1248/* ========================================================================= */
1249#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
1250
1251#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
1252#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1253#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1254#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1255#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
1256#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1257
1258union uvh_si_alias1_overlay_config_u {
1259 unsigned long v;
1260 struct uvh_si_alias1_overlay_config_s {
1261 unsigned long rsvd_0_23: 24; /* */
1262 unsigned long base : 8; /* RW */
1263 unsigned long rsvd_32_47: 16; /* */
1264 unsigned long m_alias : 5; /* RW */
1265 unsigned long rsvd_53_62: 10; /* */
1266 unsigned long enable : 1; /* RW */
1267 } s;
1268};
1269
1270/* ========================================================================= */
1271/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
1272/* ========================================================================= */
1273#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
1274
1275#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
1276#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1277#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1278#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1279#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1280#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1281
1282union uvh_si_alias2_overlay_config_u {
1283 unsigned long v;
1284 struct uvh_si_alias2_overlay_config_s {
1285 unsigned long rsvd_0_23: 24; /* */
1286 unsigned long base : 8; /* RW */
1287 unsigned long rsvd_32_47: 16; /* */
1288 unsigned long m_alias : 5; /* RW */
1289 unsigned long rsvd_53_62: 10; /* */
1290 unsigned long enable : 1; /* RW */
1291 } s;
1292};
1293
372 1294
373#endif /* __ASM_X86_UV_MMRS__ */ 1295#endif /* __ASM_X86_UV_MMRS__ */
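All of the UV MMR declarations above follow one pattern: a union overlaying the raw 64-bit register value with a bitfield struct, plus _SHFT/_MASK macros for open-coded access. A minimal sketch of reading one of these registers both ways (uv_read_global_mmr64() and the pnode argument are assumptions for illustration, not part of this diff):

/* Hypothetical example; uv_read_global_mmr64() is assumed to exist
 * elsewhere in the UV support code. */
static int si_alias0_enabled(int pnode)
{
	union uvh_si_alias0_overlay_config_u alias0;

	alias0.v = uv_read_global_mmr64(pnode, UVH_SI_ALIAS0_OVERLAY_CONFIG);

	/* bitfield access ... */
	if (alias0.s.enable)
		return 1;

	/* ... or the equivalent mask-and-shift form */
	return (int)((alias0.v & UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK)
		     >> UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT);
}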
diff --git a/include/asm-x86/mach-visws/cobalt.h b/include/asm-x86/visws/cobalt.h
index 995258831b7f..995258831b7f 100644
--- a/include/asm-x86/mach-visws/cobalt.h
+++ b/include/asm-x86/visws/cobalt.h
diff --git a/include/asm-x86/mach-visws/lithium.h b/include/asm-x86/visws/lithium.h
index dfcd4f07ab85..dfcd4f07ab85 100644
--- a/include/asm-x86/mach-visws/lithium.h
+++ b/include/asm-x86/visws/lithium.h
diff --git a/include/asm-x86/mach-visws/piix4.h b/include/asm-x86/visws/piix4.h
index 83ea4f46e419..83ea4f46e419 100644
--- a/include/asm-x86/mach-visws/piix4.h
+++ b/include/asm-x86/visws/piix4.h
diff --git a/include/asm-x86/visws/sgivw.h b/include/asm-x86/visws/sgivw.h
new file mode 100644
index 000000000000..5fbf63e1003c
--- /dev/null
+++ b/include/asm-x86/visws/sgivw.h
@@ -0,0 +1,5 @@
+/*
+ * Frame buffer position and size:
+ */
+extern unsigned long sgivwfb_mem_phys;
+extern unsigned long sgivwfb_mem_size;
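The new header only exports the framebuffer window's physical base and size; a consumer such as the sgivwfb driver still has to map that range before touching it. A sketch under that assumption (the ioremap()-based helper below is illustrative, not part of this diff):

/* Hypothetical consumer of the two exports above. */
#include <linux/io.h>

static void __iomem *sgivw_map_fb(void)
{
	if (!sgivwfb_mem_size)
		return NULL;	/* no framebuffer configured */
	return ioremap(sgivwfb_mem_phys, sgivwfb_mem_size);
}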
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index 074b357146df..5ce351325e01 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -14,12 +14,6 @@
 
 #include <asm/processor-flags.h>
 
-#ifdef CONFIG_VM86
-#define X86_VM_MASK	X86_EFLAGS_VM
-#else
-#define X86_VM_MASK	0 /* No VM86 support */
-#endif
-
 #define BIOSSEG		0x0f000
 
 #define CPU_086		0
@@ -121,7 +115,6 @@ struct vm86plus_info_struct {
 	unsigned long is_vm86pus:1;	     /* for vm86 internal use */
 	unsigned char vm86dbg_intxxtab[32];  /* for debugger */
 };
-
 struct vm86plus_struct {
 	struct vm86_regs regs;
 	unsigned long flags;
@@ -133,6 +126,9 @@ struct vm86plus_struct {
 };
 
 #ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
 /*
  * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
  * mode - the main change is that the old segment descriptors aren't
@@ -141,7 +137,6 @@ struct vm86plus_struct {
  * at the end of the structure. Look at ptrace.h to see the "normal"
  * setup. For user space layout see 'struct vm86_regs' above.
  */
-#include <asm/ptrace.h>
 
 struct kernel_vm86_regs {
 /*
diff --git a/include/asm-x86/vmi_time.h b/include/asm-x86/vmi_time.h
index 478188130328..c3118c385156 100644
--- a/include/asm-x86/vmi_time.h
+++ b/include/asm-x86/vmi_time.h
@@ -50,7 +50,7 @@ extern void __init vmi_time_init(void);
 extern unsigned long vmi_get_wallclock(void);
 extern int vmi_set_wallclock(unsigned long now);
 extern unsigned long long vmi_sched_clock(void);
-extern unsigned long vmi_cpu_khz(void);
+extern unsigned long vmi_tsc_khz(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 extern void __devinit vmi_time_bsp_init(void);
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h
index 17b3700949bf..6b66ff905af0 100644
--- a/include/asm-x86/vsyscall.h
+++ b/include/asm-x86/vsyscall.h
@@ -24,7 +24,8 @@ enum vsyscall_num {
 	((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
 #define __section_vsyscall_clock __attribute__ \
 	((unused, __section__ (".vsyscall_clock"),aligned(16)))
-#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
+#define __vsyscall_fn \
+	__attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
 
 #define VGETCPU_RDTSCP	1
 #define VGETCPU_LSL	2
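Folding notrace into __vsyscall_fn excludes every function declared with the macro from ftrace instrumentation, which would be unsafe in code that executes from the user-mapped vsyscall page. A sketch of a declaration using the updated macro (the function name and body are illustrative, not from this diff):

/* Hypothetical function placed in the vsyscall section; notrace now
 * comes along with __vsyscall_fn automatically. */
static long __vsyscall_fn vread_example(void)
{
	return 0;
}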
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 596312a7bfc9..f8d57ea1f05f 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -4,6 +4,7 @@
 enum ipi_vector {
 	XEN_RESCHEDULE_VECTOR,
 	XEN_CALL_FUNCTION_VECTOR,
+	XEN_CALL_FUNCTION_SINGLE_VECTOR,
 
 	XEN_NR_IPIS,
 };
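Because XEN_NR_IPIS is the final enumerator, inserting XEN_CALL_FUNCTION_SINGLE_VECTOR before it grows the IPI count automatically, and any loop over 0..XEN_NR_IPIS-1 picks up the new vector without changes. A sketch of a designated-initializer table keyed by the enum (the name strings are illustrative assumptions):

/* Hypothetical per-IPI name table; its size tracks the enum. */
static const char *const xen_ipi_names[XEN_NR_IPIS] = {
	[XEN_RESCHEDULE_VECTOR]		  = "resched",
	[XEN_CALL_FUNCTION_VECTOR]	  = "callfunc",
	[XEN_CALL_FUNCTION_SINGLE_VECTOR] = "callfuncsingle",
};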
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index c2ccd997ed35..2a4f9b41d684 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -176,9 +176,9 @@ HYPERVISOR_fpu_taskswitch(int set)
 }
 
 static inline int
-HYPERVISOR_sched_op(int cmd, unsigned long arg)
+HYPERVISOR_sched_op(int cmd, void *arg)
 {
-	return _hypercall2(int, sched_op, cmd, arg);
+	return _hypercall2(int, sched_op_new, cmd, arg);
 }
 
 static inline long
@@ -315,6 +315,13 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
 }
 
 static inline void
+MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
+{
+	mcl->op = __HYPERVISOR_fpu_taskswitch;
+	mcl->args[0] = set;
+}
+
+static inline void
 MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
 			pte_t new_val, unsigned long flags)
 {
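MULTI_fpu_taskswitch() only fills in one multicall slot; the batch still has to be issued, for which this header already provides HYPERVISOR_multicall(). A minimal sketch of pairing it with the existing MULTI_update_va_mapping() (the two-entry array, the UVMF_INVLPG flag, and the BUG() on failure are illustrative assumptions):

/* Hypothetical two-entry multicall batch. */
static void example_batch(unsigned long va, pte_t new_val)
{
	struct multicall_entry mcl[2];

	MULTI_fpu_taskswitch(&mcl[0], 1);	/* set CR0.TS in one slot */
	MULTI_update_va_mapping(&mcl[1], va, new_val, UVMF_INVLPG);

	if (HYPERVISOR_multicall(mcl, 2) != 0)
		BUG();
}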
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index e11f24038b1d..377c04591c15 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -26,15 +26,20 @@ typedef struct xpaddr {
 #define FOREIGN_FRAME_BIT	(1UL<<31)
 #define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
 
-extern unsigned long *phys_to_machine_mapping;
+/* Maximum amount of memory we can handle in a domain in pages */
+#define MAX_DOMAIN_PAGES						\
+    ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+
+
+extern unsigned long get_phys_to_machine(unsigned long pfn);
+extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return pfn;
 
-	return phys_to_machine_mapping[(unsigned int)(pfn)] &
-		~FOREIGN_FRAME_BIT;
+	return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
 }
 
 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -42,7 +47,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return 1;
 
-	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+	return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
 }
 
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
@@ -106,20 +111,12 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 	unsigned long pfn = mfn_to_pfn(mfn);
 	if ((pfn < max_mapnr)
 	    && !xen_feature(XENFEAT_auto_translated_physmap)
-	    && (phys_to_machine_mapping[pfn] != mfn))
+	    && (get_phys_to_machine(pfn) != mfn))
 		return max_mapnr; /* force !pfn_valid() */
+	/* XXX fixme; not true with sparsemem */
 	return pfn;
 }
 
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-		return;
-	}
-	phys_to_machine_mapping[pfn] = mfn;
-}
-
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
 #define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
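For scale: with CONFIG_XEN_MAX_DOMAIN_MEMORY set to 8 (GB, an assumed value) and 4 KiB pages, MAX_DOMAIN_PAGES works out to 8 * 2^30 / 4096 = 2097152 entries, which bounds the p2m table that the new get_phys_to_machine()/set_phys_to_machine() accessors now hide. A sketch of a lookup through the new interface (the helper name is hypothetical):

/* Hypothetical lookup via the accessor-based p2m interface. */
static unsigned long lookup_mfn(unsigned long pfn)
{
	if (!phys_to_machine_mapping_valid(pfn))
		return INVALID_P2M_ENTRY;	/* no backing machine frame */
	return pfn_to_mfn(pfn);			/* strips FOREIGN_FRAME_BIT */
}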
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a3..921b45840449 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do { \
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8a..2d3a18de295b 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do { \
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */