path: root/arch/x86/include/asm
author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/include/asm
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/acpi.h | 30
-rw-r--r--  arch/x86/include/asm/alternative-asm.h | 9
-rw-r--r--  arch/x86/include/asm/alternative.h | 10
-rw-r--r--  arch/x86/include/asm/amd_iommu.h | 6
-rw-r--r--  arch/x86/include/asm/amd_iommu_proto.h | 15
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 51
-rw-r--r--  arch/x86/include/asm/amd_nb.h | 64
-rw-r--r--  arch/x86/include/asm/apb_timer.h | 3
-rw-r--r--  arch/x86/include/asm/apic.h | 91
-rw-r--r--  arch/x86/include/asm/apicdef.h | 15
-rw-r--r--  arch/x86/include/asm/bios_ebda.h | 28
-rw-r--r--  arch/x86/include/asm/bitops.h | 6
-rw-r--r--  arch/x86/include/asm/boot.h | 6
-rw-r--r--  arch/x86/include/asm/bootparam.h | 2
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 44
-rw-r--r--  arch/x86/include/asm/calgary.h | 4
-rw-r--r--  arch/x86/include/asm/calling.h | 52
-rw-r--r--  arch/x86/include/asm/ce4100.h | 6
-rw-r--r--  arch/x86/include/asm/cpu.h | 2
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 32
-rw-r--r--  arch/x86/include/asm/debugreg.h | 2
-rw-r--r--  arch/x86/include/asm/desc.h | 152
-rw-r--r--  arch/x86/include/asm/dma.h | 19
-rw-r--r--  arch/x86/include/asm/dwarf2.h | 20
-rw-r--r--  arch/x86/include/asm/e820.h | 25
-rw-r--r--  arch/x86/include/asm/efi.h | 3
-rw-r--r--  arch/x86/include/asm/entry_arch.h | 28
-rw-r--r--  arch/x86/include/asm/fixmap.h | 19
-rw-r--r--  arch/x86/include/asm/frame.h | 6
-rw-r--r--  arch/x86/include/asm/ftrace.h | 7
-rw-r--r--  arch/x86/include/asm/futex.h | 22
-rw-r--r--  arch/x86/include/asm/gart.h | 42
-rw-r--r--  arch/x86/include/asm/gpio.h | 5
-rw-r--r--  arch/x86/include/asm/hardirq.h | 2
-rw-r--r--  arch/x86/include/asm/highmem.h | 11
-rw-r--r--  arch/x86/include/asm/hpet.h | 10
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 43
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 12
-rw-r--r--  arch/x86/include/asm/i387.h | 205
-rw-r--r--  arch/x86/include/asm/i8253.h | 2
-rw-r--r--  arch/x86/include/asm/i8259.h | 2
-rw-r--r--  arch/x86/include/asm/idle.h | 2
-rw-r--r--  arch/x86/include/asm/init.h | 6
-rw-r--r--  arch/x86/include/asm/io.h | 39
-rw-r--r--  arch/x86/include/asm/io_apic.h | 70
-rw-r--r--  arch/x86/include/asm/iomap.h | 4
-rw-r--r--  arch/x86/include/asm/iommu_table.h | 100
-rw-r--r--  arch/x86/include/asm/ipi.h | 8
-rw-r--r--  arch/x86/include/asm/irq.h | 16
-rw-r--r--  arch/x86/include/asm/irq_controller.h | 12
-rw-r--r--  arch/x86/include/asm/irq_remapping.h | 35
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 53
-rw-r--r--  arch/x86/include/asm/irqflags.h | 32
-rw-r--r--  arch/x86/include/asm/jump_label.h | 42
-rw-r--r--  arch/x86/include/asm/k8.h | 36
-rw-r--r--  arch/x86/include/asm/kdebug.h | 2
-rw-r--r--  arch/x86/include/asm/kgdb.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 241
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 210
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 30
-rw-r--r--  arch/x86/include/asm/linkage.h | 5
-rw-r--r--  arch/x86/include/asm/mach_traps.h | 12
-rw-r--r--  arch/x86/include/asm/mce.h | 5
-rw-r--r--  arch/x86/include/asm/memblock.h | 23
-rw-r--r--  arch/x86/include/asm/microcode.h | 6
-rw-r--r--  arch/x86/include/asm/mmu.h | 6
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 5
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 33
-rw-r--r--  arch/x86/include/asm/mmzone_64.h | 26
-rw-r--r--  arch/x86/include/asm/module.h | 9
-rw-r--r--  arch/x86/include/asm/mpspec.h | 34
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 7
-rw-r--r--  arch/x86/include/asm/mrst-vrtc.h | 9
-rw-r--r--  arch/x86/include/asm/mrst.h | 24
-rw-r--r--  arch/x86/include/asm/msr-index.h | 34
-rw-r--r--  arch/x86/include/asm/mwait.h | 15
-rw-r--r--  arch/x86/include/asm/nmi.h | 72
-rw-r--r--  arch/x86/include/asm/nops.h | 148
-rw-r--r--  arch/x86/include/asm/numa.h | 80
-rw-r--r--  arch/x86/include/asm/numa_32.h | 3
-rw-r--r--  arch/x86/include/asm/numa_64.h | 46
-rw-r--r--  arch/x86/include/asm/numaq.h | 7
-rw-r--r--  arch/x86/include/asm/olpc.h | 14
-rw-r--r--  arch/x86/include/asm/olpc_ofw.h | 14
-rw-r--r--  arch/x86/include/asm/page_32_types.h | 4
-rw-r--r--  arch/x86/include/asm/page_types.h | 11
-rw-r--r--  arch/x86/include/asm/paravirt.h | 47
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 7
-rw-r--r--  arch/x86/include/asm/pci.h | 36
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 1
-rw-r--r--  arch/x86/include/asm/percpu.h | 256
-rw-r--r--  arch/x86/include/asm/perf_event.h | 21
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h | 123
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 9
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 34
-rw-r--r--  arch/x86/include/asm/pgtable.h | 147
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 16
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 32
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 4
-rw-r--r--  arch/x86/include/asm/probe_roms.h | 8
-rw-r--r--  arch/x86/include/asm/processor-flags.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 45
-rw-r--r--  arch/x86/include/asm/prom.h | 69
-rw-r--r--  arch/x86/include/asm/ptrace-abi.h | 2
-rw-r--r--  arch/x86/include/asm/ptrace.h | 22
-rw-r--r--  arch/x86/include/asm/pvclock.h | 44
-rw-r--r--  arch/x86/include/asm/reboot.h | 5
-rw-r--r--  arch/x86/include/asm/rwsem.h | 80
-rw-r--r--  arch/x86/include/asm/segment.h | 44
-rw-r--r--  arch/x86/include/asm/setup.h | 15
-rw-r--r--  arch/x86/include/asm/smp.h | 34
-rw-r--r--  arch/x86/include/asm/smpboot_hooks.h | 3
-rw-r--r--  arch/x86/include/asm/srat.h | 39
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 34
-rw-r--r--  arch/x86/include/asm/suspend_32.h | 2
-rw-r--r--  arch/x86/include/asm/suspend_64.h | 5
-rw-r--r--  arch/x86/include/asm/svm.h | 57
-rw-r--r--  arch/x86/include/asm/swiotlb.h | 13
-rw-r--r--  arch/x86/include/asm/system.h | 87
-rw-r--r--  arch/x86/include/asm/system_64.h | 22
-rw-r--r--  arch/x86/include/asm/thread_info.h | 10
-rw-r--r--  arch/x86/include/asm/timer.h | 6
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 27
-rw-r--r--  arch/x86/include/asm/trampoline.h | 36
-rw-r--r--  arch/x86/include/asm/traps.h | 1
-rw-r--r--  arch/x86/include/asm/tsc.h | 6
-rw-r--r--  arch/x86/include/asm/types.h | 16
-rw-r--r--  arch/x86/include/asm/uaccess.h | 3
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 1
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 1
-rw-r--r--  arch/x86/include/asm/unistd_32.h | 10
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 16
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 596
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 92
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 1140
-rw-r--r--  arch/x86/include/asm/vdso.h | 14
-rw-r--r--  arch/x86/include/asm/vgtod.h | 2
-rw-r--r--  arch/x86/include/asm/vmi.h | 269
-rw-r--r--  arch/x86/include/asm/vmi_time.h | 98
-rw-r--r--  arch/x86/include/asm/vmx.h | 15
-rw-r--r--  arch/x86/include/asm/vsyscall.h | 12
-rw-r--r--  arch/x86/include/asm/vvar.h | 52
-rw-r--r--  arch/x86/include/asm/x2apic.h | 62
-rw-r--r--  arch/x86/include/asm/x86_init.h | 23
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h | 39
-rw-r--r--  arch/x86/include/asm/xen/hypervisor.h | 35
-rw-r--r--  arch/x86/include/asm/xen/interface.h | 8
-rw-r--r--  arch/x86/include/asm/xen/interface_32.h | 5
-rw-r--r--  arch/x86/include/asm/xen/interface_64.h | 13
-rw-r--r--  arch/x86/include/asm/xen/page.h | 67
-rw-r--r--  arch/x86/include/asm/xen/pci.h | 81
153 files changed, 4496 insertions, 2334 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 92091de11113..610001d385dd 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,6 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
+#include <asm/trampoline.h>
 
 #define COMPILER_DEPENDENT_INT64   long long
 #define COMPILER_DEPENDENT_UINT64  unsigned long long
@@ -88,11 +89,15 @@ extern int acpi_disabled;
 extern int acpi_pci_disabled;
 extern int acpi_skip_timer_override;
 extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
 
 extern u8 acpi_sci_flags;
 extern int acpi_sci_override_gsi;
 void acpi_pic_sci_set_trigger(unsigned int, u16);
 
+extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
+				  int trigger, int polarity);
+
 static inline void disable_acpi(void)
 {
 	acpi_disabled = 1;
@@ -109,11 +114,11 @@ static inline void acpi_disable_pci(void)
 	acpi_noirq_set();
 }
 
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
+/* Low-level suspend routine. */
+extern int acpi_suspend_lowlevel(void);
 
-extern unsigned long acpi_wakeup_address;
+extern const unsigned char acpi_wakeup_code[];
+#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
 
 /* early initialization routine */
 extern void acpi_reserve_wakeup_memory(void);
@@ -134,7 +139,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	    boot_cpu_data.x86_model <= 0x05 &&
 	    boot_cpu_data.x86_mask < 0x0A)
 		return 1;
-	else if (c1e_detected)
+	else if (amd_e400_c1e_detected)
 		return 1;
 	else
 		return max_cstate;
@@ -178,21 +183,10 @@ static inline void disable_acpi(void) { }
 
 #define ARCH_HAS_POWER_INIT	1
 
-struct bootnode;
-
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
-extern int acpi_scan_nodes(unsigned long start, unsigned long end);
-#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				int num_nodes)
-{
-}
-#endif
+extern int x86_acpi_numa_init(void);
+#endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
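
A minimal sketch of how the reworked wakeup symbols fit together (not part of this merge; acpi_set_firmware_waking_vector() is the standard ACPICA call assumed here):

	/* acpi_wakeup_address now expands to the physical address of the
	 * trampoline copy of acpi_wakeup_code, so the S3 prepare path can
	 * simply hand it to the firmware: */
	acpi_set_firmware_waking_vector(acpi_wakeup_address);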
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index a63a68be1cce..94d420b360d1 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -15,4 +15,13 @@
 	.endm
 #endif
 
+.macro altinstruction_entry orig alt feature orig_len alt_len
+	.align 8
+	.quad \orig
+	.quad \alt
+	.word \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
 #endif /* __ASSEMBLY__ */
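
A sketch of how the new altinstruction_entry macro would be driven from an assembly file (labels and the feature flag are illustrative; the 661/663 label pattern mirrors the C-side ALTERNATIVE() macro):

	.section .altinstructions, "a"
	/* original insns at 661, replacement at 663, patched in when
	 * the CPU advertises X86_FEATURE_XMM2: */
	altinstruction_entry 661f, 663f, X86_FEATURE_XMM2, 662f-661f, 664f-663f
	.previous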
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index bc6abb7bc7ee..bf535f947e8c 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -65,6 +65,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
 extern int alternatives_text_reserved(void *start, void *end);
+extern bool skip_smp_alternatives;
 #else
 static inline void alternatives_smp_module_add(struct module *mod, char *name,
 					       void *locks, void *locks_end,
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif
 
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -177,7 +180,14 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
  * On the local CPU you need to be protected again NMI or MCE handlers seeing an
  * inconsistent instruction while you patch.
  */
+struct text_poke_param {
+	void *addr;
+	const void *opcode;
+	size_t len;
+};
+
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
+extern void text_poke_smp_batch(struct text_poke_param *params, int n);
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
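
text_poke_smp_batch() exists so several patch sites can share one synchronization cycle. A hypothetical caller (site addresses and opcode buffers below are assumptions):

	struct text_poke_param params[2] = {
		{ .addr = site1, .opcode = code1, .len = len1 },
		{ .addr = site2, .opcode = code2, .len = len2 },
	};

	/* One global sync covers both sites, instead of one per
	 * text_poke_smp() call: */
	text_poke_smp_batch(params, 2);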
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 5af2982133b5..a6863a2dec1f 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -24,11 +24,11 @@
 
 #ifdef CONFIG_AMD_IOMMU
 
-extern void amd_iommu_detect(void);
+extern int amd_iommu_detect(void);
 
 #else
 
-static inline void amd_iommu_detect(void) { }
+static inline int amd_iommu_detect(void) { return -ENODEV; }
 
 #endif
 
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index cb030374b90a..55d95eb789b3 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -19,13 +19,12 @@
 #ifndef _ASM_X86_AMD_IOMMU_PROTO_H
 #define _ASM_X86_AMD_IOMMU_PROTO_H
 
-struct amd_iommu;
+#include <asm/amd_iommu_types.h>
 
 extern int amd_iommu_init_dma_ops(void);
 extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_flush_all_domains(void);
-extern void amd_iommu_flush_all_devices(void);
 extern void amd_iommu_apply_erratum_63(u16 devid);
 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
 extern int amd_iommu_init_devices(void);
@@ -44,4 +43,12 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
 	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+		return false;
+
+	return !!(iommu->features & f);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
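
A sketch of the intended iommu_feature() usage (the surrounding driver code is assumed; the FEATURE_* bits come from amd_iommu_types.h below):

	/* Only issue the new INVALIDATE_ALL command when the extended
	 * feature register advertises it: */
	if (iommu_feature(iommu, FEATURE_IA))
		iommu_flush_all(iommu);	/* hypothetical helper */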
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 08616180deaf..4c9982995414 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
  * Author: Joerg Roedel <joerg.roedel@amd.com>
  *         Leo Duran <leo.duran@amd.com>
  *
@@ -68,12 +68,25 @@
 #define MMIO_CONTROL_OFFSET     0x0018
 #define MMIO_EXCL_BASE_OFFSET   0x0020
 #define MMIO_EXCL_LIMIT_OFFSET  0x0028
+#define MMIO_EXT_FEATURES	0x0030
 #define MMIO_CMD_HEAD_OFFSET	0x2000
 #define MMIO_CMD_TAIL_OFFSET	0x2008
 #define MMIO_EVT_HEAD_OFFSET	0x2010
 #define MMIO_EVT_TAIL_OFFSET	0x2018
 #define MMIO_STATUS_OFFSET	0x2020
 
+
+/* Extended Feature Bits */
+#define FEATURE_PREFETCH	(1ULL<<0)
+#define FEATURE_PPR		(1ULL<<1)
+#define FEATURE_X2APIC		(1ULL<<2)
+#define FEATURE_NX		(1ULL<<3)
+#define FEATURE_GT		(1ULL<<4)
+#define FEATURE_IA		(1ULL<<6)
+#define FEATURE_GA		(1ULL<<7)
+#define FEATURE_HE		(1ULL<<8)
+#define FEATURE_PC		(1ULL<<9)
+
 /* MMIO status bits */
 #define MMIO_STATUS_COM_WAIT_INT_MASK	0x04
 
@@ -113,7 +126,9 @@
 /* command specific defines */
 #define CMD_COMPL_WAIT		0x01
 #define CMD_INV_DEV_ENTRY	0x02
 #define CMD_INV_IOMMU_PAGES	0x03
+#define CMD_INV_IOTLB_PAGES	0x04
+#define CMD_INV_ALL		0x08
 
 #define CMD_COMPL_WAIT_STORE_MASK	0x01
 #define CMD_COMPL_WAIT_INT_MASK		0x02
@@ -215,6 +230,8 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
+#define DTE_FLAG_IOTLB	0x01
+
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
 #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -227,6 +244,7 @@
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
+#define IOMMU_CAP_EFR     27
 
 #define MAX_DOMAIN_ID 65536
 
@@ -249,6 +267,8 @@ extern bool amd_iommu_dump;
 
 /* global flag if IOMMUs cache non-present entries */
 extern bool amd_iommu_np_cache;
+/* Only true if all IOMMUs support device IOTLBs */
+extern bool amd_iommu_iotlb_sup;
 
 /*
  * Make iterating over all IOMMUs easier
@@ -371,6 +391,9 @@ struct amd_iommu {
 	/* flags read from acpi table */
 	u8 acpi_flags;
 
+	/* Extended features */
+	u64 features;
+
 	/*
 	 * Capability pointer. There could be more than one IOMMU per PCI
 	 * device function if there are more than one AMD IOMMU capability
@@ -409,20 +432,26 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
-	/* becomes true if a command buffer reset is running */
-	bool reset_in_progress;
-
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 
 	/*
-	 * This array is required to work around a potential BIOS bug.
-	 * The BIOS may miss to restore parts of the PCI configuration
-	 * space when the system resumes from S3. The result is that the
-	 * IOMMU does not execute commands anymore which leads to system
-	 * failure.
+	 * We can't rely on the BIOS to restore all values on reinit, so we
+	 * need to stash them
 	 */
-	u32 cache_cfg[4];
+
+	/* The iommu BAR */
+	u32 stored_addr_lo;
+	u32 stored_addr_hi;
+
+	/*
+	 * Each iommu has 6 l1s, each of which is documented as having 0x12
+	 * registers
+	 */
+	u32 stored_l1[6][0x12];
+
+	/* The l2 indirect registers */
+	u32 stored_l2[0x83];
 };
 
 /*
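
A sketch of how the new extended-feature plumbing might be filled in at init time (readq() on the MMIO base is an assumption; the real driver may read the register in two 32-bit halves):

	/* Cache the Extended Feature Register once per IOMMU ... */
	iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);

	/* ... so later code can test individual bits via iommu_feature(): */
	if (iommu->features & FEATURE_GT)
		pr_info("AMD-Vi: guest translation supported\n");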
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
new file mode 100644
index 000000000000..67f87f257611
--- /dev/null
+++ b/arch/x86/include/asm/amd_nb.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_X86_AMD_NB_H
+#define _ASM_X86_AMD_NB_H
+
+#include <linux/pci.h>
+
+struct amd_nb_bus_dev_range {
+	u8 bus;
+	u8 dev_base;
+	u8 dev_limit;
+};
+
+extern const struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
+
+extern bool early_is_amd_nb(u32 value);
+extern int amd_cache_northbridges(void);
+extern void amd_flush_garts(void);
+extern int amd_numa_init(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, int);
+
+struct amd_northbridge {
+	struct pci_dev *misc;
+	struct pci_dev *link;
+};
+
+struct amd_northbridge_info {
+	u16 num;
+	u64 flags;
+	struct amd_northbridge *nb;
+};
+extern struct amd_northbridge_info amd_northbridges;
+
+#define AMD_NB_GART			BIT(0)
+#define AMD_NB_L3_INDEX_DISABLE		BIT(1)
+#define AMD_NB_L3_PARTITIONING		BIT(2)
+
+#ifdef CONFIG_AMD_NB
+
+static inline u16 amd_nb_num(void)
+{
+	return amd_northbridges.num;
+}
+
+static inline bool amd_nb_has_feature(unsigned feature)
+{
+	return ((amd_northbridges.flags & feature) == feature);
+}
+
+static inline struct amd_northbridge *node_to_amd_nb(int node)
+{
+	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
+}
+
+#else
+
+#define amd_nb_num(x)		0
+#define amd_nb_has_feature(x)	false
+#define node_to_amd_nb(x)	NULL
+
+#endif
+
+
+#endif /* _ASM_X86_AMD_NB_H */
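
A sketch of walking the cached northbridges with the new accessors (assumes amd_cache_northbridges() has already run; the loop body is illustrative):

	int node;

	for (node = 0; node < amd_nb_num(); node++) {
		struct amd_northbridge *nb = node_to_amd_nb(node);

		if (nb && amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
			/* poke nb->misc PCI config space here */;
	}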
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index a69b1ac9eaf8..af60d8a2e288 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -54,7 +54,6 @@ extern struct clock_event_device *global_clock_event;
 extern unsigned long apbt_quick_calibrate(void);
 extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
 extern void apbt_setup_secondary_clock(void);
-extern unsigned int boot_cpu_id;
 
 extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
 extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
@@ -63,7 +62,7 @@ extern int sfi_mtimer_num;
 #else /* CONFIG_APB_TIMER */
 
 static inline unsigned long apbt_quick_calibrate(void) {return 0; }
-static inline void apbt_time_init(void) {return 0; }
+static inline void apbt_time_init(void) { }
 
 #endif
 #endif /* ASM_X86_APBT_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 1fa03e04ae44..4a0b7c7e2cce 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -2,7 +2,6 @@
 #define _ASM_X86_APIC_H
 
 #include <linux/cpumask.h>
-#include <linux/delay.h>
 #include <linux/pm.h>
 
 #include <asm/alternative.h>
@@ -141,13 +140,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline u32 native_apic_msr_read(u32 reg)
 {
-	u32 low, high;
+	u64 msr;
 
 	if (reg == APIC_DFR)
 		return -1;
 
-	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
-	return low;
+	rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+	return (u32)msr;
 }
 
 static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +180,12 @@ extern void enable_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
-	int msr, msr2;
+	u64 msr;
 
 	if (!cpu_has_x2apic)
 		return 0;
 
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	rdmsrl(MSR_IA32_APICBASE, msr);
 	if (msr & X2APIC_ENABLE)
 		return 1;
 	return 0;
@@ -220,7 +219,6 @@ extern void enable_IR_x2apic(void);
 
 extern int get_physical_broadcast(void);
 
-extern void apic_disable(void);
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC(void);
@@ -228,22 +226,22 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
 extern void disable_local_APIC(void);
 extern void lapic_shutdown(void);
 extern int verify_local_APIC(void);
-extern void cache_APIC_registers(void);
 extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
 extern void end_local_APIC_setup(void);
+extern void bsp_end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
+void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
-extern void enable_NMI_through_LVT0(void);
+extern int apic_force_enable(unsigned long addr);
 
 /*
  * On 32bit this is mach-xxx local
  */
 #ifdef CONFIG_X86_64
-extern void early_init_lapic_mapping(void);
 extern int apic_is_clustered_box(void);
 #else
 static inline int apic_is_clustered_box(void)
@@ -252,16 +250,13 @@ static inline int apic_is_clustered_box(void)
 }
 #endif
 
-extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
-extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
-
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 #define local_apic_timer_c2_ok		1
 static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
-static inline void apic_disable(void) { }
 # define setup_boot_APIC_clock x86_init_noop
 # define setup_secondary_APIC_clock x86_init_noop
 #endif /* !CONFIG_X86_LOCAL_APIC */
@@ -307,8 +302,6 @@ struct apic {
 
 	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
-	int (*apicid_to_node)(int logical_apicid);
-	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
 	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
 	void (*setup_portio_remap)(void);
@@ -356,6 +349,28 @@ struct apic {
 	void (*icr_write)(u32 low, u32 high);
 	void (*wait_icr_idle)(void);
 	u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Called very early during boot from get_smp_config(). It should
+	 * return the logical apicid. x86_[bios]_cpu_to_apicid is
+	 * initialized before this function is called.
+	 *
+	 * If logical apicid can't be determined that early, the function
+	 * may return BAD_APICID. Logical apicid will be configured after
+	 * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
+	 * won't be applied properly during early boot in this case.
+	 */
+	int (*x86_32_early_logical_apicid)(int cpu);
+
+	/*
+	 * Optional method called from setup_local_APIC() after logical
+	 * apicid is guaranteed to be known to initialize apicid -> node
+	 * mapping if NUMA initialization hasn't done so already. Don't
+	 * add new users.
+	 */
+	int (*x86_32_numa_cpu_node)(int cpu);
+#endif
 };
 
 /*
@@ -366,6 +381,26 @@ struct apic {
 extern struct apic *apic;
 
 /*
+ * APIC drivers are probed based on how they are listed in the .apicdrivers
+ * section. So the order is important and enforced by the ordering
+ * of different apic driver files in the Makefile.
+ *
+ * For the files having two apic drivers, we use apic_drivers()
+ * to enforce the order with in them.
+ */
+#define apic_driver(sym)					\
+	static struct apic *__apicdrivers_##sym __used		\
+	__aligned(sizeof(struct apic *))			\
+	__section(.apicdrivers) = { &sym }
+
+#define apic_drivers(sym1, sym2)					\
+	static struct apic *__apicdrivers_##sym1##sym2[2] __used	\
+	__aligned(sizeof(struct apic *))				\
+	__section(.apicdrivers) = { &sym1, &sym2 }
+
+extern struct apic *__apicdrivers[], *__apicdrivers_end[];
+
+/*
  * APIC functionality to boot other CPUs - only used on SMP:
  */
 #ifdef CONFIG_SMP
@@ -443,15 +478,10 @@ static inline unsigned default_get_apic_id(unsigned long x)
 #define DEFAULT_TRAMPOLINE_PHYS_HIGH	0x469
 
 #ifdef CONFIG_X86_64
-extern struct apic apic_flat;
-extern struct apic apic_physflat;
-extern struct apic apic_x2apic_cluster;
-extern struct apic apic_x2apic_phys;
 extern int default_acpi_madt_oem_check(char *, char *);
 
 extern void apic_send_IPI_self(int vector);
 
-extern struct apic apic_x2apic_uv_x;
 DECLARE_PER_CPU(int, x2apic_extra_bits);
 
 extern int default_cpu_present_to_apicid(int mps_cpu);
@@ -465,7 +495,7 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert)
 	return;
 }
 
-extern void generic_bigsmp_probe(void);
+extern struct apic *generic_bigsmp_probe(void);
 
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -501,7 +531,10 @@ extern struct apic apic_noop;
 
 #ifdef CONFIG_X86_32
 
-extern struct apic apic_default;
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+	return BAD_APICID;
+}
 
 /*
  * Set up the logical destination ID.
@@ -522,8 +555,6 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 	return cpuid_apic >> index_msb;
 }
 
-extern int default_apicid_to_node(int logical_apicid);
-
 #endif
 
 static inline unsigned int
@@ -558,12 +589,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 	*retmap = *phys_map;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int default_cpu_to_logical_apicid(int cpu)
-{
-	return 1 << cpu;
-}
-
 static inline int __default_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -596,8 +621,4 @@ extern int default_check_phys_apicid_present(int phys_apicid)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_32
-extern u8 cpu_2_logical_apicid[NR_CPUS];
-#endif
-
 #endif /* _ASM_X86_APIC_H */
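
A sketch of how an APIC driver registers itself under the new .apicdrivers probing scheme (the driver below is hypothetical; link order in the Makefile decides probe priority):

	static struct apic apic_example = {
		.name			= "example",
		/* ... remaining callbacks ... */
	};

	/* Places &apic_example into the .apicdrivers section: */
	apic_driver(apic_example);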
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7fe3b3060f08..34595d5e1038 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -78,6 +78,7 @@
 #define	APIC_DEST_LOGICAL	0x00800
 #define	APIC_DEST_PHYSICAL	0x00000
 #define	APIC_DM_FIXED		0x00000
+#define	APIC_DM_FIXED_MASK	0x00700
 #define	APIC_DM_LOWEST		0x00100
 #define	APIC_DM_SMI		0x00200
 #define	APIC_DM_REMRD		0x00300
@@ -131,6 +132,7 @@
 #define	APIC_EILVTn(n)		(0x500 + 0x10 * n)
 #define	APIC_EILVT_NR_AMD_K8	1	/* # of extended interrupts */
 #define	APIC_EILVT_NR_AMD_10H	4
+#define	APIC_EILVT_NR_MAX	APIC_EILVT_NR_AMD_10H
 #define	APIC_EILVT_LVTOFF(x)	(((x) >> 4) & 0xF)
 #define	APIC_EILVT_MSG_FIX	0x0
 #define	APIC_EILVT_MSG_SMI	0x2
@@ -144,6 +146,7 @@
 
 #ifdef CONFIG_X86_32
 # define MAX_IO_APICS 64
+# define MAX_LOCAL_APIC 256
 #else
 # define MAX_IO_APICS 128
 # define MAX_LOCAL_APIC 32768
@@ -424,4 +427,16 @@ struct local_apic {
 #else
  #define BAD_APICID 0xFFFFu
 #endif
+
+enum ioapic_irq_destination_types {
+	dest_Fixed		= 0,
+	dest_LowestPrio		= 1,
+	dest_SMI		= 2,
+	dest__reserved_1	= 3,
+	dest_NMI		= 4,
+	dest_INIT		= 5,
+	dest__reserved_2	= 6,
+	dest_ExtINT		= 7
+};
+
 #endif /* _ASM_X86_APICDEF_H */
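
A small sketch of what APIC_DM_FIXED_MASK is for: it isolates the three delivery-mode bits of an ICR/LVT value so they can be compared against the APIC_DM_* constants (the variable and handler are illustrative):

	/* True only for fixed-mode interrupts: */
	if ((icr_low & APIC_DM_FIXED_MASK) == APIC_DM_FIXED)
		handle_fixed_mode();	/* hypothetical */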
diff --git a/arch/x86/include/asm/bios_ebda.h b/arch/x86/include/asm/bios_ebda.h
index 3c7521063d3f..aa6a3170ab5a 100644
--- a/arch/x86/include/asm/bios_ebda.h
+++ b/arch/x86/include/asm/bios_ebda.h
@@ -4,16 +4,40 @@
 #include <asm/io.h>
 
 /*
- * there is a real-mode segmented pointer pointing to the
- * 4K EBDA area at 0x40E.
+ * Returns physical address of EBDA.  Returns 0 if there is no EBDA.
  */
 static inline unsigned int get_bios_ebda(void)
 {
+	/*
+	 * There is a real-mode segmented pointer pointing to the
+	 * 4K EBDA area at 0x40E.
+	 */
 	unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
 	address <<= 4;
 	return address;	/* 0 means none */
 }
 
+/*
+ * Return the sanitized length of the EBDA in bytes, if it exists.
+ */
+static inline unsigned int get_bios_ebda_length(void)
+{
+	unsigned int address;
+	unsigned int length;
+
+	address = get_bios_ebda();
+	if (!address)
+		return 0;
+
+	/* EBDA length is byte 0 of the EBDA (stored in KiB) */
+	length = *(unsigned char *)phys_to_virt(address);
+	length <<= 10;
+
+	/* Trim the length if it extends beyond 640KiB */
+	length = min_t(unsigned int, (640 * 1024) - address, length);
+	return length;
+}
+
 void reserve_ebda_region(void);
 
 #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
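
A sketch of pairing the two EBDA helpers at boot (the reservation call is an assumption; the era-appropriate API may differ):

	unsigned int ebda_addr = get_bios_ebda();
	unsigned int ebda_len  = get_bios_ebda_length();

	/* The length is already clamped so the range ends at 640KiB: */
	if (ebda_addr && ebda_len)
		memblock_x86_reserve_range(ebda_addr, ebda_addr + ebda_len,
					   "EBDA");	/* assumed API */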
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index bafd80defa43..69d58131bc8e 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -440,6 +440,8 @@ static inline int fls(int x)
 
 #ifdef __KERNEL__
 
+#include <asm-generic/bitops/find.h>
+
 #include <asm-generic/bitops/sched.h>
 
 #define ARCH_HAS_FAST_MULTIPLIER 1
@@ -454,14 +456,12 @@ static inline int fls(int x)
 
 #ifdef __KERNEL__
 
-#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/le.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr)			\
 	test_and_set_bit((nr), (unsigned long *)(addr))
 #define ext2_clear_bit_atomic(lock, nr, addr)			\
 	test_and_clear_bit((nr), (unsigned long *)(addr))
 
-#include <asm-generic/bitops/minix.h>
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_BITOPS_H */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3b62ab56c7a0..5e1a2eef3e7c 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -32,11 +32,7 @@
32#define BOOT_HEAP_SIZE 0x400000 32#define BOOT_HEAP_SIZE 0x400000
33#else /* !CONFIG_KERNEL_BZIP2 */ 33#else /* !CONFIG_KERNEL_BZIP2 */
34 34
35#ifdef CONFIG_X86_64 35#define BOOT_HEAP_SIZE 0x8000
36#define BOOT_HEAP_SIZE 0x7000
37#else
38#define BOOT_HEAP_SIZE 0x4000
39#endif
40 36
41#endif /* !CONFIG_KERNEL_BZIP2 */ 37#endif /* !CONFIG_KERNEL_BZIP2 */
42 38
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index 8e6218550e77..e020d88ec02d 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -12,6 +12,7 @@
 /* setup data types */
 #define SETUP_NONE			0
 #define SETUP_E820_EXT			1
+#define SETUP_DTB			2
 
 /* extensible setup data list node */
 struct setup_data {
@@ -124,6 +125,7 @@ enum {
 	X86_SUBARCH_LGUEST,
 	X86_SUBARCH_XEN,
 	X86_SUBARCH_MRST,
+	X86_SUBARCH_CE4100,
 	X86_NR_SUBARCHS,
 };
 
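
A sketch of how a SETUP_DTB node would be found on the setup_data list (the mapping helpers are assumptions; early boot code of this era would use the early_memremap()/early_iounmap() pair):

	u64 pa = boot_params.hdr.setup_data;

	while (pa) {
		struct setup_data *sd = early_memremap(pa, sizeof(*sd));

		if (sd->type == SETUP_DTB)
			/* sd->data holds the flattened device tree */;
		pa = sd->next;
		early_iounmap(sd, sizeof(*sd));
	}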
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63e35ec9075c..4e12668711e5 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -1,48 +1,8 @@
 #ifndef _ASM_X86_CACHEFLUSH_H
 #define _ASM_X86_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
-				      unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
-					   struct page *page,
-					   unsigned long addr,
-					   unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
-				      unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-				     struct page *page, unsigned long vaddr,
-				     void *dst, const void *src,
-				     unsigned long len)
-{
-	memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-				       struct page *page, unsigned long vaddr,
-				       void *dst, const void *src,
-				       unsigned long len)
-{
-	memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_X86_PAT
 /*
@@ -111,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
  * Read/Write    : ReadOnly, ReadWrite
  * Presence      : NotPresent
  *
- * Within a catagory, the attributes are mutually exclusive.
+ * Within a category, the attributes are mutually exclusive.
  *
  * The implementation of this API will take care of various aspects that
  * are associated with changing such attributes, such as:
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index 0918654305af..0d467b338835 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -62,9 +62,9 @@ struct cal_chipset_ops {
 extern int use_calgary;
 
 #ifdef CONFIG_CALGARY_IOMMU
-extern void detect_calgary(void);
+extern int detect_calgary(void);
 #else
-static inline void detect_calgary(void) { return; }
+static inline int detect_calgary(void) { return -ENODEV; }
 #endif
 
 #endif /* _ASM_X86_CALGARY_H */
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 0e63c9a2a8d0..30af5a832163 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -48,36 +48,38 @@ For 32-bit we have the following conventions - kernel is built with
 
 
 /*
- * 64-bit system call stack frame layout defines and helpers,
- * for assembly code:
+ * 64-bit system call stack frame layout defines and helpers, for
+ * assembly code (note that the seemingly unnecessary parentheses
+ * are to prevent cpp from inserting spaces in expressions that get
+ * passed to macros):
  */
 
-#define R15		  0
-#define R14		  8
-#define R13		 16
-#define R12		 24
-#define RBP		 32
-#define RBX		 40
+#define R15		  (0)
+#define R14		  (8)
+#define R13		 (16)
+#define R12		 (24)
+#define RBP		 (32)
+#define RBX		 (40)
 
 /* arguments: interrupts/non tracing syscalls only save up to here: */
-#define R11		 48
-#define R10		 56
-#define R9		 64
-#define R8		 72
-#define RAX		 80
-#define RCX		 88
-#define RDX		 96
-#define RSI		104
-#define RDI		112
-#define ORIG_RAX	120       /* + error_code */
+#define R11		 (48)
+#define R10		 (56)
+#define R9		 (64)
+#define R8		 (72)
+#define RAX		 (80)
+#define RCX		 (88)
+#define RDX		 (96)
+#define RSI		(104)
+#define RDI		(112)
+#define ORIG_RAX	(120)       /* + error_code */
 /* end of arguments */
 
 /* cpu exception frame or undefined in case of fast syscall: */
-#define RIP		128
-#define CS		136
-#define EFLAGS		144
-#define RSP		152
-#define SS		160
+#define RIP		(128)
+#define CS		(136)
+#define EFLAGS		(144)
+#define RSP		(152)
+#define SS		(160)
 
 #define ARGOFFSET	R11
 #define SWFRAME		ORIG_RAX
@@ -111,7 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endif
 	.endm
 
-#define ARG_SKIP	9*8
+#define ARG_SKIP	(9*8)
 
 	.macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
 			    skipr8910=0, skiprdx=0
@@ -169,7 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endif
 	.endm
 
-#define REST_SKIP	6*8
+#define REST_SKIP	(6*8)
 
 	.macro SAVE_REST
 	subq $REST_SKIP, %rsp
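
The parentheses matter because these defines end up inside larger assembler expressions. A sketch of the existing entry_64.S-style usage (the instruction itself is illustrative):

	/* Pre-patch this expanded to something like "120-48(%rsp)"; cpp
	 * may render an unparenthesized operand with stray spaces, which
	 * gas can misparse. With the parenthesized defines it expands to
	 * "(120)-(48)(%rsp)", a single clean operand: */
	movq ORIG_RAX-ARGOFFSET(%rsp), %rcx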
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644
index 000000000000..e656ad8c0a2e
--- /dev/null
+++ b/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index b185091bf19c..4564c8e28a33 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-extern unsigned int boot_cpu_id;
+int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3f76523589af..71cc3800712c 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -125,7 +125,7 @@
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
 #define X86_FEATURE_F16C	(4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRND	(4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_RDRAND	(4*32+30) /* The RDRAND instruction */
 #define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
@@ -152,10 +152,15 @@
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP		(6*32+11) /* extended AVX instructions */
 #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP		(6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4	(6*32+16) /* 4 operands MAC instructions */
 #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -180,9 +185,18 @@
 #define X86_FEATURE_LBRV	(8*32+ 6) /* AMD LBR Virtualization support */
 #define X86_FEATURE_SVML	(8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
 #define X86_FEATURE_NRIPS	(8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(8*32+11) /* AMD flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(8*32+13) /* AMD filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(8*32+14) /* AMD pause filter threshold */
+
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_SMEP	(9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
@@ -195,8 +209,7 @@ extern const char * const x86_power_flags[32];
 #define test_cpu_cap(c, bit)						\
 	test_bit(bit, (unsigned long *)((c)->x86_capability))
 
-#define cpu_has(c, bit)							\
-	(__builtin_constant_p(bit) &&					\
+#define REQUIRED_MASK_BIT_SET(bit)					\
 	 ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||	\
 	   (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||	\
 	   (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||	\
@@ -206,10 +219,16 @@ extern const char * const x86_power_flags[32];
 	   (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||	\
 	   (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||	\
 	   (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||	\
-	   (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )	\
-	  ? 1 :								\
+	   (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
+
+#define cpu_has(c, bit)							\
+	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
 	 test_cpu_cap(c, bit))
 
+#define this_cpu_has(bit)						\
+	(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :	\
+	 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+
 #define boot_cpu_has(bit)	cpu_has(&boot_cpu_data, bit)
 
 #define set_cpu_cap(c, bit)	set_bit(bit, (unsigned long *)((c)->x86_capability))
@@ -268,6 +287,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b81002f23614..078ad0caefc6 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -94,7 +94,7 @@ static inline void hw_breakpoint_disable(void)
 
 static inline int hw_breakpoint_active(void)
 {
-	return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
 extern void aout_dump_debugregs(struct user *dump);
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 617bd56b3070..7b439d9aea2a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -4,30 +4,33 @@
 #include <asm/desc_defs.h>
 #include <asm/ldt.h>
 #include <asm/mmu.h>
+
 #include <linux/smp.h>
 
-static inline void fill_ldt(struct desc_struct *desc,
-			    const struct user_desc *info)
+static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
 {
-	desc->limit0 = info->limit & 0x0ffff;
-	desc->base0 = info->base_addr & 0x0000ffff;
+	desc->limit0		= info->limit & 0x0ffff;
 
-	desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
-	desc->type = (info->read_exec_only ^ 1) << 1;
-	desc->type |= info->contents << 2;
-	desc->s = 1;
-	desc->dpl = 0x3;
-	desc->p = info->seg_not_present ^ 1;
-	desc->limit = (info->limit & 0xf0000) >> 16;
-	desc->avl = info->useable;
-	desc->d = info->seg_32bit;
-	desc->g = info->limit_in_pages;
-	desc->base2 = (info->base_addr & 0xff000000) >> 24;
+	desc->base0		= (info->base_addr & 0x0000ffff);
+	desc->base1		= (info->base_addr & 0x00ff0000) >> 16;
+
+	desc->type		= (info->read_exec_only ^ 1) << 1;
+	desc->type	       |= info->contents << 2;
+
+	desc->s			= 1;
+	desc->dpl		= 0x3;
+	desc->p			= info->seg_not_present ^ 1;
+	desc->limit		= (info->limit & 0xf0000) >> 16;
+	desc->avl		= info->useable;
+	desc->d			= info->seg_32bit;
+	desc->g			= info->limit_in_pages;
+
+	desc->base2		= (info->base_addr & 0xff000000) >> 24;
 	/*
 	 * Don't allow setting of the lm bit. It is useless anyway
 	 * because 64bit system calls require __USER_CS:
 	 */
-	desc->l = 0;
+	desc->l			= 0;
 }
 
 extern struct desc_ptr idt_descr;
@@ -36,6 +39,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
+
 DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
@@ -48,16 +52,16 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
-	gate->offset_low = PTR_LOW(func);
-	gate->segment = __KERNEL_CS;
-	gate->ist = ist;
-	gate->p = 1;
-	gate->dpl = dpl;
-	gate->zero0 = 0;
-	gate->zero1 = 0;
-	gate->type = type;
-	gate->offset_middle = PTR_MIDDLE(func);
-	gate->offset_high = PTR_HIGH(func);
+	gate->offset_low	= PTR_LOW(func);
+	gate->segment		= __KERNEL_CS;
+	gate->ist		= ist;
+	gate->p			= 1;
+	gate->dpl		= dpl;
+	gate->zero0		= 0;
+	gate->zero1		= 0;
+	gate->type		= type;
+	gate->offset_middle	= PTR_MIDDLE(func);
+	gate->offset_high	= PTR_HIGH(func);
 }
 
 #else
@@ -66,8 +70,7 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
 			     unsigned short seg)
 {
 	gate->a = (seg << 16) | (base & 0xffff);
-	gate->b = (base & 0xffff0000) |
-		  (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+	gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
 }
 
 #endif
@@ -75,31 +78,29 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
 static inline int desc_empty(const void *ptr)
 {
 	const u32 *desc = ptr;
+
 	return !(desc[0] | desc[1]);
 }
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define load_TR_desc() native_load_tr_desc()
-#define load_gdt(dtr) native_load_gdt(dtr)
-#define load_idt(dtr) native_load_idt(dtr)
-#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
-#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
+#define load_TR_desc()				native_load_tr_desc()
+#define load_gdt(dtr)				native_load_gdt(dtr)
+#define load_idt(dtr)				native_load_idt(dtr)
+#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))
 
-#define store_gdt(dtr) native_store_gdt(dtr)
-#define store_idt(dtr) native_store_idt(dtr)
-#define store_tr(tr) (tr = native_store_tr())
+#define store_gdt(dtr)				native_store_gdt(dtr)
+#define store_idt(dtr)				native_store_idt(dtr)
+#define store_tr(tr)				(tr = native_store_tr())
 
-#define load_TLS(t, cpu) native_load_tls(t, cpu)
-#define set_ldt native_set_ldt
+#define load_TLS(t, cpu)			native_load_tls(t, cpu)
+#define set_ldt					native_set_ldt
 
-#define write_ldt_entry(dt, entry, desc) \
-	native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
-	native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) \
-	native_write_idt_entry(dt, entry, g)
+#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)
 
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
@@ -112,33 +113,27 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 
 #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
 
-static inline void native_write_idt_entry(gate_desc *idt, int entry,
-					  const gate_desc *gate)
+static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
 {
 	memcpy(&idt[entry], gate, sizeof(*gate));
 }
 
-static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
-					  const void *desc)
+static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
 {
 	memcpy(&ldt[entry], desc, 8);
 }
 
-static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
-					  const void *desc, int type)
+static inline void
+native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
 {
 	unsigned int size;
+
 	switch (type) {
-	case DESC_TSS:
-		size = sizeof(tss_desc);
-		break;
-	case DESC_LDT:
-		size = sizeof(ldt_desc);
-		break;
-	default:
-		size = sizeof(struct desc_struct);
-		break;
+	case DESC_TSS:	size = sizeof(tss_desc);	break;
+	case DESC_LDT:	size = sizeof(ldt_desc);	break;
+	default:	size = sizeof(*gdt);		break;
 	}
+
 	memcpy(&gdt[entry], desc, size);
 }
 
@@ -154,20 +149,21 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
 }
 
 
-static inline void set_tssldt_descriptor(void *d, unsigned long addr,
-					 unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
 {
 #ifdef CONFIG_X86_64
 	struct ldttss_desc64 *desc = d;
+
 	memset(desc, 0, sizeof(*desc));
-	desc->limit0 = size & 0xFFFF;
-	desc->base0 = PTR_LOW(addr);
-	desc->base1 = PTR_MIDDLE(addr) & 0xFF;
-	desc->type = type;
-	desc->p = 1;
-	desc->limit1 = (size >> 16) & 0xF;
-	desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
-	desc->base3 = PTR_HIGH(addr);
+
+	desc->limit0		= size & 0xFFFF;
+	desc->base0		= PTR_LOW(addr);
+	desc->base1		= PTR_MIDDLE(addr) & 0xFF;
+	desc->type		= type;
+	desc->p			= 1;
+	desc->limit1		= (size >> 16) & 0xF;
+	desc->base2		= (PTR_MIDDLE(addr) >> 8) & 0xFF;
+	desc->base3		= PTR_HIGH(addr);
 #else
 	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
 #endif
@@ -237,14 +233,16 @@ static inline void native_store_idt(struct desc_ptr *dtr)
 static inline unsigned long native_store_tr(void)
 {
 	unsigned long tr;
+
 	asm volatile("str %0":"=r" (tr));
+
 	return tr;
 }
 
 static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
 {
-	unsigned int i;
 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+	unsigned int i;
 
 	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
@@ -313,6 +311,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
 	gate_desc s;
+
 	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
 	/*
 	 * does not need to be atomic because it is only done once at
@@ -343,8 +342,9 @@ static inline void alloc_system_vector(int vector)
 		set_bit(vector, used_vectors);
 		if (first_system_vector > vector)
 			first_system_vector = vector;
-	} else
+	} else {
 		BUG();
+	}
 }
 
 static inline void alloc_intr_gate(unsigned int n, void *addr)
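fill_ldt() above scatters a 32-bit base and a 20-bit limit across the descriptor bitfields; as a worked example, base_addr 0x12345678 packs into base0=0x5678, base1=0x34, base2=0x12, while limit 0xabcde packs into limit0=0xbcde with the high nibble 0xa landing in desc->limit. A hedged sketch of installing such a descriptor with the helpers from this header (validation of the userspace-supplied info is omitted):

    struct desc_struct d;

    fill_ldt(&d, info);                     /* info: a checked struct user_desc */
    native_write_ldt_entry(ldt, slot, &d);  /* plain 8-byte copy into the table */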
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index ca1098a7e580..0bdb0c54d9a1 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -10,7 +10,6 @@
 
 #include <linux/spinlock.h>	/* And spinlocks */
 #include <asm/io.h>		/* need byte IO */
-#include <linux/delay.h>
 
 #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
 #define dma_outb	outb_p
@@ -70,22 +69,18 @@
 
 #define MAX_DMA_CHANNELS	8
 
-#ifdef CONFIG_X86_32
-
-/* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS      (PAGE_OFFSET + 0x1000000)
-
-#else
-
 /* 16MB ISA DMA zone */
 #define MAX_DMA_PFN   ((16 * 1024 * 1024) >> PAGE_SHIFT)
 
 /* 4GB broken PCI/AGP hardware bus master zone */
 #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
 
+#ifdef CONFIG_X86_32
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS      (PAGE_OFFSET + 0x1000000)
+#else
 /* Compat define for old dma zone */
 #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
-
 #endif
 
 /* 8237 DMA controllers */
@@ -151,6 +146,7 @@
 #define DMA_AUTOINIT		0x10
 
 
+#ifdef CONFIG_ISA_DMA_API
 extern spinlock_t  dma_spin_lock;
 
 static inline unsigned long claim_dma_lock(void)
@@ -164,6 +160,7 @@ static inline void release_dma_lock(unsigned long flags)
 {
 	spin_unlock_irqrestore(&dma_spin_lock, flags);
 }
+#endif /* CONFIG_ISA_DMA_API */
 
 /* enable/disable a specific DMA channel */
 static inline void enable_dma(unsigned int dmanr)
@@ -303,9 +300,11 @@ static inline int get_dma_residue(unsigned int dmanr)
}
 
 
-/* These are in kernel/dma.c: */
+/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
+#ifdef CONFIG_ISA_DMA_API
 extern int request_dma(unsigned int dmanr, const char *device_id);
 extern void free_dma(unsigned int dmanr);
+#endif
 
 /* From PCI */
 
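Since the lock helpers and request_dma()/free_dma() are now compiled only under CONFIG_ISA_DMA_API, a classic 8237 user keeps the familiar shape; a sketch (channel number, buffer, and length are illustrative):

    unsigned long flags;

    if (request_dma(3, "mydriver"))         /* channel 3 is just an example */
            return -EBUSY;

    flags = claim_dma_lock();               /* serialize 8237 register access */
    disable_dma(3);
    clear_dma_ff(3);
    set_dma_mode(3, DMA_MODE_READ);
    set_dma_addr(3, buf_phys);              /* must lie below MAX_DMA_ADDRESS */
    set_dma_count(3, len);
    enable_dma(3);
    release_dma_lock(flags);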
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 733f7e91e7a9..326099199318 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -89,6 +89,16 @@
 	CFI_ADJUST_CFA_OFFSET -8
 	.endm
 
+	.macro pushfq_cfi
+	pushfq
+	CFI_ADJUST_CFA_OFFSET 8
+	.endm
+
+	.macro popfq_cfi
+	popfq
+	CFI_ADJUST_CFA_OFFSET -8
+	.endm
+
 	.macro movq_cfi reg offset=0
 	movq %\reg, \offset(%rsp)
 	CFI_REL_OFFSET \reg, \offset
@@ -109,6 +119,16 @@
 	CFI_ADJUST_CFA_OFFSET -4
 	.endm
 
+	.macro pushfl_cfi
+	pushfl
+	CFI_ADJUST_CFA_OFFSET 4
+	.endm
+
+	.macro popfl_cfi
+	popfl
+	CFI_ADJUST_CFA_OFFSET -4
+	.endm
+
 	.macro movl_cfi reg offset=0
 	movl %\reg, \offset(%esp)
 	CFI_REL_OFFSET \reg, \offset
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index ec8a52d14ab1..908b96957d88 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -72,6 +72,9 @@ struct e820map {
 #define BIOS_BEGIN		0x000a0000
 #define BIOS_END		0x00100000
 
+#define BIOS_ROM_BASE		0xffe00000
+#define BIOS_ROM_END		0xffffffff
+
 #ifdef __KERNEL__
 /* see comment in arch/x86/kernel/e820.c */
 extern struct e820map e820;
@@ -93,7 +96,7 @@ extern void e820_setup_gap(void);
 extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
 			unsigned long start_addr, unsigned long long end_addr);
 struct setup_data;
-extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
+extern void parse_e820_ext(struct setup_data *data);
 
 #if defined(CONFIG_X86_64) || \
 	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
@@ -112,23 +115,13 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-extern unsigned long end_user_pfn;
-
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <linux/early_res.h>
-
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
-				  unsigned long start_pfn,
-				  unsigned long last_pfn,
-				  unsigned long *ei_startpfn,
-				  unsigned long *ei_endpfn);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
+extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+
+void memblock_x86_fill(void);
+void memblock_find_dma_reserve(void);
+
 extern void finish_e820_parsing(void);
 extern void e820_reserve_resources(void);
 extern void e820_reserve_resources_late(void);
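The deleted find_e820_area()/early_res interfaces give way to memblock: the BIOS map is still parsed the same way, then handed over wholesale. A hedged sketch of the boot ordering these declarations imply (the real setup_arch() does much more in between):

    finish_e820_parsing();             /* apply command-line edits to the map */
    max_pfn = e820_end_of_ram_pfn();   /* highest directly usable page frame */
    memblock_x86_fill();               /* register every E820_RAM range with memblock */
    memblock_find_dma_reserve();       /* account reserved pages in the DMA zone */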
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 8406ed7f9926..7093e4a6a0bc 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -90,7 +90,8 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
-extern void efi_reserve_early(void);
+extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
+extern void efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
 
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 5d07dea2ebb8..3b0d7ef959b8 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -17,22 +17,14 @@ BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR)
 BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
 BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR)
 
-BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
-		 smp_invalidate_interrupt)
-BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
-		 smp_invalidate_interrupt)
+.irp idx,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
+	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+.if NUM_INVALIDATE_TLB_VECTORS > \idx
+BUILD_INTERRUPT3(invalidate_interrupt\idx,
+		 (INVALIDATE_TLB_VECTOR_START)+\idx,
+		 smp_invalidate_interrupt)
+.endif
+.endr
 #endif
 
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
@@ -50,8 +42,8 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_EVENTS
-BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
+#ifdef CONFIG_IRQ_WORK
+BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
 #endif
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index d07b44f7d1dc..4729b2b63117 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,7 +116,11 @@ enum fixed_addresses {
 #endif
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
+#ifdef	CONFIG_X86_MRST
+	FIX_LNW_VRTC,
+#endif
 	__end_of_permanent_fixed_addresses,
+
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
@@ -214,5 +218,20 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
 	return __virt_to_fix(vaddr);
 }
+
+/* Return an pointer with offset calculated */
+static __always_inline unsigned long
+__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
+{
+	__set_fixmap(idx, phys, flags);
+	return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
+}
+
+#define set_fixmap_offset(idx, phys)			\
+	__set_fixmap_offset(idx, phys, PAGE_KERNEL)
+
+#define set_fixmap_offset_nocache(idx, phys)		\
+	__set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
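__set_fixmap_offset() returns a virtual address that already carries the sub-page offset of phys, so callers stop adding `phys & (PAGE_SIZE - 1)` by hand. A sketch using the FIX_LNW_VRTC slot added in the same diff (the physical address is a placeholder):

    unsigned long vrtc_va;

    vrtc_va = set_fixmap_offset_nocache(FIX_LNW_VRTC, vrtc_paddr);
    /* vrtc_va points at vrtc_paddr itself, not at the page base */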
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 06850a7194e1..2c6fc9e62812 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -7,14 +7,12 @@
 	frame pointer later */
 #ifdef CONFIG_FRAME_POINTER
 	.macro FRAME
-	pushl %ebp
-	CFI_ADJUST_CFA_OFFSET 4
+	pushl_cfi %ebp
 	CFI_REL_OFFSET ebp,0
 	movl %esp,%ebp
 	.endm
 	.macro ENDFRAME
-	popl %ebp
-	CFI_ADJUST_CFA_OFFSET -4
+	popl_cfi %ebp
 	CFI_RESTORE ebp
 	.endm
 #else
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index db24c2278be0..268c783ab1c0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -38,11 +38,10 @@ extern void mcount(void);
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	/*
-	 * call mcount is "e8 <4 byte offset>"
-	 * The addr points to the 4 byte offset and the caller of this
-	 * function wants the pointer to e8. Simply subtract one.
+	 * addr is the address of the mcount call instruction.
+	 * recordmcount does the necessary offset calculation.
 	 */
-	return addr - 1;
+	return addr;
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 1f11ce44e956..d09bb03653f0 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -37,7 +37,7 @@
 		     "+m" (*uaddr), "=&r" (tem)	\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
-						int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
+	int ret = 0;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
 	/* Real i386 machines have no cmpxchg instruction */
@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -ENOSYS;
 #endif
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
 		     "2:\t.section .fixup, \"ax\"\n"
-		     "3:\tmov     %2, %0\n"
+		     "3:\tmov     %3, %0\n"
 		     "\tjmp     2b\n"
 		     "\t.previous\n"
 		     _ASM_EXTABLE(1b, 3b)
-		     : "=a" (oldval), "+m" (*uaddr)
-		     : "i" (-EFAULT), "r" (newval), "0" (oldval)
+		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
 		     : "memory"
 	);
 
-	return oldval;
+	*uval = oldval;
+	return ret;
 }
 
 #endif
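The new futex_atomic_cmpxchg_inatomic() signature separates the fault status (function return, 0 or -EFAULT) from the value actually found at uaddr (written through *uval), which the old return-the-old-value convention could not distinguish from a legitimate stored value of -EFAULT. A caller-side sketch of the retry loop this enables:

    u32 curval;
    int err;

    err = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, desired);
    if (err)
            return err;        /* -EFAULT: the user page was not writable */
    if (curval != expected)
            goto retry;        /* lost a race; reread and try again */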
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 4ac5b0f33fc1..156cd5d18d2a 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -17,6 +17,7 @@ extern int fix_aperture;
 #define GARTEN		(1<<0)
 #define DISGARTCPU	(1<<4)
 #define DISGARTIO	(1<<5)
+#define DISTLBWALKPRB	(1<<6)
 
 /* GART cache control register bits. */
 #define INVGART		(1<<0)
@@ -27,7 +28,6 @@ extern int fix_aperture;
 #define AMD64_GARTAPERTUREBASE	0x94
 #define AMD64_GARTTABLEBASE	0x98
 #define AMD64_GARTCACHECTL	0x9c
-#define AMD64_GARTEN		(1<<0)
 
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
@@ -37,7 +37,7 @@ extern int gart_iommu_aperture_disabled;
 extern void early_gart_iommu_check(void);
 extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
-extern void gart_iommu_hole_init(void);
+extern int gart_iommu_hole_init(void);
 
 #else
 #define gart_iommu_aperture            0
@@ -50,28 +50,42 @@ static inline void early_gart_iommu_check(void)
 static inline void gart_parse_options(char *options)
 {
 }
-static inline void gart_iommu_hole_init(void)
+static inline int gart_iommu_hole_init(void)
 {
+	return -ENODEV;
 }
 #endif
 
 extern int agp_amd64_init(void);
 
+static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
+{
+	u32 ctl;
+
+	/*
+	 * Don't enable translation but enable GART IO and CPU accesses.
+	 * Also, set DISTLBWALKPRB since GART tables memory is UC.
+	 */
+	ctl = order << 1;
+
+	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
 static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
 	u32 tmp, ctl;
 
 	/* address of the mappings table */
 	addr >>= 12;
 	tmp = (u32) addr<<4;
 	tmp &= ~0xf;
 	pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
 
 	/* Enable GART translation for this hammer. */
 	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
-	ctl |= GARTEN;
+	ctl |= GARTEN | DISTLBWALKPRB;
 	ctl &= ~(DISGARTCPU | DISGARTIO);
 	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
 
 static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
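gart_set_size_and_enable() writes the aperture order while leaving GARTEN clear, so sizing can happen safely before the table base is known; enable_gart_translation() later flips GARTEN together with DISTLBWALKPRB because the GART table lives in uncacheable memory. A hedged ordering sketch for one northbridge (the base-register shift follows the AMD64_GARTAPERTUREBASE convention of holding address bits 39:25):

    gart_set_size_and_enable(nb_dev, order);          /* aperture sized, GARTEN off */
    pci_write_config_dword(nb_dev, AMD64_GARTAPERTUREBASE, aper_base >> 25);
    enable_gart_translation(nb_dev, gart_table_phys); /* GARTEN | DISTLBWALKPRB on */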
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdfa50f9..91d915a65259 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio)
 	return __gpio_cansleep(gpio);
 }
 
-/*
- * Not implemented, yet.
- */
 static inline int gpio_to_irq(unsigned int gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
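With gpio_to_irq() now delegating to gpiolib's __gpio_to_irq(), x86 picks up the generic behavior: the result is whatever IRQ mapping the GPIO chip registered, or a negative error. A short consumer sketch (handler and trigger flags are illustrative):

    int irq = gpio_to_irq(gpio);

    if (irq < 0)
            return irq;     /* chip provides no IRQ for this line */
    return request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "mydev", dev);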
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index aeab29aee617..55e4de613f0e 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -14,7 +14,7 @@ typedef struct {
 #endif
 	unsigned int x86_platform_ipis;	/* arch dependent */
 	unsigned int apic_perf_irqs;
-	unsigned int apic_pending_irqs;
+	unsigned int apic_irq_work_irqs;
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 8caac76ac324..3bd04022fd0c 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	do { } while (0)
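The enum km_type parameter disappears because atomic kmap slots are now managed as a small per-CPU stack; __kmap_atomic()/__kunmap_atomic() here back the kmap_atomic()/kunmap_atomic() wrappers in linux/highmem.h. The updated idiom, sketched:

    void *vaddr = kmap_atomic(page);   /* no KM_USER0-style slot argument */

    memcpy(buf, vaddr, PAGE_SIZE);     /* must not sleep while mapped */
    kunmap_atomic(vaddr);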
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1d5c08a1bdfd..2c392d663dce 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -74,10 +74,12 @@ extern void hpet_disable(void);
 extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);
 
-extern void hpet_msi_unmask(unsigned int irq);
-extern void hpet_msi_mask(unsigned int irq);
-extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
-extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+struct irq_data;
+extern void hpet_msi_unmask(struct irq_data *data);
+extern void hpet_msi_mask(struct irq_data *data);
+struct hpet_dev;
+extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
+extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
 
 #ifdef CONFIG_PCI_MSI
 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index c17411503f28..c490d89a9b7b 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -29,7 +29,7 @@
 extern void apic_timer_interrupt(void);
 extern void x86_platform_ipi(void);
 extern void error_interrupt(void);
-extern void perf_pending_interrupt(void);
+extern void irq_work_interrupt(void);
 
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
@@ -45,6 +45,30 @@ extern void invalidate_interrupt4(void);
 extern void invalidate_interrupt5(void);
 extern void invalidate_interrupt6(void);
 extern void invalidate_interrupt7(void);
+extern void invalidate_interrupt8(void);
+extern void invalidate_interrupt9(void);
+extern void invalidate_interrupt10(void);
+extern void invalidate_interrupt11(void);
+extern void invalidate_interrupt12(void);
+extern void invalidate_interrupt13(void);
+extern void invalidate_interrupt14(void);
+extern void invalidate_interrupt15(void);
+extern void invalidate_interrupt16(void);
+extern void invalidate_interrupt17(void);
+extern void invalidate_interrupt18(void);
+extern void invalidate_interrupt19(void);
+extern void invalidate_interrupt20(void);
+extern void invalidate_interrupt21(void);
+extern void invalidate_interrupt22(void);
+extern void invalidate_interrupt23(void);
+extern void invalidate_interrupt24(void);
+extern void invalidate_interrupt25(void);
+extern void invalidate_interrupt26(void);
+extern void invalidate_interrupt27(void);
+extern void invalidate_interrupt28(void);
+extern void invalidate_interrupt29(void);
+extern void invalidate_interrupt30(void);
+extern void invalidate_interrupt31(void);
 
 extern void irq_move_cleanup_interrupt(void);
 extern void reboot_interrupt(void);
@@ -80,6 +104,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 	irq_attr->polarity	= polarity;
 }
 
+struct irq_2_iommu {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8  irte_mask;
+};
+
 /*
  * This is performance-critical, we want to do it O(1)
 *
@@ -91,15 +122,17 @@ struct irq_cfg {
 	cpumask_var_t		old_domain;
 	u8			vector;
 	u8			move_in_progress : 1;
+#ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu	irq_2_iommu;
+#endif
 };
 
-extern struct irq_cfg *irq_cfg(unsigned int);
 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
 extern void send_cleanup_vector(struct irq_cfg *);
 
-struct irq_desc;
-extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *,
-				      unsigned int *dest_id);
+struct irq_data;
+int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
+			  unsigned int *dest_id);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
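Embedding struct irq_2_iommu inside irq_cfg retires the old external irq-to-iommu lookup table; remapping code can reach its state directly from an IRQ's chip data. A hedged sketch of the kind of accessor this enables (the name is illustrative):

    /* CONFIG_INTR_REMAP assumed; cfg normally comes from irq_get_chip_data() */
    static struct irq_2_iommu *irq_2_iommu_of(struct irq_cfg *cfg)
    {
            return cfg ? &cfg->irq_2_iommu : NULL;
    }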
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index ff2546ce7178..7a15153c675d 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,6 +20,9 @@
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
+#include <asm/kvm_para.h>
+#include <asm/xen/hypervisor.h>
+
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
 
@@ -47,4 +50,13 @@ extern const struct hypervisor_x86 x86_hyper_vmware;
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
 extern const struct hypervisor_x86 x86_hyper_xen_hvm;
 
+static inline bool hypervisor_x2apic_available(void)
+{
+	if (kvm_para_available())
+		return true;
+	if (xen_x2apic_para_available())
+		return true;
+	return false;
+}
+
 #endif
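hypervisor_x2apic_available() gives APIC setup a single predicate for "a paravirtual interface makes x2APIC usable without an interrupt-remapping unit". A sketch of the intended call site, with the surrounding condition simplified and the helper names on the APIC side taken from that era's code as an assumption:

    if (x2apic_supported() && hypervisor_x2apic_available())
            enable_x2apic();   /* no hardware interrupt remapping required */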
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a73a8d5a5e69..c9e09ea05644 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -55,6 +55,12 @@ extern int save_i387_xstate_ia32(void __user *buf);
 extern int restore_i387_xstate_ia32(void __user *buf);
 #endif
 
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
 #define X87_FSW_ES		(1 << 7)	/* Exception Summary */
 
 static __always_inline __pure bool use_xsaveopt(void)
@@ -67,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
 	return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+static __always_inline __pure bool use_fxsr(void)
+{
+	return static_cpu_has(X86_FEATURE_FXSR);
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -77,20 +88,13 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_X86_64
-
-/* Ignore delayed exceptions from user space */
-static inline void tolerant_fwait(void)
-{
-	asm volatile("1: fwait\n"
-		     "2:\n"
-		     _ASM_EXTABLE(1b, 2b));
-}
-
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
 
-	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
+	/* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+	asm volatile("1:  fxrstorq %[fx]\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
 		     "3:  movl $-1,%[err]\n"
@@ -98,44 +102,21 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 		     ".previous\n"
 		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err)
-#if 0 /* See comment in fxsave() below. */
-		     : [fx] "r" (fx), "m" (*fx), "0" (0));
+		     : [fx] "m" (*fx), "0" (0));
 #else
-		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
+	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3:  movl $-1,%[err]\n"
+		     "    jmp  2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err)
+		     : [fx] "R" (fx), "m" (*fx), "0" (0));
 #endif
 	return err;
 }
 
-/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
-   is pending. Clear the x87 state here by setting it to fixed
-   values. The kernel data segment can be sometimes 0 and sometimes
-   new user value. Both should be ok.
-   Use the PDA as safe address because it should be already in L1. */
-static inline void fpu_clear(struct fpu *fpu)
-{
-	struct xsave_struct *xstate = &fpu->state->xsave;
-	struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
-	/*
-	 * xsave header may indicate the init state of the FP.
-	 */
-	if (use_xsave() &&
-	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-		return;
-
-	if (unlikely(fx->swd & X87_FSW_ES))
-		asm volatile("fnclex");
-	alternative_input(ASM_NOP8 ASM_NOP2,
-			  "emms\n"		/* clear stack tags */
-			  "fildl %%gs:0",	/* load to clear state */
-			  X86_FEATURE_FXSAVE_LEAK);
-}
-
-static inline void clear_fpu_state(struct task_struct *tsk)
-{
-	fpu_clear(&tsk->thread.fpu);
-}
-
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	int err;
@@ -149,6 +130,18 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	if (unlikely(err))
 		return -EFAULT;
 
+	/* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+	asm volatile("1:  fxsaveq %[fx]\n\t"
+		     "2:\n"
+		     ".section .fixup,\"ax\"\n"
+		     "3:  movl $-1,%[err]\n"
+		     "    jmp  2b\n"
+		     ".previous\n"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [err] "=r" (err), [fx] "=m" (*fx)
+		     : "0" (0));
+#else
 	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
 		     "2:\n"
 		     ".section .fixup,\"ax\"\n"
@@ -157,10 +150,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 		     ".previous\n"
 		     _ASM_EXTABLE(1b, 3b)
 		     : [err] "=r" (err), "=m" (*fx)
-#if 0 /* See comment in fxsave() below. */
-		     : [fx] "r" (fx), "0" (0));
-#else
-		     : [fx] "cdaSDb" (fx), "0" (0));
+		     : [fx] "R" (fx), "0" (0));
 #endif
 	if (unlikely(err) &&
 		__clear_user(fx, sizeof(struct i387_fxsave_struct)))
@@ -175,56 +165,29 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	   uses any extended registers for addressing, a second REX prefix
 	   will be generated (to the assembler, rex64 followed by semicolon
 	   is a separate instruction), and hence the 64-bitness is lost. */
-#if 0
+
+#ifdef CONFIG_AS_FXSAVEQ
 	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
 	   starting with gas 2.16. */
 	__asm__ __volatile__("fxsaveq %0"
 			     : "=m" (fpu->state->fxsave));
-#elif 0
+#else
 	/* Using, as a workaround, the properly prefixed form below isn't
 	   accepted by any binutils version so far released, complaining that
 	   the same type of prefix is used twice if an extended register is
-	   needed for addressing (fix submitted to mainline 2005-11-21). */
-	__asm__ __volatile__("rex64/fxsave %0"
-			     : "=m" (fpu->state->fxsave));
-#else
-	/* This, however, we can work around by forcing the compiler to select
+	   needed for addressing (fix submitted to mainline 2005-11-21).
+	asm volatile("rex64/fxsave %0"
+		     : "=m" (fpu->state->fxsave));
+	   This, however, we can work around by forcing the compiler to select
 	   an addressing mode that doesn't require extended registers. */
-	__asm__ __volatile__("rex64/fxsave (%1)"
-			     : "=m" (fpu->state->fxsave)
-			     : "cdaSDb" (&fpu->state->fxsave));
+	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
+		     : [fx] "R" (&fpu->state->fxsave));
 #endif
 }
 
-static inline void fpu_save_init(struct fpu *fpu)
-{
-	if (use_xsave())
-		fpu_xsave(fpu);
-	else
-		fpu_fxsave(fpu);
-
-	fpu_clear(fpu);
-}
-
-static inline void __save_init_fpu(struct task_struct *tsk)
-{
-	fpu_save_init(&tsk->thread.fpu);
-	task_thread_info(tsk)->status &= ~TS_USEDFPU;
-}
-
 #else  /* CONFIG_X86_32 */
 
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-static inline void tolerant_fwait(void)
-{
-	asm volatile("fnclex ; fwait");
-}
-
 /* perform fxrstor iff the processor has extended states, otherwise frstor */
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
@@ -241,6 +204,14 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 	return 0;
 }
 
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+	asm volatile("fxsave %[fx]"
+		     : [fx] "=m" (fpu->state->fxsave));
+}
+
+#endif	/* CONFIG_X86_64 */
+
 /* We need a safe address that is cheap to find and that is already
    in L1 during context switch. The best choices are unfortunately
    different for UP and SMP */
@@ -256,47 +227,33 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 static inline void fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
-		struct xsave_struct *xstate = &fpu->state->xsave;
-		struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
 		fpu_xsave(fpu);
 
 		/*
 		 * xsave header may indicate the init state of the FP.
 		 */
-		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-			goto end;
-
-		if (unlikely(fx->swd & X87_FSW_ES))
-			asm volatile("fnclex");
-
-		/*
-		 * we can do a simple return here or be paranoid :)
-		 */
-		goto clear_state;
+		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+			return;
+	} else if (use_fxsr()) {
+		fpu_fxsave(fpu);
+	} else {
+		asm volatile("fnsave %[fx]; fwait"
+			     : [fx] "=m" (fpu->state->fsave));
+		return;
 	}
 
-	/* Use more nops than strictly needed in case the compiler
-	   varies code */
-	alternative_input(
-		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
-		"fxsave %[fx]\n"
-		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
-		X86_FEATURE_FXSR,
-		[fx] "m" (fpu->state->fxsave),
-		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
-clear_state:
+	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+		asm volatile("fnclex");
+
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. safe_address is a random variable that should be in L1 */
 	alternative_input(
-		GENERIC_NOP8 GENERIC_NOP2,
+		ASM_NOP8 ASM_NOP2,
 		"emms\n\t"		/* clear stack tags */
-		"fildl %[addr]",	/* set F?P to defined value */
+		"fildl %P[addr]",	/* set F?P to defined value */
 		X86_FEATURE_FXSAVE_LEAK,
 		[addr] "m" (safe_address));
-end:
-	;
 }
 
 static inline void __save_init_fpu(struct task_struct *tsk)
@@ -305,9 +262,6 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
-
-#endif	/* CONFIG_X86_64 */
-
 static inline int fpu_fxrstor_checking(struct fpu *fpu)
 {
 	return fxrstor_checking(&fpu->state->fxsave);
@@ -344,7 +298,10 @@ static inline void __unlazy_fpu(struct task_struct *tsk)
 static inline void __clear_fpu(struct task_struct *tsk)
 {
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
-		tolerant_fwait();
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
 		stts();
 	}
@@ -405,19 +362,6 @@ static inline void irq_ts_restore(int TS_state)
 		stts();
 }
 
-#ifdef CONFIG_X86_64
-
-static inline void save_init_fpu(struct task_struct *tsk)
-{
-	__save_init_fpu(tsk);
-	stts();
-}
-
-#define unlazy_fpu	__unlazy_fpu
-#define clear_fpu	__clear_fpu
-
-#else  /* CONFIG_X86_32 */
-
 /*
  * These disable preemption on their own and are safe
 */
@@ -443,8 +387,6 @@ static inline void clear_fpu(struct task_struct *tsk)
 	preempt_enable();
 }
 
-#endif	/* CONFIG_X86_64 */
-
 /*
  * i387 state interaction
 */
@@ -508,7 +450,4 @@ extern void fpu_finit(struct fpu *fpu);
 
 #endif /* __ASSEMBLY__ */
 
-#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
-#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
-
 #endif /* _ASM_X86_I387_H */
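With tolerant_fwait() inlined into __clear_fpu() and fpu_save_init() unified across 32/64-bit, little CONFIG_X86_64-only surface remains in this header. One pattern that survives unchanged is the TS-flag bracket around kernel FPU use; a sketch (the caller must stay atomic between the two calls):

    int ts_state = irq_ts_save();   /* clears CR0.TS if it was set */

    /* ... SSE/x87 instructions may run here without faulting ... */

    irq_ts_restore(ts_state);       /* restores CR0.TS to its prior state */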
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index fc1f579fb965..65aaa91d5850 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,6 +6,8 @@
 #define PIT_CH0		0x40
 #define PIT_CH2		0x42
 
+#define PIT_LATCH	LATCH
+
 extern raw_spinlock_t i8253_lock;
 
 extern struct clock_event_device *global_clock_event;
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 1655147646aa..a20365953bf8 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip;
 struct legacy_pic {
 	int nr_legacy_irqs;
 	struct irq_chip *chip;
+	void (*mask)(unsigned int irq);
+	void (*unmask)(unsigned int irq);
 	void (*mask_all)(void);
 	void (*restore_mask)(void);
 	void (*init)(int auto_eoi);
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index 38d87379e270..f49253d75710 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -16,6 +16,6 @@ static inline void enter_idle(void) { }
 static inline void exit_idle(void) { }
 #endif /* CONFIG_X86_64 */
 
-void c1e_remove_cpu(int cpu);
+void amd_e400_remove_cpu(int cpu);
 
 #endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 36fb1a6a5109..8dbe353e41e1 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -11,8 +11,8 @@ kernel_physical_mapping_init(unsigned long start,
 			     unsigned long page_size_mask);
 
 
-extern unsigned long __initdata e820_table_start;
-extern unsigned long __meminitdata e820_table_end;
-extern unsigned long __meminitdata e820_table_top;
+extern unsigned long __initdata pgt_buf_start;
+extern unsigned long __meminitdata pgt_buf_end;
+extern unsigned long __meminitdata pgt_buf_top;
 
 #endif /* _ASM_X86_INIT_32_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 30a3e9776123..d02804d650c4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -38,9 +38,10 @@
 
 #include <linux/string.h>
 #include <linux/compiler.h>
-#include <asm-generic/int-ll64.h>
 #include <asm/page.h>
 
+#include <xen/xen.h>
+
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
@@ -85,27 +86,6 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
 
-#else
-
-static inline __u64 readq(const volatile void __iomem *addr)
-{
-	const volatile u32 __iomem *p = addr;
-	u32 low, high;
-
-	low = readl(p);
-	high = readl(p + 1);
-
-	return low + ((u64)high << 32);
-}
-
-static inline void writeq(__u64 val, volatile void __iomem *addr)
-{
-	writel(val, addr);
-	writel(val >> 32, addr+4);
-}
-
-#endif
-
 #define readq_relaxed(a)	readq(a)
 
 #define __raw_readq(a)		readq(a)
@@ -115,6 +95,8 @@
 #define readq			readq
 #define writeq			writeq
 
+#endif
+
 /**
  *	virt_to_phys	-	map virtual addresses to physical
  *	@address: address to remap
@@ -206,6 +188,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 
@@ -348,6 +331,18 @@ extern void __iomem *early_memremap(resource_size_t phys_addr,
 					    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void fixup_early_ioremap(void);
+extern bool is_early_ioremap_ptep(pte_t *ptep);
+
+#ifdef CONFIG_XEN
+struct bio_vec;
+
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+#endif	/* CONFIG_XEN */
 
 #define IO_SPACE_LIMIT 0xffff
 
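Dropping the read-twice 32-bit fallback means readq/writeq now exist only where the bus access is a single 64-bit operation, generated by the build_mmio_* macros. An approximate expansion of build_mmio_read(readq, "q", unsigned long, "=r", :"memory") follows; the cast on the memory operand is an assumption, since only the first lines of the macro are visible in this hunk:

    static inline unsigned long readq(const volatile void __iomem *addr)
    {
            unsigned long ret;

            asm volatile("movq %1,%0"
                         : "=r" (ret)
                         : "m" (*(volatile unsigned long __force *)addr)
                         : "memory");
            return ret;
    }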
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 9cb2edb87c2f..690d1cc9a877 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -63,17 +63,6 @@ union IO_APIC_reg_03 {
 	} __attribute__ ((packed)) bits;
 };
 
-enum ioapic_irq_destination_types {
-	dest_Fixed = 0,
-	dest_LowestPrio = 1,
-	dest_SMI = 2,
-	dest__reserved_1 = 3,
-	dest_NMI = 4,
-	dest_INIT = 5,
-	dest__reserved_2 = 6,
-	dest_ExtINT = 7
-};
-
 struct IO_APIC_route_entry {
 	__u32	vector		:  8,
 		delivery_mode	:  3,	/* 000: FIXED
@@ -106,18 +95,22 @@ struct IR_IO_APIC_route_entry {
 		index		: 15;
 } __attribute__ ((packed));
 
+#define IOAPIC_AUTO	-1
+#define IOAPIC_EDGE	0
+#define IOAPIC_LEVEL	1
+
 #ifdef CONFIG_X86_IO_APIC
 
 /*
  * # of IO-APICs and # of IRQ routing registers
  */
 extern int nr_ioapics;
-extern int nr_ioapic_registers[MAX_IO_APICS];
 
-#define MP_MAX_IOAPIC_PIN 127
+extern int mpc_ioapic_id(int ioapic);
+extern unsigned int mpc_ioapic_addr(int ioapic);
+extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
 
-/* I/O APIC entries */
-extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
+#define MP_MAX_IOAPIC_PIN 127
 
 /* # of MP IRQ source entries */
 extern int mp_irq_entries;
@@ -150,33 +143,23 @@ extern int timer_through_8259;
 #define io_apic_assign_pci_irqs \
 	(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
-extern u8 io_apic_unique_id(u8 id);
-extern int io_apic_get_unique_id(int ioapic, int apic_id);
-extern int io_apic_get_version(int ioapic);
-extern int io_apic_get_redir_entries(int ioapic);
-
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 				   struct io_apic_irq_attr *irq_attr);
 void setup_IO_APIC_irq_extra(u32 gsi);
-extern void ioapic_init_mappings(void);
+extern void ioapic_and_gsi_init(void);
 extern void ioapic_insert_resources(void);
 
-extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
-extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
-extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
-extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
-extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
 
-extern void probe_nr_irqs_gsi(void);
+extern int save_ioapic_entries(void);
+extern void mask_ioapic_entries(void);
+extern int restore_ioapic_entries(void);
+
+extern int get_nr_irqs_gsi(void);
 
-extern int setup_ioapic_entry(int apic, int irq,
-			      struct IO_APIC_route_entry *entry,
-			      unsigned int destination, int trigger,
-			      int polarity, int vector, int pin);
-extern void ioapic_write_entry(int apic, int pin,
-			       struct IO_APIC_route_entry e);
 extern void setup_ioapic_ids_from_mpc(void);
+extern void setup_ioapic_ids_from_mpc_nocheck(void);
 
 struct mp_ioapic_gsi{
 	u32 gsi_base;
@@ -189,20 +172,37 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi);
 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void __init pre_init_apic_IRQ0(void);
 
+extern void mp_save_irq(struct mpc_intsrc *m);
+
+extern void disable_ioapic_support(void);
+
 #else  /* !CONFIG_X86_IO_APIC */
 
 #define io_apic_assign_pci_irqs 0
 #define setup_ioapic_ids_from_mpc x86_init_noop
 static const int timer_through_8259 = 0;
-static inline void ioapic_init_mappings(void)	{ }
+static inline void ioapic_and_gsi_init(void) { }
 static inline void ioapic_insert_resources(void) { }
-static inline void probe_nr_irqs_gsi(void)	{ }
 #define gsi_top (NR_IRQS_LEGACY)
 static inline int mp_find_ioapic(u32 gsi) { return 0; }
 
 struct io_apic_irq_attr;
 static inline int io_apic_set_pci_routing(struct device *dev, int irq,
 					  struct io_apic_irq_attr *irq_attr) { return 0; }
+
+static inline int save_ioapic_entries(void)
+{
+	return -ENOMEM;
+}
+
+static inline void mask_ioapic_entries(void) { }
+static inline int restore_ioapic_entries(void)
+{
+	return -ENOMEM;
+}
+
+static inline void mp_save_irq(struct mpc_intsrc *m) { }
+static inline void disable_ioapic_support(void) { }
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
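The coarse-grained save/mask/restore API replaces the caller-managed IO_APIC_route_entry arrays with helpers that manage their own storage. A hedged sketch of the intended calling pattern (the example_* names are invented; the real callers sit in the interrupt-remapping suspend/resume path):

	static int example_ioapic_suspend(void)
	{
		int err = save_ioapic_entries();	/* -ENOMEM without an IO-APIC */
		if (err)
			return err;
		mask_ioapic_entries();			/* quiesce every RTE */
		return 0;
	}

	static void example_ioapic_resume(void)
	{
		restore_ioapic_entries();		/* replay the saved RTEs */
	}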
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c4191b3b7056..363e33eb6ec1 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -27,10 +27,10 @@
 #include <asm/tlbflush.h>
 
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h
new file mode 100644
index 000000000000..f229b13a5f30
--- /dev/null
+++ b/arch/x86/include/asm/iommu_table.h
@@ -0,0 +1,100 @@
+#ifndef _ASM_X86_IOMMU_TABLE_H
+#define _ASM_X86_IOMMU_TABLE_H
+
+#include <asm/swiotlb.h>
+
+/*
+ * History lesson:
+ * The execution chain of IOMMUs in 2.6.36 looks as so:
+ *
+ *            [xen-swiotlb]
+ *                 |
+ *         +----[swiotlb *]--+
+ *        /      |           \
+ *       /       |            \
+ *    [GART]  [Calgary]  [Intel VT-d]
+ *     /
+ *    /
+ * [AMD-Vi]
+ *
+ * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip
+ * over the rest of the IOMMUs and unconditionally initialize the SWIOTLB.
+ * It would also surreptitiously set swiotlb=1 if there was more than 4GB
+ * of memory and the user did not pass in 'iommu=off'. The swiotlb
+ * flag would be turned off by all IOMMUs except the Calgary one.
+ *
+ * The IOMMU_INIT* macros allow a similar tree (or a more complex one,
+ * if desired) to be built by declaring who we depend on.
+ *
+ * All that needs to be done is to use one of the macros in the IOMMU
+ * driver and pci-dma.c will take care of the rest.
+ */
+
+struct iommu_table_entry {
+	initcall_t	detect;
+	initcall_t	depend;
+	void		(*early_init)(void); /* No memory allocator available. */
+	void		(*late_init)(void);  /* Yes, can allocate memory. */
+#define IOMMU_FINISH_IF_DETECTED (1<<0)
+#define IOMMU_DETECTED		 (1<<1)
+	int		flags;
+};
+/*
+ * This macro fills out an entry in the .iommu_table section that is
+ * equivalent to the fields of 'struct iommu_table_entry'. The entries
+ * that land in .iommu_table are in no particular order, so during
+ * boot-time we have to re-sort them based on dependency.
+ */
+
+#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
+	static const struct iommu_table_entry			\
+		__iommu_entry_##_detect __used			\
+	__attribute__ ((unused, __section__(".iommu_table"),	\
+			aligned((sizeof(void *)))))		\
+	= {_detect, _depend, _early_init, _late_init,		\
+	   _finish ? IOMMU_FINISH_IF_DETECTED : 0}
+/*
+ * The simplest IOMMU definition. Provide the detection routine
+ * and it will be run after the SWIOTLB and the other IOMMUs
+ * that utilize this macro. If the IOMMU is detected (i.e., the
+ * detect routine returns a positive value), the other IOMMUs
+ * are still checked. Use IOMMU_INIT_POST_FINISH if you prefer
+ * to stop detecting the other IOMMUs after yours has been detected.
+ */
+#define IOMMU_INIT_POST(_detect)					\
+	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0)
+
+#define IOMMU_INIT_POST_FINISH(_detect)					\
+	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1)
+
+/*
+ * A more sophisticated version of IOMMU_INIT. This variant requires:
+ *  a). A detection routine function.
+ *  b). The name of the detection routine we depend on to get called
+ *      before us.
+ *  c). The init routine which gets called if the detection routine
+ *      returns a positive value from pci_iommu_alloc. At that point
+ *      no memory allocator is available yet.
+ *  d). Similar to the 'init', except that this gets called from
+ *      pci_iommu_init, where we do have a memory allocator.
+ *
+ * The standard and the _FINISH variants differ in what happens after
+ * the detection routine returns a positive number: the standard variant
+ * continues detecting the other IOMMUs in the call list, while the
+ * _FINISH variant stops the execution chain. Both still call the 'init'
+ * and 'late_init' functions if they are set.
+ */
+#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)		\
+	__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
+
+#define IOMMU_INIT(_detect, _depend, _init, _late_init)			\
+	__IOMMU_INIT(_detect, _depend, _init, _late_init, 0)
+
+void sort_iommu_table(struct iommu_table_entry *start,
+		      struct iommu_table_entry *finish);
+
+void check_iommu_entries(struct iommu_table_entry *start,
+			 struct iommu_table_entry *finish);
+
+#endif /* _ASM_X86_IOMMU_TABLE_H */
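To make the registration flow concrete, a hypothetical driver using these macros could look like the sketch below. All my_* names are invented; IOMMU_INIT_FINISH and pci_swiotlb_detect_4gb come from the header above and the SWIOTLB code it builds on:

	static int __init my_iommu_detect(void)
	{
		return 0;	/* return a positive value when hardware is found */
	}

	static void __init my_iommu_early_init(void)
	{
		/* called from pci_iommu_alloc(): no memory allocator yet */
	}

	static void __init my_iommu_late_init(void)
	{
		/* called from pci_iommu_init(): allocations are fine here */
	}

	/* Run after SWIOTLB detection; stop the chain once detected. */
	IOMMU_INIT_FINISH(my_iommu_detect, pci_swiotlb_detect_4gb,
			  my_iommu_early_init, my_iommu_late_init);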
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 0b7228268a63..615fa9061b57 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 						 int vector);
-extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
-						 int vector);
-extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
-						 int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+						 int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+						 int vector);
 extern void default_send_IPI_mask_logical(const struct cpumask *mask,
 					 int vector);
 extern void default_send_IPI_allbutself(int vector);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380b6ef8..ba870bb6dd8e 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -15,22 +15,14 @@ static inline int irq_canonicalize(int irq)
 	return ((irq == 2) ? 9 : irq);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-# define ARCH_HAS_NMI_WATCHDOG
-#endif
-
-#ifdef CONFIG_4KSTACKS
-  extern void irq_ctx_init(int cpu);
-  extern void irq_ctx_exit(int cpu);
-# define __ARCH_HAS_DO_SOFTIRQ
+#ifdef CONFIG_X86_32
+extern void irq_ctx_init(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
-# ifdef CONFIG_X86_64
-#  define __ARCH_HAS_DO_SOFTIRQ
-# endif
 #endif
 
+#define __ARCH_HAS_DO_SOFTIRQ
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(void);
diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h
new file mode 100644
index 000000000000..423bbbddf36d
--- /dev/null
+++ b/arch/x86/include/asm/irq_controller.h
@@ -0,0 +1,12 @@
+#ifndef __IRQ_CONTROLLER__
+#define __IRQ_CONTROLLER__
+
+struct irq_domain {
+	int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
+		     u32 *out_hwirq, u32 *out_type);
+	void *priv;
+	struct device_node *controller;
+	struct list_head l;
+};
+
+#endif
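A minimal xlate implementation for this structure might look as follows; this is a sketch, simple_xlate is an invented name, and the one-cell device-tree binding it assumes is hypothetical:

	static int simple_xlate(struct irq_domain *h, const u32 *intspec,
				u32 intsize, u32 *out_hwirq, u32 *out_type)
	{
		if (intsize < 1)
			return -EINVAL;
		*out_hwirq = intspec[0];	/* cell 0: hardware irq number */
		*out_type = IRQ_TYPE_NONE;	/* no trigger info in this binding */
		return 0;
	}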
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index f275e2244505..1c23360fb2d8 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -3,4 +3,39 @@
 
 #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
+#ifdef CONFIG_INTR_REMAP
+static inline void prepare_irte(struct irte *irte, int vector,
+				unsigned int dest)
+{
+	memset(irte, 0, sizeof(*irte));
+
+	irte->present = 1;
+	irte->dst_mode = apic->irq_dest_mode;
+	/*
+	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
+	 * actual level or edge trigger will be set up in the IO-APIC
+	 * RTE. This will help simplify level triggered irq migration.
+	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
+	 * irq migration in the presence of interrupt-remapping.
+	 */
+	irte->trigger_mode = 0;
+	irte->dlvry_mode = apic->irq_delivery_mode;
+	irte->vector = vector;
+	irte->dest_id = IRTE_DEST(dest);
+	irte->redir_hint = 1;
+}
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return cfg->irq_2_iommu.iommu != NULL;
+}
+#else
+static inline void prepare_irte(struct irte *irte, int vector,
+				unsigned int dest)
+{
+}
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return false;
+}
+#endif
+
 #endif /* _ASM_X86_IRQ_REMAPPING_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6143ebeeebfa..99a44cf98453 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_IRQ_VECTORS_H
 #define _ASM_X86_IRQ_VECTORS_H
 
+#include <linux/threads.h>
 /*
  * Linux IRQ vector layout.
  *
@@ -16,8 +17,8 @@
  * Vectors   0 ...  31 : system traps and exceptions - hardcoded events
  * Vectors  32 ... 127 : device interrupts
  * Vector  128         : legacy int80 syscall interface
- * Vectors 129 ... 237 : device interrupts
- * Vectors 238 ... 255 : special interrupts
+ * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
 *
 * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
 *
@@ -96,42 +97,50 @@
 #define THRESHOLD_APIC_VECTOR		0xf9
 #define REBOOT_VECTOR			0xf8
 
-/* f0-f7 used for spreading out TLB flushes: */
-#define INVALIDATE_TLB_VECTOR_END	0xf7
-#define INVALIDATE_TLB_VECTOR_START	0xf0
-#define NUM_INVALIDATE_TLB_VECTORS	8
-
 /*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
+ * Generic system vector for platform specific use
  */
-#define LOCAL_TIMER_VECTOR	0xef
+#define X86_PLATFORM_IPI_VECTOR		0xf7
 
 /*
- * LITMUS^RT pull timers IRQ vector
+ * IRQ work vector:
  */
-#define PULL_TIMERS_VECTOR	0xee
+#define IRQ_WORK_VECTOR			0xf6
+
+#define UV_BAU_MESSAGE			0xf5
 
 /*
- * Generic system vector for platform specific use
+ * Self IPI vector for machine checks
  */
-#define X86_PLATFORM_IPI_VECTOR	0xed
+#define MCE_SELF_VECTOR			0xf4
+
+/* Xen vector callback to receive events in a HVM domain */
+#define XEN_HVM_EVTCHN_CALLBACK		0xf3
 
 /*
- * Performance monitoring pending work vector:
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
  */
-#define LOCAL_PENDING_VECTOR	0xec
+#define LOCAL_TIMER_VECTOR	0xef
 
-#define UV_BAU_MESSAGE	0xea
+/* up to 32 vectors used for spreading out TLB flushes: */
+#if NR_CPUS <= 32
+# define NUM_INVALIDATE_TLB_VECTORS	(NR_CPUS)
+#else
+# define NUM_INVALIDATE_TLB_VECTORS	(32)
+#endif
+
+#define INVALIDATE_TLB_VECTOR_END	(0xee)
+#define INVALIDATE_TLB_VECTOR_START	\
+	(INVALIDATE_TLB_VECTOR_END-NUM_INVALIDATE_TLB_VECTORS+1)
 
 /*
- * Self IPI vector for machine checks
+ * LITMUS^RT pull timers IRQ vector
+ * Make sure it's below the above max 32 vectors.
  */
-#define MCE_SELF_VECTOR	0xeb
+#define PULL_TIMERS_VECTOR	0xce
 
-/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK	0xe9
-
 #define NR_VECTORS			256
 
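Worked through: INVALIDATE_TLB_VECTOR_END is 0xee, so at the 32-vector maximum INVALIDATE_TLB_VECTOR_START evaluates to 0xee - 32 + 1 = 0xcf. Moving PULL_TIMERS_VECTOR to 0xce therefore parks it immediately below the largest possible flush window, which is exactly what the "Make sure it's below the above max 32 vectors" comment asks for.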
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 9e2b952f810a..5745ce8bf108 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -61,22 +61,22 @@ static inline void native_halt(void)
 #else
 #ifndef __ASSEMBLY__
 
-static inline unsigned long __raw_local_save_flags(void)
+static inline unsigned long arch_local_save_flags(void)
 {
 	return native_save_fl();
 }
 
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void arch_local_irq_restore(unsigned long flags)
 {
 	native_restore_fl(flags);
 }
 
-static inline void raw_local_irq_disable(void)
+static inline void arch_local_irq_disable(void)
 {
 	native_irq_disable();
 }
 
-static inline void raw_local_irq_enable(void)
+static inline void arch_local_irq_enable(void)
 {
 	native_irq_enable();
 }
@@ -85,7 +85,7 @@ static inline void raw_local_irq_enable(void)
  * Used in the idle loop; sti takes one instruction cycle
  * to complete:
  */
-static inline void raw_safe_halt(void)
+static inline void arch_safe_halt(void)
 {
 	native_safe_halt();
 }
@@ -102,12 +102,10 @@ static inline void halt(void)
 /*
  * For spinlocks, etc:
  */
-static inline unsigned long __raw_local_irq_save(void)
+static inline unsigned long arch_local_irq_save(void)
 {
-	unsigned long flags = __raw_local_save_flags();
-
-	raw_local_irq_disable();
-
+	unsigned long flags = arch_local_save_flags();
+	arch_local_irq_disable();
 	return flags;
 }
 #else
@@ -153,22 +151,16 @@ static inline unsigned long __raw_local_irq_save(void)
 #endif /* CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLY__
-#define raw_local_save_flags(flags)				\
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-#define raw_local_irq_save(flags)				\
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & X86_EFLAGS_IF);
 }
 
-static inline int raw_irqs_disabled(void)
+static inline int arch_irqs_disabled(void)
 {
-	unsigned long flags = __raw_local_save_flags();
+	unsigned long flags = arch_local_save_flags();
 
-	return raw_irqs_disabled_flags(flags);
+	return arch_irqs_disabled_flags(flags);
 }
 
 #else
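Callers are unaffected by the rename: the arch_* functions are what the generic local_irq_save()/local_irq_restore() wrappers in <linux/irqflags.h> expand to. The usual pattern, for reference:

	unsigned long flags;

	local_irq_save(flags);		/* arch_local_irq_save(): save IF, then disable */
	/* ... interrupt-free critical section ... */
	local_irq_restore(flags);	/* arch_local_irq_restore(flags) */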
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
new file mode 100644
index 000000000000..a32b18ce6ead
--- /dev/null
+++ b/arch/x86/include/asm/jump_label.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_X86_JUMP_LABEL_H
+#define _ASM_X86_JUMP_LABEL_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/nops.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 5
+
+#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+	asm goto("1:"
+		JUMP_LABEL_INITIAL_NOP
+		".pushsection __jump_table, \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+		".popsection \n\t"
+		: : "i" (key) : : l_yes);
+	return false;
+l_yes:
+	return true;
+}
+
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_X86_64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif
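A usage sketch, assuming the generic static_branch()/jump_label_inc() wrappers from <linux/jump_label.h> that this arch hook plugs into (my_feature_key and do_rare_feature_work are invented names):

	static struct jump_label_key my_feature_key;

	void hot_path(void)
	{
		/* Compiles to a 5-byte NOP until jump_label_inc(&my_feature_key)
		 * patches it into a jump to the branch body. */
		if (static_branch(&my_feature_key))
			do_rare_feature_work();
	}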
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
deleted file mode 100644
index af00bd1d2089..000000000000
--- a/arch/x86/include/asm/k8.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _ASM_X86_K8_H
-#define _ASM_X86_K8_H
-
-#include <linux/pci.h>
-
-extern struct pci_device_id k8_nb_ids[];
-struct bootnode;
-
-extern int early_is_k8_nb(u32 value);
-extern struct pci_dev **k8_northbridges;
-extern int num_k8_northbridges;
-extern int cache_k8_northbridges(void);
-extern void k8_flush_garts(void);
-extern int k8_get_nodes(struct bootnode *nodes);
-extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int k8_scan_nodes(void);
-
-#ifdef CONFIG_K8_NB
-extern int num_k8_northbridges;
-
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
-{
-	return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
-}
-
-#else
-#define num_k8_northbridges 0
-
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
-{
-	return NULL;
-}
-#endif
-
-
-#endif /* _ASM_X86_K8_H */
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 5bdfca86581b..fe2cc6e105fa 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -13,12 +13,10 @@ enum die_val {
 	DIE_PANIC,
 	DIE_NMI,
 	DIE_DIE,
-	DIE_NMIWATCHDOG,
 	DIE_KERNELDEBUG,
 	DIE_TRAP,
 	DIE_GPF,
 	DIE_CALL,
-	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 	DIE_NMIUNKNOWN,
 };
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 396f5b5fc4d7..77e95f54570a 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -77,6 +77,7 @@ static inline void arch_kgdb_breakpoint(void)
 }
 #define BREAK_INSTR_SIZE	1
 #define CACHE_FLUSH_IS_SAFE	1
+#define GDB_ADJUSTS_BREAK_OFFSET
 
 extern int kgdb_ll_trap(int cmd, const char *str,
 			struct pt_regs *regs, long err, int trap, int sig);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1f99ecfc48e1..0049211959c0 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -14,6 +14,34 @@
 #include <asm/desc_defs.h>
 
 struct x86_emulate_ctxt;
+enum x86_intercept;
+enum x86_intercept_stage;
+
+struct x86_exception {
+	u8 vector;
+	bool error_code_valid;
+	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
+};
+
+/*
+ * This struct is used to carry enough information from the instruction
+ * decoder to main KVM so that a decision can be made whether the
+ * instruction needs to be intercepted or not.
+ */
+struct x86_instruction_info {
+	u8  intercept;	/* which intercept			*/
+	u8  rep_prefix;	/* rep prefix?				*/
+	u8  modrm_mod;	/* mod part of modrm			*/
+	u8  modrm_reg;	/* index of register used		*/
+	u8  modrm_rm;	/* rm part of modrm			*/
+	u64 src_val;	/* value of source operand		*/
+	u8  src_bytes;	/* size of source operand		*/
+	u8  dst_bytes;	/* size of destination operand		*/
+	u8  ad_bytes;	/* size of src/dst address		*/
+	u64 next_rip;	/* rip following the instruction	*/
+};
 
 /*
  * x86_emulate_ops:
@@ -54,6 +82,7 @@ struct x86_emulate_ctxt;
 #define X86EMUL_RETRY_INSTR	3 /* retry the instruction for some reason */
 #define X86EMUL_CMPXCHG_FAILED	4 /* cmpxchg did not see expected value */
 #define X86EMUL_IO_NEEDED	5 /* IO is needed to complete emulation */
+#define X86EMUL_INTERCEPTED	6 /* Intercepted by nested VMCB/VMCS */
 
 struct x86_emulate_ops {
 	/*
@@ -63,8 +92,10 @@ struct x86_emulate_ops {
 	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+	int (*read_std)(struct x86_emulate_ctxt *ctxt,
+			unsigned long addr, void *val,
+			unsigned int bytes,
+			struct x86_exception *fault);
 
 	/*
 	 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -73,8 +104,9 @@ struct x86_emulate_ops {
 	 *  @val:   [OUT] Value to write to memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to write to memory.
 	 */
-	int (*write_std)(unsigned long addr, void *val,
-			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+	int (*write_std)(struct x86_emulate_ctxt *ctxt,
+			 unsigned long addr, void *val, unsigned int bytes,
+			 struct x86_exception *fault);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *        Used for instruction fetch.
@@ -82,8 +114,9 @@ struct x86_emulate_ops {
 	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*fetch)(unsigned long addr, void *val,
-		     unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+	int (*fetch)(struct x86_emulate_ctxt *ctxt,
+		     unsigned long addr, void *val, unsigned int bytes,
+		     struct x86_exception *fault);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
@@ -91,11 +124,9 @@ struct x86_emulate_ops {
 	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
-	int (*read_emulated)(unsigned long addr,
-			     void *val,
-			     unsigned int bytes,
-			     unsigned int *error,
-			     struct kvm_vcpu *vcpu);
+	int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
+			     unsigned long addr, void *val, unsigned int bytes,
+			     struct x86_exception *fault);
 
 	/*
 	 * write_emulated: Write bytes to emulated/special memory area.
@@ -104,11 +135,10 @@ struct x86_emulate_ops {
 	 *                required).
 	 *  @bytes: [IN ] Number of bytes to write to memory.
 	 */
-	int (*write_emulated)(unsigned long addr,
-			      const void *val,
-			      unsigned int bytes,
-			      unsigned int *error,
-			      struct kvm_vcpu *vcpu);
+	int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
+			      unsigned long addr, const void *val,
+			      unsigned int bytes,
+			      struct x86_exception *fault);
 
 	/*
 	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -118,49 +148,72 @@ struct x86_emulate_ops {
 	 *  @new:   [IN ] Value to write to @addr.
 	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
 	 */
-	int (*cmpxchg_emulated)(unsigned long addr,
+	int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
+				unsigned long addr,
 				const void *old,
 				const void *new,
 				unsigned int bytes,
-				unsigned int *error,
-				struct kvm_vcpu *vcpu);
-
-	int (*pio_in_emulated)(int size, unsigned short port, void *val,
-			       unsigned int count, struct kvm_vcpu *vcpu);
-
-	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
-				unsigned int count, struct kvm_vcpu *vcpu);
-
-	bool (*get_cached_descriptor)(struct desc_struct *desc,
-				      int seg, struct kvm_vcpu *vcpu);
-	void (*set_cached_descriptor)(struct desc_struct *desc,
-				      int seg, struct kvm_vcpu *vcpu);
-	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
-	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
-	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
-	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
-	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
-	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
-	int (*cpl)(struct kvm_vcpu *vcpu);
-	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
-	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+				struct x86_exception *fault);
+	void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
+
+	int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
+			       int size, unsigned short port, void *val,
+			       unsigned int count);
+
+	int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
+				int size, unsigned short port, const void *val,
+				unsigned int count);
+
+	bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
+			    struct desc_struct *desc, u32 *base3, int seg);
+	void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
+			    struct desc_struct *desc, u32 base3, int seg);
+	unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
+						 int seg);
+	void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+	void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+	void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+	void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
+	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
+	int (*cpl)(struct x86_emulate_ctxt *ctxt);
+	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
+	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
+	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
+	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+	void (*halt)(struct x86_emulate_ctxt *ctxt);
+	void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
+	int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
+	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
+	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
+	int (*intercept)(struct x86_emulate_ctxt *ctxt,
+			 struct x86_instruction_info *info,
+			 enum x86_intercept_stage stage);
 };
 
+typedef u32 __attribute__((vector_size(16))) sse128_t;
+
 /* Type, address-of, and value of an instruction's operand. */
 struct operand {
-	enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
+	enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
 	unsigned int bytes;
 	union {
 		unsigned long orig_val;
 		u64 orig_val64;
 	};
-	unsigned long *ptr;
+	union {
+		unsigned long *reg;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
+		unsigned xmm;
+	} addr;
 	union {
 		unsigned long val;
 		u64 val64;
 		char valptr[sizeof(unsigned long) + 2];
+		sse128_t vec_val;
 	};
 };
 
@@ -179,6 +232,7 @@ struct read_cache {
 struct decode_cache {
 	u8 twobyte;
 	u8 b;
+	u8 intercept;
 	u8 lock_prefix;
 	u8 rep_prefix;
 	u8 op_bytes;
@@ -190,6 +244,8 @@ struct decode_cache {
 	bool has_seg_override;
 	u8 seg_override;
 	unsigned int d;
+	int (*execute)(struct x86_emulate_ctxt *ctxt);
+	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 	unsigned long regs[NR_VCPU_REGS];
 	unsigned long eip;
 	/* modrm */
@@ -197,43 +253,39 @@ struct decode_cache {
 	u8 modrm_mod;
 	u8 modrm_reg;
 	u8 modrm_rm;
-	u8 use_modrm_ea;
+	u8 modrm_seg;
 	bool rip_relative;
-	unsigned long modrm_ea;
-	void *modrm_ptr;
-	unsigned long modrm_val;
 	struct fetch_cache fetch;
 	struct read_cache io_read;
 	struct read_cache mem_read;
 };
 
 struct x86_emulate_ctxt {
-	/* Register state before/after emulation. */
-	struct kvm_vcpu *vcpu;
+	struct x86_emulate_ops *ops;
 
+	/* Register state before/after emulation. */
 	unsigned long eflags;
 	unsigned long eip; /* eip before instruction emulation */
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
 	int mode;
-	u32 cs_base;
 
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
 
-	bool restart; /* restart string instruction after writeback */
+	bool guest_mode; /* guest running a nested guest */
+	bool perm_ok; /* do not check permissions if true */
+	bool only_vendor_specific_insn;
 
-	int exception; /* exception that happens during emulation or -1 */
-	u32 error_code; /* error code for exception */
-	bool error_code_valid;
-	unsigned long cr2; /* faulted address in case of #PF */
+	bool have_exception;
+	struct x86_exception exception;
 
 	/* decode cache */
 	struct decode_cache decode;
 };
 
 /* Repeat String Operation Prefix */
-#define REPE_PREFIX	1
-#define REPNE_PREFIX	2
+#define REPE_PREFIX	0xf3
+#define REPNE_PREFIX	0xf2
 
 /* Execution mode, passed to the emulator. */
 #define X86EMUL_MODE_REAL     0	/* Real mode. */
@@ -242,6 +294,69 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT32   4	/* 32-bit protected mode. */
 #define X86EMUL_MODE_PROT64   8	/* 64-bit (long) mode. */
 
+/* any protected mode */
+#define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
+			       X86EMUL_MODE_PROT64)
+
+enum x86_intercept_stage {
+	X86_ICTP_NONE = 0,	/* Allow zero-init to not match anything */
+	X86_ICPT_PRE_EXCEPT,
+	X86_ICPT_POST_EXCEPT,
+	X86_ICPT_POST_MEMACCESS,
+};
+
+enum x86_intercept {
+	x86_intercept_none,
+	x86_intercept_cr_read,
+	x86_intercept_cr_write,
+	x86_intercept_clts,
+	x86_intercept_lmsw,
+	x86_intercept_smsw,
+	x86_intercept_dr_read,
+	x86_intercept_dr_write,
+	x86_intercept_lidt,
+	x86_intercept_sidt,
+	x86_intercept_lgdt,
+	x86_intercept_sgdt,
+	x86_intercept_lldt,
+	x86_intercept_sldt,
+	x86_intercept_ltr,
+	x86_intercept_str,
+	x86_intercept_rdtsc,
+	x86_intercept_rdpmc,
+	x86_intercept_pushf,
+	x86_intercept_popf,
+	x86_intercept_cpuid,
+	x86_intercept_rsm,
+	x86_intercept_iret,
+	x86_intercept_intn,
+	x86_intercept_invd,
+	x86_intercept_pause,
+	x86_intercept_hlt,
+	x86_intercept_invlpg,
+	x86_intercept_invlpga,
+	x86_intercept_vmrun,
+	x86_intercept_vmload,
+	x86_intercept_vmsave,
+	x86_intercept_vmmcall,
+	x86_intercept_stgi,
+	x86_intercept_clgi,
+	x86_intercept_skinit,
+	x86_intercept_rdtscp,
+	x86_intercept_icebp,
+	x86_intercept_wbinvd,
+	x86_intercept_monitor,
+	x86_intercept_mwait,
+	x86_intercept_rdmsr,
+	x86_intercept_wrmsr,
+	x86_intercept_in,
+	x86_intercept_ins,
+	x86_intercept_out,
+	x86_intercept_outs,
+
+	nr_x86_intercepts
+};
+
 /* Host execution mode. */
 #if defined(CONFIG_X86_32)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
@@ -249,13 +364,15 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
-		    struct x86_emulate_ops *ops);
-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
-		     struct x86_emulate_ops *ops);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+#define EMULATION_FAILED -1
+#define EMULATION_OK 0
+#define EMULATION_RESTART 1
+#define EMULATION_INTERCEPTED 2
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
-			 struct x86_emulate_ops *ops,
 			 u16 tss_selector, int reason,
 			 bool has_error_code, u32 error_code);
-
+int emulate_int_real(struct x86_emulate_ctxt *ctxt,
+		     struct x86_emulate_ops *ops, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
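The new entry points drop the explicit ops argument (the ops table now lives in ctxt->ops) and report status via the EMULATION_* codes. A simplified sketch of the resulting caller shape in arch/x86/kvm/x86.c, with retry, write-back, and MMIO handling elided:

	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int rc;

	rc = x86_decode_insn(ctxt, insn, insn_len);
	if (rc != EMULATION_OK)
		return EMULATE_FAIL;

	rc = x86_emulate_insn(ctxt);
	if (rc == EMULATION_INTERCEPTED)
		return EMULATE_DONE;	/* a nested hypervisor claimed it */
	if (rc == EMULATION_FAILED)
		return EMULATE_FAIL;
	return EMULATE_DONE;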
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c52e2eb40a1e..d2ac8e2ee897 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -30,14 +30,30 @@
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that are not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MMIO_SIZE 16
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
 
+#define CR0_RESERVED_BITS						\
+	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
+
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
 				  0xFFFFFF0000000000ULL)
+#define CR4_RESERVED_BITS						\
+	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
+			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
+			  | X86_CR4_OSXSAVE \
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+
+#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+
+
 
 #define INVALID_PAGE (~(hpa_t)0)
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
@@ -79,15 +95,18 @@
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
-#define KVM_MAX_CPUID_ENTRIES 40
+#define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
-extern spinlock_t kvm_lock;
+#define ASYNC_PF_PER_VCPU 64
+
+extern raw_spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
 struct kvm;
+struct kvm_async_pf;
 
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
@@ -114,6 +133,10 @@ enum kvm_reg {
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
+	VCPU_EXREG_RFLAGS,
+	VCPU_EXREG_CPL,
+	VCPU_EXREG_SEGMENTS,
 };
 
 enum {
@@ -236,26 +259,39 @@ struct kvm_pio_request {
  */
 struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
+	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
+	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
 	int root_level;
 	int shadow_root_level;
 	union kvm_mmu_page_role base_role;
+	bool direct_map;
 
 	u64 *pae_root;
+	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
+
+	bool nx;
+
+	u64 pdptrs[4]; /* pae */
 };
 
 struct kvm_vcpu_arch {
-	u64 host_tsc;
 	/*
 	 * rip and regs accesses must go through
 	 * kvm_{register,rip}_{read,write} functions.
@@ -272,7 +308,6 @@ struct kvm_vcpu_arch {
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
 	u32 hflags;
-	u64 pdptrs[4]; /* pae */
 	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
@@ -282,7 +317,31 @@ struct kvm_vcpu_arch {
 	u64 ia32_misc_enable_msr;
 	bool tpr_access_reporting;
 
+	/*
+	 * Paging state of the vcpu
+	 *
+	 * If the vcpu runs in guest mode with two level paging this still saves
+	 * the paging mode of the l1 guest. This context is always used to
+	 * handle faults.
+	 */
 	struct kvm_mmu mmu;
+
+	/*
+	 * Paging state of an L2 guest (used for nested npt)
+	 *
+	 * This context saves all necessary information to walk page tables
+	 * of an L2 guest. This context is only initialized for page table
+	 * walking and not for faulting since we never handle l2 page faults on
+	 * the host.
+	 */
+	struct kvm_mmu nested_mmu;
+
+	/*
+	 * Pointer to the mmu context currently used for
+	 * gva_to_gpa translations.
+	 */
+	struct kvm_mmu *walk_mmu;
+
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
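Put differently: after this change every gva-to-gpa translation goes through vcpu->arch.walk_mmu, which points at &vcpu->arch.mmu for an ordinary guest and at &vcpu->arch.nested_mmu while an L2 guest runs under nested NPT, whereas fault handling itself always stays with the mmu context, as the comments above spell out.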
@@ -297,16 +356,9 @@
 	u64 *last_pte_updated;
 	gfn_t last_pte_gfn;
 
-	struct {
-		gfn_t gfn;	/* presumed gfn during guest pte update */
-		pfn_t pfn;	/* pfn corresponding to that gfn */
-		unsigned long mmu_seq;
-	} update_pte;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
-	gva_t mmio_fault_cr2;
 	struct kvm_pio_request pio;
 	void *pio_data;
 
@@ -333,12 +385,22 @@
 	/* emulate context */
 
 	struct x86_emulate_ctxt emulate_ctxt;
+	bool emulate_regs_need_sync_to_vcpu;
+	bool emulate_regs_need_sync_from_vcpu;
 
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
-	unsigned int hv_clock_tsc_khz;
+	unsigned int hw_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
+	u64 last_guest_tsc;
+	u64 last_kernel_ns;
+	u64 last_tsc_nsec;
+	u64 last_tsc_write;
+	u32 virtual_tsc_khz;
+	bool tsc_catchup;
+	u32 tsc_catchup_mult;
+	s8 tsc_catchup_shift;
 
 	bool nmi_pending;
 	bool nmi_injected;
@@ -364,12 +426,21 @@
 	u64 hv_vapic;
 
 	cpumask_var_t wbinvd_dirty_mask;
+
+	struct {
+		bool halted;
+		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		struct gfn_to_hva_cache data;
+		u64 msr_val;
+		u32 id;
+		bool send_user_only;
+	} apf;
 };
 
 struct kvm_arch {
-	unsigned int n_free_mmu_pages;
+	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
+	unsigned int n_max_mmu_pages;
 	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
@@ -394,14 +465,21 @@ struct kvm_arch {
 	gpa_t ept_identity_map_addr;
 
 	unsigned long irq_sources_bitmap;
-	u64 vm_init_tsc;
 	s64 kvmclock_offset;
+	raw_spinlock_t tsc_write_lock;
+	u64 last_tsc_nsec;
+	u64 last_tsc_offset;
+	u64 last_tsc_write;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+	#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+	#endif
 };
 
 struct kvm_vm_stat {
@@ -443,6 +521,8 @@ struct kvm_vcpu_stat {
 	u32 nmi_injections;
 };
 
+struct x86_instruction_info;
+
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -475,6 +555,7 @@
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -505,6 +586,7 @@
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject);
+	void (*cancel_injection)(struct kvm_vcpu *vcpu);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -517,14 +599,35 @@
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+
+	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
 
 	bool (*has_wbinvd_exit)(void);
 
+	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
+	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+
+	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*check_intercept)(struct kvm_vcpu *vcpu,
+			       struct x86_instruction_info *info,
+			       enum x86_intercept_stage stage);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
+struct kvm_arch_async_pf {
+	u32 token;
+	gfn_t gfn;
+	unsigned long cr3;
+	bool direct_map;
+};
+
 extern struct kvm_x86_ops *kvm_x86_ops;
 
 int kvm_mmu_module_init(void);
@@ -534,7 +637,6 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
-void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -544,7 +646,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
@@ -554,6 +656,13 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
+/* control of guest tsc rate supported? */
+extern bool kvm_has_tsc_control;
+/* minimum supported tsc_khz for guests */
+extern u32 kvm_min_guest_tsc_khz;
+/* maximum supported tsc_khz for guests */
+extern u32 kvm_max_guest_tsc_khz;
+
 enum emulation_result {
 	EMULATE_DONE,       /* no further processing */
 	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
@@ -563,10 +672,14 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, u16 error_code, int emulation_type);
-void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
-void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+			int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
@@ -577,8 +690,6 @@ struct x86_emulate_ctxt;
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
-int emulate_clts(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@@ -590,7 +701,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
@@ -608,8 +719,11 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
-			   u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			    gfn_t gfn, void *data, int offset, int len,
+			    u32 access);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
@@ -627,16 +741,19 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);
634 752
635int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); 753int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
636 754
637int kvm_fix_hypercall(struct kvm_vcpu *vcpu); 755int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
638 756 void *insn, int insn_len);
639int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
640void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); 757void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
641 758
642void kvm_enable_tdp(void); 759void kvm_enable_tdp(void);
@@ -703,20 +820,25 @@ enum {
703#define HF_VINTR_MASK (1 << 2) 820#define HF_VINTR_MASK (1 << 2)
704#define HF_NMI_MASK (1 << 3) 821#define HF_NMI_MASK (1 << 3)
705#define HF_IRET_MASK (1 << 4) 822#define HF_IRET_MASK (1 << 4)
823#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
706 824
707/* 825/*
708 * Hardware virtualization extension instructions may fault if a 826 * Hardware virtualization extension instructions may fault if a
709 * reboot turns off virtualization while processes are running. 827 * reboot turns off virtualization while processes are running.
710 * Trap the fault and ignore the instruction if that happens. 828 * Trap the fault and ignore the instruction if that happens.
711 */ 829 */
712asmlinkage void kvm_handle_fault_on_reboot(void); 830asmlinkage void kvm_spurious_fault(void);
831extern bool kvm_rebooting;
713 832
714#define __kvm_handle_fault_on_reboot(insn) \ 833#define __kvm_handle_fault_on_reboot(insn) \
715 "666: " insn "\n\t" \ 834 "666: " insn "\n\t" \
835 "668: \n\t" \
716 ".pushsection .fixup, \"ax\" \n" \ 836 ".pushsection .fixup, \"ax\" \n" \
717 "667: \n\t" \ 837 "667: \n\t" \
838 "cmpb $0, kvm_rebooting \n\t" \
839 "jne 668b \n\t" \
718 __ASM_SIZE(push) " $666b \n\t" \ 840 __ASM_SIZE(push) " $666b \n\t" \
719 "jmp kvm_handle_fault_on_reboot \n\t" \ 841 "call kvm_spurious_fault \n\t" \
720 ".popsection \n\t" \ 842 ".popsection \n\t" \
721 ".pushsection __ex_table, \"a\" \n\t" \ 843 ".pushsection __ex_table, \"a\" \n\t" \
722 _ASM_PTR " 666b, 667b \n\t" \ 844 _ASM_PTR " 666b, 667b \n\t" \
@@ -725,6 +847,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
725#define KVM_ARCH_WANT_MMU_NOTIFIER 847#define KVM_ARCH_WANT_MMU_NOTIFIER
726int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 848int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
727int kvm_age_hva(struct kvm *kvm, unsigned long hva); 849int kvm_age_hva(struct kvm *kvm, unsigned long hva);
850int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
728void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 851void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
729int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); 852int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
730int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); 853int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
@@ -736,4 +859,15 @@ void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
736 859
737bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); 860bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
738 861
862void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
863 struct kvm_async_pf *work);
864void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
865 struct kvm_async_pf *work);
866void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
867 struct kvm_async_pf *work);
868bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
869extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
870
871void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
872
739#endif /* _ASM_X86_KVM_HOST_H */ 873#endif /* _ASM_X86_KVM_HOST_H */
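
Aside on the emulate_instruction() change above: the old entry point survives as a static inline wrapper that forwards to the widened x86_emulate_instruction() with defaulted cr2/insn arguments, so existing callers keep working. A minimal standalone C sketch of that wrapper pattern (userspace mock; struct kvm_vcpu and the printf body here are hypothetical stand-ins, not kernel code):

#include <stdio.h>

struct kvm_vcpu { int id; };	/* hypothetical stand-in */

static int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
				   int emulation_type, void *insn, int insn_len)
{
	/* the real function runs the x86 emulator; here we just trace */
	printf("vcpu %d: cr2=%#lx type=%d insn=%p len=%d\n",
	       vcpu->id, cr2, emulation_type, insn, insn_len);
	return 0;
}

/* old narrow signature kept as a thin wrapper, as in the diff */
static inline int emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .id = 0 };
	return emulate_instruction(&vcpu, 0);
}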
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 05eba5e9a8e8..a427bf77a93d 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -20,6 +20,7 @@
  * are available. The use of 0x11 and 0x12 is deprecated
  */
 #define KVM_FEATURE_CLOCKSOURCE2        3
+#define KVM_FEATURE_ASYNC_PF		4
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -32,9 +33,13 @@
 /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
 #define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+#define MSR_KVM_ASYNC_PF_EN	0x4b564d02
 
 #define KVM_MAX_MMU_OP_BATCH           32
 
+#define KVM_ASYNC_PF_ENABLED		(1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS	(1 << 1)
+
 /* Operations for KVM_HC_MMU_OP */
 #define KVM_MMU_OP_WRITE_PTE            1
 #define KVM_MMU_OP_FLUSH_TLB	        2
@@ -61,10 +66,20 @@ struct kvm_mmu_op_release_pt {
 	__u64 pt_phys;
 };
 
+#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
+#define KVM_PV_REASON_PAGE_READY 2
+
+struct kvm_vcpu_pv_apf_data {
+	__u32 reason;
+	__u8 pad[60];
+	__u32 enabled;
+};
+
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
 extern void kvmclock_init(void);
+extern int kvm_register_clock(char *txt);
 
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
@@ -158,6 +173,21 @@ static inline unsigned int kvm_arch_para_features(void)
 	return cpuid_eax(KVM_CPUID_FEATURES);
 }
 
+#ifdef CONFIG_KVM_GUEST
+void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
+#else
+#define kvm_guest_init() do { } while (0)
+#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wake(T) do {} while(0)
+static inline u32 kvm_read_and_reset_pf_reason(void)
+{
+	return 0;
+}
 #endif
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_X86_KVM_PARA_H */
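
Aside on the async-PF ABI introduced above: a guest enables it by writing the guest-physical address of its per-vcpu kvm_vcpu_pv_apf_data area to MSR_KVM_ASYNC_PF_EN, with flag bits carried in the low bits of the same value (the area's alignment is what leaves those bits free). A hedged userspace sketch of composing that value; the address is hypothetical, and a real guest would issue wrmsr rather than printf:

#include <stdint.h>
#include <stdio.h>

#define MSR_KVM_ASYNC_PF_EN		0x4b564d02
#define KVM_ASYNC_PF_ENABLED		(1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS	(1 << 1)

int main(void)
{
	uint64_t apf_area_pa = 0x1000;	/* hypothetical guest-physical address */
	uint64_t val = apf_area_pa | KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_SEND_ALWAYS;

	printf("wrmsr(%#x, %#llx)\n", MSR_KVM_ASYNC_PF_EN,
	       (unsigned long long)val);
	return 0;
}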
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 12d55e773eb6..48142971b25d 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -8,11 +8,6 @@
 
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-/*
- * For 32-bit UML - mark functions implemented in assembly that use
- * regparm input parameters:
- */
-#define asmregparm __attribute__((regparm(3)))
 
 /*
  * Make sure the compiler doesn't do anything stupid with the
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..72a8b52e7dfd 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
 
 #include <asm/mc146818rtc.h>
 
+#define NMI_REASON_PORT		0x61
+
+#define NMI_REASON_SERR		0x80
+#define NMI_REASON_IOCHK	0x40
+#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR	0x04
+#define NMI_REASON_CLEAR_IOCHK	0x08
+#define NMI_REASON_CLEAR_MASK	0x0f
+
 static inline unsigned char get_nmi_reason(void)
 {
-	return inb(0x61);
+	return inb(NMI_REASON_PORT);
 }
 
 static inline void reassert_nmi(void)
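
The NMI_REASON_* constants above replace the magic numbers previously used around port 0x61. A small standalone sketch of decoding a reason byte with them (the byte is mocked; real code would obtain it via get_nmi_reason()):

#include <stdio.h>

#define NMI_REASON_SERR		0x80
#define NMI_REASON_IOCHK	0x40
#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)

int main(void)
{
	unsigned char reason = 0xc0;	/* hypothetical value read from port 0x61 */

	if (reason & NMI_REASON_SERR)
		printf("NMI: PCI SERR asserted\n");
	if (reason & NMI_REASON_IOCHK)
		printf("NMI: I/O check (IOCHK) asserted\n");
	if (!(reason & NMI_REASON_MASK))
		printf("NMI: unknown reason\n");
	return 0;
}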
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c62c13cb9788..021979a6e23f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -142,8 +142,6 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
 static inline void enable_p5_mce(void) {}
 #endif
 
-extern void (*x86_mce_decode_callback)(struct mce *m);
-
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, mce_dev);
@@ -223,6 +221,9 @@ void intel_init_thermal(struct cpuinfo_x86 *c);
 
 void mce_log_therm_throt_event(__u64 status);
 
+/* Interrupt Handler for core thermal thresholds */
+extern int (*platform_thermal_notify)(__u64 msr_val);
+
 #ifdef CONFIG_X86_THERMAL_VECTOR
 extern void mcheck_intel_therm_init(void);
 #else
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
new file mode 100644
index 000000000000..0cd3800f33b9
--- /dev/null
+++ b/arch/x86/include/asm/memblock.h
@@ -0,0 +1,23 @@
+#ifndef _X86_MEMBLOCK_H
+#define _X86_MEMBLOCK_H
+
+#define ARCH_DISCARD_MEMBLOCK
+
+u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
+
+void memblock_x86_reserve_range(u64 start, u64 end, char *name);
+void memblock_x86_free_range(u64 start, u64 end);
+struct range;
+int __get_free_all_memory_range(struct range **range, int nodeid,
+			unsigned long start_pfn, unsigned long end_pfn);
+int get_free_all_memory_range(struct range **rangep, int nodeid);
+
+void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
+					 unsigned long last_pfn);
+u64 memblock_x86_hole_size(u64 start, u64 end);
+u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
+u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
+u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
+bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
+
+#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index ef51b501e22a..24215072d0e1 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -48,6 +48,12 @@ static inline struct microcode_ops * __init init_intel_microcode(void)
 
 #ifdef CONFIG_MICROCODE_AMD
 extern struct microcode_ops * __init init_amd_microcode(void);
+
+static inline void get_ucode_data(void *to, const u8 *from, size_t n)
+{
+	memcpy(to, from, n);
+}
+
 #else
 static inline struct microcode_ops * __init init_amd_microcode(void)
 {
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 80a1dee5bea5..5f55e6962769 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -11,6 +11,12 @@
 typedef struct {
 	void *ldt;
 	int size;
+
+#ifdef CONFIG_X86_64
+	/* True if mm supports a task running in 32 bit compatibility mode. */
+	unsigned short ia32_compat;
+#endif
+
 	struct mutex lock;
 	void *vdso;
 } mm_context_t;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0c18d9..8b5393ec1080 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 91df7c51806c..ffa037f28d39 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -13,31 +13,11 @@ extern struct pglist_data *node_data[];
 #define NODE_DATA(nid)	(node_data[nid])
 
 #include <asm/numaq.h>
-/* summit or generic arch */
-#include <asm/srat.h>
-
-extern int get_memcfg_numa_flat(void);
-/*
- * This allows any one NUMA architecture to be compiled
- * for, and still fall back to the flat function if it
- * fails.
- */
-static inline void get_memcfg_numa(void)
-{
-
-	if (get_memcfg_numaq())
-		return;
-	if (get_memcfg_from_srat())
-		return;
-	get_memcfg_numa_flat();
-}
 
 extern void resume_map_numa_kva(pgd_t *pgd);
 
 #else /* !CONFIG_NUMA */
 
-#define get_memcfg_numa get_memcfg_numa_flat
-
 static inline void resume_map_numa_kva(pgd_t *pgd) {}
 
 #endif /* CONFIG_NUMA */
@@ -68,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
-/*
- * Following are macros that each numa implmentation must define.
- */
-
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)						\
-({									\
-	pg_data_t *__pgdat = NODE_DATA(nid);				\
-	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
-})
-
 static inline int pfn_valid(int pfn)
 {
 	int nid = pfn_to_nid(pfn);
@@ -88,6 +57,8 @@ static inline int pfn_valid(int pfn)
 	return 0;
 }
 
+#define early_pfn_valid(pfn)	pfn_valid((pfn))
+
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index 288b96f815a6..129d9aa3ceb3 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -4,40 +4,14 @@
 #ifndef _ASM_X86_MMZONE_64_H
 #define _ASM_X86_MMZONE_64_H
 
-
 #ifdef CONFIG_NUMA
 
 #include <linux/mmdebug.h>
-
 #include <asm/smp.h>
 
-/* Simple perfect hash to map physical addresses to node numbers */
-struct memnode {
-	int shift;
-	unsigned int mapsize;
-	s16 *map;
-	s16 embedded_map[64 - 8];
-} ____cacheline_aligned; /* total size = 128 bytes */
-extern struct memnode memnode;
-#define memnode_shift memnode.shift
-#define memnodemap memnode.map
-#define memnodemapsize memnode.mapsize
-
 extern struct pglist_data *node_data[];
 
-static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
-{
-	unsigned nid;
-	VIRTUAL_BUG_ON(!memnodemap);
-	nid = memnodemap[addr >> memnode_shift];
-	VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
-	return nid;
-}
-
 #define NODE_DATA(nid)		(node_data[nid])
 
-#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn +	\
-				 NODE_DATA(nid)->node_spanned_pages)
 #endif
 #endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 3e2ce58a31a3..9eae7752ae9b 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -35,7 +35,7 @@
 #define MODULE_PROC_FAMILY "K7 "
 #elif defined CONFIG_MK8
 #define MODULE_PROC_FAMILY "K8 "
-#elif defined CONFIG_X86_ELAN
+#elif defined CONFIG_MELAN
 #define MODULE_PROC_FAMILY "ELAN "
 #elif defined CONFIG_MCRUSOE
 #define MODULE_PROC_FAMILY "CRUSOE "
@@ -60,12 +60,7 @@
 #endif
 
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_4KSTACKS
-#  define MODULE_STACKSIZE "4KSTACKS "
-# else
-#  define MODULE_STACKSIZE ""
-# endif
-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
+# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
 #endif
 
 #endif /* _ASM_X86_MODULE_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index c82868e9f905..9c7d95f6174b 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -5,8 +5,9 @@
 
 #include <asm/mpspec_def.h>
 #include <asm/x86_init.h>
+#include <asm/apicdef.h>
 
-extern int apic_version[MAX_APICS];
+extern int apic_version[];
 extern int pic_mode;
 
 #ifdef CONFIG_X86_32
@@ -24,7 +25,6 @@ extern int pic_mode;
 #define MAX_IRQ_SOURCES		256
 
 extern unsigned int def_to_bigsmp;
-extern u8 apicid_2_node[];
 
 #ifdef CONFIG_X86_NUMAQ
 extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -32,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
-#define MAX_APICID		256
-
 #else /* CONFIG_X86_64: */
 
 #define MAX_MP_BUSSES		256
@@ -107,7 +105,7 @@ extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
 				 int active_high_low);
 #endif /* CONFIG_ACPI */
 
-#define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
+#define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_LOCAL_APIC)
 
 struct physid_mask {
 	unsigned long mask[PHYSID_ARRAY_SIZE];
@@ -122,31 +120,31 @@ typedef struct physid_mask physid_mask_t;
 	test_and_set_bit(physid, (map).mask)
 
 #define physids_and(dst, src1, src2)				\
-	bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+	bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
 
 #define physids_or(dst, src1, src2)				\
-	bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
+	bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
 
 #define physids_clear(map)					\
-	bitmap_zero((map).mask, MAX_APICS)
+	bitmap_zero((map).mask, MAX_LOCAL_APIC)
 
 #define physids_complement(dst, src)				\
-	bitmap_complement((dst).mask, (src).mask, MAX_APICS)
+	bitmap_complement((dst).mask, (src).mask, MAX_LOCAL_APIC)
 
 #define physids_empty(map)					\
-	bitmap_empty((map).mask, MAX_APICS)
+	bitmap_empty((map).mask, MAX_LOCAL_APIC)
 
 #define physids_equal(map1, map2)				\
-	bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
+	bitmap_equal((map1).mask, (map2).mask, MAX_LOCAL_APIC)
 
 #define physids_weight(map)					\
-	bitmap_weight((map).mask, MAX_APICS)
+	bitmap_weight((map).mask, MAX_LOCAL_APIC)
 
 #define physids_shift_right(d, s, n)				\
-	bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
+	bitmap_shift_right((d).mask, (s).mask, n, MAX_LOCAL_APIC)
 
 #define physids_shift_left(d, s, n)				\
-	bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
+	bitmap_shift_left((d).mask, (s).mask, n, MAX_LOCAL_APIC)
 
 static inline unsigned long physids_coerce(physid_mask_t *map)
 {
@@ -159,14 +157,6 @@ static inline void physids_promote(unsigned long physids, physid_mask_t *map)
 	map->mask[0] = physids;
 }
 
-/* Note: will create very large stack frames if physid_mask_t is big */
-#define physid_mask_of_physid(physid)					\
-	({								\
-		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
-		physid_set(physid, __physid_mask);			\
-		__physid_mask;						\
-	})
-
 static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
 {
 	physids_clear(*map);
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 4a7f96d7c188..c0a955a9a087 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -15,13 +15,6 @@
 
 #ifdef CONFIG_X86_32
 # define MAX_MPC_ENTRY 1024
-# define MAX_APICS      256
-#else
-# if NR_CPUS <= 255
-#  define MAX_APICS     255
-# else
-#  define MAX_APICS     32768
-# endif
 #endif
 
 /* Intel MP Floating Pointer Structure */
diff --git a/arch/x86/include/asm/mrst-vrtc.h b/arch/x86/include/asm/mrst-vrtc.h
new file mode 100644
index 000000000000..73668abdbedf
--- /dev/null
+++ b/arch/x86/include/asm/mrst-vrtc.h
@@ -0,0 +1,9 @@
+#ifndef _MRST_VRTC_H
+#define _MRST_VRTC_H
+
+extern unsigned char vrtc_cmos_read(unsigned char reg);
+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
+extern unsigned long vrtc_get_time(void);
+extern int vrtc_set_mmss(unsigned long nowtime);
+
+#endif
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 16350740edf6..719f00b28ff5 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -10,8 +10,13 @@
  */
 #ifndef _ASM_X86_MRST_H
 #define _ASM_X86_MRST_H
+
+#include <linux/sfi.h>
+
 extern int pci_mrst_init(void);
-int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
 
 /*
  * Medfield is the follow-up of Moorestown, it combines two chip solution into
@@ -26,7 +31,7 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
-static enum mrst_cpu_type mrst_identify_cpu(void)
+static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
 	return __mrst_cpu_chip;
 }
@@ -42,4 +47,19 @@ extern enum mrst_timer_options mrst_timer_options;
 #define SFI_MTMR_MAX_NUM 8
 #define SFI_MRTC_MAX	8
 
+extern struct console early_mrst_console;
+extern void mrst_early_console_init(void);
+
+extern struct console early_hsu_console;
+extern void hsu_early_console_init(void);
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ	(1024)
+/*#define MRST_VRTC_PGOFFSET	(0xc00) */
+
+extern void mrst_rtc_init(void);
+
 #endif /* _ASM_X86_MRST_H */
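
The `static` to `static inline` fix to mrst_identify_cpu() above is worth a note: a plain `static` function defined in a header emits a private, possibly unused copy (and -Wunused-function warnings) in every .c file that includes it, while `static inline` is silently discarded when unused. Minimal illustration; both functions are hypothetical and exist only for the demo:

/* cc -Wunused-function demo.c: warns about plain_copy only */
static int plain_copy(void) { return 0; }		/* copied per includer */
static inline int inline_copy(void) { return 0; }	/* dropped if unused */

int main(void)
{
	return inline_copy();
}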
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 986f7790fdb2..485b4f1f079b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,8 +36,14 @@
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
+#define NHM_C3_AUTO_DEMOTE		(1UL << 25)
+#define NHM_C1_AUTO_DEMOTE		(1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
+
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
+#define MSR_IA32_BBL_CR_CTL3		0x0000011e
 
 #define MSR_IA32_SYSENTER_CS		0x00000174
 #define MSR_IA32_SYSENTER_ESP		0x00000175
@@ -47,6 +53,9 @@
 #define MSR_IA32_MCG_STATUS		0x0000017a
 #define MSR_IA32_MCG_CTL		0x0000017b
 
+#define MSR_OFFCORE_RSP_0		0x000001a6
+#define MSR_OFFCORE_RSP_1		0x000001a7
+
 #define MSR_IA32_PEBS_ENABLE		0x000003f1
 #define MSR_IA32_DS_AREA		0x00000600
 #define MSR_IA32_PERF_CAPABILITIES	0x00000345
@@ -87,11 +96,15 @@
 #define MSR_IA32_MC0_ADDR		0x00000402
 #define MSR_IA32_MC0_MISC		0x00000403
 
+#define MSR_AMD64_MC0_MASK		0xc0010044
+
 #define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
 #define MSR_IA32_MCx_STATUS(x)		(MSR_IA32_MC0_STATUS + 4*(x))
 #define MSR_IA32_MCx_ADDR(x)		(MSR_IA32_MC0_ADDR + 4*(x))
 #define MSR_IA32_MCx_MISC(x)		(MSR_IA32_MC0_MISC + 4*(x))
 
+#define MSR_AMD64_MCx_MASK(x)		(MSR_AMD64_MC0_MASK + (x))
+
 /* These are consecutive and not in the normal 4er MCE bank block */
 #define MSR_IA32_MC0_CTL2		0x00000280
 #define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))
@@ -105,6 +118,7 @@
    complete list. */
 
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
+#define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
@@ -121,13 +135,18 @@
 #define MSR_AMD64_IBSDCLINAD		0xc0011038
 #define MSR_AMD64_IBSDCPHYSAD		0xc0011039
 #define MSR_AMD64_IBSCTL		0xc001103a
+#define MSR_AMD64_IBSBRTARGET		0xc001103b
+
+/* Fam 15h MSRs */
+#define MSR_F15H_PERF_CTL		0xc0010200
+#define MSR_F15H_PERF_CTR		0xc0010201
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE	0xc0010058
 #define FAM10H_MMIO_CONF_ENABLE		(1<<0)
 #define FAM10H_MMIO_CONF_BUSRANGE_MASK	0xf
 #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
-#define FAM10H_MMIO_CONF_BASE_MASK	0xfffffff
+#define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
 #define MSR_FAM10H_NODE_ID		0xc001100c
 
@@ -198,6 +217,7 @@
 #define MSR_IA32_TSC			0x00000010
 #define MSR_IA32_PLATFORM_ID		0x00000017
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
+#define MSR_EBC_FREQUENCY_ID		0x0000002c
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
 
 #define FEATURE_CONTROL_LOCKED				(1<<0)
@@ -251,6 +271,18 @@
 #define PACKAGE_THERM_INT_LOW_ENABLE		(1 << 1)
 #define PACKAGE_THERM_INT_PLN_ENABLE		(1 << 24)
 
+/* Thermal Thresholds Support */
+#define THERM_INT_THRESHOLD0_ENABLE	(1 << 15)
+#define THERM_SHIFT_THRESHOLD0		8
+#define THERM_MASK_THRESHOLD0		(0x7f << THERM_SHIFT_THRESHOLD0)
+#define THERM_INT_THRESHOLD1_ENABLE	(1 << 23)
+#define THERM_SHIFT_THRESHOLD1		16
+#define THERM_MASK_THRESHOLD1		(0x7f << THERM_SHIFT_THRESHOLD1)
+#define THERM_STATUS_THRESHOLD0		(1 << 6)
+#define THERM_LOG_THRESHOLD0		(1 << 7)
+#define THERM_STATUS_THRESHOLD1		(1 << 8)
+#define THERM_LOG_THRESHOLD1		(1 << 9)
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
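
The thermal-threshold fields added above are 7-bit values embedded in the thermal-interrupt MSR layout; the shift/mask pairs pull them out. A hedged userspace sketch with a mocked MSR value (real code would rdmsr first):

#include <stdint.h>
#include <stdio.h>

#define THERM_SHIFT_THRESHOLD0	8
#define THERM_MASK_THRESHOLD0	(0x7f << THERM_SHIFT_THRESHOLD0)
#define THERM_SHIFT_THRESHOLD1	16
#define THERM_MASK_THRESHOLD1	(0x7f << THERM_SHIFT_THRESHOLD1)

int main(void)
{
	uint64_t msr_val = 0x00150a00;	/* hypothetical rdmsr result */
	unsigned int t0 = (msr_val & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0;
	unsigned int t1 = (msr_val & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1;

	printf("threshold0=%u threshold1=%u\n", t0, t1);	/* 10 and 21 */
	return 0;
}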
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
new file mode 100644
index 000000000000..bcdff997668c
--- /dev/null
+++ b/arch/x86/include/asm/mwait.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_X86_MWAIT_H
+#define _ASM_X86_MWAIT_H
+
+#define MWAIT_SUBSTATE_MASK		0xf
+#define MWAIT_CSTATE_MASK		0xf
+#define MWAIT_SUBSTATE_SIZE		4
+#define MWAIT_MAX_NUM_CSTATES		8
+
+#define CPUID_MWAIT_LEAF		5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+#define CPUID5_ECX_INTERRUPT_BREAK	0x2
+
+#define MWAIT_ECX_INTERRUPT_BREAK	0x1
+
+#endif /* _ASM_X86_MWAIT_H */
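
For context: an MWAIT hint packs the target C-state field in the upper nibble and a sub-state in the lower nibble, which is what the masks above carve up. A standalone sketch with a hypothetical hint value:

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	0xf
#define MWAIT_CSTATE_MASK	0xf
#define MWAIT_SUBSTATE_SIZE	4

int main(void)
{
	unsigned int hint = 0x32;	/* hypothetical: C-state field 3, sub-state 2 */
	unsigned int cstate = (hint >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK;
	unsigned int substate = hint & MWAIT_SUBSTATE_MASK;

	printf("cstate=%u substate=%u\n", cstate, substate);
	return 0;
}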
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 932f0f86b4b7..4886a68f267e 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -5,41 +5,14 @@
 #include <asm/irq.h>
 #include <asm/io.h>
 
-#ifdef ARCH_HAS_NMI_WATCHDOG
+#ifdef CONFIG_X86_LOCAL_APIC
 
-/**
- * do_nmi_callback
- *
- * Check to see if a callback exists and execute it.  Return 1
- * if the handler exists and was handled successfully.
- */
-int do_nmi_callback(struct pt_regs *regs, int cpu);
-
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-extern int check_nmi_watchdog(void);
-#if !defined(CONFIG_LOCKUP_DETECTOR)
-extern int nmi_watchdog_enabled;
-#endif
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
 extern void release_perfctr_nmi(unsigned int);
 extern int reserve_evntsel_nmi(unsigned int);
 extern void release_evntsel_nmi(unsigned int);
 
-extern void setup_apic_nmi_watchdog(void *);
-extern void stop_apic_nmi_watchdog(void *);
-extern void disable_timer_nmi_watchdog(void);
-extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
-extern void cpu_nmi_set_wd_enabled(void);
-
-extern atomic_t nmi_active;
-extern unsigned int nmi_watchdog;
-#define NMI_NONE	0
-#define NMI_IO_APIC	1
-#define NMI_LOCAL_APIC	2
-#define NMI_INVALID	3
-
 struct ctl_table;
 extern int proc_nmi_enabled(struct ctl_table *, int ,
 			void __user *, size_t *, loff_t *);
@@ -47,33 +20,28 @@ extern int unknown_nmi_panic;
 
 void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
 
-static inline void localise_nmi_watchdog(void)
-{
-	if (nmi_watchdog == NMI_IO_APIC)
-		nmi_watchdog = NMI_LOCAL_APIC;
-}
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also setup some default high/normal/low settings for
+ * subsystems to registers with.  Using 4 bits to separate
+ * the priorities.  This can go a lot higher if needed be.
+ */
 
-/* check if nmi_watchdog is active (ie was specified at boot) */
-static inline int nmi_watchdog_active(void)
-{
-	/*
-	 * actually it should be:
-	 *	return (nmi_watchdog == NMI_LOCAL_APIC ||
-	 *		nmi_watchdog == NMI_IO_APIC)
-	 * but since they are power of two we could use a
-	 * cheaper way --cvg
-	 */
-	return nmi_watchdog & (NMI_LOCAL_APIC | NMI_IO_APIC);
-}
-#endif
+#define NMI_LOCAL_SHIFT		16	/* randomly picked */
+#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR		(1ULL << 8)
+#define NMI_NORMAL_PRIOR	(1ULL << 4)
+#define NMI_LOW_PRIOR		(1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
 
-void lapic_watchdog_stop(void);
-int lapic_watchdog_init(unsigned nmi_hz);
-int lapic_wd_event(unsigned nmi_hz);
-unsigned lapic_adjust_nmi_hz(unsigned hz);
-void disable_lapic_nmi_watchdog(void);
-void enable_lapic_nmi_watchdog(void);
 void stop_nmi(void);
 void restart_nmi(void);
 
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 6d8723a766cc..405b4032a60b 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -1,7 +1,13 @@
 #ifndef _ASM_X86_NOPS_H
 #define _ASM_X86_NOPS_H
 
-/* Define nops for use with alternative() */
+/*
+ * Define nops for use with alternative() and for tracing.
+ *
+ * *_NOP5_ATOMIC must be a single instruction.
+ */
+
+#define NOP_DS_PREFIX 0x3e
 
 /* generic versions from gas
    1: nop
@@ -13,14 +19,15 @@
    6: leal 0x00000000(%esi),%esi
    7: leal 0x00000000(,%esi,1),%esi
 */
-#define GENERIC_NOP1 ".byte 0x90\n"
-#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
-#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
-#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
-#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
-#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
+#define GENERIC_NOP1 0x90
+#define GENERIC_NOP2 0x89,0xf6
+#define GENERIC_NOP3 0x8d,0x76,0x00
+#define GENERIC_NOP4 0x8d,0x74,0x26,0x00
+#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4
+#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
+#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7
+#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4
 
 /* Opteron 64bit nops
    1: nop
@@ -29,16 +36,17 @@
    4: osp osp osp nop
 */
 #define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2	".byte 0x66,0x90\n"
-#define K8_NOP3	".byte 0x66,0x66,0x90\n"
-#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
-#define K8_NOP5	K8_NOP3 K8_NOP2
-#define K8_NOP6	K8_NOP3 K8_NOP3
-#define K8_NOP7	K8_NOP4 K8_NOP3
-#define K8_NOP8	K8_NOP4 K8_NOP4
+#define K8_NOP2	0x66,K8_NOP1
+#define K8_NOP3	0x66,K8_NOP2
+#define K8_NOP4	0x66,K8_NOP3
+#define K8_NOP5	K8_NOP3,K8_NOP2
+#define K8_NOP6	K8_NOP3,K8_NOP3
+#define K8_NOP7	K8_NOP4,K8_NOP3
+#define K8_NOP8	K8_NOP4,K8_NOP4
+#define K8_NOP5_ATOMIC 0x66,K8_NOP4
 
 /* K7 nops
-   uses eax dependencies (arbitary choice)
+   uses eax dependencies (arbitrary choice)
    1: nop
    2: movl %eax,%eax
    3: leal (,%eax,1),%eax
@@ -47,13 +55,14 @@
    7: leal 0x00000000(,%eax,1),%eax
 */
 #define K7_NOP1 GENERIC_NOP1
-#define K7_NOP2	".byte 0x8b,0xc0\n"
-#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
-#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
-#define K7_NOP5	K7_NOP4 ASM_NOP1
-#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
-#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
-#define K7_NOP8	K7_NOP7 ASM_NOP1
+#define K7_NOP2	0x8b,0xc0
+#define K7_NOP3	0x8d,0x04,0x20
+#define K7_NOP4	0x8d,0x44,0x20,0x00
+#define K7_NOP5	K7_NOP4,K7_NOP1
+#define K7_NOP6	0x8d,0x80,0,0,0,0
+#define K7_NOP7	0x8D,0x04,0x05,0,0,0,0
+#define K7_NOP8	K7_NOP7,K7_NOP1
+#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4
 
 /* P6 nops
    uses eax dependencies (Intel-recommended choice)
@@ -69,52 +78,65 @@
    There is kernel code that depends on this.
 */
 #define P6_NOP1 GENERIC_NOP1
-#define P6_NOP2	".byte 0x66,0x90\n"
-#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"
-#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"
-#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"
-#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
-#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
-#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
+#define P6_NOP2	0x66,0x90
+#define P6_NOP3	0x0f,0x1f,0x00
+#define P6_NOP4	0x0f,0x1f,0x40,0
+#define P6_NOP5	0x0f,0x1f,0x44,0x00,0
+#define P6_NOP6	0x66,0x0f,0x1f,0x44,0x00,0
+#define P6_NOP7	0x0f,0x1f,0x80,0,0,0,0
+#define P6_NOP8	0x0f,0x1f,0x84,0x00,0,0,0,0
+#define P6_NOP5_ATOMIC P6_NOP5
+
+#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
 
 #if defined(CONFIG_MK7)
-#define ASM_NOP1 K7_NOP1
-#define ASM_NOP2 K7_NOP2
-#define ASM_NOP3 K7_NOP3
-#define ASM_NOP4 K7_NOP4
-#define ASM_NOP5 K7_NOP5
-#define ASM_NOP6 K7_NOP6
-#define ASM_NOP7 K7_NOP7
-#define ASM_NOP8 K7_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC)
 #elif defined(CONFIG_X86_P6_NOP)
-#define ASM_NOP1 P6_NOP1
-#define ASM_NOP2 P6_NOP2
-#define ASM_NOP3 P6_NOP3
-#define ASM_NOP4 P6_NOP4
-#define ASM_NOP5 P6_NOP5
-#define ASM_NOP6 P6_NOP6
-#define ASM_NOP7 P6_NOP7
-#define ASM_NOP8 P6_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC)
 #elif defined(CONFIG_X86_64)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC)
 #else
-#define ASM_NOP1 GENERIC_NOP1
-#define ASM_NOP2 GENERIC_NOP2
-#define ASM_NOP3 GENERIC_NOP3
-#define ASM_NOP4 GENERIC_NOP4
-#define ASM_NOP5 GENERIC_NOP5
-#define ASM_NOP6 GENERIC_NOP6
-#define ASM_NOP7 GENERIC_NOP7
-#define ASM_NOP8 GENERIC_NOP8
+#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC)
 #endif
 
 #define ASM_NOP_MAX 8
+#define NOP_ATOMIC5 (ASM_NOP_MAX+1)	/* Entry for the 5-byte atomic NOP */
+
+#ifndef __ASSEMBLY__
+extern const unsigned char * const *ideal_nops;
+extern void arch_init_ideal_nops(void);
+#endif
 
 #endif /* _ASM_X86_NOPS_H */
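
The restructuring above turns the NOP macros from ready-made ".byte ...\n" strings into bare byte lists, so one definition can feed both inline asm (stringified through _ASM_MK_NOP) and C byte arrays such as the new ideal_nops table. A standalone mock of the two-level stringification trick; __stringify is redefined locally to keep the demo self-contained:

#include <stdio.h>

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define GENERIC_NOP2		0x89,0xf6
#define _ASM_MK_NOP(x)		".byte " __stringify(x) "\n"

int main(void)
{
	/* the same macro doubles as array initializer data: */
	const unsigned char nop2[] = { GENERIC_NOP2 };

	printf("%s", _ASM_MK_NOP(GENERIC_NOP2));	/* .byte 0x89,0xf6 */
	printf("len=%zu\n", sizeof(nop2));		/* 2 */
	return 0;
}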
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 27da400d3138..bfacd2ccf651 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -1,5 +1,85 @@
+#ifndef _ASM_X86_NUMA_H
+#define _ASM_X86_NUMA_H
+
+#include <linux/nodemask.h>
+
+#include <asm/topology.h>
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS		(MAX_NUMNODES*2)
+#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
+
+/*
+ * Too small node sizes may confuse the VM badly. Usually they
+ * result from BIOS bugs. So dont recognize nodes as standalone
+ * NUMA entities that have less than this amount of RAM listed:
+ */
+#define NODE_MIN_SIZE (4*1024*1024)
+
+extern int numa_off;
+
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+extern void __init numa_set_distance(int from, int to, int distance);
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+	__apicid_to_node[apicid] = node;
+}
+
+extern int __cpuinit numa_cpu_node(int cpu);
+
+#else	/* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+
+static inline int numa_cpu_node(int cpu)
+{
+	return NUMA_NO_NODE;
+}
+#endif	/* CONFIG_NUMA */
+
 #ifdef CONFIG_X86_32
 # include "numa_32.h"
 #else
 # include "numa_64.h"
 #endif
+
+#ifdef CONFIG_NUMA
+extern void __cpuinit numa_set_node(int cpu, int node);
+extern void __cpuinit numa_clear_node(int cpu);
+extern void __init init_cpu_to_node(void);
+extern void __cpuinit numa_add_cpu(int cpu);
+extern void __cpuinit numa_remove_cpu(int cpu);
+#else	/* CONFIG_NUMA */
+static inline void numa_set_node(int cpu, int node)	{ }
+static inline void numa_clear_node(int cpu)		{ }
+static inline void init_cpu_to_node(void)		{ }
+static inline void numa_add_cpu(int cpu)		{ }
+static inline void numa_remove_cpu(int cpu)		{ }
+#endif	/* CONFIG_NUMA */
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
+#endif
+
+#ifdef CONFIG_NUMA_EMU
+#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
+#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
+#endif /* CONFIG_NUMA_EMU */
+
+#endif	/* _ASM_X86_NUMA_H */
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index a37229011b56..e7d6b8254742 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -1,9 +1,6 @@
 #ifndef _ASM_X86_NUMA_32_H
 #define _ASM_X86_NUMA_32_H
 
-extern int pxm_to_nid(int pxm);
-extern void numa_remove_cpu(int cpu);
-
 #ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
 #else
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070e7c26..0c05f7ae46e8 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -1,52 +1,6 @@
 #ifndef _ASM_X86_NUMA_64_H
 #define _ASM_X86_NUMA_64_H
 
-#include <linux/nodemask.h>
-#include <asm/apicdef.h>
-
-struct bootnode {
-	u64 start;
-	u64 end;
-};
-
-extern int compute_hash_shift(struct bootnode *nodes, int numblks,
-			      int *nodeids);
-
-#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
-
-extern void numa_init_array(void);
-extern int numa_off;
-
-extern s16 apicid_to_node[MAX_LOCAL_APIC];
-
 extern unsigned long numa_free_all_bootmem(void);
-extern void setup_node_bootmem(int nodeid, unsigned long start,
-			       unsigned long end);
-
-#ifdef CONFIG_NUMA
-/*
- * Too small node sizes may confuse the VM badly. Usually they
- * result from BIOS bugs. So dont recognize nodes as standalone
- * NUMA entities that have less than this amount of RAM listed:
- */
-#define NODE_MIN_SIZE (4*1024*1024)
-
-extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_set_node(int cpu, int node);
-extern void __cpuinit numa_clear_node(int cpu);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
-
-#ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	((u64)64 << 20)
-#define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
-#endif /* CONFIG_NUMA_EMU */
-#else
-static inline void init_cpu_to_node(void)		{ }
-static inline void numa_set_node(int cpu, int node)	{ }
-static inline void numa_clear_node(int cpu)		{ }
-static inline void numa_add_cpu(int cpu, int node)	{ }
-static inline void numa_remove_cpu(int cpu)		{ }
-#endif
 
 #endif /* _ASM_X86_NUMA_64_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 37c516545ec8..c3b3c322fd87 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -29,7 +29,7 @@
 #ifdef CONFIG_X86_NUMAQ
 
 extern int found_numaq;
-extern int get_memcfg_numaq(void);
+extern int numaq_numa_init(void);
 extern int pci_numaq_init(void);
 
 extern void *xquad_portio;
@@ -166,11 +166,6 @@ struct sys_cfg_data {
 
 void numaq_tsc_disable(void);
 
-#else
-static inline int get_memcfg_numaq(void)
-{
-	return 0;
-}
 #endif /* CONFIG_X86_NUMAQ */
 #endif /* _ASM_X86_NUMAQ_H */
 
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 101229b0d8ed..5ca6801b75f3 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;
 
 /*
  * OLPC board IDs contain the major build number within the mask 0x0ff0,
- * and the minor build number withing 0x000f.  Pre-builds have a minor
+ * and the minor build number within 0x000f.  Pre-builds have a minor
  * number less than 8, and normal builds start at 8.  For example, 0x0B10
  * is a PreB1, and 0x0C18 is a C1.
  */
@@ -89,6 +89,8 @@ extern int olpc_ec_mask_unset(uint8_t bits);
 /* EC commands */
 
 #define EC_FIRMWARE_REV		0x08
+#define EC_WLAN_ENTER_RESET	0x35
+#define EC_WLAN_LEAVE_RESET	0x25
 
 /* SCI source values */
 
@@ -105,10 +107,14 @@ extern int olpc_ec_mask_unset(uint8_t bits);
 /* GPIO assignments */
 
 #define OLPC_GPIO_MIC_AC	1
-#define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
+#define OLPC_GPIO_DCON_STAT0	5
+#define OLPC_GPIO_DCON_STAT1	6
+#define OLPC_GPIO_DCON_IRQ	7
 #define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
-#define OLPC_GPIO_SMB_CLK	geode_gpio(14)
-#define OLPC_GPIO_SMB_DATA	geode_gpio(15)
+#define OLPC_GPIO_DCON_LOAD	11
+#define OLPC_GPIO_DCON_BLANK	12
+#define OLPC_GPIO_SMB_CLK	14
+#define OLPC_GPIO_SMB_DATA	15
 #define OLPC_GPIO_WORKAUX	geode_gpio(24)
 #define OLPC_GPIO_LID		geode_gpio(26)
 #define OLPC_GPIO_ECSCI		geode_gpio(27)
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h
index 08fde475cb3b..24487712e0b1 100644
--- a/arch/x86/include/asm/olpc_ofw.h
+++ b/arch/x86/include/asm/olpc_ofw.h
@@ -6,7 +6,9 @@
 
 #define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */
 
-#ifdef CONFIG_OLPC_OPENFIRMWARE
+#ifdef CONFIG_OLPC
+
+extern bool olpc_ofw_is_installed(void);
 
 /* run an OFW command by calling into the firmware */
 #define olpc_ofw(name, args, res) \
@@ -21,11 +23,15 @@ extern void olpc_ofw_detect(void);
 /* install OFW's pde permanently into the kernel's pgtable */
 extern void setup_olpc_ofw_pgd(void);
 
-#else /* !CONFIG_OLPC_OPENFIRMWARE */
+/* check if OFW was detected during boot */
+extern bool olpc_ofw_present(void);
+
+extern void olpc_dt_build_devicetree(void);
 
+#else /* !CONFIG_OLPC */
 static inline void olpc_ofw_detect(void) { }
 static inline void setup_olpc_ofw_pgd(void) { }
-
-#endif /* !CONFIG_OLPC_OPENFIRMWARE */
+static inline void olpc_dt_build_devicetree(void) { }
+#endif /* !CONFIG_OLPC */
 
 #endif /* _ASM_X86_OLPC_OFW_H */
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 6f1b7331313f..ade619ff9e2a 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,11 +15,7 @@
  */
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 
-#ifdef CONFIG_4KSTACKS
-#define THREAD_ORDER	0
-#else
 #define THREAD_ORDER	1
-#endif
 #define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
 
 #define STACKFAULT_STACK 0
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index a667f24c7254..bce688d54c12 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -2,13 +2,14 @@
 #define _ASM_X86_PAGE_DEFS_H

 #include <linux/const.h>
+#include <linux/types.h>

 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))

-#define __PHYSICAL_MASK		((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

 /* Cast PAGE_MASK to a signed type so that it is sign-extended if
@@ -45,11 +46,15 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;

+static inline phys_addr_t get_max_mapped(void)
+{
+	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);

-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn,
-			 int acpi, int k8);
+extern void initmem_init(void);
 extern void free_initmem(void);

 #endif	/* !__ASSEMBLY__ */
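
Editor's note: get_max_mapped() folds the recurring "(phys_addr_t)pfn << PAGE_SHIFT" conversion into one place, with the cast done before the shift so the result cannot overflow on 32-bit PAE. A hedged illustration of the arithmetic (the values are made up):

	/* With PAGE_SHIFT == 12, a max_pfn_mapped of 0x40000 (262144 frames)
	 * yields 0x40000 << 12 == 0x40000000, i.e. a 1 GiB direct mapping. */
	phys_addr_t top = get_max_mapped();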
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5653f43d90e5..ebbc4d8ab170 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -105,14 +105,14 @@ static inline void write_cr8(unsigned long x)
 }
 #endif

-static inline void raw_safe_halt(void)
+static inline void arch_safe_halt(void)
 {
 	PVOP_VCALL0(pv_irq_ops.safe_halt);
 }

 static inline void halt(void)
 {
-	PVOP_VCALL0(pv_irq_ops.safe_halt);
+	PVOP_VCALL0(pv_irq_ops.halt);
 }

 static inline void wbinvd(void)
@@ -416,11 +416,6 @@ static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }

-static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
-					    unsigned long start, unsigned long count)
-{
-	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
-}
 static inline void paravirt_release_pmd(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
@@ -440,6 +435,11 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 {
 	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
 }
+static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp)
+{
+	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
+}

 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
 				    pte_t *ptep)
@@ -447,6 +447,12 @@ static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
 	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
 }

+static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
+				    pmd_t *pmdp)
+{
+	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
+}
+
 static inline pte_t __pte(pteval_t val)
 {
 	pteval_t ret;
@@ -548,6 +554,19 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 }

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	if (sizeof(pmdval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
+	else
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+			    native_pmd_val(pmd));
+}
+#endif
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	pmdval_t val = native_pmd_val(pmd);
@@ -829,32 +848,32 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })

-static inline unsigned long __raw_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }

-static inline void raw_local_irq_restore(unsigned long f)
+static inline notrace void arch_local_irq_restore(unsigned long f)
 {
 	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }

-static inline void raw_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }

-static inline void raw_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }

-static inline unsigned long __raw_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long f;

-	f = __raw_local_save_flags();
-	raw_local_irq_disable();
+	f = arch_local_save_flags();
+	arch_local_irq_disable();
 	return f;
 }

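Editor's note: after this rename the paravirt wrappers line up with the generic irqflags naming (arch_local_irq_save() and friends), and the notrace annotation keeps the function tracer from recursing through them. A simplified sketch of the native backend these hooks default to -- not the literal kernel code, just the composition the last wrapper performs:

	/* Hedged sketch; the real native ops live in asm/irqflags.h. */
	static unsigned long sketch_native_save_fl(void)
	{
		unsigned long flags;

		asm volatile("pushf ; pop %0" : "=rm" (flags) : : "memory");
		return flags;
	}

	static void sketch_native_irq_disable(void)
	{
		asm volatile("cli" : : : "memory");
	}

	static unsigned long sketch_arch_local_irq_save(void)
	{
		unsigned long f = sketch_native_save_fl();	/* save_fl hook  */

		sketch_native_irq_disable();			/* irq_disable   */
		return f;
	}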
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index db9ef5532341..82885099c869 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -255,7 +255,6 @@ struct pv_mmu_ops {
  */
 	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
 	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
-	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
 	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
 	void (*release_pte)(unsigned long pfn);
 	void (*release_pmd)(unsigned long pfn);
@@ -266,10 +265,16 @@ struct pv_mmu_ops {
 	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
+			   pmd_t *pmdp, pmd_t pmdval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
+	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
+			   pmd_t *pmdp);
+	void (*pmd_update_defer)(struct mm_struct *mm,
+				 unsigned long addr, pmd_t *pmdp);

 	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep);
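
Editor's note: a paravirt backend opts in by filling these slots at boot; whatever it leaves at the native defaults keeps bare-metal behaviour. A hypothetical fragment (the demo_* names are invented for illustration; real backends patch pv_mmu_ops during early init):

	/* Hypothetical backend wiring for the new pmd hooks. */
	static void demo_pmd_update(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
	{
		/* e.g. queue a hypercall describing the pmd change */
	}

	static void __init demo_mmu_init(void)
	{
		pv_mmu_ops.pmd_update       = demo_pmd_update;
		pv_mmu_ops.pmd_update_defer = demo_pmd_update;
	}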
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index d395540ff894..d498943b906c 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -7,6 +7,7 @@
 #include <linux/string.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/x86_init.h>

 #ifdef __KERNEL__
12 13
@@ -64,6 +65,7 @@ extern unsigned long pci_mem_start;

 #define PCIBIOS_MIN_CARDBUS_IO	0x4000

+extern int pcibios_enabled;
 void pcibios_config_init(void);
 struct pci_bus *pcibios_scan_root(int bus);

@@ -94,8 +96,36 @@ static inline void early_quirks(void) { }

 extern void pci_iommu_alloc(void);

-/* MSI arch hook */
-#define arch_setup_msi_irqs arch_setup_msi_irqs
+#ifdef CONFIG_PCI_MSI
+/* MSI arch specific hooks */
+static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	return x86_msi.setup_msi_irqs(dev, nvec, type);
+}
+
+static inline void x86_teardown_msi_irqs(struct pci_dev *dev)
+{
+	x86_msi.teardown_msi_irqs(dev);
+}
+
+static inline void x86_teardown_msi_irq(unsigned int irq)
+{
+	x86_msi.teardown_msi_irq(irq);
+}
+#define arch_setup_msi_irqs x86_setup_msi_irqs
+#define arch_teardown_msi_irqs x86_teardown_msi_irqs
+#define arch_teardown_msi_irq x86_teardown_msi_irq
+/* implemented in arch/x86/kernel/apic/io_apic. */
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void native_teardown_msi_irq(unsigned int irq);
+/* default to the implementation in drivers/lib/msi.c */
+#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+void default_teardown_msi_irqs(struct pci_dev *dev);
+#else
+#define native_setup_msi_irqs		NULL
+#define native_teardown_msi_irq		NULL
+#define default_teardown_msi_irqs	NULL
+#endif

 #define PCI_DMA_BUS_IS_PHYS	(dma_ops->is_phys)
101 131
@@ -105,8 +135,6 @@ extern void pci_iommu_alloc(void);
 #include "pci_64.h"
 #endif

-void dma32_reserve_bootmem(void);
-
 /* implement the pci_ DMA API in terms of the generic device dma_ one */
 #include <asm-generic/pci-dma-compat.h>

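Editor's note: the point of routing the arch_*_msi_* hooks through the x86_msi table is that a hypervisor can swap in its own MSI handling at boot without touching drivers. A hedged sketch of the idea (the struct and its fields are those used in the diff above, declared via asm/x86_init.h; the override function is hypothetical):

	/* Hypothetical override, e.g. what a Xen-style backend might install. */
	static int demo_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
	{
		/* route MSI setup through the hypervisor instead of the IO-APIC */
		return -ENOSYS;	/* placeholder body */
	}

	static void __init demo_msi_init(void)
	{
		x86_msi.setup_msi_irqs = demo_setup_msi_irqs;
		/* teardown hooks keep their native/default implementations */
	}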
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 49c7219826f9..704526734bef 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -47,6 +47,7 @@ enum pci_bf_sort_state {
 extern unsigned int pcibios_max_latency;

 void pcibios_resource_survey(void);
+void pcibios_set_cache_line_size(void);

 /* pci-pc.c */

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index cd28f9ad910d..a0a9779084d1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -45,12 +45,28 @@
 #include <linux/stringify.h>

 #ifdef CONFIG_SMP
-#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
+#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
 #define __my_cpu_offset		percpu_read(this_cpu_off)
+
+/*
+ * Compared to the generic __my_cpu_offset version, the following
+ * saves one instruction and avoids clobbering a temp register.
+ */
+#define __this_cpu_ptr(ptr)				\
+({							\
+	unsigned long tcp_ptr__;			\
+	__verify_pcpu_ptr(ptr);				\
+	asm volatile("add " __percpu_arg(1) ", %0"	\
+		     : "=r" (tcp_ptr__)			\
+		     : "m" (this_cpu_off), "0" (ptr));	\
+	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
+})
 #else
-#define __percpu_arg(x)		"%P" #x
+#define __percpu_prefix		""
 #endif

+#define __percpu_arg(x)		__percpu_prefix "%P" #x
+
 /*
  * Initialized pointers to per-cpu variables needed for the boot
  * processor need to use these macros to get the proper address
@@ -216,6 +232,125 @@ do { \
 })

 /*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)					\
+({									\
+	typeof(var) paro_ret__ = val;					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("xaddb %0, "__percpu_arg(1)				\
+			    : "+q" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 2:								\
+		asm("xaddw %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 4:								\
+		asm("xaddl %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 8:								\
+		asm("xaddq %0, "__percpu_arg(1)				\
+			    : "+re" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	paro_ret__ += val;						\
+	paro_ret__;							\
+})
+
+/*
+ * xchg is implemented using cmpxchg without a lock prefix. xchg is
+ * expensive due to the implied lock prefix.  The processor cannot prefetch
+ * cachelines if xchg is used.
+ */
+#define percpu_xchg_op(var, nval)					\
+({									\
+	typeof(var) pxo_ret__;						\
+	typeof(var) pxo_new__ = (nval);					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("\n\tmov "__percpu_arg(1)",%%al"			\
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
+		    "\n\tjnz 1b"					\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
+			    : "q" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	case 2:								\
+		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
+		    "\n\tjnz 1b"					\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
+			    : "r" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	case 4:								\
+		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
+		    "\n\tjnz 1b"					\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
+			    : "r" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	case 8:								\
+		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
+		    "\n\tjnz 1b"					\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
+			    : "r" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	pxo_ret__;							\
+})
+
+/*
+ * cmpxchg has no such implied lock semantics as a result it is much
+ * more efficient for cpu local operations.
+ */
+#define percpu_cmpxchg_op(var, oval, nval)				\
+({									\
+	typeof(var) pco_ret__;						\
+	typeof(var) pco_old__ = (oval);					\
+	typeof(var) pco_new__ = (nval);					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("cmpxchgb %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "q" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	case 2:								\
+		asm("cmpxchgw %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "r" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	case 4:								\
+		asm("cmpxchgl %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "r" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	case 8:								\
+		asm("cmpxchgq %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "r" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	pco_ret__;							\
+})
+
+/*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
  * percpu_read_stable() is more efficient and can be used if its value
@@ -253,6 +388,12 @@ do { \
 #define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+/*
+ * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
+ * faster than an xchg with forced lock semantics.
+ */
+#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

 #define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -272,6 +413,9 @@ do { \
 #define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -285,6 +429,49 @@ do { \
 #define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
+
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#endif /* !CONFIG_M386 */
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy = n2;					\
+	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
+		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
+		    :  "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */

 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
@@ -297,6 +484,7 @@ do { \
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)

 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -304,11 +492,48 @@ do { \
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software.  The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#ifdef CONFIG_SMP
+#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
+#else
+#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
+#endif
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy;						\
+	alternative_io(CMPXCHG16B_EMU_CALL,				\
+		       "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t",	\
+		       X86_FEATURE_CX16,				\
+		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
+		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
+		       "a"(__o1), "d"(__o2) : "memory");		\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)

 #endif

@@ -322,6 +547,33 @@ do { \
 	old__;								\
 })

+static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+			const unsigned long __percpu *addr)
+{
+	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
+
+	return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
+}
+
+static inline int x86_this_cpu_variable_test_bit(int nr,
+			const unsigned long __percpu *addr)
+{
+	int oldbit;
+
+	asm volatile("bt "__percpu_arg(2)",%1\n\t"
+			"sbb %0,%0"
+			: "=r" (oldbit)
+			: "m" (*(unsigned long *)addr), "Ir" (nr));
+
+	return oldbit;
+}
+
+#define x86_this_cpu_test_bit(nr, addr)			\
+	(__builtin_constant_p((nr))			\
+	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
+	 : x86_this_cpu_variable_test_bit((nr), (addr)))
+
+
 #include <asm-generic/percpu.h>

 /* We can use this directly for local CPU (faster). */
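
Editor's note: taken together these additions let per-CPU state be read-modify-written atomically with respect to the local CPU, without LOCK prefixes. A hedged usage sketch built only on the size-suffixed macros defined in this diff (the per-CPU variable and functions are hypothetical; real callers normally go through the size-dispatching this_cpu_* wrappers):

	/* Hypothetical per-CPU counter using the new ops. */
	static DEFINE_PER_CPU(unsigned long, demo_events);

	static unsigned long demo_bump(void)
	{
		/* xadd under the hood: returns the post-increment value */
		return this_cpu_add_return_8(demo_events, 1);
	}

	static unsigned long demo_reset(void)
	{
		/* cmpxchg loop under the hood: fetch old value, store 0 */
		return this_cpu_xchg_8(demo_events, 0);
	}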
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6e742cc4251b..d9d4dae305f6 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -111,20 +111,20 @@ union cpuid10_edx {
 #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)

 /* IbsFetchCtl bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
 #define IBS_FETCH_ENABLE	(1ULL<<48)
 #define IBS_FETCH_CNT		0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT	0x0000FFFFULL

 /* IbsOpCtl bits */
 #define IBS_OP_CNT_CTL		(1ULL<<19)
 #define IBS_OP_VAL		(1ULL<<18)
 #define IBS_OP_ENABLE		(1ULL<<17)
 #define IBS_OP_MAX_CNT		0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */

 #ifdef CONFIG_PERF_EVENTS
-extern void init_hw_perf_events(void);
 extern void perf_events_lapic_init(void);

 #define PERF_EVENT_INDEX_OFFSET			0
@@ -155,7 +155,6 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 }

 #else
-static inline void init_hw_perf_events(void)		{ }
 static inline void perf_events_lapic_init(void)	{ }
 #endif

diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index def500776b16..56fd9e3abbda 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -1,5 +1,5 @@
 /*
- * Netburst Perfomance Events (P4, old Xeon)
+ * Netburst Performance Events (P4, old Xeon)
  */

 #ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
 #include <linux/bitops.h>

 /*
- * NetBurst has perfomance MSRs shared between
+ * NetBurst has performance MSRs shared between
  * threads if HT is turned on, ie for both logical
  * processors (mem: in turn in Atom with HT support
  * perf-MSRs are not shared and every thread has its
@@ -20,6 +20,10 @@
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)

+#define ARCH_P4_CNTRVAL_BITS	(40)
+#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT	((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
+
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
 #define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
@@ -36,19 +40,6 @@
 #define P4_ESCR_EMASK(v)	((v) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_TAG(v)		((v) << P4_ESCR_TAG_SHIFT)

-/* Non HT mask */
-#define P4_ESCR_MASK			\
-	(P4_ESCR_EVENT_MASK	|	\
-	P4_ESCR_EVENTMASK_MASK	|	\
-	P4_ESCR_TAG_MASK	|	\
-	P4_ESCR_TAG_ENABLE	|	\
-	P4_ESCR_T0_OS		|	\
-	P4_ESCR_T0_USR)
-
-/* HT mask */
-#define P4_ESCR_MASK_HT			\
-	(P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
-
 #define P4_CCCR_OVF			0x80000000U
 #define P4_CCCR_CASCADE			0x40000000U
 #define P4_CCCR_OVF_PMI_T0		0x04000000U
@@ -70,23 +61,6 @@
 #define P4_CCCR_THRESHOLD(v)		((v) << P4_CCCR_THRESHOLD_SHIFT)
 #define P4_CCCR_ESEL(v)			((v) << P4_CCCR_ESCR_SELECT_SHIFT)

-/* Non HT mask */
-#define P4_CCCR_MASK				\
-	(P4_CCCR_OVF			|	\
-	P4_CCCR_CASCADE			|	\
-	P4_CCCR_OVF_PMI_T0		|	\
-	P4_CCCR_FORCE_OVF		|	\
-	P4_CCCR_EDGE			|	\
-	P4_CCCR_THRESHOLD_MASK		|	\
-	P4_CCCR_COMPLEMENT		|	\
-	P4_CCCR_COMPARE			|	\
-	P4_CCCR_ESCR_SELECT_MASK	|	\
-	P4_CCCR_ENABLE)
-
-/* HT mask */
-#define P4_CCCR_MASK_HT				\
-	(P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
-
 #define P4_GEN_ESCR_EMASK(class, name, bit) \
 	class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_EMASK_BIT(class, name)	class##__##name
@@ -127,6 +101,28 @@
 #define P4_CONFIG_HT_SHIFT		63
 #define P4_CONFIG_HT			(1ULL << P4_CONFIG_HT_SHIFT)

+/*
+ * The bits we allow to pass for RAW events
+ */
+#define P4_CONFIG_MASK_ESCR		\
+	P4_ESCR_EVENT_MASK	|	\
+	P4_ESCR_EVENTMASK_MASK	|	\
+	P4_ESCR_TAG_MASK	|	\
+	P4_ESCR_TAG_ENABLE
+
+#define P4_CONFIG_MASK_CCCR		\
+	P4_CCCR_EDGE		|	\
+	P4_CCCR_THRESHOLD_MASK	|	\
+	P4_CCCR_COMPLEMENT	|	\
+	P4_CCCR_COMPARE		|	\
+	P4_CCCR_THREAD_ANY	|	\
+	P4_CCCR_RESERVED
+
+/* some dangerous bits are reserved for kernel internals */
+#define P4_CONFIG_MASK					\
+	(p4_config_pack_escr(P4_CONFIG_MASK_ESCR))	| \
+	(p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
+
 static inline bool p4_is_event_cascaded(u64 config)
 {
 	u32 cccr = p4_config_unpack_cccr(config);
@@ -752,14 +748,6 @@ enum P4_ESCR_EMASKS {
 };

 /*
- * P4 PEBS specifics (Replay Event only)
- *
- * Format (bits):
- *   0-6: metric from P4_PEBS_METRIC enum
- *    7 : reserved
- *    8 : reserved
- * 9-11 : reserved
- *
  * Note we have UOP and PEBS bits reserved for now
  * just in case if we will need them once
  */
@@ -796,5 +784,60 @@ enum P4_PEBS_METRIC {
 	P4_PEBS_METRIC__max
 };

+/*
+ * Notes on internal configuration of ESCR+CCCR tuples
+ *
+ * Since P4 has quite the different architecture of
+ * performance registers in compare with "architectural"
+ * once and we have on 64 bits to keep configuration
+ * of performance event, the following trick is used.
+ *
+ * 1) Since both ESCR and CCCR registers have only low
+ *    32 bits valuable, we pack them into a single 64 bit
+ *    configuration. Low 32 bits of such config correspond
+ *    to low 32 bits of CCCR register and high 32 bits
+ *    correspond to low 32 bits of ESCR register.
+ *
+ * 2) The meaning of every bit of such config field can
+ *    be found in Intel SDM but it should be noted that
+ *    we "borrow" some reserved bits for own usage and
+ *    clean them or set to a proper value when we do
+ *    a real write to hardware registers.
+ *
+ * 3) The format of bits of config is the following
+ *    and should be either 0 or set to some predefined
+ *    values:
+ *
+ *    Low 32 bits
+ *    -----------
+ *      0-6: P4_PEBS_METRIC enum
+ *     7-11: reserved
+ *       12: reserved (Enable)
+ *    13-15: reserved (ESCR select)
+ *    16-17: Active Thread
+ *       18: Compare
+ *       19: Complement
+ *    20-23: Threshold
+ *       24: Edge
+ *       25: reserved (FORCE_OVF)
+ *       26: reserved (OVF_PMI_T0)
+ *       27: reserved (OVF_PMI_T1)
+ *    28-29: reserved
+ *       30: reserved (Cascade)
+ *       31: reserved (OVF)
+ *
+ *    High 32 bits
+ *    ------------
+ *        0: reserved (T1_USR)
+ *        1: reserved (T1_OS)
+ *        2: reserved (T0_USR)
+ *        3: reserved (T0_OS)
+ *        4: Tag Enable
+ *      5-8: Tag Value
+ *     9-24: Event Mask (may use P4_ESCR_EMASK_BIT helper)
+ *    25-30: enum P4_EVENTS
+ *       31: reserved (HT thread)
+ */
+
 #endif /* PERF_EVENT_P4_H */

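Editor's note: the packing scheme described in the notes above can be made concrete with a small sketch. p4_config_pack_escr()/p4_config_pack_cccr() (helpers already used by P4_CONFIG_MASK in this header) place the 32-bit ESCR and CCCR images into the high and low halves of one u64; the sample register images below are made up:

	/* Illustrative only: pack a fake ESCR/CCCR pair into one config. */
	u32 escr = 0x0003b000;	/* made-up ESCR image */
	u32 cccr = 0x00039000;	/* made-up CCCR image */
	u64 config = p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
	/* low 32 bits now hold the CCCR image, high 32 bits the ESCR image */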
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 271de94c3810..b4389a468fb6 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -92,7 +92,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-				  unsigned long adddress)
+				  unsigned long address)
 {
 	___pmd_free_tlb(tlb, pmd);
 }
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 2334982b339e..98391db840c6 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -46,6 +46,15 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif

+#ifdef CONFIG_SMP
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+	return __pmd(xchg((pmdval_t *)xp, 0));
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 177b0165ea01..effff47a3c82 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd)

 static inline void pud_clear(pud_t *pudp)
 {
-	unsigned long pgd;
-
 	set_pud(pudp, __pud(0));

 	/*
@@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp)
 	 * section 8.1: in PAE mode we explicitly have to flush the
 	 * TLB via cr3 if the top-level pgd is changed...
 	 *
-	 * Make sure the pud entry we're updating is within the
-	 * current pgd to avoid unnecessary TLB flushes.
+	 * Currently all places where pud_clear() is called either have
+	 * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
+	 * pud_clear_bad()), so we don't need TLB flush here.
 	 */
-	pgd = read_cr3();
-	if (__pa(pudp) >= pgd && __pa(pudp) <
-	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
-		write_cr3(pgd);
 }

 #ifdef CONFIG_SMP
@@ -104,6 +99,29 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif

+#ifdef CONFIG_SMP
+union split_pmd {
+	struct {
+		u32 pmd_low;
+		u32 pmd_high;
+	};
+	pmd_t pmd;
+};
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	union split_pmd res, *orig = (union split_pmd *)pmdp;
+
+	/* xchg acts as a barrier before setting of the high bits */
+	res.pmd_low = xchg(&orig->pmd_low, 0);
+	res.pmd_high = orig->pmd_high;
+	orig->pmd_high = 0;
+
+	return res.pmd;
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a34c785c5a63..18601c86fab1 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -28,11 +28,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;

+extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
+#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
@@ -57,6 +60,8 @@ extern struct list_head pgd_list;

 #define pte_update(mm, addr, ptep)              do { } while (0)
 #define pte_update_defer(mm, addr, ptep)        do { } while (0)
+#define pmd_update(mm, addr, ptep)              do { } while (0)
+#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -92,6 +97,11 @@ static inline int pte_young(pte_t pte)
 	return pte_flags(pte) & _PAGE_ACCESSED;
 }

+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_RW;
@@ -140,6 +150,23 @@ static inline int pmd_large(pmd_t pte)
 		(_PAGE_PSE | _PAGE_PRESENT);
 }

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_SPLITTING;
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_PSE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return cpu_has_pse;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 {
 	pteval_t v = native_pte_val(pte);
@@ -214,6 +241,55 @@ static inline pte_t pte_mkspecial(pte_t pte)
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }

+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+
 /*
  * Mask out unsupported bits in a present pgprot. Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -254,6 +330,16 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte(val);
 }

+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmdval_t val = pmd_val(pmd);
+
+	val &= _HPAGE_CHG_MASK;
+	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+
+	return __pmd(val);
+}
+
 /* mprotect needs to preserve PAT bits when updating vm_page_prot */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
@@ -348,7 +434,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -522,12 +608,26 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 	return res;
 }

+static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	pmd_t res = *pmdp;
+
+	native_pmd_clear(pmdp);
+	return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep , pte_t pte)
 {
 	native_set_pte(ptep, pte);
 }

+static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
+				     pmd_t *pmdp , pmd_t pmd)
+{
+	native_set_pmd(pmdp, pmd);
+}
+
 #ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
@@ -603,6 +703,51 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
 	pte_update(mm, addr, ptep);
 }

+#define flush_tlb_fix_spurious_fault(vma, address)
+
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+	pmd_update(mm, addr, pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp)
+{
+	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+	pmd_update(mm, addr, pmdp);
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
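
Editor's note: these helpers give transparent-hugepage code the same vocabulary for pmds that has long existed for ptes. A hedged sketch of how a huge pmd might be composed from them (the function is hypothetical; all helpers used are defined in the diff above):

	/* Hypothetical: build a young, dirty, writable 2M mapping entry. */
	static pmd_t demo_make_huge_pmd(struct page *page, pgprot_t prot)
	{
		pmd_t entry = mk_pmd(page, prot);	/* pfn + protection bits */

		entry = pmd_mkhuge(entry);		/* set _PAGE_PSE */
		entry = pmd_mkwrite(pmd_mkdirty(entry));
		return pmd_mkyoung(entry);
	}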
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index f686f49e8b7b..0c92113c4cb6 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -26,7 +26,7 @@ struct mm_struct;
 struct vm_area_struct;

 extern pgd_t swapper_pg_dir[1024];
-extern pgd_t trampoline_pg_dir[1024];
+extern pgd_t initial_page_table[1024];

 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 #endif

 #if defined(CONFIG_HIGHPTE)
-#define __KM_PTE			\
-	(in_nmi() ? KM_NMI_PTE : 	\
-	 in_irq() ? KM_IRQ_PTE :	\
-	 KM_PTE0)
 #define pte_offset_map(dir, address)				\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +	\
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
 	 pte_index((address)))
-#define pte_offset_map_nested(dir, address)			\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +	\
-	 pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
 #else
 #define pte_offset_map(dir, address)				\
 	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif

 /* Clear a kernel PTE and flush it from the TLB */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 076052cd62be..975f709e09ae 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -59,6 +59,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 	native_set_pte(ptep, pte);
 }

+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+
+static inline void native_pmd_clear(pmd_t *pmd)
+{
+	native_set_pmd(pmd, native_make_pmd(0));
+}
+
 static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 {
 #ifdef CONFIG_SMP
@@ -72,14 +82,17 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #endif
 }

-static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
-	*pmdp = pmd;
-}
-
-static inline void native_pmd_clear(pmd_t *pmd)
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
-	native_set_pmd(pmd, native_make_pmd(0));
+#ifdef CONFIG_SMP
+	return native_make_pmd(xchg(&xp->pmd, 0));
+#else
+	/* native_local_pmdp_get_and_clear,
+	   but duplicated because of cyclic dependency */
+	pmd_t ret = *xp;
+	native_pmd_clear(xp);
+	return ret;
+#endif
 }

 static inline void native_set_pud(pud_t *pudp, pud_t pud)
@@ -102,6 +115,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
 	native_set_pgd(pgd, native_make_pgd(0));
 }

+extern void sync_global_pgds(unsigned long start, unsigned long end);
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -125,9 +140,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }

 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */

 #define update_mmu_cache(vma, address, ptep) do { } while (0)

@@ -168,6 +181,7 @@ extern void cleanup_highmap(void);
 #define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

 #define __HAVE_ARCH_PTE_SAME
+
 #endif /* !__ASSEMBLY__ */

 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index d1f4a760be23..d56187c6b838 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -22,6 +22,7 @@
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
+#define _PAGE_BIT_SPLITTING	_PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

 /* If _PAGE_BIT_PRESENT is clear, we use these: */
@@ -45,6 +46,7 @@
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
 #define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define _PAGE_SPLITTING	(_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
 #define __HAVE_ARCH_PTE_SPECIAL

 #ifdef CONFIG_KMEMCHECK
@@ -70,6 +72,7 @@
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)

 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
@@ -296,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);

+extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_setup_start(pgd_t *base);
 extern void native_pagetable_setup_done(pgd_t *base);
diff --git a/arch/x86/include/asm/probe_roms.h b/arch/x86/include/asm/probe_roms.h
new file mode 100644
index 000000000000..4950a0b1d09c
--- /dev/null
+++ b/arch/x86/include/asm/probe_roms.h
@@ -0,0 +1,8 @@
+#ifndef _PROBE_ROMS_H_
+#define _PROBE_ROMS_H_
+struct pci_dev;
+
+extern void __iomem *pci_map_biosrom(struct pci_dev *pdev);
+extern void pci_unmap_biosrom(void __iomem *rom);
+extern size_t pci_biosrom_size(struct pci_dev *pdev);
+#endif
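
Editor's note: the new probe_roms.h interface is small enough that a usage sketch shows the whole contract -- map the adapter's BIOS ROM, query its size, copy, unmap. The driver context and buffer below are hypothetical:

	/* Hypothetical driver snippet: copy an adapter's BIOS ROM. */
	void __iomem *rom = pci_map_biosrom(pdev);

	if (rom) {
		size_t len = pci_biosrom_size(pdev);

		/* buf/buf_len are caller-provided in this sketch */
		memcpy_fromio(buf, rom, min(len, buf_len));
		pci_unmap_biosrom(rom);
	}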
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 7a3e836eb2a9..59ab4dffa377 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -7,7 +7,7 @@
  */
 #define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
 #define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF	0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
 #define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
 #define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
@@ -60,6 +60,7 @@
 #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
 #define X86_CR4_VMXE	0x00002000 /* enable VMX virtualization */
 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
+#define X86_CR4_SMEP	0x00100000 /* enable SMEP support */

 /*
  * x86-64 Task Priority Register, CR8
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ebaa04a8d3af..b844edc69fe9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
 	int			x86_cache_alignment;	/* In bytes */
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
-#ifdef CONFIG_SMP
-	/* cpus sharing the last level cache: */
-	cpumask_var_t		llc_shared_map;
-#endif
 	/* cpuid returned max cores value: */
 	u16			x86_max_cores;
 	u16			apicid;
@@ -110,6 +106,8 @@ struct cpuinfo_x86 {
 	u16			phys_proc_id;
 	/* Core id: */
 	u16			cpu_core_id;
+	/* Compute unit id */
+	u8			compute_unit_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 #endif
@@ -139,10 +137,9 @@ extern __u32 cpu_caps_set[NCAPINTS];
139#ifdef CONFIG_SMP 137#ifdef CONFIG_SMP
140DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 138DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
141#define cpu_data(cpu) per_cpu(cpu_info, cpu) 139#define cpu_data(cpu) per_cpu(cpu_info, cpu)
142#define current_cpu_data __get_cpu_var(cpu_info)
143#else 140#else
141#define cpu_info boot_cpu_data
144#define cpu_data(cpu) boot_cpu_data 142#define cpu_data(cpu) boot_cpu_data
145#define current_cpu_data boot_cpu_data
146#endif 143#endif
147 144
148extern const struct seq_operations cpuinfo_op; 145extern const struct seq_operations cpuinfo_op;
@@ -606,7 +603,7 @@ extern unsigned long mmu_cr4_features;
606 603
607static inline void set_in_cr4(unsigned long mask) 604static inline void set_in_cr4(unsigned long mask)
608{ 605{
609 unsigned cr4; 606 unsigned long cr4;
610 607
611 mmu_cr4_features |= mask; 608 mmu_cr4_features |= mask;
612 cr4 = read_cr4(); 609 cr4 = read_cr4();
@@ -616,7 +613,7 @@ static inline void set_in_cr4(unsigned long mask)
616 613
617static inline void clear_in_cr4(unsigned long mask) 614static inline void clear_in_cr4(unsigned long mask)
618{ 615{
619 unsigned cr4; 616 unsigned long cr4;
620 617
621 mmu_cr4_features &= ~mask; 618 mmu_cr4_features &= ~mask;
622 cr4 = read_cr4(); 619 cr4 = read_cr4();
@@ -761,35 +758,13 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
761extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); 758extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
762 759
763extern void select_idle_routine(const struct cpuinfo_x86 *c); 760extern void select_idle_routine(const struct cpuinfo_x86 *c);
764extern void init_c1e_mask(void); 761extern void init_amd_e400_c1e_mask(void);
765 762
766extern unsigned long boot_option_idle_override; 763extern unsigned long boot_option_idle_override;
767extern unsigned long idle_halt; 764extern bool amd_e400_c1e_detected;
768extern unsigned long idle_nomwait;
769extern bool c1e_detected;
770 765
771/*
772 * on systems with caches, caches must be flashed as the absolute
773 * last instruction before going into a suspended halt. Otherwise,
774 * dirty data can linger in the cache and become stale on resume,
775 * leading to strange errors.
776 *
777 * perform a variety of operations to guarantee that the compiler
778 * will not reorder instructions. wbinvd itself is serializing
779 * so the processor will not reorder.
780 *
781 * Systems without cache can just go into halt.
782 */
783static inline void wbinvd_halt(void)
784{
785 mb();
786 /* check for clflush to determine if wbinvd is legal */
787 if (cpu_has_clflush)
788 asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
789 else
790 while (1)
791 halt();
792}
766enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
767 IDLE_POLL, IDLE_FORCE_MWAIT};
793 768
794extern void enable_sep_cpu(void); 769extern void enable_sep_cpu(void);
795extern int sysenter_setup(void); 770extern int sysenter_setup(void);
@@ -927,7 +902,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
927/* 902/*
928 * The below -8 is to reserve 8 bytes on top of the ring0 stack. 903 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
929 * This is necessary to guarantee that the entire "struct pt_regs" 904 * This is necessary to guarantee that the entire "struct pt_regs"
930 * is accessable even if the CPU haven't stored the SS/ESP registers 905 * is accessible even if the CPU haven't stored the SS/ESP registers
931 * on the stack (interrupt gate does not save these registers 906 * on the stack (interrupt gate does not save these registers
932 * when switching to the same priv ring). 907 * when switching to the same priv ring).
933 * Therefore beware: accessing the ss/esp fields of the 908 * Therefore beware: accessing the ss/esp fields of the
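The set_in_cr4()/clear_in_cr4() hunks above widen the temporary from unsigned to unsigned long and keep the cached mmu_cr4_features mask in sync with the live register. A minimal user-space sketch of that read-modify-write pattern, assuming only standard C; fake_cr4 and the read_cr4()/write_cr4() stubs here are stand-ins for the real control register, not kernel API:

#include <assert.h>
#include <stdio.h>

static unsigned long fake_cr4;          /* stands in for the real CR4 */
static unsigned long mmu_cr4_features;  /* cached copy for later CPUs */

static unsigned long read_cr4(void)   { return fake_cr4; }
static void write_cr4(unsigned long v) { fake_cr4 = v; }

static void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;	/* remember the bit for secondary CPUs */
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

#define X86_CR4_SMEP 0x00100000UL	/* matches processor-flags.h above */

int main(void)
{
	set_in_cr4(X86_CR4_SMEP);
	assert(fake_cr4 & X86_CR4_SMEP);
	clear_in_cr4(X86_CR4_SMEP);
	assert(!(fake_cr4 & X86_CR4_SMEP));
	printf("cr4 mask round-trip ok\n");
	return 0;
}

The unsigned long widening matters once bits above 31 (or the full 64-bit register on x86-64) come into play; the old plain unsigned would silently truncate them.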
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
new file mode 100644
index 000000000000..971e0b46446e
--- /dev/null
+++ b/arch/x86/include/asm/prom.h
@@ -0,0 +1,69 @@
1/*
2 * Definitions for Device tree / OpenFirmware handling on X86
3 *
4 * based on arch/powerpc/include/asm/prom.h which is
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef _ASM_X86_PROM_H
14#define _ASM_X86_PROM_H
15#ifndef __ASSEMBLY__
16
17#include <linux/of.h>
18#include <linux/types.h>
19#include <linux/pci.h>
20
21#include <asm/irq.h>
22#include <asm/atomic.h>
23#include <asm/setup.h>
24#include <asm/irq_controller.h>
25
26#ifdef CONFIG_OF
27extern int of_ioapic;
28extern u64 initial_dtb;
29extern void add_dtb(u64 data);
30extern void x86_add_irq_domains(void);
31void __cpuinit x86_of_pci_init(void);
32void x86_dtb_init(void);
33
34static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
35{
36 return pdev ? pdev->dev.of_node : NULL;
37}
38
39static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
40{
41 return pci_device_to_OF_node(bus->self);
42}
43
44#else
45static inline void add_dtb(u64 data) { }
46static inline void x86_add_irq_domains(void) { }
47static inline void x86_of_pci_init(void) { }
48static inline void x86_dtb_init(void) { }
49#define of_ioapic 0
50#endif
51
52extern char cmd_line[COMMAND_LINE_SIZE];
53
54#define pci_address_to_pio pci_address_to_pio
55unsigned long pci_address_to_pio(phys_addr_t addr);
56
57/**
58 * irq_dispose_mapping - Unmap an interrupt
59 * @virq: linux virq number of the interrupt to unmap
60 *
61 * FIXME: We really should implement proper virq handling like power,
62 * but that's going to be major surgery.
63 */
64static inline void irq_dispose_mapping(unsigned int virq) { }
65
66#define HAVE_ARCH_DEVTREE_FIXUPS
67
68#endif /* __ASSEMBLY__ */
69#endif
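The new prom.h accessors above chain pci_bus_to_OF_node() through pci_device_to_OF_node() via bus->self, so a root bus (whose bridge pointer is NULL) safely yields NULL instead of faulting. A user-space sketch of that NULL-propagating accessor pattern; all struct layouts here are simplified stand-ins, not the real kernel ones:

#include <stdio.h>
#include <stddef.h>

struct device_node { const char *name; };
struct device { struct device_node *of_node; };
struct pci_dev { struct device dev; };
struct pci_bus { struct pci_dev *self; /* upstream bridge, NULL for root */ };

static struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
	return pdev ? pdev->dev.of_node : NULL;	/* tolerate NULL device */
}

static struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
{
	return pci_device_to_OF_node(bus->self);	/* NULL self -> NULL */
}

int main(void)
{
	struct device_node node = { "pci@0" };
	struct pci_dev bridge = { { &node } };
	struct pci_bus bus = { &bridge };
	struct pci_bus root = { NULL };

	printf("bus node: %s\n", pci_bus_to_OF_node(&bus)->name);
	printf("root bus node: %p (expected NULL)\n",
	       (void *)pci_bus_to_OF_node(&root));
	return 0;
}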
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 52b098a6eebb..7b0a55a88851 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -31,7 +31,7 @@
31#define R12 24 31#define R12 24
32#define RBP 32 32#define RBP 32
33#define RBX 40 33#define RBX 40
34/* arguments: interrupts/non tracing syscalls only save upto here*/ 34/* arguments: interrupts/non tracing syscalls only save up to here*/
35#define R11 48 35#define R11 48
36#define R10 56 36#define R10 56
37#define R9 64 37#define R9 64
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 78cd1ea94500..94e7618fcac8 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -73,7 +73,7 @@ struct pt_regs {
73 unsigned long r12; 73 unsigned long r12;
74 unsigned long rbp; 74 unsigned long rbp;
75 unsigned long rbx; 75 unsigned long rbx;
76/* arguments: non interrupts/non tracing syscalls only save upto here*/ 76/* arguments: non interrupts/non tracing syscalls only save up to here*/
77 unsigned long r11; 77 unsigned long r11;
78 unsigned long r10; 78 unsigned long r10;
79 unsigned long r9; 79 unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
103 unsigned long r12; 103 unsigned long r12;
104 unsigned long bp; 104 unsigned long bp;
105 unsigned long bx; 105 unsigned long bx;
106/* arguments: non interrupts/non tracing syscalls only save upto here*/ 106/* arguments: non interrupts/non tracing syscalls only save up to here*/
107 unsigned long r11; 107 unsigned long r11;
108 unsigned long r10; 108 unsigned long r10;
109 unsigned long r9; 109 unsigned long r9;
@@ -136,6 +136,7 @@ struct cpuinfo_x86;
136struct task_struct; 136struct task_struct;
137 137
138extern unsigned long profile_pc(struct pt_regs *regs); 138extern unsigned long profile_pc(struct pt_regs *regs);
139#define profile_pc profile_pc
139 140
140extern unsigned long 141extern unsigned long
141convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); 142convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
@@ -202,20 +203,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
202#endif 203#endif
203} 204}
204 205
205static inline unsigned long instruction_pointer(struct pt_regs *regs) 206#define GET_IP(regs) ((regs)->ip)
206{ 207#define GET_FP(regs) ((regs)->bp)
207 return regs->ip; 208#define GET_USP(regs) ((regs)->sp)
208}
209
210static inline unsigned long frame_pointer(struct pt_regs *regs)
211{
212 return regs->bp;
213}
214 209
215static inline unsigned long user_stack_pointer(struct pt_regs *regs) 210#include <asm-generic/ptrace.h>
216{
217 return regs->sp;
218}
219 211
220/* Query offset/name of register from its name/offset */ 212/* Query offset/name of register from its name/offset */
221extern int regs_query_register_offset(const char *name); 213extern int regs_query_register_offset(const char *name);
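The ptrace.h hunk above drops the per-arch instruction_pointer()/frame_pointer()/user_stack_pointer() inlines in favour of GET_IP/GET_FP/GET_USP macros consumed by <asm-generic/ptrace.h>, which generates the common accessors once for every architecture. A sketch of roughly what the generic header does with those macros; struct pt_regs is trimmed to three fields here:

#include <stdio.h>

struct pt_regs { unsigned long ip, bp, sp; };

#define GET_IP(regs)  ((regs)->ip)
#define GET_FP(regs)  ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)

/* the generic header then expands each macro into a shared accessor: */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return GET_IP(regs);
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return GET_FP(regs);
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return GET_USP(regs);
}

int main(void)
{
	struct pt_regs regs = { 0x1000, 0x2000, 0x3000 };

	printf("ip=%#lx fp=%#lx usp=%#lx\n",
	       instruction_pointer(&regs),
	       frame_pointer(&regs),
	       user_stack_pointer(&regs));
	return 0;
}

Each architecture now only has to say which register field holds what; the accessor bodies live in one place.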
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index cd02f324aa6b..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -11,5 +11,49 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
11void pvclock_read_wallclock(struct pvclock_wall_clock *wall, 11void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
12 struct pvclock_vcpu_time_info *vcpu, 12 struct pvclock_vcpu_time_info *vcpu,
13 struct timespec *ts); 13 struct timespec *ts);
14void pvclock_resume(void);
15
16/*
17 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
18 * yielding a 64-bit result.
19 */
20static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
21{
22 u64 product;
23#ifdef __i386__
24 u32 tmp1, tmp2;
25#else
26 ulong tmp;
27#endif
28
29 if (shift < 0)
30 delta >>= -shift;
31 else
32 delta <<= shift;
33
34#ifdef __i386__
35 __asm__ (
36 "mul %5 ; "
37 "mov %4,%%eax ; "
38 "mov %%edx,%4 ; "
39 "mul %5 ; "
40 "xor %5,%5 ; "
41 "add %4,%%eax ; "
42 "adc %5,%%edx ; "
43 : "=A" (product), "=r" (tmp1), "=r" (tmp2)
44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
45#elif defined(__x86_64__)
46 __asm__ (
47 "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
48 : [lo]"=a"(product),
49 [hi]"=d"(tmp)
50 : "0"(delta),
51 [mul_frac]"rm"((u64)mul_frac));
52#else
53#error implement me!
54#endif
55
56 return product;
57}
14 58
15#endif /* _ASM_X86_PVCLOCK_H */ 59#endif /* _ASM_X86_PVCLOCK_H */
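The pvclock_scale_delta() helper added above computes (delta << shift) * mul_frac >> 32: a 64x32-bit multiply whose 96-bit intermediate keeps only the middle 64 bits, which is how a raw TSC delta is scaled into nanoseconds. A user-space check of that fixed-point math, assuming GCC or Clang on a 64-bit host for the __int128 reference; the mul_frac value below is illustrative, not from any real VM:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	uint64_t lo, hi;

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* split 64x32 multiply by hand, the way the 32-bit asm does */
	lo = (delta & 0xffffffffULL) * mul_frac;
	hi = (delta >> 32) * mul_frac;
	return hi + (lo >> 32);	/* bits [95:32] of the 96-bit product */
}

int main(void)
{
	/* ~2.8 GHz TSC to ns: mul_frac/2^32 ~= 1/2.8 */
	uint64_t delta = 123456789ULL;
	uint32_t mul_frac = 1533916891U;
	int shift = 0;
	unsigned __int128 ref = ((unsigned __int128)delta * mul_frac) >> 32;

	assert(scale_delta(delta, mul_frac, shift) == (uint64_t)ref);
	printf("scaled: %llu ns\n",
	       (unsigned long long)scale_delta(delta, mul_frac, shift));
	return 0;
}

The split into lo/hi halves is exactly what the i386 inline asm does with two mul instructions, and what the x86-64 version gets from a single mul plus shrd.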
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 562d4fd31ba8..3250e3d605d9 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -18,7 +18,10 @@ extern struct machine_ops machine_ops;
18 18
19void native_machine_crash_shutdown(struct pt_regs *regs); 19void native_machine_crash_shutdown(struct pt_regs *regs);
20void native_machine_shutdown(void); 20void native_machine_shutdown(void);
21void machine_real_restart(const unsigned char *code, int length); 21void machine_real_restart(unsigned int type);
22/* These must match dispatch_table in reboot_32.S */
23#define MRR_BIOS 0
24#define MRR_APM 1
22 25
23typedef void (*nmi_shootdown_cb)(int, struct die_args*); 26typedef void (*nmi_shootdown_cb)(int, struct die_args*);
24void nmi_shootdown_cpus(nmi_shootdown_cb callback); 27void nmi_shootdown_cpus(nmi_shootdown_cb callback);
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index d1e41b0f9b60..df4cd32b4cc6 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -37,26 +37,9 @@
37#endif 37#endif
38 38
39#ifdef __KERNEL__ 39#ifdef __KERNEL__
40
41#include <linux/list.h>
42#include <linux/spinlock.h>
43#include <linux/lockdep.h>
44#include <asm/asm.h> 40#include <asm/asm.h>
45 41
46struct rwsem_waiter;
47
48extern asmregparm struct rw_semaphore *
49 rwsem_down_read_failed(struct rw_semaphore *sem);
50extern asmregparm struct rw_semaphore *
51 rwsem_down_write_failed(struct rw_semaphore *sem);
52extern asmregparm struct rw_semaphore *
53 rwsem_wake(struct rw_semaphore *);
54extern asmregparm struct rw_semaphore *
55 rwsem_downgrade_wake(struct rw_semaphore *sem);
56
57/* 42/*
58 * the semaphore definition
59 *
60 * The bias values and the counter type limits the number of 43 * The bias values and the counter type limits the number of
61 * potential readers/writers to 32767 for 32 bits and 2147483647 44 * potential readers/writers to 32767 for 32 bits and 2147483647
62 * for 64 bits. 45 * for 64 bits.
@@ -74,43 +57,6 @@ extern asmregparm struct rw_semaphore *
74#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 57#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
75#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 58#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
76 59
77typedef signed long rwsem_count_t;
78
79struct rw_semaphore {
80 rwsem_count_t count;
81 spinlock_t wait_lock;
82 struct list_head wait_list;
83#ifdef CONFIG_DEBUG_LOCK_ALLOC
84 struct lockdep_map dep_map;
85#endif
86};
87
88#ifdef CONFIG_DEBUG_LOCK_ALLOC
89# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
90#else
91# define __RWSEM_DEP_MAP_INIT(lockname)
92#endif
93
94
95#define __RWSEM_INITIALIZER(name) \
96{ \
97 RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
98 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
99}
100
101#define DECLARE_RWSEM(name) \
102 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
103
104extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
105 struct lock_class_key *key);
106
107#define init_rwsem(sem) \
108do { \
109 static struct lock_class_key __key; \
110 \
111 __init_rwsem((sem), #sem, &__key); \
112} while (0)
113
114/* 60/*
115 * lock for reading 61 * lock for reading
116 */ 62 */
@@ -133,7 +79,7 @@ static inline void __down_read(struct rw_semaphore *sem)
133 */ 79 */
134static inline int __down_read_trylock(struct rw_semaphore *sem) 80static inline int __down_read_trylock(struct rw_semaphore *sem)
135{ 81{
136 rwsem_count_t result, tmp; 82 long result, tmp;
137 asm volatile("# beginning __down_read_trylock\n\t" 83 asm volatile("# beginning __down_read_trylock\n\t"
138 " mov %0,%1\n\t" 84 " mov %0,%1\n\t"
139 "1:\n\t" 85 "1:\n\t"
@@ -155,7 +101,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
155 */ 101 */
156static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) 102static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
157{ 103{
158 rwsem_count_t tmp; 104 long tmp;
159 asm volatile("# beginning down_write\n\t" 105 asm volatile("# beginning down_write\n\t"
160 LOCK_PREFIX " xadd %1,(%2)\n\t" 106 LOCK_PREFIX " xadd %1,(%2)\n\t"
161 /* adds 0xffff0001, returns the old value */ 107 /* adds 0xffff0001, returns the old value */
@@ -180,9 +126,8 @@ static inline void __down_write(struct rw_semaphore *sem)
180 */ 126 */
181static inline int __down_write_trylock(struct rw_semaphore *sem) 127static inline int __down_write_trylock(struct rw_semaphore *sem)
182{ 128{
183 rwsem_count_t ret = cmpxchg(&sem->count, 129 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
184 RWSEM_UNLOCKED_VALUE, 130 RWSEM_ACTIVE_WRITE_BIAS);
185 RWSEM_ACTIVE_WRITE_BIAS);
186 if (ret == RWSEM_UNLOCKED_VALUE) 131 if (ret == RWSEM_UNLOCKED_VALUE)
187 return 1; 132 return 1;
188 return 0; 133 return 0;
@@ -193,7 +138,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
193 */ 138 */
194static inline void __up_read(struct rw_semaphore *sem) 139static inline void __up_read(struct rw_semaphore *sem)
195{ 140{
196 rwsem_count_t tmp; 141 long tmp;
197 asm volatile("# beginning __up_read\n\t" 142 asm volatile("# beginning __up_read\n\t"
198 LOCK_PREFIX " xadd %1,(%2)\n\t" 143 LOCK_PREFIX " xadd %1,(%2)\n\t"
199 /* subtracts 1, returns the old value */ 144 /* subtracts 1, returns the old value */
@@ -211,7 +156,7 @@ static inline void __up_read(struct rw_semaphore *sem)
211 */ 156 */
212static inline void __up_write(struct rw_semaphore *sem) 157static inline void __up_write(struct rw_semaphore *sem)
213{ 158{
214 rwsem_count_t tmp; 159 long tmp;
215 asm volatile("# beginning __up_write\n\t" 160 asm volatile("# beginning __up_write\n\t"
216 LOCK_PREFIX " xadd %1,(%2)\n\t" 161 LOCK_PREFIX " xadd %1,(%2)\n\t"
217 /* subtracts 0xffff0001, returns the old value */ 162 /* subtracts 0xffff0001, returns the old value */
@@ -247,8 +192,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
247/* 192/*
248 * implement atomic add functionality 193 * implement atomic add functionality
249 */ 194 */
250static inline void rwsem_atomic_add(rwsem_count_t delta, 195static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
251 struct rw_semaphore *sem)
252{ 196{
253 asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" 197 asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
254 : "+m" (sem->count) 198 : "+m" (sem->count)
@@ -258,10 +202,9 @@ static inline void rwsem_atomic_add(rwsem_count_t delta,
258/* 202/*
259 * implement exchange and add functionality 203 * implement exchange and add functionality
260 */ 204 */
261static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta, 205static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
262 struct rw_semaphore *sem)
263{ 206{
264 rwsem_count_t tmp = delta; 207 long tmp = delta;
265 208
266 asm volatile(LOCK_PREFIX "xadd %0,%1" 209 asm volatile(LOCK_PREFIX "xadd %0,%1"
267 : "+r" (tmp), "+m" (sem->count) 210 : "+r" (tmp), "+m" (sem->count)
@@ -270,10 +213,5 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
270 return tmp + delta; 213 return tmp + delta;
271} 214}
272 215
273static inline int rwsem_is_locked(struct rw_semaphore *sem)
274{
275 return (sem->count != 0);
276}
277
278#endif /* __KERNEL__ */ 216#endif /* __KERNEL__ */
279#endif /* _ASM_X86_RWSEM_H */ 217#endif /* _ASM_X86_RWSEM_H */
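The rwsem.h hunks above replace the rwsem_count_t typedef with plain long and drop the duplicated struct definition, but the xadd-based counter protocol is unchanged. A user-space sketch of that fast path, where __atomic_fetch_add() stands in for "lock xadd"; the bias constants are the 32-bit ones from the header (64-bit kernels use wider equivalents):

#include <assert.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE     0L
#define RWSEM_ACTIVE_BIAS        1L
#define RWSEM_WAITING_BIAS       (-0x10000L)
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static long count = RWSEM_UNLOCKED_VALUE;

static int down_read_fastpath(void)
{
	/* negative result after the add means a writer holds or waits */
	long old = __atomic_fetch_add(&count, RWSEM_ACTIVE_BIAS,
				      __ATOMIC_ACQUIRE);
	return old + RWSEM_ACTIVE_BIAS >= 0;
}

static int down_write_fastpath(void)
{
	long old = __atomic_fetch_add(&count, RWSEM_ACTIVE_WRITE_BIAS,
				      __ATOMIC_ACQUIRE);
	return old == RWSEM_UNLOCKED_VALUE;	/* only if it was idle */
}

int main(void)
{
	assert(down_read_fastpath());   /* first reader gets in */
	assert(down_read_fastpath());   /* so does a second */
	assert(!down_write_fastpath()); /* writer must take the slowpath,
					 * which would queue and repair count */
	printf("count = %ld\n", count);
	return 0;
}

With plain long, the same code compiles to 32-bit or 64-bit arithmetic as appropriate, which is the point of retiring the typedef.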
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 14e0ed86a6f9..cd84f7208f76 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -1,14 +1,16 @@
1#ifndef _ASM_X86_SEGMENT_H 1#ifndef _ASM_X86_SEGMENT_H
2#define _ASM_X86_SEGMENT_H 2#define _ASM_X86_SEGMENT_H
3 3
4#include <linux/const.h>
5
4/* Constructor for a conventional segment GDT (or LDT) entry */ 6/* Constructor for a conventional segment GDT (or LDT) entry */
5/* This is a macro so it can be used in initializers */ 7/* This is a macro so it can be used in initializers */
6#define GDT_ENTRY(flags, base, limit) \ 8#define GDT_ENTRY(flags, base, limit) \
7 ((((base) & 0xff000000ULL) << (56-24)) | \ 9 ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
8 (((flags) & 0x0000f0ffULL) << 40) | \ 10 (((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
9 (((limit) & 0x000f0000ULL) << (48-16)) | \ 11 (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
10 (((base) & 0x00ffffffULL) << 16) | \ 12 (((base) & _AC(0x00ffffff,ULL)) << 16) | \
11 (((limit) & 0x0000ffffULL))) 13 (((limit) & _AC(0x0000ffff,ULL))))
12 14
13/* Simple and small GDT entries for booting only */ 15/* Simple and small GDT entries for booting only */
14 16
@@ -73,31 +75,31 @@
73 75
74#define GDT_ENTRY_DEFAULT_USER_DS 15 76#define GDT_ENTRY_DEFAULT_USER_DS 15
75 77
76#define GDT_ENTRY_KERNEL_BASE 12 78#define GDT_ENTRY_KERNEL_BASE (12)
77 79
78#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) 80#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
79 81
80#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) 82#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
81 83
82#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) 84#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
83#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) 85#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE+5)
84 86
85#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) 87#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE+6)
86#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) 88#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE+11)
87 89
88#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) 90#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE+14)
89#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) 91#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8)
90 92
91#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) 93#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE+15)
92#ifdef CONFIG_SMP 94#ifdef CONFIG_SMP
93#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) 95#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
94#else 96#else
95#define __KERNEL_PERCPU 0 97#define __KERNEL_PERCPU 0
96#endif 98#endif
97 99
98#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16) 100#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE+16)
99#ifdef CONFIG_CC_STACKPROTECTOR 101#ifdef CONFIG_CC_STACKPROTECTOR
100#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8) 102#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8)
101#else 103#else
102#define __KERNEL_STACK_CANARY 0 104#define __KERNEL_STACK_CANARY 0
103#endif 105#endif
@@ -182,10 +184,10 @@
182 184
183#endif 185#endif
184 186
185#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) 187#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
186#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) 188#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
187#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3) 189#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
188#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3) 190#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
189#ifndef CONFIG_PARAVIRT 191#ifndef CONFIG_PARAVIRT
190#define get_kernel_rpl() 0 192#define get_kernel_rpl() 0
191#endif 193#endif
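The GDT_ENTRY() change above wraps each constant in _AC(), which only matters when the header is pulled into assembly (the ULL suffix is dropped there); in C the packing is unchanged. A user-space check of that packing, using plain ULL constants, which are equivalent in C; the expected value is the classic flat 4 GiB kernel code descriptor:

#include <assert.h>
#include <stdio.h>

#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & 0xff000000ULL) << (56-24)) |	\
	 (((flags) & 0x0000f0ffULL) << 40) |		\
	 (((limit) & 0x000f0000ULL) << (48-16)) |	\
	 (((base)  & 0x00ffffffULL) << 16) |		\
	 (((limit) & 0x0000ffffULL)))

int main(void)
{
	/* flags 0xc09a: present, DPL 0, code read/exec, 4 KiB gran, 32-bit */
	unsigned long long cs = GDT_ENTRY(0xc09a, 0, 0xfffff);

	assert(cs == 0x00cf9a000000ffffULL);
	printf("kernel CS descriptor: %#llx\n", cs);
	return 0;
}

The descriptor's base and limit fields are scattered across the 64-bit word for historical reasons, which is exactly why the shifts look so irregular.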
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ef292c792d74..9756551ec760 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -53,6 +53,12 @@ extern void x86_mrst_early_setup(void);
53static inline void x86_mrst_early_setup(void) { } 53static inline void x86_mrst_early_setup(void) { }
54#endif 54#endif
55 55
56#ifdef CONFIG_X86_INTEL_CE
57extern void x86_ce4100_early_setup(void);
58#else
59static inline void x86_ce4100_early_setup(void) { }
60#endif
61
56#ifndef _SETUP 62#ifndef _SETUP
57 63
58/* 64/*
@@ -82,7 +88,7 @@ void *extend_brk(size_t size, size_t align);
82 * executable.) 88 * executable.)
83 */ 89 */
84#define RESERVE_BRK(name,sz) \ 90#define RESERVE_BRK(name,sz) \
85 static void __section(.discard.text) __used \ 91 static void __section(.discard.text) __used notrace \
86 __brk_reservation_fn_##name##__(void) { \ 92 __brk_reservation_fn_##name##__(void) { \
87 asm volatile ( \ 93 asm volatile ( \
88 ".pushsection .brk_reservation,\"aw\",@nobits;" \ 94 ".pushsection .brk_reservation,\"aw\",@nobits;" \
@@ -93,10 +99,15 @@ void *extend_brk(size_t size, size_t align);
93 : : "i" (sz)); \ 99 : : "i" (sz)); \
94 } 100 }
95 101
102/* Helper for reserving space for arrays of things */
103#define RESERVE_BRK_ARRAY(type, name, entries) \
104 type *name; \
105 RESERVE_BRK(name, sizeof(type) * entries)
106
107extern void probe_roms(void);
96#ifdef __i386__ 108#ifdef __i386__
97 109
98void __init i386_start_kernel(void); 110void __init i386_start_kernel(void);
99extern void probe_roms(void);
100 111
101#else 112#else
102void __init x86_64_start_kernel(char *real_mode); 113void __init x86_64_start_kernel(char *real_mode);
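RESERVE_BRK_ARRAY(), added above, pairs a typed pointer with a named, sized reservation in the early-boot brk area. A rough user-space analogue, assuming only standard C: the kernel macro merely reserves space in a .brk_reservation section and the pointer is assigned later via extend_brk(), whereas this sketch merges the two steps with a static arena and a bump allocator; align is assumed to be a power of two:

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

static unsigned char brk_arena[4096];
static size_t brk_used;

static void *extend_brk(size_t size, size_t align)
{
	size_t off = (brk_used + align - 1) & ~(align - 1);

	assert(off + size <= sizeof(brk_arena));	/* reservation big enough */
	brk_used = off + size;
	return brk_arena + off;
}

#define RESERVE_BRK_ARRAY(type, name, entries)	\
	type *name = extend_brk(sizeof(type) * (entries), sizeof(type))

int main(void)
{
	/* hypothetical early-boot array, sized like the kernel macro would */
	RESERVE_BRK_ARRAY(unsigned int, early_pfn_map, 64);

	early_pfn_map[0] = 42;
	printf("reserved %zu bytes, first entry %u\n",
	       sizeof(unsigned int) * 64, early_pfn_map[0]);
	return 0;
}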
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc90824068..73b11bc0ae6f 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -17,12 +17,24 @@
17#endif 17#endif
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
19#include <asm/cpumask.h> 19#include <asm/cpumask.h>
20#include <asm/cpufeature.h>
20 21
21extern int smp_num_siblings; 22extern int smp_num_siblings;
22extern unsigned int num_processors; 23extern unsigned int num_processors;
23 24
25static inline bool cpu_has_ht_siblings(void)
26{
27 bool has_siblings = false;
28#ifdef CONFIG_SMP
29 has_siblings = cpu_has_ht && smp_num_siblings > 1;
30#endif
31 return has_siblings;
32}
33
24DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); 34DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
25DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); 35DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
36/* cpus sharing the last level cache: */
37DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
26DECLARE_PER_CPU(u16, cpu_llc_id); 38DECLARE_PER_CPU(u16, cpu_llc_id);
27DECLARE_PER_CPU(int, cpu_number); 39DECLARE_PER_CPU(int, cpu_number);
28 40
@@ -36,21 +48,26 @@ static inline struct cpumask *cpu_core_mask(int cpu)
36 return per_cpu(cpu_core_map, cpu); 48 return per_cpu(cpu_core_map, cpu);
37} 49}
38 50
51static inline struct cpumask *cpu_llc_shared_mask(int cpu)
52{
53 return per_cpu(cpu_llc_shared_map, cpu);
54}
55
39DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); 56DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
40DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); 57DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
58#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
59DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
60#endif
41 61
42/* Static state in head.S used to set up a CPU */ 62/* Static state in head.S used to set up a CPU */
43extern struct { 63extern unsigned long stack_start; /* Initial stack pointer address */
44 void *sp;
45 unsigned short ss;
46} stack_start;
47 64
48struct smp_ops { 65struct smp_ops {
49 void (*smp_prepare_boot_cpu)(void); 66 void (*smp_prepare_boot_cpu)(void);
50 void (*smp_prepare_cpus)(unsigned max_cpus); 67 void (*smp_prepare_cpus)(unsigned max_cpus);
51 void (*smp_cpus_done)(unsigned max_cpus); 68 void (*smp_cpus_done)(unsigned max_cpus);
52 69
53 void (*smp_send_stop)(void); 70 void (*stop_other_cpus)(int wait);
54 void (*smp_send_reschedule)(int cpu); 71 void (*smp_send_reschedule)(int cpu);
55 72
56 int (*cpu_up)(unsigned cpu); 73 int (*cpu_up)(unsigned cpu);
@@ -73,7 +90,12 @@ extern struct smp_ops smp_ops;
73 90
74static inline void smp_send_stop(void) 91static inline void smp_send_stop(void)
75{ 92{
76 smp_ops.smp_send_stop(); 93 smp_ops.stop_other_cpus(0);
94}
95
96static inline void stop_other_cpus(void)
97{
98 smp_ops.stop_other_cpus(1);
77} 99}
78 100
79static inline void smp_prepare_boot_cpu(void) 101static inline void smp_prepare_boot_cpu(void)
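The smp_ops hunk above renames smp_send_stop to stop_other_cpus and threads a wait flag through it: the panic path fires and forgets (wait=0), while the reboot path insists the other CPUs have really halted (wait=1). A toy sketch of that ops-struct indirection; the struct and the native_ implementation here are stand-ins for the real ones:

#include <stdio.h>

struct smp_ops {
	void (*stop_other_cpus)(int wait);
};

static void native_stop_other_cpus(int wait)
{
	printf("stopping other cpus%s\n",
	       wait ? " (waiting for them to halt)" : "");
}

static struct smp_ops smp_ops = {
	.stop_other_cpus = native_stop_other_cpus,
};

static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);	/* e.g. panic: don't block */
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);	/* e.g. reboot: wait for the halt */
}

int main(void)
{
	smp_send_stop();
	stop_other_cpus();
	return 0;
}

Keeping one function pointer with a flag, instead of two pointers, lets paravirt backends override a single entry point.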
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 1def60114906..725b77831993 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
34 */ 34 */
35 CMOS_WRITE(0, 0xf); 35 CMOS_WRITE(0, 0xf);
36 36
37 *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; 37 *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 38}
39 39
40static inline void __init smpboot_setup_io_apic(void) 40static inline void __init smpboot_setup_io_apic(void)
@@ -48,7 +48,6 @@ static inline void __init smpboot_setup_io_apic(void)
48 setup_IO_APIC(); 48 setup_IO_APIC();
49 else { 49 else {
50 nr_ioapics = 0; 50 nr_ioapics = 0;
51 localise_nmi_watchdog();
52 } 51 }
53#endif 52#endif
54} 53}
diff --git a/arch/x86/include/asm/srat.h b/arch/x86/include/asm/srat.h
deleted file mode 100644
index b508d639d1a7..000000000000
--- a/arch/x86/include/asm/srat.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Some of the code in this file has been gleaned from the 64 bit
3 * discontigmem support code base.
4 *
5 * Copyright (C) 2002, IBM Corp.
6 *
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
17 * NON INFRINGEMENT. See the GNU General Public License for more
18 * details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Send feedback to Pat Gaughen <gone@us.ibm.com>
25 */
26
27#ifndef _ASM_X86_SRAT_H
28#define _ASM_X86_SRAT_H
29
30#ifdef CONFIG_ACPI_NUMA
31extern int get_memcfg_from_srat(void);
32#else
33static inline int get_memcfg_from_srat(void)
34{
35 return 0;
36}
37#endif
38
39#endif /* _ASM_X86_SRAT_H */
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 2b16a2ad23dc..70bbe39043a9 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -7,6 +7,7 @@
7#define _ASM_X86_STACKTRACE_H 7#define _ASM_X86_STACKTRACE_H
8 8
9#include <linux/uaccess.h> 9#include <linux/uaccess.h>
10#include <linux/ptrace.h>
10 11
11extern int kstack_depth_to_print; 12extern int kstack_depth_to_print;
12 13
@@ -36,9 +37,6 @@ print_context_stack_bp(struct thread_info *tinfo,
36/* Generic stack tracer with callbacks */ 37/* Generic stack tracer with callbacks */
37 38
38struct stacktrace_ops { 39struct stacktrace_ops {
39 void (*warning)(void *data, char *msg);
40 /* msg must contain %s for the symbol */
41 void (*warning_symbol)(void *data, char *msg, unsigned long symbol);
42 void (*address)(void *data, unsigned long address, int reliable); 40 void (*address)(void *data, unsigned long address, int reliable);
43 /* On negative return stop dumping */ 41 /* On negative return stop dumping */
44 int (*stack)(void *data, char *name); 42 int (*stack)(void *data, char *name);
@@ -57,13 +55,39 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
57#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) 55#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
58#endif 56#endif
59 57
58#ifdef CONFIG_FRAME_POINTER
59static inline unsigned long
60stack_frame(struct task_struct *task, struct pt_regs *regs)
61{
62 unsigned long bp;
63
64 if (regs)
65 return regs->bp;
66
67 if (task == current) {
68 /* Grab bp right from our regs */
69 get_bp(bp);
70 return bp;
71 }
72
73 /* bp is the last reg pushed by switch_to */
74 return *(unsigned long *)task->thread.sp;
75}
76#else
77static inline unsigned long
78stack_frame(struct task_struct *task, struct pt_regs *regs)
79{
80 return 0;
81}
82#endif
83
60extern void 84extern void
61show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 85show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
62 unsigned long *stack, unsigned long bp, char *log_lvl); 86 unsigned long *stack, unsigned long bp, char *log_lvl);
63 87
64extern void 88extern void
65show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, 89show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
66 unsigned long *sp, unsigned long bp, char *log_lvl); 90 unsigned long *sp, unsigned long bp, char *log_lvl);
67 91
68extern unsigned int code_bytes; 92extern unsigned int code_bytes;
69 93
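The stack_frame() helper added above returns regs->bp when a register snapshot is available, reads the frame pointer directly for the current task (via get_bp()'s inline asm), and otherwise dereferences the saved switch_to stack slot. A user-space analogue of the "current task" case, assuming GCC or Clang and a frame-pointer build (-fno-omit-frame-pointer, which is what CONFIG_FRAME_POINTER guarantees):

#include <stdio.h>

static unsigned long stack_frame_here(void)
{
	/* portable spelling of get_bp()'s "mov %rbp" */
	return (unsigned long)__builtin_frame_address(0);
}

int main(void)
{
	printf("current frame pointer: %#lx\n", stack_frame_here());
	return 0;
}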
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index fd921c3a6841..487055c8c1aa 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -9,8 +9,6 @@
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
11 11
12static inline int arch_prepare_suspend(void) { return 0; }
13
14/* image of the saved processor state */ 12/* image of the saved processor state */
15struct saved_context { 13struct saved_context {
16 u16 es, fs, gs, ss; 14 u16 es, fs, gs, ss;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 8d942afae681..09b0bf104156 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -9,11 +9,6 @@
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
11 11
12static inline int arch_prepare_suspend(void)
13{
14 return 0;
15}
16
17/* 12/*
18 * Image of the saved processor state, used by the low level ACPI suspend to 13 * Image of the saved processor state, used by the low level ACPI suspend to
19 * RAM code and by the low level hibernation code. 14 * RAM code and by the low level hibernation code.
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0e831059ac5a..f2b83bc7d784 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -47,14 +47,13 @@ enum {
47 INTERCEPT_MONITOR, 47 INTERCEPT_MONITOR,
48 INTERCEPT_MWAIT, 48 INTERCEPT_MWAIT,
49 INTERCEPT_MWAIT_COND, 49 INTERCEPT_MWAIT_COND,
50 INTERCEPT_XSETBV,
50}; 51};
51 52
52 53
53struct __attribute__ ((__packed__)) vmcb_control_area { 54struct __attribute__ ((__packed__)) vmcb_control_area {
54 u16 intercept_cr_read; 55 u32 intercept_cr;
55 u16 intercept_cr_write; 56 u32 intercept_dr;
56 u16 intercept_dr_read;
57 u16 intercept_dr_write;
58 u32 intercept_exceptions; 57 u32 intercept_exceptions;
59 u64 intercept; 58 u64 intercept;
60 u8 reserved_1[42]; 59 u8 reserved_1[42];
@@ -81,14 +80,19 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
81 u32 event_inj_err; 80 u32 event_inj_err;
82 u64 nested_cr3; 81 u64 nested_cr3;
83 u64 lbr_ctl; 82 u64 lbr_ctl;
84 u64 reserved_5; 83 u32 clean;
84 u32 reserved_5;
85 u64 next_rip; 85 u64 next_rip;
86 u8 reserved_6[816]; 86 u8 insn_len;
87 u8 insn_bytes[15];
88 u8 reserved_6[800];
87}; 89};
88 90
89 91
90#define TLB_CONTROL_DO_NOTHING 0 92#define TLB_CONTROL_DO_NOTHING 0
91#define TLB_CONTROL_FLUSH_ALL_ASID 1 93#define TLB_CONTROL_FLUSH_ALL_ASID 1
94#define TLB_CONTROL_FLUSH_ASID 3
95#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
92 96
93#define V_TPR_MASK 0x0f 97#define V_TPR_MASK 0x0f
94 98
@@ -204,19 +208,31 @@ struct __attribute__ ((__packed__)) vmcb {
204#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK 208#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
205#define SVM_SELECTOR_CODE_MASK (1 << 3) 209#define SVM_SELECTOR_CODE_MASK (1 << 3)
206 210
207#define INTERCEPT_CR0_MASK 1
208#define INTERCEPT_CR3_MASK (1 << 3)
209#define INTERCEPT_CR4_MASK (1 << 4)
210#define INTERCEPT_CR8_MASK (1 << 8)
211
212#define INTERCEPT_DR0_MASK 1
213#define INTERCEPT_DR1_MASK (1 << 1)
214#define INTERCEPT_DR2_MASK (1 << 2)
215#define INTERCEPT_DR3_MASK (1 << 3)
216#define INTERCEPT_DR4_MASK (1 << 4)
217#define INTERCEPT_DR5_MASK (1 << 5)
218#define INTERCEPT_DR6_MASK (1 << 6)
219#define INTERCEPT_DR7_MASK (1 << 7)
211#define INTERCEPT_CR0_READ 0
212#define INTERCEPT_CR3_READ 3
213#define INTERCEPT_CR4_READ 4
214#define INTERCEPT_CR8_READ 8
215#define INTERCEPT_CR0_WRITE (16 + 0)
216#define INTERCEPT_CR3_WRITE (16 + 3)
217#define INTERCEPT_CR4_WRITE (16 + 4)
218#define INTERCEPT_CR8_WRITE (16 + 8)
219
220#define INTERCEPT_DR0_READ 0
221#define INTERCEPT_DR1_READ 1
222#define INTERCEPT_DR2_READ 2
223#define INTERCEPT_DR3_READ 3
224#define INTERCEPT_DR4_READ 4
225#define INTERCEPT_DR5_READ 5
226#define INTERCEPT_DR6_READ 6
227#define INTERCEPT_DR7_READ 7
228#define INTERCEPT_DR0_WRITE (16 + 0)
229#define INTERCEPT_DR1_WRITE (16 + 1)
230#define INTERCEPT_DR2_WRITE (16 + 2)
231#define INTERCEPT_DR3_WRITE (16 + 3)
232#define INTERCEPT_DR4_WRITE (16 + 4)
233#define INTERCEPT_DR5_WRITE (16 + 5)
234#define INTERCEPT_DR6_WRITE (16 + 6)
235#define INTERCEPT_DR7_WRITE (16 + 7)
220 236
221#define SVM_EVTINJ_VEC_MASK 0xff 237#define SVM_EVTINJ_VEC_MASK 0xff
222 238
@@ -246,6 +262,8 @@ struct __attribute__ ((__packed__)) vmcb {
246#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 262#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
247#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44 263#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
248 264
265#define SVM_EXITINFO_REG_MASK 0x0F
266
249#define SVM_EXIT_READ_CR0 0x000 267#define SVM_EXIT_READ_CR0 0x000
250#define SVM_EXIT_READ_CR3 0x003 268#define SVM_EXIT_READ_CR3 0x003
251#define SVM_EXIT_READ_CR4 0x004 269#define SVM_EXIT_READ_CR4 0x004
@@ -316,6 +334,7 @@ struct __attribute__ ((__packed__)) vmcb {
316#define SVM_EXIT_MONITOR 0x08a 334#define SVM_EXIT_MONITOR 0x08a
317#define SVM_EXIT_MWAIT 0x08b 335#define SVM_EXIT_MWAIT 0x08b
318#define SVM_EXIT_MWAIT_COND 0x08c 336#define SVM_EXIT_MWAIT_COND 0x08c
337#define SVM_EXIT_XSETBV 0x08d
319#define SVM_EXIT_NPF 0x400 338#define SVM_EXIT_NPF 0x400
320 339
321#define SVM_EXIT_ERR -1 340#define SVM_EXIT_ERR -1
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index 8085277e1b8b..977f1761a25d 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -5,17 +5,26 @@
5 5
6#ifdef CONFIG_SWIOTLB 6#ifdef CONFIG_SWIOTLB
7extern int swiotlb; 7extern int swiotlb;
8extern int __init pci_swiotlb_detect(void); 8extern int __init pci_swiotlb_detect_override(void);
9extern int __init pci_swiotlb_detect_4gb(void);
9extern void __init pci_swiotlb_init(void); 10extern void __init pci_swiotlb_init(void);
11extern void __init pci_swiotlb_late_init(void);
10#else 12#else
11#define swiotlb 0 13#define swiotlb 0
12static inline int pci_swiotlb_detect(void) 14static inline int pci_swiotlb_detect_override(void)
15{
16 return 0;
17}
18static inline int pci_swiotlb_detect_4gb(void)
13{ 19{
14 return 0; 20 return 0;
15} 21}
16static inline void pci_swiotlb_init(void) 22static inline void pci_swiotlb_init(void)
17{ 23{
18} 24}
25static inline void pci_swiotlb_late_init(void)
26{
27}
19#endif 28#endif
20 29
21static inline void dma_mark_clean(void *addr, size_t size) {} 30static inline void dma_mark_clean(void *addr, size_t size) {}
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 33ecc3ea8782..c2ff2a1d845e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -98,8 +98,6 @@ do { \
98 */ 98 */
99#define HAVE_DISABLE_HLT 99#define HAVE_DISABLE_HLT
100#else 100#else
101#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
102#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
103 101
104/* frame pointer must be last for get_wchan */ 102/* frame pointer must be last for get_wchan */
105#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" 103#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
@@ -305,24 +303,81 @@ static inline void native_wbinvd(void)
305#ifdef CONFIG_PARAVIRT 303#ifdef CONFIG_PARAVIRT
306#include <asm/paravirt.h> 304#include <asm/paravirt.h>
307#else 305#else
308#define read_cr0() (native_read_cr0())
309#define write_cr0(x) (native_write_cr0(x))
310#define read_cr2() (native_read_cr2())
311#define write_cr2(x) (native_write_cr2(x))
312#define read_cr3() (native_read_cr3())
313#define write_cr3(x) (native_write_cr3(x))
314#define read_cr4() (native_read_cr4())
315#define read_cr4_safe() (native_read_cr4_safe())
316#define write_cr4(x) (native_write_cr4(x))
317#define wbinvd() (native_wbinvd())
306
307static inline unsigned long read_cr0(void)
308{
309 return native_read_cr0();
310}
311
312static inline void write_cr0(unsigned long x)
313{
314 native_write_cr0(x);
315}
316
317static inline unsigned long read_cr2(void)
318{
319 return native_read_cr2();
320}
321
322static inline void write_cr2(unsigned long x)
323{
324 native_write_cr2(x);
325}
326
327static inline unsigned long read_cr3(void)
328{
329 return native_read_cr3();
330}
331
332static inline void write_cr3(unsigned long x)
333{
334 native_write_cr3(x);
335}
336
337static inline unsigned long read_cr4(void)
338{
339 return native_read_cr4();
340}
341
342static inline unsigned long read_cr4_safe(void)
343{
344 return native_read_cr4_safe();
345}
346
347static inline void write_cr4(unsigned long x)
348{
349 native_write_cr4(x);
350}
351
352static inline void wbinvd(void)
353{
354 native_wbinvd();
355}
356
318#ifdef CONFIG_X86_64 357#ifdef CONFIG_X86_64
319#define read_cr8() (native_read_cr8())
320#define write_cr8(x) (native_write_cr8(x))
321#define load_gs_index native_load_gs_index
358
359static inline unsigned long read_cr8(void)
360{
361 return native_read_cr8();
362}
363
364static inline void write_cr8(unsigned long x)
365{
366 native_write_cr8(x);
367}
368
369static inline void load_gs_index(unsigned selector)
370{
371 native_load_gs_index(selector);
372}
373
322#endif 374#endif
323 375
324/* Clear the 'TS' bit */ 376/* Clear the 'TS' bit */
325#define clts() (native_clts())
377static inline void clts(void)
378{
379 native_clts();
380}
326 381
327#endif/* CONFIG_PARAVIRT */ 382#endif/* CONFIG_PARAVIRT */
328 383
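The system.h rework above turns the non-paravirt read_cr*()/write_cr*() macros into real inline functions. A small demo of why that is worth the churn, assuming only standard C; native_write_cr0() here is a stub, not the real asm wrapper:

#include <stdio.h>

static unsigned long fake_cr0;

static void native_write_cr0(unsigned long x) { fake_cr0 = x; }

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);	/* argument has a real, checked type */
}

int main(void)
{
	write_cr0(0x80050033UL);
	/* write_cr0("oops") is now a clear compile-time diagnostic at the
	 * call site, where a bare macro would only warn deep inside its
	 * expansion; the argument is also evaluated exactly once. */
	printf("cr0 = %#lx\n", fake_cr0);
	return 0;
}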
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e091ad09..000000000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
1#ifndef _ASM_X86_SYSTEM_64_H
2#define _ASM_X86_SYSTEM_64_H
3
4#include <asm/segment.h>
5#include <asm/cmpxchg.h>
6
7
8static inline unsigned long read_cr8(void)
9{
10 unsigned long cr8;
11 asm volatile("movq %%cr8,%0" : "=r" (cr8));
12 return cr8;
13}
14
15static inline void write_cr8(unsigned long val)
16{
17 asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
18}
19
20#include <linux/irqflags.h>
21
22#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index f0b6e5dbc5a0..1f2e61e28981 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -161,8 +161,14 @@ struct thread_info {
161 161
162#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 162#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
163 163
164#define alloc_thread_info(tsk) \
165 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
164#define alloc_thread_info_node(tsk, node) \
165({ \
166 struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
167 THREAD_ORDER); \
168 struct thread_info *ret = page ? page_address(page) : NULL; \
169 \
170 ret; \
171})
166 172
167#ifdef CONFIG_X86_32 173#ifdef CONFIG_X86_32
168 174
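The new alloc_thread_info_node() above is a GNU statement expression: ({ ... }) lets the macro run several statements, NULL-check the page, and still yield a value at the call site. A user-space sketch of that construct, assuming GCC or Clang; malloc() stands in for the node-aware page allocator:

#include <stdio.h>
#include <stdlib.h>

struct thread_info { int cpu; };

#define alloc_thread_info_node(tsk, node)				\
({									\
	void *page = malloc(4096); /* alloc_pages_node() stand-in */	\
	struct thread_info *ret = page ? (struct thread_info *)page	\
				       : NULL;				\
									\
	ret;	/* last expression is the macro's value */		\
})

int main(void)
{
	struct thread_info *ti = alloc_thread_info_node(NULL, 0);

	if (!ti)
		return 1;
	ti->cpu = 0;
	printf("thread_info at %p for cpu %d\n", (void *)ti, ti->cpu);
	free(ti);
	return 0;
}

The node argument is what the plain alloc_thread_info() could not express: the stack pages can now come from the NUMA node the task will run on.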
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 5469630b27f5..fa7b9176b76c 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -10,12 +10,6 @@
10unsigned long long native_sched_clock(void); 10unsigned long long native_sched_clock(void);
11extern int recalibrate_cpu_khz(void); 11extern int recalibrate_cpu_khz(void);
12 12
13#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
14extern int timer_ack;
15#else
16# define timer_ack (0)
17#endif
18
19extern int no_timer_check; 13extern int no_timer_check;
20 14
21/* Accelerators for sched_clock() 15/* Accelerators for sched_clock()
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 7f3eba08e7de..169be8938b96 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -172,6 +172,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
172 flush_tlb_all(); 172 flush_tlb_all();
173} 173}
174 174
175extern void zap_low_mappings(bool early);
176
177#endif /* _ASM_X86_TLBFLUSH_H */ 175#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 21899cc31e52..c00692476e9f 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -47,21 +47,6 @@
47 47
48#include <asm/mpspec.h> 48#include <asm/mpspec.h>
49 49
50#ifdef CONFIG_X86_32
51
52/* Mappings between logical cpu number and node number */
53extern int cpu_to_node_map[];
54
55/* Returns the number of the node containing CPU 'cpu' */
56static inline int __cpu_to_node(int cpu)
57{
58 return cpu_to_node_map[cpu];
59}
60#define early_cpu_to_node __cpu_to_node
61#define cpu_to_node __cpu_to_node
62
63#else /* CONFIG_X86_64 */
64
65/* Mappings between logical cpu number and node number */ 50/* Mappings between logical cpu number and node number */
66DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); 51DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
67 52
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)
84 69
85#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 70#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
86 71
87#endif /* CONFIG_X86_64 */
88
89/* Mappings between node number and cpus on that node. */ 72/* Mappings between node number and cpus on that node. */
90extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 73extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
91 74
@@ -110,19 +93,11 @@ extern void setup_node_to_cpumask_map(void);
110#define pcibus_to_node(bus) __pcibus_to_node(bus) 93#define pcibus_to_node(bus) __pcibus_to_node(bus)
111 94
112#ifdef CONFIG_X86_32 95#ifdef CONFIG_X86_32
113extern unsigned long node_start_pfn[];
114extern unsigned long node_end_pfn[];
115extern unsigned long node_remap_size[];
116#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])
117
118# define SD_CACHE_NICE_TRIES 1 96# define SD_CACHE_NICE_TRIES 1
119# define SD_IDLE_IDX 1 97# define SD_IDLE_IDX 1
120
121#else 98#else
122
123# define SD_CACHE_NICE_TRIES 2 99# define SD_CACHE_NICE_TRIES 2
124# define SD_IDLE_IDX 2 100# define SD_IDLE_IDX 2
125
126#endif 101#endif
127 102
128/* sched_domains SD_NODE_INIT for NUMA machines */ 103/* sched_domains SD_NODE_INIT for NUMA machines */
@@ -155,7 +130,7 @@ extern unsigned long node_remap_size[];
155 .balance_interval = 1, \ 130 .balance_interval = 1, \
156} 131}
157 132
158#ifdef CONFIG_X86_64_ACPI_NUMA 133#ifdef CONFIG_X86_64
159extern int __node_distance(int, int); 134extern int __node_distance(int, int);
160#define node_distance(a, b) __node_distance(a, b) 135#define node_distance(a, b) __node_distance(a, b)
161#endif 136#endif
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 4dde797c0578..feca3118a73b 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -3,28 +3,36 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#ifdef CONFIG_X86_TRAMPOLINE 6#include <linux/types.h>
7#include <asm/io.h>
8
7/* 9/*
8 * Trampoline 80x86 program as an array. 10 * Trampoline 80x86 program as an array. These are in the init rodata
11 * segment, but that's okay, because we only care about the relative
12 * addresses of the symbols.
9 */ 13 */
10extern const unsigned char trampoline_data []; 14extern const unsigned char x86_trampoline_start [];
11extern const unsigned char trampoline_end []; 15extern const unsigned char x86_trampoline_end [];
12extern unsigned char *trampoline_base; 16extern unsigned char *x86_trampoline_base;
13 17
14extern unsigned long init_rsp; 18extern unsigned long init_rsp;
15extern unsigned long initial_code; 19extern unsigned long initial_code;
16extern unsigned long initial_page_table;
17extern unsigned long initial_gs; 20extern unsigned long initial_gs;
18 21
19#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) 22extern void __init setup_trampolines(void);
23
24extern const unsigned char trampoline_data[];
25extern const unsigned char trampoline_status[];
26
27#define TRAMPOLINE_SYM(x) \
28 ((void *)(x86_trampoline_base + \
29 ((const unsigned char *)(x) - x86_trampoline_start)))
20 30
21extern unsigned long setup_trampoline(void);
22extern void __init setup_trampoline_page_table(void);
23extern void __init reserve_trampoline_memory(void);
24#else
25static inline void setup_trampoline_page_table(void) {}
26static inline void reserve_trampoline_memory(void) {}
27#endif /* CONFIG_X86_TRAMPOLINE */
31/* Address of the SMP trampoline */
32static inline unsigned long trampoline_address(void)
33{
34 return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
35}
28 36
29#endif /* __ASSEMBLY__ */ 37#endif /* __ASSEMBLY__ */
30 38
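TRAMPOLINE_SYM(), introduced above, relocates a symbol from where the trampoline blob was linked to where it was copied: run-time address = copy base + (symbol - section start). A user-space sketch of that arithmetic; the byte arrays stand in for the linker-provided section bounds and the low-memory copy:

#include <assert.h>
#include <stdio.h>
#include <string.h>

static const unsigned char x86_trampoline_start[64] = "trampoline blob";
static const unsigned char *const trampoline_data =
	x86_trampoline_start + 16;	/* a symbol 16 bytes into the blob */
static unsigned char x86_trampoline_base[64];	/* the low-memory copy */

#define TRAMPOLINE_SYM(x)					\
	((void *)(x86_trampoline_base +				\
		  ((const unsigned char *)(x) - x86_trampoline_start)))

int main(void)
{
	/* setup_trampolines() does this copy into reserved low memory */
	memcpy(x86_trampoline_base, x86_trampoline_start,
	       sizeof(x86_trampoline_start));

	unsigned char *reloc = TRAMPOLINE_SYM(trampoline_data);

	assert(reloc == x86_trampoline_base + 16);
	printf("trampoline_data relocated to offset %td\n",
	       reloc - x86_trampoline_base);
	return 0;
}

Only relative offsets within the blob matter, which is why the header can leave the section itself in init rodata.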
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index f66cda56781d..0310da67307f 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -30,6 +30,7 @@ asmlinkage void segment_not_present(void);
30asmlinkage void stack_segment(void); 30asmlinkage void stack_segment(void);
31asmlinkage void general_protection(void); 31asmlinkage void general_protection(void);
32asmlinkage void page_fault(void); 32asmlinkage void page_fault(void);
33asmlinkage void async_page_fault(void);
33asmlinkage void spurious_interrupt_bug(void); 34asmlinkage void spurious_interrupt_bug(void);
34asmlinkage void coprocessor_error(void); 35asmlinkage void coprocessor_error(void);
35asmlinkage void alignment_check(void); 36asmlinkage void alignment_check(void);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 1ca132fc0d03..9db5583b6d38 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
35static __always_inline cycles_t vget_cycles(void) 35static __always_inline cycles_t vget_cycles(void)
36{ 36{
37 /* 37 /*
38 * We only do VDSOs on TSC capable CPUs, so this shouldnt 38 * We only do VDSOs on TSC capable CPUs, so this shouldn't
39 * access boot_cpu_data (which is not VDSO-safe): 39 * access boot_cpu_data (which is not VDSO-safe):
40 */ 40 */
41#ifndef CONFIG_X86_TSC 41#ifndef CONFIG_X86_TSC
@@ -51,6 +51,10 @@ extern int unsynchronized_tsc(void);
51extern int check_tsc_unstable(void); 51extern int check_tsc_unstable(void);
52extern unsigned long native_calibrate_tsc(void); 52extern unsigned long native_calibrate_tsc(void);
53 53
54#ifdef CONFIG_X86_64
55extern cycles_t vread_tsc(void);
56#endif
57
54/* 58/*
55 * Boot-time check whether the TSCs are synchronized across 59 * Boot-time check whether the TSCs are synchronized across
56 * all CPUs/cores: 60 * all CPUs/cores:
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
index df1da20f4534..8e8c23fef08c 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/asm/types.h
@@ -1,22 +1,6 @@
1#ifndef _ASM_X86_TYPES_H 1#ifndef _ASM_X86_TYPES_H
2#define _ASM_X86_TYPES_H 2#define _ASM_X86_TYPES_H
3 3
4#define dma_addr_t dma_addr_t
5
6#include <asm-generic/types.h> 4#include <asm-generic/types.h>
7 5
8#ifdef __KERNEL__
9#ifndef __ASSEMBLY__
10
11typedef u64 dma64_addr_t;
12#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
13/* DMA addresses come in 32-bit and 64-bit flavours. */
14typedef u64 dma_addr_t;
15#else
16typedef u32 dma_addr_t;
17#endif
18
19#endif /* __ASSEMBLY__ */
20#endif /* __KERNEL__ */
21
22#endif /* _ASM_X86_TYPES_H */ 6#endif /* _ASM_X86_TYPES_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index abd3e0ea762a..99ddd148a760 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -6,7 +6,6 @@
6#include <linux/errno.h> 6#include <linux/errno.h>
7#include <linux/compiler.h> 7#include <linux/compiler.h>
8#include <linux/thread_info.h> 8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h> 9#include <linux/string.h>
11#include <asm/asm.h> 10#include <asm/asm.h>
12#include <asm/page.h> 11#include <asm/page.h>
@@ -42,7 +41,7 @@
42 * Returns 0 if the range is valid, nonzero otherwise. 41 * Returns 0 if the range is valid, nonzero otherwise.
43 * 42 *
44 * This is equivalent to the following test: 43 * This is equivalent to the following test:
45 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) 44 * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
46 * 45 *
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... 46 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */ 47 */
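The one-character comment fix above (">=" becomes ">") matters: a range is valid when addr + size still fits at or below the segment limit, so only a sum strictly greater than the limit is rejected. A user-space check of the "u33" idea, widening 32-bit user addresses to 64 bits so the addition can never wrap; the limit value is the conventional 3G/1G split, used here for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int range_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
	/* widen first: (u33)addr + (u33)size > (u33)limit => bad */
	return (uint64_t)addr + size <= limit;
}

int main(void)
{
	uint32_t limit = 0xc0000000;	/* TASK_SIZE on a 3G/1G split */

	assert(range_ok(0xbfffff00, 0x100, limit));  /* ends exactly at limit */
	assert(!range_ok(0xbfffff00, 0x101, limit)); /* one byte past it */
	assert(!range_ok(0xffffffff, 2, limit));     /* would wrap in u32 */
	printf("range checks ok\n");
	return 0;
}

The real access_ok() gets the same effect with a carry check instead of wider arithmetic, but the corrected comment now describes the test accurately.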
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 088d09fb1615..566e803cc602 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -6,7 +6,6 @@
6 */ 6 */
7#include <linux/errno.h> 7#include <linux/errno.h>
8#include <linux/thread_info.h> 8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h> 9#include <linux/string.h>
11#include <asm/asm.h> 10#include <asm/asm.h>
12#include <asm/page.h> 11#include <asm/page.h>
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 316708d5af92..1c66d30971ad 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -6,7 +6,6 @@
6 */ 6 */
7#include <linux/compiler.h> 7#include <linux/compiler.h>
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/prefetch.h>
10#include <linux/lockdep.h> 9#include <linux/lockdep.h>
11#include <asm/alternative.h> 10#include <asm/alternative.h>
12#include <asm/cpufeature.h> 11#include <asm/cpufeature.h>
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index b7ba19acd3f8..2f6e127db30c 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -346,14 +346,20 @@
346#define __NR_fanotify_init 338 346#define __NR_fanotify_init 338
347#define __NR_fanotify_mark 339 347#define __NR_fanotify_mark 339
348#define __NR_prlimit64 340 348#define __NR_prlimit64 340
349#define __NR_name_to_handle_at 341
350#define __NR_open_by_handle_at 342
351#define __NR_clock_adjtime 343
352#define __NR_syncfs 344
353#define __NR_sendmmsg 345
354#define __NR_setns 346
349 355
350#define __NR_LITMUS 341 356#define __NR_LITMUS 347
351 357
352#include "litmus/unistd_32.h" 358#include "litmus/unistd_32.h"
353 359
354#ifdef __KERNEL__ 360#ifdef __KERNEL__
355 361
356#define NR_syscalls 341 + NR_litmus_syscalls 362#define NR_syscalls 347 + NR_litmus_syscalls
357 363
358#define __ARCH_WANT_IPC_PARSE_VERSION 364#define __ARCH_WANT_IPC_PARSE_VERSION
359#define __ARCH_WANT_OLD_READDIR 365#define __ARCH_WANT_OLD_READDIR
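The hunk above shifts __NR_LITMUS from 341 to 347 because six new upstream syscalls took the intervening slots, and bumps NR_syscalls to match. User space reaches a syscall purely by its number, which is why the renumbering matters: a binary built against the old table would land in the wrong entry. A quick, runnable demonstration with a stable, well-known number via syscall(2):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* SYS_getpid resolves to this architecture's __NR_getpid */
	long pid = syscall(SYS_getpid);

	printf("getpid via raw syscall number %d: %ld\n",
	       (int)SYS_getpid, pid);
	return 0;
}

Keeping the LITMUS^RT numbers stacked after the last upstream number (and deriving NR_syscalls as base + NR_litmus_syscalls) means every upstream addition forces exactly this kind of rebase.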
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 332bf3c9c84c..e347f0773788 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -669,8 +669,20 @@ __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
669__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) 669__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
670#define __NR_prlimit64 302 670#define __NR_prlimit64 302
671__SYSCALL(__NR_prlimit64, sys_prlimit64) 671__SYSCALL(__NR_prlimit64, sys_prlimit64)
672 672#define __NR_name_to_handle_at 303
673#define __NR_LITMUS 303 673__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
674#define __NR_open_by_handle_at 304
675__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
676#define __NR_clock_adjtime 305
677__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
678#define __NR_syncfs 306
679__SYSCALL(__NR_syncfs, sys_syncfs)
680#define __NR_sendmmsg 307
681__SYSCALL(__NR_sendmmsg, sys_sendmmsg)
682#define __NR_setns 308
683__SYSCALL(__NR_setns, sys_setns)
684
685#define __NR_LITMUS 309
674 686
675#include "litmus/unistd_64.h" 687#include "litmus/unistd_64.h"
676 688
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 42d412fd8b02..a291c40efd43 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV Broadcast Assist Unit definitions 6 * SGI UV Broadcast Assist Unit definitions
7 * 7 *
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_BAU_H 11#ifndef _ASM_X86_UV_UV_BAU_H
@@ -26,24 +26,29 @@
26 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512, 26 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on. 27 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
28 * 28 *
29 * We will use 31 sets, one for sending BAU messages from each of the 32 29 * We will use one set for sending BAU messages from each of the
30 * cpu's on the uvhub. 30 * cpu's on the uvhub.
31 * 31 *
32 * TLB shootdown will use the first of the 8 descriptors of each set. 32 * TLB shootdown will use the first of the 8 descriptors of each set.
33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set). 33 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
34 */ 34 */
35 35
36#define UV_ITEMS_PER_DESCRIPTOR 8 36#define MAX_CPUS_PER_UVHUB 64
37#define MAX_CPUS_PER_SOCKET 32
38#define ADP_SZ 64 /* hardware-provided max. */
39#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
40#define ITEMS_PER_DESC 8
37/* the 'throttle' to prevent the hardware stay-busy bug */ 41/* the 'throttle' to prevent the hardware stay-busy bug */
38#define MAX_BAU_CONCURRENT 3 42#define MAX_BAU_CONCURRENT 3
39#define UV_CPUS_PER_ACT_STATUS 32
40#define UV_ACT_STATUS_MASK 0x3 43#define UV_ACT_STATUS_MASK 0x3
41#define UV_ACT_STATUS_SIZE 2 44#define UV_ACT_STATUS_SIZE 2
42#define UV_ADP_SIZE 32
43#define UV_DISTRIBUTION_SIZE 256 45#define UV_DISTRIBUTION_SIZE 256
44#define UV_SW_ACK_NPENDING 8 46#define UV_SW_ACK_NPENDING 8
45#define UV_NET_ENDPOINT_INTD 0x38 47#define UV1_NET_ENDPOINT_INTD 0x38
46#define UV_DESC_BASE_PNODE_SHIFT 49 48#define UV2_NET_ENDPOINT_INTD 0x28
49#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
50 UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
51#define UV_DESC_PSHIFT 49
47#define UV_PAYLOADQ_PNODE_SHIFT 49 52#define UV_PAYLOADQ_PNODE_SHIFT 49
48#define UV_PTC_BASENAME "sgi_uv/ptc_statistics" 53#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
49#define UV_BAU_BASENAME "sgi_uv/bau_tunables" 54#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
@@ -51,29 +56,64 @@
51#define UV_BAU_TUNABLES_FILE "bau_tunables" 56#define UV_BAU_TUNABLES_FILE "bau_tunables"
52#define WHITESPACE " \t\n" 57#define WHITESPACE " \t\n"
53#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask)) 58#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
54#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15 59#define cpubit_isset(cpu, bau_local_cpumask) \
55#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16 60 test_bit((cpu), (bau_local_cpumask).bits)
56#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL 61
57/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */ 62/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
58#define BAU_MISC_CONTROL_MULT_MASK 3 63/*
64 * UV2: Bit 19 selects between
65 * (0): 10 microsecond timebase and
66 * (1): 80 microseconds
 67 * we're using 650us (65 units of 10us), similar to UV1
68 */
69#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
70#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (65*10UL)
71
72#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
73 UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
74 UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
59 75
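A sketch of how the selected period might be programmed into the misc-control MMR, using the UV_SA_MASK and SOFTACK_PSHIFT shorthands defined further down in this header; the pnode variable and the read-modify-write sequence are illustrative assumptions, not code from this patch:

/* illustrative only: select and program the hub-appropriate period */
unsigned long mmr = read_mmr_misc_control(pnode);

mmr &= ~((unsigned long)UV_SA_MASK);	/* clear the old period field */
mmr |= UV_INTD_SOFT_ACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT;
write_mmr_misc_control(pnode, mmr);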
60#define UVH_AGING_PRESCALE_SEL 0x000000b000UL 76#define BAU_MISC_CONTROL_MULT_MASK 3
77
78#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
61/* [30:28] URGENCY_7 an index into a table of times */ 79/* [30:28] URGENCY_7 an index into a table of times */
62#define BAU_URGENCY_7_SHIFT 28 80#define BAU_URGENCY_7_SHIFT 28
63#define BAU_URGENCY_7_MASK 7 81#define BAU_URGENCY_7_MASK 7
64 82
65#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL 83#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
66/* [45:40] BAU - BAU transaction timeout select - a multiplier */ 84/* [45:40] BAU - BAU transaction timeout select - a multiplier */
67#define BAU_TRANS_SHIFT 40 85#define BAU_TRANS_SHIFT 40
68#define BAU_TRANS_MASK 0x3f 86#define BAU_TRANS_MASK 0x3f
87
88/*
89 * shorten some awkward names
90 */
91#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
92#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
93#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
94#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
95#define write_gmmr uv_write_global_mmr64
96#define write_lmmr uv_write_local_mmr
97#define read_lmmr uv_read_local_mmr
98#define read_gmmr uv_read_global_mmr64
69 99
70/* 100/*
71 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 101 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
72 */ 102 */
73#define DESC_STATUS_IDLE 0 103#define DS_IDLE 0
74#define DESC_STATUS_ACTIVE 1 104#define DS_ACTIVE 1
75#define DESC_STATUS_DESTINATION_TIMEOUT 2 105#define DS_DESTINATION_TIMEOUT 2
76#define DESC_STATUS_SOURCE_TIMEOUT 3 106#define DS_SOURCE_TIMEOUT 3
107/*
108 * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
109 * values 1 and 5 will not occur
110 */
111#define UV2H_DESC_IDLE 0
112#define UV2H_DESC_DEST_TIMEOUT 2
113#define UV2H_DESC_DEST_STRONG_NACK 3
114#define UV2H_DESC_BUSY 4
115#define UV2H_DESC_SOURCE_TIMEOUT 6
116#define UV2H_DESC_DEST_PUT_ERR 7
77 117
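The UV2H_DESC_* codes above are effectively 3-bit values: the low two bits are the UV1-style status from ACTIVATION_STATUS_0/1, and the third bit comes from the new ACTIVATION_STATUS_2 register. A hedged sketch of the combination (s01, s2 and idx are assumed inputs):

/* sketch: combine the 2-bit UV1-style status with the UV2 extension
 * bit; idx is the descriptor's position within the status registers */
static int uv2_desc_status(unsigned long s01, unsigned long s2, int idx)
{
	int stat = (s01 >> (idx * UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK;

	stat |= ((s2 >> idx) & 1UL) << 2;
	return stat;			/* one of the UV2H_DESC_* values */
}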
78/* 118/*
79 * delay for 'plugged' timeout retries, in microseconds 119 * delay for 'plugged' timeout retries, in microseconds
@@ -84,13 +124,24 @@
 84 * thresholds at which to use IPI to free resources 124 * thresholds at which to use IPI to free resources
85 */ 125 */
86/* after this # consecutive 'plugged' timeouts, use IPI to release resources */ 126/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
87#define PLUGSB4RESET 100 127#define PLUGSB4RESET 100
88/* after this many consecutive timeouts, use IPI to release resources */ 128/* after this many consecutive timeouts, use IPI to release resources */
89#define TIMEOUTSB4RESET 1 129#define TIMEOUTSB4RESET 1
 90/* after this many uses of IPI to release resources, give up the request */ 130/* after this many uses of IPI to release resources, give up the request */
91#define IPI_RESET_LIMIT 1 131#define IPI_RESET_LIMIT 1
92/* after this # consecutive successes, bump up the throttle if it was lowered */ 132/* after this # consecutive successes, bump up the throttle if it was lowered */
93#define COMPLETE_THRESHOLD 5 133#define COMPLETE_THRESHOLD 5
134
135#define UV_LB_SUBNODEID 0x10
136
137/* these two are the same for UV1 and UV2: */
138#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
139#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
140/* 4 bits of software ack period */
141#define UV2_ACK_MASK 0x7UL
142#define UV2_ACK_UNITS_SHFT 3
143#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
144#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
94 145
95/* 146/*
96 * number of entries in the destination side payload queue 147 * number of entries in the destination side payload queue
@@ -100,7 +151,6 @@
100 * number of destination side software ack resources 151 * number of destination side software ack resources
101 */ 152 */
102#define DEST_NUM_RESOURCES 8 153#define DEST_NUM_RESOURCES 8
103#define MAX_CPUS_PER_NODE 32
104/* 154/*
105 * completion statuses for sending a TLB flush message 155 * completion statuses for sending a TLB flush message
106 */ 156 */
@@ -112,9 +162,16 @@
112/* 162/*
113 * tuning the action when the numalink network is extremely delayed 163 * tuning the action when the numalink network is extremely delayed
114 */ 164 */
115#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */ 165#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
116#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */ 166 microseconds */
117#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */ 167#define CONGESTED_REPS 10 /* long delays averaged over
168 this many broadcasts */
169#define CONGESTED_PERIOD 30 /* time for the bau to be
170 disabled, in seconds */
171/* see msg_type: */
172#define MSG_NOOP 0
173#define MSG_REGULAR 1
174#define MSG_RETRY 2
118 175
119/* 176/*
120 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) 177 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -123,11 +180,11 @@
123 * The distribution specification (32 bytes) is interpreted as a 256-bit 180 * The distribution specification (32 bytes) is interpreted as a 256-bit
124 * distribution vector. Adjacent bits correspond to consecutive even numbered 181 * distribution vector. Adjacent bits correspond to consecutive even numbered
125 * nodeIDs. The result of adding the index of a given bit to the 15-bit 182 * nodeIDs. The result of adding the index of a given bit to the 15-bit
126 * 'base_dest_nodeid' field of the header corresponds to the 183 * 'base_dest_nasid' field of the header corresponds to the
127 * destination nodeID associated with that specified bit. 184 * destination nodeID associated with that specified bit.
128 */ 185 */
129struct bau_target_uvhubmask { 186struct bau_targ_hubmask {
130 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)]; 187 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
131}; 188};
132 189
133/* 190/*
@@ -136,7 +193,7 @@ struct bau_target_uvhubmask {
136 * enough bits for max. cpu's per uvhub) 193 * enough bits for max. cpu's per uvhub)
137 */ 194 */
138struct bau_local_cpumask { 195struct bau_local_cpumask {
139 unsigned long bits; 196 unsigned long bits;
140}; 197};
141 198
142/* 199/*
@@ -157,14 +214,14 @@ struct bau_local_cpumask {
157 * The payload is software-defined for INTD transactions 214 * The payload is software-defined for INTD transactions
158 */ 215 */
159struct bau_msg_payload { 216struct bau_msg_payload {
160 unsigned long address; /* signifies a page or all TLB's 217 unsigned long address; /* signifies a page or all
161 of the cpu */ 218 TLB's of the cpu */
162 /* 64 bits */ 219 /* 64 bits */
163 unsigned short sending_cpu; /* filled in by sender */ 220 unsigned short sending_cpu; /* filled in by sender */
164 /* 16 bits */ 221 /* 16 bits */
165 unsigned short acknowledge_count;/* filled in by destination */ 222 unsigned short acknowledge_count; /* filled in by destination */
166 /* 16 bits */ 223 /* 16 bits */
167 unsigned int reserved1:32; /* not usable */ 224 unsigned int reserved1:32; /* not usable */
168}; 225};
169 226
170 227
@@ -173,93 +230,96 @@ struct bau_msg_payload {
 173 * see table 4.2.3.0.1 in broadcast_assist spec. 230 * see table 4.2.3.0.1 in broadcast_assist spec.
174 */ 231 */
175struct bau_msg_header { 232struct bau_msg_header {
176 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 233 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
177 /* bits 5:0 */ 234 /* bits 5:0 */
178 unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ 235 unsigned int base_dest_nasid:15; /* nasid of the first bit */
179 /* bits 20:6 */ /* first bit in uvhub map */ 236 /* bits 20:6 */ /* in uvhub map */
180 unsigned int command:8; /* message type */ 237 unsigned int command:8; /* message type */
181 /* bits 28:21 */ 238 /* bits 28:21 */
182 /* 0x38: SN3net EndPoint Message */ 239 /* 0x38: SN3net EndPoint Message */
183 unsigned int rsvd_1:3; /* must be zero */ 240 unsigned int rsvd_1:3; /* must be zero */
184 /* bits 31:29 */ 241 /* bits 31:29 */
185 /* int will align on 32 bits */ 242 /* int will align on 32 bits */
186 unsigned int rsvd_2:9; /* must be zero */ 243 unsigned int rsvd_2:9; /* must be zero */
187 /* bits 40:32 */ 244 /* bits 40:32 */
188 /* Suppl_A is 56-41 */ 245 /* Suppl_A is 56-41 */
189 unsigned int sequence:16;/* message sequence number */ 246 unsigned int sequence:16; /* message sequence number */
190 /* bits 56:41 */ /* becomes bytes 16-17 of msg */ 247 /* bits 56:41 */ /* becomes bytes 16-17 of msg */
191 /* Address field (96:57) is never used as an 248 /* Address field (96:57) is
192 address (these are address bits 42:3) */ 249 never used as an address
193 250 (these are address bits
194 unsigned int rsvd_3:1; /* must be zero */ 251 42:3) */
252
253 unsigned int rsvd_3:1; /* must be zero */
195 /* bit 57 */ 254 /* bit 57 */
196 /* address bits 27:4 are payload */ 255 /* address bits 27:4 are payload */
197 /* these next 24 (58-81) bits become bytes 12-14 of msg */ 256 /* these next 24 (58-81) bits become bytes 12-14 of msg */
198
199 /* bits 65:58 land in byte 12 */ 257 /* bits 65:58 land in byte 12 */
200 unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ 258 unsigned int replied_to:1; /* sent as 0 by the source to
259 byte 12 */
201 /* bit 58 */ 260 /* bit 58 */
202 unsigned int msg_type:3; /* software type of the message*/ 261 unsigned int msg_type:3; /* software type of the
262 message */
203 /* bits 61:59 */ 263 /* bits 61:59 */
204 unsigned int canceled:1; /* message canceled, resource to be freed*/ 264 unsigned int canceled:1; /* message canceled, resource
265 is to be freed*/
205 /* bit 62 */ 266 /* bit 62 */
206 unsigned int payload_1a:1;/* not currently used */ 267 unsigned int payload_1a:1; /* not currently used */
207 /* bit 63 */ 268 /* bit 63 */
208 unsigned int payload_1b:2;/* not currently used */ 269 unsigned int payload_1b:2; /* not currently used */
209 /* bits 65:64 */ 270 /* bits 65:64 */
210 271
211 /* bits 73:66 land in byte 13 */ 272 /* bits 73:66 land in byte 13 */
212 unsigned int payload_1ca:6;/* not currently used */ 273 unsigned int payload_1ca:6; /* not currently used */
213 /* bits 71:66 */ 274 /* bits 71:66 */
214 unsigned int payload_1c:2;/* not currently used */ 275 unsigned int payload_1c:2; /* not currently used */
215 /* bits 73:72 */ 276 /* bits 73:72 */
216 277
217 /* bits 81:74 land in byte 14 */ 278 /* bits 81:74 land in byte 14 */
218 unsigned int payload_1d:6;/* not currently used */ 279 unsigned int payload_1d:6; /* not currently used */
219 /* bits 79:74 */ 280 /* bits 79:74 */
220 unsigned int payload_1e:2;/* not currently used */ 281 unsigned int payload_1e:2; /* not currently used */
221 /* bits 81:80 */ 282 /* bits 81:80 */
222 283
223 unsigned int rsvd_4:7; /* must be zero */ 284 unsigned int rsvd_4:7; /* must be zero */
224 /* bits 88:82 */ 285 /* bits 88:82 */
225 unsigned int sw_ack_flag:1;/* software acknowledge flag */ 286 unsigned int swack_flag:1; /* software acknowledge flag */
226 /* bit 89 */ 287 /* bit 89 */
 227 /* INTD transactions at destination are to 288 /* INTD transactions at
228 wait for software acknowledge */ 289 destination are to wait for
229 unsigned int rsvd_5:6; /* must be zero */ 290 software acknowledge */
291 unsigned int rsvd_5:6; /* must be zero */
230 /* bits 95:90 */ 292 /* bits 95:90 */
231 unsigned int rsvd_6:5; /* must be zero */ 293 unsigned int rsvd_6:5; /* must be zero */
232 /* bits 100:96 */ 294 /* bits 100:96 */
233 unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */ 295 unsigned int int_both:1; /* if 1, interrupt both sockets
296 on the uvhub */
234 /* bit 101*/ 297 /* bit 101*/
235 unsigned int fairness:3;/* usually zero */ 298 unsigned int fairness:3; /* usually zero */
236 /* bits 104:102 */ 299 /* bits 104:102 */
237 unsigned int multilevel:1; /* multi-level multicast format */ 300 unsigned int multilevel:1; /* multi-level multicast
301 format */
238 /* bit 105 */ 302 /* bit 105 */
239 /* 0 for TLB: endpoint multi-unicast messages */ 303 /* 0 for TLB: endpoint multi-unicast messages */
240 unsigned int chaining:1;/* next descriptor is part of this activation*/ 304 unsigned int chaining:1; /* next descriptor is part of
305 this activation*/
241 /* bit 106 */ 306 /* bit 106 */
242 unsigned int rsvd_7:21; /* must be zero */ 307 unsigned int rsvd_7:21; /* must be zero */
243 /* bits 127:107 */ 308 /* bits 127:107 */
244}; 309};
245 310
246/* see msg_type: */
247#define MSG_NOOP 0
248#define MSG_REGULAR 1
249#define MSG_RETRY 2
250
251/* 311/*
252 * The activation descriptor: 312 * The activation descriptor:
253 * The format of the message to send, plus all accompanying control 313 * The format of the message to send, plus all accompanying control
254 * Should be 64 bytes 314 * Should be 64 bytes
255 */ 315 */
256struct bau_desc { 316struct bau_desc {
257 struct bau_target_uvhubmask distribution; 317 struct bau_targ_hubmask distribution;
258 /* 318 /*
259 * message template, consisting of header and payload: 319 * message template, consisting of header and payload:
260 */ 320 */
261 struct bau_msg_header header; 321 struct bau_msg_header header;
262 struct bau_msg_payload payload; 322 struct bau_msg_payload payload;
263}; 323};
264/* 324/*
265 * -payload-- ---------header------ 325 * -payload-- ---------header------
@@ -278,59 +338,51 @@ struct bau_desc {
278 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17 338 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
279 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120) 339 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
280 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from 340 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
281 * sw_ack_vector and payload_2) 341 * swack_vec and payload_2)
282 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software 342 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
283 * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload 343 * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
284 * operation." 344 * operation."
285 */ 345 */
286struct bau_payload_queue_entry { 346struct bau_pq_entry {
287 unsigned long address; /* signifies a page or all TLB's 347 unsigned long address; /* signifies a page or all TLB's
288 of the cpu */ 348 of the cpu */
289 /* 64 bits, bytes 0-7 */ 349 /* 64 bits, bytes 0-7 */
290 350 unsigned short sending_cpu; /* cpu that sent the message */
291 unsigned short sending_cpu; /* cpu that sent the message */
292 /* 16 bits, bytes 8-9 */ 351 /* 16 bits, bytes 8-9 */
293 352 unsigned short acknowledge_count; /* filled in by destination */
294 unsigned short acknowledge_count; /* filled in by destination */
295 /* 16 bits, bytes 10-11 */ 353 /* 16 bits, bytes 10-11 */
296
297 /* these next 3 bytes come from bits 58-81 of the message header */ 354 /* these next 3 bytes come from bits 58-81 of the message header */
298 unsigned short replied_to:1; /* sent as 0 by the source */ 355 unsigned short replied_to:1; /* sent as 0 by the source */
299 unsigned short msg_type:3; /* software message type */ 356 unsigned short msg_type:3; /* software message type */
300 unsigned short canceled:1; /* sent as 0 by the source */ 357 unsigned short canceled:1; /* sent as 0 by the source */
301 unsigned short unused1:3; /* not currently using */ 358 unsigned short unused1:3; /* not currently using */
302 /* byte 12 */ 359 /* byte 12 */
303 360 unsigned char unused2a; /* not currently using */
304 unsigned char unused2a; /* not currently using */
305 /* byte 13 */ 361 /* byte 13 */
306 unsigned char unused2; /* not currently using */ 362 unsigned char unused2; /* not currently using */
307 /* byte 14 */ 363 /* byte 14 */
308 364 unsigned char swack_vec; /* filled in by the hardware */
309 unsigned char sw_ack_vector; /* filled in by the hardware */
310 /* byte 15 (bits 127:120) */ 365 /* byte 15 (bits 127:120) */
311 366 unsigned short sequence; /* message sequence number */
312 unsigned short sequence; /* message sequence number */
313 /* bytes 16-17 */ 367 /* bytes 16-17 */
314 unsigned char unused4[2]; /* not currently using bytes 18-19 */ 368 unsigned char unused4[2]; /* not currently using bytes 18-19 */
315 /* bytes 18-19 */ 369 /* bytes 18-19 */
316 370 int number_of_cpus; /* filled in at destination */
317 int number_of_cpus; /* filled in at destination */
318 /* 32 bits, bytes 20-23 (aligned) */ 371 /* 32 bits, bytes 20-23 (aligned) */
319 372 unsigned char unused5[8]; /* not using */
320 unsigned char unused5[8]; /* not using */
321 /* bytes 24-31 */ 373 /* bytes 24-31 */
322}; 374};
323 375
324struct msg_desc { 376struct msg_desc {
325 struct bau_payload_queue_entry *msg; 377 struct bau_pq_entry *msg;
326 int msg_slot; 378 int msg_slot;
327 int sw_ack_slot; 379 int swack_slot;
328 struct bau_payload_queue_entry *va_queue_first; 380 struct bau_pq_entry *queue_first;
329 struct bau_payload_queue_entry *va_queue_last; 381 struct bau_pq_entry *queue_last;
330}; 382};
331 383
332struct reset_args { 384struct reset_args {
333 int sender; 385 int sender;
334}; 386};
335 387
336/* 388/*
@@ -338,105 +390,226 @@ struct reset_args {
338 */ 390 */
339struct ptc_stats { 391struct ptc_stats {
340 /* sender statistics */ 392 /* sender statistics */
341 unsigned long s_giveup; /* number of fall backs to IPI-style flushes */ 393 unsigned long s_giveup; /* number of fall backs to
342 unsigned long s_requestor; /* number of shootdown requests */ 394 IPI-style flushes */
343 unsigned long s_stimeout; /* source side timeouts */ 395 unsigned long s_requestor; /* number of shootdown
344 unsigned long s_dtimeout; /* destination side timeouts */ 396 requests */
345 unsigned long s_time; /* time spent in sending side */ 397 unsigned long s_stimeout; /* source side timeouts */
346 unsigned long s_retriesok; /* successful retries */ 398 unsigned long s_dtimeout; /* destination side timeouts */
347 unsigned long s_ntargcpu; /* total number of cpu's targeted */ 399 unsigned long s_time; /* time spent in sending side */
348 unsigned long s_ntargself; /* times the sending cpu was targeted */ 400 unsigned long s_retriesok; /* successful retries */
349 unsigned long s_ntarglocals; /* targets of cpus on the local blade */ 401 unsigned long s_ntargcpu; /* total number of cpu's
350 unsigned long s_ntargremotes; /* targets of cpus on remote blades */ 402 targeted */
351 unsigned long s_ntarglocaluvhub; /* targets of the local hub */ 403 unsigned long s_ntargself; /* times the sending cpu was
352 unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */ 404 targeted */
353 unsigned long s_ntarguvhub; /* total number of uvhubs targeted */ 405 unsigned long s_ntarglocals; /* targets of cpus on the local
354 unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/ 406 blade */
355 unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */ 407 unsigned long s_ntargremotes; /* targets of cpus on remote
356 unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */ 408 blades */
357 unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */ 409 unsigned long s_ntarglocaluvhub; /* targets of the local hub */
358 unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */ 410 unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
359 unsigned long s_resets_plug; /* ipi-style resets from plug state */ 411 unsigned long s_ntarguvhub; /* total number of uvhubs
360 unsigned long s_resets_timeout; /* ipi-style resets from timeouts */ 412 targeted */
361 unsigned long s_busy; /* status stayed busy past s/w timer */ 413 unsigned long s_ntarguvhub16; /* number of times target
362 unsigned long s_throttles; /* waits in throttle */ 414 hubs >= 16*/
363 unsigned long s_retry_messages; /* retry broadcasts */ 415 unsigned long s_ntarguvhub8; /* number of times target
364 unsigned long s_bau_reenabled; /* for bau enable/disable */ 416 hubs >= 8 */
365 unsigned long s_bau_disabled; /* for bau enable/disable */ 417 unsigned long s_ntarguvhub4; /* number of times target
418 hubs >= 4 */
419 unsigned long s_ntarguvhub2; /* number of times target
420 hubs >= 2 */
421 unsigned long s_ntarguvhub1; /* number of times target
422 hubs == 1 */
423 unsigned long s_resets_plug; /* ipi-style resets from plug
424 state */
425 unsigned long s_resets_timeout; /* ipi-style resets from
426 timeouts */
427 unsigned long s_busy; /* status stayed busy past
428 s/w timer */
429 unsigned long s_throttles; /* waits in throttle */
430 unsigned long s_retry_messages; /* retry broadcasts */
431 unsigned long s_bau_reenabled; /* for bau enable/disable */
432 unsigned long s_bau_disabled; /* for bau enable/disable */
366 /* destination statistics */ 433 /* destination statistics */
367 unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */ 434 unsigned long d_alltlb; /* times all tlb's on this
368 unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */ 435 cpu were flushed */
369 unsigned long d_multmsg; /* interrupts with multiple messages */ 436 unsigned long d_onetlb; /* times just one tlb on this
370 unsigned long d_nomsg; /* interrupts with no message */ 437 cpu was flushed */
371 unsigned long d_time; /* time spent on destination side */ 438 unsigned long d_multmsg; /* interrupts with multiple
372 unsigned long d_requestee; /* number of messages processed */ 439 messages */
373 unsigned long d_retries; /* number of retry messages processed */ 440 unsigned long d_nomsg; /* interrupts with no message */
374 unsigned long d_canceled; /* number of messages canceled by retries */ 441 unsigned long d_time; /* time spent on destination
375 unsigned long d_nocanceled; /* retries that found nothing to cancel */ 442 side */
376 unsigned long d_resets; /* number of ipi-style requests processed */ 443 unsigned long d_requestee; /* number of messages
377 unsigned long d_rcanceled; /* number of messages canceled by resets */ 444 processed */
445 unsigned long d_retries; /* number of retry messages
446 processed */
447 unsigned long d_canceled; /* number of messages canceled
448 by retries */
449 unsigned long d_nocanceled; /* retries that found nothing
450 to cancel */
451 unsigned long d_resets; /* number of ipi-style requests
452 processed */
453 unsigned long d_rcanceled; /* number of messages canceled
454 by resets */
455};
456
457struct tunables {
458 int *tunp;
459 int deflt;
460};
461
462struct hub_and_pnode {
463 short uvhub;
464 short pnode;
465};
466
467struct socket_desc {
468 short num_cpus;
469 short cpu_number[MAX_CPUS_PER_SOCKET];
470};
471
472struct uvhub_desc {
473 unsigned short socket_mask;
474 short num_cpus;
475 short uvhub;
476 short pnode;
477 struct socket_desc socket[2];
378}; 478};
379 479
380/* 480/*
381 * one per-cpu; to locate the software tables 481 * one per-cpu; to locate the software tables
382 */ 482 */
383struct bau_control { 483struct bau_control {
384 struct bau_desc *descriptor_base; 484 struct bau_desc *descriptor_base;
385 struct bau_payload_queue_entry *va_queue_first; 485 struct bau_pq_entry *queue_first;
386 struct bau_payload_queue_entry *va_queue_last; 486 struct bau_pq_entry *queue_last;
387 struct bau_payload_queue_entry *bau_msg_head; 487 struct bau_pq_entry *bau_msg_head;
388 struct bau_control *uvhub_master; 488 struct bau_control *uvhub_master;
389 struct bau_control *socket_master; 489 struct bau_control *socket_master;
390 struct ptc_stats *statp; 490 struct ptc_stats *statp;
391 unsigned long timeout_interval; 491 unsigned long timeout_interval;
392 unsigned long set_bau_on_time; 492 unsigned long set_bau_on_time;
393 atomic_t active_descriptor_count; 493 atomic_t active_descriptor_count;
394 int plugged_tries; 494 int plugged_tries;
395 int timeout_tries; 495 int timeout_tries;
396 int ipi_attempts; 496 int ipi_attempts;
397 int conseccompletes; 497 int conseccompletes;
398 int baudisabled; 498 int baudisabled;
399 int set_bau_off; 499 int set_bau_off;
400 short cpu; 500 short cpu;
401 short uvhub_cpu; 501 short osnode;
402 short uvhub; 502 short uvhub_cpu;
403 short cpus_in_socket; 503 short uvhub;
404 short cpus_in_uvhub; 504 short cpus_in_socket;
405 unsigned short message_number; 505 short cpus_in_uvhub;
406 unsigned short uvhub_quiesce; 506 short partition_base_pnode;
407 short socket_acknowledge_count[DEST_Q_SIZE]; 507 unsigned short message_number;
408 cycles_t send_message; 508 unsigned short uvhub_quiesce;
409 spinlock_t uvhub_lock; 509 short socket_acknowledge_count[DEST_Q_SIZE];
410 spinlock_t queue_lock; 510 cycles_t send_message;
511 spinlock_t uvhub_lock;
512 spinlock_t queue_lock;
411 /* tunables */ 513 /* tunables */
412 int max_bau_concurrent; 514 int max_concurr;
413 int max_bau_concurrent_constant; 515 int max_concurr_const;
414 int plugged_delay; 516 int plugged_delay;
415 int plugsb4reset; 517 int plugsb4reset;
416 int timeoutsb4reset; 518 int timeoutsb4reset;
417 int ipi_reset_limit; 519 int ipi_reset_limit;
418 int complete_threshold; 520 int complete_threshold;
419 int congested_response_us; 521 int cong_response_us;
420 int congested_reps; 522 int cong_reps;
421 int congested_period; 523 int cong_period;
422 cycles_t period_time; 524 cycles_t period_time;
423 long period_requests; 525 long period_requests;
526 struct hub_and_pnode *thp;
424}; 527};
425 528
426static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) 529static unsigned long read_mmr_uv2_status(void)
530{
531 return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
532}
533
534static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
535{
536 write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
537}
538
539static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
540{
541 write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
542}
543
544static void write_mmr_activation(unsigned long index)
545{
546 write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
547}
548
549static void write_gmmr_activation(int pnode, unsigned long mmr_image)
550{
551 write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
552}
553
554static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
555{
556 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
557}
558
559static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
560{
561 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
562}
563
564static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
565{
566 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
567}
568
569static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
570{
571 write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
572}
573
574static unsigned long read_mmr_misc_control(int pnode)
575{
576 return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
577}
578
579static void write_mmr_sw_ack(unsigned long mr)
580{
581 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
582}
583
584static unsigned long read_mmr_sw_ack(void)
585{
586 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
587}
588
589static unsigned long read_gmmr_sw_ack(int pnode)
590{
591 return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
592}
593
594static void write_mmr_data_config(int pnode, unsigned long mr)
595{
596 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
597}
598
599static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
427{ 600{
428 return constant_test_bit(uvhub, &dstp->bits[0]); 601 return constant_test_bit(uvhub, &dstp->bits[0]);
429} 602}
430static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) 603static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
431{ 604{
432 __set_bit(uvhub, &dstp->bits[0]); 605 __set_bit(pnode, &dstp->bits[0]);
433} 606}
434static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, 607static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
435 int nbits) 608 int nbits)
436{ 609{
437 bitmap_zero(&dstp->bits[0], nbits); 610 bitmap_zero(&dstp->bits[0], nbits);
438} 611}
439static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp) 612static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
440{ 613{
441 return bitmap_weight((unsigned long *)&dstp->bits[0], 614 return bitmap_weight((unsigned long *)&dstp->bits[0],
442 UV_DISTRIBUTION_SIZE); 615 UV_DISTRIBUTION_SIZE);
@@ -447,9 +620,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
447 bitmap_zero(&dstp->bits, nbits); 620 bitmap_zero(&dstp->bits, nbits);
448} 621}
449 622
450#define cpubit_isset(cpu, bau_local_cpumask) \
451 test_bit((cpu), (bau_local_cpumask).bits)
452
453extern void uv_bau_message_intr1(void); 623extern void uv_bau_message_intr1(void);
454extern void uv_bau_timeout_intr1(void); 624extern void uv_bau_timeout_intr1(void);
455 625
@@ -457,7 +627,7 @@ struct atomic_short {
457 short counter; 627 short counter;
458}; 628};
459 629
460/** 630/*
461 * atomic_read_short - read a short atomic variable 631 * atomic_read_short - read a short atomic variable
462 * @v: pointer of type atomic_short 632 * @v: pointer of type atomic_short
463 * 633 *
@@ -468,14 +638,14 @@ static inline int atomic_read_short(const struct atomic_short *v)
468 return v->counter; 638 return v->counter;
469} 639}
470 640
471/** 641/*
472 * atomic_add_short_return - add and return a short int 642 * atom_asr - add and return a short int
473 * @i: short value to add 643 * @i: short value to add
474 * @v: pointer of type atomic_short 644 * @v: pointer of type atomic_short
475 * 645 *
476 * Atomically adds @i to @v and returns @i + @v 646 * Atomically adds @i to @v and returns @i + @v
477 */ 647 */
478static inline int atomic_add_short_return(short i, struct atomic_short *v) 648static inline int atom_asr(short i, struct atomic_short *v)
479{ 649{
480 short __i = i; 650 short __i = i;
481 asm volatile(LOCK_PREFIX "xaddw %0, %1" 651 asm volatile(LOCK_PREFIX "xaddw %0, %1"
@@ -484,4 +654,26 @@ static inline int atomic_add_short_return(short i, struct atomic_short *v)
484 return i + __i; 654 return i + __i;
485} 655}
486 656
657/*
658 * conditionally add 1 to *v, unless *v is >= u
659 * return 0 if we cannot add 1 to *v because it is >= u
660 * return 1 if we can add 1 to *v because it is < u
661 * the add is atomic
662 *
663 * This is close to atomic_add_unless(), but this allows the 'u' value
664 * to be lowered below the current 'v'. atomic_add_unless can only stop
665 * on equal.
666 */
667static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
668{
669 spin_lock(lock);
670 if (atomic_read(v) >= u) {
671 spin_unlock(lock);
672 return 0;
673 }
674 atomic_inc(v);
675 spin_unlock(lock);
676 return 1;
677}
678
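A sketch of the intended use of atomic_inc_unless_ge(): gating a sender on the max_concurr throttle from struct bau_control above. The bcp pointer and the spin loop are illustrative assumptions:

/* sketch: spin until this hub's active-descriptor count can be
 * raised without exceeding the (possibly lowered) throttle */
struct bau_control *hmaster = bcp->uvhub_master;

while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			     &hmaster->active_descriptor_count,
			     bcp->max_concurr))
	cpu_relax();
/* ... issue the broadcast, then atomic_dec() on completion ... */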
487#endif /* _ASM_X86_UV_UV_BAU_H */ 679#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index bf6b88ef8eeb..f26544a15214 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV architectural definitions 6 * SGI UV architectural definitions
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_HUB_H 11#ifndef _ASM_X86_UV_UV_HUB_H
@@ -77,7 +77,9 @@
77 * 77 *
78 * 1111110000000000 78 * 1111110000000000
79 * 5432109876543210 79 * 5432109876543210
80 * pppppppppplc0cch 80 * pppppppppplc0cch Nehalem-EX (12 bits in hdw reg)
81 * ppppppppplcc0cch Westmere-EX (12 bits in hdw reg)
82 * pppppppppppcccch SandyBridge (15 bits in hdw reg)
81 * sssssssssss 83 * sssssssssss
82 * 84 *
83 * p = pnode bits 85 * p = pnode bits
@@ -86,7 +88,7 @@
86 * h = hyperthread 88 * h = hyperthread
87 * s = bits that are in the SOCKET_ID CSR 89 * s = bits that are in the SOCKET_ID CSR
88 * 90 *
89 * Note: Processor only supports 12 bits in the APICID register. The ACPI 91 * Note: Processor may support fewer bits in the APICID register. The ACPI
90 * tables hold all 16 bits. Software needs to be aware of this. 92 * tables hold all 16 bits. Software needs to be aware of this.
91 * 93 *
92 * Unless otherwise specified, all references to APICID refer to 94 * Unless otherwise specified, all references to APICID refer to
@@ -137,6 +139,8 @@ struct uv_hub_info_s {
137 unsigned long global_mmr_base; 139 unsigned long global_mmr_base;
138 unsigned long gpa_mask; 140 unsigned long gpa_mask;
139 unsigned int gnode_extra; 141 unsigned int gnode_extra;
142 unsigned char hub_revision;
143 unsigned char apic_pnode_shift;
140 unsigned long gnode_upper; 144 unsigned long gnode_upper;
141 unsigned long lowmem_remap_top; 145 unsigned long lowmem_remap_top;
142 unsigned long lowmem_remap_base; 146 unsigned long lowmem_remap_base;
@@ -155,6 +159,37 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
155#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) 159#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
156 160
157/* 161/*
162 * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
163 * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
164 * This is a software convention - NOT the hardware revision numbers in
165 * the hub chip.
166 */
167#define UV1_HUB_REVISION_BASE 1
168#define UV2_HUB_REVISION_BASE 3
169
170static inline int is_uv1_hub(void)
171{
172 return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
173}
174
175static inline int is_uv2_hub(void)
176{
177 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
178}
179
180union uvh_apicid {
181 unsigned long v;
182 struct uvh_apicid_s {
183 unsigned long local_apic_mask : 24;
184 unsigned long local_apic_shift : 5;
185 unsigned long unused1 : 3;
186 unsigned long pnode_mask : 24;
187 unsigned long pnode_shift : 5;
188 unsigned long unused2 : 3;
189 } s;
190};
191
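A sketch of decoding a raw APICID with the layout above; UVH_APICID (defined later in this patch) supplies the field positions. Whether the mask applies before or after the shift is an assumption here, as is the id value:

/* sketch: pull the pnode out of a raw APICID using the hardware-
 * described field positions (mask assumed to sit over the shifted-
 * down field) */
union uvh_apicid apicid;
int pnode;

apicid.v = uv_read_local_mmr(UVH_APICID);
pnode = (id >> apicid.s.pnode_shift) & apicid.s.pnode_mask;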
192/*
158 * Local & Global MMR space macros. 193 * Local & Global MMR space macros.
159 * Note: macros are intended to be used ONLY by inline functions 194 * Note: macros are intended to be used ONLY by inline functions
160 * in this file - not by other kernel code. 195 * in this file - not by other kernel code.
@@ -166,11 +201,25 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
166#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra) 201#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
167#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1) 202#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
168 203
169#define UV_LOCAL_MMR_BASE 0xf4000000UL 204#define UV1_LOCAL_MMR_BASE 0xf4000000UL
170#define UV_GLOBAL_MMR32_BASE 0xf8000000UL 205#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
206#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
207#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
208
209#define UV2_LOCAL_MMR_BASE 0xfa000000UL
210#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
211#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
212#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
213
214#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \
215 : UV2_LOCAL_MMR_BASE)
216#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \
217 : UV2_GLOBAL_MMR32_BASE)
218#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
219 UV2_LOCAL_MMR_SIZE)
220#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
221 UV2_GLOBAL_MMR32_SIZE)
171#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) 222#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
172#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
173#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
174 223
175#define UV_GLOBAL_GRU_MMR_BASE 0x4000000 224#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
176 225
@@ -182,8 +231,11 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
182#define UV_GLOBAL_MMR64_PNODE_BITS(p) \ 231#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
183 (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) 232 (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
184 233
234#define UVH_APICID 0x002D0E00L
185#define UV_APIC_PNODE_SHIFT 6 235#define UV_APIC_PNODE_SHIFT 6
186 236
237#define UV_APICID_HIBIT_MASK 0xffff0000
238
187/* Local Bus from cpu's perspective */ 239/* Local Bus from cpu's perspective */
188#define LOCAL_BUS_BASE 0x1c00000 240#define LOCAL_BUS_BASE 0x1c00000
189#define LOCAL_BUS_SIZE (4 * 1024 * 1024) 241#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
@@ -280,7 +332,18 @@ static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
280 */ 332 */
281static inline int uv_apicid_to_pnode(int apicid) 333static inline int uv_apicid_to_pnode(int apicid)
282{ 334{
283 return (apicid >> UV_APIC_PNODE_SHIFT); 335 return (apicid >> uv_hub_info->apic_pnode_shift);
336}
337
338/*
339 * Convert an apicid to the socket number on the blade
340 */
341static inline int uv_apicid_to_socket(int apicid)
342{
343 if (is_uv1_hub())
344 return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
345 else
346 return 0;
284} 347}
285 348
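Putting the two helpers together, a short sketch; x86_cpu_to_apicid is the kernel's per-cpu APICID map, and cpu is assumed valid and online:

/* sketch: locate a cpu's pnode and blade socket from its APICID */
int apicid = per_cpu(x86_cpu_to_apicid, cpu);
int pnode  = uv_apicid_to_pnode(apicid);
int socket = uv_apicid_to_socket(apicid);	/* 0 or 1 on UV1, 0 on UV2 */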
286/* 349/*
@@ -381,6 +444,8 @@ struct uv_blade_info {
381 unsigned short nr_online_cpus; 444 unsigned short nr_online_cpus;
382 unsigned short pnode; 445 unsigned short pnode;
383 short memory_nid; 446 short memory_nid;
447 spinlock_t nmi_lock;
448 unsigned long nmi_count;
384}; 449};
385extern struct uv_blade_info *uv_blade_info; 450extern struct uv_blade_info *uv_blade_info;
386extern short *uv_node_to_blade; 451extern short *uv_node_to_blade;
@@ -476,8 +541,10 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
476 } 541 }
477} 542}
478 543
544extern unsigned int uv_apicid_hibits;
479static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode) 545static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
480{ 546{
547 apicid |= uv_apicid_hibits;
481 return (1UL << UVH_IPI_INT_SEND_SHFT) | 548 return (1UL << UVH_IPI_INT_SEND_SHFT) |
482 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | 549 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
483 (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | 550 (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
@@ -498,14 +565,13 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
498 565
499/* 566/*
500 * Get the minimum revision number of the hub chips within the partition. 567 * Get the minimum revision number of the hub chips within the partition.
501 * 1 - initial rev 1.0 silicon 568 * 1 - UV1 rev 1.0 initial silicon
502 * 2 - rev 2.0 production silicon 569 * 2 - UV1 rev 2.0 production silicon
570 * 3 - UV2 rev 1.0 initial silicon
503 */ 571 */
504static inline int uv_get_min_hub_revision_id(void) 572static inline int uv_get_min_hub_revision_id(void)
505{ 573{
506 extern int uv_min_hub_revision_id; 574 return uv_hub_info->hub_revision;
507
508 return uv_min_hub_revision_id;
509} 575}
510 576
511#endif /* CONFIG_X86_64 */ 577#endif /* CONFIG_X86_64 */
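A sketch of the send path built on uv_hub_ipi_value() above; UVH_IPI_INT and dest_Fixed come from the existing UV/APIC headers, and pnode, apicid and vector are assumed to have been looked up already:

/* sketch: compose and fire an IPI at a remote hub; the hibits OR-in
 * happens inside uv_hub_ipi_value() as shown above */
unsigned long val = uv_hub_ipi_value(apicid, vector, dest_Fixed);

uv_write_global_mmr64(pnode, UVH_IPI_INT, val);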
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05cec..4be52c863448 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,19 +5,70 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
12#define _ASM_X86_UV_UV_MMRS_H 12#define _ASM_X86_UV_UV_MMRS_H
13 13
14/*
15 * This file contains MMR definitions for both UV1 & UV2 hubs.
16 *
17 * In general, MMR addresses and structures are identical on both hubs.
18 * These MMRs are identified as:
19 * #define UVH_xxx <address>
20 * union uvh_xxx {
21 * unsigned long v;
22 * struct uvh_int_cmpd_s {
23 * } s;
24 * };
25 *
 26 * If the MMR exists on both hub types but has different addresses or
27 * contents, the MMR definition is similar to:
28 * #define UV1H_xxx <uv1 address>
 29 * #define UV2H_xxx <uv2 address>
30 * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx)
31 * union uvh_xxx {
32 * unsigned long v;
 33 * struct uvh_int_cmpd_s { (Common fields only)
34 * } s;
35 * struct uv1h_int_cmpd_s { (Full UV1 definition)
36 * } s1;
37 * struct uv2h_int_cmpd_s { (Full UV2 definition)
38 * } s2;
39 * };
40 *
 41 * Only essential differences are enumerated. For example, if the address is
42 * the same for both UV1 & UV2, only a single #define is generated. Likewise,
 43 * if the contents are the same for both hubs, only the "s" structure is
44 * generated.
45 *
46 * If the MMR exists on ONLY 1 type of hub, no generic definition is
47 * generated:
48 * #define UVnH_xxx <uvn address>
49 * union uvnh_xxx {
50 * unsigned long v;
 51 * struct uvnh_int_cmpd_s {
52 * } sn;
53 * };
54 */
55
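To make the convention concrete, a hypothetical register following the "different address per hub" pattern; every name below is invented for illustration:

/* hypothetical example of the UV1H_/UV2H_/UVH_ pattern */
#define UV1H_FOO 0x100000UL
#define UV2H_FOO 0x200000UL
#define UVH_FOO (is_uv1_hub() ? UV1H_FOO : UV2H_FOO)

union uvh_foo {
	unsigned long v;
	struct uvh_foo_s {		/* fields common to both hubs */
		unsigned long enable :  1;
		unsigned long rsvd_0 : 63;
	} s;
};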
14#define UV_MMR_ENABLE (1UL << 63) 56#define UV_MMR_ENABLE (1UL << 63)
15 57
58#define UV1_HUB_PART_NUMBER 0x88a5
59#define UV2_HUB_PART_NUMBER 0x8eb8
60
61/* Compat: if this #define is present, UV headers support UV2 */
62#define UV2_HUB_IS_SUPPORTED 1
63
64/* KABI compat: if this #define is present, KABI hacks are present */
65#define UV2_HUB_KABI_HACKS 1
66
16/* ========================================================================= */ 67/* ========================================================================= */
17/* UVH_BAU_DATA_BROADCAST */ 68/* UVH_BAU_DATA_BROADCAST */
18/* ========================================================================= */ 69/* ========================================================================= */
19#define UVH_BAU_DATA_BROADCAST 0x61688UL 70#define UVH_BAU_DATA_BROADCAST 0x61688UL
20#define UVH_BAU_DATA_BROADCAST_32 0x0440 71#define UVH_BAU_DATA_BROADCAST_32 0x440
21 72
22#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 73#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
23#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL 74#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
@@ -34,7 +85,7 @@ union uvh_bau_data_broadcast_u {
34/* UVH_BAU_DATA_CONFIG */ 85/* UVH_BAU_DATA_CONFIG */
35/* ========================================================================= */ 86/* ========================================================================= */
36#define UVH_BAU_DATA_CONFIG 0x61680UL 87#define UVH_BAU_DATA_CONFIG 0x61680UL
37#define UVH_BAU_DATA_CONFIG_32 0x0438 88#define UVH_BAU_DATA_CONFIG_32 0x438
38 89
39#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 90#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
40#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL 91#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
@@ -73,125 +124,245 @@ union uvh_bau_data_config_u {
73/* UVH_EVENT_OCCURRED0 */ 124/* UVH_EVENT_OCCURRED0 */
74/* ========================================================================= */ 125/* ========================================================================= */
75#define UVH_EVENT_OCCURRED0 0x70000UL 126#define UVH_EVENT_OCCURRED0 0x70000UL
76#define UVH_EVENT_OCCURRED0_32 0x005e8 127#define UVH_EVENT_OCCURRED0_32 0x5e8
77 128
78#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0 129#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
79#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL 130#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
80#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 131#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
81#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL 132#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
82#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 133#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
83#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL 134#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
84#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3 135#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
85#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL 136#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
86#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4 137#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
87#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL 138#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
88#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5 139#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
89#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL 140#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
90#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6 141#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
91#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL 142#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
92#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7 143#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
93#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL 144#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
94#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 145#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
95#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL 146#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
96#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 147#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
97#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL 148#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
98#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 149#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
99#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL 150#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
100#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 151#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
101#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL 152#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
102#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 153#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
103#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL 154#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
104#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 155#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
105#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL 156#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
106#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 157#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
107#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL 158#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
108#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15 159#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
109#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL 160#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
110#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16 161#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
111#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL 162#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
112#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17 163#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
113#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL 164#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
114#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18 165#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
115#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL 166#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
116#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19 167#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
117#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL 168#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
118#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20 169#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
119#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL 170#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
120#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21 171#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
121#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL 172#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
122#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22 173#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
123#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL 174#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
124#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23 175#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
125#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL 176#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
126#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24 177#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
127#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL 178#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
128#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25 179#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
129#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL 180#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
130#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26 181#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
131#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL 182#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
132#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27 183#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
133#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL 184#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
134#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28 185#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
135#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL 186#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
136#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29 187#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
137#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL 188#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
138#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30 189#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
139#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL 190#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
140#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31 191#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
141#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL 192#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
142#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32 193#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
143#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL 194#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
144#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33 195#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
145#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL 196#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
-#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
-#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
-#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
-#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
-#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
-#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
-#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
-#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
-#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
-#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
-#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
-#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
-#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
-#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
-#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
-#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
-#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
-#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
-#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
-#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
-#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
-#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
-#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
-#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
243
244#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
245#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
246#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1
247#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
248#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2
249#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
250#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
251#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
252#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
253#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
254#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
255#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
256#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
257#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
258#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
259#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
260#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
261#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
262#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
263#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
264#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
265#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
266#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
267#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
268#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
269#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
270#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
271#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
272#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
273#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
274#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
275#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
276#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
277#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
278#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
279#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
280#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
281#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
282#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
283#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
284#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
285#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
286#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
287#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
288#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
289#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
290#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
291#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
292#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
293#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
294#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
295#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
296#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
297#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
298#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
299#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
300#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
301#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
302#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
303#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
304#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
305#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
306#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
307#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
308#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
309#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
310#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
311#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
312#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
313#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
314#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
315#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
316#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
317#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
318#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
319#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
320#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
321#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
322#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
323#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
324#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
325#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
326#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
327#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
328#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
329#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
330#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
331#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
332#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
333#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
334#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
335#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
336#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
337#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
338#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
339#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
340#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
341#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
342#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
343#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
344#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
345#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
346#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
347#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
348#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
349#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
350#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53
351#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
352#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
353#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
354#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
355#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
356#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
357#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
358#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
359#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
360#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
361#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
362
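Code that only needs a single flag can test the raw MMR value against the _MASK macros above instead of going through the union that follows. A minimal sketch, assuming uv_read_local_mmr() from the companion uv_hub.h and the UVH_EVENT_OCCURRED0 register offset defined earlier in this header:

	unsigned long ev0 = uv_read_local_mmr(UVH_EVENT_OCCURRED0);

	if (ev0 & UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK)
		;	/* profile interrupt pending (UV2 bit layout) */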
 union uvh_event_occurred0_u {
 	unsigned long v;
-	struct uvh_event_occurred0_s {
+	struct uv1h_event_occurred0_s {
 	unsigned long lb_hcerr : 1; /* RW, W1C */
 	unsigned long gr0_hcerr : 1; /* RW, W1C */
 	unsigned long gr1_hcerr : 1; /* RW, W1C */
@@ -250,14 +421,76 @@ union uvh_event_occurred0_u {
 	unsigned long bau_data : 1; /* RW, W1C */
 	unsigned long power_management_req : 1; /* RW, W1C */
 	unsigned long rsvd_57_63 : 7; /* */
-	} s;
+	} s1;
425 struct uv2h_event_occurred0_s {
426 unsigned long lb_hcerr : 1; /* RW */
427 unsigned long qp_hcerr : 1; /* RW */
428 unsigned long rh_hcerr : 1; /* RW */
429 unsigned long lh0_hcerr : 1; /* RW */
430 unsigned long lh1_hcerr : 1; /* RW */
431 unsigned long gr0_hcerr : 1; /* RW */
432 unsigned long gr1_hcerr : 1; /* RW */
433 unsigned long ni0_hcerr : 1; /* RW */
434 unsigned long ni1_hcerr : 1; /* RW */
435 unsigned long lb_aoerr0 : 1; /* RW */
436 unsigned long qp_aoerr0 : 1; /* RW */
437 unsigned long rh_aoerr0 : 1; /* RW */
438 unsigned long lh0_aoerr0 : 1; /* RW */
439 unsigned long lh1_aoerr0 : 1; /* RW */
440 unsigned long gr0_aoerr0 : 1; /* RW */
441 unsigned long gr1_aoerr0 : 1; /* RW */
442 unsigned long xb_aoerr0 : 1; /* RW */
443 unsigned long rt_aoerr0 : 1; /* RW */
444 unsigned long ni0_aoerr0 : 1; /* RW */
445 unsigned long ni1_aoerr0 : 1; /* RW */
446 unsigned long lb_aoerr1 : 1; /* RW */
447 unsigned long qp_aoerr1 : 1; /* RW */
448 unsigned long rh_aoerr1 : 1; /* RW */
449 unsigned long lh0_aoerr1 : 1; /* RW */
450 unsigned long lh1_aoerr1 : 1; /* RW */
451 unsigned long gr0_aoerr1 : 1; /* RW */
452 unsigned long gr1_aoerr1 : 1; /* RW */
453 unsigned long xb_aoerr1 : 1; /* RW */
454 unsigned long rt_aoerr1 : 1; /* RW */
455 unsigned long ni0_aoerr1 : 1; /* RW */
456 unsigned long ni1_aoerr1 : 1; /* RW */
457 unsigned long system_shutdown_int : 1; /* RW */
458 unsigned long lb_irq_int_0 : 1; /* RW */
459 unsigned long lb_irq_int_1 : 1; /* RW */
460 unsigned long lb_irq_int_2 : 1; /* RW */
461 unsigned long lb_irq_int_3 : 1; /* RW */
462 unsigned long lb_irq_int_4 : 1; /* RW */
463 unsigned long lb_irq_int_5 : 1; /* RW */
464 unsigned long lb_irq_int_6 : 1; /* RW */
465 unsigned long lb_irq_int_7 : 1; /* RW */
466 unsigned long lb_irq_int_8 : 1; /* RW */
467 unsigned long lb_irq_int_9 : 1; /* RW */
468 unsigned long lb_irq_int_10 : 1; /* RW */
469 unsigned long lb_irq_int_11 : 1; /* RW */
470 unsigned long lb_irq_int_12 : 1; /* RW */
471 unsigned long lb_irq_int_13 : 1; /* RW */
472 unsigned long lb_irq_int_14 : 1; /* RW */
473 unsigned long lb_irq_int_15 : 1; /* RW */
474 unsigned long l1_nmi_int : 1; /* RW */
475 unsigned long stop_clock : 1; /* RW */
476 unsigned long asic_to_l1 : 1; /* RW */
477 unsigned long l1_to_asic : 1; /* RW */
478 unsigned long la_seq_trigger : 1; /* RW */
479 unsigned long ipi_int : 1; /* RW */
480 unsigned long extio_int0 : 1; /* RW */
481 unsigned long extio_int1 : 1; /* RW */
482 unsigned long extio_int2 : 1; /* RW */
483 unsigned long extio_int3 : 1; /* RW */
484 unsigned long profile_int : 1; /* RW */
485 unsigned long rsvd_59_63 : 5; /* */
486 } s2;
254}; 487};
255 488
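The s1/s2 views above overlay the same 64-bit MMR value; only the view matching the running hub revision is meaningful. A hedged usage sketch, where is_uv1_hub() is the selector used elsewhere in this header and uv_read_local_mmr() is assumed from uv_hub.h:

	union uvh_event_occurred0_u ev;

	ev.v = uv_read_local_mmr(UVH_EVENT_OCCURRED0);
	/* The IPI bit sits at bit 45 on UV1 but at bit 53 on UV2. */
	if (is_uv1_hub() ? ev.s1.ipi_int : ev.s2.ipi_int)
		;	/* handle the pending IPI event */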
256/* ========================================================================= */ 489/* ========================================================================= */
257/* UVH_EVENT_OCCURRED0_ALIAS */ 490/* UVH_EVENT_OCCURRED0_ALIAS */
258/* ========================================================================= */ 491/* ========================================================================= */
259#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL 492#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
261 494
262/* ========================================================================= */ 495/* ========================================================================= */
263/* UVH_GR0_TLB_INT0_CONFIG */ 496/* UVH_GR0_TLB_INT0_CONFIG */
@@ -432,8 +665,16 @@ union uvh_int_cmpb_u {
432/* ========================================================================= */ 665/* ========================================================================= */
433#define UVH_INT_CMPC 0x22100UL 666#define UVH_INT_CMPC 0x22100UL
434 667
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT (is_uv1_hub() ?	\
+			UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT :	\
+			UV2H_INT_CMPC_REAL_TIME_CMPC_SHFT)
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UV2H_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK (is_uv1_hub() ?	\
+			UV1H_INT_CMPC_REAL_TIME_CMPC_MASK :	\
+			UV2H_INT_CMPC_REAL_TIME_CMPC_MASK)
437 678
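The hub-neutral UVH_INT_CMPC_REAL_TIME_CMPC_* selectors above resolve to the UV1 or UV2 definition at run time via is_uv1_hub() (here both revisions happen to share the same shift and mask). A minimal usage sketch, assuming uv_read_local_mmr() from uv_hub.h:

	/* Read the real-time compare value, hub-neutral on UV1 and UV2. */
	unsigned long cmpc = uv_read_local_mmr(UVH_INT_CMPC) &
			     UVH_INT_CMPC_REAL_TIME_CMPC_MASK;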
438union uvh_int_cmpc_u { 679union uvh_int_cmpc_u {
439 unsigned long v; 680 unsigned long v;
@@ -448,8 +689,16 @@ union uvh_int_cmpc_u {
448/* ========================================================================= */ 689/* ========================================================================= */
449#define UVH_INT_CMPD 0x22180UL 690#define UVH_INT_CMPD 0x22180UL
450 691
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT (is_uv1_hub() ?	\
+			UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT :	\
+			UV2H_INT_CMPD_REAL_TIME_CMPD_SHFT)
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UV2H_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK (is_uv1_hub() ?	\
+			UV1H_INT_CMPD_REAL_TIME_CMPD_MASK :	\
+			UV2H_INT_CMPD_REAL_TIME_CMPD_MASK)
453 702
454union uvh_int_cmpd_u { 703union uvh_int_cmpd_u {
455 unsigned long v; 704 unsigned long v;
@@ -463,7 +712,7 @@ union uvh_int_cmpd_u {
463/* UVH_IPI_INT */ 712/* UVH_IPI_INT */
464/* ========================================================================= */ 713/* ========================================================================= */
465#define UVH_IPI_INT 0x60500UL 714#define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x0348
+#define UVH_IPI_INT_32 0x348
467 716
468#define UVH_IPI_INT_VECTOR_SHFT 0 717#define UVH_IPI_INT_VECTOR_SHFT 0
469#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL 718#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
@@ -493,7 +742,7 @@ union uvh_ipi_int_u {
493/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ 742/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
494/* ========================================================================= */ 743/* ========================================================================= */
495#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL 744#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
497 746
498#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 747#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
499#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL 748#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -515,7 +764,7 @@ union uvh_lb_bau_intd_payload_queue_first_u {
515/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ 764/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
516/* ========================================================================= */ 765/* ========================================================================= */
517#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL 766#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
519 768
520#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 769#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
521#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL 770#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -533,7 +782,7 @@ union uvh_lb_bau_intd_payload_queue_last_u {
533/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ 782/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
534/* ========================================================================= */ 783/* ========================================================================= */
535#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL 784#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x009d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
537 786
538#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 787#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
539#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL 788#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -551,7 +800,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
551/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ 800/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
552/* ========================================================================= */ 801/* ========================================================================= */
553#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL 802#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0x0a68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
555 804
556#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 805#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
557#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL 806#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
@@ -585,6 +834,7 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
585#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL 834#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
586#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 835#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
587#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL 836#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
837
588union uvh_lb_bau_intd_software_acknowledge_u { 838union uvh_lb_bau_intd_software_acknowledge_u {
589 unsigned long v; 839 unsigned long v;
590 struct uvh_lb_bau_intd_software_acknowledge_s { 840 struct uvh_lb_bau_intd_software_acknowledge_s {
@@ -612,13 +862,13 @@ union uvh_lb_bau_intd_software_acknowledge_u {
612/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ 862/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
613/* ========================================================================= */ 863/* ========================================================================= */
614#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL 864#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
616 866
617/* ========================================================================= */ 867/* ========================================================================= */
618/* UVH_LB_BAU_MISC_CONTROL */ 868/* UVH_LB_BAU_MISC_CONTROL */
619/* ========================================================================= */ 869/* ========================================================================= */
620#define UVH_LB_BAU_MISC_CONTROL 0x320170UL 870#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
622 872
623#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 873#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
624#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL 874#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
@@ -628,8 +878,8 @@ union uvh_lb_bau_intd_software_acknowledge_u {
628#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL 878#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
629#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 879#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
630#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL 880#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
-#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
-#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
633#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 883#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
634#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL 884#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
635#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 885#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
@@ -650,8 +900,86 @@ union uvh_lb_bau_intd_software_acknowledge_u {
650#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL 900#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
651#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 901#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
652#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 902#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
-#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
-#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
905#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
906#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
907#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
908#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
909#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
910#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
911#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
912#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
913#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
914#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
915#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
916#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
917#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
918#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
919#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
920#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
921#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
922#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
923#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
924#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
925#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
926#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
927#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
928#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
929#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
930#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
931#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
932#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
933#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
934#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
935#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
936
937#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
938#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
939#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
940#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
941#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
942#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
943#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
944#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
945#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
946#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
947#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
948#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
949#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
950#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
951#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
952#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
953#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
954#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
955#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
956#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
957#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
958#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
959#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
960#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
961#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
962#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
963#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
964#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
965#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
966#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
967#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
968#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
969#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
970#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
971#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
972#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
973#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
974#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
975#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
976#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
977#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
978#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
979#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
980#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
981#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
982#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
655 983
656union uvh_lb_bau_misc_control_u { 984union uvh_lb_bau_misc_control_u {
657 unsigned long v; 985 unsigned long v;
@@ -660,7 +988,25 @@ union uvh_lb_bau_misc_control_u {
660 unsigned long apic_mode : 1; /* RW */ 988 unsigned long apic_mode : 1; /* RW */
661 unsigned long force_broadcast : 1; /* RW */ 989 unsigned long force_broadcast : 1; /* RW */
662 unsigned long force_lock_nop : 1; /* RW */ 990 unsigned long force_lock_nop : 1; /* RW */
-	unsigned long qpi_agent_presence_vector : 3; /* RW */
+	unsigned long qpi_agent_presence_vector : 3; /* RW */
992 unsigned long descriptor_fetch_mode : 1; /* RW */
993 unsigned long enable_intd_soft_ack_mode : 1; /* RW */
994 unsigned long intd_soft_ack_timeout_period : 4; /* RW */
995 unsigned long enable_dual_mapping_mode : 1; /* RW */
996 unsigned long vga_io_port_decode_enable : 1; /* RW */
997 unsigned long vga_io_port_16_bit_decode : 1; /* RW */
998 unsigned long suppress_dest_registration : 1; /* RW */
999 unsigned long programmed_initial_priority : 3; /* RW */
1000 unsigned long use_incoming_priority : 1; /* RW */
1001 unsigned long enable_programmed_initial_priority : 1; /* RW */
1002 unsigned long rsvd_29_63 : 35;
1003 } s;
1004 struct uv1h_lb_bau_misc_control_s {
1005 unsigned long rejection_delay : 8; /* RW */
1006 unsigned long apic_mode : 1; /* RW */
1007 unsigned long force_broadcast : 1; /* RW */
1008 unsigned long force_lock_nop : 1; /* RW */
1009 unsigned long qpi_agent_presence_vector : 3; /* RW */
664 unsigned long descriptor_fetch_mode : 1; /* RW */ 1010 unsigned long descriptor_fetch_mode : 1; /* RW */
665 unsigned long enable_intd_soft_ack_mode : 1; /* RW */ 1011 unsigned long enable_intd_soft_ack_mode : 1; /* RW */
666 unsigned long intd_soft_ack_timeout_period : 4; /* RW */ 1012 unsigned long intd_soft_ack_timeout_period : 4; /* RW */
@@ -673,14 +1019,40 @@ union uvh_lb_bau_misc_control_u {
673 unsigned long enable_programmed_initial_priority : 1; /* RW */ 1019 unsigned long enable_programmed_initial_priority : 1; /* RW */
674 unsigned long rsvd_29_47 : 19; /* */ 1020 unsigned long rsvd_29_47 : 19; /* */
675 unsigned long fun : 16; /* RW */ 1021 unsigned long fun : 16; /* RW */
-	} s;
+	} s1;
1023 struct uv2h_lb_bau_misc_control_s {
1024 unsigned long rejection_delay : 8; /* RW */
1025 unsigned long apic_mode : 1; /* RW */
1026 unsigned long force_broadcast : 1; /* RW */
1027 unsigned long force_lock_nop : 1; /* RW */
1028 unsigned long qpi_agent_presence_vector : 3; /* RW */
1029 unsigned long descriptor_fetch_mode : 1; /* RW */
1030 unsigned long enable_intd_soft_ack_mode : 1; /* RW */
1031 unsigned long intd_soft_ack_timeout_period : 4; /* RW */
1032 unsigned long enable_dual_mapping_mode : 1; /* RW */
1033 unsigned long vga_io_port_decode_enable : 1; /* RW */
1034 unsigned long vga_io_port_16_bit_decode : 1; /* RW */
1035 unsigned long suppress_dest_registration : 1; /* RW */
1036 unsigned long programmed_initial_priority : 3; /* RW */
1037 unsigned long use_incoming_priority : 1; /* RW */
1038 unsigned long enable_programmed_initial_priority : 1; /* RW */
1039 unsigned long enable_automatic_apic_mode_selection : 1; /* RW */
1040 unsigned long apic_mode_status : 1; /* RO */
1041 unsigned long suppress_interrupts_to_self : 1; /* RW */
1042 unsigned long enable_lock_based_system_flush : 1; /* RW */
1043 unsigned long enable_extended_sb_status : 1; /* RW */
1044 unsigned long suppress_int_prio_udt_to_self : 1; /* RW */
1045 unsigned long use_legacy_descriptor_formats : 1; /* RW */
1046 unsigned long rsvd_36_47 : 12; /* */
1047 unsigned long fun : 16; /* RW */
1048 } s2;
677}; 1049};
678 1050
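Fields in the common "s" view above (bits 0..28) carry the same layout on UV1 and UV2, so a read-modify-write through that view stays hub-neutral. A hedged sketch, assuming uv_read_local_mmr() and uv_write_local_mmr() from uv_hub.h:

	union uvh_lb_bau_misc_control_u mc;

	mc.v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	mc.s.enable_intd_soft_ack_mode = 1;	/* shared field, bit 15 */
	uv_write_local_mmr(UVH_LB_BAU_MISC_CONTROL, mc.v);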
679/* ========================================================================= */ 1051/* ========================================================================= */
680/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 1052/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
681/* ========================================================================= */ 1053/* ========================================================================= */
682#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 1054#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
684 1056
685#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 1057#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
686#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL 1058#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
@@ -703,7 +1075,7 @@ union uvh_lb_bau_sb_activation_control_u {
703/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ 1075/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
704/* ========================================================================= */ 1076/* ========================================================================= */
705#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL 1077#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
707 1079
708#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 1080#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
709#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL 1081#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -719,7 +1091,7 @@ union uvh_lb_bau_sb_activation_status_0_u {
719/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ 1091/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
720/* ========================================================================= */ 1092/* ========================================================================= */
721#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL 1093#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
723 1095
724#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 1096#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
725#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL 1097#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -735,7 +1107,7 @@ union uvh_lb_bau_sb_activation_status_1_u {
735/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ 1107/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
736/* ========================================================================= */ 1108/* ========================================================================= */
737#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL 1109#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
739 1111
740#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 1112#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
741#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL 1113#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
@@ -768,10 +1140,36 @@ union uvh_lb_bau_sb_descriptor_base_u {
768#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL 1140#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
769#define UVH_NODE_ID_NODE_ID_SHFT 32 1141#define UVH_NODE_ID_NODE_ID_SHFT 32
770#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL 1142#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
-#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
-#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
-#define UVH_NODE_ID_NI_PORT_SHFT 56
-#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UV1H_NODE_ID_FORCE1_SHFT 0
+#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
1147#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
1148#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
1149#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
1150#define UV1H_NODE_ID_REVISION_SHFT 28
1151#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
1152#define UV1H_NODE_ID_NODE_ID_SHFT 32
1153#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
1154#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
1155#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
1156#define UV1H_NODE_ID_NI_PORT_SHFT 56
1157#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
1158
1159#define UV2H_NODE_ID_FORCE1_SHFT 0
1160#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
1161#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
1162#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
1163#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
1164#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
1165#define UV2H_NODE_ID_REVISION_SHFT 28
1166#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
1167#define UV2H_NODE_ID_NODE_ID_SHFT 32
1168#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
1169#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
1170#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
1171#define UV2H_NODE_ID_NI_PORT_SHFT 57
1172#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
775 1173
776union uvh_node_id_u { 1174union uvh_node_id_u {
777 unsigned long v; 1175 unsigned long v;
@@ -781,12 +1179,31 @@ union uvh_node_id_u {
781 unsigned long part_number : 16; /* RO */ 1179 unsigned long part_number : 16; /* RO */
782 unsigned long revision : 4; /* RO */ 1180 unsigned long revision : 4; /* RO */
783 unsigned long node_id : 15; /* RW */ 1181 unsigned long node_id : 15; /* RW */
1182 unsigned long rsvd_47_63 : 17;
1183 } s;
1184 struct uv1h_node_id_s {
1185 unsigned long force1 : 1; /* RO */
1186 unsigned long manufacturer : 11; /* RO */
1187 unsigned long part_number : 16; /* RO */
1188 unsigned long revision : 4; /* RO */
1189 unsigned long node_id : 15; /* RW */
784 unsigned long rsvd_47 : 1; /* */ 1190 unsigned long rsvd_47 : 1; /* */
785 unsigned long nodes_per_bit : 7; /* RW */ 1191 unsigned long nodes_per_bit : 7; /* RW */
786 unsigned long rsvd_55 : 1; /* */ 1192 unsigned long rsvd_55 : 1; /* */
787 unsigned long ni_port : 4; /* RO */ 1193 unsigned long ni_port : 4; /* RO */
788 unsigned long rsvd_60_63 : 4; /* */ 1194 unsigned long rsvd_60_63 : 4; /* */
-	} s;
+	} s1;
1196 struct uv2h_node_id_s {
1197 unsigned long force1 : 1; /* RO */
1198 unsigned long manufacturer : 11; /* RO */
1199 unsigned long part_number : 16; /* RO */
1200 unsigned long revision : 4; /* RO */
1201 unsigned long node_id : 15; /* RW */
1202 unsigned long rsvd_47_49 : 3; /* */
1203 unsigned long nodes_per_bit : 7; /* RO */
1204 unsigned long ni_port : 5; /* RO */
1205 unsigned long rsvd_62_63 : 2; /* */
1206 } s2;
790}; 1207};
791 1208
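The open-coded alternative to the union is the _SHFT/_MASK pair; the NODE_ID field occupies the same bits (46:32) on both hub revisions, so the hub-neutral UVH_ macros above are safe everywhere. A sketch, assuming uv_read_local_mmr() from uv_hub.h and the UVH_NODE_ID register offset defined earlier in this header:

	unsigned long nid = (uv_read_local_mmr(UVH_NODE_ID) &
			     UVH_NODE_ID_NODE_ID_MASK) >> UVH_NODE_ID_NODE_ID_SHFT;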
792/* ========================================================================= */ 1209/* ========================================================================= */
@@ -806,6 +1223,78 @@ union uvh_node_present_table_u {
806}; 1223};
807 1224
808/* ========================================================================= */ 1225/* ========================================================================= */
1226/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
1227/* ========================================================================= */
1228#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
1229
1230#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
1231#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
1232#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
1233#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
1234#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
1235#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
1236
1237union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
1238 unsigned long v;
1239 struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
1240 unsigned long rsvd_0_23: 24; /* */
1241 unsigned long base : 8; /* RW */
1242 unsigned long rsvd_32_47: 16; /* */
1243 unsigned long m_alias : 5; /* RW */
1244 unsigned long rsvd_53_62: 10; /* */
1245 unsigned long enable : 1; /* RW */
1246 } s;
1247};
1248
1249/* ========================================================================= */
1250/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
1251/* ========================================================================= */
1252#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
1253
1254#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
1255#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
1256#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
1257#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
1258#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
1259#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
1260
1261union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
1262 unsigned long v;
1263 struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
1264 unsigned long rsvd_0_23: 24; /* */
1265 unsigned long base : 8; /* RW */
1266 unsigned long rsvd_32_47: 16; /* */
1267 unsigned long m_alias : 5; /* RW */
1268 unsigned long rsvd_53_62: 10; /* */
1269 unsigned long enable : 1; /* RW */
1270 } s;
1271};
1272
1273/* ========================================================================= */
1274/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
1275/* ========================================================================= */
1276#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
1277
1278#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
1279#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
1280#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
1281#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
1282#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
1283#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
1284
1285union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
1286 unsigned long v;
1287 struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
1288 unsigned long rsvd_0_23: 24; /* */
1289 unsigned long base : 8; /* RW */
1290 unsigned long rsvd_32_47: 16; /* */
1291 unsigned long m_alias : 5; /* RW */
1292 unsigned long rsvd_53_62: 10; /* */
1293 unsigned long enable : 1; /* RW */
1294 } s;
1295};
1296
1297/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 1298/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
810/* ========================================================================= */ 1299/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 1300#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,31 +1346,98 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
857}; 1346};
858 1347
859/* ========================================================================= */ 1348/* ========================================================================= */
1349/* UVH_RH_GAM_CONFIG_MMR */
1350/* ========================================================================= */
1351#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
1352
1353#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1354#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1355#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1356#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1357
1358#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1359#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1360#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1361#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1362#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
1363#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
1364
1365#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1366#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1367#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1368#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1369
1370union uvh_rh_gam_config_mmr_u {
1371 unsigned long v;
1372 struct uvh_rh_gam_config_mmr_s {
1373 unsigned long m_skt : 6; /* RW */
1374 unsigned long n_skt : 4; /* RW */
1375 unsigned long rsvd_10_63 : 54;
1376 } s;
1377 struct uv1h_rh_gam_config_mmr_s {
1378 unsigned long m_skt : 6; /* RW */
1379 unsigned long n_skt : 4; /* RW */
1380 unsigned long rsvd_10_11: 2; /* */
1381 unsigned long mmiol_cfg : 1; /* RW */
1382 unsigned long rsvd_13_63: 51; /* */
1383 } s1;
1384 struct uv2h_rh_gam_config_mmr_s {
1385 unsigned long m_skt : 6; /* RW */
1386 unsigned long n_skt : 4; /* RW */
1387 unsigned long rsvd_10_63: 54; /* */
1388 } s2;
1389};
1390
1391/* ========================================================================= */
860/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 1392/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
861/* ========================================================================= */ 1393/* ========================================================================= */
862#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 1394#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
863 1395
864#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 1396#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
865#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 1397#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1407
1408#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1409#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
1410#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
1411#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1412#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1413#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
872 1414
873union uvh_rh_gam_gru_overlay_config_mmr_u { 1415union uvh_rh_gam_gru_overlay_config_mmr_u {
874 unsigned long v; 1416 unsigned long v;
875 struct uvh_rh_gam_gru_overlay_config_mmr_s { 1417 struct uvh_rh_gam_gru_overlay_config_mmr_s {
876 unsigned long rsvd_0_27: 28; /* */ 1418 unsigned long rsvd_0_27: 28; /* */
877 unsigned long base : 18; /* RW */ 1419 unsigned long base : 18; /* RW */
1420 unsigned long rsvd_46_62 : 17;
1421 unsigned long enable : 1; /* RW */
1422 } s;
1423 struct uv1h_rh_gam_gru_overlay_config_mmr_s {
1424 unsigned long rsvd_0_27: 28; /* */
1425 unsigned long base : 18; /* RW */
878 unsigned long rsvd_46_47: 2; /* */ 1426 unsigned long rsvd_46_47: 2; /* */
879 unsigned long gr4 : 1; /* RW */ 1427 unsigned long gr4 : 1; /* RW */
880 unsigned long rsvd_49_51: 3; /* */ 1428 unsigned long rsvd_49_51: 3; /* */
881 unsigned long n_gru : 4; /* RW */ 1429 unsigned long n_gru : 4; /* RW */
882 unsigned long rsvd_56_62: 7; /* */ 1430 unsigned long rsvd_56_62: 7; /* */
883 unsigned long enable : 1; /* RW */ 1431 unsigned long enable : 1; /* RW */
-	} s;
+	} s1;
1433 struct uv2h_rh_gam_gru_overlay_config_mmr_s {
1434 unsigned long rsvd_0_27: 28; /* */
1435 unsigned long base : 18; /* RW */
1436 unsigned long rsvd_46_51: 6; /* */
1437 unsigned long n_gru : 4; /* RW */
1438 unsigned long rsvd_56_62: 7; /* */
1439 unsigned long enable : 1; /* RW */
1440 } s2;
885}; 1441};
886 1442
887/* ========================================================================= */ 1443/* ========================================================================= */
@@ -889,25 +1445,42 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
889/* ========================================================================= */ 1445/* ========================================================================= */
890#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL 1446#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
891 1447
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1456
1457#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
1458#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
1459#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
1460#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
1461#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
1462#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
1463#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1464#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
900 1465
901union uvh_rh_gam_mmioh_overlay_config_mmr_u { 1466union uvh_rh_gam_mmioh_overlay_config_mmr_u {
902 unsigned long v; 1467 unsigned long v;
-	struct uvh_rh_gam_mmioh_overlay_config_mmr_s {
+	struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
904 unsigned long rsvd_0_29: 30; /* */ 1469 unsigned long rsvd_0_29: 30; /* */
905 unsigned long base : 16; /* RW */ 1470 unsigned long base : 16; /* RW */
906 unsigned long m_io : 6; /* RW */ 1471 unsigned long m_io : 6; /* RW */
907 unsigned long n_io : 4; /* RW */ 1472 unsigned long n_io : 4; /* RW */
908 unsigned long rsvd_56_62: 7; /* */ 1473 unsigned long rsvd_56_62: 7; /* */
909 unsigned long enable : 1; /* RW */ 1474 unsigned long enable : 1; /* RW */
-	} s;
+	} s1;
1476 struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
1477 unsigned long rsvd_0_26: 27; /* */
1478 unsigned long base : 19; /* RW */
1479 unsigned long m_io : 6; /* RW */
1480 unsigned long n_io : 4; /* RW */
1481 unsigned long rsvd_56_62: 7; /* */
1482 unsigned long enable : 1; /* RW */
1483 } s2;
911}; 1484};
912 1485
913/* ========================================================================= */ 1486/* ========================================================================= */
@@ -917,20 +1490,40 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
917 1490
918#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 1491#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
919#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 1492#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
1497#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
1498#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1499#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1500
1501#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1502#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1503#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1504#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
924 1505
925union uvh_rh_gam_mmr_overlay_config_mmr_u { 1506union uvh_rh_gam_mmr_overlay_config_mmr_u {
926 unsigned long v; 1507 unsigned long v;
927 struct uvh_rh_gam_mmr_overlay_config_mmr_s { 1508 struct uvh_rh_gam_mmr_overlay_config_mmr_s {
928 unsigned long rsvd_0_25: 26; /* */ 1509 unsigned long rsvd_0_25: 26; /* */
929 unsigned long base : 20; /* RW */ 1510 unsigned long base : 20; /* RW */
1511 unsigned long rsvd_46_62 : 17;
1512 unsigned long enable : 1; /* RW */
1513 } s;
1514 struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
1515 unsigned long rsvd_0_25: 26; /* */
1516 unsigned long base : 20; /* RW */
930 unsigned long dual_hub : 1; /* RW */ 1517 unsigned long dual_hub : 1; /* RW */
931 unsigned long rsvd_47_62: 16; /* */ 1518 unsigned long rsvd_47_62: 16; /* */
932 unsigned long enable : 1; /* RW */ 1519 unsigned long enable : 1; /* RW */
-	} s;
+	} s1;
1521 struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
1522 unsigned long rsvd_0_25: 26; /* */
1523 unsigned long base : 20; /* RW */
1524 unsigned long rsvd_46_62: 17; /* */
1525 unsigned long enable : 1; /* RW */
1526 } s2;
934}; 1527};
935 1528
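Hub-specific fields must be reached through the matching view; dual_hub exists only in the UV1 layout above. A hedged sketch, assuming uv_read_local_mmr() and is_uv1_hub() from uv_hub.h:

	union uvh_rh_gam_mmr_overlay_config_mmr_u ov;

	ov.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (is_uv1_hub() && ov.s1.dual_hub)
		;	/* UV1-only field; absent from the UV2 layout */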
936/* ========================================================================= */ 1529/* ========================================================================= */
@@ -988,96 +1581,169 @@ union uvh_rtc1_int_config_u {
988}; 1581};
989 1582
990/* ========================================================================= */ 1583/* ========================================================================= */
-/* UVH_SI_ADDR_MAP_CONFIG */
+/* UVH_SCRATCH5 */
 /* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x778
 
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
 
-union uvh_si_addr_map_config_u {
+union uvh_scratch5_u {
 	unsigned long v;
-	struct uvh_si_addr_map_config_s {
-		unsigned long m_skt : 6; /* RW */
-		unsigned long rsvd_6_7: 2; /* */
-		unsigned long n_skt : 4; /* RW */
-		unsigned long rsvd_12_63: 52; /* */
+	struct uvh_scratch5_s {
+		unsigned long scratch5 : 64; /* RW, W1CS */
 	} s;
 };
 
 /* ========================================================================= */
-/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
+/* UV2H_EVENT_OCCURRED2 */
 /* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
+#define UV2H_EVENT_OCCURRED2 0x70100UL
+#define UV2H_EVENT_OCCURRED2_32 0xb68
+
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3
1612#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
1613#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4
1614#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
1615#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5
1616#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
1617#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6
1618#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
1619#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7
1620#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
1621#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8
1622#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
1623#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9
1624#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
1625#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10
1626#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
1627#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11
1628#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
1629#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12
1630#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
1631#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13
1632#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
1633#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14
1634#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
1635#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15
1636#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
1637#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16
1638#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
1639#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17
1640#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
1641#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18
1642#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
1643#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19
1644#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
1645#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20
1646#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
1647#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21
1648#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
1649#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22
1650#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
1651#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23
1652#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
1653#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24
1654#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
1655#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25
1656#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
1657#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26
1658#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
1659#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27
1660#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
1661#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28
1662#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
1663#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29
1664#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
1665#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30
1666#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
1667#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31
1668#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
1669
1670union uv2h_event_occurred2_u {
1023 unsigned long v; 1671 unsigned long v;
1024 struct uvh_si_alias0_overlay_config_s { 1672 struct uv2h_event_occurred2_s {
1025 unsigned long rsvd_0_23: 24; /* */ 1673 unsigned long rtc_0 : 1; /* RW */
1026 unsigned long base : 8; /* RW */ 1674 unsigned long rtc_1 : 1; /* RW */
1027 unsigned long rsvd_32_47: 16; /* */ 1675 unsigned long rtc_2 : 1; /* RW */
1028 unsigned long m_alias : 5; /* RW */ 1676 unsigned long rtc_3 : 1; /* RW */
1029 unsigned long rsvd_53_62: 10; /* */ 1677 unsigned long rtc_4 : 1; /* RW */
1030 unsigned long enable : 1; /* RW */ 1678 unsigned long rtc_5 : 1; /* RW */
1031 } s; 1679 unsigned long rtc_6 : 1; /* RW */
1680 unsigned long rtc_7 : 1; /* RW */
1681 unsigned long rtc_8 : 1; /* RW */
1682 unsigned long rtc_9 : 1; /* RW */
1683 unsigned long rtc_10 : 1; /* RW */
1684 unsigned long rtc_11 : 1; /* RW */
1685 unsigned long rtc_12 : 1; /* RW */
1686 unsigned long rtc_13 : 1; /* RW */
1687 unsigned long rtc_14 : 1; /* RW */
1688 unsigned long rtc_15 : 1; /* RW */
1689 unsigned long rtc_16 : 1; /* RW */
1690 unsigned long rtc_17 : 1; /* RW */
1691 unsigned long rtc_18 : 1; /* RW */
1692 unsigned long rtc_19 : 1; /* RW */
1693 unsigned long rtc_20 : 1; /* RW */
1694 unsigned long rtc_21 : 1; /* RW */
1695 unsigned long rtc_22 : 1; /* RW */
1696 unsigned long rtc_23 : 1; /* RW */
1697 unsigned long rtc_24 : 1; /* RW */
1698 unsigned long rtc_25 : 1; /* RW */
1699 unsigned long rtc_26 : 1; /* RW */
1700 unsigned long rtc_27 : 1; /* RW */
1701 unsigned long rtc_28 : 1; /* RW */
1702 unsigned long rtc_29 : 1; /* RW */
1703 unsigned long rtc_30 : 1; /* RW */
1704 unsigned long rtc_31 : 1; /* RW */
1705 unsigned long rsvd_32_63: 32; /* */
1706 } s1;
1032}; 1707};
1033 1708
1034/* ========================================================================= */ 1709/* ========================================================================= */
1035/* UVH_SI_ALIAS1_OVERLAY_CONFIG */ 1710/* UV2H_EVENT_OCCURRED2_ALIAS */
1036/* ========================================================================= */ 1711/* ========================================================================= */
1037#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL 1712#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL
1713#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
1038 1714
1039#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24 1715/* ========================================================================= */
1040#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL 1716/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */
1041#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48 1717/* ========================================================================= */
1042#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL 1718#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
1043#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63 1719#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
1044#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1045 1720
1046union uvh_si_alias1_overlay_config_u { 1721#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
1722#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
1723
1724union uv2h_lb_bau_sb_activation_status_2_u {
1047 unsigned long v; 1725 unsigned long v;
1048 struct uvh_si_alias1_overlay_config_s { 1726 struct uv2h_lb_bau_sb_activation_status_2_s {
1049 unsigned long rsvd_0_23: 24; /* */ 1727 unsigned long aux_error : 64; /* RW */
1050 unsigned long base : 8; /* RW */ 1728 } s1;
1051 unsigned long rsvd_32_47: 16; /* */
1052 unsigned long m_alias : 5; /* RW */
1053 unsigned long rsvd_53_62: 10; /* */
1054 unsigned long enable : 1; /* RW */
1055 } s;
1056}; 1729};
1057 1730
1058/* ========================================================================= */ 1731/* ========================================================================= */
1059/* UVH_SI_ALIAS2_OVERLAY_CONFIG */ 1732/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
1060/* ========================================================================= */ 1733/* ========================================================================= */
1061#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL 1734#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
1735#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
1062 1736
1063#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24 1737#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
1064#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL 1738#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
1065#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1066#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1067#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1068#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1069 1739
1070union uvh_si_alias2_overlay_config_u { 1740union uv1h_lb_target_physical_apic_id_mask_u {
1071 unsigned long v; 1741 unsigned long v;
1072 struct uvh_si_alias2_overlay_config_s { 1742 struct uv1h_lb_target_physical_apic_id_mask_s {
1073 unsigned long rsvd_0_23: 24; /* */ 1743 unsigned long bit_enables : 32; /* RW */
1074 unsigned long base : 8; /* RW */ 1744 unsigned long rsvd_32_63 : 32; /* */
1075 unsigned long rsvd_32_47: 16; /* */ 1745 } s1;
1076 unsigned long m_alias : 5; /* RW */
1077 unsigned long rsvd_53_62: 10; /* */
1078 unsigned long enable : 1; /* RW */
1079 } s;
1080}; 1746};
1081 1747
1082 1748
1083#endif /* _ASM_X86_UV_UV_MMRS_H */ 1749#endif /* __ASM_UV_MMRS_X86_H__ */
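The s/s1/s2 split above is the idiom this header now uses throughout: .s holds the fields common to all hub revisions, while .s1 and .s2 carry the UV1- and UV2-only layouts. A minimal sketch of how a consumer decodes such a register; UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR (the register's address macro defined earlier in this header) and uv_read_local_mmr() from <asm/uv/uv_hub.h> are assumed, so treat this as an illustration rather than code from the merge:

static unsigned long uv_mmr_overlay_base(void)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u overlay;

	/* Read the raw 64-bit MMR once, then decode it via the bitfields. */
	overlay.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (!overlay.s.enable)
		return 0;

	/* Fields common to all hub revisions live in .s; revision-only
	 * fields such as dual_hub (UV1) are read through .s1/.s2. */
	return (unsigned long)overlay.s.base
		<< UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
}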
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 9064052b73de..bb0522850b74 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -1,20 +1,6 @@
 #ifndef _ASM_X86_VDSO_H
 #define _ASM_X86_VDSO_H
 
-#ifdef CONFIG_X86_64
-extern const char VDSO64_PRELINK[];
-
-/*
- * Given a pointer to the vDSO image, find the pointer to VDSO64_name
- * as that symbol is defined in the vDSO sources or linker script.
- */
-#define VDSO64_SYMBOL(base, name)					\
-({									\
-	extern const char VDSO64_##name[];				\
-	(void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \
-})
-#endif
-
 #if defined CONFIG_X86_32 || defined CONFIG_COMPAT
 extern const char VDSO32_PRELINK[];
 
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 3d61e204826f..646b4c1ca695 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -23,8 +23,6 @@ struct vsyscall_gtod_data {
 	struct timespec wall_to_monotonic;
 	struct timespec wall_time_coarse;
 };
-extern struct vsyscall_gtod_data __vsyscall_gtod_data
-__section_vsyscall_gtod_data;
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
 #endif /* _ASM_X86_VGTOD_H */
diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
deleted file mode 100644
index 61e08c0a2907..000000000000
--- a/arch/x86/include/asm/vmi.h
+++ /dev/null
@@ -1,269 +0,0 @@
1/*
2 * VMI interface definition
3 *
4 * Copyright (C) 2005, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Maintained by: Zachary Amsden zach@vmware.com
22 *
23 */
24#include <linux/types.h>
25
26/*
27 *---------------------------------------------------------------------
28 *
29 * VMI Option ROM API
30 *
31 *---------------------------------------------------------------------
32 */
33#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */
34
35#define PCI_VENDOR_ID_VMWARE 0x15AD
36#define PCI_DEVICE_ID_VMWARE_VMI 0x0801
37
38/*
39 * We use two version numbers for compatibility, with the major
40 * number signifying interface breakages, and the minor number
41 * interface extensions.
42 */
43#define VMI_API_REV_MAJOR 3
44#define VMI_API_REV_MINOR 0
45
46#define VMI_CALL_CPUID 0
47#define VMI_CALL_WRMSR 1
48#define VMI_CALL_RDMSR 2
49#define VMI_CALL_SetGDT 3
50#define VMI_CALL_SetLDT 4
51#define VMI_CALL_SetIDT 5
52#define VMI_CALL_SetTR 6
53#define VMI_CALL_GetGDT 7
54#define VMI_CALL_GetLDT 8
55#define VMI_CALL_GetIDT 9
56#define VMI_CALL_GetTR 10
57#define VMI_CALL_WriteGDTEntry 11
58#define VMI_CALL_WriteLDTEntry 12
59#define VMI_CALL_WriteIDTEntry 13
60#define VMI_CALL_UpdateKernelStack 14
61#define VMI_CALL_SetCR0 15
62#define VMI_CALL_SetCR2 16
63#define VMI_CALL_SetCR3 17
64#define VMI_CALL_SetCR4 18
65#define VMI_CALL_GetCR0 19
66#define VMI_CALL_GetCR2 20
67#define VMI_CALL_GetCR3 21
68#define VMI_CALL_GetCR4 22
69#define VMI_CALL_WBINVD 23
70#define VMI_CALL_SetDR 24
71#define VMI_CALL_GetDR 25
72#define VMI_CALL_RDPMC 26
73#define VMI_CALL_RDTSC 27
74#define VMI_CALL_CLTS 28
75#define VMI_CALL_EnableInterrupts 29
76#define VMI_CALL_DisableInterrupts 30
77#define VMI_CALL_GetInterruptMask 31
78#define VMI_CALL_SetInterruptMask 32
79#define VMI_CALL_IRET 33
80#define VMI_CALL_SYSEXIT 34
81#define VMI_CALL_Halt 35
82#define VMI_CALL_Reboot 36
83#define VMI_CALL_Shutdown 37
84#define VMI_CALL_SetPxE 38
85#define VMI_CALL_SetPxELong 39
86#define VMI_CALL_UpdatePxE 40
87#define VMI_CALL_UpdatePxELong 41
88#define VMI_CALL_MachineToPhysical 42
89#define VMI_CALL_PhysicalToMachine 43
90#define VMI_CALL_AllocatePage 44
91#define VMI_CALL_ReleasePage 45
92#define VMI_CALL_InvalPage 46
93#define VMI_CALL_FlushTLB 47
94#define VMI_CALL_SetLinearMapping 48
95
96#define VMI_CALL_SetIOPLMask 61
97#define VMI_CALL_SetInitialAPState 62
98#define VMI_CALL_APICWrite 63
99#define VMI_CALL_APICRead 64
100#define VMI_CALL_IODelay 65
101#define VMI_CALL_SetLazyMode 73
102
103/*
104 *---------------------------------------------------------------------
105 *
106 * MMU operation flags
107 *
108 *---------------------------------------------------------------------
109 */
110
111/* Flags used by VMI_{Allocate|Release}Page call */
112#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */
113#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */
114#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */
115
116
117/* Flags shared by Allocate|Release Page and PTE updates */
118#define VMI_PAGE_PT 0x01
119#define VMI_PAGE_PD 0x02
120#define VMI_PAGE_PDP 0x04
121#define VMI_PAGE_PML4 0x08
122
123#define VMI_PAGE_NORMAL 0x00 /* for debugging */
124
125/* Flags used by PTE updates */
126#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */
127#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */
128#define VMI_PAGE_VA_MASK 0xfffff000
129
130#ifdef CONFIG_X86_PAE
131#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
132#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED)
133#else
134#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED)
135#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED)
136#endif
137
138/* Flags used by VMI_FlushTLB call */
139#define VMI_FLUSH_TLB 0x01
140#define VMI_FLUSH_GLOBAL 0x02
141
142/*
143 *---------------------------------------------------------------------
144 *
145 * VMI relocation definitions for ROM call get_reloc
146 *
147 *---------------------------------------------------------------------
148 */
149
150/* VMI Relocation types */
151#define VMI_RELOCATION_NONE 0
152#define VMI_RELOCATION_CALL_REL 1
153#define VMI_RELOCATION_JUMP_REL 2
154#define VMI_RELOCATION_NOP 3
155
156#ifndef __ASSEMBLY__
157struct vmi_relocation_info {
158 unsigned char *eip;
159 unsigned char type;
160 unsigned char reserved[3];
161};
162#endif
163
164
165/*
166 *---------------------------------------------------------------------
167 *
168 * Generic ROM structures and definitions
169 *
170 *---------------------------------------------------------------------
171 */
172
173#ifndef __ASSEMBLY__
174
175struct vrom_header {
176 u16 rom_signature; /* option ROM signature */
177 u8 rom_length; /* ROM length in 512 byte chunks */
178 u8 rom_entry[4]; /* 16-bit code entry point */
179 u8 rom_pad0; /* 4-byte align pad */
180 u32 vrom_signature; /* VROM identification signature */
181 u8 api_version_min;/* Minor version of API */
182 u8 api_version_maj;/* Major version of API */
183 u8 jump_slots; /* Number of jump slots */
184 u8 reserved1; /* Reserved for expansion */
185 u32 virtual_top; /* Hypervisor virtual address start */
186 u16 reserved2; /* Reserved for expansion */
187 u16 license_offs; /* Offset to License string */
188 u16 pci_header_offs;/* Offset to PCI OPROM header */
189 u16 pnp_header_offs;/* Offset to PnP OPROM header */
190 u32 rom_pad3; /* PnP reserved / VMI reserved */
191 u8 reserved[96]; /* Reserved for headers */
192 char vmi_init[8]; /* VMI_Init jump point */
193 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
194} __attribute__((packed));
195
196struct pnp_header {
197 char sig[4];
198 char rev;
199 char size;
200 short next;
201 short res;
202 long devID;
203 unsigned short manufacturer_offset;
204 unsigned short product_offset;
205} __attribute__((packed));
206
207struct pci_header {
208 char sig[4];
209 short vendorID;
210 short deviceID;
211 short vpdData;
212 short size;
213 char rev;
214 char class;
215 char subclass;
216 char interface;
217 short chunks;
218 char rom_version_min;
219 char rom_version_maj;
220 char codetype;
221 char lastRom;
222 short reserved;
223} __attribute__((packed));
224
225/* Function prototypes for bootstrapping */
226#ifdef CONFIG_VMI
227extern void vmi_init(void);
228extern void vmi_activate(void);
229extern void vmi_bringup(void);
230#else
231static inline void vmi_init(void) {}
232static inline void vmi_activate(void) {}
233static inline void vmi_bringup(void) {}
234#endif
235
236/* State needed to start an application processor in an SMP system. */
237struct vmi_ap_state {
238 u32 cr0;
239 u32 cr2;
240 u32 cr3;
241 u32 cr4;
242
243 u64 efer;
244
245 u32 eip;
246 u32 eflags;
247 u32 eax;
248 u32 ebx;
249 u32 ecx;
250 u32 edx;
251 u32 esp;
252 u32 ebp;
253 u32 esi;
254 u32 edi;
255 u16 cs;
256 u16 ss;
257 u16 ds;
258 u16 es;
259 u16 fs;
260 u16 gs;
261 u16 ldtr;
262
263 u16 gdtr_limit;
264 u32 gdtr_base;
265 u32 idtr_base;
266 u16 idtr_limit;
267};
268
269#endif
diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
deleted file mode 100644
index c6e0bee93e3c..000000000000
--- a/arch/x86/include/asm/vmi_time.h
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * VMI Time wrappers
3 *
4 * Copyright (C) 2006, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Send feedback to dhecht@vmware.com
22 *
23 */
24
25#ifndef _ASM_X86_VMI_TIME_H
26#define _ASM_X86_VMI_TIME_H
27
28/*
29 * Raw VMI call indices for timer functions
30 */
31#define VMI_CALL_GetCycleFrequency 66
32#define VMI_CALL_GetCycleCounter 67
33#define VMI_CALL_SetAlarm 68
34#define VMI_CALL_CancelAlarm 69
35#define VMI_CALL_GetWallclockTime 70
36#define VMI_CALL_WallclockUpdated 71
37
38/* Cached VMI timer operations */
39extern struct vmi_timer_ops {
40 u64 (*get_cycle_frequency)(void);
41 u64 (*get_cycle_counter)(int);
42 u64 (*get_wallclock)(void);
43 int (*wallclock_updated)(void);
44 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
45 void (*cancel_alarm)(u32 flags);
46} vmi_timer_ops;
47
48/* Prototypes */
49extern void __init vmi_time_init(void);
50extern unsigned long vmi_get_wallclock(void);
51extern int vmi_set_wallclock(unsigned long now);
52extern unsigned long long vmi_sched_clock(void);
53extern unsigned long vmi_tsc_khz(void);
54
55#ifdef CONFIG_X86_LOCAL_APIC
56extern void __devinit vmi_time_bsp_init(void);
57extern void __devinit vmi_time_ap_init(void);
58#endif
59
60/*
61 * When run under a hypervisor, a vcpu is always in one of three states:
62 * running, halted, or ready. The vcpu is in the 'running' state if it
63 * is executing. When the vcpu executes the halt interface, the vcpu
64 * enters the 'halted' state and remains halted until there is some work
65 * pending for the vcpu (e.g. an alarm expires, host I/O completes on
66 * behalf of virtual I/O). At this point, the vcpu enters the 'ready'
67 * state (waiting for the hypervisor to reschedule it). Finally, at any
68 * time when the vcpu is not in the 'running' state nor the 'halted'
69 * state, it is in the 'ready' state.
70 *
71 * Real time advances while the vcpu is 'running', 'ready', or
72 * 'halted'. Stolen time is the time in which the vcpu is in the
73 * 'ready' state. Available time is the remaining time -- the vcpu is
74 * either 'running' or 'halted'.
75 *
76 * All three views of time are accessible through the VMI cycle
77 * counters.
78 */
79
80/* The cycle counters. */
81#define VMI_CYCLES_REAL 0
82#define VMI_CYCLES_AVAILABLE 1
83#define VMI_CYCLES_STOLEN 2
84
85/* The alarm interface 'flags' bits */
86#define VMI_ALARM_COUNTERS 2
87
88#define VMI_ALARM_COUNTER_MASK 0x000000ff
89
90#define VMI_ALARM_WIRED_IRQ0 0x00000000
91#define VMI_ALARM_WIRED_LVTT 0x00010000
92
93#define VMI_ALARM_IS_ONESHOT 0x00000000
94#define VMI_ALARM_IS_PERIODIC 0x00000100
95
96#define CONFIG_VMI_ALARM_HZ 100
97
98#endif /* _ASM_X86_VMI_TIME_H */
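The real/available/stolen accounting described in the deleted comment reduces to one identity: real = available + stolen. A sketch against the vmi_timer_ops interface declared in this (now removed) header, illustrative only:

static u64 vmi_stolen_cycles(void)
{
	/* Real time accumulates in 'running', 'ready' and 'halted';
	 * available time only in 'running' and 'halted'. The gap is
	 * exactly the cycles spent in the 'ready' state. */
	u64 real      = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
	u64 available = vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);

	return real - available;
}

The same quantity is also exposed directly through get_cycle_counter(VMI_CYCLES_STOLEN).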
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9f0cbd987d50..84471b810460 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -66,15 +66,23 @@
 #define PIN_BASED_NMI_EXITING                   0x00000008
 #define PIN_BASED_VIRTUAL_NMIS                  0x00000020
 
+#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000002
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
+#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
 #define VM_EXIT_SAVE_IA32_PAT                   0x00040000
 #define VM_EXIT_LOAD_IA32_PAT                   0x00080000
+#define VM_EXIT_SAVE_IA32_EFER                  0x00100000
+#define VM_EXIT_LOAD_IA32_EFER                  0x00200000
+#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
 
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000002
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
+#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
 #define VM_ENTRY_LOAD_IA32_PAT                  0x00004000
+#define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
 
 /* VMCS Encodings */
 enum vmcs_field {
@@ -239,6 +247,7 @@ enum vmcs_field {
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
 #define EXIT_REASON_HLT                 12
+#define EXIT_REASON_INVD                13
 #define EXIT_REASON_INVLPG              14
 #define EXIT_REASON_RDPMC               15
 #define EXIT_REASON_RDTSC               16
@@ -296,6 +305,12 @@ enum vmcs_field {
 #define GUEST_INTR_STATE_SMI            0x00000004
 #define GUEST_INTR_STATE_NMI            0x00000008
 
+/* GUEST_ACTIVITY_STATE flags */
+#define GUEST_ACTIVITY_ACTIVE           0
+#define GUEST_ACTIVITY_HLT              1
+#define GUEST_ACTIVITY_SHUTDOWN         2
+#define GUEST_ACTIVITY_WAIT_SIPI        3
+
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
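Whether optional controls such as VM_EXIT_SAVE_IA32_EFER are actually usable on a given CPU is discoverable from the VMX capability MSRs, whose high dword holds the allowed-1 settings. A hedged sketch following the adjust_vmx_controls() pattern KVM uses; MSR_IA32_VMX_EXIT_CTLS comes from <asm/msr-index.h>:

static bool vmx_exit_ctrl_supported(u32 ctrl)
{
	u32 low, high;

	/* High dword of IA32_VMX_EXIT_CTLS = bits that may be set to 1. */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS, low, high);
	return (high & ctrl) == ctrl;
}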
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d0983d255fbd..d55597351f6a 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -16,27 +16,19 @@ enum vsyscall_num {
 #ifdef __KERNEL__
 #include <linux/seqlock.h>
 
-#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
-
 /* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __section_vsyscall_gtod_data __attribute__ \
-	((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
-#define __section_vsyscall_clock __attribute__ \
-	((unused, __section__ (".vsyscall_clock"),aligned(16)))
 #define __vsyscall_fn \
 	__attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
 
 #define VGETCPU_RDTSCP	1
 #define VGETCPU_LSL	2
 
-extern int __vgetcpu_mode;
-extern volatile unsigned long __jiffies;
-
 /* kernel space (writeable) */
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
 
+#include <asm/vvar.h>
+
 extern void map_vsyscall(void);
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
new file mode 100644
index 000000000000..341b3559452b
--- /dev/null
+++ b/arch/x86/include/asm/vvar.h
@@ -0,0 +1,52 @@
1/*
2 * vvar.h: Shared vDSO/kernel variable declarations
3 * Copyright (c) 2011 Andy Lutomirski
4 * Subject to the GNU General Public License, version 2
5 *
6 * A handful of variables are accessible (read-only) from userspace
7 * code in the vsyscall page and the vdso. They are declared here.
8 * Some other file must define them with DEFINE_VVAR.
9 *
10 * In normal kernel code, they are used like any other variable.
11 * In user code, they are accessed through the VVAR macro.
12 *
13 * Each of these variables lives in the vsyscall page, and each
14 * one needs a unique offset within the little piece of the page
15 * reserved for vvars. Specify that offset in DECLARE_VVAR.
16 * (There are 896 bytes available. If you mess up, the linker will
17 * catch it.)
18 */
19
20/* Offset of vars within vsyscall page */
21#define VSYSCALL_VARS_OFFSET (3072 + 128)
22
23#if defined(__VVAR_KERNEL_LDS)
24
25/* The kernel linker script defines its own magic to put vvars in the
26 * right place.
27 */
28#define DECLARE_VVAR(offset, type, name) \
29 EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset)
30
31#else
32
33#define DECLARE_VVAR(offset, type, name) \
34 static type const * const vvaraddr_ ## name = \
35 (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset));
36
37#define DEFINE_VVAR(type, name) \
38 type __vvar_ ## name \
39 __attribute__((section(".vsyscall_var_" #name), aligned(16)))
40
41#define VVAR(name) (*vvaraddr_ ## name)
42
43#endif
44
45/* DECLARE_VVAR(offset, type, name) */
46
47DECLARE_VVAR(0, volatile unsigned long, jiffies)
48DECLARE_VVAR(8, int, vgetcpu_mode)
49DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
50
51#undef DECLARE_VVAR
52#undef VSYSCALL_VARS_OFFSET
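A sketch of the full life cycle of a vvar (the variable below is hypothetical, not one added by this patch): reserve an unused offset in this header, instantiate the variable once in kernel code, and read it from vDSO code through VVAR():

/* In this header: offsets 0 and 8 are taken above, 16 is free. */
DECLARE_VVAR(16, int, vdso_example_flag)

/* In exactly one kernel .c file: allocate the backing variable.
 * Note DEFINE_VVAR names it __vvar_vdso_example_flag. */
DEFINE_VVAR(int, vdso_example_flag);

/* Kernel side writes it like any other variable... */
static void vdso_example_set(int v)
{
	__vvar_vdso_example_flag = v;
}

/* ...while vDSO/user-side code reads it through the fixed address. */
static inline int vdso_example_get(void)
{
	return VVAR(vdso_example_flag);
}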
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
new file mode 100644
index 000000000000..6bf5b8e478c0
--- /dev/null
+++ b/arch/x86/include/asm/x2apic.h
@@ -0,0 +1,62 @@
1/*
2 * Common bits for X2APIC cluster/physical modes.
3 */
4
5#ifndef _ASM_X86_X2APIC_H
6#define _ASM_X86_X2APIC_H
7
8#include <asm/apic.h>
9#include <asm/ipi.h>
10#include <linux/cpumask.h>
11
12/*
13 * Need to use more than cpu 0, because we need more vectors
14 * when MSI-X is used.
15 */
16static const struct cpumask *x2apic_target_cpus(void)
17{
18 return cpu_online_mask;
19}
20
21static int x2apic_apic_id_registered(void)
22{
23 return 1;
24}
25
26/*
27 * For now each logical cpu is in its own vector allocation domain.
28 */
29static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
30{
31 cpumask_clear(retmask);
32 cpumask_set_cpu(cpu, retmask);
33}
34
35static void
36__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
37{
38 unsigned long cfg = __prepare_ICR(0, vector, dest);
39 native_x2apic_icr_write(cfg, apicid);
40}
41
42static unsigned int x2apic_get_apic_id(unsigned long id)
43{
44 return id;
45}
46
47static unsigned long x2apic_set_apic_id(unsigned int id)
48{
49 return id;
50}
51
52static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
53{
54 return initial_apicid >> index_msb;
55}
56
57static void x2apic_send_IPI_self(int vector)
58{
59 apic_write(APIC_SELF_IPI, vector);
60}
61
62#endif /* _ASM_X86_X2APIC_H */
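For context, a sketch of how these building blocks combine into the per-cpu IPI path of the x2apic drivers; per_cpu(x86_cpu_to_apicid, cpu) and APIC_DEST_PHYSICAL are assumed from <asm/smp.h> and <asm/apic.h>, and the function name is illustrative, mirroring the physical-mode driver rather than quoting it:

static void x2apic_send_IPI_one(int cpu, int vector)
{
	/* Physical mode: the destination is the target cpu's apicid;
	 * cluster mode would use the logical cluster id instead. */
	unsigned int apicid = per_cpu(x86_cpu_to_apicid, cpu);

	__x2apic_send_IPI_dest(apicid, vector, APIC_DEST_PHYSICAL);
}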
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baa579c8e038..d3d859035af9 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -68,6 +68,17 @@ struct x86_init_oem {
 };
 
 /**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+ * @pagetable_reserve:	reserve a range of addresses for kernel pagetable usage
+ *
+ * For more details on the purpose of this hook, look in
+ * init_memory_mapping and the commit that added it.
+ */
+struct x86_init_mapping {
+	void (*pagetable_reserve)(u64 start, u64 end);
+};
+
+/**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_setup_start:	platform specific pre paging_init() call
  * @pagetable_setup_done:	platform specific post paging_init() call
@@ -83,11 +94,13 @@ struct x86_init_paging {
  *				boot cpu
  * @tsc_pre_init:		platform function called before TSC init
  * @timer_init:			initialize the platform timer (default PIT/HPET)
+ * @wallclock_init:		init the wallclock device
  */
 struct x86_init_timers {
 	void (*setup_percpu_clockev)(void);
 	void (*tsc_pre_init)(void);
 	void (*timer_init)(void);
+	void (*wallclock_init)(void);
 };
 
 /**
@@ -121,6 +134,7 @@ struct x86_init_ops {
 	struct x86_init_mpparse		mpparse;
 	struct x86_init_irqs		irqs;
 	struct x86_init_oem		oem;
+	struct x86_init_mapping		mapping;
 	struct x86_init_paging		paging;
 	struct x86_init_timers		timers;
 	struct x86_init_iommu		iommu;
@@ -154,9 +168,18 @@ struct x86_platform_ops {
 	int (*i8042_detect)(void);
 };
 
+struct pci_dev;
+
+struct x86_msi_ops {
+	int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+	void (*teardown_msi_irq)(unsigned int irq);
+	void (*teardown_msi_irqs)(struct pci_dev *dev);
+};
+
 extern struct x86_init_ops x86_init;
 extern struct x86_cpuinit_ops x86_cpuinit;
 extern struct x86_platform_ops x86_platform;
+extern struct x86_msi_ops x86_msi;
 
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
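The new x86_msi field follows the same override convention as the rest of x86_init_ops: native implementations are installed by default and a platform replaces them early in boot. A hedged sketch of a Xen-style override (function bodies elided, names illustrative of the dom0/pcifront users this merge targets):

static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	/* ...allocate pirqs from the hypervisor, bind event channels... */
	return 0;
}

static void xen_teardown_msi_irq(unsigned int irq)
{
	/* ...unbind the event channel and release the pirq... */
}

static void __init xen_msi_init(void)
{
	x86_msi.setup_msi_irqs   = xen_setup_msi_irqs;
	x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
}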
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 7fda040a76cd..d240ea950519 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -200,6 +200,23 @@ extern struct { char _entry[32]; } hypercall_page[];
 	(type)__res;						\
 })
 
+static inline long
+privcmd_call(unsigned call,
+	     unsigned long a1, unsigned long a2,
+	     unsigned long a3, unsigned long a4,
+	     unsigned long a5)
+{
+	__HYPERCALL_DECLS;
+	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+	asm volatile("call *%[call]"
+		     : __HYPERCALL_5PARAM
+		     : [call] "a" (&hypercall_page[call])
+		     : __HYPERCALL_CLOBBER5);
+
+	return (long)__res;
+}
+
 static inline int
 HYPERVISOR_set_trap_table(struct trap_info *table)
 {
@@ -270,7 +287,7 @@ HYPERVISOR_fpu_taskswitch(int set)
 static inline int
 HYPERVISOR_sched_op(int cmd, void *arg)
 {
-	return _hypercall2(int, sched_op_new, cmd, arg);
+	return _hypercall2(int, sched_op, cmd, arg);
 }
 
 static inline long
@@ -405,10 +422,17 @@ HYPERVISOR_set_segment_base(int reg, unsigned long value)
 #endif
 
 static inline int
-HYPERVISOR_suspend(unsigned long srec)
+HYPERVISOR_suspend(unsigned long start_info_mfn)
 {
-	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-			   SHUTDOWN_suspend, srec);
+	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+	/*
+	 * For a PV guest the tools require that the start_info mfn be
+	 * present in rdx/edx when the hypercall is made. Per the
+	 * hypercall calling convention this is the third hypercall
+	 * argument, which is start_info_mfn here.
+	 */
+	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
 }
 
 static inline int
@@ -423,6 +447,13 @@ HYPERVISOR_hvm_op(int op, void *arg)
 	return _hypercall2(unsigned long, hvm_op, op, arg);
 }
 
+static inline int
+HYPERVISOR_tmem_op(
+	struct tmem_op *op)
+{
+	return _hypercall1(int, tmem_op, op);
+}
+
 static inline void
 MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
 {
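For reference, the sched_op hypercall renamed above is used like this (SCHEDOP_yield comes from <xen/interface/sched.h>; a minimal sketch):

static inline void xen_yield(void)
{
	/* Offer the physical CPU back to the hypervisor without
	 * blocking this vcpu; it stays runnable. */
	HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}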
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 396ff4cc8ed4..66d0fff1ee84 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,4 +37,39 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
+#include <asm/processor.h>
+
+static inline uint32_t xen_cpuid_base(void)
+{
+	uint32_t base, eax, ebx, ecx, edx;
+	char signature[13];
+
+	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+		cpuid(base, &eax, &ebx, &ecx, &edx);
+		*(uint32_t *)(signature + 0) = ebx;
+		*(uint32_t *)(signature + 4) = ecx;
+		*(uint32_t *)(signature + 8) = edx;
+		signature[12] = 0;
+
+		if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
+			return base;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_XEN
+extern bool xen_hvm_need_lapic(void);
+
+static inline bool xen_x2apic_para_available(void)
+{
+	return xen_hvm_need_lapic();
+}
+#else
+static inline bool xen_x2apic_para_available(void)
+{
+	return (xen_cpuid_base() != 0);
+}
+#endif
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
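A sketch of how xen_cpuid_base() is typically consumed during platform detection (the probe function and message are illustrative):

static bool probe_xen(void)
{
	uint32_t base, eax, ebx, ecx, edx;

	base = xen_cpuid_base();
	if (!base)
		return false;	/* no "XenVMMXenVMM" signature found */

	/* Leaf base+1 packs the Xen version into EAX: major.minor. */
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);
	printk(KERN_INFO "Xen version %u.%u\n", eax >> 16, eax & 0xffff);
	return true;
}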
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index e8506c1f0c55..5d4922ad4b9b 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void);
 #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
 #endif
 
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END   mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
 
 /* Maximum number of virtual CPUs in multi-processor guests. */
 #define MAX_VIRT_CPUS 32
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
  * The privilege level specifies which modes may enter a trap via a software
  * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
  * privilege levels as follows:
- *  Level == 0: Noone may enter
+ *  Level == 0: No one may enter
  *  Level == 1: Kernel may enter
 *  Level == 2: Kernel may enter
 *  Level == 3: Everyone may enter
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h
index 42a7e004ae5c..8413688b2571 100644
--- a/arch/x86/include/asm/xen/interface_32.h
+++ b/arch/x86/include/asm/xen/interface_32.h
@@ -32,6 +32,11 @@
 /* And the trap vector is... */
 #define TRAP_INSTR "int $0x82"
 
+#define __MACH2PHYS_VIRT_START	0xF5800000
+#define __MACH2PHYS_VIRT_END	0xF6800000
+
+#define __MACH2PHYS_SHIFT	2
+
 /*
  * Virtual addresses beyond this are not modifiable by guest OSes. The
  * machine->physical mapping table starts at this address, read-only.
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index 100d2662b97c..839a4811cf98 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -39,18 +39,7 @@
 #define __HYPERVISOR_VIRT_END	0xFFFF880000000000
 #define __MACH2PHYS_VIRT_START	0xFFFF800000000000
 #define __MACH2PHYS_VIRT_END	0xFFFF804000000000
-
-#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
-#endif
-
-#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
-#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define __MACH2PHYS_SHIFT	3
 
 /*
  * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
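With the per-arch __MACH2PHYS_* constants in place, the table size computed by the generic MACH2PHYS_NR_ENTRIES macro works out as follows (arithmetic spelled out for illustration):

/*
 * MACH2PHYS_NR_ENTRIES = (VIRT_END - VIRT_START) >> __MACH2PHYS_SHIFT
 *
 * 64-bit: (0xFFFF804000000000 - 0xFFFF800000000000) >> 3
 *       = 0x4000000000 >> 3 = 2^35 eight-byte entries
 *         (a 256 GB read-only virtual window).
 *
 * 32-bit: (0xF6800000 - 0xF5800000) >> 2
 *       = 0x01000000 >> 2 = 2^22 four-byte entries
 *         (a 16 MB window).
 *
 * The shift is simply log2(sizeof(unsigned long)) on each arch.
 */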
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index bf5f7d32bd08..64a619d47d34 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/pfn.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -28,23 +29,46 @@ typedef struct xpaddr {
 
 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
 #define INVALID_P2M_ENTRY	(~0UL)
-#define FOREIGN_FRAME_BIT	(1UL<<31)
+#define FOREIGN_FRAME_BIT	(1UL<<(BITS_PER_LONG-1))
+#define IDENTITY_FRAME_BIT	(1UL<<(BITS_PER_LONG-2))
 #define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
+#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)
 
 /* Maximum amount of memory we can handle in a domain in pages */
 #define MAX_DOMAIN_PAGES						\
     ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
 
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned int   machine_to_phys_order;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
-extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern unsigned long set_phys_range_identity(unsigned long pfn_s,
+					     unsigned long pfn_e);
+
+extern int m2p_add_override(unsigned long mfn, struct page *page,
+			    bool clear_pte);
+extern int m2p_remove_override(struct page *page, bool clear_pte);
+extern struct page *m2p_find_override(unsigned long mfn);
+extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+
+#ifdef CONFIG_XEN_DEBUG_FS
+extern int p2m_dump_show(struct seq_file *m, void *v);
+#endif
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
+	unsigned long mfn;
+
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return pfn;
 
-	return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
+	mfn = get_phys_to_machine(pfn);
+
+	if (mfn != INVALID_P2M_ENTRY)
+		mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
+
+	return mfn;
 }
 
 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -58,22 +82,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
 static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
 	unsigned long pfn;
+	int ret = 0;
 
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-#if 0
-	if (unlikely((mfn >> machine_to_phys_order) != 0))
-		return max_mapnr;
-#endif
-
+	if (unlikely((mfn >> machine_to_phys_order) != 0)) {
+		pfn = ~0;
+		goto try_override;
+	}
 	pfn = 0;
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
 	 * In such cases it doesn't matter what we return (we return garbage),
 	 * but we must handle the fault without crashing!
 	 */
-	__get_user(pfn, &machine_to_phys_mapping[mfn]);
+	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+try_override:
+	/* ret might be < 0 if there are no entries in the m2p for mfn */
+	if (ret < 0)
+		pfn = ~0;
+	else if (get_phys_to_machine(pfn) != mfn)
+		/*
+		 * If this appears to be a foreign mfn (because the pfn
+		 * doesn't map back to the mfn), then check the local override
+		 * table to see if there's a better pfn to use.
+		 *
+		 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
+		 */
+		pfn = m2p_find_override_pfn(mfn, ~0);
+
+	/*
+	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
+	 * entry doesn't map back to the mfn and m2p_override doesn't have a
+	 * valid entry for it.
+	 */
+	if (pfn == ~0 &&
+	    get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
+		pfn = mfn;
 
 	return pfn;
 }
@@ -159,6 +205,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 #define pgd_val_ma(x)	((x).pgd)
 
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
 unsigned long arbitrary_virt_to_mfn(void *vaddr);
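The comments in mfn_to_pfn() above describe exactly when the two translations stop being inverses of each other. A sketch of the sanity check they imply (the helper name is illustrative):

static bool p2m_round_trip_ok(unsigned long pfn)
{
	/* pfn_to_mfn() already strips the FOREIGN/IDENTITY bits. */
	unsigned long mfn = pfn_to_mfn(pfn);

	if (mfn == INVALID_P2M_ENTRY)
		return false;

	/* For an ordinary local page m2p(p2m(pfn)) must yield pfn back;
	 * foreign/granted pages are the documented exception, which is
	 * what the m2p_override lookup above exists to patch up. */
	return mfn_to_pfn(mfn) == pfn;
}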
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
new file mode 100644
index 000000000000..4fbda9a3f339
--- /dev/null
+++ b/arch/x86/include/asm/xen/pci.h
@@ -0,0 +1,81 @@
1#ifndef _ASM_X86_XEN_PCI_H
2#define _ASM_X86_XEN_PCI_H
3
4#if defined(CONFIG_PCI_XEN)
5extern int __init pci_xen_init(void);
6extern int __init pci_xen_hvm_init(void);
7#define pci_xen 1
8#else
9#define pci_xen 0
10#define pci_xen_init (0)
11static inline int pci_xen_hvm_init(void)
12{
13 return -1;
14}
15#endif
16#if defined(CONFIG_XEN_DOM0)
17void __init xen_setup_pirqs(void);
18int xen_find_device_domain_owner(struct pci_dev *dev);
19int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
20int xen_unregister_device_domain_owner(struct pci_dev *dev);
21#else
22static inline void __init xen_setup_pirqs(void)
23{
24}
25static inline int xen_find_device_domain_owner(struct pci_dev *dev)
26{
27 return -1;
28}
29static inline int xen_register_device_domain_owner(struct pci_dev *dev,
30 uint16_t domain)
31{
32 return -1;
33}
34static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
35{
36 return -1;
37}
38#endif
39
40#if defined(CONFIG_PCI_MSI)
41#if defined(CONFIG_PCI_XEN)
42/* drivers/pci/xen-pcifront.c fills in this structure with
43 * its own functions.
44 */
45struct xen_pci_frontend_ops {
46 int (*enable_msi)(struct pci_dev *dev, int vectors[]);
47 void (*disable_msi)(struct pci_dev *dev);
48 int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
49 void (*disable_msix)(struct pci_dev *dev);
50};
51
52extern struct xen_pci_frontend_ops *xen_pci_frontend;
53
54static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
55 int vectors[])
56{
57 if (xen_pci_frontend && xen_pci_frontend->enable_msi)
58 return xen_pci_frontend->enable_msi(dev, vectors);
59 return -ENODEV;
60}
61static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
62{
63 if (xen_pci_frontend && xen_pci_frontend->disable_msi)
64 xen_pci_frontend->disable_msi(dev);
65}
66static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
67 int vectors[], int nvec)
68{
69 if (xen_pci_frontend && xen_pci_frontend->enable_msix)
70 return xen_pci_frontend->enable_msix(dev, vectors, nvec);
71 return -ENODEV;
72}
73static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
74{
75 if (xen_pci_frontend && xen_pci_frontend->disable_msix)
76 xen_pci_frontend->disable_msix(dev);
77}
78#endif /* CONFIG_PCI_XEN */
79#endif /* CONFIG_PCI_MSI */
80
81#endif /* _ASM_X86_XEN_PCI_H */
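The xen_pci_frontend pointer above is meant to be published by the pcifront driver at probe time; the inline wrappers then null-check it so core MSI code can call through unconditionally. A hedged sketch of the registration side (the my_* names are placeholders, not the driver's real symbols):

static int my_enable_msi(struct pci_dev *dev, int vectors[])
{
	/* ...negotiate MSI pirqs with the backend, fill vectors[]... */
	return 0;
}

static struct xen_pci_frontend_ops my_frontend_ops = {
	.enable_msi = my_enable_msi,
};

static void my_pcifront_probe(void)
{
	xen_pci_frontend = &my_frontend_ops;
}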