author		Frederic Weisbecker <fweisbec@gmail.com>	2009-09-23 17:08:43 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2009-09-23 17:08:43 -0400
commit		d7a4b414eed51f1653bb05ebe84122bf9a7ae18b
tree		bd6603a0c27de4c138a1767871897e9cd3e1a1d2 /arch/x86/include
parent		1f0ab40976460bc4673fa204ce917a725185d8f2
parent		a724eada8c2a7b62463b73ccf73fd0bb6e928aeb
Merge commit 'linus/master' into tracing/kprobes
Conflicts:
	kernel/trace/Makefile
	kernel/trace/trace.h
	kernel/trace/trace_event_types.h
	kernel/trace/trace_export.c

Merge reason: Sync with latest significant tracing core changes.
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/acpi.h | 1
-rw-r--r--  arch/x86/include/asm/agp.h | 4
-rw-r--r--  arch/x86/include/asm/alternative.h | 7
-rw-r--r--  arch/x86/include/asm/amd_iommu.h | 1
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 50
-rw-r--r--  arch/x86/include/asm/apic.h | 39
-rw-r--r--  arch/x86/include/asm/apicdef.h | 5
-rw-r--r--  arch/x86/include/asm/asm.h | 10
-rw-r--r--  arch/x86/include/asm/bootparam.h | 13
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 54
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/current.h | 2
-rw-r--r--  arch/x86/include/asm/desc.h | 13
-rw-r--r--  arch/x86/include/asm/desc_defs.h | 6
-rw-r--r--  arch/x86/include/asm/device.h | 3
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 18
-rw-r--r--  arch/x86/include/asm/do_timer.h | 16
-rw-r--r--  arch/x86/include/asm/dwarf2.h | 18
-rw-r--r--  arch/x86/include/asm/e820.h | 2
-rw-r--r--  arch/x86/include/asm/elf.h | 2
-rw-r--r--  arch/x86/include/asm/entry_arch.h | 4
-rw-r--r--  arch/x86/include/asm/fixmap.h | 3
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 2
-rw-r--r--  arch/x86/include/asm/i387.h | 9
-rw-r--r--  arch/x86/include/asm/io_apic.h | 20
-rw-r--r--  arch/x86/include/asm/ioctls.h | 95
-rw-r--r--  arch/x86/include/asm/iomap.h | 9
-rw-r--r--  arch/x86/include/asm/ipcbuf.h | 29
-rw-r--r--  arch/x86/include/asm/irq.h | 3
-rw-r--r--  arch/x86/include/asm/irqflags.h | 9
-rw-r--r--  arch/x86/include/asm/kvm.h | 10
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h (renamed from arch/x86/include/asm/kvm_x86_emulate.h) | 0
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 60
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 2
-rw-r--r--  arch/x86/include/asm/lguest.h | 5
-rw-r--r--  arch/x86/include/asm/mce.h | 32
-rw-r--r--  arch/x86/include/asm/mman.h | 14
-rw-r--r--  arch/x86/include/asm/module.h | 15
-rw-r--r--  arch/x86/include/asm/mpspec.h | 47
-rw-r--r--  arch/x86/include/asm/msgbuf.h | 40
-rw-r--r--  arch/x86/include/asm/msr-index.h | 12
-rw-r--r--  arch/x86/include/asm/msr.h | 75
-rw-r--r--  arch/x86/include/asm/mtrr.h | 6
-rw-r--r--  arch/x86/include/asm/nmi.h | 4
-rw-r--r--  arch/x86/include/asm/nops.h | 2
-rw-r--r--  arch/x86/include/asm/param.h | 23
-rw-r--r--  arch/x86/include/asm/paravirt.h | 797
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 693
-rw-r--r--  arch/x86/include/asm/pat.h | 5
-rw-r--r--  arch/x86/include/asm/pci.h | 1
-rw-r--r--  arch/x86/include/asm/percpu.h | 35
-rw-r--r--  arch/x86/include/asm/perf_event.h (renamed from arch/x86/include/asm/perf_counter.h) | 36
-rw-r--r--  arch/x86/include/asm/pgtable.h | 26
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 60
-rw-r--r--  arch/x86/include/asm/scatterlist.h | 27
-rw-r--r--  arch/x86/include/asm/setup.h | 49
-rw-r--r--  arch/x86/include/asm/shmbuf.h | 52
-rw-r--r--  arch/x86/include/asm/socket.h | 61
-rw-r--r--  arch/x86/include/asm/sockios.h | 14
-rw-r--r--  arch/x86/include/asm/stackprotector.h | 10
-rw-r--r--  arch/x86/include/asm/string_32.h | 1
-rw-r--r--  arch/x86/include/asm/syscall.h | 14
-rw-r--r--  arch/x86/include/asm/system.h | 29
-rw-r--r--  arch/x86/include/asm/termbits.h | 199
-rw-r--r--  arch/x86/include/asm/termios.h | 115
-rw-r--r--  arch/x86/include/asm/thread_info.h | 2
-rw-r--r--  arch/x86/include/asm/time.h | 53
-rw-r--r--  arch/x86/include/asm/timer.h | 14
-rw-r--r--  arch/x86/include/asm/topology.h | 49
-rw-r--r--  arch/x86/include/asm/traps.h | 4
-rw-r--r--  arch/x86/include/asm/tsc.h | 3
-rw-r--r--  arch/x86/include/asm/types.h | 12
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 2
-rw-r--r--  arch/x86/include/asm/ucontext.h | 8
-rw-r--r--  arch/x86/include/asm/unistd_32.h | 2
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 4
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 19
-rw-r--r--  arch/x86/include/asm/vgtod.h | 1
-rw-r--r--  arch/x86/include/asm/vmware.h | 2
-rw-r--r--  arch/x86/include/asm/vmx.h | 8
-rw-r--r--  arch/x86/include/asm/x86_init.h | 133
82 files changed, 1496 insertions(+), 1844 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 20d1465a2ab0..4518dc500903 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -144,7 +144,6 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 
 #else /* !CONFIG_ACPI */
 
-#define acpi_disabled 1
 #define acpi_lapic 0
 #define acpi_ioapic 0
 static inline void acpi_noirq_set(void) { }
diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h
index 9825cd64c9b6..eec2a70d4376 100644
--- a/arch/x86/include/asm/agp.h
+++ b/arch/x86/include/asm/agp.h
@@ -22,10 +22,6 @@
  */
 #define flush_agp_cache() wbinvd()
 
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order)		\
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 1a37bcdc8606..c240efc74e00 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif /* CONFIG_SMP */
 
-const unsigned char *const *find_nop_table(void);
-
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 									\
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif
 
-extern void add_nops(void *insns, unsigned int len);
-
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len);
  * Intel's errata.
  * On the local CPU you need to be protected again NMI or MCE handlers seeing an
  * inconsistent instruction while you patch.
- * The _early version expects the memory to already be RW.
  */
-
 extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index bdf96f119f06..ac95995b7bad 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -25,6 +25,7 @@
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
 extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 0c878caaa0a2..2a2cc7a78a81 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -143,22 +143,29 @@
 #define EVT_BUFFER_SIZE		8192 /* 512 entries */
 #define EVT_LEN_MASK		(0x9ULL << 56)
 
+#define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03
-
-#define IOMMU_PDE_NL_0   0x000ULL
-#define IOMMU_PDE_NL_1   0x200ULL
-#define IOMMU_PDE_NL_2   0x400ULL
-#define IOMMU_PDE_NL_3   0x600ULL
-
-#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
-#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
-#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
-
-#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
-#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
-#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
+#define PAGE_MODE_4_LEVEL 0x04
+#define PAGE_MODE_5_LEVEL 0x05
+#define PAGE_MODE_6_LEVEL 0x06
+
+#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
+#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
+				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
+				   (0xffffffffffffffffULL))
+#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
+#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
+#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
+				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
+
+#define PM_MAP_4k		0
+#define PM_ADDR_MASK		0x000ffffffffff000ULL
+#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
+				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
+#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
 
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
@@ -167,11 +174,6 @@
 #define IOMMU_PTE_IR (1ULL << 61)
 #define IOMMU_PTE_IW (1ULL << 62)
 
-#define IOMMU_L1_PDE(address) \
-	((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define IOMMU_L2_PDE(address) \
-	((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-
 #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
 #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
 #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -194,11 +196,14 @@
 #define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
 #define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
 					      domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
+					      translation */
+
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)				\
 	do {							\
 		if (amd_iommu_dump)				\
-			printk(KERN_INFO "AMD IOMMU: " format, ## arg);	\
+			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
 	} while(0);
 
 /*
@@ -226,6 +231,7 @@ struct protection_domain {
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
 	unsigned long flags;	/* flags to find out type of domain */
+	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	void *priv;		/* private data */
 };
@@ -337,6 +343,9 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
+	/* becomes true if a command buffer reset is running */
+	bool reset_in_progress;
+
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
+/* some function prototypes */
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index bb7d47925847..474d80d3e6cc 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -66,13 +66,23 @@ static inline void default_inquire_remote_apic(int apicid)
 }
 
 /*
+ * With 82489DX we can't rely on apic feature bit
+ * retrieved via cpuid but still have to deal with
+ * such an apic chip so we assume that SMP configuration
+ * is found from MP table (64bit case uses ACPI mostly
+ * which set smp presence flag as well so we are safe
+ * to use this helper too).
+ */
+static inline bool apic_from_smp_config(void)
+{
+	return smp_found_config && !disable_apic;
+}
+
+/*
  * Basic functions accessing APICs.
  */
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
-#else
-#define setup_boot_clock setup_boot_APIC_clock
-#define setup_secondary_clock setup_secondary_APIC_clock
 #endif
 
 #ifdef CONFIG_X86_64
@@ -183,6 +193,10 @@ static inline int x2apic_enabled(void)
 }
 
 #define x2apic_supported()	(cpu_has_x2apic)
+static inline void x2apic_force_phys(void)
+{
+	x2apic_phys = 1;
+}
 #else
 static inline void check_x2apic(void)
 {
@@ -194,6 +208,9 @@ static inline int x2apic_enabled(void)
 {
 	return 0;
 }
+static inline void x2apic_force_phys(void)
+{
+}
 
 #define x2apic_preenabled 0
 #define x2apic_supported()	0
@@ -245,6 +262,8 @@ static inline void lapic_shutdown(void) { }
 static inline void init_apic_mappings(void) { }
 static inline void disable_local_APIC(void) { }
 static inline void apic_disable(void) { }
+# define setup_boot_APIC_clock x86_init_noop
+# define setup_secondary_APIC_clock x86_init_noop
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_64
@@ -293,7 +312,7 @@ struct apic {
 	int (*cpu_present_to_apicid)(int mps_cpu);
 	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
 	void (*setup_portio_remap)(void);
-	int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+	int (*check_phys_apicid_present)(int phys_apicid);
 	void (*enable_apic_mode)(void);
 	int (*phys_pkg_id)(int cpuid_apic, int index_msb);
 
@@ -427,7 +446,7 @@ extern struct apic apic_x2apic_uv_x;
 DECLARE_PER_CPU(int, x2apic_extra_bits);
 
 extern int default_cpu_present_to_apicid(int mps_cpu);
-extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
 static inline void default_wait_for_init_deassert(atomic_t *deassert)
@@ -543,9 +562,9 @@ static inline int __default_cpu_present_to_apicid(int mps_cpu)
 }
 
 static inline int
-__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+__default_check_phys_apicid_present(int phys_apicid)
 {
-	return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+	return physid_isset(phys_apicid, phys_cpu_present_map);
 }
 
 #ifdef CONFIG_X86_32
@@ -555,13 +574,13 @@ static inline int default_cpu_present_to_apicid(int mps_cpu)
 }
 
 static inline int
-default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+default_check_phys_apicid_present(int phys_apicid)
 {
-	return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+	return __default_check_phys_apicid_present(phys_apicid);
 }
 #else
 extern int default_cpu_present_to_apicid(int mps_cpu);
-extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
 static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7ddb36ab933b..3b62da926de9 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -8,12 +8,14 @@
  * Ingo Molnar <mingo@redhat.com>, 1999, 2000
  */
 
-#define	APIC_DEFAULT_PHYS_BASE		0xfee00000
+#define	IO_APIC_DEFAULT_PHYS_BASE	0xfec00000
+#define	APIC_DEFAULT_PHYS_BASE		0xfee00000
 
 #define	APIC_ID		0x20
 
 #define	APIC_LVR	0x30
 #define	APIC_LVR_MASK		0xFF00FF
+#define	APIC_LVR_DIRECTED_EOI	(1 << 24)
 #define	GET_APIC_VERSION(x)	((x) & 0xFFu)
 #define	GET_APIC_MAXLVT(x)	(((x) >> 16) & 0xFFu)
 #ifdef CONFIG_X86_32
@@ -40,6 +42,7 @@
 #define	APIC_DFR_CLUSTER		0x0FFFFFFFul
 #define	APIC_DFR_FLAT			0xFFFFFFFFul
 #define	APIC_SPIV	0xF0
+#define	APIC_SPIV_DIRECTED_EOI		(1 << 12)
 #define	APIC_SPIV_FOCUS_DISABLED	(1 << 9)
 #define	APIC_SPIV_APIC_ENABLED		(1 << 8)
 #define	APIC_ISR	0x100
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 56be78f582f0..b3ed1e1460ff 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,7 +3,7 @@
 
 #ifdef __ASSEMBLY__
 # define __ASM_FORM(x)	x
-# define __ASM_EX_SEC	.section __ex_table
+# define __ASM_EX_SEC	.section __ex_table, "a"
 #else
 # define __ASM_FORM(x)	" " #x " "
 # define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
@@ -38,10 +38,18 @@
 #define _ASM_DI		__ASM_REG(di)
 
 /* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE(from,to)	    \
+	__ASM_EX_SEC ;		    \
+	_ASM_ALIGN ;		    \
+	_ASM_PTR from , to ;	    \
+	.previous
+#else
 # define _ASM_EXTABLE(from,to) \
 	__ASM_EX_SEC	\
 	_ASM_ALIGN "\n" \
 	_ASM_PTR #from "," #to "\n" \
 	" .previous\n"
+#endif
 
 #endif /* _ASM_X86_ASM_H */
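
With the new __ASSEMBLY__ branch, .S files can emit exception-table entries with the same macro the C inline-asm side already used. For context, a typical C-side consumer pairs a faulting instruction with a fixup label; the sketch below is modeled on the kernel's uaccess helpers and is illustrative only (the macro name and fixup body are not taken from this patch):

/* Illustrative sketch: a byte load that may fault, a .fixup handler,
 * and an exception-table entry tying the two together. */
#define example_get_user_byte(x, ptr, err)			\
	asm volatile("1:	movb %2,%b0\n"			\
		     "2:\n"					\
		     ".section .fixup,\"ax\"\n"			\
		     "3:	movl %3,%1\n"			\
		     "	jmp 2b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 3b)			\
		     : "=q" (x), "=r" (err)			\
		     : "m" (*(const char *)(ptr)),		\
		       "i" (-14 /* -EFAULT */), "1" (err))

If the load at label 1 faults, the exception handler looks the faulting address up in __ex_table and resumes at label 3, which stores -EFAULT and jumps past the access.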
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index 1724e8de317c..6be33d83c716 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -85,7 +85,8 @@ struct efi_info {
 struct boot_params {
 	struct screen_info screen_info;			/* 0x000 */
 	struct apm_bios_info apm_bios_info;		/* 0x040 */
-	__u8  _pad2[12];				/* 0x054 */
+	__u8  _pad2[4];					/* 0x054 */
+	__u64  tboot_addr;				/* 0x058 */
 	struct ist_info ist_info;			/* 0x060 */
 	__u8  _pad3[16];				/* 0x070 */
 	__u8  hd0_info[16];	/* obsolete! */		/* 0x080 */
@@ -109,4 +110,14 @@ struct boot_params {
 	__u8  _pad9[276];				/* 0xeec */
 } __attribute__((packed));
 
+enum {
+	X86_SUBARCH_PC = 0,
+	X86_SUBARCH_LGUEST,
+	X86_SUBARCH_XEN,
+	X86_SUBARCH_MRST,
+	X86_NR_SUBARCHS,
+};
+
+
+
 #endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e55dfc1ad453..b54f6afe7ec4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -43,8 +43,58 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_non_WB				PG_arch_1
-PAGEFLAG(NonWB, non_WB)
+#define PG_WC				PG_arch_1
+PAGEFLAG(WC, WC)
+
+#ifdef CONFIG_X86_PAT
+/*
+ * X86 PAT uses page flags WC and Uncached together to keep track of
+ * memory type of pages that have backing page struct. X86 PAT supports 3
+ * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
+ * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * been changed from its default (value of -1 used to denote this).
+ * Note we do not support _PAGE_CACHE_UC here.
+ *
+ * Caller must hold memtype_lock for atomicity.
+ */
+static inline unsigned long get_page_memtype(struct page *pg)
+{
+	if (!PageUncached(pg) && !PageWC(pg))
+		return -1;
+	else if (!PageUncached(pg) && PageWC(pg))
+		return _PAGE_CACHE_WC;
+	else if (PageUncached(pg) && !PageWC(pg))
+		return _PAGE_CACHE_UC_MINUS;
+	else
+		return _PAGE_CACHE_WB;
+}
+
+static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+{
+	switch (memtype) {
+	case _PAGE_CACHE_WC:
+		ClearPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	case _PAGE_CACHE_UC_MINUS:
+		SetPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	case _PAGE_CACHE_WB:
+		SetPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	default:
+	case -1:
+		ClearPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	}
+}
+#else
+static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
+static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+#endif
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
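
For reference, the two page flags used by get_page_memtype()/set_page_memtype() encode the four states as follows (a truth table derived directly from the code in this hunk):

	PageUncached	PageWC		memtype
	0		0		-1 (default, not set)
	0		1		_PAGE_CACHE_WC
	1		0		_PAGE_CACHE_UC_MINUS
	1		1		_PAGE_CACHE_WB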
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 4a28d22d4793..9cfc88b97742 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -95,6 +95,8 @@
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
 #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index c68c361697e1..4d447b732d82 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return percpu_read(current_task);
+	return percpu_read_stable(current_task);
 }
 
 #define current get_current()
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index c993e9e0fed4..e8de2f6f5ca5 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc)
 	return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
 }
 
+static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
+{
+	desc->base0 = base & 0xffff;
+	desc->base1 = (base >> 16) & 0xff;
+	desc->base2 = (base >> 24) & 0xff;
+}
+
 static inline unsigned long get_desc_limit(const struct desc_struct *desc)
 {
 	return desc->limit0 | (desc->limit << 16);
 }
 
+static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+{
+	desc->limit0 = limit & 0xffff;
+	desc->limit = (limit >> 16) & 0xf;
+}
+
 static inline void _set_gate(int gate, unsigned type, void *addr,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index a6adefa28b94..9d6684849fd9 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -34,6 +34,12 @@ struct desc_struct {
 	};
 } __attribute__((packed));
 
+#define GDT_ENTRY_INIT(flags, base, limit) { { {		\
+		.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16),	\
+		.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
+			((limit) & 0xf0000) | ((base) & 0xff000000),	\
+	} } }
+
 enum {
 	GATE_INTERRUPT = 0xE,
 	GATE_TRAP = 0xF,
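
Worked expansion (illustrative) showing that GDT_ENTRY_INIT reproduces the old open-coded descriptor words -- compare the lguest.h hunk further down, which converts FULL_EXEC_SEGMENT from {0x0000ffff, 0x00cf9b00} to GDT_ENTRY_INIT(0xc09b, 0, 0xfffff):

	GDT_ENTRY_INIT(0xc09b, 0, 0xfffff)
	  .a = (0xfffff & 0xffff) | ((0 & 0xffff) << 16)
	     = 0x0000ffff
	  .b = ((0 & 0xff0000) >> 16)
	     | ((0xc09b & 0xf0ff) << 8)		/* = 0x00c09b00 */
	     | (0xfffff & 0xf0000)		/* = 0x000f0000 */
	     | (0 & 0xff000000)
	     = 0x00cf9b00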
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 4994a20acbcb..cee34e9ca45b 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -13,4 +13,7 @@ struct dma_map_ops *dma_ops;
 #endif
 };
 
+struct pdev_archdata {
+};
+
 #endif /* _ASM_X86_DEVICE_H */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 1c3f9435f1c9..0ee770d23d0e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask);
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag);
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction dir)
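
A sketch of how a mapping path can combine the new helpers: phys_to_dma() for the physical-to-bus translation (an identity map on bare x86, as the hunk shows) and dma_capable() for the device-mask check. The function name and the error sentinel below are illustrative assumptions, not code from this patch:

static dma_addr_t example_map_single(struct device *dev,
				     phys_addr_t phys, size_t size)
{
	dma_addr_t bus = phys_to_dma(dev, phys);	/* identity on bare x86 */

	if (!dma_capable(dev, bus, size)) {
		/* the device's DMA mask cannot reach this range; a real
		 * implementation would bounce (e.g. via swiotlb) or fail */
		return 0;	/* assumed error sentinel, illustrative */
	}
	return bus;
}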
diff --git a/arch/x86/include/asm/do_timer.h b/arch/x86/include/asm/do_timer.h
deleted file mode 100644
index 23ecda0b28a0..000000000000
--- a/arch/x86/include/asm/do_timer.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* defines for inline arch setup functions */
-#include <linux/clockchips.h>
-
-#include <asm/i8259.h>
-#include <asm/i8253.h>
-
-/**
- * do_timer_interrupt_hook - hook into timer tick
- *
- * Call the pit clock event handler. see asm/i8253.h
- **/
-
-static inline void do_timer_interrupt_hook(void)
-{
-	global_clock_event->event_handler(global_clock_event);
-}
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 3afc5e87cfdd..ae6253ab9029 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -87,9 +87,25 @@
 	CFI_RESTORE \reg
 	.endm
 #else		/*!CONFIG_X86_64*/
+	.macro pushl_cfi reg
+	pushl \reg
+	CFI_ADJUST_CFA_OFFSET 4
+	.endm
 
-	/* 32bit defenitions are missed yet */
+	.macro popl_cfi reg
+	popl \reg
+	CFI_ADJUST_CFA_OFFSET -4
+	.endm
 
+	.macro movl_cfi reg offset=0
+	movl %\reg, \offset(%esp)
+	CFI_REL_OFFSET \reg, \offset
+	.endm
+
+	.macro movl_cfi_restore offset reg
+	movl \offset(%esp), %\reg
+	CFI_RESTORE \reg
+	.endm
 #endif	/*!CONFIG_X86_64*/
 #endif	/*__ASSEMBLY__*/
 
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 7ecba4d85089..40b4e614fe71 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -126,8 +126,6 @@ extern void e820_reserve_resources(void);
 extern void e820_reserve_resources_late(void);
 extern void setup_memory_map(void);
 extern char *default_machine_specific_memory_setup(void);
-extern char *machine_specific_memory_setup(void);
-extern char *memory_setup(void);
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 83c1bc8d2e8a..456a304b8172 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -299,6 +299,8 @@ do { \
 
 #ifdef CONFIG_X86_32
 
+#define STACK_RND_MASK (0x7ff)
+
 #define VDSO_HIGH_BASE		(__fix_to_virt(FIX_VDSO))
 
 #define ARCH_DLINFO		ARCH_DLINFO_IA32(vdso_enabled)
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index ff8cbfa07851..f5693c81a1db 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
 #endif
 
@@ -61,7 +61,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
 #endif
 
-#ifdef CONFIG_X86_NEW_MCE
+#ifdef CONFIG_X86_MCE
 BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR)
 #endif
 
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 7b2d71df39a6..14f9890eb495 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -132,6 +132,9 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_32
 	FIX_WP_TEST,
 #endif
+#ifdef CONFIG_INTEL_TXT
+	FIX_TBOOT_BASE,
+#endif
 	__end_of_fixed_addresses
 };
 
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 369f5c5d09a1..b78c0941e422 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,7 +20,7 @@
 #ifndef ASM_X86__HYPERVISOR_H
 #define ASM_X86__HYPERVISOR_H
 
-extern unsigned long get_hypervisor_tsc_freq(void);
 extern void init_hypervisor(struct cpuinfo_x86 *c);
+extern void init_hypervisor_platform(void);
 
 #endif
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 175adf58dd4f..0b20bbb758f2 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -26,6 +26,7 @@ extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
+extern void __math_state_restore(void);
 extern void init_thread_xstate(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
@@ -301,6 +302,14 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+static inline bool irq_fpu_usable(void)
+{
+	struct pt_regs *regs;
+
+	return !in_interrupt() || !(regs = get_irq_regs()) || \
+		user_mode(regs) || (read_cr0() & X86_CR0_TS);
+}
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
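
Typical caller pattern for the new helper: take an SSE fast path only when FPU state can safely be clobbered in the current context, and fall back to plain integer code otherwise. A sketch of the intended usage (the function and both branch bodies are illustrative, not from this patch):

static void example_checksum(void *dst, const void *src, size_t len)
{
	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		/* ... SSE/XMM accelerated path ... */
		kernel_fpu_end();
	} else {
		/* ... plain integer fallback, safe in any context ... */
	}
}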
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 330ee807f89e..7c7c16cde1f8 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -143,6 +143,8 @@ extern int noioapicreroute;
 /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
 extern int timer_through_8259;
 
+extern void io_apic_disable_legacy(void);
+
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -150,11 +152,10 @@ extern int timer_through_8259;
 #define io_apic_assign_pci_irqs \
 	(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
-#ifdef CONFIG_ACPI
+extern u8 io_apic_unique_id(u8 id);
 extern int io_apic_get_unique_id(int ioapic, int apic_id);
 extern int io_apic_get_version(int ioapic);
 extern int io_apic_get_redir_entries(int ioapic);
-#endif /* CONFIG_ACPI */
 
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
@@ -177,13 +178,26 @@ extern int setup_ioapic_entry(int apic, int irq,
 			 int polarity, int vector, int pin);
 extern void ioapic_write_entry(int apic, int pin,
 			       struct IO_APIC_route_entry e);
+extern void setup_ioapic_ids_from_mpc(void);
+
+struct mp_ioapic_gsi{
+	int gsi_base;
+	int gsi_end;
+};
+extern struct mp_ioapic_gsi mp_gsi_routing[];
+int mp_find_ioapic(int gsi);
+int mp_find_ioapic_pin(int ioapic, int gsi);
+void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
+
 #else  /* !CONFIG_X86_IO_APIC */
+
 #define io_apic_assign_pci_irqs 0
+#define setup_ioapic_ids_from_mpc x86_init_noop
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void)	{ }
 static inline void ioapic_insert_resources(void) { }
-
 static inline void probe_nr_irqs_gsi(void)	{ }
+
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h
index 0d5b23b7b06e..ec34c760665e 100644
--- a/arch/x86/include/asm/ioctls.h
+++ b/arch/x86/include/asm/ioctls.h
@@ -1,94 +1 @@
-#ifndef _ASM_X86_IOCTLS_H
-#define _ASM_X86_IOCTLS_H
-
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS		0x5401
-#define TCSETS		0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
-#define TCSETSW		0x5403
-#define TCSETSF		0x5404
-#define TCGETA		0x5405
-#define TCSETA		0x5406
-#define TCSETAW		0x5407
-#define TCSETAF		0x5408
-#define TCSBRK		0x5409
-#define TCXONC		0x540A
-#define TCFLSH		0x540B
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-#define TIOCGPGRP	0x540F
-#define TIOCSPGRP	0x5410
-#define TIOCOUTQ	0x5411
-#define TIOCSTI		0x5412
-#define TIOCGWINSZ	0x5413
-#define TIOCSWINSZ	0x5414
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define FIONREAD	0x541B
-#define TIOCINQ		FIONREAD
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-#define FIONBIO		0x5421
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
-#define TIOCSBRK	0x5427	/* BSD compatibility */
-#define TIOCCBRK	0x5428	/* BSD compatibility */
-#define TIOCGSID	0x5429	/* Return the session ID of FD */
-#define TCGETS2		_IOR('T', 0x2A, struct termios2)
-#define TCSETS2		_IOW('T', 0x2B, struct termios2)
-#define TCSETSW2	_IOW('T', 0x2C, struct termios2)
-#define TCSETSF2	_IOW('T', 0x2D, struct termios2)
-#define TIOCGRS485	0x542E
-#define TIOCSRS485	0x542F
-#define TIOCGPTN	_IOR('T', 0x30, unsigned int)
-				/* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK	_IOW('T', 0x31, int)  /* Lock/unlock Pty */
-#define TCGETX		0x5432 /* SYS5 TCGETX compatibility */
-#define TCSETX		0x5433
-#define TCSETXF		0x5434
-#define TCSETXW		0x5435
-
-#define FIONCLEX	0x5450
-#define FIOCLEX		0x5451
-#define FIOASYNC	0x5452
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
-#define TIOCSERGETLSR	0x5459 /* Get line status register */
-#define TIOCSERGETMULTI	0x545A /* Get multiport config */
-#define TIOCSERSETMULTI	0x545B /* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
-#define TIOCGHAYESESP	0x545E	/* Get Hayes ESP configuration */
-#define TIOCSHAYESESP	0x545F	/* Set Hayes ESP configuration */
-#define FIOQSIZE	0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-
-#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
-
-#endif /* _ASM_X86_IOCTLS_H */
+#include <asm-generic/ioctls.h>
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index 0e9fe1d9d971..f35eb45d6576 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,13 +26,16 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-int
-is_io_mapping_possible(resource_size_t base, unsigned long size);
-
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
 iounmap_atomic(void *kvaddr, enum km_type type);
 
+int
+iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+void
+iomap_free(resource_size_t base, unsigned long size);
+
 #endif /* _ASM_X86_IOMAP_H */
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h
index ee678fd51594..84c7e51cb6d0 100644
--- a/arch/x86/include/asm/ipcbuf.h
+++ b/arch/x86/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_X86_IPCBUF_H
-#define _ASM_X86_IPCBUF_H
-
-/*
- * The ipc64_perm structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* _ASM_X86_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index f38481bcd455..ddda6cbed6f4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -37,7 +37,6 @@ extern void fixup_irqs(void);
 #endif
 
 extern void (*generic_interrupt_extension)(void);
-extern void init_IRQ(void);
 extern void native_init_IRQ(void);
 extern bool handle_irq(unsigned irq, struct pt_regs *regs);
 
@@ -47,4 +46,6 @@ extern unsigned int do_IRQ(struct pt_regs *regs);
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
 extern int vector_used_by_percpu_irq(unsigned int vector);
 
+extern void init_ISA_irqs(void);
+
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c6ccbe7e81ad..9e2b952f810a 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void)
 	unsigned long flags;
 
 	/*
-	 * Note: this needs to be "=r" not "=rm", because we have the
-	 * stack offset from what gcc expects at the time the "pop" is
-	 * executed, and so a memory reference with respect to the stack
-	 * would end up using the wrong address.
+	 * "=rm" is safe here, because "pop" adjusts the stack before
+	 * it evaluates its effective address -- this is part of the
+	 * documented behavior of the "pop" instruction.
 	 */
 	asm volatile("# __raw_save_flags\n\t"
 		     "pushf ; pop %0"
-		     : "=r" (flags)
+		     : "=rm" (flags)
 		     : /* no input */
 		     : "memory");
 
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 125be8b19568..4a5fe914dc59 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -17,6 +17,8 @@
 #define __KVM_HAVE_USER_NMI
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_MSIX
+#define __KVM_HAVE_MCE
+#define __KVM_HAVE_PIT_STATE2
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -236,6 +238,14 @@ struct kvm_pit_state {
 	struct kvm_pit_channel_state channels[3];
 };
 
+#define KVM_PIT_FLAGS_HPET_LEGACY     0x00000001
+
+struct kvm_pit_state2 {
+	struct kvm_pit_channel_state channels[3];
+	__u32 flags;
+	__u32 reserved[9];
+};
+
 struct kvm_reinject_control {
 	__u8 pit_reinject;
 	__u8 reserved[31];
diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b7ed2c423116..b7ed2c423116 100644
--- a/arch/x86/include/asm/kvm_x86_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eabdc1cfab5c..3be000435fad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
+#include <linux/tracepoint.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -37,12 +38,14 @@
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
 				  0xFFFFFF0000000000ULL)
 
-#define KVM_GUEST_CR0_MASK				   \
-	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
-	 | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
+	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK						\
+	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST				\
+	(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
 #define KVM_VM_CR0_ALWAYS_ON						\
-	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
-	 | X86_CR0_MP)
+	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_GUEST_CR4_MASK						\
 	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -51,12 +54,12 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
-/* shadow tables are PAE even on non-PAE hosts */
-#define KVM_HPAGE_SHIFT 21
-#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
-#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
-
-#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+/* KVM Hugepage definitions for x86 */
+#define KVM_NR_PAGE_SIZES	3
+#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define DE_VECTOR 0
 #define DB_VECTOR 1
@@ -120,6 +123,10 @@ enum kvm_reg {
 	NR_VCPU_REGS
 };
 
+enum kvm_reg_ex {
+	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+};
+
 enum {
 	VCPU_SREG_ES,
 	VCPU_SREG_CS,
@@ -131,7 +138,7 @@ enum {
 	VCPU_SREG_LDTR,
 };
 
-#include <asm/kvm_x86_emulate.h>
+#include <asm/kvm_emulate.h>
 
 #define KVM_NR_MEM_OBJS 40
 
@@ -308,7 +315,6 @@ struct kvm_vcpu_arch {
 	struct {
 		gfn_t gfn;	/* presumed gfn during guest pte update */
 		pfn_t pfn;	/* pfn corresponding to that gfn */
-		int largepage;
 		unsigned long mmu_seq;
 	} update_pte;
 
@@ -334,16 +340,6 @@ struct kvm_vcpu_arch {
 		u8 nr;
 	} interrupt;
 
-	struct {
-		int vm86_active;
-		u8 save_iopl;
-		struct kvm_save_segment {
-			u16 selector;
-			unsigned long base;
-			u32 limit;
-			u32 ar;
-		} tr, es, ds, fs, gs;
-	} rmode;
 	int halt_request; /* real mode on Intel only */
 
 	int cpuid_nent;
@@ -366,13 +362,15 @@ struct kvm_vcpu_arch {
 	u32 pat;
 
 	int switch_db_regs;
-	unsigned long host_db[KVM_NR_DB_REGS];
-	unsigned long host_dr6;
-	unsigned long host_dr7;
 	unsigned long db[KVM_NR_DB_REGS];
 	unsigned long dr6;
 	unsigned long dr7;
 	unsigned long eff_db[KVM_NR_DB_REGS];
+
+	u64 mcg_cap;
+	u64 mcg_status;
+	u64 mcg_ctl;
+	u64 *mce_banks;
 };
 
 struct kvm_mem_alias {
@@ -409,6 +407,7 @@ struct kvm_arch{
 
 	struct page *ept_identity_pagetable;
 	bool ept_identity_pagetable_done;
+	gpa_t ept_identity_map_addr;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
@@ -526,6 +525,9 @@ struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	bool (*gb_page_enable)(void);
+
+	const struct trace_print_flags *exit_reasons_str;
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -618,6 +620,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
+bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
 
@@ -752,8 +755,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
 }
 
-#define MSR_IA32_TIME_STAMP_COUNTER		0x010
-
 #define TSS_IOPB_BASE_OFFSET 0x66
 #define TSS_BASE_SIZE 0x68
 #define TSS_IOPB_SIZE (65536 / 8)
@@ -796,5 +797,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 
 #endif /* _ASM_X86_KVM_HOST_H */
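
The indexed hugepage macros collapse to the old constants at level 2; with PAGE_SHIFT = 12 the three levels enumerate as follows (worked numbers, derived from the definitions in the hunk above):

	KVM_HPAGE_SHIFT(1) = 12 + 0*9 = 12  ->  4 KiB base pages
	KVM_HPAGE_SHIFT(2) = 12 + 1*9 = 21  ->  2 MiB (the old fixed KVM_HPAGE_SHIFT)
	KVM_HPAGE_SHIFT(3) = 12 + 2*9 = 30  ->  1 GiB (gated by the new gb_page_enable() hook)

	KVM_PAGES_PER_HPAGE(2) = (1UL << 21) / (1UL << 12) = 512,
	matching the old KVM_PAGES_PER_HPAGE constant.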
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index b8a3305ae093..c584076a47f4 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_KVM_PARA_H
 #define _ASM_X86_KVM_PARA_H
 
+#include <linux/types.h>
+
 /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.  It
  * should be used to determine that a VM is running under KVM.
  */
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 5136dad57cbb..0d97deba1e35 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void)
 }
 
 /* Full 4G segment descriptors, suitable for CS and DS. */
-#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } })
-#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } })
+#define FULL_EXEC_SEGMENT \
+	((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
+#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 5cdd8d100ec9..b608a64c5814 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -9,7 +9,7 @@
  */
 
 #define MCG_BANKCNT_MASK	0xff         /* Number of Banks */
-#define MCG_CTL_P		(1ULL<<8)    /* MCG_CAP register available */
+#define MCG_CTL_P		(1ULL<<8)    /* MCG_CTL register available */
 #define MCG_EXT_P		(1ULL<<9)    /* Extended registers available */
 #define MCG_CMCI_P		(1ULL<<10)   /* CMCI supported */
 #define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */
@@ -38,6 +38,14 @@
 #define MCM_ADDR_MEM	 3	/* memory address */
 #define MCM_ADDR_GENERIC 7	/* generic */
 
+#define MCJ_CTX_MASK		3
+#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM		0    /* inject context: random */
+#define MCJ_CTX_PROCESS		1    /* inject context: process */
+#define MCJ_CTX_IRQ		2    /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST	4    /* do NMI broadcasting */
+#define MCJ_EXCEPTION		8    /* raise as exception */
+
 /* Fields are zero when not available */
 struct mce {
 	__u64 status;
@@ -48,8 +56,8 @@ struct mce {
48 __u64 tsc; /* cpu time stamp counter */ 56 __u64 tsc; /* cpu time stamp counter */
49 __u64 time; /* wall time_t when error was detected */ 57 __u64 time; /* wall time_t when error was detected */
50 __u8 cpuvendor; /* cpu vendor as encoded in system.h */ 58 __u8 cpuvendor; /* cpu vendor as encoded in system.h */
51 __u8 pad1; 59 __u8 inject_flags; /* software inject flags */
52 __u16 pad2; 60 __u16 pad;
53 __u32 cpuid; /* CPUID 1 EAX */ 61 __u32 cpuid; /* CPUID 1 EAX */
54 __u8 cs; /* code segment */ 62 __u8 cs; /* code segment */
55 __u8 bank; /* machine check bank */ 63 __u8 bank; /* machine check bank */
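The new MCJ_* flags travel in the inject_flags field that replaces pad1 above. A brief hypothetical illustration of how an injector (the mce-inject tool is the real consumer) might fill them in; example_fill_inject() is an assumption for the sketch, and the MCI_STATUS_* bits come from elsewhere in mce.h:

static void example_fill_inject(struct mce *m)
{
	m->status = MCI_STATUS_VAL | MCI_STATUS_UC;        /* valid, uncorrected */
	m->inject_flags = MCJ_CTX_PROCESS | MCJ_EXCEPTION; /* process context, raise as #MC */
	WARN_ON(MCJ_CTX(m->inject_flags) != MCJ_CTX_PROCESS);
}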
@@ -115,13 +123,6 @@ void mcheck_init(struct cpuinfo_x86 *c);
115static inline void mcheck_init(struct cpuinfo_x86 *c) {} 123static inline void mcheck_init(struct cpuinfo_x86 *c) {}
116#endif 124#endif
117 125
118#ifdef CONFIG_X86_OLD_MCE
119extern int nr_mce_banks;
120void amd_mcheck_init(struct cpuinfo_x86 *c);
121void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
122void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
123#endif
124
125#ifdef CONFIG_X86_ANCIENT_MCE 126#ifdef CONFIG_X86_ANCIENT_MCE
126void intel_p5_mcheck_init(struct cpuinfo_x86 *c); 127void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
127void winchip_mcheck_init(struct cpuinfo_x86 *c); 128void winchip_mcheck_init(struct cpuinfo_x86 *c);
@@ -137,10 +138,11 @@ void mce_log(struct mce *m);
137DECLARE_PER_CPU(struct sys_device, mce_dev); 138DECLARE_PER_CPU(struct sys_device, mce_dev);
138 139
139/* 140/*
140 * To support more than 128 would need to escape the predefined 141 * Maximum number of banks.
141 * Linux defined extended banks first. 142 * This is the limit of the current register layout on
143 * Intel CPUs.
142 */ 144 */
143#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1) 145#define MAX_NR_BANKS 32
144 146
145#ifdef CONFIG_X86_MCE_INTEL 147#ifdef CONFIG_X86_MCE_INTEL
146extern int mce_cmci_disabled; 148extern int mce_cmci_disabled;
@@ -208,11 +210,7 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
208 210
209void intel_init_thermal(struct cpuinfo_x86 *c); 211void intel_init_thermal(struct cpuinfo_x86 *c);
210 212
211#ifdef CONFIG_X86_NEW_MCE
212void mce_log_therm_throt_event(__u64 status); 213void mce_log_therm_throt_event(__u64 status);
213#else
214static inline void mce_log_therm_throt_event(__u64 status) {}
215#endif
216 214
217#endif /* __KERNEL__ */ 215#endif /* __KERNEL__ */
218#endif /* _ASM_X86_MCE_H */ 216#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 751af2550ed9..593e51d4643f 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -1,20 +1,8 @@
1#ifndef _ASM_X86_MMAN_H 1#ifndef _ASM_X86_MMAN_H
2#define _ASM_X86_MMAN_H 2#define _ASM_X86_MMAN_H
3 3
4#include <asm-generic/mman-common.h>
5
6#define MAP_32BIT 0x40 /* only give out 32bit addresses */ 4#define MAP_32BIT 0x40 /* only give out 32bit addresses */
7 5
8#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 6#include <asm-generic/mman.h>
9#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
10#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
11#define MAP_LOCKED 0x2000 /* pages are locked */
12#define MAP_NORESERVE 0x4000 /* don't check for reservations */
13#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
14#define MAP_NONBLOCK 0x10000 /* do not block on IO */
15#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
16
17#define MCL_CURRENT 1 /* lock all current mappings */
18#define MCL_FUTURE 2 /* lock all future mappings */
19 7
20#endif /* _ASM_X86_MMAN_H */ 8#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 47d62743c4d5..3e2ce58a31a3 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -1,18 +1,7 @@
1#ifndef _ASM_X86_MODULE_H 1#ifndef _ASM_X86_MODULE_H
2#define _ASM_X86_MODULE_H 2#define _ASM_X86_MODULE_H
3 3
4/* x86_32/64 are simple */ 4#include <asm-generic/module.h>
5struct mod_arch_specific {};
6
7#ifdef CONFIG_X86_32
8# define Elf_Shdr Elf32_Shdr
9# define Elf_Sym Elf32_Sym
10# define Elf_Ehdr Elf32_Ehdr
11#else
12# define Elf_Shdr Elf64_Shdr
13# define Elf_Sym Elf64_Sym
14# define Elf_Ehdr Elf64_Ehdr
15#endif
16 5
17#ifdef CONFIG_X86_64 6#ifdef CONFIG_X86_64
18/* X86_64 does not define MODULE_PROC_FAMILY */ 7/* X86_64 does not define MODULE_PROC_FAMILY */
@@ -28,6 +17,8 @@ struct mod_arch_specific {};
28#define MODULE_PROC_FAMILY "586MMX " 17#define MODULE_PROC_FAMILY "586MMX "
29#elif defined CONFIG_MCORE2 18#elif defined CONFIG_MCORE2
30#define MODULE_PROC_FAMILY "CORE2 " 19#define MODULE_PROC_FAMILY "CORE2 "
20#elif defined CONFIG_MATOM
21#define MODULE_PROC_FAMILY "ATOM "
31#elif defined CONFIG_M686 22#elif defined CONFIG_M686
32#define MODULE_PROC_FAMILY "686 " 23#define MODULE_PROC_FAMILY "686 "
33#elif defined CONFIG_MPENTIUMII 24#elif defined CONFIG_MPENTIUMII
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index e2a1bb6d71ea..79c94500c0bb 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -4,6 +4,7 @@
4#include <linux/init.h> 4#include <linux/init.h>
5 5
6#include <asm/mpspec_def.h> 6#include <asm/mpspec_def.h>
7#include <asm/x86_init.h>
7 8
8extern int apic_version[MAX_APICS]; 9extern int apic_version[MAX_APICS];
9extern int pic_mode; 10extern int pic_mode;
@@ -41,9 +42,6 @@ extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
41 42
42#endif /* CONFIG_X86_64 */ 43#endif /* CONFIG_X86_64 */
43 44
44extern void early_find_smp_config(void);
45extern void early_get_smp_config(void);
46
47#if defined(CONFIG_MCA) || defined(CONFIG_EISA) 45#if defined(CONFIG_MCA) || defined(CONFIG_EISA)
48extern int mp_bus_id_to_type[MAX_MP_BUSSES]; 46extern int mp_bus_id_to_type[MAX_MP_BUSSES];
49#endif 47#endif
@@ -52,20 +50,55 @@ extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
52 50
53extern unsigned int boot_cpu_physical_apicid; 51extern unsigned int boot_cpu_physical_apicid;
54extern unsigned int max_physical_apicid; 52extern unsigned int max_physical_apicid;
55extern int smp_found_config;
56extern int mpc_default_type; 53extern int mpc_default_type;
57extern unsigned long mp_lapic_addr; 54extern unsigned long mp_lapic_addr;
58 55
59extern void get_smp_config(void); 56#ifdef CONFIG_X86_LOCAL_APIC
57extern int smp_found_config;
58#else
59# define smp_found_config 0
60#endif
61
62static inline void get_smp_config(void)
63{
64 x86_init.mpparse.get_smp_config(0);
65}
66
67static inline void early_get_smp_config(void)
68{
69 x86_init.mpparse.get_smp_config(1);
70}
71
72static inline void find_smp_config(void)
73{
74 x86_init.mpparse.find_smp_config(1);
75}
76
77static inline void early_find_smp_config(void)
78{
79 x86_init.mpparse.find_smp_config(0);
80}
60 81
61#ifdef CONFIG_X86_MPPARSE 82#ifdef CONFIG_X86_MPPARSE
62extern void find_smp_config(void);
63extern void early_reserve_e820_mpc_new(void); 83extern void early_reserve_e820_mpc_new(void);
64extern int enable_update_mptable; 84extern int enable_update_mptable;
85extern int default_mpc_apic_id(struct mpc_cpu *m);
86extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
87# ifdef CONFIG_X86_IO_APIC
88extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
89# else
90# define default_mpc_oem_bus_info NULL
91# endif
92extern void default_find_smp_config(unsigned int reserve);
93extern void default_get_smp_config(unsigned int early);
65#else 94#else
66static inline void find_smp_config(void) { }
67static inline void early_reserve_e820_mpc_new(void) { } 95static inline void early_reserve_e820_mpc_new(void) { }
68#define enable_update_mptable 0 96#define enable_update_mptable 0
97#define default_mpc_apic_id NULL
98#define default_smp_read_mpc_oem NULL
99#define default_mpc_oem_bus_info NULL
100#define default_find_smp_config x86_init_uint_noop
101#define default_get_smp_config x86_init_uint_noop
69#endif 102#endif
70 103
71void __cpuinit generic_processor_info(int apicid, int version); 104void __cpuinit generic_processor_info(int apicid, int version);
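With find/get_smp_config now routed through x86_init.mpparse, a platform can override MP-table discovery by repointing the hooks instead of patching weak functions. A hedged sketch (myplat_setup() is hypothetical; x86_init_uint_noop is the no-op helper the #defines above already use):

static void __init myplat_setup(void)
{
	/* This platform has no MP table: stub out discovery entirely. */
	x86_init.mpparse.find_smp_config = x86_init_uint_noop;
	x86_init.mpparse.get_smp_config  = x86_init_uint_noop;
}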
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h
index 7e4e9481f51c..809134c644a6 100644
--- a/arch/x86/include/asm/msgbuf.h
+++ b/arch/x86/include/asm/msgbuf.h
@@ -1,39 +1 @@
1#ifndef _ASM_X86_MSGBUF_H #include <asm-generic/msgbuf.h>
2#define _ASM_X86_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for i386 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space on i386 is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 *
13 * Pad space on x86_64 is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19#ifdef __i386__
20 unsigned long __unused1;
21#endif
22 __kernel_time_t msg_rtime; /* last msgrcv time */
23#ifdef __i386__
24 unsigned long __unused2;
25#endif
26 __kernel_time_t msg_ctime; /* last change time */
27#ifdef __i386__
28 unsigned long __unused3;
29#endif
30 unsigned long msg_cbytes; /* current number of bytes on queue */
31 unsigned long msg_qnum; /* number of messages in queue */
32 unsigned long msg_qbytes; /* max number of bytes on queue */
33 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
34 __kernel_pid_t msg_lrpid; /* last receive pid */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39#endif /* _ASM_X86_MSGBUF_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6be7fc254b59..4ffe09b2ad75 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -81,8 +81,15 @@
81#define MSR_IA32_MC0_ADDR 0x00000402 81#define MSR_IA32_MC0_ADDR 0x00000402
82#define MSR_IA32_MC0_MISC 0x00000403 82#define MSR_IA32_MC0_MISC 0x00000403
83 83
84#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
85#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
86#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
87#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
88
84/* These are consecutive and not in the normal block of 4 MCE bank registers */ 89/* These are consecutive and not in the normal block of 4 MCE bank registers */
85#define MSR_IA32_MC0_CTL2 0x00000280 90#define MSR_IA32_MC0_CTL2 0x00000280
91#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
92
86#define CMCI_EN (1ULL << 30) 93#define CMCI_EN (1ULL << 30)
87#define CMCI_THRESHOLD_MASK 0xffffULL 94#define CMCI_THRESHOLD_MASK 0xffffULL
88 95
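The new MCx accessors just encode the 4-register stride of the architectural bank layout; for example MSR_IA32_MCx_STATUS(2) == 0x401 + 4*2 == 0x409. A small sketch of polling one bank (example_poll_bank2() is illustrative; rdmsrl_safe() from msr.h, MCI_STATUS_VAL from mce.h):

static void example_poll_bank2(void)
{
	u64 status;

	if (!rdmsrl_safe(MSR_IA32_MCx_STATUS(2), &status) &&
	    (status & MCI_STATUS_VAL))
		; /* bank 2 holds a valid machine-check record */
}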
@@ -215,6 +222,10 @@
215 222
216#define THERM_STATUS_PROCHOT (1 << 0) 223#define THERM_STATUS_PROCHOT (1 << 0)
217 224
225#define MSR_THERM2_CTL 0x0000019d
226
227#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
228
218#define MSR_IA32_MISC_ENABLE 0x000001a0 229#define MSR_IA32_MISC_ENABLE 0x000001a0
219 230
220/* MISC_ENABLE bits: architectural */ 231/* MISC_ENABLE bits: architectural */
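MSR_THERM2_CTL_TM_SELECT distinguishes Thermal Monitor 2 from TM1 on CPUs that implement both; a minimal check might look like this (sketch only; tm2_selected() is hypothetical, rdmsrl() from msr.h):

static bool tm2_selected(void)
{
	u64 val;

	rdmsrl(MSR_THERM2_CTL, val);
	return !!(val & MSR_THERM2_CTL_TM_SELECT);
}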
@@ -374,6 +385,7 @@
374/* AMD-V MSRs */ 385/* AMD-V MSRs */
375 386
376#define MSR_VM_CR 0xc0010114 387#define MSR_VM_CR 0xc0010114
388#define MSR_VM_IGNNE 0xc0010115
377#define MSR_VM_HSAVE_PA 0xc0010117 389#define MSR_VM_HSAVE_PA 0xc0010117
378 390
379#endif /* _ASM_X86_MSR_INDEX_H */ 391#endif /* _ASM_X86_MSR_INDEX_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 48ad9d29484a..7e2b6ba962ff 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -3,10 +3,16 @@
3 3
4#include <asm/msr-index.h> 4#include <asm/msr-index.h>
5 5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
8 7
9#include <linux/types.h> 8#include <linux/types.h>
9#include <linux/ioctl.h>
10
11#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
12#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
13
14#ifdef __KERNEL__
15
10#include <asm/asm.h> 16#include <asm/asm.h>
11#include <asm/errno.h> 17#include <asm/errno.h>
12#include <asm/cpumask.h> 18#include <asm/cpumask.h>
@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
67 ".previous\n\t" 73 ".previous\n\t"
68 _ASM_EXTABLE(2b, 3b) 74 _ASM_EXTABLE(2b, 3b)
69 : [err] "=r" (*err), EAX_EDX_RET(val, low, high) 75 : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
70 : "c" (msr), [fault] "i" (-EFAULT)); 76 : "c" (msr), [fault] "i" (-EIO));
71 return EAX_EDX_VAL(val, low, high);
72}
73
74static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
75 int *err)
76{
77 DECLARE_ARGS(val, low, high);
78
79 asm volatile("2: rdmsr ; xor %0,%0\n"
80 "1:\n\t"
81 ".section .fixup,\"ax\"\n\t"
82 "3: mov %3,%0 ; jmp 1b\n\t"
83 ".previous\n\t"
84 _ASM_EXTABLE(2b, 3b)
85 : "=r" (*err), EAX_EDX_RET(val, low, high)
86 : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
87 return EAX_EDX_VAL(val, low, high); 77 return EAX_EDX_VAL(val, low, high);
88} 78}
89 79
@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
106 _ASM_EXTABLE(2b, 3b) 96 _ASM_EXTABLE(2b, 3b)
107 : [err] "=a" (err) 97 : [err] "=a" (err)
108 : "c" (msr), "0" (low), "d" (high), 98 : "c" (msr), "0" (low), "d" (high),
109 [fault] "i" (-EFAULT) 99 [fault] "i" (-EIO)
110 : "memory"); 100 : "memory");
111 return err; 101 return err;
112} 102}
113 103
114extern unsigned long long native_read_tsc(void); 104extern unsigned long long native_read_tsc(void);
115 105
106extern int native_rdmsr_safe_regs(u32 regs[8]);
107extern int native_wrmsr_safe_regs(u32 regs[8]);
108
116static __always_inline unsigned long long __native_read_tsc(void) 109static __always_inline unsigned long long __native_read_tsc(void)
117{ 110{
118 DECLARE_ARGS(val, low, high); 111 DECLARE_ARGS(val, low, high);
@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
181 *p = native_read_msr_safe(msr, &err); 174 *p = native_read_msr_safe(msr, &err);
182 return err; 175 return err;
183} 176}
177
184static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) 178static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
185{ 179{
180 u32 gprs[8] = { 0 };
186 int err; 181 int err;
187 182
188 *p = native_read_msr_amd_safe(msr, &err); 183 gprs[1] = msr;
184 gprs[7] = 0x9c5a203a;
185
186 err = native_rdmsr_safe_regs(gprs);
187
188 *p = gprs[0] | ((u64)gprs[2] << 32);
189
189 return err; 190 return err;
190} 191}
191 192
193static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
194{
195 u32 gprs[8] = { 0 };
196
197 gprs[0] = (u32)val;
198 gprs[1] = msr;
199 gprs[2] = val >> 32;
200 gprs[7] = 0x9c5a203a;
201
202 return native_wrmsr_safe_regs(gprs);
203}
204
205static inline int rdmsr_safe_regs(u32 regs[8])
206{
207 return native_rdmsr_safe_regs(regs);
208}
209
210static inline int wrmsr_safe_regs(u32 regs[8])
211{
212 return native_wrmsr_safe_regs(regs);
213}
214
192#define rdtscl(low) \ 215#define rdtscl(low) \
193 ((low) = (u32)__native_read_tsc()) 216 ((low) = (u32)__native_read_tsc())
194 217
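The regs[8] convention indexes the array by general-purpose register, {eax, ecx, edx, ebx, esp, ebp, esi, edi} (assumed here from the accompanying msr-reg.S implementation): ecx carries the MSR number and edi the 0x9c5a203a key that unlocks certain AMD northbridge MSRs, exactly as rdmsrl_amd_safe() above sets up. A direct use might look like (example_read_amd_msr() is illustrative):

static int example_read_amd_msr(u32 msr, u64 *val)
{
	u32 regs[8] = { 0 };
	int err;

	regs[1] = msr;                          /* ecx: MSR number */
	regs[7] = 0x9c5a203a;                   /* edi: AMD access key */
	err = rdmsr_safe_regs(regs);
	*val = regs[0] | ((u64)regs[2] << 32);  /* edx:eax */
	return err;
}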
@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
228void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); 251void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
229int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 252int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
230int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 253int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
254int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
255int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
231#else /* CONFIG_SMP */ 256#else /* CONFIG_SMP */
232static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 257static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
233{ 258{
@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
258{ 283{
259 return wrmsr_safe(msr_no, l, h); 284 return wrmsr_safe(msr_no, l, h);
260} 285}
286static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
287{
288 return rdmsr_safe_regs(regs);
289}
290static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
291{
292 return wrmsr_safe_regs(regs);
293}
261#endif /* CONFIG_SMP */ 294#endif /* CONFIG_SMP */
262#endif /* __ASSEMBLY__ */
263#endif /* __KERNEL__ */ 295#endif /* __KERNEL__ */
296#endif /* __ASSEMBLY__ */
264#endif /* _ASM_X86_MSR_H */ 297#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index a51ada8467de..4365ffdb461f 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -121,6 +121,9 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
121extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); 121extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
122extern void mtrr_ap_init(void); 122extern void mtrr_ap_init(void);
123extern void mtrr_bp_init(void); 123extern void mtrr_bp_init(void);
124extern void set_mtrr_aps_delayed_init(void);
125extern void mtrr_aps_init(void);
126extern void mtrr_bp_restore(void);
124extern int mtrr_trim_uncached_memory(unsigned long end_pfn); 127extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
125extern int amd_special_default_mtrr(void); 128extern int amd_special_default_mtrr(void);
126# else 129# else
@@ -161,6 +164,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
161 164
162#define mtrr_ap_init() do {} while (0) 165#define mtrr_ap_init() do {} while (0)
163#define mtrr_bp_init() do {} while (0) 166#define mtrr_bp_init() do {} while (0)
167#define set_mtrr_aps_delayed_init() do {} while (0)
168#define mtrr_aps_init() do {} while (0)
169#define mtrr_bp_restore() do {} while (0)
164# endif 170# endif
165 171
166#ifdef CONFIG_COMPAT 172#ifdef CONFIG_COMPAT
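The three new hooks batch AP MTRR programming around bring-up and resume, with mtrr_bp_restore() covering the boot CPU on resume. The intended ordering (inferred from the declarations, not a verbatim caller) is roughly:

static void example_cpu_bringup(void)
{
	set_mtrr_aps_delayed_init();   /* defer per-AP MTRR programming */
	/* ... boot or wake the application processors ... */
	mtrr_aps_init();               /* program all APs in one batch */
}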
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c86e5ed4af51..e63cf7d441e1 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
45 void __user *, size_t *, loff_t *); 45 void __user *, size_t *, loff_t *);
46extern int unknown_nmi_panic; 46extern int unknown_nmi_panic;
47 47
48void __trigger_all_cpu_backtrace(void); 48void arch_trigger_all_cpu_backtrace(void);
49#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 49#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
50 50
51static inline void localise_nmi_watchdog(void) 51static inline void localise_nmi_watchdog(void)
52{ 52{
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index ad2668ee1aa7..6d8723a766cc 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -65,6 +65,8 @@
65 6: osp nopl 0x00(%eax,%eax,1) 65 6: osp nopl 0x00(%eax,%eax,1)
66 7: nopl 0x00000000(%eax) 66 7: nopl 0x00000000(%eax)
67 8: nopl 0x00000000(%eax,%eax,1) 67 8: nopl 0x00000000(%eax,%eax,1)
68 Note: All the above are assumed to be a single instruction.
69 There is kernel code that depends on this.
68*/ 70*/
69#define P6_NOP1 GENERIC_NOP1 71#define P6_NOP1 GENERIC_NOP1
70#define P6_NOP2 ".byte 0x66,0x90\n" 72#define P6_NOP2 ".byte 0x66,0x90\n"
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h
index 6f0d0422f4ca..965d45427975 100644
--- a/arch/x86/include/asm/param.h
+++ b/arch/x86/include/asm/param.h
@@ -1,22 +1 @@
1#ifndef _ASM_X86_PARAM_H #include <asm-generic/param.h>
2#define _ASM_X86_PARAM_H
3
4#ifdef __KERNEL__
5# define HZ CONFIG_HZ /* Internal kernel timer frequency */
6# define USER_HZ 100 /* some user interfaces are */
7# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
8#endif
9
10#ifndef HZ
11#define HZ 100
12#endif
13
14#define EXEC_PAGESIZE 4096
15
16#ifndef NOGROUP
17#define NOGROUP (-1)
18#endif
19
20#define MAXHOSTNAMELEN 64 /* max length of hostname */
21
22#endif /* _ASM_X86_PARAM_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8a0832..8aebcc41041d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -7,689 +7,11 @@
7#include <asm/pgtable_types.h> 7#include <asm/pgtable_types.h>
8#include <asm/asm.h> 8#include <asm/asm.h>
9 9
10/* Bitmask of what can be clobbered: usually at least eax. */ 10#include <asm/paravirt_types.h>
11#define CLBR_NONE 0
12#define CLBR_EAX (1 << 0)
13#define CLBR_ECX (1 << 1)
14#define CLBR_EDX (1 << 2)
15#define CLBR_EDI (1 << 3)
16
17#ifdef CONFIG_X86_32
18/* CLBR_ANY should match all the registers the platform has. For i386, that's all of them */
19#define CLBR_ANY ((1 << 4) - 1)
20
21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23#define CLBR_SCRATCH (0)
24#else
25#define CLBR_RAX CLBR_EAX
26#define CLBR_RCX CLBR_ECX
27#define CLBR_RDX CLBR_EDX
28#define CLBR_RDI CLBR_EDI
29#define CLBR_RSI (1 << 4)
30#define CLBR_R8 (1 << 5)
31#define CLBR_R9 (1 << 6)
32#define CLBR_R10 (1 << 7)
33#define CLBR_R11 (1 << 8)
34
35#define CLBR_ANY ((1 << 9) - 1)
36
37#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
38 CLBR_RCX | CLBR_R8 | CLBR_R9)
39#define CLBR_RET_REG (CLBR_RAX)
40#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
41
42#include <asm/desc_defs.h>
43#endif /* X86_64 */
44
45#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
46 11
47#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
48#include <linux/types.h> 13#include <linux/types.h>
49#include <linux/cpumask.h> 14#include <linux/cpumask.h>
50#include <asm/kmap_types.h>
51#include <asm/desc_defs.h>
52
53struct page;
54struct thread_struct;
55struct desc_ptr;
56struct tss_struct;
57struct mm_struct;
58struct desc_struct;
59struct task_struct;
60
61/*
62 * Wrapper type for pointers to code which uses the non-standard
63 * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
64 */
65struct paravirt_callee_save {
66 void *func;
67};
68
69/* general info */
70struct pv_info {
71 unsigned int kernel_rpl;
72 int shared_kernel_pmd;
73 int paravirt_enabled;
74 const char *name;
75};
76
77struct pv_init_ops {
78 /*
79 * Patch may replace one of the defined code sequences with
80 * arbitrary code, subject to the same register constraints.
81 * This generally means the code is not free to clobber any
82 * registers other than EAX. The patch function should return
83 * the number of bytes of code generated, as we nop pad the
84 * rest in generic code.
85 */
86 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
87 unsigned long addr, unsigned len);
88
89 /* Basic arch-specific setup */
90 void (*arch_setup)(void);
91 char *(*memory_setup)(void);
92 void (*post_allocator_init)(void);
93
94 /* Print a banner to identify the environment */
95 void (*banner)(void);
96};
97
98
99struct pv_lazy_ops {
100 /* Set deferred update mode, used for batching operations. */
101 void (*enter)(void);
102 void (*leave)(void);
103};
104
105struct pv_time_ops {
106 void (*time_init)(void);
107
108 /* Get and set time of day */
109 unsigned long (*get_wallclock)(void);
110 int (*set_wallclock)(unsigned long);
111
112 unsigned long long (*sched_clock)(void);
113 unsigned long (*get_tsc_khz)(void);
114};
115
116struct pv_cpu_ops {
117 /* hooks for various privileged instructions */
118 unsigned long (*get_debugreg)(int regno);
119 void (*set_debugreg)(int regno, unsigned long value);
120
121 void (*clts)(void);
122
123 unsigned long (*read_cr0)(void);
124 void (*write_cr0)(unsigned long);
125
126 unsigned long (*read_cr4_safe)(void);
127 unsigned long (*read_cr4)(void);
128 void (*write_cr4)(unsigned long);
129
130#ifdef CONFIG_X86_64
131 unsigned long (*read_cr8)(void);
132 void (*write_cr8)(unsigned long);
133#endif
134
135 /* Segment descriptor handling */
136 void (*load_tr_desc)(void);
137 void (*load_gdt)(const struct desc_ptr *);
138 void (*load_idt)(const struct desc_ptr *);
139 void (*store_gdt)(struct desc_ptr *);
140 void (*store_idt)(struct desc_ptr *);
141 void (*set_ldt)(const void *desc, unsigned entries);
142 unsigned long (*store_tr)(void);
143 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
144#ifdef CONFIG_X86_64
145 void (*load_gs_index)(unsigned int idx);
146#endif
147 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
148 const void *desc);
149 void (*write_gdt_entry)(struct desc_struct *,
150 int entrynum, const void *desc, int size);
151 void (*write_idt_entry)(gate_desc *,
152 int entrynum, const gate_desc *gate);
153 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
154 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
155
156 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
157
158 void (*set_iopl_mask)(unsigned mask);
159
160 void (*wbinvd)(void);
161 void (*io_delay)(void);
162
163 /* cpuid emulation, mostly so that caps bits can be disabled */
164 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
165 unsigned int *ecx, unsigned int *edx);
166
167 /* MSR, PMC and TSC operations.
168 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
169 u64 (*read_msr_amd)(unsigned int msr, int *err);
170 u64 (*read_msr)(unsigned int msr, int *err);
171 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
172
173 u64 (*read_tsc)(void);
174 u64 (*read_pmc)(int counter);
175 unsigned long long (*read_tscp)(unsigned int *aux);
176
177 /*
178 * Atomically enable interrupts and return to userspace. This
179 * is only ever used to return to 32-bit processes; in a
180 * 64-bit kernel, it's used for 32-on-64 compat processes, but
181 * never native 64-bit processes. (Jump, not call.)
182 */
183 void (*irq_enable_sysexit)(void);
184
185 /*
186 * Switch to usermode gs and return to 64-bit usermode using
187 * sysret. Only used in 64-bit kernels to return to 64-bit
188 * processes. Usermode register state, including %rsp, must
189 * already be restored.
190 */
191 void (*usergs_sysret64)(void);
192
193 /*
194 * Switch to usermode gs and return to 32-bit usermode using
195 * sysret. Used to return to 32-on-64 compat processes.
196 * Other usermode register state, including %esp, must already
197 * be restored.
198 */
199 void (*usergs_sysret32)(void);
200
201 /* Normal iret. Jump to this with the standard iret stack
202 frame set up. */
203 void (*iret)(void);
204
205 void (*swapgs)(void);
206
207 void (*start_context_switch)(struct task_struct *prev);
208 void (*end_context_switch)(struct task_struct *next);
209};
210
211struct pv_irq_ops {
212 void (*init_IRQ)(void);
213
214 /*
215 * Get/set interrupt state. save_fl and restore_fl are only
216 * expected to use X86_EFLAGS_IF; all other bits
217 * returned from save_fl are undefined, and may be ignored by
218 * restore_fl.
219 *
220 * NOTE: Callers of these functions expect the callee to preserve
221 * more registers than the standard C calling convention.
222 */
223 struct paravirt_callee_save save_fl;
224 struct paravirt_callee_save restore_fl;
225 struct paravirt_callee_save irq_disable;
226 struct paravirt_callee_save irq_enable;
227
228 void (*safe_halt)(void);
229 void (*halt)(void);
230
231#ifdef CONFIG_X86_64
232 void (*adjust_exception_frame)(void);
233#endif
234};
235
236struct pv_apic_ops {
237#ifdef CONFIG_X86_LOCAL_APIC
238 void (*setup_boot_clock)(void);
239 void (*setup_secondary_clock)(void);
240
241 void (*startup_ipi_hook)(int phys_apicid,
242 unsigned long start_eip,
243 unsigned long start_esp);
244#endif
245};
246
247struct pv_mmu_ops {
248 /*
249 * Called before/after init_mm pagetable setup. setup_start
250 * may reset %cr3, and may pre-install parts of the pagetable;
251 * pagetable setup is expected to preserve any existing
252 * mapping.
253 */
254 void (*pagetable_setup_start)(pgd_t *pgd_base);
255 void (*pagetable_setup_done)(pgd_t *pgd_base);
256
257 unsigned long (*read_cr2)(void);
258 void (*write_cr2)(unsigned long);
259
260 unsigned long (*read_cr3)(void);
261 void (*write_cr3)(unsigned long);
262
263 /*
264 * Hooks for intercepting the creation/use/destruction of an
265 * mm_struct.
266 */
267 void (*activate_mm)(struct mm_struct *prev,
268 struct mm_struct *next);
269 void (*dup_mmap)(struct mm_struct *oldmm,
270 struct mm_struct *mm);
271 void (*exit_mmap)(struct mm_struct *mm);
272
273
274 /* TLB operations */
275 void (*flush_tlb_user)(void);
276 void (*flush_tlb_kernel)(void);
277 void (*flush_tlb_single)(unsigned long addr);
278 void (*flush_tlb_others)(const struct cpumask *cpus,
279 struct mm_struct *mm,
280 unsigned long va);
281
282 /* Hooks for allocating and freeing a pagetable top-level */
283 int (*pgd_alloc)(struct mm_struct *mm);
284 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
285
286 /*
287 * Hooks for allocating/releasing pagetable pages when they're
288 * attached to a pagetable
289 */
290 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
291 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
292 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
293 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
294 void (*release_pte)(unsigned long pfn);
295 void (*release_pmd)(unsigned long pfn);
296 void (*release_pud)(unsigned long pfn);
297
298 /* Pagetable manipulation functions */
299 void (*set_pte)(pte_t *ptep, pte_t pteval);
300 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
301 pte_t *ptep, pte_t pteval);
302 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
303 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
304 pte_t *ptep);
305 void (*pte_update_defer)(struct mm_struct *mm,
306 unsigned long addr, pte_t *ptep);
307
308 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
309 pte_t *ptep);
310 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
311 pte_t *ptep, pte_t pte);
312
313 struct paravirt_callee_save pte_val;
314 struct paravirt_callee_save make_pte;
315
316 struct paravirt_callee_save pgd_val;
317 struct paravirt_callee_save make_pgd;
318
319#if PAGETABLE_LEVELS >= 3
320#ifdef CONFIG_X86_PAE
321 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
322 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
323 pte_t *ptep);
324 void (*pmd_clear)(pmd_t *pmdp);
325
326#endif /* CONFIG_X86_PAE */
327
328 void (*set_pud)(pud_t *pudp, pud_t pudval);
329
330 struct paravirt_callee_save pmd_val;
331 struct paravirt_callee_save make_pmd;
332
333#if PAGETABLE_LEVELS == 4
334 struct paravirt_callee_save pud_val;
335 struct paravirt_callee_save make_pud;
336
337 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
338#endif /* PAGETABLE_LEVELS == 4 */
339#endif /* PAGETABLE_LEVELS >= 3 */
340
341#ifdef CONFIG_HIGHPTE
342 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
343#endif
344
345 struct pv_lazy_ops lazy_mode;
346
347 /* dom0 ops */
348
349 /* Sometimes the physical address is a pfn, and sometimes it's
350 an mfn. We can tell which is which from the index. */
351 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
352 phys_addr_t phys, pgprot_t flags);
353};
354
355struct raw_spinlock;
356struct pv_lock_ops {
357 int (*spin_is_locked)(struct raw_spinlock *lock);
358 int (*spin_is_contended)(struct raw_spinlock *lock);
359 void (*spin_lock)(struct raw_spinlock *lock);
360 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
361 int (*spin_trylock)(struct raw_spinlock *lock);
362 void (*spin_unlock)(struct raw_spinlock *lock);
363};
364
365/* This contains all the paravirt structures: we get a convenient
366 * number for each function using the offset which we use to indicate
367 * what to patch. */
368struct paravirt_patch_template {
369 struct pv_init_ops pv_init_ops;
370 struct pv_time_ops pv_time_ops;
371 struct pv_cpu_ops pv_cpu_ops;
372 struct pv_irq_ops pv_irq_ops;
373 struct pv_apic_ops pv_apic_ops;
374 struct pv_mmu_ops pv_mmu_ops;
375 struct pv_lock_ops pv_lock_ops;
376};
377
378extern struct pv_info pv_info;
379extern struct pv_init_ops pv_init_ops;
380extern struct pv_time_ops pv_time_ops;
381extern struct pv_cpu_ops pv_cpu_ops;
382extern struct pv_irq_ops pv_irq_ops;
383extern struct pv_apic_ops pv_apic_ops;
384extern struct pv_mmu_ops pv_mmu_ops;
385extern struct pv_lock_ops pv_lock_ops;
386
387#define PARAVIRT_PATCH(x) \
388 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
389
390#define paravirt_type(op) \
391 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
392 [paravirt_opptr] "i" (&(op))
393#define paravirt_clobber(clobber) \
394 [paravirt_clobber] "i" (clobber)
395
396/*
397 * Generate some code, and mark it as patchable by the
398 * apply_paravirt() alternate instruction patcher.
399 */
400#define _paravirt_alt(insn_string, type, clobber) \
401 "771:\n\t" insn_string "\n" "772:\n" \
402 ".pushsection .parainstructions,\"a\"\n" \
403 _ASM_ALIGN "\n" \
404 _ASM_PTR " 771b\n" \
405 " .byte " type "\n" \
406 " .byte 772b-771b\n" \
407 " .short " clobber "\n" \
408 ".popsection\n"
409
410/* Generate patchable code, with the default asm parameters. */
411#define paravirt_alt(insn_string) \
412 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
413
414/* Simple instruction patching code. */
415#define DEF_NATIVE(ops, name, code) \
416 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
417 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
418
419unsigned paravirt_patch_nop(void);
420unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
421unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
422unsigned paravirt_patch_ignore(unsigned len);
423unsigned paravirt_patch_call(void *insnbuf,
424 const void *target, u16 tgt_clobbers,
425 unsigned long addr, u16 site_clobbers,
426 unsigned len);
427unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
428 unsigned long addr, unsigned len);
429unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
430 unsigned long addr, unsigned len);
431
432unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
433 const char *start, const char *end);
434
435unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
436 unsigned long addr, unsigned len);
437
438int paravirt_disable_iospace(void);
439
440/*
441 * This generates an indirect call based on the operation type number.
442 * The type number, computed in PARAVIRT_PATCH, is derived from the
443 * offset into the paravirt_patch_template structure, and can therefore be
444 * freely converted back into a structure offset.
445 */
446#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
447
448/*
449 * These macros are intended to wrap calls through one of the paravirt
450 * ops structs, so that they can be later identified and patched at
451 * runtime.
452 *
453 * Normally, a call to a pv_op function is a simple indirect call:
454 * (pv_op_struct.operations)(args...).
455 *
456 * Unfortunately, this is a relatively slow operation for modern CPUs,
457 * because it cannot necessarily determine what the destination
458 * address is. In this case, the address is a runtime constant, so at
459 * the very least we can patch the call to e a simple direct call, or
460 * ideally, patch an inline implementation into the callsite. (Direct
461 * calls are essentially free, because the call and return addresses
462 * are completely predictable.)
463 *
464 * For i386, these macros rely on the standard gcc "regparm(3)" calling
465 * convention, in which the first three arguments are placed in %eax,
466 * %edx, %ecx (in that order), and the remaining arguments are placed
467 * on the stack. All caller-save registers (eax,edx,ecx) are expected
468 * to be modified (either clobbered or used for return values).
469 * X86_64, on the other hand, already specifies a register-based calling
470 * convention, returning in %rax, with parameters going in %rdi, %rsi,
471 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
472 * special handling for dealing with 4 arguments, unlike i386.
473 * However, x86_64 also has to clobber all caller-saved registers, which
474 * unfortunately are quite a few (r8 - r11).
475 *
476 * The call instruction itself is marked by placing its start address
477 * and size into the .parainstructions section, so that
478 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
479 * appropriate patching under the control of the backend pv_init_ops
480 * implementation.
481 *
482 * Unfortunately there's no way to get gcc to generate the args setup
483 * for the call, and then allow the call itself to be generated by an
484 * inline asm. Because of this, we must do the complete arg setup and
485 * return value handling from within these macros. This is fairly
486 * cumbersome.
487 *
488 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
489 * It could be extended to more arguments, but there would be little
490 * to be gained from that. For each number of arguments, there are
491 * the two VCALL and CALL variants for void and non-void functions.
492 *
493 * When there is a return value, the invoker of the macro must specify
494 * the return type. The macro then uses sizeof() on that type to
495 * determine whether it's a 32- or 64-bit value, and places the return
496 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
497 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
498 * the return value size.
499 *
500 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
501 * on i386 the pair is passed
502 * in low,high order.
503 *
504 * Small structures are passed and returned in registers. The macro
505 * calling convention can't directly deal with this, so the wrapper
506 * functions must do this.
507 *
508 * These PVOP_* macros are only defined within this header. This
509 * means that all uses must be wrapped in inline functions. This also
510 * makes sure the incoming and outgoing types are always correct.
511 */
512#ifdef CONFIG_X86_32
513#define PVOP_VCALL_ARGS \
514 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
515#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
516
517#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
518#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
519#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
520
521#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
522 "=c" (__ecx)
523#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
524
525#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
526#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
527
528#define EXTRA_CLOBBERS
529#define VEXTRA_CLOBBERS
530#else /* CONFIG_X86_64 */
531#define PVOP_VCALL_ARGS \
532 unsigned long __edi = __edi, __esi = __esi, \
533 __edx = __edx, __ecx = __ecx
534#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
535
536#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
537#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
538#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
539#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
540
541#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
542 "=S" (__esi), "=d" (__edx), \
543 "=c" (__ecx)
544#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
545
546#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
547#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
548
549#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
550#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
551#endif /* CONFIG_X86_32 */
552
553#ifdef CONFIG_PARAVIRT_DEBUG
554#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
555#else
556#define PVOP_TEST_NULL(op) ((void)op)
557#endif
558
559#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
560 pre, post, ...) \
561 ({ \
562 rettype __ret; \
563 PVOP_CALL_ARGS; \
564 PVOP_TEST_NULL(op); \
565 /* This is 32-bit specific, but is okay in 64-bit */ \
566 /* since this condition will never hold */ \
567 if (sizeof(rettype) > sizeof(unsigned long)) { \
568 asm volatile(pre \
569 paravirt_alt(PARAVIRT_CALL) \
570 post \
571 : call_clbr \
572 : paravirt_type(op), \
573 paravirt_clobber(clbr), \
574 ##__VA_ARGS__ \
575 : "memory", "cc" extra_clbr); \
576 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
577 } else { \
578 asm volatile(pre \
579 paravirt_alt(PARAVIRT_CALL) \
580 post \
581 : call_clbr \
582 : paravirt_type(op), \
583 paravirt_clobber(clbr), \
584 ##__VA_ARGS__ \
585 : "memory", "cc" extra_clbr); \
586 __ret = (rettype)__eax; \
587 } \
588 __ret; \
589 })
590
591#define __PVOP_CALL(rettype, op, pre, post, ...) \
592 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
593 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
594
595#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
596 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
597 PVOP_CALLEE_CLOBBERS, , \
598 pre, post, ##__VA_ARGS__)
599
600
601#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
602 ({ \
603 PVOP_VCALL_ARGS; \
604 PVOP_TEST_NULL(op); \
605 asm volatile(pre \
606 paravirt_alt(PARAVIRT_CALL) \
607 post \
608 : call_clbr \
609 : paravirt_type(op), \
610 paravirt_clobber(clbr), \
611 ##__VA_ARGS__ \
612 : "memory", "cc" extra_clbr); \
613 })
614
615#define __PVOP_VCALL(op, pre, post, ...) \
616 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
617 VEXTRA_CLOBBERS, \
618 pre, post, ##__VA_ARGS__)
619
620#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
621 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
622 PVOP_VCALLEE_CLOBBERS, , \
623 pre, post, ##__VA_ARGS__)
624
625
626
627#define PVOP_CALL0(rettype, op) \
628 __PVOP_CALL(rettype, op, "", "")
629#define PVOP_VCALL0(op) \
630 __PVOP_VCALL(op, "", "")
631
632#define PVOP_CALLEE0(rettype, op) \
633 __PVOP_CALLEESAVE(rettype, op, "", "")
634#define PVOP_VCALLEE0(op) \
635 __PVOP_VCALLEESAVE(op, "", "")
636
637
638#define PVOP_CALL1(rettype, op, arg1) \
639 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
640#define PVOP_VCALL1(op, arg1) \
641 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
642
643#define PVOP_CALLEE1(rettype, op, arg1) \
644 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
645#define PVOP_VCALLEE1(op, arg1) \
646 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
647
648
649#define PVOP_CALL2(rettype, op, arg1, arg2) \
650 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
651 PVOP_CALL_ARG2(arg2))
652#define PVOP_VCALL2(op, arg1, arg2) \
653 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
654 PVOP_CALL_ARG2(arg2))
655
656#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
657 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
658 PVOP_CALL_ARG2(arg2))
659#define PVOP_VCALLEE2(op, arg1, arg2) \
660 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
661 PVOP_CALL_ARG2(arg2))
662
663
664#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
665 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
666 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
667#define PVOP_VCALL3(op, arg1, arg2, arg3) \
668 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
669 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
670
671/* This is the only difference for x86_64, where we can make it much simpler */
672#ifdef CONFIG_X86_32
673#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
674 __PVOP_CALL(rettype, op, \
675 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
676 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
677 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
678#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
679 __PVOP_VCALL(op, \
680 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
681 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
682 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
683#else
684#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
685 __PVOP_CALL(rettype, op, "", "", \
686 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
687 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
688#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
689 __PVOP_VCALL(op, "", "", \
690 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
691 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
692#endif
693 15
694static inline int paravirt_enabled(void) 16static inline int paravirt_enabled(void)
695{ 17{
@@ -702,22 +24,6 @@ static inline void load_sp0(struct tss_struct *tss,
702 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); 24 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
703} 25}
704 26
705#define ARCH_SETUP pv_init_ops.arch_setup();
706static inline unsigned long get_wallclock(void)
707{
708 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
709}
710
711static inline int set_wallclock(unsigned long nowtime)
712{
713 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
714}
715
716static inline void (*choose_time_init(void))(void)
717{
718 return pv_time_ops.time_init;
719}
720
721/* The paravirtualized CPUID instruction. */ 27/* The paravirtualized CPUID instruction. */
722static inline void __cpuid(unsigned int *eax, unsigned int *ebx, 28static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
723 unsigned int *ecx, unsigned int *edx) 29 unsigned int *ecx, unsigned int *edx)
@@ -820,15 +126,22 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
820{ 126{
821 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err); 127 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
822} 128}
823static inline u64 paravirt_read_msr_amd(unsigned msr, int *err) 129
130static inline int paravirt_rdmsr_regs(u32 *regs)
824{ 131{
825 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err); 132 return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
826} 133}
134
827static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) 135static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
828{ 136{
829 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high); 137 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
830} 138}
831 139
140static inline int paravirt_wrmsr_regs(u32 *regs)
141{
142 return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
143}
144
832/* These should all do BUG_ON(_err), but our headers are too tangled. */ 145/* These should all do BUG_ON(_err), but our headers are too tangled. */
833#define rdmsr(msr, val1, val2) \ 146#define rdmsr(msr, val1, val2) \
834do { \ 147do { \
@@ -862,6 +175,9 @@ do { \
862 _err; \ 175 _err; \
863}) 176})
864 177
178#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
179#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
180
865static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) 181static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
866{ 182{
867 int err; 183 int err;
@@ -871,12 +187,31 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
871} 187}
872static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) 188static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
873{ 189{
190 u32 gprs[8] = { 0 };
874 int err; 191 int err;
875 192
876 *p = paravirt_read_msr_amd(msr, &err); 193 gprs[1] = msr;
194 gprs[7] = 0x9c5a203a;
195
196 err = paravirt_rdmsr_regs(gprs);
197
198 *p = gprs[0] | ((u64)gprs[2] << 32);
199
877 return err; 200 return err;
878} 201}
879 202
203static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
204{
205 u32 gprs[8] = { 0 };
206
207 gprs[0] = (u32)val;
208 gprs[1] = msr;
209 gprs[2] = val >> 32;
210 gprs[7] = 0x9c5a203a;
211
212 return paravirt_wrmsr_regs(gprs);
213}
214
880static inline u64 paravirt_read_tsc(void) 215static inline u64 paravirt_read_tsc(void)
881{ 216{
882 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); 217 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
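How the new regs-based hooks get filled in is outside this hunk; for the native backend one would expect kernel/paravirt.c to wire them straight to the msr.h helpers. A sketch under that assumption (example_native_cpu_ops is illustrative; field subset only, not the full initializer):

struct pv_cpu_ops example_native_cpu_ops = {
	.read_msr   = native_read_msr_safe,
	.rdmsr_regs = native_rdmsr_safe_regs,
	.write_msr  = native_write_msr_safe,
	.wrmsr_regs = native_wrmsr_safe_regs,
	/* ... remaining ops elided ... */
};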
@@ -894,7 +229,6 @@ static inline unsigned long long paravirt_sched_clock(void)
894{ 229{
895 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock); 230 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
896} 231}
897#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
898 232
899static inline unsigned long long paravirt_read_pmc(int counter) 233static inline unsigned long long paravirt_read_pmc(int counter)
900{ 234{
@@ -1012,34 +346,6 @@ static inline void slow_down_io(void)
1012#endif 346#endif
1013} 347}
1014 348
1015#ifdef CONFIG_X86_LOCAL_APIC
1016static inline void setup_boot_clock(void)
1017{
1018 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
1019}
1020
1021static inline void setup_secondary_clock(void)
1022{
1023 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
1024}
1025#endif
1026
1027static inline void paravirt_post_allocator_init(void)
1028{
1029 if (pv_init_ops.post_allocator_init)
1030 (*pv_init_ops.post_allocator_init)();
1031}
1032
1033static inline void paravirt_pagetable_setup_start(pgd_t *base)
1034{
1035 (*pv_mmu_ops.pagetable_setup_start)(base);
1036}
1037
1038static inline void paravirt_pagetable_setup_done(pgd_t *base)
1039{
1040 (*pv_mmu_ops.pagetable_setup_done)(base);
1041}
1042
1043#ifdef CONFIG_SMP 349#ifdef CONFIG_SMP
1044static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, 350static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
1045 unsigned long start_esp) 351 unsigned long start_esp)
@@ -1393,20 +699,6 @@ static inline void pmd_clear(pmd_t *pmdp)
1393} 699}
1394#endif /* CONFIG_X86_PAE */ 700#endif /* CONFIG_X86_PAE */
1395 701
1396/* Lazy mode for batching updates / context switch */
1397enum paravirt_lazy_mode {
1398 PARAVIRT_LAZY_NONE,
1399 PARAVIRT_LAZY_MMU,
1400 PARAVIRT_LAZY_CPU,
1401};
1402
1403enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1404void paravirt_start_context_switch(struct task_struct *prev);
1405void paravirt_end_context_switch(struct task_struct *next);
1406
1407void paravirt_enter_lazy_mmu(void);
1408void paravirt_leave_lazy_mmu(void);
1409
1410#define __HAVE_ARCH_START_CONTEXT_SWITCH 702#define __HAVE_ARCH_START_CONTEXT_SWITCH
1411static inline void arch_start_context_switch(struct task_struct *prev) 703static inline void arch_start_context_switch(struct task_struct *prev)
1412{ 704{
@@ -1437,12 +729,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1437 pv_mmu_ops.set_fixmap(idx, phys, flags); 729 pv_mmu_ops.set_fixmap(idx, phys, flags);
1438} 730}
1439 731
1440void _paravirt_nop(void);
1441u32 _paravirt_ident_32(u32);
1442u64 _paravirt_ident_64(u64);
1443
1444#define paravirt_nop ((void *)_paravirt_nop)
1445
1446#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 732#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
1447 733
1448static inline int __raw_spin_is_locked(struct raw_spinlock *lock) 734static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
@@ -1479,17 +765,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1479 765
1480#endif 766#endif
1481 767
1482/* These all sit in the .parainstructions section to tell us what to patch. */
1483struct paravirt_patch_site {
1484 u8 *instr; /* original instructions */
1485 u8 instrtype; /* type of this instruction */
1486 u8 len; /* length of original instruction */
1487 u16 clobbers; /* what registers you may clobber */
1488};
1489
1490extern struct paravirt_patch_site __parainstructions[],
1491 __parainstructions_end[];
1492
1493#ifdef CONFIG_X86_32 768#ifdef CONFIG_X86_32
1494#define PV_SAVE_REGS "pushl %ecx; pushl %edx;" 769#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1495#define PV_RESTORE_REGS "popl %edx; popl %ecx;" 770#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
@@ -1628,6 +903,8 @@ static inline unsigned long __raw_local_irq_save(void)
1628#undef PVOP_VCALL4 903#undef PVOP_VCALL4
1629#undef PVOP_CALL4 904#undef PVOP_CALL4
1630 905
906extern void default_banner(void);
907
1631#else /* __ASSEMBLY__ */ 908#else /* __ASSEMBLY__ */
1632 909
1633#define _PVSITE(ptype, clobbers, ops, word, algn) \ 910#define _PVSITE(ptype, clobbers, ops, word, algn) \
@@ -1768,5 +1045,7 @@ static inline unsigned long __raw_local_irq_save(void)
1768#endif /* CONFIG_X86_32 */ 1045#endif /* CONFIG_X86_32 */
1769 1046
1770#endif /* __ASSEMBLY__ */ 1047#endif /* __ASSEMBLY__ */
1771#endif /* CONFIG_PARAVIRT */ 1048#else /* CONFIG_PARAVIRT */
1049# define default_banner x86_init_noop
1050#endif /* !CONFIG_PARAVIRT */
1772#endif /* _ASM_X86_PARAVIRT_H */ 1051#endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
new file mode 100644
index 000000000000..dd0f5b32489d
--- /dev/null
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -0,0 +1,693 @@
1#ifndef _ASM_X86_PARAVIRT_TYPES_H
2#define _ASM_X86_PARAVIRT_TYPES_H
3
4/* Bitmask of what can be clobbered: usually at least eax. */
5#define CLBR_NONE 0
6#define CLBR_EAX (1 << 0)
7#define CLBR_ECX (1 << 1)
8#define CLBR_EDX (1 << 2)
9#define CLBR_EDI (1 << 3)
10
11#ifdef CONFIG_X86_32
12/* CLBR_ANY should match all the registers the platform has. For i386, that's all of them */
13#define CLBR_ANY ((1 << 4) - 1)
14
15#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
16#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
17#define CLBR_SCRATCH (0)
18#else
19#define CLBR_RAX CLBR_EAX
20#define CLBR_RCX CLBR_ECX
21#define CLBR_RDX CLBR_EDX
22#define CLBR_RDI CLBR_EDI
23#define CLBR_RSI (1 << 4)
24#define CLBR_R8 (1 << 5)
25#define CLBR_R9 (1 << 6)
26#define CLBR_R10 (1 << 7)
27#define CLBR_R11 (1 << 8)
28
29#define CLBR_ANY ((1 << 9) - 1)
30
31#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
32 CLBR_RCX | CLBR_R8 | CLBR_R9)
33#define CLBR_RET_REG (CLBR_RAX)
34#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
35
36#endif /* X86_64 */
37
38#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
39
40#ifndef __ASSEMBLY__
41
42#include <asm/desc_defs.h>
43#include <asm/kmap_types.h>
44
45struct page;
46struct thread_struct;
47struct desc_ptr;
48struct tss_struct;
49struct mm_struct;
50struct desc_struct;
51struct task_struct;
52struct cpumask;
53
54/*
55 * Wrapper type for pointers to code which uses the non-standard
56 * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
57 */
58struct paravirt_callee_save {
59 void *func;
60};
61
62/* general info */
63struct pv_info {
64 unsigned int kernel_rpl;
65 int shared_kernel_pmd;
66 int paravirt_enabled;
67 const char *name;
68};
69
70struct pv_init_ops {
71 /*
72 * Patch may replace one of the defined code sequences with
73 * arbitrary code, subject to the same register constraints.
74 * This generally means the code is not free to clobber any
75 * registers other than EAX. The patch function should return
76 * the number of bytes of code generated, as we nop pad the
77 * rest in generic code.
78 */
79 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
80 unsigned long addr, unsigned len);
81};
82
83
84struct pv_lazy_ops {
85 /* Set deferred update mode, used for batching operations. */
86 void (*enter)(void);
87 void (*leave)(void);
88};
89
90struct pv_time_ops {
91 unsigned long long (*sched_clock)(void);
92 unsigned long (*get_tsc_khz)(void);
93};
94
95struct pv_cpu_ops {
96 /* hooks for various privileged instructions */
97 unsigned long (*get_debugreg)(int regno);
98 void (*set_debugreg)(int regno, unsigned long value);
99
100 void (*clts)(void);
101
102 unsigned long (*read_cr0)(void);
103 void (*write_cr0)(unsigned long);
104
105 unsigned long (*read_cr4_safe)(void);
106 unsigned long (*read_cr4)(void);
107 void (*write_cr4)(unsigned long);
108
109#ifdef CONFIG_X86_64
110 unsigned long (*read_cr8)(void);
111 void (*write_cr8)(unsigned long);
112#endif
113
114 /* Segment descriptor handling */
115 void (*load_tr_desc)(void);
116 void (*load_gdt)(const struct desc_ptr *);
117 void (*load_idt)(const struct desc_ptr *);
118 void (*store_gdt)(struct desc_ptr *);
119 void (*store_idt)(struct desc_ptr *);
120 void (*set_ldt)(const void *desc, unsigned entries);
121 unsigned long (*store_tr)(void);
122 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
123#ifdef CONFIG_X86_64
124 void (*load_gs_index)(unsigned int idx);
125#endif
126 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
127 const void *desc);
128 void (*write_gdt_entry)(struct desc_struct *,
129 int entrynum, const void *desc, int size);
130 void (*write_idt_entry)(gate_desc *,
131 int entrynum, const gate_desc *gate);
132 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
133 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
134
135 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
136
137 void (*set_iopl_mask)(unsigned mask);
138
139 void (*wbinvd)(void);
140 void (*io_delay)(void);
141
142 /* cpuid emulation, mostly so that caps bits can be disabled */
143 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
144 unsigned int *ecx, unsigned int *edx);
145
146 /* MSR, PMC and TSC operations.
147 read_msr sets err to 0/-EFAULT; write_msr returns 0/-EFAULT. */
148 u64 (*read_msr)(unsigned int msr, int *err);
149 int (*rdmsr_regs)(u32 *regs);
150 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
151 int (*wrmsr_regs)(u32 *regs);
152
153 u64 (*read_tsc)(void);
154 u64 (*read_pmc)(int counter);
155 unsigned long long (*read_tscp)(unsigned int *aux);
156
157 /*
158 * Atomically enable interrupts and return to userspace. This
159 * is only ever used to return to 32-bit processes; in a
160 * 64-bit kernel, it's used for 32-on-64 compat processes, but
161 * never native 64-bit processes. (Jump, not call.)
162 */
163 void (*irq_enable_sysexit)(void);
164
165 /*
166 * Switch to usermode gs and return to 64-bit usermode using
167 * sysret. Only used in 64-bit kernels to return to 64-bit
168 * processes. Usermode register state, including %rsp, must
169 * already be restored.
170 */
171 void (*usergs_sysret64)(void);
172
173 /*
174 * Switch to usermode gs and return to 32-bit usermode using
175 * sysret. Used to return to 32-on-64 compat processes.
176 * Other usermode register state, including %esp, must already
177 * be restored.
178 */
179 void (*usergs_sysret32)(void);
180
181 /* Normal iret. Jump to this with the standard iret stack
182 frame set up. */
183 void (*iret)(void);
184
185 void (*swapgs)(void);
186
187 void (*start_context_switch)(struct task_struct *prev);
188 void (*end_context_switch)(struct task_struct *next);
189};
190
191struct pv_irq_ops {
192 /*
193 * Get/set interrupt state. save_fl and restore_fl are only
194 * expected to use X86_EFLAGS_IF; all other bits
195 * returned from save_fl are undefined, and may be ignored by
196 * restore_fl.
197 *
198 * NOTE: Callers of these functions expect the callee to preserve
199 * more registers than the standard C calling convention requires.
200 */
201 struct paravirt_callee_save save_fl;
202 struct paravirt_callee_save restore_fl;
203 struct paravirt_callee_save irq_disable;
204 struct paravirt_callee_save irq_enable;
205
206 void (*safe_halt)(void);
207 void (*halt)(void);
208
209#ifdef CONFIG_X86_64
210 void (*adjust_exception_frame)(void);
211#endif
212};
213
214struct pv_apic_ops {
215#ifdef CONFIG_X86_LOCAL_APIC
216 void (*startup_ipi_hook)(int phys_apicid,
217 unsigned long start_eip,
218 unsigned long start_esp);
219#endif
220};
221
222struct pv_mmu_ops {
223 unsigned long (*read_cr2)(void);
224 void (*write_cr2)(unsigned long);
225
226 unsigned long (*read_cr3)(void);
227 void (*write_cr3)(unsigned long);
228
229 /*
230 * Hooks for intercepting the creation/use/destruction of an
231 * mm_struct.
232 */
233 void (*activate_mm)(struct mm_struct *prev,
234 struct mm_struct *next);
235 void (*dup_mmap)(struct mm_struct *oldmm,
236 struct mm_struct *mm);
237 void (*exit_mmap)(struct mm_struct *mm);
238
239
240 /* TLB operations */
241 void (*flush_tlb_user)(void);
242 void (*flush_tlb_kernel)(void);
243 void (*flush_tlb_single)(unsigned long addr);
244 void (*flush_tlb_others)(const struct cpumask *cpus,
245 struct mm_struct *mm,
246 unsigned long va);
247
248 /* Hooks for allocating and freeing a pagetable top-level */
249 int (*pgd_alloc)(struct mm_struct *mm);
250 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
251
252 /*
253 * Hooks for allocating/releasing pagetable pages when they're
254 * attached to a pagetable
255 */
256 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
257 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
258 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
259 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
260 void (*release_pte)(unsigned long pfn);
261 void (*release_pmd)(unsigned long pfn);
262 void (*release_pud)(unsigned long pfn);
263
264 /* Pagetable manipulation functions */
265 void (*set_pte)(pte_t *ptep, pte_t pteval);
266 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
267 pte_t *ptep, pte_t pteval);
268 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
269 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
270 pte_t *ptep);
271 void (*pte_update_defer)(struct mm_struct *mm,
272 unsigned long addr, pte_t *ptep);
273
274 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
275 pte_t *ptep);
276 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
277 pte_t *ptep, pte_t pte);
278
279 struct paravirt_callee_save pte_val;
280 struct paravirt_callee_save make_pte;
281
282 struct paravirt_callee_save pgd_val;
283 struct paravirt_callee_save make_pgd;
284
285#if PAGETABLE_LEVELS >= 3
286#ifdef CONFIG_X86_PAE
287 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
288 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
289 pte_t *ptep);
290 void (*pmd_clear)(pmd_t *pmdp);
291
292#endif /* CONFIG_X86_PAE */
293
294 void (*set_pud)(pud_t *pudp, pud_t pudval);
295
296 struct paravirt_callee_save pmd_val;
297 struct paravirt_callee_save make_pmd;
298
299#if PAGETABLE_LEVELS == 4
300 struct paravirt_callee_save pud_val;
301 struct paravirt_callee_save make_pud;
302
303 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
304#endif /* PAGETABLE_LEVELS == 4 */
305#endif /* PAGETABLE_LEVELS >= 3 */
306
307#ifdef CONFIG_HIGHPTE
308 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
309#endif
310
311 struct pv_lazy_ops lazy_mode;
312
313 /* dom0 ops */
314
315 /* Sometimes the physical address is a pfn, and sometimes it's
316 an mfn. We can tell which is which from the index. */
317 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
318 phys_addr_t phys, pgprot_t flags);
319};
320
321struct raw_spinlock;
322struct pv_lock_ops {
323 int (*spin_is_locked)(struct raw_spinlock *lock);
324 int (*spin_is_contended)(struct raw_spinlock *lock);
325 void (*spin_lock)(struct raw_spinlock *lock);
326 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
327 int (*spin_trylock)(struct raw_spinlock *lock);
328 void (*spin_unlock)(struct raw_spinlock *lock);
329};
330
331/* This contains all the paravirt structures: we get a convenient
332 * number for each function using its offset within the template,
333 * which we use to indicate what to patch. */
334struct paravirt_patch_template {
335 struct pv_init_ops pv_init_ops;
336 struct pv_time_ops pv_time_ops;
337 struct pv_cpu_ops pv_cpu_ops;
338 struct pv_irq_ops pv_irq_ops;
339 struct pv_apic_ops pv_apic_ops;
340 struct pv_mmu_ops pv_mmu_ops;
341 struct pv_lock_ops pv_lock_ops;
342};
343
344extern struct pv_info pv_info;
345extern struct pv_init_ops pv_init_ops;
346extern struct pv_time_ops pv_time_ops;
347extern struct pv_cpu_ops pv_cpu_ops;
348extern struct pv_irq_ops pv_irq_ops;
349extern struct pv_apic_ops pv_apic_ops;
350extern struct pv_mmu_ops pv_mmu_ops;
351extern struct pv_lock_ops pv_lock_ops;
352
353#define PARAVIRT_PATCH(x) \
354 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
355
356#define paravirt_type(op) \
357 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
358 [paravirt_opptr] "i" (&(op))
359#define paravirt_clobber(clobber) \
360 [paravirt_clobber] "i" (clobber)
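/*
 * Illustration: the type number is just a word index into
 * paravirt_patch_template, so a patcher can compare it against
 * PARAVIRT_PATCH() of any slot (example_is_irq_enable() is a
 * hypothetical helper, shown only to make the numbering concrete):
 */
static inline int example_is_irq_enable(u8 type)
{
	return type == PARAVIRT_PATCH(pv_irq_ops.irq_enable);
}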
361
362/*
363 * Generate some code, and mark it as patchable by the
364 * apply_paravirt() alternate instruction patcher.
365 */
366#define _paravirt_alt(insn_string, type, clobber) \
367 "771:\n\t" insn_string "\n" "772:\n" \
368 ".pushsection .parainstructions,\"a\"\n" \
369 _ASM_ALIGN "\n" \
370 _ASM_PTR " 771b\n" \
371 " .byte " type "\n" \
372 " .byte 772b-771b\n" \
373 " .short " clobber "\n" \
374 ".popsection\n"
375
376/* Generate patchable code, with the default asm parameters. */
377#define paravirt_alt(insn_string) \
378 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
379
380/* Simple instruction patching code. */
381#define DEF_NATIVE(ops, name, code) \
382 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
383 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
384
385unsigned paravirt_patch_nop(void);
386unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
387unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
388unsigned paravirt_patch_ignore(unsigned len);
389unsigned paravirt_patch_call(void *insnbuf,
390 const void *target, u16 tgt_clobbers,
391 unsigned long addr, u16 site_clobbers,
392 unsigned len);
393unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
394 unsigned long addr, unsigned len);
395unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
396 unsigned long addr, unsigned len);
397
398unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
399 const char *start, const char *end);
400
401unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
402 unsigned long addr, unsigned len);
403
404int paravirt_disable_iospace(void);
405
406/*
407 * This generates an indirect call based on the operation type number.
408 * The type number, computed in PARAVIRT_PATCH, is derived from the
409 * offset into the paravirt_patch_template structure, and can therefore be
410 * freely converted back into a structure offset.
411 */
412#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
413
414/*
415 * These macros are intended to wrap calls through one of the paravirt
416 * ops structs, so that they can be later identified and patched at
417 * runtime.
418 *
419 * Normally, a call to a pv_op function is a simple indirect call:
420 * (pv_op_struct.operations)(args...).
421 *
422 * Unfortunately, this is a relatively slow operation for modern CPUs,
423 * because it cannot necessarily determine what the destination
424 * address is. In this case, the address is a runtime constant, so at
425 * the very least we can patch the call to be a simple direct call, or
426 * ideally, patch an inline implementation into the callsite. (Direct
427 * calls are essentially free, because the call and return addresses
428 * are completely predictable.)
429 *
430 * For i386, these macros rely on the standard gcc "regparm(3)" calling
431 * convention, in which the first three arguments are placed in %eax,
432 * %edx, %ecx (in that order), and the remaining arguments are placed
433 * on the stack. All caller-save registers (eax,edx,ecx) are expected
434 * to be modified (either clobbered or used for return values).
435 * x86_64, on the other hand, already specifies a register-based calling
436 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
437 * %rdx, and %rcx. For this reason, x86_64 needs no special handling
438 * for a fourth argument, unlike i386. However, x86_64 also has to
439 * clobber all caller-saved registers, which unfortunately is quite a
440 * few of them (%r8 - %r11).
441 *
442 * The call instruction itself is marked by placing its start address
443 * and size into the .parainstructions section, so that
444 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
445 * appropriate patching under the control of the backend pv_init_ops
446 * implementation.
447 *
448 * Unfortunately there's no way to get gcc to generate the args setup
449 * for the call, and then allow the call itself to be generated by an
450 * inline asm. Because of this, we must do the complete arg setup and
451 * return value handling from within these macros. This is fairly
452 * cumbersome.
453 *
454 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
455 * It could be extended to more arguments, but there would be little
456 * to be gained from that. For each number of arguments, there are
457 * the two VCALL and CALL variants for void and non-void functions.
458 *
459 * When there is a return value, the invoker of the macro must specify
460 * the return type. The macro then uses sizeof() on that type to
461 * determine whether it's a 32- or 64-bit value, and places the return
462 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
463 * 64-bit). For x86_64 machines, it just returns in %rax regardless of
464 * the return value size.
465 *
466 * On i386, 64-bit arguments are passed as a pair of adjacent
467 * 32-bit arguments, with the pair
468 * in low,high order.
469 *
470 * Small structures are passed and returned in registers. The macro
471 * calling convention can't directly deal with this, so the wrapper
472 * functions must do this.
473 *
474 * These PVOP_* macros are only defined within this header. This
475 * means that all uses must be wrapped in inline functions. This also
476 * makes sure the incoming and outgoing types are always correct.
477 */
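/*
 * A sketch of the wrapper pattern described above (the real wrappers
 * live in paravirt.h; get_debugreg is shown as a representative case,
 * under an illustrative name):
 */
static inline unsigned long example_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}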
478#ifdef CONFIG_X86_32
479#define PVOP_VCALL_ARGS \
480 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
481#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
482
483#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
484#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
485#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
486
487#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
488 "=c" (__ecx)
489#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
490
491#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
492#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
493
494#define EXTRA_CLOBBERS
495#define VEXTRA_CLOBBERS
496#else /* CONFIG_X86_64 */
497#define PVOP_VCALL_ARGS \
498 unsigned long __edi = __edi, __esi = __esi, \
499 __edx = __edx, __ecx = __ecx
500#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
501
502#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
503#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
504#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
505#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
506
507#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
508 "=S" (__esi), "=d" (__edx), \
509 "=c" (__ecx)
510#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
511
512#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
513#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
514
515#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
516#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
517#endif /* CONFIG_X86_32 */
518
519#ifdef CONFIG_PARAVIRT_DEBUG
520#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
521#else
522#define PVOP_TEST_NULL(op) ((void)op)
523#endif
524
525#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
526 pre, post, ...) \
527 ({ \
528 rettype __ret; \
529 PVOP_CALL_ARGS; \
530 PVOP_TEST_NULL(op); \
531 /* This is 32-bit specific, but is okay in 64-bit */ \
532 /* since this condition will never hold */ \
533 if (sizeof(rettype) > sizeof(unsigned long)) { \
534 asm volatile(pre \
535 paravirt_alt(PARAVIRT_CALL) \
536 post \
537 : call_clbr \
538 : paravirt_type(op), \
539 paravirt_clobber(clbr), \
540 ##__VA_ARGS__ \
541 : "memory", "cc" extra_clbr); \
542 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
543 } else { \
544 asm volatile(pre \
545 paravirt_alt(PARAVIRT_CALL) \
546 post \
547 : call_clbr \
548 : paravirt_type(op), \
549 paravirt_clobber(clbr), \
550 ##__VA_ARGS__ \
551 : "memory", "cc" extra_clbr); \
552 __ret = (rettype)__eax; \
553 } \
554 __ret; \
555 })
556
557#define __PVOP_CALL(rettype, op, pre, post, ...) \
558 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
559 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
560
561#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
562 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
563 PVOP_CALLEE_CLOBBERS, , \
564 pre, post, ##__VA_ARGS__)
565
566
567#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
568 ({ \
569 PVOP_VCALL_ARGS; \
570 PVOP_TEST_NULL(op); \
571 asm volatile(pre \
572 paravirt_alt(PARAVIRT_CALL) \
573 post \
574 : call_clbr \
575 : paravirt_type(op), \
576 paravirt_clobber(clbr), \
577 ##__VA_ARGS__ \
578 : "memory", "cc" extra_clbr); \
579 })
580
581#define __PVOP_VCALL(op, pre, post, ...) \
582 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
583 VEXTRA_CLOBBERS, \
584 pre, post, ##__VA_ARGS__)
585
586#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
587 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
588 PVOP_VCALLEE_CLOBBERS, , \
589 pre, post, ##__VA_ARGS__)
590
591
592
593#define PVOP_CALL0(rettype, op) \
594 __PVOP_CALL(rettype, op, "", "")
595#define PVOP_VCALL0(op) \
596 __PVOP_VCALL(op, "", "")
597
598#define PVOP_CALLEE0(rettype, op) \
599 __PVOP_CALLEESAVE(rettype, op, "", "")
600#define PVOP_VCALLEE0(op) \
601 __PVOP_VCALLEESAVE(op, "", "")
602
603
604#define PVOP_CALL1(rettype, op, arg1) \
605 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
606#define PVOP_VCALL1(op, arg1) \
607 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
608
609#define PVOP_CALLEE1(rettype, op, arg1) \
610 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
611#define PVOP_VCALLEE1(op, arg1) \
612 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
613
614
615#define PVOP_CALL2(rettype, op, arg1, arg2) \
616 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
617 PVOP_CALL_ARG2(arg2))
618#define PVOP_VCALL2(op, arg1, arg2) \
619 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
620 PVOP_CALL_ARG2(arg2))
621
622#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
623 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
624 PVOP_CALL_ARG2(arg2))
625#define PVOP_VCALLEE2(op, arg1, arg2) \
626 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
627 PVOP_CALL_ARG2(arg2))
628
629
630#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
631 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
632 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
633#define PVOP_VCALL3(op, arg1, arg2, arg3) \
634 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
635 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
636
637/* The 4-argument case is the only place i386 and x86_64 differ; x86_64 is much simpler */
638#ifdef CONFIG_X86_32
639#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
640 __PVOP_CALL(rettype, op, \
641 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
642 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
643 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
644#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
645 __PVOP_VCALL(op, \
646 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
647 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
648 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
649#else
650#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
651 __PVOP_CALL(rettype, op, "", "", \
652 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
653 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
654#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
655 __PVOP_VCALL(op, "", "", \
656 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
657 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
658#endif
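/*
 * Sketch of a four-argument void wrapper as paravirt.h builds it
 * (illustrative name; assumes a one-word pte_t, as on 32-bit non-PAE):
 */
static inline void example_set_pte_at(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pte)
{
	PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}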
659
660/* Lazy mode for batching updates / context switch */
661enum paravirt_lazy_mode {
662 PARAVIRT_LAZY_NONE,
663 PARAVIRT_LAZY_MMU,
664 PARAVIRT_LAZY_CPU,
665};
666
667enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
668void paravirt_start_context_switch(struct task_struct *prev);
669void paravirt_end_context_switch(struct task_struct *next);
670
671void paravirt_enter_lazy_mmu(void);
672void paravirt_leave_lazy_mmu(void);
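/*
 * Hedged sketch of how a backend's pv_lazy_ops hooks typically nest
 * around these helpers (hypothetical function names):
 */
static void example_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();	/* records PARAVIRT_LAZY_MMU */
	/* start queueing pagetable updates */
}

static void example_leave_lazy_mmu(void)
{
	/* flush any queued updates to the hypervisor, then: */
	paravirt_leave_lazy_mmu();
}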
673
674void _paravirt_nop(void);
675u32 _paravirt_ident_32(u32);
676u64 _paravirt_ident_64(u64);
677
678#define paravirt_nop ((void *)_paravirt_nop)
679
680/* These all sit in the .parainstructions section to tell us what to patch. */
681struct paravirt_patch_site {
682 u8 *instr; /* original instructions */
683 u8 instrtype; /* type of this instruction */
684 u8 len; /* length of original instruction */
685 u16 clobbers; /* what registers you may clobber */
686};
687
688extern struct paravirt_patch_site __parainstructions[],
689 __parainstructions_end[];
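/*
 * Sketch of the patch walk, modeled on apply_paravirt() in
 * arch/x86/kernel/alternative.c (simplified; the nop padding and the
 * copy back over p->instr are elided):
 */
static void example_apply(struct paravirt_patch_site *start,
			  struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[16];

	for (p = start; p < end; p++)
		pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
				  (unsigned long)p->instr, p->len);
}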
690
691#endif /* __ASSEMBLY__ */
692
693#endif /* _ASM_X86_PARAVIRT_TYPES_H */
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 7af14e512f97..e2c1668dde7a 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -19,4 +19,9 @@ extern int free_memtype(u64 start, u64 end);
19extern int kernel_map_sync_memtype(u64 base, unsigned long size, 19extern int kernel_map_sync_memtype(u64 base, unsigned long size,
20 unsigned long flag); 20 unsigned long flag);
21 21
22int io_reserve_memtype(resource_size_t start, resource_size_t end,
23 unsigned long *type);
24
25void io_free_memtype(resource_size_t start, resource_size_t end);
26
22#endif /* _ASM_X86_PAT_H */ 27#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 1ff685ca221c..f76a162c082c 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -48,7 +48,6 @@ extern unsigned int pcibios_assign_all_busses(void);
48#else 48#else
49#define pcibios_assign_all_busses() 0 49#define pcibios_assign_all_busses() 0
50#endif 50#endif
51#define pcibios_scan_all_fns(a, b) 0
52 51
53extern unsigned long pci_mem_start; 52extern unsigned long pci_mem_start;
54#define PCIBIOS_MIN_IO 0x1000 53#define PCIBIOS_MIN_IO 0x1000
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 103f1ddb0d85..b65a36defeb7 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -49,7 +49,7 @@
49#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x 49#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
50#define __my_cpu_offset percpu_read(this_cpu_off) 50#define __my_cpu_offset percpu_read(this_cpu_off)
51#else 51#else
52#define __percpu_arg(x) "%" #x 52#define __percpu_arg(x) "%P" #x
53#endif 53#endif
54 54
55/* 55/*
@@ -104,36 +104,48 @@ do { \
104 } \ 104 } \
105} while (0) 105} while (0)
106 106
107#define percpu_from_op(op, var) \ 107#define percpu_from_op(op, var, constraint) \
108({ \ 108({ \
109 typeof(var) ret__; \ 109 typeof(var) ret__; \
110 switch (sizeof(var)) { \ 110 switch (sizeof(var)) { \
111 case 1: \ 111 case 1: \
112 asm(op "b "__percpu_arg(1)",%0" \ 112 asm(op "b "__percpu_arg(1)",%0" \
113 : "=q" (ret__) \ 113 : "=q" (ret__) \
114 : "m" (var)); \ 114 : constraint); \
115 break; \ 115 break; \
116 case 2: \ 116 case 2: \
117 asm(op "w "__percpu_arg(1)",%0" \ 117 asm(op "w "__percpu_arg(1)",%0" \
118 : "=r" (ret__) \ 118 : "=r" (ret__) \
119 : "m" (var)); \ 119 : constraint); \
120 break; \ 120 break; \
121 case 4: \ 121 case 4: \
122 asm(op "l "__percpu_arg(1)",%0" \ 122 asm(op "l "__percpu_arg(1)",%0" \
123 : "=r" (ret__) \ 123 : "=r" (ret__) \
124 : "m" (var)); \ 124 : constraint); \
125 break; \ 125 break; \
126 case 8: \ 126 case 8: \
127 asm(op "q "__percpu_arg(1)",%0" \ 127 asm(op "q "__percpu_arg(1)",%0" \
128 : "=r" (ret__) \ 128 : "=r" (ret__) \
129 : "m" (var)); \ 129 : constraint); \
130 break; \ 130 break; \
131 default: __bad_percpu_size(); \ 131 default: __bad_percpu_size(); \
132 } \ 132 } \
133 ret__; \ 133 ret__; \
134}) 134})
135 135
136#define percpu_read(var) percpu_from_op("mov", per_cpu__##var) 136/*
137 * percpu_read() makes gcc load the percpu variable every time it is
138 * accessed, while percpu_read_stable() allows the value to be cached.
139 * percpu_read_stable() is more efficient and can be used if its value
140 * is guaranteed to be valid across cpus. The current users include
141 * get_current() and get_thread_info(), both of which are actually
142 * per-thread variables implemented as per-cpu variables and thus
143 * stable for the duration of the respective task.
144 */
145#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \
146 "m" (per_cpu__##var))
147#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \
148 "p" (&per_cpu__##var))
137#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) 149#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
138#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) 150#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
139#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) 151#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
@@ -156,15 +168,6 @@ do { \
156/* We can use this directly for local CPU (faster). */ 168/* We can use this directly for local CPU (faster). */
157DECLARE_PER_CPU(unsigned long, this_cpu_off); 169DECLARE_PER_CPU(unsigned long, this_cpu_off);
158 170
159#ifdef CONFIG_NEED_MULTIPLE_NODES
160void *pcpu_lpage_remapped(void *kaddr);
161#else
162static inline void *pcpu_lpage_remapped(void *kaddr)
163{
164 return NULL;
165}
166#endif
167
168#endif /* !__ASSEMBLY__ */ 171#endif /* !__ASSEMBLY__ */
169 172
170#ifdef CONFIG_SMP 173#ifdef CONFIG_SMP
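For context, a hedged sketch of what the new "p" constraint buys (example_stack_base() is hypothetical; kernel_stack is the per-cpu variable that thread_info.h converts later in this diff):

static unsigned long example_stack_base(void)
{
	/* percpu_read() reloads from %gs-relative memory at every use;
	 * percpu_read_stable() passes only the symbol address via the
	 * "p" constraint, so gcc may cache the loaded value across the
	 * enclosing function. */
	return percpu_read_stable(kernel_stack);
}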
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_event.h
index fa64e401589d..ad7ce3fd5065 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_PERF_COUNTER_H 1#ifndef _ASM_X86_PERF_EVENT_H
2#define _ASM_X86_PERF_COUNTER_H 2#define _ASM_X86_PERF_EVENT_H
3 3
4/* 4/*
5 * Performance counter hw details: 5 * Performance event hw details:
6 */ 6 */
7 7
8#define X86_PMC_MAX_GENERIC 8 8#define X86_PMC_MAX_GENERIC 8
@@ -43,7 +43,7 @@
43union cpuid10_eax { 43union cpuid10_eax {
44 struct { 44 struct {
45 unsigned int version_id:8; 45 unsigned int version_id:8;
46 unsigned int num_counters:8; 46 unsigned int num_events:8;
47 unsigned int bit_width:8; 47 unsigned int bit_width:8;
48 unsigned int mask_length:8; 48 unsigned int mask_length:8;
49 } split; 49 } split;
@@ -52,7 +52,7 @@ union cpuid10_eax {
52 52
53union cpuid10_edx { 53union cpuid10_edx {
54 struct { 54 struct {
55 unsigned int num_counters_fixed:4; 55 unsigned int num_events_fixed:4;
56 unsigned int reserved:28; 56 unsigned int reserved:28;
57 } split; 57 } split;
58 unsigned int full; 58 unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {
60 60
61 61
62/* 62/*
63 * Fixed-purpose performance counters: 63 * Fixed-purpose performance events:
64 */ 64 */
65 65
66/* 66/*
@@ -84,15 +84,25 @@ union cpuid10_edx {
84#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b 84#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
85#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) 85#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
86 86
87#ifdef CONFIG_PERF_COUNTERS 87/*
88extern void init_hw_perf_counters(void); 88 * We model BTS tracing as another fixed-mode PMC.
89extern void perf_counters_lapic_init(void); 89 *
90 * We choose a value in the middle of the fixed event range, since lower
91 * values are used by actual fixed events and higher values are used
92 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
93 */
94#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
95
96
97#ifdef CONFIG_PERF_EVENTS
98extern void init_hw_perf_events(void);
99extern void perf_events_lapic_init(void);
90 100
91#define PERF_COUNTER_INDEX_OFFSET 0 101#define PERF_EVENT_INDEX_OFFSET 0
92 102
93#else 103#else
94static inline void init_hw_perf_counters(void) { } 104static inline void init_hw_perf_events(void) { }
95static inline void perf_counters_lapic_init(void) { } 105static inline void perf_events_lapic_init(void) { }
96#endif 106#endif
97 107
98#endif /* _ASM_X86_PERF_COUNTER_H */ 108#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 16748077559a..af6fd360ab35 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -56,16 +56,6 @@ extern struct list_head pgd_list;
56#define pte_update(mm, addr, ptep) do { } while (0) 56#define pte_update(mm, addr, ptep) do { } while (0)
57#define pte_update_defer(mm, addr, ptep) do { } while (0) 57#define pte_update_defer(mm, addr, ptep) do { } while (0)
58 58
59static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
60{
61 native_pagetable_setup_start(base);
62}
63
64static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
65{
66 native_pagetable_setup_done(base);
67}
68
69#define pgd_val(x) native_pgd_val(x) 59#define pgd_val(x) native_pgd_val(x)
70#define __pgd(x) native_make_pgd(x) 60#define __pgd(x) native_make_pgd(x)
71 61
@@ -135,6 +125,11 @@ static inline unsigned long pte_pfn(pte_t pte)
135 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; 125 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
136} 126}
137 127
128static inline unsigned long pmd_pfn(pmd_t pmd)
129{
130 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
131}
132
138#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 133#define pte_page(pte) pfn_to_page(pte_pfn(pte))
139 134
140static inline int pmd_large(pmd_t pte) 135static inline int pmd_large(pmd_t pte)
@@ -359,7 +354,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
359 * this macro returns the index of the entry in the pmd page which would 354 * this macro returns the index of the entry in the pmd page which would
360 * control the given virtual address 355 * control the given virtual address
361 */ 356 */
362static inline unsigned pmd_index(unsigned long address) 357static inline unsigned long pmd_index(unsigned long address)
363{ 358{
364 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); 359 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
365} 360}
@@ -379,7 +374,7 @@ static inline unsigned pmd_index(unsigned long address)
379 * this function returns the index of the entry in the pte page which would 374 * this function returns the index of the entry in the pte page which would
380 * control the given virtual address 375 * control the given virtual address
381 */ 376 */
382static inline unsigned pte_index(unsigned long address) 377static inline unsigned long pte_index(unsigned long address)
383{ 378{
384 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 379 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
385} 380}
@@ -430,11 +425,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
430 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); 425 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
431} 426}
432 427
433static inline unsigned long pmd_pfn(pmd_t pmd)
434{
435 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
436}
437
438static inline int pud_large(pud_t pud) 428static inline int pud_large(pud_t pud)
439{ 429{
440 return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == 430 return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -470,7 +460,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
470#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) 460#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
471 461
472/* to find an entry in a page-table-directory. */ 462/* to find an entry in a page-table-directory. */
473static inline unsigned pud_index(unsigned long address) 463static inline unsigned long pud_index(unsigned long address)
474{ 464{
475 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); 465 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
476} 466}
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 54cb697f4900..7b467bf3c680 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -299,8 +299,8 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pte);
299extern void native_pagetable_setup_start(pgd_t *base); 299extern void native_pagetable_setup_start(pgd_t *base);
300extern void native_pagetable_setup_done(pgd_t *base); 300extern void native_pagetable_setup_done(pgd_t *base);
301#else 301#else
302static inline void native_pagetable_setup_start(pgd_t *base) {} 302#define native_pagetable_setup_start x86_init_pgd_noop
303static inline void native_pagetable_setup_done(pgd_t *base) {} 303#define native_pagetable_setup_done x86_init_pgd_noop
304#endif 304#endif
305 305
306struct seq_file; 306struct seq_file;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c7768269b1cf..c3429e8b2424 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -27,6 +27,7 @@ struct mm_struct;
27#include <linux/cpumask.h> 27#include <linux/cpumask.h>
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/threads.h> 29#include <linux/threads.h>
30#include <linux/math64.h>
30#include <linux/init.h> 31#include <linux/init.h>
31 32
32/* 33/*
@@ -403,7 +404,17 @@ extern unsigned long kernel_eflags;
403extern asmlinkage void ignore_sysret(void); 404extern asmlinkage void ignore_sysret(void);
404#else /* X86_64 */ 405#else /* X86_64 */
405#ifdef CONFIG_CC_STACKPROTECTOR 406#ifdef CONFIG_CC_STACKPROTECTOR
406DECLARE_PER_CPU(unsigned long, stack_canary); 407/*
408 * Make sure the stack canary segment base is cache-line aligned:
409 * "For Intel Atom processors, avoid non zero segment base address
410 * that is not aligned to cache line boundary at all cost."
411 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
412 */
413struct stack_canary {
414 char __pad[20]; /* canary at %gs:20 */
415 unsigned long canary;
416};
417DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
407#endif 418#endif
408#endif /* X86_64 */ 419#endif /* X86_64 */
409 420
@@ -703,13 +714,23 @@ static inline void cpu_relax(void)
703 rep_nop(); 714 rep_nop();
704} 715}
705 716
706/* Stop speculative execution: */ 717/* Stop speculative execution and prefetching of modified code. */
707static inline void sync_core(void) 718static inline void sync_core(void)
708{ 719{
709 int tmp; 720 int tmp;
710 721
711 asm volatile("cpuid" : "=a" (tmp) : "0" (1) 722#if defined(CONFIG_M386) || defined(CONFIG_M486)
712 : "ebx", "ecx", "edx", "memory"); 723 if (boot_cpu_data.x86 < 5)
724 /* There is no speculative execution.
725 * jmp is a barrier to prefetching. */
726 asm volatile("jmp 1f\n1:\n" ::: "memory");
727 else
728#endif
729 /* cpuid is a barrier to speculative execution.
730 * Prefetched instructions are automatically
731 * invalidated when modified. */
732 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
733 : "ebx", "ecx", "edx", "memory");
713} 734}
714 735
715static inline void __monitor(const void *eax, unsigned long ecx, 736static inline void __monitor(const void *eax, unsigned long ecx,
@@ -1000,4 +1021,35 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
1000extern int get_tsc_mode(unsigned long adr); 1021extern int get_tsc_mode(unsigned long adr);
1001extern int set_tsc_mode(unsigned int val); 1022extern int set_tsc_mode(unsigned int val);
1002 1023
1024extern int amd_get_nb_id(int cpu);
1025
1026struct aperfmperf {
1027 u64 aperf, mperf;
1028};
1029
1030static inline void get_aperfmperf(struct aperfmperf *am)
1031{
1032 WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
1033
1034 rdmsrl(MSR_IA32_APERF, am->aperf);
1035 rdmsrl(MSR_IA32_MPERF, am->mperf);
1036}
1037
1038#define APERFMPERF_SHIFT 10
1039
1040static inline
1041unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
1042 struct aperfmperf *new)
1043{
1044 u64 aperf = new->aperf - old->aperf;
1045 u64 mperf = new->mperf - old->mperf;
1046 unsigned long ratio = aperf;
1047
1048 mperf >>= APERFMPERF_SHIFT;
1049 if (mperf)
1050 ratio = div64_u64(aperf, mperf);
1051
1052 return ratio;
1053}
1054
1003#endif /* _ASM_X86_PROCESSOR_H */ 1055#endif /* _ASM_X86_PROCESSOR_H */
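A hedged usage sketch for the new APERF/MPERF helpers (example_freq_ratio() is hypothetical; the result comes back scaled by 2^APERFMPERF_SHIFT):

static unsigned long example_freq_ratio(void)
{
	struct aperfmperf before, after;

	get_aperfmperf(&before);
	/* ... let the workload run for the sampling interval ... */
	get_aperfmperf(&after);

	/* roughly (delta aperf / delta mperf) << APERFMPERF_SHIFT */
	return calc_aperfmperf_ratio(&before, &after);
}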
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 263d397d2eef..75af592677ec 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,33 +1,8 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef _ASM_X86_SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define _ASM_X86_SCATTERLIST_H
3 3
4#include <asm/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 unsigned int length;
13 dma_addr_t dma_address;
14 unsigned int dma_length;
15};
16
17#define ARCH_HAS_SG_CHAIN
18#define ISA_DMA_THRESHOLD (0x00ffffff) 4#define ISA_DMA_THRESHOLD (0x00ffffff)
19 5
20/* 6#include <asm-generic/scatterlist.h>
21 * These macros should be used after a pci_map_sg call has been done
22 * to get bus addresses of each of the SG entries and their lengths.
23 * You should only work with the number of sg entries pci_map_sg
24 * returns.
25 */
26#define sg_dma_address(sg) ((sg)->dma_address)
27#ifdef CONFIG_X86_32
28# define sg_dma_len(sg) ((sg)->length)
29#else
30# define sg_dma_len(sg) ((sg)->dma_length)
31#endif
32 7
33#endif /* _ASM_X86_SCATTERLIST_H */ 8#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 4093d1ed6db2..18e496c98ff0 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -5,43 +5,6 @@
5 5
6#define COMMAND_LINE_SIZE 2048 6#define COMMAND_LINE_SIZE 2048
7 7
8#ifndef __ASSEMBLY__
9
10/*
11 * Any setup quirks to be performed?
12 */
13struct mpc_cpu;
14struct mpc_bus;
15struct mpc_oemtable;
16
17struct x86_quirks {
18 int (*arch_pre_time_init)(void);
19 int (*arch_time_init)(void);
20 int (*arch_pre_intr_init)(void);
21 int (*arch_intr_init)(void);
22 int (*arch_trap_init)(void);
23 char * (*arch_memory_setup)(void);
24 int (*mach_get_smp_config)(unsigned int early);
25 int (*mach_find_smp_config)(unsigned int reserve);
26
27 int *mpc_record;
28 int (*mpc_apic_id)(struct mpc_cpu *m);
29 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
30 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
32 unsigned short oemsize);
33 int (*setup_ioapic_ids)(void);
34};
35
36extern void x86_quirk_intr_init(void);
37
38extern void x86_quirk_trap_init(void);
39
40extern void x86_quirk_pre_time_init(void);
41extern void x86_quirk_time_init(void);
42
43#endif /* __ASSEMBLY__ */
44
45#ifdef __i386__ 8#ifdef __i386__
46 9
47#include <linux/pfn.h> 10#include <linux/pfn.h>
@@ -61,6 +24,7 @@ extern void x86_quirk_time_init(void);
61 24
62#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
63#include <asm/bootparam.h> 26#include <asm/bootparam.h>
27#include <asm/x86_init.h>
64 28
65/* Interrupt control for vSMPowered x86_64 systems */ 29/* Interrupt control for vSMPowered x86_64 systems */
66#ifdef CONFIG_X86_64 30#ifdef CONFIG_X86_64
@@ -79,11 +43,16 @@ static inline void visws_early_detect(void) { }
79static inline int is_visws_box(void) { return 0; } 43static inline int is_visws_box(void) { return 0; }
80#endif 44#endif
81 45
82extern struct x86_quirks *x86_quirks;
83extern unsigned long saved_video_mode; 46extern unsigned long saved_video_mode;
84 47
85#ifndef CONFIG_PARAVIRT 48extern void reserve_standard_io_resources(void);
86#define paravirt_post_allocator_init() do {} while (0) 49extern void i386_reserve_resources(void);
50extern void setup_default_timer_irq(void);
51
52#ifdef CONFIG_X86_MRST
53extern void x86_mrst_early_setup(void);
54#else
55static inline void x86_mrst_early_setup(void) { }
87#endif 56#endif
88 57
89#ifndef _SETUP 58#ifndef _SETUP
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h
index b51413b74971..83c05fc2de38 100644
--- a/arch/x86/include/asm/shmbuf.h
+++ b/arch/x86/include/asm/shmbuf.h
@@ -1,51 +1 @@
1#ifndef _ASM_X86_SHMBUF_H #include <asm-generic/shmbuf.h>
2#define _ASM_X86_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space on 32 bit is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 *
13 * Pad space on 64 bit is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16
17struct shmid64_ds {
18 struct ipc64_perm shm_perm; /* operation perms */
19 size_t shm_segsz; /* size of segment (bytes) */
20 __kernel_time_t shm_atime; /* last attach time */
21#ifdef __i386__
22 unsigned long __unused1;
23#endif
24 __kernel_time_t shm_dtime; /* last detach time */
25#ifdef __i386__
26 unsigned long __unused2;
27#endif
28 __kernel_time_t shm_ctime; /* last change time */
29#ifdef __i386__
30 unsigned long __unused3;
31#endif
32 __kernel_pid_t shm_cpid; /* pid of creator */
33 __kernel_pid_t shm_lpid; /* pid of last operator */
34 unsigned long shm_nattch; /* no. of current attaches */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39struct shminfo64 {
40 unsigned long shmmax;
41 unsigned long shmmin;
42 unsigned long shmmni;
43 unsigned long shmseg;
44 unsigned long shmall;
45 unsigned long __unused1;
46 unsigned long __unused2;
47 unsigned long __unused3;
48 unsigned long __unused4;
49};
50
51#endif /* _ASM_X86_SHMBUF_H */
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index ca8bf2cd0ba9..6b71384b9d8b 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -1,60 +1 @@
1#ifndef _ASM_X86_SOCKET_H #include <asm-generic/socket.h>
2#define _ASM_X86_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add :#define SO_REUSEPORT 15 */
26#define SO_PASSCRED 16
27#define SO_PEERCRED 17
28#define SO_RCVLOWAT 18
29#define SO_SNDLOWAT 19
30#define SO_RCVTIMEO 20
31#define SO_SNDTIMEO 21
32
33/* Security levels - as per NRL IPv6 - don't actually do anything */
34#define SO_SECURITY_AUTHENTICATION 22
35#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
36#define SO_SECURITY_ENCRYPTION_NETWORK 24
37
38#define SO_BINDTODEVICE 25
39
40/* Socket filtering */
41#define SO_ATTACH_FILTER 26
42#define SO_DETACH_FILTER 27
43
44#define SO_PEERNAME 28
45#define SO_TIMESTAMP 29
46#define SCM_TIMESTAMP SO_TIMESTAMP
47
48#define SO_ACCEPTCONN 30
49
50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
52#define SO_TIMESTAMPNS 35
53#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
54
55#define SO_MARK 36
56
57#define SO_TIMESTAMPING 37
58#define SCM_TIMESTAMPING SO_TIMESTAMPING
59
60#endif /* _ASM_X86_SOCKET_H */
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h
index 49cc72b5d3c9..def6d4746ee7 100644
--- a/arch/x86/include/asm/sockios.h
+++ b/arch/x86/include/asm/sockios.h
@@ -1,13 +1 @@
1#ifndef _ASM_X86_SOCKIOS_H #include <asm-generic/sockios.h>
2#define _ASM_X86_SOCKIOS_H
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif /* _ASM_X86_SOCKIOS_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index c2d742c6e15f..157517763565 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -48,7 +48,7 @@
48 * head_32 for boot CPU and setup_per_cpu_areas() for others. 48 * head_32 for boot CPU and setup_per_cpu_areas() for others.
49 */ 49 */
50#define GDT_STACK_CANARY_INIT \ 50#define GDT_STACK_CANARY_INIT \
51 [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } }, 51 [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
52 52
53/* 53/*
54 * Initialize the stackprotector canary value. 54 * Initialize the stackprotector canary value.
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void)
78#ifdef CONFIG_X86_64 78#ifdef CONFIG_X86_64
79 percpu_write(irq_stack_union.stack_canary, canary); 79 percpu_write(irq_stack_union.stack_canary, canary);
80#else 80#else
81 percpu_write(stack_canary, canary); 81 percpu_write(stack_canary.canary, canary);
82#endif 82#endif
83} 83}
84 84
85static inline void setup_stack_canary_segment(int cpu) 85static inline void setup_stack_canary_segment(int cpu)
86{ 86{
87#ifdef CONFIG_X86_32 87#ifdef CONFIG_X86_32
88 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20; 88 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
89 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); 89 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
90 struct desc_struct desc; 90 struct desc_struct desc;
91 91
92 desc = gdt_table[GDT_ENTRY_STACK_CANARY]; 92 desc = gdt_table[GDT_ENTRY_STACK_CANARY];
93 desc.base0 = canary & 0xffff; 93 set_desc_base(&desc, canary);
94 desc.base1 = (canary >> 16) & 0xff;
95 desc.base2 = (canary >> 24) & 0xff;
96 write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); 94 write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
97#endif 95#endif
98} 96}
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index c86f452256de..ae907e617181 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -65,7 +65,6 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
65 case 4: 65 case 4:
66 *(int *)to = *(int *)from; 66 *(int *)to = *(int *)from;
67 return to; 67 return to;
68
69 case 3: 68 case 3:
70 *(short *)to = *(short *)from; 69 *(short *)to = *(short *)from;
71 *((char *)to + 2) = *((char *)from + 2); 70 *((char *)to + 2) = *((char *)from + 2);
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d82f39bb7905..8d33bc5462d1 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Access to user system call parameters and results 2 * Access to user system call parameters and results
3 * 3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
5 * 5 *
6 * This copyrighted material is made available to anyone wishing to use, 6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions 7 * modify, copy, or redistribute it subject to the terms and conditions
@@ -16,13 +16,13 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/err.h> 17#include <linux/err.h>
18 18
19static inline long syscall_get_nr(struct task_struct *task, 19/*
20 struct pt_regs *regs) 20 * Only the low 32 bits of orig_ax are meaningful, so we return int.
21 * This importantly ignores the high bits on 64-bit, so comparisons
22 * sign-extend the low 32 bits.
23 */
24static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
21{ 25{
22 /*
23 * We always sign-extend a -1 value being set here,
24 * so this is always either -1L or a syscall number.
25 */
26 return regs->orig_ax; 26 return regs->orig_ax;
27} 27}
28 28
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 643c59b4bc6e..f08f97374892 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
31 "movl %P[task_canary](%[next]), %%ebx\n\t" \ 31 "movl %P[task_canary](%[next]), %%ebx\n\t" \
32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
33#define __switch_canary_oparam \ 33#define __switch_canary_oparam \
34 , [stack_canary] "=m" (per_cpu_var(stack_canary)) 34 , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
35#define __switch_canary_iparam \ 35#define __switch_canary_iparam \
36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
37#else /* CC_STACKPROTECTOR */ 37#else /* CC_STACKPROTECTOR */
@@ -150,33 +150,6 @@ do { \
150#endif 150#endif
151 151
152#ifdef __KERNEL__ 152#ifdef __KERNEL__
153#define _set_base(addr, base) do { unsigned long __pr; \
154__asm__ __volatile__ ("movw %%dx,%1\n\t" \
155 "rorl $16,%%edx\n\t" \
156 "movb %%dl,%2\n\t" \
157 "movb %%dh,%3" \
158 :"=&d" (__pr) \
159 :"m" (*((addr)+2)), \
160 "m" (*((addr)+4)), \
161 "m" (*((addr)+7)), \
162 "0" (base) \
163 ); } while (0)
164
165#define _set_limit(addr, limit) do { unsigned long __lr; \
166__asm__ __volatile__ ("movw %%dx,%1\n\t" \
167 "rorl $16,%%edx\n\t" \
168 "movb %2,%%dh\n\t" \
169 "andb $0xf0,%%dh\n\t" \
170 "orb %%dh,%%dl\n\t" \
171 "movb %%dl,%2" \
172 :"=&d" (__lr) \
173 :"m" (*(addr)), \
174 "m" (*((addr)+6)), \
175 "0" (limit) \
176 ); } while (0)
177
178#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
179#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
180 153
181extern void native_load_gs_index(unsigned); 154extern void native_load_gs_index(unsigned);
182 155
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h
index af1b70ea440f..3935b106de79 100644
--- a/arch/x86/include/asm/termbits.h
+++ b/arch/x86/include/asm/termbits.h
@@ -1,198 +1 @@
1#ifndef _ASM_X86_TERMBITS_H #include <asm-generic/termbits.h>
2#define _ASM_X86_TERMBITS_H
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000 /* non standard rate */
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif /* _ASM_X86_TERMBITS_H */
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h
index c4ee8056baca..280d78a9d966 100644
--- a/arch/x86/include/asm/termios.h
+++ b/arch/x86/include/asm/termios.h
@@ -1,114 +1 @@
-#ifndef _ASM_X86_TERMIOS_H
-#define _ASM_X86_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
-	unsigned short ws_row;
-	unsigned short ws_col;
-	unsigned short ws_xpixel;
-	unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
-	unsigned short c_iflag;		/* input mode flags */
-	unsigned short c_oflag;		/* output mode flags */
-	unsigned short c_cflag;		/* control mode flags */
-	unsigned short c_lflag;		/* local mode flags */
-	unsigned char c_line;		/* line discipline */
-	unsigned char c_cc[NCC];	/* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE	0x001
-#define TIOCM_DTR	0x002
-#define TIOCM_RTS	0x004
-#define TIOCM_ST	0x008
-#define TIOCM_SR	0x010
-#define TIOCM_CTS	0x020
-#define TIOCM_CAR	0x040
-#define TIOCM_RNG	0x080
-#define TIOCM_DSR	0x100
-#define TIOCM_CD	TIOCM_CAR
-#define TIOCM_RI	TIOCM_RNG
-#define TIOCM_OUT1	0x2000
-#define TIOCM_OUT2	0x4000
-#define TIOCM_LOOP	0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
-
-#include <asm/uaccess.h>
-
-/*	intr=^C		quit=^\		erase=del	kill=^U
-	eof=^D		vtime=\0	vmin=\1		sxtc=\0
-	start=^Q	stop=^S		susp=^Z		eol=\0
-	reprint=^R	discard=^U	werase=^W	lnext=^V
-	eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) {	\
-	unsigned short __tmp;				\
-	get_user(__tmp,&(termio)->x);			\
-	*(unsigned short *) &(termios)->x = __tmp;	\
-}
-
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
-						struct termio __user *termio)
-{
-	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
-	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
-	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
-	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
-	get_user(termios->c_line, &termio->c_line);
-	return copy_from_user(termios->c_cc, termio->c_cc, NCC);
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
-						struct ktermios *termios)
-{
-	put_user((termios)->c_iflag, &(termio)->c_iflag);
-	put_user((termios)->c_oflag, &(termio)->c_oflag);
-	put_user((termios)->c_cflag, &(termio)->c_cflag);
-	put_user((termios)->c_lflag, &(termio)->c_lflag);
-	put_user((termios)->c_line, &(termio)->c_line);
-	return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
-}
-
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
-						 struct termios2 __user *u)
-{
-	return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
-						 struct ktermios *k)
-{
-	return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
-						   struct termios __user *u)
-{
-	return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
-						   struct ktermios *k)
-{
-	return copy_to_user(u, k, sizeof(struct termios));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_TERMIOS_H */
+#include <asm-generic/termios.h>
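
For context on the helpers removed above: SET_LOW_TERMIOS_BITS overwrites only the low 16 bits of a 32-bit flag word, because struct termio carries 16-bit flags while ktermios carries 32-bit ones. A standalone sketch of that trick (illustrative only; no __user pointers or get_user(), and it relies on x86 being little-endian exactly as the kernel macro does):

    #include <stdio.h>

    /* Mimics SET_LOW_TERMIOS_BITS: store a 16-bit termio flag word into
     * the low half of a 32-bit termios flag word, preserving the high
     * bits that struct termio cannot represent. */
    static void set_low_bits(unsigned int *dst, unsigned short src)
    {
        *(unsigned short *)dst = src;   /* low half on little-endian x86 */
    }

    int main(void)
    {
        unsigned int c_iflag = 0xABCD1234;

        set_low_bits(&c_iflag, 0x5678);
        printf("%#x\n", c_iflag);       /* prints 0xabcd5678 */
        return 0;
    }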
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 6f7786aea4fc..d27d0a2fec4c 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -214,7 +214,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(percpu_read(kernel_stack) +
+	ti = (void *)(percpu_read_stable(kernel_stack) +
 		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h
index 50c733aac421..7bdec4e9b739 100644
--- a/arch/x86/include/asm/time.h
+++ b/arch/x86/include/asm/time.h
@@ -4,60 +4,7 @@
 extern void hpet_time_init(void);
 
 #include <asm/mc146818rtc.h>
-#ifdef CONFIG_X86_32
-#include <linux/efi.h>
-
-static inline unsigned long native_get_wallclock(void)
-{
-	unsigned long retval;
-
-	if (efi_enabled)
-		retval = efi_get_time();
-	else
-		retval = mach_get_cmos_time();
-
-	return retval;
-}
-
-static inline int native_set_wallclock(unsigned long nowtime)
-{
-	int retval;
-
-	if (efi_enabled)
-		retval = efi_set_rtc_mmss(nowtime);
-	else
-		retval = mach_set_rtc_mmss(nowtime);
-
-	return retval;
-}
-
-#else
-extern void native_time_init_hook(void);
-
-static inline unsigned long native_get_wallclock(void)
-{
-	return mach_get_cmos_time();
-}
-
-static inline int native_set_wallclock(unsigned long nowtime)
-{
-	return mach_set_rtc_mmss(nowtime);
-}
-
-#endif
 
 extern void time_init(void);
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else /* !CONFIG_PARAVIRT */
-
-#define get_wallclock() native_get_wallclock()
-#define set_wallclock(x) native_set_wallclock(x)
-#define choose_time_init() hpet_time_init
-
-#endif /* CONFIG_PARAVIRT */
-
-extern unsigned long __init calibrate_cpu(void);
-
 #endif /* _ASM_X86_TIME_H */
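
The wallclock helpers deleted here are not lost: this same merge routes them through the x86_platform ops table added in asm/x86_init.h at the end of this diff. A rough sketch of the new call path (the wrapper name is hypothetical):

    #include <asm/x86_init.h>

    /* Hypothetical wrapper: after this patch, callers read the RTC via
     * the ops table instead of the removed get_wallclock() macro. */
    static inline unsigned long read_wallclock(void)
    {
        return x86_platform.get_wallclock();
    }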
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 20ca9c4d4686..5469630b27f5 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -8,20 +8,16 @@
 #define TICK_SIZE (tick_nsec / 1000)
 
 unsigned long long native_sched_clock(void);
-unsigned long native_calibrate_tsc(void);
+extern int recalibrate_cpu_khz(void);
 
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 extern int timer_ack;
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-#endif /* CONFIG_X86_32 */
-extern int recalibrate_cpu_khz(void);
+#else
+# define timer_ack (0)
+#endif
 
 extern int no_timer_check;
 
-#ifndef CONFIG_PARAVIRT
-#define calibrate_tsc() native_calibrate_tsc()
-#endif
-
 /* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
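
The comment that this hunk's context cuts off continues with the cycles-to-nanoseconds derivation, which reduces to ns = cycles * (10^6 / cpu_khz); the kernel precomputes that ratio as a fixed-point multiplier so no division happens per call. A standalone sketch with illustrative values (the shift factor mirrors the one used in this header era, but treat the constants as assumptions):

    #include <stdio.h>

    #define CYC2NS_SCALE_FACTOR 10  /* fixed-point shift, 2^10 */

    int main(void)
    {
        unsigned long cpu_khz = 2400000UL;          /* assumed 2.4 GHz TSC */
        unsigned long long cyc2ns_scale =
                (1000000ULL << CYC2NS_SCALE_FACTOR) / cpu_khz;
        unsigned long long cycles = 2400000000ULL;  /* about one second */

        /* ns = cycles * (10^6 / cpu_khz), as multiply plus shift */
        printf("%llu ns\n", (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR);
        return 0;
    }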
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 066ef590d7e0..6f0695d744bf 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -116,38 +116,41 @@ extern unsigned long node_remap_size[];
 
 # define SD_CACHE_NICE_TRIES	1
 # define SD_IDLE_IDX		1
-# define SD_NEWIDLE_IDX		2
-# define SD_FORKEXEC_IDX	0
 
 #else
 
 # define SD_CACHE_NICE_TRIES	2
 # define SD_IDLE_IDX		2
-# define SD_NEWIDLE_IDX		2
-# define SD_FORKEXEC_IDX	1
 
 #endif
 
 /* sched_domains SD_NODE_INIT for NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.min_interval		= 8,			\
 	.max_interval		= 32,			\
 	.busy_factor		= 32,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= SD_CACHE_NICE_TRIES,	\
 	.busy_idx		= 3,			\
 	.idle_idx		= SD_IDLE_IDX,		\
-	.newidle_idx		= SD_NEWIDLE_IDX,	\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= SD_FORKEXEC_IDX,	\
-	.flags			= SD_LOAD_BALANCE	\
-				| SD_BALANCE_EXEC	\
-				| SD_BALANCE_FORK	\
-				| SD_WAKE_AFFINE	\
-				| SD_WAKE_BALANCE	\
-				| SD_SERIALIZE,		\
+	.newidle_idx		= 0,			\
+	.wake_idx		= 0,			\
+	.forkexec_idx		= 0,			\
+							\
+	.flags			= 1*SD_LOAD_BALANCE	\
+				| 1*SD_BALANCE_NEWIDLE	\
+				| 1*SD_BALANCE_EXEC	\
+				| 1*SD_BALANCE_FORK	\
+				| 0*SD_BALANCE_WAKE	\
+				| 1*SD_WAKE_AFFINE	\
+				| 0*SD_SHARE_CPUPOWER	\
+				| 0*SD_POWERSAVINGS_BALANCE \
+				| 0*SD_SHARE_PKG_RESOURCES \
+				| 1*SD_SERIALIZE	\
+				| 0*SD_PREFER_SIBLING	\
+				,			\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 }
 
 #ifdef CONFIG_X86_64_ACPI_NUMA
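
The rewritten SD_NODE_INIT spells every scheduler-domain flag as 1*FLAG or 0*FLAG: the whole menu of available flags stays visible in one place, disabled entries still get compile-time name checking, and toggling one is a single-character edit. A trivial standalone illustration of the idiom:

    #include <stdio.h>

    enum { F_LOAD_BALANCE = 1, F_BALANCE_WAKE = 2, F_SERIALIZE = 4 };

    int main(void)
    {
        /* F_BALANCE_WAKE is visibly present but disabled */
        int flags = 1*F_LOAD_BALANCE | 0*F_BALANCE_WAKE | 1*F_SERIALIZE;

        printf("%d\n", flags);  /* prints 5 */
        return 0;
    }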
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index bfd74c032fca..4da91ad69e0d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi;
 
 void math_error(void __user *);
 void math_emulate(struct math_emu_info *);
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long, unsigned long);
-#else
+#ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
 asmlinkage void mce_threshold_interrupt(void);
 #endif
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 38ae163cc91b..c0427295e8f5 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -48,7 +48,8 @@ static __always_inline cycles_t vget_cycles(void)
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
-int check_tsc_unstable(void);
+extern int check_tsc_unstable(void);
+extern unsigned long native_calibrate_tsc(void);
 
 /*
  * Boot-time check whether the TSCs are synchronized across
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
index 09b97745772f..df1da20f4534 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/asm/types.h
@@ -1,19 +1,11 @@
 #ifndef _ASM_X86_TYPES_H
 #define _ASM_X86_TYPES_H
 
-#include <asm-generic/int-ll64.h>
+#define dma_addr_t dma_addr_t
 
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
+#include <asm-generic/types.h>
 
-#endif /* __ASSEMBLY__ */
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
 #ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__
 
 typedef u64 dma64_addr_t;
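
The odd-looking "#define dma_addr_t dma_addr_t" above is a common header idiom: a macro that expands to itself leaves every use of the name unchanged while making the name testable with #ifdef, so a generic header can skip its fallback typedef. A minimal standalone illustration with a hypothetical type name:

    typedef unsigned long my_addr_t;    /* arch-provided definition */
    #define my_addr_t my_addr_t         /* mark the name as provided */

    /* ...what a generic header can now do... */
    #ifndef my_addr_t
    typedef unsigned int my_addr_t;     /* fallback, skipped here */
    #endif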
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5e06259e90e5..632fb44b4cb5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,7 +33,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
- * so that the we don't result in page fault and sleep.
+ * so that we don't result in page fault and sleep.
  *
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
  * we return the initial request size (1, 2 or 4), as copy_*_user should do.
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h
index 87324cf439d9..b7c29c8017f2 100644
--- a/arch/x86/include/asm/ucontext.h
+++ b/arch/x86/include/asm/ucontext.h
@@ -7,12 +7,6 @@
  * sigcontext struct (uc_mcontext).
  */
 
-struct ucontext {
-	unsigned long	  uc_flags;
-	struct ucontext  *uc_link;
-	stack_t		  uc_stack;
-	struct sigcontext uc_mcontext;
-	sigset_t	  uc_sigmask;	/* mask last for extensibility */
-};
+#include <asm-generic/ucontext.h>
 
 #endif /* _ASM_X86_UCONTEXT_H */
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 8deaada61bc8..6fb3c209a7e3 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -341,7 +341,7 @@
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #ifdef __KERNEL__
 
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index b9f3c60de5f7..8d3ad0adbc68 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
 __SYSCALL(__NR_pwritev, sys_pwritev)
 #define __NR_rt_tgsigqueueinfo			297
 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open			298
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open			298
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
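
With the rename, userspace reaches the syscall as perf_event_open (nr 298 on x86-64, 336 on i386). A minimal counting sketch against the renamed ABI, assuming a kernel and headers new enough to carry the new names:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;

        /* pid=0 (self), cpu=-1 (any), group_fd=-1, flags=0 */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            perror("perf_event_open");
        else
            close(fd);
        return 0;
    }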
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 77a68505419a..04eb6c958b9d 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -15,6 +15,7 @@
 #include <linux/numa.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
+#include <linux/io.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 #include <asm/uv/uv_mmrs.h>
@@ -258,13 +259,13 @@ static inline unsigned long *uv_global_mmr32_address(int pnode,
 static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
 					  unsigned long val)
 {
-	*uv_global_mmr32_address(pnode, offset) = val;
+	writeq(val, uv_global_mmr32_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr32(int pnode,
 						 unsigned long offset)
 {
-	return *uv_global_mmr32_address(pnode, offset);
+	return readq(uv_global_mmr32_address(pnode, offset));
 }
 
 /*
@@ -281,13 +282,13 @@ static inline unsigned long *uv_global_mmr64_address(int pnode,
 static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
 					  unsigned long val)
 {
-	*uv_global_mmr64_address(pnode, offset) = val;
+	writeq(val, uv_global_mmr64_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr64(int pnode,
 						 unsigned long offset)
 {
-	return *uv_global_mmr64_address(pnode, offset);
+	return readq(uv_global_mmr64_address(pnode, offset));
 }
 
 /*
@@ -301,22 +302,22 @@ static inline unsigned long *uv_local_mmr_address(unsigned long offset)
 
 static inline unsigned long uv_read_local_mmr(unsigned long offset)
 {
-	return *uv_local_mmr_address(offset);
+	return readq(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 {
-	*uv_local_mmr_address(offset) = val;
+	writeq(val, uv_local_mmr_address(offset));
 }
 
 static inline unsigned char uv_read_local_mmr8(unsigned long offset)
 {
-	return *((unsigned char *)uv_local_mmr_address(offset));
+	return readb(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
 {
-	*((unsigned char *)uv_local_mmr_address(offset)) = val;
+	writeb(val, uv_local_mmr_address(offset));
 }
 
 /*
@@ -422,7 +423,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 	unsigned long val;
 
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-	      ((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) |
+	      ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
 	      (vector << UVH_IPI_INT_VECTOR_SHFT);
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
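
The pattern behind the uv_hub.h changes: a plain dereference of a non-volatile pointer lets the compiler merge, reorder, or resize memory-mapped I/O accesses, while readq()/writeq()/readb()/writeb() guarantee one access of the stated width, in order, through the proper I/O accessor machinery. A kernel-style sketch of the safe form (illustrative, not taken from this patch):

    #include <linux/io.h>

    /* One 64-bit MMIO store, correctly sized and ordered. */
    static inline void mmio_set_reg64(void __iomem *reg, u64 val)
    {
        writeq(val, reg);
    }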
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index dc27a69e5d2a..3d61e204826f 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -21,6 +21,7 @@ struct vsyscall_gtod_data {
 		u32 shift;
 	} clock;
 	struct timespec wall_to_monotonic;
+	struct timespec wall_time_coarse;
 };
 extern struct vsyscall_gtod_data __vsyscall_gtod_data
 __section_vsyscall_gtod_data;
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index c11b7e100d83..e49ed6d2fd4e 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -20,7 +20,7 @@
 #ifndef ASM_X86__VMWARE_H
 #define ASM_X86__VMWARE_H
 
-extern unsigned long vmware_get_tsc_khz(void);
+extern void vmware_platform_setup(void);
 extern int vmware_platform(void);
 extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 11be5ad2e0e9..272514c2d456 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -55,6 +55,7 @@
 #define SECONDARY_EXEC_ENABLE_EPT               0x00000002
 #define SECONDARY_EXEC_ENABLE_VPID              0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
+#define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
 
 
 #define PIN_BASED_EXT_INTR_MASK                 0x00000001
@@ -351,9 +352,16 @@ enum vmcs_field {
 #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR		0
 #define VMX_EPT_EXTENT_CONTEXT			1
 #define VMX_EPT_EXTENT_GLOBAL			2
+
+#define VMX_EPT_EXECUTE_ONLY_BIT		(1ull)
+#define VMX_EPT_PAGE_WALK_4_BIT			(1ull << 6)
+#define VMX_EPTP_UC_BIT				(1ull << 8)
+#define VMX_EPTP_WB_BIT				(1ull << 14)
+#define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
 #define VMX_EPT_EXTENT_INDIVIDUAL_BIT		(1ull << 24)
 #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
+
 #define VMX_EPT_DEFAULT_GAW			3
 #define VMX_EPT_MAX_GAW				0x4
 #define VMX_EPT_MT_EPTE_SHIFT			3
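
The new VMX_EPT_* definitions are capability bits; code typically reads the VMX EPT/VPID capability MSR once and tests bits like VMX_EPT_2MB_PAGE_BIT before enabling the corresponding feature. A sketch of that consumption (the helper name and the caller-supplied MSR value are assumptions):

    /* Returns nonzero if the (caller-read) EPT/VPID capability word
     * advertises 2MB EPT pages. */
    static inline int ept_has_2mb_pages(unsigned long long ept_vpid_cap)
    {
        return !!(ept_vpid_cap & VMX_EPT_2MB_PAGE_BIT);
    }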
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
new file mode 100644
index 000000000000..2c756fd4ab0e
--- /dev/null
+++ b/arch/x86/include/asm/x86_init.h
@@ -0,0 +1,133 @@
+#ifndef _ASM_X86_PLATFORM_H
+#define _ASM_X86_PLATFORM_H
+
+#include <asm/pgtable_types.h>
+#include <asm/bootparam.h>
+
+struct mpc_bus;
+struct mpc_cpu;
+struct mpc_table;
+
+/**
+ * struct x86_init_mpparse - platform specific mpparse ops
+ * @mpc_record:		platform specific mpc record accounting
+ * @setup_ioapic_ids:	platform specific ioapic id override
+ * @mpc_apic_id:	platform specific mpc apic id assignment
+ * @smp_read_mpc_oem:	platform specific oem mpc table setup
+ * @mpc_oem_pci_bus:	platform specific pci bus setup (default NULL)
+ * @mpc_oem_bus_info:	platform specific mpc bus info
+ * @find_smp_config:	find the smp configuration
+ * @get_smp_config:	get the smp configuration
+ */
+struct x86_init_mpparse {
+	void (*mpc_record)(unsigned int mode);
+	void (*setup_ioapic_ids)(void);
+	int (*mpc_apic_id)(struct mpc_cpu *m);
+	void (*smp_read_mpc_oem)(struct mpc_table *mpc);
+	void (*mpc_oem_pci_bus)(struct mpc_bus *m);
+	void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
+	void (*find_smp_config)(unsigned int reserve);
+	void (*get_smp_config)(unsigned int early);
+};
+
+/**
+ * struct x86_init_resources - platform specific resource related ops
+ * @probe_roms:		probe BIOS roms
+ * @reserve_resources:	reserve the standard resources for the
+ *			platform
+ * @memory_setup:	platform specific memory setup
+ *
+ */
+struct x86_init_resources {
+	void (*probe_roms)(void);
+	void (*reserve_resources)(void);
+	char *(*memory_setup)(void);
+};
+
+/**
+ * struct x86_init_irqs - platform specific interrupt setup
+ * @pre_vector_init:	init code to run before interrupt vectors
+ *			are set up.
+ * @intr_init:		interrupt init code
+ * @trap_init:		platform specific trap setup
+ */
+struct x86_init_irqs {
+	void (*pre_vector_init)(void);
+	void (*intr_init)(void);
+	void (*trap_init)(void);
+};
+
+/**
+ * struct x86_init_oem - oem platform specific customizing functions
+ * @arch_setup:		platform specific architecture setup
+ * @banner:		print a platform specific banner
+ */
+struct x86_init_oem {
+	void (*arch_setup)(void);
+	void (*banner)(void);
+};
+
+/**
+ * struct x86_init_paging - platform specific paging functions
+ * @pagetable_setup_start:	platform specific pre paging_init() call
+ * @pagetable_setup_done:	platform specific post paging_init() call
+ */
+struct x86_init_paging {
+	void (*pagetable_setup_start)(pgd_t *base);
+	void (*pagetable_setup_done)(pgd_t *base);
+};
+
+/**
+ * struct x86_init_timers - platform specific timer setup
+ * @setup_percpu_clockev:	set up the per cpu clock event device for the
+ *				boot cpu
+ * @tsc_pre_init:		platform function called before TSC init
+ * @timer_init:			initialize the platform timer (default PIT/HPET)
+ */
+struct x86_init_timers {
+	void (*setup_percpu_clockev)(void);
+	void (*tsc_pre_init)(void);
+	void (*timer_init)(void);
+};
+
+/**
+ * struct x86_init_ops - functions for platform specific setup
+ *
+ */
+struct x86_init_ops {
+	struct x86_init_resources	resources;
+	struct x86_init_mpparse		mpparse;
+	struct x86_init_irqs		irqs;
+	struct x86_init_oem		oem;
+	struct x86_init_paging		paging;
+	struct x86_init_timers		timers;
+};
+
+/**
+ * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+ * @setup_percpu_clockev:	set up the per cpu clock event device
+ */
+struct x86_cpuinit_ops {
+	void (*setup_percpu_clockev)(void);
+};
+
+/**
+ * struct x86_platform_ops - platform specific runtime functions
+ * @calibrate_tsc:		calibrate TSC
+ * @get_wallclock:		get time from HW clock like RTC etc.
+ * @set_wallclock:		set time back to HW clock
+ */
+struct x86_platform_ops {
+	unsigned long (*calibrate_tsc)(void);
+	unsigned long (*get_wallclock)(void);
+	int (*set_wallclock)(unsigned long nowtime);
+};
+
+extern struct x86_init_ops x86_init;
+extern struct x86_cpuinit_ops x86_cpuinit;
+extern struct x86_platform_ops x86_platform;
+
+extern void x86_init_noop(void);
+extern void x86_init_uint_noop(unsigned int unused);
+
+#endif
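
How a platform would consume the new ops tables: assign over the defaults from early setup code, before the corresponding init stage runs. A sketch under assumed names (the my_plat_* functions are hypothetical; x86_platform and x86_init are the real hooks declared above):

    #include <linux/init.h>
    #include <asm/x86_init.h>

    static unsigned long my_plat_calibrate_tsc(void)
    {
        return 1000000UL;       /* pretend: fixed 1 GHz TSC, in kHz */
    }

    static void my_plat_timer_init(void)
    {
        /* program a platform timer instead of the default PIT/HPET */
    }

    void __init my_plat_setup(void)
    {
        x86_platform.calibrate_tsc = my_plat_calibrate_tsc;
        x86_init.timers.timer_init = my_plat_timer_init;
    }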