path: root/include/asm-x86
author		Ingo Molnar <mingo@elte.hu>	2008-08-15 10:16:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-15 10:16:15 -0400
commit		1a10390708d675ebf1a2f5e169a5165626afbd88 (patch)
tree		d9ee7d10abd65e580fb74152a501089f51174225 /include/asm-x86
parent		239bd83104ec6bcba90221d8b0973d2565142ef8 (diff)
parent		b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff)
Merge branch 'linus' into x86/cpu
Diffstat (limited to 'include/asm-x86')
-rw-r--r--	include/asm-x86/dma-mapping.h	22
-rw-r--r--	include/asm-x86/efi.h		 2
-rw-r--r--	include/asm-x86/hw_irq.h	12
-rw-r--r--	include/asm-x86/i387.h		32
-rw-r--r--	include/asm-x86/iommu.h		 2
-rw-r--r--	include/asm-x86/irq_vectors.h	10
-rw-r--r--	include/asm-x86/kvm_host.h	 6
7 files changed, 61 insertions(+), 25 deletions(-)
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 0eaa9bf6011f..ad9cd6d49bfc 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -249,25 +249,5 @@ static inline int dma_get_cache_alignment(void)
 
 #define dma_is_consistent(d, h)	(1)
 
-#ifdef CONFIG_X86_32
-# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-struct dma_coherent_mem {
-	void		*virt_base;
-	u32		device_base;
-	int		size;
-	int		flags;
-	unsigned long	*bitmap;
-};
-
-extern int
-dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-			    dma_addr_t device_addr, size_t size, int flags);
-
-extern void
-dma_release_declared_memory(struct device *dev);
-
-extern void *
-dma_mark_declared_memory_occupied(struct device *dev,
-				  dma_addr_t device_addr, size_t size);
-#endif /* CONFIG_X86_32 */
+#include <asm-generic/dma-coherent.h>
 #endif
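For context, the dma_declare_coherent_memory() interface that this hunk moves behind asm-generic/dma-coherent.h is normally used by a platform driver along the following lines. This is a hedged sketch, not a caller from the tree: the device, bus/device addresses and size are made-up placeholders.

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Hypothetical probe: hand 64 KB of device-local memory at an invented
 * bus address to the per-device coherent allocator so that later
 * dma_alloc_coherent() calls on this device are served from it. */
static int example_probe(struct device *dev)
{
	if (!dma_declare_coherent_memory(dev, 0xfd000000, 0xfd000000,
					 0x10000, DMA_MEMORY_MAP))
		return -ENOMEM;
	return 0;
}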
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 7ed2bd7a7f51..d4f2b0abe929 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2),	\
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
 
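The only change here is the sparse __iomem annotation on efi_ioremap()'s return type, which signals that callers should go through the MMIO accessors instead of dereferencing the pointer. A minimal hedged sketch (the physical address and register offset are invented):

/* Hedged sketch: read one 32-bit register out of an EFI runtime region. */
static u32 example_read_efi_reg(unsigned long phys)
{
	void __iomem *base = efi_ioremap(phys, PAGE_SIZE);

	if (!base)
		return 0;
	return readl(base + 0x10);	/* MMIO accessor, not *base */
}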
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 77ba51df5668..edd0b95f14d0 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -98,9 +98,17 @@ extern void (*const interrupt[NR_IRQS])(void);
 #else
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern spinlock_t vector_lock;
 #endif
-extern void setup_vector_irq(int cpu);
+
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+extern void __setup_vector_irq(int cpu);
+#else
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+static inline void __setup_vector_irq(int cpu) {}
+#endif
 
 #endif /* !ASSEMBLY_ */
 
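The exported vector_lock spinlock is replaced by accessor functions, with no-op stubs when the io-apic/64-bit code is not built. A hedged sketch of the intended call pattern (the function name below is illustrative, not taken from this diff):

/* Illustrative caller: CPU bring-up code that populates the per-CPU
 * vector_irq[] table serializes against vector allocation by
 * bracketing the update with the new helpers. */
static void example_setup_vectors_for_cpu(int cpu)
{
	lock_vector_lock();
	__setup_vector_irq(cpu);	/* fill vector_irq[] for this CPU */
	unlock_vector_lock();
}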
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 96fa8449ff11..6d3b21063419 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -13,6 +13,7 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/regset.h>
+#include <linux/hardirq.h>
 #include <asm/asm.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
@@ -236,6 +237,37 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+/*
+ * Some instructions, like VIA's PadLock instructions, generate a spurious
+ * DNA fault but don't modify SSE registers. These instructions also get
+ * used from interrupt context. To prevent such kernel instructions in
+ * interrupt context from interacting wrongly with other user/kernel fpu
+ * usage, they should only be used within irq_ts_save/restore().
+ */
+static inline int irq_ts_save(void)
+{
+	/*
+	 * If we are in process context, we are ok to take a spurious DNA fault.
+	 * Otherwise, doing clts() in process context requires pre-emption to
+	 * be disabled or some heavy lifting like kernel_fpu_begin().
+	 */
+	if (!in_interrupt())
+		return 0;
+
+	if (read_cr0() & X86_CR0_TS) {
+		clts();
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline void irq_ts_restore(int TS_state)
+{
+	if (TS_state)
+		stts();
+}
+
 #ifdef CONFIG_X86_64
 
 static inline void save_init_fpu(struct task_struct *tsk)
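The comment above spells out the intended usage; a hedged sketch of a caller running in interrupt context follows. example_padlock_op() is a made-up stand-in for a PadLock-style wrapper, not a real kernel function.

extern void example_padlock_op(void);	/* made-up TS-sensitive operation */

/* Hedged sketch: wrap a TS-sensitive instruction so that a spurious DNA
 * fault cannot fire while running in interrupt context. */
static void example_use_padlock_in_irq(void)
{
	int ts_state = irq_ts_save();	/* clears CR0.TS only if needed */

	example_padlock_op();
	irq_ts_restore(ts_state);	/* re-sets TS only if we cleared it */
}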
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index ecc8061904a9..5f888cc5be49 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -7,6 +7,8 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
+extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
+
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
 extern int gart_iommu_aperture_allowed;
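For reference, a helper with this signature conventionally counts the pages spanned by a byte range; the body below illustrates that convention and is not a quote of the kernel's implementation.

#include <linux/kernel.h>	/* roundup() */
#include <asm/page.h>		/* PAGE_MASK, PAGE_SIZE, PAGE_SHIFT */

/* Illustrative only: number of pages covered by [addr, addr + len). */
static unsigned long example_iommu_num_pages(unsigned long addr,
					     unsigned long len)
{
	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

	return size >> PAGE_SHIFT;
}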
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 90b1d1f12f08..b95d167b7fb2 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -109,7 +109,15 @@
 #define LAST_VM86_IRQ		15
 #define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 
-#if !defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_64
+# if NR_CPUS < MAX_IO_APICS
+# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
+# else
+# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+# define NR_IRQ_VECTORS NR_IRQS
+
+#elif !defined(CONFIG_X86_VOYAGER)
 
 # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
 
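To make the new 64-bit sizing concrete, assuming NR_VECTORS = 256 and MAX_IO_APICS = 128 (plausible values for this tree, stated here as assumptions rather than read from this diff):

	NR_CPUS = 8   (< MAX_IO_APICS):  NR_IRQS = 256 + 32 * 8   = 512
	NR_CPUS = 256 (>= MAX_IO_APICS): NR_IRQS = 256 + 32 * 128 = 4352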
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index bc34dc21f178..0f3c53114614 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/mmu_notifier.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -251,6 +252,7 @@ struct kvm_vcpu_arch {
 		gfn_t gfn;	/* presumed gfn during guest pte update */
 		pfn_t pfn;	/* pfn corresponding to that gfn */
 		int largepage;
+		unsigned long mmu_seq;
 	} update_pte;
 
 	struct i387_fxsave_struct host_fx_image;
@@ -729,4 +731,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 	KVM_EX_ENTRY " 666b, 667b \n\t" \
 	".popsection"
 
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+
 #endif
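KVM_ARCH_WANT_MMU_NOTIFIER tells the generic KVM code to register an mmu_notifier and forward its callbacks to the two new arch hooks. A hedged sketch of that glue is shown below; the kvm->mmu_notifier field and the callback shape are assumptions based on the usual mmu-notifier pattern, not quoted from this diff.

#include <linux/kvm_host.h>
#include <linux/mmu_notifier.h>

/* Hedged sketch: invalidate-page callback forwarding to the arch hook. */
static void example_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
						 struct mm_struct *mm,
						 unsigned long address)
{
	struct kvm *kvm = container_of(mn, struct kvm, mmu_notifier);

	kvm_unmap_hva(kvm, address);	/* zap sptes mapping this hva */
}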