author		Herbert Xu <herbert@gondor.apana.org.au>	2009-12-01 02:16:22 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2009-12-01 02:16:22 -0500
commit		838632438145ac6863377eb12d8b8eef9c55d288 (patch)
tree		fbb0757df837f3c75a99c518a3596c38daef162d /arch/x86/include
parent		9996508b3353063f2d6c48c1a28a84543d72d70b (diff)
parent		29e553631b2a0d4eebd23db630572e1027a9967a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/x86/include')
27 files changed, 128 insertions, 104 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 20d1465a2ab0..4518dc500903 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -144,7 +144,6 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 
 #else /* !CONFIG_ACPI */
 
-#define acpi_disabled 1
 #define acpi_lapic 0
 #define acpi_ioapic 0
 static inline void acpi_noirq_set(void) { }
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index ac95995b7bad..4b180897e6b5 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -31,6 +31,7 @@ extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
 extern void amd_iommu_flush_all_devices(void);
 extern void amd_iommu_shutdown(void);
+extern void amd_iommu_apply_erratum_63(u16 devid);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c6d21b18806c..474d80d3e6cc 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -66,6 +66,19 @@ static inline void default_inquire_remote_apic(int apicid)
 }
 
 /*
+ * With 82489DX we can't rely on apic feature bit
+ * retrieved via cpuid but still have to deal with
+ * such an apic chip so we assume that SMP configuration
+ * is found from MP table (64bit case uses ACPI mostly
+ * which set smp presence flag as well so we are safe
+ * to use this helper too).
+ */
+static inline bool apic_from_smp_config(void)
+{
+	return smp_found_config && !disable_apic;
+}
+
+/*
  * Basic functions accessing APICs.
  */
 #ifdef CONFIG_PARAVIRT
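The new apic_from_smp_config() helper centralizes the "SMP configuration came from the MP table (or ACPI) and the APIC is not disabled" test. A minimal caller sketch, illustrative only and not part of this merge (setup_local_APIC() is an existing kernel function used here purely as an assumed consumer):

	/* Sketch: guard an APIC setup path on the new helper. */
	static void __init demo_bringup_apic(void)
	{
		if (!apic_from_smp_config())
			return;	/* no MP table/ACPI SMP info, or APIC disabled */
		setup_local_APIC();
	}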
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 5d367caa0e36..549860d3be8f 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_CACHE_H
 #define _ASM_X86_CACHE_H
 
+#include <linux/linkage.h>
+
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
@@ -13,7 +15,7 @@
 #ifdef CONFIG_SMP
 #define __cacheline_aligned_in_smp \
 	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
-	__attribute__((__section__(".data.page_aligned")))
+	__page_aligned_data
 #endif
 #endif
 
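__cacheline_aligned_in_smp (now expressed via __page_aligned_data rather than a hand-written section attribute) is used as a declaration attribute to keep hot, shared objects away from unrelated data. A usage sketch with a made-up structure, illustrative only:

	/* Sketch: frequently written counters isolated on SMP builds. */
	struct demo_stats {
		u64 packets;
		u64 bytes;
	};
	static struct demo_stats demo_stats __cacheline_aligned_in_smp;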
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 7c5ef8b14d92..46fc474fd819 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -161,7 +161,8 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 	    "adcl $0, %0	;\n"
 	    : "=&r" (sum)
 	    : "r" (saddr), "r" (daddr),
-	      "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
+	      "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+	    : "memory");
 
 	return csum_fold(sum);
 }
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 82ceb788a981..ee1931be6593 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -312,19 +312,23 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 
 extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 
 #define cmpxchg64(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
-	if (likely(boot_cpu_data.x86 > 4)) \
-		__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \
-				(unsigned long long)(o), \
-				(unsigned long long)(n)); \
-	else \
-		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
-				(unsigned long long)(o), \
-				(unsigned long long)(n)); \
-	__ret; \
-})
+	__typeof__(*(ptr)) __old = (o); \
+	__typeof__(*(ptr)) __new = (n); \
+	alternative_io("call cmpxchg8b_emu", \
+		       "lock; cmpxchg8b (%%esi)" , \
+		       X86_FEATURE_CX8, \
+		       "=A" (__ret), \
+		       "S" ((ptr)), "0" (__old), \
+		       "b" ((unsigned int)__new), \
+		       "c" ((unsigned int)(__new>>32)) \
+		       : "memory"); \
+	__ret; })
+
+
+
 #define cmpxchg64_local(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
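With the new definition, cmpxchg64() always emits a call to cmpxchg8b_emu and lets alternative_io() patch it into a direct "lock; cmpxchg8b" on CPUs that have X86_FEATURE_CX8, instead of branching on boot_cpu_data at runtime. A caller-side sketch of the usual compare-and-swap retry loop (illustrative, not part of this patch):

	/* Sketch: lock-free 64-bit add on 32-bit x86 built on cmpxchg64(). */
	static u64 demo_add64(u64 *p, u64 delta)
	{
		u64 old, new;

		do {
			old = *p;	/* racy snapshot, validated by cmpxchg64() */
			new = old + delta;
		} while (cmpxchg64(p, old, new) != old);

		return new;
	}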
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e8de2f6f5ca5..617bd56b3070 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -288,7 +288,7 @@ static inline void load_LDT(mm_context_t *pc)
 
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
-	return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
+	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
 }
 
 static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0ee770d23d0e..6a25d5d42836 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -14,6 +14,12 @@
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
 
+#ifdef CONFIG_ISA
+# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
+#else
+# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
+#endif
+
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
@@ -124,10 +130,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
 		return memory;
 
-	if (!dev) {
+	if (!dev)
 		dev = &x86_dma_fallback_dev;
-		gfp |= GFP_DMA;
-	}
 
 	if (!is_device_dma_capable(dev))
 		return NULL;
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 5e3f2044f0d3..f5693c81a1db 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
 #endif
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3be000435fad..d83892226f73 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index b608a64c5814..f1363b72364f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -133,6 +133,8 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
 static inline void enable_p5_mce(void) {}
 #endif
 
+extern void (*x86_mce_decode_callback)(struct mce *m);
+
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, mce_dev);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a..4a2d4e0c18d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
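switch_mm() now manipulates the mm's CPU mask only through mm_cpumask() and the cpumask_* accessors instead of touching cpu_vm_mask directly. The same accessor pattern on the consumer side might look like the following sketch (illustrative only; demo_flush_one() is a hypothetical callback, not something added by this merge):

	/* Sketch: send work to every other CPU that has this mm loaded. */
	static void demo_notify_others(struct mm_struct *mm,
				       void (*demo_flush_one)(void *))
	{
		int cpu;

		for_each_cpu(cpu, mm_cpumask(mm))
			if (cpu != smp_processor_id())
				smp_call_function_single(cpu, demo_flush_one, mm, 1);
	}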
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index e63cf7d441e1..139d4c1a33a7 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -40,8 +40,7 @@ extern unsigned int nmi_watchdog;
 #define NMI_INVALID	3
 
 struct ctl_table;
-struct file;
-extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
+extern int proc_nmi_enabled(struct ctl_table *, int ,
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8aebcc41041d..efb38994859c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long f;
-
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : paravirt_type(pv_irq_ops.save_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
-	return f;
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : PV_FLAGS_ARG(f),
-		       paravirt_type(pv_irq_ops.restore_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_disable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_enable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index dd0f5b32489d..9357473c8da0 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS \
 	unsigned long __edi = __edi, __esi = __esi, \
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+		__edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
 				"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
 
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
 		      VEXTRA_CLOBBERS, \
 		      pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
+	____PVOP_VCALL(op.func, CLBR_RET_REG, \
 		      PVOP_VCALLEE_CLOBBERS, , \
 		      pre, post, ##__VA_ARGS__)
 
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index f76a162c082c..ada8c201d513 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -143,7 +143,11 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
 static inline const struct cpumask *
 cpumask_of_pcibus(const struct pci_bus *bus)
 {
-	return cpumask_of_node(__pcibus_to_node(bus));
+	int node;
+
+	node = __pcibus_to_node(bus);
+	return (node == -1) ? cpu_online_mask :
+			      cpumask_of_node(node);
 }
 #endif
 
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_event.h
index e7b7c938ae27..ad7ce3fd5065 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -1,8 +1,8 @@
-#ifndef _ASM_X86_PERF_COUNTER_H
-#define _ASM_X86_PERF_COUNTER_H
+#ifndef _ASM_X86_PERF_EVENT_H
+#define _ASM_X86_PERF_EVENT_H
 
 /*
- * Performance counter hw details:
+ * Performance event hw details:
  */
 
 #define X86_PMC_MAX_GENERIC	8
@@ -43,7 +43,7 @@
 union cpuid10_eax {
 	struct {
 		unsigned int version_id:8;
-		unsigned int num_counters:8;
+		unsigned int num_events:8;
 		unsigned int bit_width:8;
 		unsigned int mask_length:8;
 	} split;
@@ -52,7 +52,7 @@ union cpuid10_eax {
 
 union cpuid10_edx {
 	struct {
-		unsigned int num_counters_fixed:4;
+		unsigned int num_events_fixed:4;
 		unsigned int reserved:28;
 	} split;
 	unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {
 
 
 /*
- * Fixed-purpose performance counters:
+ * Fixed-purpose performance events:
  */
 
 /*
@@ -87,22 +87,22 @@ union cpuid10_edx {
 /*
  * We model BTS tracing as another fixed-mode PMC.
  *
- * We choose a value in the middle of the fixed counter range, since lower
- * values are used by actual fixed counters and higher values are used
+ * We choose a value in the middle of the fixed event range, since lower
+ * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
 #define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)
 
 
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-extern void perf_counters_lapic_init(void);
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+extern void perf_events_lapic_init(void);
 
-#define PERF_COUNTER_INDEX_OFFSET	0
+#define PERF_EVENT_INDEX_OFFSET		0
 
 #else
-static inline void init_hw_perf_counters(void)		{ }
-static inline void perf_counters_lapic_init(void)	{ }
+static inline void init_hw_perf_events(void)		{ }
+static inline void perf_events_lapic_init(void)	{ }
 #endif
 
-#endif /* _ASM_X86_PERF_COUNTER_H */
+#endif /* _ASM_X86_PERF_EVENT_H */
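The cpuid10_eax/cpuid10_edx unions (whose fields are renamed above) decode CPUID leaf 0xA, which advertises the architectural PMU. A reading sketch, illustrative only and assuming an Intel CPU that reports leaf 0xA:

	/* Sketch: decode CPUID leaf 0xA into the unions defined above. */
	static void demo_report_pmu(void)
	{
		union cpuid10_eax eax;
		union cpuid10_edx edx;
		unsigned int ebx, ecx;

		cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);
		pr_info("PMU v%u: %u generic events, %u bits wide, %u fixed\n",
			eax.split.version_id, eax.split.num_events,
			eax.split.bit_width, edx.split.num_events_fixed);
	}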
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7b467bf3c680..d1f4a760be23 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -277,6 +277,7 @@ static inline pteval_t pte_flags(pte_t pte)
 typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
+extern void set_nx(void);
 extern int nx_enabled;
 
 #define pgprot_writecombine	pgprot_writecombine
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c3429e8b2424..c9786480f0fe 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -1000,7 +1000,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
-#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
+extern unsigned long KSTK_ESP(struct task_struct *task);
 #endif /* CONFIG_X86_64 */
 
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 6a84ed166aec..1e796782cd7b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	smp_ops.send_call_func_ipi(mask);
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index c86f452256de..ae907e617181 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -65,7 +65,6 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
 	case 4:
 		*(int *)to = *(int *)from;
 		return to;
-
 	case 3:
 		*(short *)to = *(short *)from;
 		*((char *)to + 2) = *((char *)from + 2);
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d82f39bb7905..8d33bc5462d1 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -1,7 +1,7 @@
 /*
  * Access to user system call parameters and results
  *
- * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
  *
  * This copyrighted material is made available to anyone wishing to use,
  * modify, copy, or redistribute it subject to the terms and conditions
@@ -16,13 +16,13 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 
-static inline long syscall_get_nr(struct task_struct *task,
-				  struct pt_regs *regs)
+/*
+ * Only the low 32 bits of orig_ax are meaningful, so we return int.
+ * This importantly ignores the high bits on 64-bit, so comparisons
+ * sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 {
-	/*
-	 * We always sign-extend a -1 value being set here,
-	 * so this is always either -1L or a syscall number.
-	 */
 	return regs->orig_ax;
 }
 
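Returning int rather than long means callers work with only the meaningful low 32 bits of orig_ax, and a comparison against -1 behaves the same on 32-bit and 64-bit. A caller sketch (illustrative, not part of this change):

	/* Sketch: report which syscall, if any, a traced task is executing. */
	static void demo_trace_syscall(struct task_struct *task, struct pt_regs *regs)
	{
		int nr = syscall_get_nr(task, regs);

		if (nr == -1)	/* not inside a syscall */
			return;
		pr_debug("%s is in syscall %d\n", task->comm, nr);
	}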
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 6f0695d744bf..40e37b10c6c0 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -143,6 +143,7 @@ extern unsigned long node_remap_size[];
 				| 1*SD_BALANCE_FORK \
 				| 0*SD_BALANCE_WAKE \
 				| 1*SD_WAKE_AFFINE \
+				| 0*SD_PREFER_LOCAL \
 				| 0*SD_SHARE_CPUPOWER \
 				| 0*SD_POWERSAVINGS_BALANCE \
 				| 0*SD_SHARE_PKG_RESOURCES \
@@ -165,21 +166,11 @@ static inline int numa_node_id(void)
 	return 0;
 }
 
-static inline int cpu_to_node(int cpu)
-{
-	return 0;
-}
-
 static inline int early_cpu_to_node(int cpu)
 {
 	return 0;
 }
 
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return cpu_online_mask;
-}
-
 static inline void setup_node_to_cpumask_map(void) { }
 
 #endif
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5e06259e90e5..632fb44b4cb5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,7 +33,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * Copy data from kernel space to user space. Caller must check
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
- * so that the we don't result in page fault and sleep.
+ * so that we don't result in page fault and sleep.
  *
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
  * we return the initial request size (1, 2 or 4), as copy_*_user should do.
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 8deaada61bc8..6fb3c209a7e3 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -341,7 +341,7 @@
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #ifdef __KERNEL__
 
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index b9f3c60de5f7..8d3ad0adbc68 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
 __SYSCALL(__NR_pwritev, sys_pwritev)
 #define __NR_rt_tgsigqueueinfo			297
 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open			298
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open			298
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
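Both syscall tables now expose the renamed __NR_perf_event_open. A minimal user-space sketch of invoking it, following the generic perf_event_open calling convention rather than anything specific to this diff (illustrative only):

	/* Sketch: count user-space instructions executed by the calling thread. */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... run the workload to be measured ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
		read(fd, &count, sizeof(count));
		printf("instructions: %lld\n", count);
		close(fd);
		return 0;
	}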
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 77a68505419a..d1414af98559 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -15,9 +15,12 @@
 #include <linux/numa.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
+#include <linux/io.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 #include <asm/uv/uv_mmrs.h>
+#include <asm/irq_vectors.h>
+#include <asm/io_apic.h>
 
 
 /*
@@ -113,7 +116,7 @@
 /*
  * The largest possible NASID of a C or M brick (+ 2)
  */
-#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_NODES * 2)
+#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)
 
 struct uv_scir_s {
	struct timer_list timer;
@@ -229,6 +232,20 @@ static inline unsigned long uv_gpa(void *v)
 	return uv_soc_phys_ram_to_gpa(__pa(v));
 }
 
+/* gnode -> pnode */
+static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
+{
+	return gpa >> uv_hub_info->m_val;
+}
+
+/* gpa -> pnode */
+static inline int uv_gpa_to_pnode(unsigned long gpa)
+{
+	unsigned long n_mask = (1UL << uv_hub_info->n_val) - 1;
+
+	return uv_gpa_to_gnode(gpa) & n_mask;
+}
+
 /* pnode, offset --> socket virtual */
 static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
 {
@@ -258,13 +275,13 @@ static inline unsigned long *uv_global_mmr32_address(int pnode,
 static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
				 unsigned long val)
 {
-	*uv_global_mmr32_address(pnode, offset) = val;
+	writeq(val, uv_global_mmr32_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr32(int pnode,
						 unsigned long offset)
 {
-	return *uv_global_mmr32_address(pnode, offset);
+	return readq(uv_global_mmr32_address(pnode, offset));
 }
 
 /*
@@ -281,13 +298,13 @@ static inline unsigned long *uv_global_mmr64_address(int pnode,
 static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
				 unsigned long val)
 {
-	*uv_global_mmr64_address(pnode, offset) = val;
+	writeq(val, uv_global_mmr64_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr64(int pnode,
						 unsigned long offset)
 {
-	return *uv_global_mmr64_address(pnode, offset);
+	return readq(uv_global_mmr64_address(pnode, offset));
 }
 
 /*
@@ -301,22 +318,22 @@ static inline unsigned long *uv_local_mmr_address(unsigned long offset)
 
 static inline unsigned long uv_read_local_mmr(unsigned long offset)
 {
-	return *uv_local_mmr_address(offset);
+	return readq(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 {
-	*uv_local_mmr_address(offset) = val;
+	writeq(val, uv_local_mmr_address(offset));
 }
 
 static inline unsigned char uv_read_local_mmr8(unsigned long offset)
 {
-	return *((unsigned char *)uv_local_mmr_address(offset));
+	return readb(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
 {
-	*((unsigned char *)uv_local_mmr_address(offset)) = val;
+	writeb(val, uv_local_mmr_address(offset));
 }
 
 /*
@@ -420,9 +437,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 {
 	unsigned long val;
+	unsigned long dmode = dest_Fixed;
+
+	if (vector == NMI_VECTOR)
+		dmode = dest_NMI;
 
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-			((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) |
+			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+			(dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
 			(vector << UVH_IPI_INT_VECTOR_SHFT);
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
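The UV MMR helpers above now go through readq()/writeq() (and readb()/writeb() for the 8-bit case) rather than dereferencing the mapped address directly, so every access uses the kernel's MMIO accessors. A small sketch built only on the helpers shown above (illustrative; the offset and bit are made up):

	/* Sketch: read-modify-write one bit of a (hypothetical) global MMR. */
	static void demo_set_mmr_bit(int pnode, unsigned long offset, int bit)
	{
		unsigned long val = uv_read_global_mmr64(pnode, offset);

		uv_write_global_mmr64(pnode, offset, val | (1UL << bit));
	}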