Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/cpufeature.h  |  3
 arch/x86/include/asm/iommu.h       |  1
 arch/x86/include/asm/kvm_emulate.h |  2
 arch/x86/include/asm/kvm_host.h    | 90
 arch/x86/include/asm/mrst.h        |  2
 arch/x86/include/asm/percpu.h      | 28
 6 files changed, 75 insertions(+), 51 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index f3444f700f3..17c5d4bdee5 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -197,7 +197,10 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
 #define X86_FEATURE_SMEP	(9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2	(9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
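
The new word-9 bits are queried like any other CPUID-derived feature flag. A minimal sketch using boot_cpu_has(); the helper functions called here are hypothetical and not part of this diff:

	#include <asm/cpufeature.h>

	/* Illustrative only: pick a BMI2/AVX2 fast path when the CPU reports it. */
	if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
		use_avx2_bmi2_path();	/* hypothetical helper */
	else
		use_generic_path();	/* hypothetical helper */
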
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 345c99cef15..dffc38ee625 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,6 +5,7 @@ extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
+extern int iommu_group_mf;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index a026507893e..ab4092e3214 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
 	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
 	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
 	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+	int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
 	void (*halt)(struct x86_emulate_ctxt *ctxt);
 	void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
 	int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
 #endif
 
 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4973f4dab9..52d6640a5ca 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -16,10 +16,12 @@
 #include <linux/mmu_notifier.h>
 #include <linux/tracepoint.h>
 #include <linux/cpumask.h>
+#include <linux/irq_work.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>
+#include <linux/perf_event.h>
 
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
@@ -31,6 +33,8 @@
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+
 #define KVM_MMIO_SIZE 16
 
 #define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
 	 * One bit set per slot which has memory
 	 * in this shadow page.
 	 */
-	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+	DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
 	bool unsync;
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
 	int clear_spte_count;
 #endif
 
-	struct rcu_head rcu;
-};
+	int write_flooding_count;
 
-struct kvm_pv_mmu_op_buffer {
-	void *ptr;
-	unsigned len;
-	unsigned processed;
-	char buf[512] __aligned(sizeof(long));
-};
+	struct rcu_head rcu;
+};
 
 struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
+enum pmc_type {
+	KVM_PMC_GP = 0,
+	KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+	enum pmc_type type;
+	u8 idx;
+	u64 counter;
+	u64 eventsel;
+	struct perf_event *perf_event;
+	struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+	unsigned nr_arch_gp_counters;
+	unsigned nr_arch_fixed_counters;
+	unsigned available_event_types;
+	u64 fixed_ctr_ctrl;
+	u64 global_ctrl;
+	u64 global_status;
+	u64 global_ovf_ctrl;
+	u64 counter_bitmask[2];
+	u64 global_ctrl_mask;
+	u8 version;
+	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
+	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+	struct irq_work irq_work;
+	u64 reprogram_pmi;
+};
+
 struct kvm_vcpu_arch {
 	/*
 	 * rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/* only needed in kvm_pv_mmu_op() path, but it's hot so
-	 * put it here to avoid allocation */
-	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
-
 	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
 	struct kvm_mmu_memory_cache mmu_page_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
-	gfn_t last_pt_write_gfn;
-	int last_pt_write_count;
-	u64 *last_pte_updated;
-	gfn_t last_pte_gfn;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
 	unsigned access;
 	gfn_t mmio_gfn;
 
+	struct kvm_pmu pmu;
+
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
 
@@ -444,6 +467,9 @@ struct kvm_vcpu_arch {
 
 	cpumask_var_t wbinvd_dirty_mask;
 
+	unsigned long last_retry_eip;
+	unsigned long last_retry_addr;
+
 	struct {
 		bool halted;
 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
-	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+			       struct kvm_memory_slot *slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-		  gpa_t addr, unsigned long *ret);
 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
+#define EMULTYPE_RETRY		    (1 << 3)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 			    int emulation_type, void *insn, int insn_len);
 
@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes,
-		       bool guest_initiated);
+		       const u8 *new, int bytes);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 			      struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+{
+	return gpa;
+}
+
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
 #endif /* _ASM_X86_KVM_HOST_H */
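
The new EMULTYPE_RETRY flag is just another bit in the emulation_type argument of x86_emulate_instruction(), whose prototype is unchanged by this diff. A minimal sketch of a caller; the wrapper function shown is hypothetical, not part of this diff:

	/* Sketch: ask the emulator to retry the faulting instruction after the
	 * offending shadow page has been unprotected. */
	static int retry_emulation(struct kvm_vcpu *vcpu, unsigned long cr2)	/* hypothetical */
	{
		return x86_emulate_instruction(vcpu, cr2, EMULTYPE_RETRY, NULL, 0);
	}
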
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h
index 93f79094c22..0a0a9546043 100644
--- a/arch/x86/include/asm/mrst.h
+++ b/arch/x86/include/asm/mrst.h
@@ -67,7 +67,7 @@ extern struct console early_mrst_console;
 extern void mrst_early_console_init(void);
 
 extern struct console early_hsu_console;
-extern void hsu_early_console_init(void);
+extern void hsu_early_console_init(const char *);
 
 extern void intel_scu_devices_create(void);
 extern void intel_scu_devices_destroy(void);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 529bf07e806..7a11910a63c 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -414,22 +414,6 @@ do { \
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
 
-#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
@@ -445,9 +429,6 @@ do { \
 #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -464,7 +445,6 @@ do { \
 
 #define __this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
 #define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
-#define irqsafe_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
 #endif /* CONFIG_X86_CMPXCHG64 */
 
 /*
@@ -492,13 +472,6 @@ do { \
 #define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-
 /*
  * Pretty complex macro to generate cmpxchg16 instruction. The instruction
  * is not supported on early AMD64 processors so we must be able to emulate
@@ -521,7 +494,6 @@ do { \
 
 #define __this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
 #define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
-#define irqsafe_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
 
 #endif
 
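
The irqsafe_cpu_* size variants can go because on x86 the this_cpu_* operations compile to single instructions and are therefore already interrupt-safe; callers simply switch to the generic accessors. A minimal sketch of the replacement pattern, with a hypothetical counter that is not part of this diff:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, pkt_count);	/* hypothetical per-cpu counter */

	static void count_packet(void)
	{
		/* formerly irqsafe_cpu_add(pkt_count, 1); irq-safe on x86 either way */
		this_cpu_add(pkt_count, 1);
	}
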