Diffstat (limited to 'include/asm-x86_64')
 include/asm-x86_64/alternative.h | 21 +--------------------
 include/asm-x86_64/calgary.h     |  5 -----
 include/asm-x86_64/kprobes.h     |  1 +
 include/asm-x86_64/page.h        |  2 +-
 include/asm-x86_64/processor.h   |  6 ++++++
 include/asm-x86_64/spinlock.h    | 11 ++++-------
 include/asm-x86_64/swiotlb.h     |  2 ++
 include/asm-x86_64/unistd.h      | 11 ++++++-----
 include/asm-x86_64/unwind.h      |  1 +
 9 files changed, 22 insertions(+), 38 deletions(-)
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index aa67bfd1b3ce..a584826cc570 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__

 #include <linux/types.h>
+#include <asm/cpufeature.h>

 struct alt_instr {
        u8 *instr;              /* original instruction */
@@ -102,9 +103,6 @@ static inline void alternatives_smp_switch(int smp) {}
 /*
  * Alternative inline assembly for SMP.
  *
- * alternative_smp() takes two versions (SMP first, UP second) and is
- * for more complex stuff such as spinlocks.
- *
  * The LOCK_PREFIX macro defined here replaces the LOCK and
  * LOCK_PREFIX macros used everywhere in the source tree.
  *
@@ -124,21 +122,6 @@ static inline void alternatives_smp_switch(int smp) {}
  */

 #ifdef CONFIG_SMP
-#define alternative_smp(smpinstr, upinstr, args...)                    \
-       asm volatile ("661:\n\t" smpinstr "\n662:\n"                    \
-                     ".section .smp_altinstructions,\"a\"\n"           \
-                     "  .align 8\n"                                    \
-                     "  .quad 661b\n"            /* label */           \
-                     "  .quad 663f\n"            /* new instruction */ \
-                     "  .byte 0x66\n"            /* X86_FEATURE_UP */  \
-                     "  .byte 662b-661b\n"       /* sourcelen */       \
-                     "  .byte 664f-663f\n"       /* replacementlen */  \
-                     ".previous\n"                                     \
-                     ".section .smp_altinstr_replacement,\"awx\"\n"    \
-                     "663:\n\t" upinstr "\n"     /* replacement */     \
-                     "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
-                     ".previous" : args)
-
 #define LOCK_PREFIX \
                ".section .smp_locks,\"a\"\n"   \
                "  .align 8\n"                  \
@@ -147,8 +130,6 @@ static inline void alternatives_smp_switch(int smp) {}
                "661:\n\tlock; "

 #else /* ! CONFIG_SMP */
-#define alternative_smp(smpinstr, upinstr, args...) \
-       asm volatile (upinstr : args)
 #define LOCK_PREFIX ""
 #endif

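The LOCK_PREFIX kept above records the address of every lock-prefixed instruction in a .smp_locks section, so boot code can patch the prefix to a NOP on uniprocessor machines instead of carrying a separate UP instruction stream (which is why alternative_smp() could go). A minimal user-space sketch of the same recording trick, assuming GCC inline asm on x86-64; MY_LOCK_PREFIX and atomic_inc_demo are illustrative names, not kernel code:

        /* Sketch of the .smp_locks recording idea (not the kernel's macro). */
        #include <stdio.h>

        #define MY_LOCK_PREFIX                          \
                ".section .smp_locks,\"a\"\n"           \
                "  .align 8\n"                          \
                "  .quad 661f\n"   /* record address of the lock prefix */ \
                ".previous\n"                           \
                "661:\n\tlock; "

        static inline void atomic_inc_demo(int *v)
        {
                /* The lock prefix sits at local label 661; its address is
                 * stored in .smp_locks so a patcher could NOP it on UP. */
                asm volatile(MY_LOCK_PREFIX "incl %0" : "+m" (*v));
        }

        int main(void)
        {
                int v = 0;
                atomic_inc_demo(&v);
                printf("%d\n", v);      /* prints 1 */
                return 0;
        }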
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index fbfb50136edb..4e3919524240 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -60,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; }
 static inline void detect_calgary(void) { return; }
 #endif

-static inline unsigned int bus_to_phb(unsigned char busno)
-{
-       return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
-}
-
 #endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index d36febd9bb18..cf5317898fb0 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -47,6 +47,7 @@ typedef u8 kprobe_opcode_t;

 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
+#define flush_insn_slot(p)     do { } while (0)

 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
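flush_insn_slot() gives architecture-independent kprobes code a hook to flush the slot holding the copied-out instruction; x86-64 has coherent instruction caches, so the hook compiles to nothing. A rough sketch of the call pattern — the demo types and names below are made up, not the kernel's:

        /* Illustrative only: where a no-op flush_insn_slot() gets used. */
        #define flush_insn_slot(p)      do { } while (0)

        struct kprobe_demo {
                unsigned char *ainsn;   /* buffer for the copied instruction */
        };

        static void arch_prepare_demo(struct kprobe_demo *p,
                                      const unsigned char *src, int len)
        {
                for (int i = 0; i < len; i++)
                        p->ainsn[i] = src[i];   /* copy insn out of line */
                flush_insn_slot(p);             /* no-op on x86-64 */
        }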
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875aae40..10f346165cab 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@
 #define EXCEPTION_STACK_ORDER 0
 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

-#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
 #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

 #define IRQSTACK_ORDER 2
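The change bumps the debug (#DB) IST stack from one page to two. Assuming the usual 4 KiB x86-64 page size, the sizes work out as annotated below — a standalone restatement of the arithmetic, not the kernel header:

        #define PAGE_SIZE               4096UL
        #define EXCEPTION_STACK_ORDER   0
        #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) /* 4096 */
        #define DEBUG_STACK_ORDER       (EXCEPTION_STACK_ORDER + 1)
        #define DEBUG_STKSZ     (PAGE_SIZE << DEBUG_STACK_ORDER)     /* 8192 */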
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3b3c1217fe61..de9c3147ee4c 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -232,8 +232,14 @@ struct tss_struct {
        unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 } __attribute__((packed)) ____cacheline_aligned;

+
 extern struct cpuinfo_x86 boot_cpu_data;
 DECLARE_PER_CPU(struct tss_struct,init_tss);
+/* Save the original ist values for checking stack pointers during debugging */
+struct orig_ist {
+       unsigned long ist[7];
+};
+DECLARE_PER_CPU(struct orig_ist, orig_ist);

 #ifdef CONFIG_X86_VSMP
 #define ARCH_MIN_TASKALIGN     (1 << INTERNODE_CACHE_SHIFT)
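The new per-CPU orig_ist records the seven original IST stack pointers programmed into each CPU's TSS, so debug-time checks can tell which stack a given pointer belongs to. A hypothetical helper showing the intended use — save_orig_ist below is illustrative; the patch itself only adds the declaration:

        /* Sketch: remember each CPU's original TSS IST values. */
        struct orig_ist {
                unsigned long ist[7];
        };

        static void save_orig_ist(struct orig_ist *o,
                                  const unsigned long tss_ist[7])
        {
                for (int i = 0; i < 7; i++)
                        o->ist[i] = tss_ist[i]; /* original stack tops */
        }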
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 8d3421996f94..248a79f0eaff 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -21,7 +21,7 @@

 #define __raw_spin_lock_string \
        "\n1:\t" \
-       "lock ; decl %0\n\t" \
+       LOCK_PREFIX " ; decl %0\n\t" \
        "js 2f\n" \
        LOCK_SECTION_START("") \
        "2:\t" \
@@ -40,10 +40,7 @@

 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-       alternative_smp(
-               __raw_spin_lock_string,
-               __raw_spin_lock_string_up,
-               "=m" (lock->slock) : : "memory");
+       asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
 }

 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -125,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)

 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-       asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+       asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
 }

 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+       asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
                        : "=m" (rw->lock) : : "memory");
 }

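With alternative_smp() gone, __raw_spin_lock() emits __raw_spin_lock_string directly and relies on LOCK_PREFIX, whose lock byte is patched out at boot on uniprocessor kernels. A user-space sketch of the decl/js locking idea, assuming GCC on x86-64; demo_spinlock_t is illustrative, and the kernel keeps the spin loop in an out-of-line section rather than inline:

        typedef struct { volatile int slock; } demo_spinlock_t; /* 1 = unlocked */

        static inline void demo_spin_lock(demo_spinlock_t *lock)
        {
                asm volatile(
                        "1:\n\t"
                        "lock; decl %0\n\t"     /* try to take the lock */
                        "jns 3f\n"              /* hit 0: we own it */
                        "2:\n\t"
                        "rep; nop\n\t"          /* pause while spinning */
                        "cmpl $0, %0\n\t"
                        "jle 2b\n\t"            /* still held: keep waiting */
                        "jmp 1b\n"              /* looks free: try again */
                        "3:"
                        : "+m" (lock->slock) : : "memory");
        }

        static inline void demo_spin_unlock(demo_spinlock_t *lock)
        {
                asm volatile("movl $1, %0" : "=m" (lock->slock) : : "memory");
        }

An unlocked lock holds 1; the atomic decrement takes it to 0 for the owner, and negative values signal contention, which is what the sign-flag test catches.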
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 5f9a01805821..ba94ab3d2673 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
 extern void swiotlb_init(void);

+extern int swiotlb_force;
+
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
 #else
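swiotlb_force lets the bounce-buffer path be forced on even when devices could DMA directly; as far as I recall it is set by lib/swiotlb.c when the boot line contains swiotlb=force. A simplified user-space sketch of that kind of parsing — setup_io_tlb_npages here is modeled on, not copied from, the kernel's parser:

        #include <ctype.h>
        #include <stdlib.h>
        #include <string.h>

        int swiotlb_force;                  /* force bouncing through swiotlb */
        static unsigned long io_tlb_nslabs; /* optional slab-count argument */

        static int setup_io_tlb_npages(char *str)
        {
                if (isdigit((unsigned char)*str))
                        io_tlb_nslabs = strtoul(str, &str, 0);
                if (*str == ',')
                        ++str;
                if (!strcmp(str, "force"))
                        swiotlb_force = 1;  /* "swiotlb=force" style option */
                return 1;
        }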
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 94387c915e53..2d89d309a2a8 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -620,8 +620,6 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice)
 #define __NR_move_pages                279
 __SYSCALL(__NR_move_pages, sys_move_pages)

-#ifdef __KERNEL__
-
 #define __NR_syscall_max __NR_move_pages

 #ifndef __NO_STUBS
@@ -746,6 +744,8 @@ __syscall_return(type,__res); \

 #else /* __KERNEL_SYSCALLS__ */

+#ifdef __KERNEL__
+
 #include <linux/syscalls.h>
 #include <asm/ptrace.h>

@@ -838,9 +838,9 @@ asmlinkage long sys_rt_sigaction(int sig,
                                struct sigaction __user *oact,
                                size_t sigsetsize);

-#endif /* __ASSEMBLY__ */
+#endif

-#endif /* __NO_STUBS */
+#endif

 /*
  * "Conditional" syscalls
@@ -850,5 +850,6 @@ asmlinkage long sys_rt_sigaction(int sig,
  */
 #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")

-#endif /* __KERNEL__ */
+#endif
+
 #endif
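The cond_syscall() macro visible in the last hunk makes a syscall symbol weak and aliases it to sys_ni_syscall, so a syscall whose subsystem is configured out still links and returns -ENOSYS. A user-space demonstration of the same weak-alias trick, assuming GCC/binutils on Linux; sys_demo is a made-up name:

        #include <stdio.h>

        long sys_ni_syscall(void)
        {
                return -38;     /* -ENOSYS on Linux */
        }

        /* Equivalent expansion of cond_syscall(sys_demo). */
        asm(".weak sys_demo\n\t.set sys_demo, sys_ni_syscall");
        long sys_demo(void);

        int main(void)
        {
                /* No strong definition of sys_demo, so the weak alias wins. */
                printf("%ld\n", sys_demo());    /* prints -38 */
                return 0;
        }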
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index f3e7124effe3..1f6e9bfb569e 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -95,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 #else

 #define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)

 static inline int arch_unw_user_mode(const void *info)
 {
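The new UNW_SP stub matches UNW_PC: when the unwinder is configured out, ((void)(frame), 0) evaluates (and thereby "uses") its argument to silence unused-variable warnings, then yields constant 0. A tiny illustration — show_trace_demo is a made-up caller:

        #define UNW_PC(frame) ((void)(frame), 0)
        #define UNW_SP(frame) ((void)(frame), 0)

        static int show_trace_demo(const void *info)
        {
                /* Both accessors fold to 0, so any code guarded by them
                 * can be discarded by the compiler. */
                return UNW_PC(info) + UNW_SP(info);     /* always 0 */
        }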