Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/acpi.h                  |   2
-rw-r--r--  include/asm-i386/apic.h                  |   5
-rw-r--r--  include/asm-i386/atomic.h                |   1
-rw-r--r--  include/asm-i386/bitops.h                |  26
-rw-r--r--  include/asm-i386/cache.h                 |   2
-rw-r--r--  include/asm-i386/cpufeature.h            |   1
-rw-r--r--  include/asm-i386/current.h               |   2
-rw-r--r--  include/asm-i386/dma-mapping.h           |  14
-rw-r--r--  include/asm-i386/edac.h                  |  18
-rw-r--r--  include/asm-i386/futex.h                 |   2
-rw-r--r--  include/asm-i386/i387.h                  |   8
-rw-r--r--  include/asm-i386/io.h                    |   5
-rw-r--r--  include/asm-i386/ioctl.h                 |  86
-rw-r--r--  include/asm-i386/irq.h                   |   2
-rw-r--r--  include/asm-i386/kexec.h                 |  47
-rw-r--r--  include/asm-i386/kprobes.h               |   9
-rw-r--r--  include/asm-i386/mach-default/mach_ipi.h |   4
-rw-r--r--  include/asm-i386/mutex.h                 | 136
-rw-r--r--  include/asm-i386/processor.h             |  26
-rw-r--r--  include/asm-i386/ptrace.h                |   3
-rw-r--r--  include/asm-i386/signal.h                |   1
-rw-r--r--  include/asm-i386/string.h                |   8
-rw-r--r--  include/asm-i386/system.h                |   9
-rw-r--r--  include/asm-i386/thread_info.h           |   4
-rw-r--r--  include/asm-i386/topology.h              |   1
-rw-r--r--  include/asm-i386/uaccess.h               |   8
-rw-r--r--  include/asm-i386/unistd.h                |  19
-rw-r--r--  include/asm-i386/vm86.h                  |  20
28 files changed, 324 insertions(+), 145 deletions(-)
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index df4ed323aa4d..55059abf9c95 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -179,7 +179,7 @@ extern void acpi_reserve_bootmem(void);
 
 extern u8 x86_acpiid_to_apicid[];
 
-#define ARCH_HAS_POWER_PDC_INIT	1
+#define ARCH_HAS_POWER_INIT	1
 
 #endif /*__KERNEL__*/
 
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 8c454aa58ac6..d30b8571573f 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -132,6 +132,11 @@ extern unsigned int nmi_watchdog;
 
 extern int disable_timer_pin_1;
 
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+#define ARCH_APICTIMER_STOPS_ON_C3	1
+
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 7a5472d77091..de649d3aa2d4 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -216,6 +216,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 }
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
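For context, a minimal usage sketch of the new atomic_xchg() helper; the surrounding function and variable names are illustrative assumptions, not part of the patch:

/* Hand off a one-shot token exactly once: atomic_xchg() returns the old
 * value while atomically storing the new one, so only one caller sees 1. */
static atomic_t token = ATOMIC_INIT(1);

static int try_take_token(void)
{
	return atomic_xchg(&token, 0) == 1;
}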
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 65679aca4b22..88e6ca248cd7 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -43,7 +43,7 @@ static inline void set_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 
@@ -60,7 +60,7 @@ static inline void __set_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__(
 		"btsl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 
@@ -78,7 +78,7 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 
@@ -86,7 +86,7 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__(
 		"btrl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 #define smp_mb__before_clear_bit()	barrier()
@@ -105,7 +105,7 @@ static inline void __change_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__(
 		"btcl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 
@@ -123,7 +123,7 @@ static inline void change_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 
@@ -142,7 +142,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
 
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -162,7 +162,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
 
 	__asm__(
 		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr));
 	return oldbit;
 }
@@ -182,7 +182,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
 
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -202,7 +202,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 
 	__asm__(
 		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr));
 	return oldbit;
 }
@@ -214,7 +214,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 
 	__asm__ __volatile__(
 		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -233,7 +233,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 849788710feb..615911e5bd24 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,6 +10,4 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
-
 #endif
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index ff1187e80c32..c4ec2a4d8fdf 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -69,6 +69,7 @@
 #define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
 #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
 #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5f..3cbbecd79016 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index e56c335f8ef9..9cf20cacf76e 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -6,6 +6,7 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <asm/scatterlist.h>
+#include <asm/bug.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -20,7 +21,9 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
+	if (direction == DMA_NONE)
+		BUG();
+	WARN_ON(size == 0);
 	flush_write_buffers();
 	return virt_to_phys(ptr);
 }
@@ -29,7 +32,8 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
+	if (direction == DMA_NONE)
+		BUG();
 }
 
 static inline int
@@ -38,7 +42,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
+	if (direction == DMA_NONE)
+		BUG();
+	WARN_ON(nents == 0 || sg[0].length == 0);
 
 	for (i = 0; i < nents; i++ ) {
 		BUG_ON(!sg[i].page);
@@ -150,7 +156,7 @@ dma_get_cache_alignment(void)
 {
 	/* no easy way to get cache size on all x86, so return the
 	 * maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
+	return (1 << INTERNODE_CACHE_SHIFT);
 }
 
 #define dma_is_consistent(d)	(1)
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h
new file mode 100644
index 000000000000..3e7dd0ab68ce
--- /dev/null
+++ b/include/asm-i386/edac.h
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	unsigned long *virt_addr = va;
+	u32 i;
+
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
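A minimal sketch of how an EDAC driver might call the new helper to scrub a page after a corrected error; the surrounding function is an illustrative assumption, not part of the patch:

/* Rewrite each 32-bit word of the page with a locked "add $0", which forces
 * the memory controller to recompute and store good ECC bits. */
static void example_scrub_page(void *vaddr)
{
	atomic_scrub(vaddr, PAGE_SIZE);
}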
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index e7a271d39309..44b9db806474 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -61,7 +61,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (op == FUTEX_OP_SET)
 		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
 	else {
-#if !defined(CONFIG_X86_BSWAP) && !defined(CONFIG_UML)
+#ifndef CONFIG_X86_BSWAP
 		if (boot_cpu_data.x86 == 3)
 			ret = -ENOSYS;
 		else
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 6747006743f9..152d0baa576a 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
 			 X86_FEATURE_FXSR,
 			 "m" (tsk->thread.i387.fxsave)
 			 :"memory");
-	tsk->thread_info->status &= ~TS_USEDFPU;
+	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #define __unlazy_fpu( tsk ) do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) \
 		save_init_fpu( tsk ); \
 } while (0)
 
 #define __clear_fpu( tsk ) \
 do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) { \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) { \
 		asm volatile("fnclex ; fwait"); \
-		(tsk)->thread_info->status &= ~TS_USEDFPU; \
+		task_thread_info(tsk)->status &= ~TS_USEDFPU; \
 		stts(); \
 	} \
 } while (0)
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 7babb97a02eb..03233c2ab820 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -131,6 +131,11 @@ extern void iounmap(volatile void __iomem *addr);
 extern void *bt_ioremap(unsigned long offset, unsigned long size);
 extern void bt_iounmap(void *addr, unsigned long size);
 
+/* Use early IO mappings for DMI because it's initialized early */
+#define dmi_ioremap bt_ioremap
+#define dmi_iounmap bt_iounmap
+#define dmi_alloc alloc_bootmem
+
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h
index 543f7843d553..b279fe06dfe5 100644
--- a/include/asm-i386/ioctl.h
+++ b/include/asm-i386/ioctl.h
@@ -1,85 +1 @@
-/* $Id: ioctl.h,v 1.5 1993/07/19 21:53:50 root Exp root $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMI386_IOCTL_H
-#define _ASMI386_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms.  The i386 ioctl numbering scheme doesn't really enforce
- * a type field.  De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here.  Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS	8
-#define _IOC_TYPEBITS	8
-#define _IOC_SIZEBITS	14
-#define _IOC_DIRBITS	2
-
-#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT	0
-#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE	0U
-#define _IOC_WRITE	1U
-#define _IOC_READ	2U
-
-#define _IOC(dir,type,nr,size) \
-	(((dir)  << _IOC_DIRSHIFT) | \
-	 ((type) << _IOC_TYPESHIFT) | \
-	 ((nr)   << _IOC_NRSHIFT) | \
-	 ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern unsigned int __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
-	((sizeof(t) == sizeof(t[1]) && \
-	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
-	  sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
-
-#endif /* _ASMI386_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 270f1986b19f..5169d7af456f 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -21,8 +21,6 @@ static __inline__ int irq_canonicalize(int irq)
 	return ((irq == 2) ? 9 : irq);
 }
 
-extern void release_vm86_irqs(struct task_struct *);
-
 #ifdef CONFIG_X86_LOCAL_APIC
 # define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
 #endif
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 6ed2a03e37b3..53f0e06672dc 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -2,6 +2,8 @@
 #define _I386_KEXEC_H
 
 #include <asm/fixmap.h>
+#include <asm/ptrace.h>
+#include <asm/string.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -26,8 +28,49 @@
 #define KEXEC_ARCH KEXEC_ARCH_386
 
 #define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
 
-extern note_buf_t crash_notes[];
+/* CPU does not save ss and esp on stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+ * fixes it.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+				      struct pt_regs *oldregs)
+{
+	memcpy(newregs, oldregs, sizeof(*newregs));
+	newregs->esp = (unsigned long)&(oldregs->esp);
+	__asm__ __volatile__(
+		"xorl %%eax, %%eax\n\t"
+		"movw %%ss, %%ax\n\t"
+		:"=a"(newregs->xss));
+}
+
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic otherwise just fix up the ss and esp if coming via kernel
+ * mode exception.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	if (oldregs)
+		crash_fixup_ss_esp(newregs, oldregs);
+	else {
+		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
+		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
+		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
+		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
+		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
+		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
+		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
+		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
+		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
+		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
+		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
+		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
+		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+
+		newregs->eip = (unsigned long)current_text_addr();
+	}
+}
 
 #endif /* _I386_KEXEC_H */
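A sketch of the intended call pattern for crash_setup_regs(): a panic path passes NULL so the registers are captured in place, while a kernel-mode exception path passes the trap-time pt_regs so only ss/esp need fixing. The caller shown here is an illustrative assumption, not part of the patch:

/* Hypothetical crash entry point: 'regs' is NULL when coming from panic(). */
static void example_crash_entry(struct pt_regs *regs)
{
	struct pt_regs fixed_regs;

	crash_setup_regs(&fixed_regs, regs);
	/* ... fixed_regs would then be written into the per-CPU crash note ... */
}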
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index ca916a892877..27cac050a60e 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -40,6 +40,7 @@ typedef u8 kprobe_opcode_t;
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
+#define arch_remove_kprobe(p)	do {} while (0)
 
 void kretprobe_trampoline(void);
 
@@ -76,14 +77,6 @@ static inline void restore_interrupts(struct pt_regs *regs)
 		local_irq_enable();
 }
 
-#ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
-#else			/* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
-					   unsigned long val, void *data)
-{
-	return 0;
-}
-#endif
 #endif				/* _ASM_KPROBES_H */
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index cc756a67cd63..a1d0072e36bc 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -15,11 +15,9 @@ static inline void __local_send_IPI_allbutself(int vector)
 {
 	if (no_broadcast) {
 		cpumask_t mask = cpu_online_map;
-		int this_cpu = get_cpu();
 
-		cpu_clear(this_cpu, mask);
+		cpu_clear(smp_processor_id(), mask);
 		send_IPI_mask(mask, vector);
-		put_cpu();
 	} else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
new file mode 100644
index 000000000000..9b2199e829f3
--- /dev/null
+++ b/include/asm-i386/mutex.h
@@ -0,0 +1,136 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ *  __mutex_fastpath_lock - try to take the lock by moving the count
+ *                          from 1 to a 0 value
+ *  @count: pointer of type atomic_t
+ *  @fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn)				\
+do {									\
+	unsigned int dummy;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ __volatile__(						\
+		LOCK	" decl (%%eax)	\n"				\
+		" js 2f		\n"					\
+		"1:		\n"					\
+									\
+		LOCK_SECTION_START("")					\
+		"2: call "#fail_fn"	\n"				\
+		" jmp 1b	\n"					\
+		LOCK_SECTION_END					\
+									\
+		:"=a" (dummy)						\
+		: "a" (count)						\
+		: "memory", "ecx", "edx");				\
+} while (0)
+
+
+/**
+ *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                 from 1 to a 0 value
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+			     int fastcall (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		return fail_fn(count);
+	else
+		return 0;
+}
+
+/**
+ *  __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 0
+ *
+ * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn)				\
+do {									\
+	unsigned int dummy;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ __volatile__(						\
+		LOCK	" incl (%%eax)	\n"				\
+		" jle 2f	\n"					\
+		"1:		\n"					\
+									\
+		LOCK_SECTION_START("")					\
+		"2: call "#fail_fn"	\n"				\
+		" jmp 1b	\n"					\
+		LOCK_SECTION_END					\
+									\
+		:"=a" (dummy)						\
+		: "a" (count)						\
+		: "memory", "ecx", "edx");				\
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	/*
+	 * We have two variants here. The cmpxchg based one is the best one
+	 * because it never induce a false contention state. It is included
+	 * here because architectures using the inc/dec algorithms over the
+	 * xchg ones are much more likely to support cmpxchg natively.
+	 *
+	 * If not we fall back to the spinlock based variant - that is
+	 * just as efficient (and simpler) as a 'destructive' probing of
+	 * the mutex state would be.
+	 */
+#ifdef __HAVE_ARCH_CMPXCHG
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+		return 1;
+	return 0;
+#else
+	return fail_fn(count);
+#endif
+}
+
+#endif
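For orientation, a sketch of how the generic mutex core is expected to use these fastpath hooks. The slowpath function names below are illustrative assumptions supplied by the caller, not defined by this header:

/* Fastpath is a single locked decl/incl on lock->count; the slowpath
 * (an atomic_t* taking function) is only entered on contention. */
static void example_lock(struct mutex *lock)
{
	__mutex_fastpath_lock(&lock->count, __example_lock_slowpath);
}

static void example_unlock(struct mutex *lock)
{
	__mutex_fastpath_unlock(&lock->count, __example_unlock_slowpath);
}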
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 5c96cf6dcb39..feca5d961e2b 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -61,9 +61,11 @@ struct cpuinfo_x86 {
 	int	x86_cache_size;  /* in KB - valid for CPUS which support this
 				    call  */
 	int	x86_cache_alignment;	/* In bytes */
-	int	fdiv_bug;
-	int	f00f_bug;
-	int	coma_bug;
+	char	fdiv_bug;
+	char	f00f_bug;
+	char	coma_bug;
+	char	pad0;
+	int	x86_power;
 	unsigned long loops_per_jiffy;
 	unsigned char x86_max_cores;	/* cpuid returned max cores value */
 	unsigned char booted_cores;	/* number of cores as seen by OS */
@@ -279,9 +281,11 @@ static inline void clear_in_cr4 (unsigned long mask)
 	outb((data), 0x23); \
 } while (0)
 
-static inline void serialize_cpu(void)
+/* Stop speculative execution */
+static inline void sync_core(void)
 {
-	__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+	int tmp;
+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
@@ -557,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
 	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
 })
 
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessable even if the CPU haven't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
 #define task_pt_regs(task) \
 ({ \
 	struct pt_regs *__regs__; \
-	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
 	__regs__ - 1; \
 })
 
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 7e0f2945d17d..f324c53b6f9a 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -54,6 +54,9 @@ struct pt_regs {
 #define PTRACE_GET_THREAD_AREA	  25
 #define PTRACE_SET_THREAD_AREA	  26
 
+#define PTRACE_SYSEMU		  31
+#define PTRACE_SYSEMU_SINGLESTEP  32
+
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 76524b4052ac..026fd231488d 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -218,7 +218,6 @@ static __inline__ int sigfindinword(unsigned long word)
 }
 
 struct pt_regs;
-extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
 
 #define ptrace_signal_deliver(regs, cookie) \
 	do { \
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d22065..bb5f88a27f7a 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
  */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 int d0, d1;
 __asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
  */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 9c0593b7a94e..36a92ed6a9d0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8fbf791651bf..e20e99551d71 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #endif
 
 #define free_thread_info(info)	kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
@@ -142,6 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 
@@ -154,6 +153,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 0ec27c9e8e45..d7e19eb344b7 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
 	.max_interval		= 32,			\
 	.busy_factor		= 32,			\
 	.imbalance_pct		= 125,			\
-	.cache_hot_time		= (10*1000000),		\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 3,			\
 	.idle_idx		= 1,			\
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5aa..3f1337c34208 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 		return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 		return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fe38b9a96233..597496ed2aee 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -299,8 +299,24 @@
 #define __NR_inotify_init	291
 #define __NR_inotify_add_watch	292
 #define __NR_inotify_rm_watch	293
+#define __NR_migrate_pages	294
+#define __NR_openat		295
+#define __NR_mkdirat		296
+#define __NR_mknodat		297
+#define __NR_fchownat		298
+#define __NR_futimesat		299
+#define __NR_newfstatat		300
+#define __NR_unlinkat		301
+#define __NR_renameat		302
+#define __NR_linkat		303
+#define __NR_symlinkat		304
+#define __NR_readlinkat		305
+#define __NR_fchmodat		306
+#define __NR_faccessat		307
+#define __NR_pselect6		308
+#define __NR_ppoll		309
 
-#define NR_syscalls 294
+#define NR_syscalls 310
 
 /*
  * user-visible error numbers are in the range -1 - -128:  see
@@ -416,6 +432,7 @@ __syscall_return(type,__res); \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #endif
 
 #ifdef __KERNEL_SYSCALLS__
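Until C-library wrappers exist, the *at() numbers added above can be exercised from userspace through the raw syscall interface. A minimal sketch, assuming AT_FDCWD is available from <fcntl.h> and that the kernel headers providing __NR_openat are installed (illustrative only):

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* openat relative to the current working directory behaves like open(2). */
	long fd = syscall(__NR_openat, AT_FDCWD, "README", O_RDONLY);

	return fd < 0 ? 1 : 0;
}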
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 40ec82c6914d..952fd6957380 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -16,7 +16,11 @@
 #define IF_MASK		0x00000200
 #define IOPL_MASK	0x00003000
 #define NT_MASK		0x00004000
+#ifdef CONFIG_VM86
 #define VM_MASK		0x00020000
+#else
+#define VM_MASK		0 /* ignored */
+#endif
 #define AC_MASK		0x00040000
 #define VIF_MASK	0x00080000	/* virtual interrupt flag */
 #define VIP_MASK	0x00100000	/* virtual interrupt pending */
@@ -200,9 +204,25 @@ struct kernel_vm86_struct {
  */
 };
 
+#ifdef CONFIG_VM86
+
 void handle_vm86_fault(struct kernel_vm86_regs *, long);
 int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
 
+struct task_struct;
+void release_vm86_irqs(struct task_struct *);
+
+#else
+
+#define handle_vm86_fault(a, b)
+#define release_vm86_irqs(a)
+
+static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) {
+	return 0;
+}
+
+#endif /* CONFIG_VM86 */
+
 #endif /* __KERNEL__ */
 
 #endif