author     Steven Whitehouse <swhiteho@redhat.com>    2006-03-31 15:34:58 -0500
committer  Steven Whitehouse <swhiteho@redhat.com>    2006-03-31 15:34:58 -0500
commit     86579dd06deecfa6ac88d5e84e4d63c397cd6f6d (patch)
tree       b4475d3ccde53015ad84a06e4e55e64591171b75 /include/asm-i386
parent     7ea9ea832212c4a755650f7c7cc1ff0b63292a41 (diff)
parent     a0f067802576d4eb4c65d40b8ee7d6ea3c81dd61 (diff)
Merge branch 'master'
Diffstat (limited to 'include/asm-i386')
37 files changed, 358 insertions, 278 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 55059abf9c95..20f523954218 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -103,6 +103,12 @@ __acpi_release_global_lock (unsigned int *lock) | |||
103 | :"=r"(n_hi), "=r"(n_lo) \ | 103 | :"=r"(n_hi), "=r"(n_lo) \ |
104 | :"0"(n_hi), "1"(n_lo)) | 104 | :"0"(n_hi), "1"(n_lo)) |
105 | 105 | ||
106 | #ifdef CONFIG_X86_IO_APIC | ||
107 | extern void check_acpi_pci(void); | ||
108 | #else | ||
109 | static inline void check_acpi_pci(void) { } | ||
110 | #endif | ||
111 | |||
106 | #ifdef CONFIG_ACPI | 112 | #ifdef CONFIG_ACPI |
107 | extern int acpi_lapic; | 113 | extern int acpi_lapic; |
108 | extern int acpi_ioapic; | 114 | extern int acpi_ioapic; |
@@ -128,8 +134,6 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); | |||
128 | extern int skip_ioapic_setup; | 134 | extern int skip_ioapic_setup; |
129 | extern int acpi_skip_timer_override; | 135 | extern int acpi_skip_timer_override; |
130 | 136 | ||
131 | extern void check_acpi_pci(void); | ||
132 | |||
133 | static inline void disable_ioapic_setup(void) | 137 | static inline void disable_ioapic_setup(void) |
134 | { | 138 | { |
135 | skip_ioapic_setup = 1; | 139 | skip_ioapic_setup = 1; |
@@ -142,8 +146,6 @@ static inline int ioapic_setup_disabled(void) | |||
142 | 146 | ||
143 | #else | 147 | #else |
144 | static inline void disable_ioapic_setup(void) { } | 148 | static inline void disable_ioapic_setup(void) { } |
145 | static inline void check_acpi_pci(void) { } | ||
146 | |||
147 | #endif | 149 | #endif |
148 | 150 | ||
149 | static inline void acpi_noirq_set(void) { acpi_noirq = 1; } | 151 | static inline void acpi_noirq_set(void) { acpi_noirq = 1; } |
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
new file mode 100644
index 000000000000..e201decea0c9
--- /dev/null
+++ b/include/asm-i386/alternative.h
@@ -0,0 +1,129 @@ | |||
1 | #ifndef _I386_ALTERNATIVE_H | ||
2 | #define _I386_ALTERNATIVE_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | struct alt_instr { | ||
7 | u8 *instr; /* original instruction */ | ||
8 | u8 *replacement; | ||
9 | u8 cpuid; /* cpuid bit set for replacement */ | ||
10 | u8 instrlen; /* length of original instruction */ | ||
11 | u8 replacementlen; /* length of new instruction, <= instrlen */ | ||
12 | u8 pad; | ||
13 | }; | ||
14 | |||
15 | extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); | ||
16 | |||
17 | struct module; | ||
18 | extern void alternatives_smp_module_add(struct module *mod, char *name, | ||
19 | void *locks, void *locks_end, | ||
20 | void *text, void *text_end); | ||
21 | extern void alternatives_smp_module_del(struct module *mod); | ||
22 | extern void alternatives_smp_switch(int smp); | ||
23 | |||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Alternative instructions for different CPU types or capabilities. | ||
28 | * | ||
29 | * This allows to use optimized instructions even on generic binary | ||
30 | * kernels. | ||
31 | * | ||
32 | * length of oldinstr must be longer or equal the length of newinstr | ||
33 | * It can be padded with nops as needed. | ||
34 | * | ||
35 | * For non barrier like inlines please define new variants | ||
36 | * without volatile and memory clobber. | ||
37 | */ | ||
38 | #define alternative(oldinstr, newinstr, feature) \ | ||
39 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
40 | ".section .altinstructions,\"a\"\n" \ | ||
41 | " .align 4\n" \ | ||
42 | " .long 661b\n" /* label */ \ | ||
43 | " .long 663f\n" /* new instruction */ \ | ||
44 | " .byte %c0\n" /* feature bit */ \ | ||
45 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
46 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
47 | ".previous\n" \ | ||
48 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
49 | "663:\n\t" newinstr "\n664:\n" /* replacement */\ | ||
50 | ".previous" :: "i" (feature) : "memory") | ||
51 | |||
52 | /* | ||
53 | * Alternative inline assembly with input. | ||
54 | * | ||
55 | * Pecularities: | ||
56 | * No memory clobber here. | ||
57 | * Argument numbers start with 1. | ||
58 | * Best is to use constraints that are fixed size (like (%1) ... "r") | ||
59 | * If you use variable sized constraints like "m" or "g" in the | ||
60 | * replacement maake sure to pad to the worst case length. | ||
61 | */ | ||
62 | #define alternative_input(oldinstr, newinstr, feature, input...) \ | ||
63 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
64 | ".section .altinstructions,\"a\"\n" \ | ||
65 | " .align 4\n" \ | ||
66 | " .long 661b\n" /* label */ \ | ||
67 | " .long 663f\n" /* new instruction */ \ | ||
68 | " .byte %c0\n" /* feature bit */ \ | ||
69 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
70 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
71 | ".previous\n" \ | ||
72 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
73 | "663:\n\t" newinstr "\n664:\n" /* replacement */\ | ||
74 | ".previous" :: "i" (feature), ##input) | ||
75 | |||
76 | /* | ||
77 | * Alternative inline assembly for SMP. | ||
78 | * | ||
79 | * alternative_smp() takes two versions (SMP first, UP second) and is | ||
80 | * for more complex stuff such as spinlocks. | ||
81 | * | ||
82 | * The LOCK_PREFIX macro defined here replaces the LOCK and | ||
83 | * LOCK_PREFIX macros used everywhere in the source tree. | ||
84 | * | ||
85 | * SMP alternatives use the same data structures as the other | ||
86 | * alternatives and the X86_FEATURE_UP flag to indicate the case of a | ||
87 | * UP system running a SMP kernel. The existing apply_alternatives() | ||
88 | * works fine for patching a SMP kernel for UP. | ||
89 | * | ||
90 | * The SMP alternative tables can be kept after boot and contain both | ||
91 | * UP and SMP versions of the instructions to allow switching back to | ||
92 | * SMP at runtime, when hotplugging in a new CPU, which is especially | ||
93 | * useful in virtualized environments. | ||
94 | * | ||
95 | * The very common lock prefix is handled as special case in a | ||
96 | * separate table which is a pure address list without replacement ptr | ||
97 | * and size information. That keeps the table sizes small. | ||
98 | */ | ||
99 | |||
100 | #ifdef CONFIG_SMP | ||
101 | #define alternative_smp(smpinstr, upinstr, args...) \ | ||
102 | asm volatile ("661:\n\t" smpinstr "\n662:\n" \ | ||
103 | ".section .smp_altinstructions,\"a\"\n" \ | ||
104 | " .align 4\n" \ | ||
105 | " .long 661b\n" /* label */ \ | ||
106 | " .long 663f\n" /* new instruction */ \ | ||
107 | " .byte 0x68\n" /* X86_FEATURE_UP */ \ | ||
108 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
109 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
110 | ".previous\n" \ | ||
111 | ".section .smp_altinstr_replacement,\"awx\"\n" \ | ||
112 | "663:\n\t" upinstr "\n" /* replacement */ \ | ||
113 | "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \ | ||
114 | ".previous" : args) | ||
115 | |||
116 | #define LOCK_PREFIX \ | ||
117 | ".section .smp_locks,\"a\"\n" \ | ||
118 | " .align 4\n" \ | ||
119 | " .long 661f\n" /* address */ \ | ||
120 | ".previous\n" \ | ||
121 | "661:\n\tlock; " | ||
122 | |||
123 | #else /* ! CONFIG_SMP */ | ||
124 | #define alternative_smp(smpinstr, upinstr, args...) \ | ||
125 | asm volatile (upinstr : args) | ||
126 | #define LOCK_PREFIX "" | ||
127 | #endif | ||
128 | |||
129 | #endif /* _I386_ALTERNATIVE_H */ | ||
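For context on how the new header is consumed: the typical caller of alternative() supplies a conservative instruction sequence for old CPUs and lets boot-time patching substitute the faster one where the feature bit is set. A minimal editorial sketch follows, modelled on the i386 memory-barrier style of use; the exact macro names and feature bits vary by kernel version, and ASM_NOP4 is assumed to come from <asm/processor.h>.

/* Editorial sketch, not part of this commit: patched to "mfence" on CPUs
 * that advertise X86_FEATURE_XMM2, otherwise the locked add runs.        */
#define example_mb()  alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)

/* alternative_input() variant: a prefetch hint that degrades to NOPs.    */
#define example_prefetch(x) \
        alternative_input(ASM_NOP4, "prefetchnta (%1)", X86_FEATURE_XMM, "r" (x))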
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
index 28b96a6fb9fa..238cf4275b96 100644
--- a/include/asm-i386/arch_hooks.h
+++ b/include/asm-i386/arch_hooks.h
@@ -24,4 +24,7 @@ extern void trap_init_hook(void); | |||
24 | extern void time_init_hook(void); | 24 | extern void time_init_hook(void); |
25 | extern void mca_nmi_hook(void); | 25 | extern void mca_nmi_hook(void); |
26 | 26 | ||
27 | extern int setup_early_printk(char *); | ||
28 | extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); | ||
29 | |||
27 | #endif | 30 | #endif |
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..22d80ece95cb 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,12 +10,6 @@ | |||
10 | * resource counting etc.. | 10 | * resource counting etc.. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifdef CONFIG_SMP | ||
14 | #define LOCK "lock ; " | ||
15 | #else | ||
16 | #define LOCK "" | ||
17 | #endif | ||
18 | |||
19 | /* | 13 | /* |
20 | * Make sure gcc doesn't try to be clever and move things around | 14 | * Make sure gcc doesn't try to be clever and move things around |
21 | * on us. We need to use _exactly_ the address the user gave us, | 15 | * on us. We need to use _exactly_ the address the user gave us, |
@@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t; | |||
52 | static __inline__ void atomic_add(int i, atomic_t *v) | 46 | static __inline__ void atomic_add(int i, atomic_t *v) |
53 | { | 47 | { |
54 | __asm__ __volatile__( | 48 | __asm__ __volatile__( |
55 | LOCK "addl %1,%0" | 49 | LOCK_PREFIX "addl %1,%0" |
56 | :"=m" (v->counter) | 50 | :"=m" (v->counter) |
57 | :"ir" (i), "m" (v->counter)); | 51 | :"ir" (i), "m" (v->counter)); |
58 | } | 52 | } |
@@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v) | |||
67 | static __inline__ void atomic_sub(int i, atomic_t *v) | 61 | static __inline__ void atomic_sub(int i, atomic_t *v) |
68 | { | 62 | { |
69 | __asm__ __volatile__( | 63 | __asm__ __volatile__( |
70 | LOCK "subl %1,%0" | 64 | LOCK_PREFIX "subl %1,%0" |
71 | :"=m" (v->counter) | 65 | :"=m" (v->counter) |
72 | :"ir" (i), "m" (v->counter)); | 66 | :"ir" (i), "m" (v->counter)); |
73 | } | 67 | } |
@@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | |||
86 | unsigned char c; | 80 | unsigned char c; |
87 | 81 | ||
88 | __asm__ __volatile__( | 82 | __asm__ __volatile__( |
89 | LOCK "subl %2,%0; sete %1" | 83 | LOCK_PREFIX "subl %2,%0; sete %1" |
90 | :"=m" (v->counter), "=qm" (c) | 84 | :"=m" (v->counter), "=qm" (c) |
91 | :"ir" (i), "m" (v->counter) : "memory"); | 85 | :"ir" (i), "m" (v->counter) : "memory"); |
92 | return c; | 86 | return c; |
@@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | |||
101 | static __inline__ void atomic_inc(atomic_t *v) | 95 | static __inline__ void atomic_inc(atomic_t *v) |
102 | { | 96 | { |
103 | __asm__ __volatile__( | 97 | __asm__ __volatile__( |
104 | LOCK "incl %0" | 98 | LOCK_PREFIX "incl %0" |
105 | :"=m" (v->counter) | 99 | :"=m" (v->counter) |
106 | :"m" (v->counter)); | 100 | :"m" (v->counter)); |
107 | } | 101 | } |
@@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v) | |||
115 | static __inline__ void atomic_dec(atomic_t *v) | 109 | static __inline__ void atomic_dec(atomic_t *v) |
116 | { | 110 | { |
117 | __asm__ __volatile__( | 111 | __asm__ __volatile__( |
118 | LOCK "decl %0" | 112 | LOCK_PREFIX "decl %0" |
119 | :"=m" (v->counter) | 113 | :"=m" (v->counter) |
120 | :"m" (v->counter)); | 114 | :"m" (v->counter)); |
121 | } | 115 | } |
@@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v) | |||
133 | unsigned char c; | 127 | unsigned char c; |
134 | 128 | ||
135 | __asm__ __volatile__( | 129 | __asm__ __volatile__( |
136 | LOCK "decl %0; sete %1" | 130 | LOCK_PREFIX "decl %0; sete %1" |
137 | :"=m" (v->counter), "=qm" (c) | 131 | :"=m" (v->counter), "=qm" (c) |
138 | :"m" (v->counter) : "memory"); | 132 | :"m" (v->counter) : "memory"); |
139 | return c != 0; | 133 | return c != 0; |
@@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) | |||
152 | unsigned char c; | 146 | unsigned char c; |
153 | 147 | ||
154 | __asm__ __volatile__( | 148 | __asm__ __volatile__( |
155 | LOCK "incl %0; sete %1" | 149 | LOCK_PREFIX "incl %0; sete %1" |
156 | :"=m" (v->counter), "=qm" (c) | 150 | :"=m" (v->counter), "=qm" (c) |
157 | :"m" (v->counter) : "memory"); | 151 | :"m" (v->counter) : "memory"); |
158 | return c != 0; | 152 | return c != 0; |
@@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) | |||
172 | unsigned char c; | 166 | unsigned char c; |
173 | 167 | ||
174 | __asm__ __volatile__( | 168 | __asm__ __volatile__( |
175 | LOCK "addl %2,%0; sets %1" | 169 | LOCK_PREFIX "addl %2,%0; sets %1" |
176 | :"=m" (v->counter), "=qm" (c) | 170 | :"=m" (v->counter), "=qm" (c) |
177 | :"ir" (i), "m" (v->counter) : "memory"); | 171 | :"ir" (i), "m" (v->counter) : "memory"); |
178 | return c; | 172 | return c; |
@@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
195 | /* Modern 486+ processor */ | 189 | /* Modern 486+ processor */ |
196 | __i = i; | 190 | __i = i; |
197 | __asm__ __volatile__( | 191 | __asm__ __volatile__( |
198 | LOCK "xaddl %0, %1;" | 192 | LOCK_PREFIX "xaddl %0, %1;" |
199 | :"=r"(i) | 193 | :"=r"(i) |
200 | :"m"(v->counter), "0"(i)); | 194 | :"m"(v->counter), "0"(i)); |
201 | return i + __i; | 195 | return i + __i; |
@@ -231,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) | |||
231 | ({ \ | 225 | ({ \ |
232 | int c, old; \ | 226 | int c, old; \ |
233 | c = atomic_read(v); \ | 227 | c = atomic_read(v); \ |
234 | while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ | 228 | for (;;) { \ |
229 | if (unlikely(c == (u))) \ | ||
230 | break; \ | ||
231 | old = atomic_cmpxchg((v), c, c + (a)); \ | ||
232 | if (likely(old == c)) \ | ||
233 | break; \ | ||
235 | c = old; \ | 234 | c = old; \ |
235 | } \ | ||
236 | c != (u); \ | 236 | c != (u); \ |
237 | }) | 237 | }) |
238 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 238 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
@@ -242,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) | |||
242 | 242 | ||
243 | /* These are x86-specific, used by some header files */ | 243 | /* These are x86-specific, used by some header files */ |
244 | #define atomic_clear_mask(mask, addr) \ | 244 | #define atomic_clear_mask(mask, addr) \ |
245 | __asm__ __volatile__(LOCK "andl %0,%1" \ | 245 | __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ |
246 | : : "r" (~(mask)),"m" (*addr) : "memory") | 246 | : : "r" (~(mask)),"m" (*addr) : "memory") |
247 | 247 | ||
248 | #define atomic_set_mask(mask, addr) \ | 248 | #define atomic_set_mask(mask, addr) \ |
249 | __asm__ __volatile__(LOCK "orl %0,%1" \ | 249 | __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ |
250 | : : "r" (mask),"m" (*(addr)) : "memory") | 250 | : : "r" (mask),"m" (*(addr)) : "memory") |
251 | 251 | ||
252 | /* Atomic operations are already serializing on x86 */ | 252 | /* Atomic operations are already serializing on x86 */ |
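The reworked atomic_add_unless() above reads more easily when the macro body is written out as a function; the following is an editorial paraphrase of the loop shown in the hunk, not kernel code.

/* Editorial paraphrase of the atomic_add_unless() loop above. */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (c == u)                 /* hit the forbidden value: do nothing      */
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (old == c)               /* no concurrent update: add done           */
                        break;
                c = old;                    /* lost a race: retry with the fresh value  */
        }
        return c != u;                      /* non-zero iff the add was performed       */
}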
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 88e6ca248cd7..08deaeee6be9 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
10 | #include <asm/alternative.h> | ||
10 | 11 | ||
11 | /* | 12 | /* |
12 | * These have to be done with inline assembly: that way the bit-setting | 13 | * These have to be done with inline assembly: that way the bit-setting |
@@ -16,12 +17,6 @@ | |||
16 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 17 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #ifdef CONFIG_SMP | ||
20 | #define LOCK_PREFIX "lock ; " | ||
21 | #else | ||
22 | #define LOCK_PREFIX "" | ||
23 | #endif | ||
24 | |||
25 | #define ADDR (*(volatile long *) addr) | 20 | #define ADDR (*(volatile long *) addr) |
26 | 21 | ||
27 | /** | 22 | /** |
@@ -367,28 +362,9 @@ static inline unsigned long ffz(unsigned long word) | |||
367 | return word; | 362 | return word; |
368 | } | 363 | } |
369 | 364 | ||
370 | #define fls64(x) generic_fls64(x) | ||
371 | |||
372 | #ifdef __KERNEL__ | 365 | #ifdef __KERNEL__ |
373 | 366 | ||
374 | /* | 367 | #include <asm-generic/bitops/sched.h> |
375 | * Every architecture must define this function. It's the fastest | ||
376 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
377 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
378 | * bits is cleared. | ||
379 | */ | ||
380 | static inline int sched_find_first_bit(const unsigned long *b) | ||
381 | { | ||
382 | if (unlikely(b[0])) | ||
383 | return __ffs(b[0]); | ||
384 | if (unlikely(b[1])) | ||
385 | return __ffs(b[1]) + 32; | ||
386 | if (unlikely(b[2])) | ||
387 | return __ffs(b[2]) + 64; | ||
388 | if (b[3]) | ||
389 | return __ffs(b[3]) + 96; | ||
390 | return __ffs(b[4]) + 128; | ||
391 | } | ||
392 | 368 | ||
393 | /** | 369 | /** |
394 | * ffs - find first bit set | 370 | * ffs - find first bit set |
@@ -426,42 +402,22 @@ static inline int fls(int x) | |||
426 | return r+1; | 402 | return r+1; |
427 | } | 403 | } |
428 | 404 | ||
429 | /** | 405 | #include <asm-generic/bitops/hweight.h> |
430 | * hweightN - returns the hamming weight of a N-bit word | ||
431 | * @x: the word to weigh | ||
432 | * | ||
433 | * The Hamming Weight of a number is the total number of bits set in it. | ||
434 | */ | ||
435 | |||
436 | #define hweight32(x) generic_hweight32(x) | ||
437 | #define hweight16(x) generic_hweight16(x) | ||
438 | #define hweight8(x) generic_hweight8(x) | ||
439 | 406 | ||
440 | #endif /* __KERNEL__ */ | 407 | #endif /* __KERNEL__ */ |
441 | 408 | ||
409 | #include <asm-generic/bitops/fls64.h> | ||
410 | |||
442 | #ifdef __KERNEL__ | 411 | #ifdef __KERNEL__ |
443 | 412 | ||
444 | #define ext2_set_bit(nr,addr) \ | 413 | #include <asm-generic/bitops/ext2-non-atomic.h> |
445 | __test_and_set_bit((nr),(unsigned long*)addr) | 414 | |
446 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 415 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
447 | test_and_set_bit((nr),(unsigned long*)addr) | 416 | test_and_set_bit((nr),(unsigned long*)addr) |
448 | #define ext2_clear_bit(nr, addr) \ | ||
449 | __test_and_clear_bit((nr),(unsigned long*)addr) | ||
450 | #define ext2_clear_bit_atomic(lock,nr, addr) \ | 417 | #define ext2_clear_bit_atomic(lock,nr, addr) \ |
451 | test_and_clear_bit((nr),(unsigned long*)addr) | 418 | test_and_clear_bit((nr),(unsigned long*)addr) |
452 | #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) | 419 | |
453 | #define ext2_find_first_zero_bit(addr, size) \ | 420 | #include <asm-generic/bitops/minix.h> |
454 | find_first_zero_bit((unsigned long*)addr, size) | ||
455 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
456 | find_next_zero_bit((unsigned long*)addr, size, off) | ||
457 | |||
458 | /* Bitmap functions for the minix filesystem. */ | ||
459 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) | ||
460 | #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) | ||
461 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) | ||
462 | #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) | ||
463 | #define minix_find_first_zero_bit(addr,size) \ | ||
464 | find_first_zero_bit((void*)addr,size) | ||
465 | 421 | ||
466 | #endif /* __KERNEL__ */ | 422 | #endif /* __KERNEL__ */ |
467 | 423 | ||
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 615911e5bd24..ca15c9c665cf 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,4 +10,6 @@ | |||
10 | #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) | 10 | #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) |
11 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | 11 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) |
12 | 12 | ||
13 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
14 | |||
13 | #endif | 15 | #endif |
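__read_mostly places a variable in the .data.read_mostly section so that data which is written rarely does not share cache lines with hot, frequently written data. A minimal usage sketch (the variable name is made up):

/* Editorial sketch: set once during boot, read in hot paths afterwards. */
static int example_feature_enabled __read_mostly;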
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index c4ec2a4d8fdf..5c0b5876b931 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -70,6 +70,7 @@ | |||
70 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | 70 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ |
71 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | 71 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ |
72 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | 72 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | ||
73 | 74 | ||
74 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 75 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
75 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 76 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
diff --git a/include/asm-i386/dmi.h b/include/asm-i386/dmi.h
new file mode 100644
index 000000000000..38d4eeb7fc7e
--- /dev/null
+++ b/include/asm-i386/dmi.h
@@ -0,0 +1,11 @@ | |||
1 | #ifndef _ASM_DMI_H | ||
2 | #define _ASM_DMI_H 1 | ||
3 | |||
4 | #include <asm/io.h> | ||
5 | |||
6 | /* Use early IO mappings for DMI because it's initialized early */ | ||
7 | #define dmi_ioremap bt_ioremap | ||
8 | #define dmi_iounmap bt_iounmap | ||
9 | #define dmi_alloc alloc_bootmem | ||
10 | |||
11 | #endif | ||
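These macros simply point the generic DMI scanning code at the early i386 I/O mappings. A rough, heavily simplified sketch of the intended use (the real caller is drivers/firmware/dmi_scan.c; error handling and table parsing are omitted, and the helper name is made up):

/* Editorial sketch: map the BIOS segment early, scan it, release it. */
static void __init example_dmi_scan(void)
{
        char __iomem *p = dmi_ioremap(0xF0000, 0x10000);    /* bt_ioremap() underneath */

        if (p) {
                /* ... search for the "_DMI_" anchor and walk the tables ... */
                dmi_iounmap(p, 0x10000);                     /* bt_iounmap() underneath */
        }
}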
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 44b9db806474..7b8ceefd010f 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -104,5 +104,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
104 | return ret; | 104 | return ret; |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline int | ||
108 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
109 | { | ||
110 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
111 | return -EFAULT; | ||
112 | |||
113 | __asm__ __volatile__( | ||
114 | "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" | ||
115 | |||
116 | "2: .section .fixup, \"ax\" \n" | ||
117 | "3: mov %2, %0 \n" | ||
118 | " jmp 2b \n" | ||
119 | " .previous \n" | ||
120 | |||
121 | " .section __ex_table, \"a\" \n" | ||
122 | " .align 8 \n" | ||
123 | " .long 1b,3b \n" | ||
124 | " .previous \n" | ||
125 | |||
126 | : "=a" (oldval), "=m" (*uaddr) | ||
127 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | ||
128 | : "memory" | ||
129 | ); | ||
130 | |||
131 | return oldval; | ||
132 | } | ||
133 | |||
107 | #endif | 134 | #endif |
108 | #endif | 135 | #endif |
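futex_atomic_cmpxchg_inatomic() returns the value actually found in the user word, or -EFAULT through the exception-fixup path if the access faulted. A hedged sketch of the caller's pattern (the helper and its names are illustrative, not the generic futex code verbatim):

/* Editorial sketch of how a caller consumes the return value. */
static int example_futex_cmpxchg(int __user *uaddr, int oldval, int newval)
{
        int curval = futex_atomic_cmpxchg_inatomic(uaddr, oldval, newval);

        if (curval == -EFAULT)
                return -EFAULT;         /* user page was not accessible          */
        if (curval != oldval)
                return -EAGAIN;         /* futex word changed under us: retry    */
        return 0;                       /* word atomically replaced with newval  */
}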
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 03233c2ab820..79670bb4b0c7 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -219,23 +219,11 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int | |||
219 | */ | 219 | */ |
220 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | 220 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) |
221 | 221 | ||
222 | #define isa_readb(a) readb(__ISA_IO_base + (a)) | ||
223 | #define isa_readw(a) readw(__ISA_IO_base + (a)) | ||
224 | #define isa_readl(a) readl(__ISA_IO_base + (a)) | ||
225 | #define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) | ||
226 | #define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) | ||
227 | #define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) | ||
228 | #define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) | ||
229 | #define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) | ||
230 | #define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) | ||
231 | |||
232 | |||
233 | /* | 222 | /* |
234 | * Again, i386 does not require mem IO specific function. | 223 | * Again, i386 does not require mem IO specific function. |
235 | */ | 224 | */ |
236 | 225 | ||
237 | #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) | 226 | #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d)) |
238 | #define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d)) | ||
239 | 227 | ||
240 | /** | 228 | /** |
241 | * check_signature - find BIOS signatures | 229 | * check_signature - find BIOS signatures |
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 316138e89910..96d0828ce096 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -17,11 +17,9 @@ struct die_args { | |||
17 | int signr; | 17 | int signr; |
18 | }; | 18 | }; |
19 | 19 | ||
20 | /* Note - you should never unregister because that can race with NMIs. | 20 | extern int register_die_notifier(struct notifier_block *); |
21 | If you really want to do it first unregister - then synchronize_sched - then free. | 21 | extern int unregister_die_notifier(struct notifier_block *); |
22 | */ | 22 | extern struct atomic_notifier_head i386die_chain; |
23 | int register_die_notifier(struct notifier_block *nb); | ||
24 | extern struct notifier_block *i386die_chain; | ||
25 | 23 | ||
26 | 24 | ||
27 | /* Grossly misnamed. */ | 25 | /* Grossly misnamed. */ |
@@ -51,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str, | |||
51 | .trapnr = trap, | 49 | .trapnr = trap, |
52 | .signr = sig | 50 | .signr = sig |
53 | }; | 51 | }; |
54 | return notifier_call_chain(&i386die_chain, val, &args); | 52 | return atomic_notifier_call_chain(&i386die_chain, val, &args); |
55 | } | 53 | } |
56 | 54 | ||
57 | #endif | 55 | #endif |
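Clients of the die chain still register through register_die_notifier(); only the chain type behind it changed. A hedged example client (handler and variable names are made up):

/* Editorial sketch of a die-notifier client. */
static int example_die_event(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_OOPS)
                printk(KERN_ERR "example: oops at eip %08lx\n", args->regs->eip);
        return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
        .notifier_call = example_die_event,
};

/* in module init:  register_die_notifier(&example_die_nb);
 * and, now that it exists, unregister_die_notifier() on exit. */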
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index a0d2d74a7dda..57d157c5cf89 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -34,6 +34,7 @@ struct pt_regs; | |||
34 | 34 | ||
35 | typedef u8 kprobe_opcode_t; | 35 | typedef u8 kprobe_opcode_t; |
36 | #define BREAKPOINT_INSTRUCTION 0xcc | 36 | #define BREAKPOINT_INSTRUCTION 0xcc |
37 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | ||
37 | #define MAX_INSN_SIZE 16 | 38 | #define MAX_INSN_SIZE 16 |
38 | #define MAX_STACK_SIZE 64 | 39 | #define MAX_STACK_SIZE 64 |
39 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | 40 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ |
@@ -51,6 +52,11 @@ void kretprobe_trampoline(void); | |||
51 | struct arch_specific_insn { | 52 | struct arch_specific_insn { |
52 | /* copy of the original instruction */ | 53 | /* copy of the original instruction */ |
53 | kprobe_opcode_t *insn; | 54 | kprobe_opcode_t *insn; |
55 | /* | ||
56 | * If this flag is not 0, this kprobe can be boost when its | ||
57 | * post_handler and break_handler is not set. | ||
58 | */ | ||
59 | int boostable; | ||
54 | }; | 60 | }; |
55 | 61 | ||
56 | struct prev_kprobe { | 62 | struct prev_kprobe { |
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56211414fc95..6312c3e79814 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -18,7 +18,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) | |||
18 | { | 18 | { |
19 | do_timer(regs); | 19 | do_timer(regs); |
20 | #ifndef CONFIG_SMP | 20 | #ifndef CONFIG_SMP |
21 | update_process_times(user_mode(regs)); | 21 | update_process_times(user_mode_vm(regs)); |
22 | #endif | 22 | #endif |
23 | /* | 23 | /* |
24 | * In the SMP case we use the local APIC timer interrupt to do the | 24 | * In the SMP case we use the local APIC timer interrupt to do the |
diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h
index b749aa44a86f..31eb5de6f3dc 100644
--- a/include/asm-i386/mach-default/mach_time.h
+++ b/include/asm-i386/mach-default/mach_time.h
@@ -82,21 +82,8 @@ static inline int mach_set_rtc_mmss(unsigned long nowtime) | |||
82 | static inline unsigned long mach_get_cmos_time(void) | 82 | static inline unsigned long mach_get_cmos_time(void) |
83 | { | 83 | { |
84 | unsigned int year, mon, day, hour, min, sec; | 84 | unsigned int year, mon, day, hour, min, sec; |
85 | int i; | ||
86 | 85 | ||
87 | /* The Linux interpretation of the CMOS clock register contents: | 86 | do { |
88 | * When the Update-In-Progress (UIP) flag goes from 1 to 0, the | ||
89 | * RTC registers show the second which has precisely just started. | ||
90 | * Let's hope other operating systems interpret the RTC the same way. | ||
91 | */ | ||
92 | /* read RTC exactly on falling edge of update flag */ | ||
93 | for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ | ||
94 | if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) | ||
95 | break; | ||
96 | for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ | ||
97 | if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) | ||
98 | break; | ||
99 | do { /* Isn't this overkill ? UIP above should guarantee consistency */ | ||
100 | sec = CMOS_READ(RTC_SECONDS); | 87 | sec = CMOS_READ(RTC_SECONDS); |
101 | min = CMOS_READ(RTC_MINUTES); | 88 | min = CMOS_READ(RTC_MINUTES); |
102 | hour = CMOS_READ(RTC_HOURS); | 89 | hour = CMOS_READ(RTC_HOURS); |
@@ -104,16 +91,18 @@ static inline unsigned long mach_get_cmos_time(void) | |||
104 | mon = CMOS_READ(RTC_MONTH); | 91 | mon = CMOS_READ(RTC_MONTH); |
105 | year = CMOS_READ(RTC_YEAR); | 92 | year = CMOS_READ(RTC_YEAR); |
106 | } while (sec != CMOS_READ(RTC_SECONDS)); | 93 | } while (sec != CMOS_READ(RTC_SECONDS)); |
107 | if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | 94 | |
108 | { | 95 | if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { |
109 | BCD_TO_BIN(sec); | 96 | BCD_TO_BIN(sec); |
110 | BCD_TO_BIN(min); | 97 | BCD_TO_BIN(min); |
111 | BCD_TO_BIN(hour); | 98 | BCD_TO_BIN(hour); |
112 | BCD_TO_BIN(day); | 99 | BCD_TO_BIN(day); |
113 | BCD_TO_BIN(mon); | 100 | BCD_TO_BIN(mon); |
114 | BCD_TO_BIN(year); | 101 | BCD_TO_BIN(year); |
115 | } | 102 | } |
116 | if ((year += 1900) < 1970) | 103 | |
104 | year += 1900; | ||
105 | if (year < 1970) | ||
117 | year += 100; | 106 | year += 100; |
118 | 107 | ||
119 | return mktime(year, mon, day, hour, min, sec); | 108 | return mktime(year, mon, day, hour, min, sec); |
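The rewritten tail of mach_get_cmos_time() is easier to check against a concrete value; an editorial worked example of the year windowing:

/* Editorial worked example of the year handling above: the RTC stores a
 * two-digit year, so in 2006 CMOS_READ(RTC_YEAR) yields 6 after BCD_TO_BIN;
 * adding 1900 gives 1906, which is < 1970, so the windowing adds 100 and
 * the final value passed to mktime() is 2006.
 */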
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 4a0637a3e208..99f66be240be 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -30,7 +30,8 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | |||
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline int es7000_check_dsdt() | 33 | #ifdef CONFIG_ACPI |
34 | static inline int es7000_check_dsdt(void) | ||
34 | { | 35 | { |
35 | struct acpi_table_header *header = NULL; | 36 | struct acpi_table_header *header = NULL; |
36 | if(!acpi_get_table_header_early(ACPI_DSDT, &header)) | 37 | if(!acpi_get_table_header_early(ACPI_DSDT, &header)) |
@@ -54,6 +55,11 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
54 | } | 55 | } |
55 | return 0; | 56 | return 0; |
56 | } | 57 | } |
57 | 58 | #else | |
59 | static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | #endif | ||
58 | 64 | ||
59 | #endif /* __ASM_MACH_MPPARSE_H */ | 65 | #endif /* __ASM_MACH_MPPARSE_H */ |
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 92d638fc8b11..95568e6ca91c 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -11,7 +11,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) | |||
11 | 11 | ||
12 | do_timer(regs); | 12 | do_timer(regs); |
13 | #ifndef CONFIG_SMP | 13 | #ifndef CONFIG_SMP |
14 | update_process_times(user_mode(regs)); | 14 | update_process_times(user_mode_vm(regs)); |
15 | #endif | 15 | #endif |
16 | /* | 16 | /* |
17 | * In the SMP case we use the local APIC timer interrupt to do the | 17 | * In the SMP case we use the local APIC timer interrupt to do the |
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index ae510e5d0d78..eaf518098981 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -5,7 +5,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs) | |||
5 | { | 5 | { |
6 | do_timer(regs); | 6 | do_timer(regs); |
7 | #ifndef CONFIG_SMP | 7 | #ifndef CONFIG_SMP |
8 | update_process_times(user_mode(regs)); | 8 | update_process_times(user_mode_vm(regs)); |
9 | #endif | 9 | #endif |
10 | 10 | ||
11 | voyager_timer_interrupt(regs); | 11 | voyager_timer_interrupt(regs); |
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 74f595d80579..e33e9f9e4c66 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -70,8 +70,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
70 | #endif | 70 | #endif |
71 | } | 71 | } |
72 | 72 | ||
73 | #define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn) | ||
74 | |||
75 | /* | 73 | /* |
76 | * Following are macros that each numa implmentation must define. | 74 | * Following are macros that each numa implmentation must define. |
77 | */ | 75 | */ |
@@ -86,21 +84,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
86 | /* XXX: FIXME -- wli */ | 84 | /* XXX: FIXME -- wli */ |
87 | #define kern_addr_valid(kaddr) (0) | 85 | #define kern_addr_valid(kaddr) (0) |
88 | 86 | ||
89 | #define pfn_to_page(pfn) \ | ||
90 | ({ \ | ||
91 | unsigned long __pfn = pfn; \ | ||
92 | int __node = pfn_to_nid(__pfn); \ | ||
93 | &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ | ||
94 | }) | ||
95 | |||
96 | #define page_to_pfn(pg) \ | ||
97 | ({ \ | ||
98 | struct page *__page = pg; \ | ||
99 | struct zone *__zone = page_zone(__page); \ | ||
100 | (unsigned long)(__page - __zone->zone_mem_map) \ | ||
101 | + __zone->zone_start_pfn; \ | ||
102 | }) | ||
103 | |||
104 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ | 87 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ |
105 | #define pfn_valid(pfn) ((pfn) < num_physpages) | 88 | #define pfn_valid(pfn) ((pfn) < num_physpages) |
106 | #else | 89 | #else |
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 64a0b8e6afeb..62113d3bfdc2 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -22,7 +22,6 @@ extern int mp_bus_id_to_type [MAX_MP_BUSSES]; | |||
22 | extern int mp_irq_entries; | 22 | extern int mp_irq_entries; |
23 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; | 23 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; |
24 | extern int mpc_default_type; | 24 | extern int mpc_default_type; |
25 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | ||
26 | extern unsigned long mp_lapic_addr; | 25 | extern unsigned long mp_lapic_addr; |
27 | extern int pic_mode; | 26 | extern int pic_mode; |
28 | extern int using_apic_timer; | 27 | extern int using_apic_timer; |
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5b6ceda68c5f..64cf937c7e33 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/config.h> | 26 | #include <linux/config.h> |
27 | #include <linux/ioctl.h> | 27 | #include <linux/ioctl.h> |
28 | #include <linux/errno.h> | ||
28 | 29 | ||
29 | #define MTRR_IOCTL_BASE 'M' | 30 | #define MTRR_IOCTL_BASE 'M' |
30 | 31 | ||
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 9b2199e829f3..05a538531229 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -9,6 +9,8 @@ | |||
9 | #ifndef _ASM_MUTEX_H | 9 | #ifndef _ASM_MUTEX_H |
10 | #define _ASM_MUTEX_H | 10 | #define _ASM_MUTEX_H |
11 | 11 | ||
12 | #include "asm/alternative.h" | ||
13 | |||
12 | /** | 14 | /** |
13 | * __mutex_fastpath_lock - try to take the lock by moving the count | 15 | * __mutex_fastpath_lock - try to take the lock by moving the count |
14 | * from 1 to a 0 value | 16 | * from 1 to a 0 value |
@@ -27,7 +29,7 @@ do { \ | |||
27 | typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ | 29 | typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ |
28 | \ | 30 | \ |
29 | __asm__ __volatile__( \ | 31 | __asm__ __volatile__( \ |
30 | LOCK " decl (%%eax) \n" \ | 32 | LOCK_PREFIX " decl (%%eax) \n" \ |
31 | " js 2f \n" \ | 33 | " js 2f \n" \ |
32 | "1: \n" \ | 34 | "1: \n" \ |
33 | \ | 35 | \ |
@@ -83,7 +85,7 @@ do { \ | |||
83 | typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ | 85 | typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ |
84 | \ | 86 | \ |
85 | __asm__ __volatile__( \ | 87 | __asm__ __volatile__( \ |
86 | LOCK " incl (%%eax) \n" \ | 88 | LOCK_PREFIX " incl (%%eax) \n" \ |
87 | " jle 2f \n" \ | 89 | " jle 2f \n" \ |
88 | "1: \n" \ | 90 | "1: \n" \ |
89 | \ | 91 | \ |
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 997ca5d17876..30f52a2263ba 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -126,8 +126,6 @@ extern int page_is_ram(unsigned long pagenr); | |||
126 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 126 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
127 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 127 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
128 | #ifdef CONFIG_FLATMEM | 128 | #ifdef CONFIG_FLATMEM |
129 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
130 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
131 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 129 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
132 | #endif /* CONFIG_FLATMEM */ | 130 | #endif /* CONFIG_FLATMEM */ |
133 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 131 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
@@ -141,6 +139,7 @@ extern int page_is_ram(unsigned long pagenr); | |||
141 | 139 | ||
142 | #endif /* __KERNEL__ */ | 140 | #endif /* __KERNEL__ */ |
143 | 141 | ||
142 | #include <asm-generic/memory_model.h> | ||
144 | #include <asm-generic/page.h> | 143 | #include <asm-generic/page.h> |
145 | 144 | ||
146 | #endif /* _I386_PAGE_H */ | 145 | #endif /* _I386_PAGE_H */ |
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 74ef721b534d..27bde973abc7 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -61,4 +61,6 @@ static inline int pte_exec_kernel(pte_t pte) | |||
61 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) | 61 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
62 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 62 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
63 | 63 | ||
64 | void vmalloc_sync_all(void); | ||
65 | |||
64 | #endif /* _I386_PGTABLE_2LEVEL_H */ | 66 | #endif /* _I386_PGTABLE_2LEVEL_H */ |
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index f1a8b454920a..36a5aa63cbbf 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -152,4 +152,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | |||
152 | 152 | ||
153 | #define __pmd_free_tlb(tlb, x) do { } while (0) | 153 | #define __pmd_free_tlb(tlb, x) do { } while (0) |
154 | 154 | ||
155 | #define vmalloc_sync_all() ((void)0) | ||
156 | |||
155 | #endif /* _I386_PGTABLE_3LEVEL_H */ | 157 | #endif /* _I386_PGTABLE_3LEVEL_H */ |
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 088a945bf26b..ee056c41a9fb 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -219,13 +219,12 @@ extern unsigned long pg0[]; | |||
219 | * The following only work if pte_present() is true. | 219 | * The following only work if pte_present() is true. |
220 | * Undefined behaviour if not.. | 220 | * Undefined behaviour if not.. |
221 | */ | 221 | */ |
222 | #define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT) | ||
223 | static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } | 222 | static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; } |
224 | static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } | 223 | static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; } |
225 | static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } | 224 | static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } |
226 | static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } | 225 | static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } |
227 | static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } | 226 | static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } |
228 | static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; } | 227 | static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } |
229 | 228 | ||
230 | /* | 229 | /* |
231 | * The following only works if pte_present() is not true. | 230 | * The following only works if pte_present() is not true. |
@@ -242,7 +241,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return | |||
242 | static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } | 241 | static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } |
243 | static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } | 242 | static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } |
244 | static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } | 243 | static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } |
245 | static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; } | 244 | static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } |
246 | 245 | ||
247 | #ifdef CONFIG_X86_PAE | 246 | #ifdef CONFIG_X86_PAE |
248 | # include <asm/pgtable-3level.h> | 247 | # include <asm/pgtable-3level.h> |
diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h
index aecc80a15d36..2cd4929abd40 100644
--- a/include/asm-i386/poll.h
+++ b/include/asm-i386/poll.h
@@ -16,6 +16,7 @@ | |||
16 | #define POLLWRBAND 0x0200 | 16 | #define POLLWRBAND 0x0200 |
17 | #define POLLMSG 0x0400 | 17 | #define POLLMSG 0x0400 |
18 | #define POLLREMOVE 0x1000 | 18 | #define POLLREMOVE 0x1000 |
19 | #define POLLRDHUP 0x2000 | ||
19 | 20 | ||
20 | struct pollfd { | 21 | struct pollfd { |
21 | int fd; | 22 | int fd; |
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index feca5d961e2b..805f0dcda468 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/config.h> | 20 | #include <linux/config.h> |
21 | #include <linux/threads.h> | 21 | #include <linux/threads.h> |
22 | #include <asm/percpu.h> | 22 | #include <asm/percpu.h> |
23 | #include <linux/cpumask.h> | ||
23 | 24 | ||
24 | /* flag for disabling the tsc */ | 25 | /* flag for disabling the tsc */ |
25 | extern int tsc_disable; | 26 | extern int tsc_disable; |
@@ -67,6 +68,9 @@ struct cpuinfo_x86 { | |||
67 | char pad0; | 68 | char pad0; |
68 | int x86_power; | 69 | int x86_power; |
69 | unsigned long loops_per_jiffy; | 70 | unsigned long loops_per_jiffy; |
71 | #ifdef CONFIG_SMP | ||
72 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
73 | #endif | ||
70 | unsigned char x86_max_cores; /* cpuid returned max cores value */ | 74 | unsigned char x86_max_cores; /* cpuid returned max cores value */ |
71 | unsigned char booted_cores; /* number of cores as seen by OS */ | 75 | unsigned char booted_cores; /* number of cores as seen by OS */ |
72 | unsigned char apicid; | 76 | unsigned char apicid; |
@@ -103,6 +107,7 @@ extern struct cpuinfo_x86 cpu_data[]; | |||
103 | 107 | ||
104 | extern int phys_proc_id[NR_CPUS]; | 108 | extern int phys_proc_id[NR_CPUS]; |
105 | extern int cpu_core_id[NR_CPUS]; | 109 | extern int cpu_core_id[NR_CPUS]; |
110 | extern int cpu_llc_id[NR_CPUS]; | ||
106 | extern char ignore_fpu_irq; | 111 | extern char ignore_fpu_irq; |
107 | 112 | ||
108 | extern void identify_cpu(struct cpuinfo_x86 *); | 113 | extern void identify_cpu(struct cpuinfo_x86 *); |
@@ -616,8 +621,6 @@ struct extended_sigtable { | |||
616 | unsigned int reserved[3]; | 621 | unsigned int reserved[3]; |
617 | struct extended_signature sigs[0]; | 622 | struct extended_signature sigs[0]; |
618 | }; | 623 | }; |
619 | /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */ | ||
620 | #define MICROCODE_IOCFREE _IO('6',0) | ||
621 | 624 | ||
622 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | 625 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ |
623 | static inline void rep_nop(void) | 626 | static inline void rep_nop(void) |
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index b57cc7afdf7e..94f00195d543 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -21,21 +21,23 @@ | |||
21 | #define RW_LOCK_BIAS_STR "0x01000000" | 21 | #define RW_LOCK_BIAS_STR "0x01000000" |
22 | 22 | ||
23 | #define __build_read_lock_ptr(rw, helper) \ | 23 | #define __build_read_lock_ptr(rw, helper) \ |
24 | asm volatile(LOCK "subl $1,(%0)\n\t" \ | 24 | alternative_smp("lock; subl $1,(%0)\n\t" \ |
25 | "jns 1f\n" \ | 25 | "jns 1f\n" \ |
26 | "call " helper "\n\t" \ | 26 | "call " helper "\n\t" \ |
27 | "1:\n" \ | 27 | "1:\n", \ |
28 | ::"a" (rw) : "memory") | 28 | "subl $1,(%0)\n\t", \ |
29 | :"a" (rw) : "memory") | ||
29 | 30 | ||
30 | #define __build_read_lock_const(rw, helper) \ | 31 | #define __build_read_lock_const(rw, helper) \ |
31 | asm volatile(LOCK "subl $1,%0\n\t" \ | 32 | alternative_smp("lock; subl $1,%0\n\t" \ |
32 | "jns 1f\n" \ | 33 | "jns 1f\n" \ |
33 | "pushl %%eax\n\t" \ | 34 | "pushl %%eax\n\t" \ |
34 | "leal %0,%%eax\n\t" \ | 35 | "leal %0,%%eax\n\t" \ |
35 | "call " helper "\n\t" \ | 36 | "call " helper "\n\t" \ |
36 | "popl %%eax\n\t" \ | 37 | "popl %%eax\n\t" \ |
37 | "1:\n" \ | 38 | "1:\n", \ |
38 | :"=m" (*(volatile int *)rw) : : "memory") | 39 | "subl $1,%0\n\t", \ |
40 | "=m" (*(volatile int *)rw) : : "memory") | ||
39 | 41 | ||
40 | #define __build_read_lock(rw, helper) do { \ | 42 | #define __build_read_lock(rw, helper) do { \ |
41 | if (__builtin_constant_p(rw)) \ | 43 | if (__builtin_constant_p(rw)) \ |
@@ -45,21 +47,23 @@ | |||
45 | } while (0) | 47 | } while (0) |
46 | 48 | ||
47 | #define __build_write_lock_ptr(rw, helper) \ | 49 | #define __build_write_lock_ptr(rw, helper) \ |
48 | asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ | 50 | alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ |
49 | "jz 1f\n" \ | 51 | "jz 1f\n" \ |
50 | "call " helper "\n\t" \ | 52 | "call " helper "\n\t" \ |
51 | "1:\n" \ | 53 | "1:\n", \ |
52 | ::"a" (rw) : "memory") | 54 | "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \ |
55 | :"a" (rw) : "memory") | ||
53 | 56 | ||
54 | #define __build_write_lock_const(rw, helper) \ | 57 | #define __build_write_lock_const(rw, helper) \ |
55 | asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ | 58 | alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ |
56 | "jz 1f\n" \ | 59 | "jz 1f\n" \ |
57 | "pushl %%eax\n\t" \ | 60 | "pushl %%eax\n\t" \ |
58 | "leal %0,%%eax\n\t" \ | 61 | "leal %0,%%eax\n\t" \ |
59 | "call " helper "\n\t" \ | 62 | "call " helper "\n\t" \ |
60 | "popl %%eax\n\t" \ | 63 | "popl %%eax\n\t" \ |
61 | "1:\n" \ | 64 | "1:\n", \ |
62 | :"=m" (*(volatile int *)rw) : : "memory") | 65 | "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \ |
66 | "=m" (*(volatile int *)rw) : : "memory") | ||
63 | 67 | ||
64 | #define __build_write_lock(rw, helper) do { \ | 68 | #define __build_write_lock(rw, helper) do { \ |
65 | if (__builtin_constant_p(rw)) \ | 69 | if (__builtin_constant_p(rw)) \ |
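The read/write lock helpers above implement the usual RW_LOCK_BIAS counting scheme, which is why "jns" and "jz" are the success tests. An editorial arithmetic sketch in plain C, not kernel code:

/* Editorial sketch of the bias arithmetic behind the jns/jz tests. */
#define EXAMPLE_RW_LOCK_BIAS 0x01000000

static void example_rwlock_bias(void)
{
        int lock = EXAMPLE_RW_LOCK_BIAS;    /* unlocked                                    */

        lock -= 1;                          /* reader: 0x00ffffff, still >= 0, "jns" taken */
        lock -= EXAMPLE_RW_LOCK_BIAS;       /* writer while read-held: -1, "jz" not taken,
                                               so the out-of-line helper is called to wait */
}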
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 6a42b2142fd6..f7a0f310c524 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem) | |||
99 | might_sleep(); | 99 | might_sleep(); |
100 | __asm__ __volatile__( | 100 | __asm__ __volatile__( |
101 | "# atomic down operation\n\t" | 101 | "# atomic down operation\n\t" |
102 | LOCK "decl %0\n\t" /* --sem->count */ | 102 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ |
103 | "js 2f\n" | 103 | "js 2f\n" |
104 | "1:\n" | 104 | "1:\n" |
105 | LOCK_SECTION_START("") | 105 | LOCK_SECTION_START("") |
@@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem) | |||
123 | might_sleep(); | 123 | might_sleep(); |
124 | __asm__ __volatile__( | 124 | __asm__ __volatile__( |
125 | "# atomic interruptible down operation\n\t" | 125 | "# atomic interruptible down operation\n\t" |
126 | LOCK "decl %1\n\t" /* --sem->count */ | 126 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ |
127 | "js 2f\n\t" | 127 | "js 2f\n\t" |
128 | "xorl %0,%0\n" | 128 | "xorl %0,%0\n" |
129 | "1:\n" | 129 | "1:\n" |
@@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem) | |||
148 | 148 | ||
149 | __asm__ __volatile__( | 149 | __asm__ __volatile__( |
150 | "# atomic interruptible down operation\n\t" | 150 | "# atomic interruptible down operation\n\t" |
151 | LOCK "decl %1\n\t" /* --sem->count */ | 151 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ |
152 | "js 2f\n\t" | 152 | "js 2f\n\t" |
153 | "xorl %0,%0\n" | 153 | "xorl %0,%0\n" |
154 | "1:\n" | 154 | "1:\n" |
@@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem) | |||
173 | { | 173 | { |
174 | __asm__ __volatile__( | 174 | __asm__ __volatile__( |
175 | "# atomic up operation\n\t" | 175 | "# atomic up operation\n\t" |
176 | LOCK "incl %0\n\t" /* ++sem->count */ | 176 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ |
177 | "jle 2f\n" | 177 | "jle 2f\n" |
178 | "1:\n" | 178 | "1:\n" |
179 | LOCK_SECTION_START("") | 179 | LOCK_SECTION_START("") |
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 826a8ca50ac8..ee941457b55d 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,9 +6,7 @@ | |||
6 | #ifndef _i386_SETUP_H | 6 | #ifndef _i386_SETUP_H |
7 | #define _i386_SETUP_H | 7 | #define _i386_SETUP_H |
8 | 8 | ||
9 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) | 9 | #include <linux/pfn.h> |
10 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) | ||
11 | #define PFN_PHYS(x) ((x) << PAGE_SHIFT) | ||
12 | 10 | ||
13 | /* | 11 | /* |
14 | * Reserved space for vmalloc and iomap - defined in asm/page.h | 12 | * Reserved space for vmalloc and iomap - defined in asm/page.h |
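The PFN_* helpers now come from <linux/pfn.h> with the same definitions that were removed above; an editorial worked example, assuming 4 KB pages (PAGE_SHIFT == 12):

/* Editorial worked example:
 *   PFN_DOWN(0x1234) == 0x1      truncates to the containing page frame
 *   PFN_UP(0x1234)   == 0x2      rounds a partial page up
 *   PFN_PHYS(0x2)    == 0x2000   converts a frame number back to a physical address
 */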
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 23604350cdf4..d76b7693cf1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -35,31 +35,41 @@ | |||
35 | #define __raw_spin_lock_string_flags \ | 35 | #define __raw_spin_lock_string_flags \ |
36 | "\n1:\t" \ | 36 | "\n1:\t" \ |
37 | "lock ; decb %0\n\t" \ | 37 | "lock ; decb %0\n\t" \ |
38 | "jns 4f\n\t" \ | 38 | "jns 5f\n" \ |
39 | "2:\t" \ | 39 | "2:\t" \ |
40 | "testl $0x200, %1\n\t" \ | 40 | "testl $0x200, %1\n\t" \ |
41 | "jz 3f\n\t" \ | 41 | "jz 4f\n\t" \ |
42 | "sti\n\t" \ | 42 | "sti\n" \ |
43 | "3:\t" \ | 43 | "3:\t" \ |
44 | "rep;nop\n\t" \ | 44 | "rep;nop\n\t" \ |
45 | "cmpb $0, %0\n\t" \ | 45 | "cmpb $0, %0\n\t" \ |
46 | "jle 3b\n\t" \ | 46 | "jle 3b\n\t" \ |
47 | "cli\n\t" \ | 47 | "cli\n\t" \ |
48 | "jmp 1b\n" \ | 48 | "jmp 1b\n" \ |
49 | "4:\n\t" | 49 | "4:\t" \ |
50 | "rep;nop\n\t" \ | ||
51 | "cmpb $0, %0\n\t" \ | ||
52 | "jg 1b\n\t" \ | ||
53 | "jmp 4b\n" \ | ||
54 | "5:\n\t" | ||
55 | |||
56 | #define __raw_spin_lock_string_up \ | ||
57 | "\n\tdecb %0" | ||
50 | 58 | ||
51 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 59 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
52 | { | 60 | { |
53 | __asm__ __volatile__( | 61 | alternative_smp( |
54 | __raw_spin_lock_string | 62 | __raw_spin_lock_string, |
55 | :"=m" (lock->slock) : : "memory"); | 63 | __raw_spin_lock_string_up, |
64 | "=m" (lock->slock) : : "memory"); | ||
56 | } | 65 | } |
57 | 66 | ||
58 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 67 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
59 | { | 68 | { |
60 | __asm__ __volatile__( | 69 | alternative_smp( |
61 | __raw_spin_lock_string_flags | 70 | __raw_spin_lock_string_flags, |
62 | :"=m" (lock->slock) : "r" (flags) : "memory"); | 71 | __raw_spin_lock_string_up, |
72 | "=m" (lock->slock) : "r" (flags) : "memory"); | ||
63 | } | 73 | } |
64 | 74 | ||
65 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 75 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
@@ -178,12 +188,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) | |||
178 | 188 | ||
179 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 189 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
180 | { | 190 | { |
181 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | 191 | asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory"); |
182 | } | 192 | } |
183 | 193 | ||
184 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 194 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
185 | { | 195 | { |
186 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" | 196 | asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" |
187 | : "=m" (rw->lock) : : "memory"); | 197 | : "=m" (rw->lock) : : "memory"); |
188 | } | 198 | } |
189 | 199 | ||
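A hedged illustration of what the alternative_smp() switch buys here: on a CONFIG_SMP kernel running on a single CPU, the SMP spin-wait sequence is patched down to the UP string, and alternatives_smp_switch() restores it when another CPU is brought up.

/* Editorial illustration, not kernel code: after UP patching, the fast path
 * above behaves roughly as if it were
 *
 *      static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *      {
 *              __asm__ __volatile__("decb %0" : "=m" (lock->slock) : : "memory");
 *      }
 */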
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h
index b464f8020ec4..67eae78323ba 100644
--- a/include/asm-i386/stat.h
+++ b/include/asm-i386/stat.h
@@ -58,8 +58,7 @@ struct stat64 { | |||
58 | long long st_size; | 58 | long long st_size; |
59 | unsigned long st_blksize; | 59 | unsigned long st_blksize; |
60 | 60 | ||
61 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | 61 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
62 | unsigned long __pad4; /* future possible st_blocks high bits */ | ||
63 | 62 | ||
64 | unsigned long st_atime; | 63 | unsigned long st_atime; |
65 | unsigned long st_atime_nsec; | 64 | unsigned long st_atime_nsec; |
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 399145a247f2..19cc79c9a35d 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -352,67 +352,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l | |||
352 | 352 | ||
353 | #endif | 353 | #endif |
354 | 354 | ||
355 | #ifdef __KERNEL__ | ||
356 | struct alt_instr { | ||
357 | __u8 *instr; /* original instruction */ | ||
358 | __u8 *replacement; | ||
359 | __u8 cpuid; /* cpuid bit set for replacement */ | ||
360 | __u8 instrlen; /* length of original instruction */ | ||
361 | __u8 replacementlen; /* length of new instruction, <= instrlen */ | ||
362 | __u8 pad; | ||
363 | }; | ||
364 | #endif | ||
365 | |||
366 | /* | ||
367 | * Alternative instructions for different CPU types or capabilities. | ||
368 | * | ||
369 | * This allows to use optimized instructions even on generic binary | ||
370 | * kernels. | ||
371 | * | ||
372 | * length of oldinstr must be longer or equal the length of newinstr | ||
373 | * It can be padded with nops as needed. | ||
374 | * | ||
375 | * For non barrier like inlines please define new variants | ||
376 | * without volatile and memory clobber. | ||
377 | */ | ||
378 | #define alternative(oldinstr, newinstr, feature) \ | ||
379 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
380 | ".section .altinstructions,\"a\"\n" \ | ||
381 | " .align 4\n" \ | ||
382 | " .long 661b\n" /* label */ \ | ||
383 | " .long 663f\n" /* new instruction */ \ | ||
384 | " .byte %c0\n" /* feature bit */ \ | ||
385 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
386 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
387 | ".previous\n" \ | ||
388 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
389 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
390 | ".previous" :: "i" (feature) : "memory") | ||
391 | |||
392 | /* | ||
393 | * Alternative inline assembly with input. | ||
394 | * | ||
395 | * Pecularities: | ||
396 | * No memory clobber here. | ||
397 | * Argument numbers start with 1. | ||
398 | * Best is to use constraints that are fixed size (like (%1) ... "r") | ||
399 | * If you use variable sized constraints like "m" or "g" in the | ||
400 | * replacement maake sure to pad to the worst case length. | ||
401 | */ | ||
402 | #define alternative_input(oldinstr, newinstr, feature, input...) \ | ||
403 | asm volatile ("661:\n\t" oldinstr "\n662:\n" \ | ||
404 | ".section .altinstructions,\"a\"\n" \ | ||
405 | " .align 4\n" \ | ||
406 | " .long 661b\n" /* label */ \ | ||
407 | " .long 663f\n" /* new instruction */ \ | ||
408 | " .byte %c0\n" /* feature bit */ \ | ||
409 | " .byte 662b-661b\n" /* sourcelen */ \ | ||
410 | " .byte 664f-663f\n" /* replacementlen */ \ | ||
411 | ".previous\n" \ | ||
412 | ".section .altinstr_replacement,\"ax\"\n" \ | ||
413 | "663:\n\t" newinstr "\n664:\n" /* replacement */ \ | ||
414 | ".previous" :: "i" (feature), ##input) | ||
415 | |||
416 | /* | 355 | /* |
417 | * Force strict CPU ordering. | 356 | * Force strict CPU ordering. |
418 | * And yes, this is required on UP too when we're talking | 357 | * And yes, this is required on UP too when we're talking |
@@ -558,5 +497,8 @@ static inline void sched_cacheflush(void) | |||
558 | } | 497 | } |
559 | 498 | ||
560 | extern unsigned long arch_align_stack(unsigned long sp); | 499 | extern unsigned long arch_align_stack(unsigned long sp); |
500 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | ||
501 | |||
502 | void default_idle(void); | ||
561 | 503 | ||
562 | #endif | 504 | #endif |
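The large system.h deletion is a move, not a removal: struct alt_instr and the alternative()/alternative_input() macros now live in the new <asm-i386/alternative.h> introduced by this merge, while system.h instead gains declarations for free_init_pages() and default_idle(). The records the macros emit into .altinstructions are consumed by a boot-time patcher; the runnable C sketch below outlines that patching step using demo_ names and plain byte buffers instead of kernel text, so it is an illustration of the mechanism rather than the kernel's apply_alternatives().

/* C-level sketch of the boot-time patcher: for each record whose CPU
 * feature bit is set, copy the replacement bytes over the original
 * instruction and pad the remainder with NOPs.  The struct mirrors the
 * alt_instr fields shown in the hunk above; the "instructions" here are
 * just byte buffers so the demo runs anywhere. */
#include <stdio.h>
#include <string.h>

struct demo_alt_instr {
        unsigned char *instr;           /* original instruction bytes */
        unsigned char *replacement;     /* replacement bytes */
        unsigned char  cpuid;           /* feature bit required */
        unsigned char  instrlen;        /* length of original */
        unsigned char  replacementlen;  /* length of replacement, <= instrlen */
};

static void demo_apply_alternatives(struct demo_alt_instr *a, int count,
                                    unsigned long cpu_features)
{
        int i;

        for (i = 0; i < count; i++) {
                if (!(cpu_features & (1UL << a[i].cpuid)))
                        continue;       /* CPU lacks the feature: keep original */
                memcpy(a[i].instr, a[i].replacement, a[i].replacementlen);
                memset(a[i].instr + a[i].replacementlen, 0x90,  /* x86 NOP */
                       a[i].instrlen - a[i].replacementlen);
        }
}

int main(void)
{
        unsigned char site[4] = { 0xAA, 0xAA, 0xAA, 0xAA };    /* fake "old" */
        unsigned char repl[2] = { 0xBB, 0xBB };                /* fake "new" */
        struct demo_alt_instr rec = { site, repl, 5, sizeof(site), sizeof(repl) };

        demo_apply_alternatives(&rec, 1, 1UL << 5);     /* pretend bit 5 is set */
        printf("patched site: %02x %02x %02x %02x\n",
               site[0], site[1], site[2], site[3]);     /* bb bb 90 90 */
        return 0;
}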
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index aa958c6ee83e..b94e5eeef917 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
@@ -112,4 +112,6 @@ extern unsigned long node_remap_size[]; | |||
112 | 112 | ||
113 | #endif /* CONFIG_NUMA */ | 113 | #endif /* CONFIG_NUMA */ |
114 | 114 | ||
115 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
116 | |||
115 | #endif /* _ASM_I386_TOPOLOGY_H */ | 117 | #endif /* _ASM_I386_TOPOLOGY_H */ |
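The topology.h hunk exports cpu_coregroup_map() outside the CONFIG_NUMA block, so every configuration can ask which CPUs a given CPU should be grouped with (siblings on the same core or package) when the scheduler builds its balancing domains. The toy below only illustrates the shape of such a mask; demo_coregroup_map() and the small demo_cpumask_t are invented for the example and stand in for the real cpumask_t bitmap.

/* Toy coregroup map: pretend CPUs {0,1}, {2,3}, ... are siblings, and
 * return the mask of CPUs sharing a core group with the given CPU. */
#include <stdio.h>

typedef unsigned int demo_cpumask_t;    /* enough bits for an 8-CPU toy box */

static demo_cpumask_t demo_coregroup_map(int cpu)
{
        return 0x3u << (cpu & ~1);      /* pair each even CPU with its neighbour */
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 4; cpu++)
                printf("cpu %d shares a core group with mask 0x%x\n",
                       cpu, demo_coregroup_map(cpu));
        return 0;
}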
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h index ced00fe8fe61..e50a08bd7ced 100644 --- a/include/asm-i386/types.h +++ b/include/asm-i386/types.h | |||
@@ -63,6 +63,11 @@ typedef u64 sector_t; | |||
63 | #define HAVE_SECTOR_T | 63 | #define HAVE_SECTOR_T |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #ifdef CONFIG_LSF | ||
67 | typedef u64 blkcnt_t; | ||
68 | #define HAVE_BLKCNT_T | ||
69 | #endif | ||
70 | |||
66 | #endif /* __ASSEMBLY__ */ | 71 | #endif /* __ASSEMBLY__ */ |
67 | 72 | ||
68 | #endif /* __KERNEL__ */ | 73 | #endif /* __KERNEL__ */ |
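The types.h addition gives the kernel a dedicated block-count type: with CONFIG_LSF selected, blkcnt_t becomes a u64, so inode block counts on a 32-bit machine are no longer limited to 2^32 512-byte units, matching the st_blocks widening earlier in this diff. The arithmetic below just makes that limit concrete; no kernel types are involved.

/* Why a 64-bit block count matters on a 32-bit arch: blkcnt_t/st_blocks
 * count 512-byte units, so a 32-bit counter tops out near 2 TiB. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned long long max32 = (unsigned long long)UINT32_MAX * 512;

        printf("32-bit block count caps a file at %llu bytes (~%llu GiB)\n",
               max32, max32 >> 30);
        printf("a 64-bit blkcnt_t lifts that cap well beyond current storage\n");
        return 0;
}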
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h index 3f1337c34208..371457b1ceb6 100644 --- a/include/asm-i386/uaccess.h +++ b/include/asm-i386/uaccess.h | |||
@@ -197,13 +197,15 @@ extern void __put_user_8(void); | |||
197 | 197 | ||
198 | #define put_user(x,ptr) \ | 198 | #define put_user(x,ptr) \ |
199 | ({ int __ret_pu; \ | 199 | ({ int __ret_pu; \ |
200 | __typeof__(*(ptr)) __pu_val; \ | ||
200 | __chk_user_ptr(ptr); \ | 201 | __chk_user_ptr(ptr); \ |
202 | __pu_val = x; \ | ||
201 | switch(sizeof(*(ptr))) { \ | 203 | switch(sizeof(*(ptr))) { \ |
202 | case 1: __put_user_1(x, ptr); break; \ | 204 | case 1: __put_user_1(__pu_val, ptr); break; \ |
203 | case 2: __put_user_2(x, ptr); break; \ | 205 | case 2: __put_user_2(__pu_val, ptr); break; \ |
204 | case 4: __put_user_4(x, ptr); break; \ | 206 | case 4: __put_user_4(__pu_val, ptr); break; \ |
205 | case 8: __put_user_8(x, ptr); break; \ | 207 | case 8: __put_user_8(__pu_val, ptr); break; \ |
206 | default:__put_user_X(x, ptr); break; \ | 208 | default:__put_user_X(__pu_val, ptr); break; \ |
207 | } \ | 209 | } \ |
208 | __ret_pu; \ | 210 | __ret_pu; \ |
209 | }) | 211 | }) |
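The uaccess.h hunk makes put_user() capture its value argument exactly once in a local __pu_val typed like the destination, so side effects in the expression happen once and the value is converted before the size-based dispatch. The userspace sketch below shows that single-evaluation pattern; demo_put_user() is a made-up stand-in that assigns through the pointer instead of performing a user copy.

/* Single-evaluation sketch: the value expression is evaluated once into a
 * temporary of the pointee's type, then stored.  The real put_user()
 * dispatches on sizeof(*(ptr)) instead of assigning directly. */
#include <stdio.h>

#define demo_put_user(x, ptr)                                           \
({                                                                      \
        __typeof__(*(ptr)) __pu_val = (x);  /* evaluate x exactly once */ \
        *(ptr) = __pu_val;                   /* stand-in for the sized store */ \
        0;                                   /* 0 on success, like put_user */ \
})

int main(void)
{
        short dst = 0;
        int calls = 0;

        /* The increment must run once, and the int value is converted to
         * short by the __typeof__ temporary before the store. */
        demo_put_user(++calls + 0x12340000, &dst);
        printf("dst = 0x%x, calls = %d\n", dst, calls);  /* dst = 0x1, calls = 1 */
        return 0;
}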
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index dc81a55dd94d..789e9bdd0a40 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h | |||
@@ -316,8 +316,11 @@ | |||
316 | #define __NR_pselect6 308 | 316 | #define __NR_pselect6 308 |
317 | #define __NR_ppoll 309 | 317 | #define __NR_ppoll 309 |
318 | #define __NR_unshare 310 | 318 | #define __NR_unshare 310 |
319 | #define __NR_set_robust_list 311 | ||
320 | #define __NR_get_robust_list 312 | ||
321 | #define __NR_sys_splice 313 | ||
319 | 322 | ||
320 | #define NR_syscalls 311 | 323 | #define NR_syscalls 314 |
321 | 324 | ||
322 | /* | 325 | /* |
323 | * user-visible error numbers are in the range -1 - -128: see | 326 | * user-visible error numbers are in the range -1 - -128: see |
@@ -347,9 +350,9 @@ __syscall_return(type,__res); \ | |||
347 | type name(type1 arg1) \ | 350 | type name(type1 arg1) \ |
348 | { \ | 351 | { \ |
349 | long __res; \ | 352 | long __res; \ |
350 | __asm__ volatile ("int $0x80" \ | 353 | __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ |
351 | : "=a" (__res) \ | 354 | : "=a" (__res) \ |
352 | : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \ | 355 | : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \ |
353 | __syscall_return(type,__res); \ | 356 | __syscall_return(type,__res); \ |
354 | } | 357 | } |
355 | 358 | ||
@@ -357,9 +360,10 @@ __syscall_return(type,__res); \ | |||
357 | type name(type1 arg1,type2 arg2) \ | 360 | type name(type1 arg1,type2 arg2) \ |
358 | { \ | 361 | { \ |
359 | long __res; \ | 362 | long __res; \ |
360 | __asm__ volatile ("int $0x80" \ | 363 | __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ |
361 | : "=a" (__res) \ | 364 | : "=a" (__res) \ |
362 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \ | 365 | : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \ |
366 | : "memory"); \ | ||
363 | __syscall_return(type,__res); \ | 367 | __syscall_return(type,__res); \ |
364 | } | 368 | } |
365 | 369 | ||
@@ -367,9 +371,9 @@ __syscall_return(type,__res); \ | |||
367 | type name(type1 arg1,type2 arg2,type3 arg3) \ | 371 | type name(type1 arg1,type2 arg2,type3 arg3) \ |
368 | { \ | 372 | { \ |
369 | long __res; \ | 373 | long __res; \ |
370 | __asm__ volatile ("int $0x80" \ | 374 | __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ |
371 | : "=a" (__res) \ | 375 | : "=a" (__res) \ |
372 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 376 | : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ |
373 | "d" ((long)(arg3)) : "memory"); \ | 377 | "d" ((long)(arg3)) : "memory"); \ |
374 | __syscall_return(type,__res); \ | 378 | __syscall_return(type,__res); \ |
375 | } | 379 | } |
@@ -378,9 +382,9 @@ __syscall_return(type,__res); \ | |||
378 | type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ | 382 | type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ |
379 | { \ | 383 | { \ |
380 | long __res; \ | 384 | long __res; \ |
381 | __asm__ volatile ("int $0x80" \ | 385 | __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \ |
382 | : "=a" (__res) \ | 386 | : "=a" (__res) \ |
383 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 387 | : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ |
384 | "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ | 388 | "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ |
385 | __syscall_return(type,__res); \ | 389 | __syscall_return(type,__res); \ |
386 | } | 390 | } |
@@ -390,10 +394,12 @@ __syscall_return(type,__res); \ | |||
390 | type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ | 394 | type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ |
391 | { \ | 395 | { \ |
392 | long __res; \ | 396 | long __res; \ |
393 | __asm__ volatile ("int $0x80" \ | 397 | __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \ |
398 | "int $0x80 ; pop %%ebx" \ | ||
394 | : "=a" (__res) \ | 399 | : "=a" (__res) \ |
395 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 400 | : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \ |
396 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \ | 401 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ |
402 | : "memory"); \ | ||
397 | __syscall_return(type,__res); \ | 403 | __syscall_return(type,__res); \ |
398 | } | 404 | } |
399 | 405 | ||
@@ -402,11 +408,14 @@ __syscall_return(type,__res); \ | |||
402 | type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ | 408 | type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ |
403 | { \ | 409 | { \ |
404 | long __res; \ | 410 | long __res; \ |
405 | __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \ | 411 | struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \ |
412 | __asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \ | ||
413 | "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \ | ||
414 | "pop %%ebx ; pop %%ebp" \ | ||
406 | : "=a" (__res) \ | 415 | : "=a" (__res) \ |
407 | : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 416 | : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \ |
408 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ | 417 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \ |
409 | "0" ((long)(arg6)) : "memory"); \ | 418 | : "memory"); \ |
410 | __syscall_return(type,__res); \ | 419 | __syscall_return(type,__res); \ |
411 | } | 420 | } |
412 | 421 | ||
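The unistd.h changes do two things: they add the set_robust_list, get_robust_list and sys_splice syscall numbers (raising NR_syscalls to 314), and they rewrite the _syscallN() wrappers so the first argument is moved into %ebx around the int $0x80 instead of being forced there with a "b" constraint, which lets the macros build when %ebx is reserved as the PIC register; the six-argument variant passes arg1/arg6 through a small struct because %ebp is likewise unavailable as a constraint. The sketch below reuses the one-argument stub from the diff in a userspace wrapper for an i386 Linux target (e.g. gcc -m32 with 32-bit libraries installed); demo_syscall1() is an illustrative helper, not the kernel macro itself.

/* ebx-preserving raw syscall stub, mirroring the rewritten _syscall1():
 * the argument arrives in any register or as an immediate ("ri"), is moved
 * into %ebx only around the int $0x80, and the caller's %ebx is saved and
 * restored with push/pop. */
#include <stdio.h>

static long demo_syscall1(long nr, long arg1)
{
        long res;

        __asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
                          : "=a" (res)
                          : "0" (nr), "ri" (arg1)
                          : "memory");
        return res;
}

int main(void)
{
        /* close(-1) through the stub: the i386 ABI returns the negated errno
         * in %eax, so we expect -9 (-EBADF) straight from the kernel. */
        long ret = demo_syscall1(6 /* __NR_close on i386 */, -1);

        printf("raw close(-1) returned %ld (expected -9, -EBADF)\n", ret);
        return 0;
}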