author     Marcelo Tosatti <mtosatti@redhat.com>    2012-04-19 16:06:26 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>    2012-04-19 16:06:26 -0400
commit     eac0556750e727ff39144a9a9e59d5ccf1fc0e2a (patch)
tree       f5ccff7795b2ad5e47f17fb475599c526f533e79 /arch
parent     f71fa31f9f7ac33cba12b8897983f950ad2c7a5b (diff)
parent     19853301ef3289bda2d5264c1093e74efddaeab9 (diff)
Merge branch 'linus' into queue
Merge reason: development work has dependency on kvm patches merged
upstream.
Conflicts:
Documentation/feature-removal-schedule.txt
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
90 files changed, 763 insertions, 919 deletions
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index f62251e82ffa..3bb7ffeae3bc 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/barrier.h> | 5 | #include <asm/barrier.h> |
6 | #include <asm/cmpxchg.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * Atomic operations that C can't guarantee us. Useful for | 9 | * Atomic operations that C can't guarantee us. Useful for |
@@ -168,73 +169,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | |||
168 | return result; | 169 | return result; |
169 | } | 170 | } |
170 | 171 | ||
171 | /* | ||
172 | * Atomic exchange routines. | ||
173 | */ | ||
174 | |||
175 | #define __ASM__MB | ||
176 | #define ____xchg(type, args...) __xchg ## type ## _local(args) | ||
177 | #define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) | ||
178 | #include <asm/xchg.h> | ||
179 | |||
180 | #define xchg_local(ptr,x) \ | ||
181 | ({ \ | ||
182 | __typeof__(*(ptr)) _x_ = (x); \ | ||
183 | (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ | ||
184 | sizeof(*(ptr))); \ | ||
185 | }) | ||
186 | |||
187 | #define cmpxchg_local(ptr, o, n) \ | ||
188 | ({ \ | ||
189 | __typeof__(*(ptr)) _o_ = (o); \ | ||
190 | __typeof__(*(ptr)) _n_ = (n); \ | ||
191 | (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ | ||
192 | (unsigned long)_n_, \ | ||
193 | sizeof(*(ptr))); \ | ||
194 | }) | ||
195 | |||
196 | #define cmpxchg64_local(ptr, o, n) \ | ||
197 | ({ \ | ||
198 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
199 | cmpxchg_local((ptr), (o), (n)); \ | ||
200 | }) | ||
201 | |||
202 | #ifdef CONFIG_SMP | ||
203 | #undef __ASM__MB | ||
204 | #define __ASM__MB "\tmb\n" | ||
205 | #endif | ||
206 | #undef ____xchg | ||
207 | #undef ____cmpxchg | ||
208 | #define ____xchg(type, args...) __xchg ##type(args) | ||
209 | #define ____cmpxchg(type, args...) __cmpxchg ##type(args) | ||
210 | #include <asm/xchg.h> | ||
211 | |||
212 | #define xchg(ptr,x) \ | ||
213 | ({ \ | ||
214 | __typeof__(*(ptr)) _x_ = (x); \ | ||
215 | (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \ | ||
216 | sizeof(*(ptr))); \ | ||
217 | }) | ||
218 | |||
219 | #define cmpxchg(ptr, o, n) \ | ||
220 | ({ \ | ||
221 | __typeof__(*(ptr)) _o_ = (o); \ | ||
222 | __typeof__(*(ptr)) _n_ = (n); \ | ||
223 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
224 | (unsigned long)_n_, sizeof(*(ptr)));\ | ||
225 | }) | ||
226 | |||
227 | #define cmpxchg64(ptr, o, n) \ | ||
228 | ({ \ | ||
229 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
230 | cmpxchg((ptr), (o), (n)); \ | ||
231 | }) | ||
232 | |||
233 | #undef __ASM__MB | ||
234 | #undef ____cmpxchg | ||
235 | |||
236 | #define __HAVE_ARCH_CMPXCHG 1 | ||
237 | |||
238 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 172 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) |
239 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 173 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
240 | 174 | ||
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h new file mode 100644 index 000000000000..429e8cd0d78e --- /dev/null +++ b/arch/alpha/include/asm/cmpxchg.h | |||
@@ -0,0 +1,71 @@ | |||
1 | #ifndef _ALPHA_CMPXCHG_H | ||
2 | #define _ALPHA_CMPXCHG_H | ||
3 | |||
4 | /* | ||
5 | * Atomic exchange routines. | ||
6 | */ | ||
7 | |||
8 | #define __ASM__MB | ||
9 | #define ____xchg(type, args...) __xchg ## type ## _local(args) | ||
10 | #define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) | ||
11 | #include <asm/xchg.h> | ||
12 | |||
13 | #define xchg_local(ptr, x) \ | ||
14 | ({ \ | ||
15 | __typeof__(*(ptr)) _x_ = (x); \ | ||
16 | (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ | ||
17 | sizeof(*(ptr))); \ | ||
18 | }) | ||
19 | |||
20 | #define cmpxchg_local(ptr, o, n) \ | ||
21 | ({ \ | ||
22 | __typeof__(*(ptr)) _o_ = (o); \ | ||
23 | __typeof__(*(ptr)) _n_ = (n); \ | ||
24 | (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ | ||
25 | (unsigned long)_n_, \ | ||
26 | sizeof(*(ptr))); \ | ||
27 | }) | ||
28 | |||
29 | #define cmpxchg64_local(ptr, o, n) \ | ||
30 | ({ \ | ||
31 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
32 | cmpxchg_local((ptr), (o), (n)); \ | ||
33 | }) | ||
34 | |||
35 | #ifdef CONFIG_SMP | ||
36 | #undef __ASM__MB | ||
37 | #define __ASM__MB "\tmb\n" | ||
38 | #endif | ||
39 | #undef ____xchg | ||
40 | #undef ____cmpxchg | ||
41 | #define ____xchg(type, args...) __xchg ##type(args) | ||
42 | #define ____cmpxchg(type, args...) __cmpxchg ##type(args) | ||
43 | #include <asm/xchg.h> | ||
44 | |||
45 | #define xchg(ptr, x) \ | ||
46 | ({ \ | ||
47 | __typeof__(*(ptr)) _x_ = (x); \ | ||
48 | (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \ | ||
49 | sizeof(*(ptr))); \ | ||
50 | }) | ||
51 | |||
52 | #define cmpxchg(ptr, o, n) \ | ||
53 | ({ \ | ||
54 | __typeof__(*(ptr)) _o_ = (o); \ | ||
55 | __typeof__(*(ptr)) _n_ = (n); \ | ||
56 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
57 | (unsigned long)_n_, sizeof(*(ptr)));\ | ||
58 | }) | ||
59 | |||
60 | #define cmpxchg64(ptr, o, n) \ | ||
61 | ({ \ | ||
62 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
63 | cmpxchg((ptr), (o), (n)); \ | ||
64 | }) | ||
65 | |||
66 | #undef __ASM__MB | ||
67 | #undef ____cmpxchg | ||
68 | |||
69 | #define __HAVE_ARCH_CMPXCHG 1 | ||
70 | |||
71 | #endif /* _ALPHA_CMPXCHG_H */ | ||
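Note: the macros collected in this new asm/cmpxchg.h are consumed by callers exactly as before the move. A minimal caller-side sketch, with made-up names (illustrative only, not part of this patch):

	/* Hypothetical helpers: claim a slot exactly once with cmpxchg(),
	 * and hand it over atomically with xchg(). */
	static int example_claim(unsigned long *owner, unsigned long me)
	{
		/* true only for the caller that observed 0 first */
		return cmpxchg(owner, 0UL, me) == 0UL;
	}

	static unsigned long example_handover(unsigned long *owner, unsigned long next)
	{
		return xchg(owner, next);	/* returns the previous owner */
	}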
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h index 1d1b436fbff2..0ca9724597c1 100644 --- a/arch/alpha/include/asm/xchg.h +++ b/arch/alpha/include/asm/xchg.h | |||
@@ -1,10 +1,10 @@ | |||
1 | #ifndef _ALPHA_ATOMIC_H | 1 | #ifndef _ALPHA_CMPXCHG_H |
2 | #error Do not include xchg.h directly! | 2 | #error Do not include xchg.h directly! |
3 | #else | 3 | #else |
4 | /* | 4 | /* |
5 | * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code | 5 | * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code |
6 | * except that local version do not have the expensive memory barrier. | 6 | * except that local version do not have the expensive memory barrier. |
7 | * So this file is included twice from asm/system.h. | 7 | * So this file is included twice from asm/cmpxchg.h. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* | 10 | /* |
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c index 6ce11c481178..797f04bedb47 100644 --- a/arch/arm/boot/compressed/atags_to_fdt.c +++ b/arch/arm/boot/compressed/atags_to_fdt.c | |||
@@ -77,6 +77,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) | |||
77 | } else if (atag->hdr.tag == ATAG_MEM) { | 77 | } else if (atag->hdr.tag == ATAG_MEM) { |
78 | if (memcount >= sizeof(mem_reg_property)/4) | 78 | if (memcount >= sizeof(mem_reg_property)/4) |
79 | continue; | 79 | continue; |
80 | if (!atag->u.mem.size) | ||
81 | continue; | ||
80 | mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start); | 82 | mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start); |
81 | mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size); | 83 | mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size); |
82 | } else if (atag->hdr.tag == ATAG_INITRD2) { | 84 | } else if (atag->hdr.tag == ATAG_INITRD2) { |
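Note: flattened for readability, the ATAG_MEM branch after this hunk reads as below; this simply restates the change (zero-sized memory tags are no longer copied into the FDT):

	} else if (atag->hdr.tag == ATAG_MEM) {
		if (memcount >= sizeof(mem_reg_property)/4)
			continue;
		if (!atag->u.mem.size)	/* new: skip empty banks */
			continue;
		mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
		mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
	}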
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 5f6045f1766c..dc7e8ce8e6be 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -273,7 +273,7 @@ restart: adr r0, LC0 | |||
273 | add r0, r0, #0x100 | 273 | add r0, r0, #0x100 |
274 | mov r1, r6 | 274 | mov r1, r6 |
275 | sub r2, sp, r6 | 275 | sub r2, sp, r6 |
276 | blne atags_to_fdt | 276 | bleq atags_to_fdt |
277 | 277 | ||
278 | ldmfd sp!, {r0-r3, ip, lr} | 278 | ldmfd sp!, {r0-r3, ip, lr} |
279 | sub sp, sp, #0x10000 | 279 | sub sp, sp, #0x10000 |
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi index 799ad1889b51..773ef484037a 100644 --- a/arch/arm/boot/dts/at91sam9g20.dtsi +++ b/arch/arm/boot/dts/at91sam9g20.dtsi | |||
@@ -55,7 +55,6 @@ | |||
55 | #interrupt-cells = <2>; | 55 | #interrupt-cells = <2>; |
56 | compatible = "atmel,at91rm9200-aic"; | 56 | compatible = "atmel,at91rm9200-aic"; |
57 | interrupt-controller; | 57 | interrupt-controller; |
58 | interrupt-parent; | ||
59 | reg = <0xfffff000 0x200>; | 58 | reg = <0xfffff000 0x200>; |
60 | }; | 59 | }; |
61 | 60 | ||
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 9e6eb6ecea0e..c8042147eaa2 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi | |||
@@ -56,7 +56,6 @@ | |||
56 | #interrupt-cells = <2>; | 56 | #interrupt-cells = <2>; |
57 | compatible = "atmel,at91rm9200-aic"; | 57 | compatible = "atmel,at91rm9200-aic"; |
58 | interrupt-controller; | 58 | interrupt-controller; |
59 | interrupt-parent; | ||
60 | reg = <0xfffff000 0x200>; | 59 | reg = <0xfffff000 0x200>; |
61 | }; | 60 | }; |
62 | 61 | ||
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 70ab3a4e026f..dd4ed748469a 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi | |||
@@ -54,7 +54,6 @@ | |||
54 | #interrupt-cells = <2>; | 54 | #interrupt-cells = <2>; |
55 | compatible = "atmel,at91rm9200-aic"; | 55 | compatible = "atmel,at91rm9200-aic"; |
56 | interrupt-controller; | 56 | interrupt-controller; |
57 | interrupt-parent; | ||
58 | reg = <0xfffff000 0x200>; | 57 | reg = <0xfffff000 0x200>; |
59 | }; | 58 | }; |
60 | 59 | ||
diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi index d73dce645667..14bc30705099 100644 --- a/arch/arm/boot/dts/db8500.dtsi +++ b/arch/arm/boot/dts/db8500.dtsi | |||
@@ -24,7 +24,6 @@ | |||
24 | #interrupt-cells = <3>; | 24 | #interrupt-cells = <3>; |
25 | #address-cells = <1>; | 25 | #address-cells = <1>; |
26 | interrupt-controller; | 26 | interrupt-controller; |
27 | interrupt-parent; | ||
28 | reg = <0xa0411000 0x1000>, | 27 | reg = <0xa0411000 0x1000>, |
29 | <0xa0410100 0x100>; | 28 | <0xa0410100 0x100>; |
30 | }; | 29 | }; |
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts index 37c0ff9c8b90..83e72294aefb 100644 --- a/arch/arm/boot/dts/highbank.dts +++ b/arch/arm/boot/dts/highbank.dts | |||
@@ -89,7 +89,6 @@ | |||
89 | #size-cells = <0>; | 89 | #size-cells = <0>; |
90 | #address-cells = <1>; | 90 | #address-cells = <1>; |
91 | interrupt-controller; | 91 | interrupt-controller; |
92 | interrupt-parent; | ||
93 | reg = <0xfff11000 0x1000>, | 92 | reg = <0xfff11000 0x1000>, |
94 | <0xfff10100 0x100>; | 93 | <0xfff10100 0x100>; |
95 | }; | 94 | }; |
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index 7a66311f3066..7e288f96cedf 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c | |||
@@ -427,19 +427,18 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent) | |||
427 | 427 | ||
428 | /* | 428 | /* |
429 | * Handle each interrupt in a single VIC. Returns non-zero if we've | 429 | * Handle each interrupt in a single VIC. Returns non-zero if we've |
430 | * handled at least one interrupt. This does a single read of the | 430 | * handled at least one interrupt. This reads the status register |
431 | * status register and handles all interrupts in order from LSB first. | 431 | * before handling each interrupt, which is necessary given that |
432 | * handle_IRQ may briefly re-enable interrupts for soft IRQ handling. | ||
432 | */ | 433 | */ |
433 | static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) | 434 | static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) |
434 | { | 435 | { |
435 | u32 stat, irq; | 436 | u32 stat, irq; |
436 | int handled = 0; | 437 | int handled = 0; |
437 | 438 | ||
438 | stat = readl_relaxed(vic->base + VIC_IRQ_STATUS); | 439 | while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { |
439 | while (stat) { | ||
440 | irq = ffs(stat) - 1; | 440 | irq = ffs(stat) - 1; |
441 | handle_IRQ(irq_find_mapping(vic->domain, irq), regs); | 441 | handle_IRQ(irq_find_mapping(vic->domain, irq), regs); |
442 | stat &= ~(1 << irq); | ||
443 | handled = 1; | 442 | handled = 1; |
444 | } | 443 | } |
445 | 444 | ||
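Note: flattened, the loop after this change re-reads VIC_IRQ_STATUS on each iteration, so interrupts raised while handle_IRQ() briefly re-enabled interrupts are serviced in the same pass (this restates the hunk above):

	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
		irq = ffs(stat) - 1;
		handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
		handled = 1;
	}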
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h index 5c5ca2ea62b0..bfc198c75913 100644 --- a/arch/arm/include/asm/jump_label.h +++ b/arch/arm/include/asm/jump_label.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #define JUMP_LABEL_NOP "nop" | 14 | #define JUMP_LABEL_NOP "nop" |
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | 17 | static __always_inline bool arch_static_branch(struct static_key *key) |
18 | { | 18 | { |
19 | asm goto("1:\n\t" | 19 | asm goto("1:\n\t" |
20 | JUMP_LABEL_NOP "\n\t" | 20 | JUMP_LABEL_NOP "\n\t" |
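Note: this follows the generic rename of struct jump_label_key to struct static_key. A hedged caller-side sketch of the renamed interface, assuming the <linux/jump_label.h> API of this kernel generation (do_rare_work() is a placeholder, not code from this patch):

	#include <linux/jump_label.h>

	static struct static_key example_key = STATIC_KEY_INIT_FALSE;

	static void example_hot_path(void)
	{
		/* compiles to a nop until the key is switched on */
		if (static_key_false(&example_key))
			do_rare_work();		/* placeholder slow path */
	}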
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index b91411371ae1..ebfac782593f 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -523,7 +523,21 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size) | |||
523 | */ | 523 | */ |
524 | size -= start & ~PAGE_MASK; | 524 | size -= start & ~PAGE_MASK; |
525 | bank->start = PAGE_ALIGN(start); | 525 | bank->start = PAGE_ALIGN(start); |
526 | bank->size = size & PAGE_MASK; | 526 | |
527 | #ifndef CONFIG_LPAE | ||
528 | if (bank->start + size < bank->start) { | ||
529 | printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " | ||
530 | "32-bit physical address space\n", (long long)start); | ||
531 | /* | ||
532 | * To ensure bank->start + bank->size is representable in | ||
533 | * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB. | ||
534 | * This means we lose a page after masking. | ||
535 | */ | ||
536 | size = ULONG_MAX - bank->start; | ||
537 | } | ||
538 | #endif | ||
539 | |||
540 | bank->size = size & PAGE_MASK; | ||
527 | 541 | ||
528 | /* | 542 | /* |
529 | * Check whether this memory region has non-zero size or | 543 | * Check whether this memory region has non-zero size or |
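Note: a standalone worked example of the non-LPAE clamp added above, using made-up bank values and assuming 4 KiB pages (host-side illustration, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define EXAMPLE_PAGE_MASK (~(uint32_t)0xFFF)	/* 4 KiB pages assumed */

	int main(void)
	{
		uint32_t start = 0xF0000000u;	/* hypothetical bank start */
		uint32_t size  = 0x20000000u;	/* hypothetical size: start + size wraps */

		if (start + size < start)	/* would not fit in 32-bit physical space */
			size = UINT32_MAX - start;	/* clamp; one page is lost below */

		printf("bank size after clamp: 0x%08x\n",
		       (unsigned int)(size & EXAMPLE_PAGE_MASK));
		return 0;
	}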
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index fef42b21cecb..5b150afb995b 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -118,10 +118,14 @@ static int twd_cpufreq_transition(struct notifier_block *nb, | |||
118 | * The twd clock events must be reprogrammed to account for the new | 118 | * The twd clock events must be reprogrammed to account for the new |
119 | * frequency. The timer is local to a cpu, so cross-call to the | 119 | * frequency. The timer is local to a cpu, so cross-call to the |
120 | * changing cpu. | 120 | * changing cpu. |
121 | * | ||
122 | * Only wait for it to finish, if the cpu is active to avoid | ||
123 | * deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during | ||
124 | * booting of that cpu. | ||
121 | */ | 125 | */ |
122 | if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) | 126 | if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) |
123 | smp_call_function_single(freqs->cpu, twd_update_frequency, | 127 | smp_call_function_single(freqs->cpu, twd_update_frequency, |
124 | NULL, 1); | 128 | NULL, cpu_active(freqs->cpu)); |
125 | 129 | ||
126 | return NOTIFY_OK; | 130 | return NOTIFY_OK; |
127 | } | 131 | } |
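Note: restated without the side-by-side columns, the resulting call only waits for the cross-call to finish when the target cpu is already active (this is just the hunk above, flattened):

	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
		smp_call_function_single(freqs->cpu, twd_update_frequency,
					 NULL, cpu_active(freqs->cpu));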
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 0491ceef1cda..e81c35f936b5 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig | |||
@@ -368,6 +368,7 @@ comment "Flattened Device Tree based board for EXYNOS SoCs" | |||
368 | 368 | ||
369 | config MACH_EXYNOS4_DT | 369 | config MACH_EXYNOS4_DT |
370 | bool "Samsung Exynos4 Machine using device tree" | 370 | bool "Samsung Exynos4 Machine using device tree" |
371 | depends on ARCH_EXYNOS4 | ||
371 | select CPU_EXYNOS4210 | 372 | select CPU_EXYNOS4210 |
372 | select USE_OF | 373 | select USE_OF |
373 | select ARM_AMBA | 374 | select ARM_AMBA |
@@ -380,6 +381,7 @@ config MACH_EXYNOS4_DT | |||
380 | 381 | ||
381 | config MACH_EXYNOS5_DT | 382 | config MACH_EXYNOS5_DT |
382 | bool "SAMSUNG EXYNOS5 Machine using device tree" | 383 | bool "SAMSUNG EXYNOS5 Machine using device tree" |
384 | depends on ARCH_EXYNOS5 | ||
383 | select SOC_EXYNOS5250 | 385 | select SOC_EXYNOS5250 |
384 | select USE_OF | 386 | select USE_OF |
385 | select ARM_AMBA | 387 | select ARM_AMBA |
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h index 9bee8535d9e0..591e78521a9f 100644 --- a/arch/arm/mach-exynos/include/mach/irqs.h +++ b/arch/arm/mach-exynos/include/mach/irqs.h | |||
@@ -212,6 +212,8 @@ | |||
212 | #define IRQ_MFC EXYNOS4_IRQ_MFC | 212 | #define IRQ_MFC EXYNOS4_IRQ_MFC |
213 | #define IRQ_SDO EXYNOS4_IRQ_SDO | 213 | #define IRQ_SDO EXYNOS4_IRQ_SDO |
214 | 214 | ||
215 | #define IRQ_I2S0 EXYNOS4_IRQ_I2S0 | ||
216 | |||
215 | #define IRQ_ADC EXYNOS4_IRQ_ADC0 | 217 | #define IRQ_ADC EXYNOS4_IRQ_ADC0 |
216 | #define IRQ_TC EXYNOS4_IRQ_PEN0 | 218 | #define IRQ_TC EXYNOS4_IRQ_PEN0 |
217 | 219 | ||
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h index 024d38ff1718..6e6d11ff352a 100644 --- a/arch/arm/mach-exynos/include/mach/map.h +++ b/arch/arm/mach-exynos/include/mach/map.h | |||
@@ -89,6 +89,10 @@ | |||
89 | #define EXYNOS4_PA_MDMA1 0x12840000 | 89 | #define EXYNOS4_PA_MDMA1 0x12840000 |
90 | #define EXYNOS4_PA_PDMA0 0x12680000 | 90 | #define EXYNOS4_PA_PDMA0 0x12680000 |
91 | #define EXYNOS4_PA_PDMA1 0x12690000 | 91 | #define EXYNOS4_PA_PDMA1 0x12690000 |
92 | #define EXYNOS5_PA_MDMA0 0x10800000 | ||
93 | #define EXYNOS5_PA_MDMA1 0x11C10000 | ||
94 | #define EXYNOS5_PA_PDMA0 0x121A0000 | ||
95 | #define EXYNOS5_PA_PDMA1 0x121B0000 | ||
92 | 96 | ||
93 | #define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000 | 97 | #define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000 |
94 | #define EXYNOS4_PA_SYSMMU_SSS 0x10A50000 | 98 | #define EXYNOS4_PA_SYSMMU_SSS 0x10A50000 |
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h index e141c1fd68d8..d9578a58ae7f 100644 --- a/arch/arm/mach-exynos/include/mach/regs-clock.h +++ b/arch/arm/mach-exynos/include/mach/regs-clock.h | |||
@@ -255,9 +255,15 @@ | |||
255 | 255 | ||
256 | /* For EXYNOS5250 */ | 256 | /* For EXYNOS5250 */ |
257 | 257 | ||
258 | #define EXYNOS5_APLL_LOCK EXYNOS_CLKREG(0x00000) | ||
258 | #define EXYNOS5_APLL_CON0 EXYNOS_CLKREG(0x00100) | 259 | #define EXYNOS5_APLL_CON0 EXYNOS_CLKREG(0x00100) |
259 | #define EXYNOS5_CLKSRC_CPU EXYNOS_CLKREG(0x00200) | 260 | #define EXYNOS5_CLKSRC_CPU EXYNOS_CLKREG(0x00200) |
261 | #define EXYNOS5_CLKMUX_STATCPU EXYNOS_CLKREG(0x00400) | ||
260 | #define EXYNOS5_CLKDIV_CPU0 EXYNOS_CLKREG(0x00500) | 262 | #define EXYNOS5_CLKDIV_CPU0 EXYNOS_CLKREG(0x00500) |
263 | #define EXYNOS5_CLKDIV_CPU1 EXYNOS_CLKREG(0x00504) | ||
264 | #define EXYNOS5_CLKDIV_STATCPU0 EXYNOS_CLKREG(0x00600) | ||
265 | #define EXYNOS5_CLKDIV_STATCPU1 EXYNOS_CLKREG(0x00604) | ||
266 | |||
261 | #define EXYNOS5_MPLL_CON0 EXYNOS_CLKREG(0x04100) | 267 | #define EXYNOS5_MPLL_CON0 EXYNOS_CLKREG(0x04100) |
262 | #define EXYNOS5_CLKSRC_CORE1 EXYNOS_CLKREG(0x04204) | 268 | #define EXYNOS5_CLKSRC_CORE1 EXYNOS_CLKREG(0x04204) |
263 | 269 | ||
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c index 0d26f50081ad..4711c8920e37 100644 --- a/arch/arm/mach-exynos/mach-exynos5-dt.c +++ b/arch/arm/mach-exynos/mach-exynos5-dt.c | |||
@@ -45,7 +45,7 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = { | |||
45 | "exynos4210-uart.3", NULL), | 45 | "exynos4210-uart.3", NULL), |
46 | OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL), | 46 | OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL), |
47 | OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL), | 47 | OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL), |
48 | OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.2", NULL), | 48 | OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL), |
49 | {}, | 49 | {}, |
50 | }; | 50 | }; |
51 | 51 | ||
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c index b3982c867c9c..b4f1f902ce6d 100644 --- a/arch/arm/mach-exynos/mach-nuri.c +++ b/arch/arm/mach-exynos/mach-nuri.c | |||
@@ -307,49 +307,7 @@ static struct i2c_board_info i2c1_devs[] __initdata = { | |||
307 | }; | 307 | }; |
308 | 308 | ||
309 | /* TSP */ | 309 | /* TSP */ |
310 | static u8 mxt_init_vals[] = { | ||
311 | /* MXT_GEN_COMMAND(6) */ | ||
312 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
313 | /* MXT_GEN_POWER(7) */ | ||
314 | 0x20, 0xff, 0x32, | ||
315 | /* MXT_GEN_ACQUIRE(8) */ | ||
316 | 0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23, | ||
317 | /* MXT_TOUCH_MULTI(9) */ | ||
318 | 0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00, | ||
319 | 0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, | ||
320 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
321 | 0x00, | ||
322 | /* MXT_TOUCH_KEYARRAY(15) */ | ||
323 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, | ||
324 | 0x00, | ||
325 | /* MXT_SPT_GPIOPWM(19) */ | ||
326 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
327 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
328 | /* MXT_PROCI_GRIPFACE(20) */ | ||
329 | 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04, | ||
330 | 0x0f, 0x0a, | ||
331 | /* MXT_PROCG_NOISE(22) */ | ||
332 | 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00, | ||
333 | 0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03, | ||
334 | /* MXT_TOUCH_PROXIMITY(23) */ | ||
335 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
336 | 0x00, 0x00, 0x00, 0x00, 0x00, | ||
337 | /* MXT_PROCI_ONETOUCH(24) */ | ||
338 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
339 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
340 | /* MXT_SPT_SELFTEST(25) */ | ||
341 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
342 | 0x00, 0x00, 0x00, 0x00, | ||
343 | /* MXT_PROCI_TWOTOUCH(27) */ | ||
344 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | ||
345 | /* MXT_SPT_CTECONFIG(28) */ | ||
346 | 0x00, 0x00, 0x02, 0x08, 0x10, 0x00, | ||
347 | }; | ||
348 | |||
349 | static struct mxt_platform_data mxt_platform_data = { | 310 | static struct mxt_platform_data mxt_platform_data = { |
350 | .config = mxt_init_vals, | ||
351 | .config_length = ARRAY_SIZE(mxt_init_vals), | ||
352 | |||
353 | .x_line = 18, | 311 | .x_line = 18, |
354 | .y_line = 11, | 312 | .y_line = 11, |
355 | .x_size = 1024, | 313 | .x_size = 1024, |
@@ -571,7 +529,7 @@ static struct regulator_init_data __initdata max8997_ldo7_data = { | |||
571 | 529 | ||
572 | static struct regulator_init_data __initdata max8997_ldo8_data = { | 530 | static struct regulator_init_data __initdata max8997_ldo8_data = { |
573 | .constraints = { | 531 | .constraints = { |
574 | .name = "VUSB/VDAC_3.3V_C210", | 532 | .name = "VUSB+VDAC_3.3V_C210", |
575 | .min_uV = 3300000, | 533 | .min_uV = 3300000, |
576 | .max_uV = 3300000, | 534 | .max_uV = 3300000, |
577 | .valid_ops_mask = REGULATOR_CHANGE_STATUS, | 535 | .valid_ops_mask = REGULATOR_CHANGE_STATUS, |
@@ -1347,6 +1305,7 @@ static struct platform_device *nuri_devices[] __initdata = { | |||
1347 | 1305 | ||
1348 | static void __init nuri_map_io(void) | 1306 | static void __init nuri_map_io(void) |
1349 | { | 1307 | { |
1308 | clk_xusbxti.rate = 24000000; | ||
1350 | exynos_init_io(NULL, 0); | 1309 | exynos_init_io(NULL, 0); |
1351 | s3c24xx_init_clocks(24000000); | 1310 | s3c24xx_init_clocks(24000000); |
1352 | s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs)); | 1311 | s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs)); |
@@ -1379,7 +1338,6 @@ static void __init nuri_machine_init(void) | |||
1379 | nuri_camera_init(); | 1338 | nuri_camera_init(); |
1380 | 1339 | ||
1381 | nuri_ehci_init(); | 1340 | nuri_ehci_init(); |
1382 | clk_xusbxti.rate = 24000000; | ||
1383 | 1341 | ||
1384 | /* Last */ | 1342 | /* Last */ |
1385 | platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices)); | 1343 | platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices)); |
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index 6bb9dbdd73fd..7ebf79c2ab34 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/mach-types.h> | 29 | #include <asm/mach-types.h> |
30 | 30 | ||
31 | #include <plat/regs-serial.h> | 31 | #include <plat/regs-serial.h> |
32 | #include <plat/clock.h> | ||
32 | #include <plat/cpu.h> | 33 | #include <plat/cpu.h> |
33 | #include <plat/devs.h> | 34 | #include <plat/devs.h> |
34 | #include <plat/iic.h> | 35 | #include <plat/iic.h> |
@@ -1057,6 +1058,7 @@ static struct platform_device *universal_devices[] __initdata = { | |||
1057 | 1058 | ||
1058 | static void __init universal_map_io(void) | 1059 | static void __init universal_map_io(void) |
1059 | { | 1060 | { |
1061 | clk_xusbxti.rate = 24000000; | ||
1060 | exynos_init_io(NULL, 0); | 1062 | exynos_init_io(NULL, 0); |
1061 | s3c24xx_init_clocks(24000000); | 1063 | s3c24xx_init_clocks(24000000); |
1062 | s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); | 1064 | s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); |
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index 3698a370d636..26aac363a064 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c | |||
@@ -86,9 +86,6 @@ static void __init halibut_init(void) | |||
86 | static void __init halibut_fixup(struct tag *tags, char **cmdline, | 86 | static void __init halibut_fixup(struct tag *tags, char **cmdline, |
87 | struct meminfo *mi) | 87 | struct meminfo *mi) |
88 | { | 88 | { |
89 | mi->nr_banks=1; | ||
90 | mi->bank[0].start = PHYS_OFFSET; | ||
91 | mi->bank[0].size = (101*1024*1024); | ||
92 | } | 89 | } |
93 | 90 | ||
94 | static void __init halibut_map_io(void) | 91 | static void __init halibut_map_io(void) |
diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c index 25105c1027fe..89bf6b426699 100644 --- a/arch/arm/mach-msm/board-trout-panel.c +++ b/arch/arm/mach-msm/board-trout-panel.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/mach-types.h> | 14 | #include <asm/mach-types.h> |
15 | #include <asm/system_info.h> | ||
15 | 16 | ||
16 | #include <mach/msm_fb.h> | 17 | #include <mach/msm_fb.h> |
17 | #include <mach/vreg.h> | 18 | #include <mach/vreg.h> |
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c index 5414f76ec0a9..d4060a37e23d 100644 --- a/arch/arm/mach-msm/board-trout.c +++ b/arch/arm/mach-msm/board-trout.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/clkdev.h> | 20 | #include <linux/clkdev.h> |
21 | 21 | ||
22 | #include <asm/system_info.h> | ||
22 | #include <asm/mach-types.h> | 23 | #include <asm/mach-types.h> |
23 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
24 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c index 67e701c7f183..9980dc736e7b 100644 --- a/arch/arm/mach-msm/proc_comm.c +++ b/arch/arm/mach-msm/proc_comm.c | |||
@@ -121,7 +121,7 @@ int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2) | |||
121 | * and unknown state. This function should be called early to | 121 | * and unknown state. This function should be called early to |
122 | * wait on the ARM9. | 122 | * wait on the ARM9. |
123 | */ | 123 | */ |
124 | void __init proc_comm_boot_wait(void) | 124 | void __devinit proc_comm_boot_wait(void) |
125 | { | 125 | { |
126 | void __iomem *base = MSM_SHARED_RAM_BASE; | 126 | void __iomem *base = MSM_SHARED_RAM_BASE; |
127 | 127 | ||
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c index 7072e0d651b1..3d9d746b221a 100644 --- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c +++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c | |||
@@ -165,83 +165,3 @@ int omap2_select_table_rate(struct clk *clk, unsigned long rate) | |||
165 | 165 | ||
166 | return 0; | 166 | return 0; |
167 | } | 167 | } |
168 | |||
169 | #ifdef CONFIG_CPU_FREQ | ||
170 | /* | ||
171 | * Walk PRCM rate table and fillout cpufreq freq_table | ||
172 | * XXX This should be replaced by an OPP layer in the near future | ||
173 | */ | ||
174 | static struct cpufreq_frequency_table *freq_table; | ||
175 | |||
176 | void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table) | ||
177 | { | ||
178 | const struct prcm_config *prcm; | ||
179 | int i = 0; | ||
180 | int tbl_sz = 0; | ||
181 | |||
182 | if (!cpu_is_omap24xx()) | ||
183 | return; | ||
184 | |||
185 | for (prcm = rate_table; prcm->mpu_speed; prcm++) { | ||
186 | if (!(prcm->flags & cpu_mask)) | ||
187 | continue; | ||
188 | if (prcm->xtal_speed != sclk->rate) | ||
189 | continue; | ||
190 | |||
191 | /* don't put bypass rates in table */ | ||
192 | if (prcm->dpll_speed == prcm->xtal_speed) | ||
193 | continue; | ||
194 | |||
195 | tbl_sz++; | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * XXX Ensure that we're doing what CPUFreq expects for this error | ||
200 | * case and the following one | ||
201 | */ | ||
202 | if (tbl_sz == 0) { | ||
203 | pr_warning("%s: no matching entries in rate_table\n", | ||
204 | __func__); | ||
205 | return; | ||
206 | } | ||
207 | |||
208 | /* Include the CPUFREQ_TABLE_END terminator entry */ | ||
209 | tbl_sz++; | ||
210 | |||
211 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * tbl_sz, | ||
212 | GFP_ATOMIC); | ||
213 | if (!freq_table) { | ||
214 | pr_err("%s: could not kzalloc frequency table\n", __func__); | ||
215 | return; | ||
216 | } | ||
217 | |||
218 | for (prcm = rate_table; prcm->mpu_speed; prcm++) { | ||
219 | if (!(prcm->flags & cpu_mask)) | ||
220 | continue; | ||
221 | if (prcm->xtal_speed != sclk->rate) | ||
222 | continue; | ||
223 | |||
224 | /* don't put bypass rates in table */ | ||
225 | if (prcm->dpll_speed == prcm->xtal_speed) | ||
226 | continue; | ||
227 | |||
228 | freq_table[i].index = i; | ||
229 | freq_table[i].frequency = prcm->mpu_speed / 1000; | ||
230 | i++; | ||
231 | } | ||
232 | |||
233 | freq_table[i].index = i; | ||
234 | freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
235 | |||
236 | *table = &freq_table[0]; | ||
237 | } | ||
238 | |||
239 | void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table) | ||
240 | { | ||
241 | if (!cpu_is_omap24xx()) | ||
242 | return; | ||
243 | |||
244 | kfree(freq_table); | ||
245 | } | ||
246 | |||
247 | #endif | ||
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index f57ed5baeccf..d9f4931513f9 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c | |||
@@ -536,10 +536,5 @@ struct clk_functions omap2_clk_functions = { | |||
536 | .clk_set_rate = omap2_clk_set_rate, | 536 | .clk_set_rate = omap2_clk_set_rate, |
537 | .clk_set_parent = omap2_clk_set_parent, | 537 | .clk_set_parent = omap2_clk_set_parent, |
538 | .clk_disable_unused = omap2_clk_disable_unused, | 538 | .clk_disable_unused = omap2_clk_disable_unused, |
539 | #ifdef CONFIG_CPU_FREQ | ||
540 | /* These will be removed when the OPP code is integrated */ | ||
541 | .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table, | ||
542 | .clk_exit_cpufreq_table = omap2_clk_exit_cpufreq_table, | ||
543 | #endif | ||
544 | }; | 539 | }; |
545 | 540 | ||
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index b8c2a686481c..a1bb23a23351 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h | |||
@@ -146,14 +146,6 @@ extern const struct clksel_rate gpt_sys_rates[]; | |||
146 | extern const struct clksel_rate gfx_l3_rates[]; | 146 | extern const struct clksel_rate gfx_l3_rates[]; |
147 | extern const struct clksel_rate dsp_ick_rates[]; | 147 | extern const struct clksel_rate dsp_ick_rates[]; |
148 | 148 | ||
149 | #if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_CPU_FREQ) | ||
150 | extern void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table); | ||
151 | extern void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table); | ||
152 | #else | ||
153 | #define omap2_clk_init_cpufreq_table 0 | ||
154 | #define omap2_clk_exit_cpufreq_table 0 | ||
155 | #endif | ||
156 | |||
157 | extern const struct clkops clkops_omap2_iclk_dflt_wait; | 149 | extern const struct clkops clkops_omap2_iclk_dflt_wait; |
158 | extern const struct clkops clkops_omap2_iclk_dflt; | 150 | extern const struct clkops clkops_omap2_iclk_dflt; |
159 | extern const struct clkops clkops_omap2_iclk_idle_only; | 151 | extern const struct clkops clkops_omap2_iclk_idle_only; |
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c index 86ce62f66190..b8337e248b09 100644 --- a/arch/arm/mach-s5pv210/dma.c +++ b/arch/arm/mach-s5pv210/dma.c | |||
@@ -33,8 +33,6 @@ | |||
33 | #include <mach/irqs.h> | 33 | #include <mach/irqs.h> |
34 | #include <mach/dma.h> | 34 | #include <mach/dma.h> |
35 | 35 | ||
36 | static u64 dma_dmamask = DMA_BIT_MASK(32); | ||
37 | |||
38 | static u8 pdma0_peri[] = { | 36 | static u8 pdma0_peri[] = { |
39 | DMACH_UART0_RX, | 37 | DMACH_UART0_RX, |
40 | DMACH_UART0_TX, | 38 | DMACH_UART0_TX, |
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index a9ea64e0da0d..48d018f2332b 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c | |||
@@ -484,8 +484,8 @@ static struct wm8994_pdata wm8994_platform_data = { | |||
484 | .gpio_defaults[8] = 0x0100, | 484 | .gpio_defaults[8] = 0x0100, |
485 | .gpio_defaults[9] = 0x0100, | 485 | .gpio_defaults[9] = 0x0100, |
486 | .gpio_defaults[10] = 0x0100, | 486 | .gpio_defaults[10] = 0x0100, |
487 | .ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */ | 487 | .ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data }, /* XM0FRNB_2 */ |
488 | .ldo[1] = { 0, NULL, &wm8994_ldo2_data }, | 488 | .ldo[1] = { 0, &wm8994_ldo2_data }, |
489 | }; | 489 | }; |
490 | 490 | ||
491 | /* GPIO I2C PMIC */ | 491 | /* GPIO I2C PMIC */ |
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index 2cf5ed75f390..a8933de3d627 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c | |||
@@ -674,8 +674,8 @@ static struct wm8994_pdata wm8994_platform_data = { | |||
674 | .gpio_defaults[8] = 0x0100, | 674 | .gpio_defaults[8] = 0x0100, |
675 | .gpio_defaults[9] = 0x0100, | 675 | .gpio_defaults[9] = 0x0100, |
676 | .gpio_defaults[10] = 0x0100, | 676 | .gpio_defaults[10] = 0x0100, |
677 | .ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */ | 677 | .ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data }, /* XM0FRNB_2 */ |
678 | .ldo[1] = { 0, NULL, &wm8994_ldo2_data }, | 678 | .ldo[1] = { 0, &wm8994_ldo2_data }, |
679 | }; | 679 | }; |
680 | 680 | ||
681 | /* GPIO I2C PMIC */ | 681 | /* GPIO I2C PMIC */ |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 7edef9121632..7c8a7d8467bf 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -723,7 +723,7 @@ config CPU_HIGH_VECTOR | |||
723 | bool "Select the High exception vector" | 723 | bool "Select the High exception vector" |
724 | help | 724 | help |
725 | Say Y here to select high exception vector(0xFFFF0000~). | 725 | Say Y here to select high exception vector(0xFFFF0000~). |
726 | The exception vector can be vary depending on the platform | 726 | The exception vector can vary depending on the platform |
727 | design in nommu mode. If your platform needs to select | 727 | design in nommu mode. If your platform needs to select |
728 | high exception vector, say Y. | 728 | high exception vector, say Y. |
729 | Otherwise or if you are unsure, say N, and the low exception | 729 | Otherwise or if you are unsure, say N, and the low exception |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 9055b5a84ec5..f07467533365 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -320,7 +320,7 @@ retry: | |||
320 | */ | 320 | */ |
321 | 321 | ||
322 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); | 322 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); |
323 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 323 | if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) { |
324 | if (fault & VM_FAULT_MAJOR) { | 324 | if (fault & VM_FAULT_MAJOR) { |
325 | tsk->maj_flt++; | 325 | tsk->maj_flt++; |
326 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | 326 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 6486d2f253cd..d51225f90ae2 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
16 | #include <asm/traps.h> | ||
16 | #include <asm/mach/arch.h> | 17 | #include <asm/mach/arch.h> |
17 | 18 | ||
18 | #include "mm.h" | 19 | #include "mm.h" |
@@ -39,6 +40,7 @@ void __init sanity_check_meminfo(void) | |||
39 | */ | 40 | */ |
40 | void __init paging_init(struct machine_desc *mdesc) | 41 | void __init paging_init(struct machine_desc *mdesc) |
41 | { | 42 | { |
43 | early_trap_init((void *)CONFIG_VECTORS_BASE); | ||
42 | bootmem_init(); | 44 | bootmem_init(); |
43 | } | 45 | } |
44 | 46 | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index f1c8486f7501..c2e2b66f72b5 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -255,6 +255,18 @@ __v7_setup: | |||
255 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR | 255 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR |
256 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR | 256 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR |
257 | #endif | 257 | #endif |
258 | #ifndef CONFIG_ARM_THUMBEE | ||
259 | mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE | ||
260 | and r0, r0, #(0xf << 12) @ ThumbEE enabled field | ||
261 | teq r0, #(1 << 12) @ check if ThumbEE is present | ||
262 | bne 1f | ||
263 | mov r5, #0 | ||
264 | mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0 | ||
265 | mrc p14, 6, r0, c0, c0, 0 @ load TEECR | ||
266 | orr r0, r0, #1 @ set the 1st bit in order to | ||
267 | mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access | ||
268 | 1: | ||
269 | #endif | ||
258 | adr r5, v7_crval | 270 | adr r5, v7_crval |
259 | ldmia r5, {r5, r6} | 271 | ldmia r5, {r5, r6} |
260 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 272 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c index 8506cbb7fea4..62ec5c452792 100644 --- a/arch/arm/plat-omap/clock.c +++ b/arch/arm/plat-omap/clock.c | |||
@@ -398,32 +398,6 @@ struct clk dummy_ck = { | |||
398 | .ops = &clkops_null, | 398 | .ops = &clkops_null, |
399 | }; | 399 | }; |
400 | 400 | ||
401 | #ifdef CONFIG_CPU_FREQ | ||
402 | void clk_init_cpufreq_table(struct cpufreq_frequency_table **table) | ||
403 | { | ||
404 | unsigned long flags; | ||
405 | |||
406 | if (!arch_clock || !arch_clock->clk_init_cpufreq_table) | ||
407 | return; | ||
408 | |||
409 | spin_lock_irqsave(&clockfw_lock, flags); | ||
410 | arch_clock->clk_init_cpufreq_table(table); | ||
411 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
412 | } | ||
413 | |||
414 | void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table) | ||
415 | { | ||
416 | unsigned long flags; | ||
417 | |||
418 | if (!arch_clock || !arch_clock->clk_exit_cpufreq_table) | ||
419 | return; | ||
420 | |||
421 | spin_lock_irqsave(&clockfw_lock, flags); | ||
422 | arch_clock->clk_exit_cpufreq_table(table); | ||
423 | spin_unlock_irqrestore(&clockfw_lock, flags); | ||
424 | } | ||
425 | #endif | ||
426 | |||
427 | /* | 401 | /* |
428 | * | 402 | * |
429 | */ | 403 | */ |
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h index 240a7b9fd946..d0ef57c1d71b 100644 --- a/arch/arm/plat-omap/include/plat/clock.h +++ b/arch/arm/plat-omap/include/plat/clock.h | |||
@@ -272,8 +272,6 @@ struct clk { | |||
272 | #endif | 272 | #endif |
273 | }; | 273 | }; |
274 | 274 | ||
275 | struct cpufreq_frequency_table; | ||
276 | |||
277 | struct clk_functions { | 275 | struct clk_functions { |
278 | int (*clk_enable)(struct clk *clk); | 276 | int (*clk_enable)(struct clk *clk); |
279 | void (*clk_disable)(struct clk *clk); | 277 | void (*clk_disable)(struct clk *clk); |
@@ -283,10 +281,6 @@ struct clk_functions { | |||
283 | void (*clk_allow_idle)(struct clk *clk); | 281 | void (*clk_allow_idle)(struct clk *clk); |
284 | void (*clk_deny_idle)(struct clk *clk); | 282 | void (*clk_deny_idle)(struct clk *clk); |
285 | void (*clk_disable_unused)(struct clk *clk); | 283 | void (*clk_disable_unused)(struct clk *clk); |
286 | #ifdef CONFIG_CPU_FREQ | ||
287 | void (*clk_init_cpufreq_table)(struct cpufreq_frequency_table **); | ||
288 | void (*clk_exit_cpufreq_table)(struct cpufreq_frequency_table **); | ||
289 | #endif | ||
290 | }; | 284 | }; |
291 | 285 | ||
292 | extern int mpurate; | 286 | extern int mpurate; |
@@ -301,10 +295,6 @@ extern void recalculate_root_clocks(void); | |||
301 | extern unsigned long followparent_recalc(struct clk *clk); | 295 | extern unsigned long followparent_recalc(struct clk *clk); |
302 | extern void clk_enable_init_clocks(void); | 296 | extern void clk_enable_init_clocks(void); |
303 | unsigned long omap_fixed_divisor_recalc(struct clk *clk); | 297 | unsigned long omap_fixed_divisor_recalc(struct clk *clk); |
304 | #ifdef CONFIG_CPU_FREQ | ||
305 | extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table); | ||
306 | extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table); | ||
307 | #endif | ||
308 | extern struct clk *omap_clk_get_by_name(const char *name); | 298 | extern struct clk *omap_clk_get_by_name(const char *name); |
309 | extern int omap_clk_enable_autoidle_all(void); | 299 | extern int omap_clk_enable_autoidle_all(void); |
310 | extern int omap_clk_disable_autoidle_all(void); | 300 | extern int omap_clk_disable_autoidle_all(void); |
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index 71553f410016..a0ffc77da809 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig | |||
@@ -302,6 +302,7 @@ comment "Power management" | |||
302 | config SAMSUNG_PM_DEBUG | 302 | config SAMSUNG_PM_DEBUG |
303 | bool "S3C2410 PM Suspend debug" | 303 | bool "S3C2410 PM Suspend debug" |
304 | depends on PM | 304 | depends on PM |
305 | select DEBUG_LL | ||
305 | help | 306 | help |
306 | Say Y here if you want verbose debugging from the PM Suspend and | 307 | Say Y here if you want verbose debugging from the PM Suspend and |
307 | Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt> | 308 | Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt> |
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h index f13b78d5e1ca..ab4577f93d96 100644 --- a/arch/c6x/include/asm/irq.h +++ b/arch/c6x/include/asm/irq.h | |||
@@ -42,10 +42,6 @@ | |||
42 | /* This number is used when no interrupt has been assigned */ | 42 | /* This number is used when no interrupt has been assigned */ |
43 | #define NO_IRQ 0 | 43 | #define NO_IRQ 0 |
44 | 44 | ||
45 | struct irq_data; | ||
46 | extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); | ||
47 | extern irq_hw_number_t virq_to_hw(unsigned int virq); | ||
48 | |||
49 | extern void __init init_pic_c64xplus(void); | 45 | extern void __init init_pic_c64xplus(void); |
50 | 46 | ||
51 | extern void init_IRQ(void); | 47 | extern void init_IRQ(void); |
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c index 65b8ddf54b44..c90fb5e82ad7 100644 --- a/arch/c6x/kernel/irq.c +++ b/arch/c6x/kernel/irq.c | |||
@@ -130,16 +130,3 @@ int arch_show_interrupts(struct seq_file *p, int prec) | |||
130 | seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); | 130 | seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); |
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | |||
134 | irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | ||
135 | { | ||
136 | return d->hwirq; | ||
137 | } | ||
138 | EXPORT_SYMBOL_GPL(irqd_to_hwirq); | ||
139 | |||
140 | irq_hw_number_t virq_to_hw(unsigned int virq) | ||
141 | { | ||
142 | struct irq_data *irq_data = irq_get_irq_data(virq); | ||
143 | return WARN_ON(!irq_data) ? 0 : irq_data->hwirq; | ||
144 | } | ||
145 | EXPORT_SYMBOL_GPL(virq_to_hw); | ||
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h index 4c96187e2049..4f37dbbb8640 100644 --- a/arch/ia64/include/asm/cmpxchg.h +++ b/arch/ia64/include/asm/cmpxchg.h | |||
@@ -1 +1,147 @@ | |||
1 | #include <asm/intrinsics.h> | 1 | #ifndef _ASM_IA64_CMPXCHG_H |
2 | #define _ASM_IA64_CMPXCHG_H | ||
3 | |||
4 | /* | ||
5 | * Compare/Exchange, forked from asm/intrinsics.h | ||
6 | * which was: | ||
7 | * | ||
8 | * Copyright (C) 2002-2003 Hewlett-Packard Co | ||
9 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | /* include compiler specific intrinsics */ | ||
16 | #include <asm/ia64regs.h> | ||
17 | #ifdef __INTEL_COMPILER | ||
18 | # include <asm/intel_intrin.h> | ||
19 | #else | ||
20 | # include <asm/gcc_intrin.h> | ||
21 | #endif | ||
22 | |||
23 | /* | ||
24 | * This function doesn't exist, so you'll get a linker error if | ||
25 | * something tries to do an invalid xchg(). | ||
26 | */ | ||
27 | extern void ia64_xchg_called_with_bad_pointer(void); | ||
28 | |||
29 | #define __xchg(x, ptr, size) \ | ||
30 | ({ \ | ||
31 | unsigned long __xchg_result; \ | ||
32 | \ | ||
33 | switch (size) { \ | ||
34 | case 1: \ | ||
35 | __xchg_result = ia64_xchg1((__u8 *)ptr, x); \ | ||
36 | break; \ | ||
37 | \ | ||
38 | case 2: \ | ||
39 | __xchg_result = ia64_xchg2((__u16 *)ptr, x); \ | ||
40 | break; \ | ||
41 | \ | ||
42 | case 4: \ | ||
43 | __xchg_result = ia64_xchg4((__u32 *)ptr, x); \ | ||
44 | break; \ | ||
45 | \ | ||
46 | case 8: \ | ||
47 | __xchg_result = ia64_xchg8((__u64 *)ptr, x); \ | ||
48 | break; \ | ||
49 | default: \ | ||
50 | ia64_xchg_called_with_bad_pointer(); \ | ||
51 | } \ | ||
52 | __xchg_result; \ | ||
53 | }) | ||
54 | |||
55 | #define xchg(ptr, x) \ | ||
56 | ((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)))) | ||
57 | |||
58 | /* | ||
59 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | ||
60 | * store NEW in MEM. Return the initial value in MEM. Success is | ||
61 | * indicated by comparing RETURN with OLD. | ||
62 | */ | ||
63 | |||
64 | #define __HAVE_ARCH_CMPXCHG 1 | ||
65 | |||
66 | /* | ||
67 | * This function doesn't exist, so you'll get a linker error | ||
68 | * if something tries to do an invalid cmpxchg(). | ||
69 | */ | ||
70 | extern long ia64_cmpxchg_called_with_bad_pointer(void); | ||
71 | |||
72 | #define ia64_cmpxchg(sem, ptr, old, new, size) \ | ||
73 | ({ \ | ||
74 | __u64 _o_, _r_; \ | ||
75 | \ | ||
76 | switch (size) { \ | ||
77 | case 1: \ | ||
78 | _o_ = (__u8) (long) (old); \ | ||
79 | break; \ | ||
80 | case 2: \ | ||
81 | _o_ = (__u16) (long) (old); \ | ||
82 | break; \ | ||
83 | case 4: \ | ||
84 | _o_ = (__u32) (long) (old); \ | ||
85 | break; \ | ||
86 | case 8: \ | ||
87 | _o_ = (__u64) (long) (old); \ | ||
88 | break; \ | ||
89 | default: \ | ||
90 | break; \ | ||
91 | } \ | ||
92 | switch (size) { \ | ||
93 | case 1: \ | ||
94 | _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \ | ||
95 | break; \ | ||
96 | \ | ||
97 | case 2: \ | ||
98 | _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \ | ||
99 | break; \ | ||
100 | \ | ||
101 | case 4: \ | ||
102 | _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \ | ||
103 | break; \ | ||
104 | \ | ||
105 | case 8: \ | ||
106 | _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \ | ||
107 | break; \ | ||
108 | \ | ||
109 | default: \ | ||
110 | _r_ = ia64_cmpxchg_called_with_bad_pointer(); \ | ||
111 | break; \ | ||
112 | } \ | ||
113 | (__typeof__(old)) _r_; \ | ||
114 | }) | ||
115 | |||
116 | #define cmpxchg_acq(ptr, o, n) \ | ||
117 | ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr))) | ||
118 | #define cmpxchg_rel(ptr, o, n) \ | ||
119 | ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) | ||
120 | |||
121 | /* for compatibility with other platforms: */ | ||
122 | #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) | ||
123 | #define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) | ||
124 | |||
125 | #define cmpxchg_local cmpxchg | ||
126 | #define cmpxchg64_local cmpxchg64 | ||
127 | |||
128 | #ifdef CONFIG_IA64_DEBUG_CMPXCHG | ||
129 | # define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128; | ||
130 | # define CMPXCHG_BUGCHECK(v) \ | ||
131 | do { \ | ||
132 | if (_cmpxchg_bugcheck_count-- <= 0) { \ | ||
133 | void *ip; \ | ||
134 | extern int printk(const char *fmt, ...); \ | ||
135 | ip = (void *) ia64_getreg(_IA64_REG_IP); \ | ||
136 | printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\ | ||
137 | break; \ | ||
138 | } \ | ||
139 | } while (0) | ||
140 | #else /* !CONFIG_IA64_DEBUG_CMPXCHG */ | ||
141 | # define CMPXCHG_BUGCHECK_DECL | ||
142 | # define CMPXCHG_BUGCHECK(v) | ||
143 | #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ | ||
144 | |||
145 | #endif /* !__ASSEMBLY__ */ | ||
146 | |||
147 | #endif /* _ASM_IA64_CMPXCHG_H */ | ||
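Note: the acquire and release variants declared here differ only in the memory ordering they impose. A minimal caller-side sketch with made-up names (illustrative only; cpu_relax() is the usual kernel busy-wait hint):

	/* hypothetical lock word: 0 = free, 1 = held */
	static void example_acquire(unsigned int *lock)
	{
		while (cmpxchg_acq(lock, 0, 1) != 0)	/* acquire ordering on success */
			cpu_relax();
	}

	static void example_release(unsigned int *lock)
	{
		cmpxchg_rel(lock, 1, 0);	/* release ordering when dropping it */
	}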
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index 0ab82cc2dc8f..d2bf1fd5e44f 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h | |||
@@ -106,15 +106,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
106 | return -EFAULT; | 106 | return -EFAULT; |
107 | 107 | ||
108 | { | 108 | { |
109 | register unsigned long r8 __asm ("r8") = 0; | 109 | register unsigned long r8 __asm ("r8"); |
110 | unsigned long prev; | 110 | unsigned long prev; |
111 | __asm__ __volatile__( | 111 | __asm__ __volatile__( |
112 | " mf;; \n" | 112 | " mf;; \n" |
113 | " mov ar.ccv=%3;; \n" | 113 | " mov %0=r0 \n" |
114 | "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" | 114 | " mov ar.ccv=%4;; \n" |
115 | "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" | ||
115 | " .xdata4 \"__ex_table\", 1b-., 2f-. \n" | 116 | " .xdata4 \"__ex_table\", 1b-., 2f-. \n" |
116 | "[2:]" | 117 | "[2:]" |
117 | : "=r" (prev) | 118 | : "=r" (r8), "=r" (prev) |
118 | : "r" (uaddr), "r" (newval), | 119 | : "r" (uaddr), "r" (newval), |
119 | "rO" ((long) (unsigned) oldval) | 120 | "rO" ((long) (unsigned) oldval) |
120 | : "memory"); | 121 | : "memory"); |
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h index e4076b511829..d129e367e764 100644 --- a/arch/ia64/include/asm/intrinsics.h +++ b/arch/ia64/include/asm/intrinsics.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #else | 18 | #else |
19 | # include <asm/gcc_intrin.h> | 19 | # include <asm/gcc_intrin.h> |
20 | #endif | 20 | #endif |
21 | #include <asm/cmpxchg.h> | ||
21 | 22 | ||
22 | #define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I) | 23 | #define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I) |
23 | 24 | ||
@@ -81,119 +82,6 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); | |||
81 | 82 | ||
82 | #define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */ | 83 | #define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */ |
83 | 84 | ||
84 | /* | ||
85 | * This function doesn't exist, so you'll get a linker error if | ||
86 | * something tries to do an invalid xchg(). | ||
87 | */ | ||
88 | extern void ia64_xchg_called_with_bad_pointer (void); | ||
89 | |||
90 | #define __xchg(x,ptr,size) \ | ||
91 | ({ \ | ||
92 | unsigned long __xchg_result; \ | ||
93 | \ | ||
94 | switch (size) { \ | ||
95 | case 1: \ | ||
96 | __xchg_result = ia64_xchg1((__u8 *)ptr, x); \ | ||
97 | break; \ | ||
98 | \ | ||
99 | case 2: \ | ||
100 | __xchg_result = ia64_xchg2((__u16 *)ptr, x); \ | ||
101 | break; \ | ||
102 | \ | ||
103 | case 4: \ | ||
104 | __xchg_result = ia64_xchg4((__u32 *)ptr, x); \ | ||
105 | break; \ | ||
106 | \ | ||
107 | case 8: \ | ||
108 | __xchg_result = ia64_xchg8((__u64 *)ptr, x); \ | ||
109 | break; \ | ||
110 | default: \ | ||
111 | ia64_xchg_called_with_bad_pointer(); \ | ||
112 | } \ | ||
113 | __xchg_result; \ | ||
114 | }) | ||
115 | |||
116 | #define xchg(ptr,x) \ | ||
117 | ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr)))) | ||
118 | |||
119 | /* | ||
120 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | ||
121 | * store NEW in MEM. Return the initial value in MEM. Success is | ||
122 | * indicated by comparing RETURN with OLD. | ||
123 | */ | ||
124 | |||
125 | #define __HAVE_ARCH_CMPXCHG 1 | ||
126 | |||
127 | /* | ||
128 | * This function doesn't exist, so you'll get a linker error | ||
129 | * if something tries to do an invalid cmpxchg(). | ||
130 | */ | ||
131 | extern long ia64_cmpxchg_called_with_bad_pointer (void); | ||
132 | |||
133 | #define ia64_cmpxchg(sem,ptr,old,new,size) \ | ||
134 | ({ \ | ||
135 | __u64 _o_, _r_; \ | ||
136 | \ | ||
137 | switch (size) { \ | ||
138 | case 1: _o_ = (__u8 ) (long) (old); break; \ | ||
139 | case 2: _o_ = (__u16) (long) (old); break; \ | ||
140 | case 4: _o_ = (__u32) (long) (old); break; \ | ||
141 | case 8: _o_ = (__u64) (long) (old); break; \ | ||
142 | default: break; \ | ||
143 | } \ | ||
144 | switch (size) { \ | ||
145 | case 1: \ | ||
146 | _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \ | ||
147 | break; \ | ||
148 | \ | ||
149 | case 2: \ | ||
150 | _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \ | ||
151 | break; \ | ||
152 | \ | ||
153 | case 4: \ | ||
154 | _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \ | ||
155 | break; \ | ||
156 | \ | ||
157 | case 8: \ | ||
158 | _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \ | ||
159 | break; \ | ||
160 | \ | ||
161 | default: \ | ||
162 | _r_ = ia64_cmpxchg_called_with_bad_pointer(); \ | ||
163 | break; \ | ||
164 | } \ | ||
165 | (__typeof__(old)) _r_; \ | ||
166 | }) | ||
167 | |||
168 | #define cmpxchg_acq(ptr, o, n) \ | ||
169 | ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr))) | ||
170 | #define cmpxchg_rel(ptr, o, n) \ | ||
171 | ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) | ||
172 | |||
173 | /* for compatibility with other platforms: */ | ||
174 | #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) | ||
175 | #define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) | ||
176 | |||
177 | #define cmpxchg_local cmpxchg | ||
178 | #define cmpxchg64_local cmpxchg64 | ||
179 | |||
180 | #ifdef CONFIG_IA64_DEBUG_CMPXCHG | ||
181 | # define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128; | ||
182 | # define CMPXCHG_BUGCHECK(v) \ | ||
183 | do { \ | ||
184 | if (_cmpxchg_bugcheck_count-- <= 0) { \ | ||
185 | void *ip; \ | ||
186 | extern int printk(const char *fmt, ...); \ | ||
187 | ip = (void *) ia64_getreg(_IA64_REG_IP); \ | ||
188 | printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \ | ||
189 | break; \ | ||
190 | } \ | ||
191 | } while (0) | ||
192 | #else /* !CONFIG_IA64_DEBUG_CMPXCHG */ | ||
193 | # define CMPXCHG_BUGCHECK_DECL | ||
194 | # define CMPXCHG_BUGCHECK(v) | ||
195 | #endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ | ||
196 | |||
197 | #endif | 85 | #endif |
198 | 86 | ||
199 | #ifdef __KERNEL__ | 87 | #ifdef __KERNEL__ |
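Illustrative sketch, not part of this patch: the cmpxchg() contract described in the comment above (return the initial value of the memory word; success is detected by comparing the return value with the expected old value) is normally consumed through a retry loop like the following, assuming the usual atomic_t layout.

	/*
	 * Sketch only: read the current value, compute the new one, and
	 * retry until the compare-and-exchange observes the value we
	 * started from.
	 */
	static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
	{
		int old, new;

		do {
			old = atomic_read(v);
			if (old == u)
				return 0;
			new = old + a;
		} while (cmpxchg(&v->counter, old, new) != old);

		return 1;
	}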
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index cf417e510736..e648af92ced1 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h | |||
@@ -33,8 +33,6 @@ extern atomic_t ppc_n_lost_interrupts; | |||
33 | /* Same thing, used by the generic IRQ code */ | 33 | /* Same thing, used by the generic IRQ code */ |
34 | #define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS | 34 | #define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS |
35 | 35 | ||
36 | struct irq_data; | ||
37 | extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); | ||
38 | extern irq_hw_number_t virq_to_hw(unsigned int virq); | 36 | extern irq_hw_number_t virq_to_hw(unsigned int virq); |
39 | 37 | ||
40 | /** | 38 | /** |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3e57a00b8cba..ba3aeb4bc06a 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -206,40 +206,43 @@ reenable_mmu: /* re-enable mmu so we can */ | |||
206 | andi. r10,r10,MSR_EE /* Did EE change? */ | 206 | andi. r10,r10,MSR_EE /* Did EE change? */ |
207 | beq 1f | 207 | beq 1f |
208 | 208 | ||
209 | /* Save handler and return address into the 2 unused words | ||
210 | * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything | ||
211 | * else can be recovered from the pt_regs except r3 which for | ||
212 | * normal interrupts has been set to pt_regs and for syscalls | ||
213 | * is an argument, so we temporarily use ORIG_GPR3 to save it | ||
214 | */ | ||
215 | stw r9,8(r1) | ||
216 | stw r11,12(r1) | ||
217 | stw r3,ORIG_GPR3(r1) | ||
218 | /* | 209 | /* |
219 | * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1. | 210 | * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1. |
220 | * If from user mode there is only one stack frame on the stack, and | 211 | * If from user mode there is only one stack frame on the stack, and |
221 | * accessing CALLER_ADDR1 will cause oops. So we need create a dummy | 212 | * accessing CALLER_ADDR1 will cause oops. So we need create a dummy |
222 | * stack frame to make trace_hardirqs_off happy. | 213 | * stack frame to make trace_hardirqs_off happy. |
214 | * | ||
215 | * This is handy because we also need to save a bunch of GPRs, | ||
216 | * r3 can be different from GPR3(r1) at this point, r9 and r11 | ||
217 | * contains the old MSR and handler address respectively, | ||
218 | * r4 & r5 can contain page fault arguments that need to be passed | ||
219 | * along as well. r12, CCR, CTR, XER etc... are left clobbered as | ||
220 | * they aren't useful past this point (aren't syscall arguments), | ||
221 | * the rest is restored from the exception frame. | ||
223 | */ | 222 | */ |
223 | stwu r1,-32(r1) | ||
224 | stw r9,8(r1) | ||
225 | stw r11,12(r1) | ||
226 | stw r3,16(r1) | ||
227 | stw r4,20(r1) | ||
228 | stw r5,24(r1) | ||
224 | andi. r12,r12,MSR_PR | 229 | andi. r12,r12,MSR_PR |
225 | beq 11f | 230 | b 11f |
226 | stwu r1,-16(r1) | ||
227 | bl trace_hardirqs_off | 231 | bl trace_hardirqs_off |
228 | addi r1,r1,16 | ||
229 | b 12f | 232 | b 12f |
230 | |||
231 | 11: | 233 | 11: |
232 | bl trace_hardirqs_off | 234 | bl trace_hardirqs_off |
233 | 12: | 235 | 12: |
236 | lwz r5,24(r1) | ||
237 | lwz r4,20(r1) | ||
238 | lwz r3,16(r1) | ||
239 | lwz r11,12(r1) | ||
240 | lwz r9,8(r1) | ||
241 | addi r1,r1,32 | ||
234 | lwz r0,GPR0(r1) | 242 | lwz r0,GPR0(r1) |
235 | lwz r3,ORIG_GPR3(r1) | ||
236 | lwz r4,GPR4(r1) | ||
237 | lwz r5,GPR5(r1) | ||
238 | lwz r6,GPR6(r1) | 243 | lwz r6,GPR6(r1) |
239 | lwz r7,GPR7(r1) | 244 | lwz r7,GPR7(r1) |
240 | lwz r8,GPR8(r1) | 245 | lwz r8,GPR8(r1) |
241 | lwz r9,8(r1) | ||
242 | lwz r11,12(r1) | ||
243 | 1: mtctr r11 | 246 | 1: mtctr r11 |
244 | mtlr r9 | 247 | mtlr r9 |
245 | bctr /* jump to handler */ | 248 | bctr /* jump to handler */ |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 243dbabfe74d..5ec1b2354ca6 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -560,12 +560,6 @@ void do_softirq(void) | |||
560 | local_irq_restore(flags); | 560 | local_irq_restore(flags); |
561 | } | 561 | } |
562 | 562 | ||
563 | irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | ||
564 | { | ||
565 | return d->hwirq; | ||
566 | } | ||
567 | EXPORT_SYMBOL_GPL(irqd_to_hwirq); | ||
568 | |||
569 | irq_hw_number_t virq_to_hw(unsigned int virq) | 563 | irq_hw_number_t virq_to_hw(unsigned int virq) |
570 | { | 564 | { |
571 | struct irq_data *irq_data = irq_get_irq_data(virq); | 565 | struct irq_data *irq_data = irq_get_irq_data(virq); |
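The powerpc-private irqd_to_hwirq() export above goes away because the generic IRQ code provides an equivalent accessor; a sketch of that generic helper is shown here for context only and is not part of this diff.

	/* Generic accessor the powerpc copy is replaced by (context only): */
	static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
	{
		return d->hwirq;
	}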
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index f88698c0f332..4937c9690090 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1235,7 +1235,7 @@ void __ppc64_runlatch_on(void) | |||
1235 | ctrl |= CTRL_RUNLATCH; | 1235 | ctrl |= CTRL_RUNLATCH; |
1236 | mtspr(SPRN_CTRLT, ctrl); | 1236 | mtspr(SPRN_CTRLT, ctrl); |
1237 | 1237 | ||
1238 | ti->local_flags |= TLF_RUNLATCH; | 1238 | ti->local_flags |= _TLF_RUNLATCH; |
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | /* Called with hard IRQs off */ | 1241 | /* Called with hard IRQs off */ |
@@ -1244,7 +1244,7 @@ void __ppc64_runlatch_off(void) | |||
1244 | struct thread_info *ti = current_thread_info(); | 1244 | struct thread_info *ti = current_thread_info(); |
1245 | unsigned long ctrl; | 1245 | unsigned long ctrl; |
1246 | 1246 | ||
1247 | ti->local_flags &= ~TLF_RUNLATCH; | 1247 | ti->local_flags &= ~_TLF_RUNLATCH; |
1248 | 1248 | ||
1249 | ctrl = mfspr(SPRN_CTRLF); | 1249 | ctrl = mfspr(SPRN_CTRLF); |
1250 | ctrl &= ~CTRL_RUNLATCH; | 1250 | ctrl &= ~CTRL_RUNLATCH; |
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index db360fc4cf0e..d09f3e8e6867 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -392,7 +392,7 @@ static int axon_msi_probe(struct platform_device *device) | |||
392 | } | 392 | } |
393 | memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); | 393 | memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); |
394 | 394 | ||
395 | msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic); | 395 | msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic); |
396 | if (!msic->irq_domain) { | 396 | if (!msic->irq_domain) { |
397 | printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", | 397 | printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", |
398 | dn->full_name); | 398 | dn->full_name); |
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index e5c3a2c6090d..f9a48af335cb 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c | |||
@@ -239,7 +239,7 @@ void __init beatic_init_IRQ(void) | |||
239 | ppc_md.get_irq = beatic_get_irq; | 239 | ppc_md.get_irq = beatic_get_irq; |
240 | 240 | ||
241 | /* Allocate an irq host */ | 241 | /* Allocate an irq host */ |
242 | beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL); | 242 | beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL); |
243 | BUG_ON(beatic_host == NULL); | 243 | BUG_ON(beatic_host == NULL); |
244 | irq_set_default_host(beatic_host); | 244 | irq_set_default_host(beatic_host); |
245 | } | 245 | } |
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index a81e5a88fbdf..b4ddaa3fbb29 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
@@ -192,7 +192,7 @@ static int psurge_secondary_ipi_init(void) | |||
192 | { | 192 | { |
193 | int rc = -ENOMEM; | 193 | int rc = -ENOMEM; |
194 | 194 | ||
195 | psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL); | 195 | psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL); |
196 | 196 | ||
197 | if (psurge_host) | 197 | if (psurge_host) |
198 | psurge_secondary_virq = irq_create_direct_mapping(psurge_host); | 198 | psurge_secondary_virq = irq_create_direct_mapping(psurge_host); |
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 2a4ff86cc21f..5f3b23220b8e 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c | |||
@@ -753,9 +753,8 @@ void __init ps3_init_IRQ(void) | |||
753 | unsigned cpu; | 753 | unsigned cpu; |
754 | struct irq_domain *host; | 754 | struct irq_domain *host; |
755 | 755 | ||
756 | host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL); | 756 | host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL); |
757 | irq_set_default_host(host); | 757 | irq_set_default_host(host); |
758 | irq_set_virq_count(PS3_PLUG_MAX + 1); | ||
759 | 758 | ||
760 | for_each_possible_cpu(cpu) { | 759 | for_each_possible_cpu(cpu) { |
761 | struct ps3_private *pd = &per_cpu(ps3_private, cpu); | 760 | struct ps3_private *pd = &per_cpu(ps3_private, cpu); |
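The axon_msi, beatic, psurge and ps3 call sites above all gain a second argument because irq_domain_add_nomap() now takes the maximum number of virtual IRQs directly, replacing the removed irq_set_virq_count() call. The updated prototype, sketched for context and not part of this diff:

	struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
						unsigned int max_irq,
						const struct irq_domain_ops *ops,
						void *host_data);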
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2b7c0fbe578e..9015060919a0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -90,7 +90,6 @@ config S390 | |||
90 | select HAVE_KERNEL_XZ | 90 | select HAVE_KERNEL_XZ |
91 | select HAVE_ARCH_MUTEX_CPU_RELAX | 91 | select HAVE_ARCH_MUTEX_CPU_RELAX |
92 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 | 92 | select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 |
93 | select HAVE_RCU_TABLE_FREE if SMP | ||
94 | select ARCH_SAVE_PAGE_KEYS if HIBERNATION | 93 | select ARCH_SAVE_PAGE_KEYS if HIBERNATION |
95 | select HAVE_MEMBLOCK | 94 | select HAVE_MEMBLOCK |
96 | select HAVE_MEMBLOCK_NODE_MAP | 95 | select HAVE_MEMBLOCK_NODE_MAP |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 6cf8e26b3137..1957a9dd256d 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -1,8 +1,12 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | 1 | CONFIG_EXPERIMENTAL=y |
2 | CONFIG_SYSVIPC=y | 2 | CONFIG_SYSVIPC=y |
3 | CONFIG_POSIX_MQUEUE=y | 3 | CONFIG_POSIX_MQUEUE=y |
4 | CONFIG_FHANDLE=y | ||
5 | CONFIG_TASKSTATS=y | ||
6 | CONFIG_TASK_DELAY_ACCT=y | ||
7 | CONFIG_TASK_XACCT=y | ||
8 | CONFIG_TASK_IO_ACCOUNTING=y | ||
4 | CONFIG_AUDIT=y | 9 | CONFIG_AUDIT=y |
5 | CONFIG_RCU_TRACE=y | ||
6 | CONFIG_IKCONFIG=y | 10 | CONFIG_IKCONFIG=y |
7 | CONFIG_IKCONFIG_PROC=y | 11 | CONFIG_IKCONFIG_PROC=y |
8 | CONFIG_CGROUPS=y | 12 | CONFIG_CGROUPS=y |
@@ -14,16 +18,22 @@ CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y | |||
14 | CONFIG_CGROUP_SCHED=y | 18 | CONFIG_CGROUP_SCHED=y |
15 | CONFIG_RT_GROUP_SCHED=y | 19 | CONFIG_RT_GROUP_SCHED=y |
16 | CONFIG_BLK_CGROUP=y | 20 | CONFIG_BLK_CGROUP=y |
21 | CONFIG_NAMESPACES=y | ||
17 | CONFIG_BLK_DEV_INITRD=y | 22 | CONFIG_BLK_DEV_INITRD=y |
18 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 23 | CONFIG_RD_BZIP2=y |
24 | CONFIG_RD_LZMA=y | ||
25 | CONFIG_RD_XZ=y | ||
26 | CONFIG_RD_LZO=y | ||
27 | CONFIG_EXPERT=y | ||
19 | # CONFIG_COMPAT_BRK is not set | 28 | # CONFIG_COMPAT_BRK is not set |
20 | CONFIG_SLAB=y | ||
21 | CONFIG_PROFILING=y | 29 | CONFIG_PROFILING=y |
22 | CONFIG_OPROFILE=y | 30 | CONFIG_OPROFILE=y |
23 | CONFIG_KPROBES=y | 31 | CONFIG_KPROBES=y |
24 | CONFIG_MODULES=y | 32 | CONFIG_MODULES=y |
25 | CONFIG_MODULE_UNLOAD=y | 33 | CONFIG_MODULE_UNLOAD=y |
26 | CONFIG_MODVERSIONS=y | 34 | CONFIG_MODVERSIONS=y |
35 | CONFIG_PARTITION_ADVANCED=y | ||
36 | CONFIG_IBM_PARTITION=y | ||
27 | CONFIG_DEFAULT_DEADLINE=y | 37 | CONFIG_DEFAULT_DEADLINE=y |
28 | CONFIG_NO_HZ=y | 38 | CONFIG_NO_HZ=y |
29 | CONFIG_HIGH_RES_TIMERS=y | 39 | CONFIG_HIGH_RES_TIMERS=y |
@@ -34,18 +44,15 @@ CONFIG_KSM=y | |||
34 | CONFIG_BINFMT_MISC=m | 44 | CONFIG_BINFMT_MISC=m |
35 | CONFIG_CMM=m | 45 | CONFIG_CMM=m |
36 | CONFIG_HZ_100=y | 46 | CONFIG_HZ_100=y |
37 | CONFIG_KEXEC=y | 47 | CONFIG_CRASH_DUMP=y |
38 | CONFIG_PM=y | ||
39 | CONFIG_HIBERNATION=y | 48 | CONFIG_HIBERNATION=y |
40 | CONFIG_PACKET=y | 49 | CONFIG_PACKET=y |
41 | CONFIG_UNIX=y | 50 | CONFIG_UNIX=y |
42 | CONFIG_NET_KEY=y | 51 | CONFIG_NET_KEY=y |
43 | CONFIG_AFIUCV=m | ||
44 | CONFIG_INET=y | 52 | CONFIG_INET=y |
45 | CONFIG_IP_MULTICAST=y | 53 | CONFIG_IP_MULTICAST=y |
46 | # CONFIG_INET_LRO is not set | 54 | # CONFIG_INET_LRO is not set |
47 | CONFIG_IPV6=y | 55 | CONFIG_IPV6=y |
48 | CONFIG_NET_SCTPPROBE=m | ||
49 | CONFIG_L2TP=m | 56 | CONFIG_L2TP=m |
50 | CONFIG_L2TP_DEBUGFS=m | 57 | CONFIG_L2TP_DEBUGFS=m |
51 | CONFIG_VLAN_8021Q=y | 58 | CONFIG_VLAN_8021Q=y |
@@ -84,15 +91,14 @@ CONFIG_SCSI_CONSTANTS=y | |||
84 | CONFIG_SCSI_LOGGING=y | 91 | CONFIG_SCSI_LOGGING=y |
85 | CONFIG_SCSI_SCAN_ASYNC=y | 92 | CONFIG_SCSI_SCAN_ASYNC=y |
86 | CONFIG_ZFCP=y | 93 | CONFIG_ZFCP=y |
87 | CONFIG_ZFCP_DIF=y | ||
88 | CONFIG_NETDEVICES=y | 94 | CONFIG_NETDEVICES=y |
89 | CONFIG_DUMMY=m | ||
90 | CONFIG_BONDING=m | 95 | CONFIG_BONDING=m |
96 | CONFIG_DUMMY=m | ||
91 | CONFIG_EQUALIZER=m | 97 | CONFIG_EQUALIZER=m |
92 | CONFIG_TUN=m | 98 | CONFIG_TUN=m |
93 | CONFIG_NET_ETHERNET=y | ||
94 | CONFIG_VIRTIO_NET=y | 99 | CONFIG_VIRTIO_NET=y |
95 | CONFIG_RAW_DRIVER=m | 100 | CONFIG_RAW_DRIVER=m |
101 | CONFIG_VIRTIO_BALLOON=y | ||
96 | CONFIG_EXT2_FS=y | 102 | CONFIG_EXT2_FS=y |
97 | CONFIG_EXT3_FS=y | 103 | CONFIG_EXT3_FS=y |
98 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 104 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
@@ -103,27 +109,21 @@ CONFIG_PROC_KCORE=y | |||
103 | CONFIG_TMPFS=y | 109 | CONFIG_TMPFS=y |
104 | CONFIG_TMPFS_POSIX_ACL=y | 110 | CONFIG_TMPFS_POSIX_ACL=y |
105 | # CONFIG_NETWORK_FILESYSTEMS is not set | 111 | # CONFIG_NETWORK_FILESYSTEMS is not set |
106 | CONFIG_PARTITION_ADVANCED=y | ||
107 | CONFIG_IBM_PARTITION=y | ||
108 | CONFIG_DLM=m | ||
109 | CONFIG_MAGIC_SYSRQ=y | 112 | CONFIG_MAGIC_SYSRQ=y |
110 | CONFIG_DEBUG_KERNEL=y | ||
111 | CONFIG_TIMER_STATS=y | 113 | CONFIG_TIMER_STATS=y |
112 | CONFIG_PROVE_LOCKING=y | 114 | CONFIG_PROVE_LOCKING=y |
113 | CONFIG_PROVE_RCU=y | 115 | CONFIG_PROVE_RCU=y |
114 | CONFIG_LOCK_STAT=y | 116 | CONFIG_LOCK_STAT=y |
115 | CONFIG_DEBUG_LOCKDEP=y | 117 | CONFIG_DEBUG_LOCKDEP=y |
116 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | ||
117 | CONFIG_DEBUG_LIST=y | 118 | CONFIG_DEBUG_LIST=y |
118 | CONFIG_DEBUG_NOTIFIERS=y | 119 | CONFIG_DEBUG_NOTIFIERS=y |
119 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 120 | CONFIG_RCU_TRACE=y |
120 | CONFIG_KPROBES_SANITY_TEST=y | 121 | CONFIG_KPROBES_SANITY_TEST=y |
121 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y | 122 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y |
122 | CONFIG_CPU_NOTIFIER_ERROR_INJECT=m | 123 | CONFIG_CPU_NOTIFIER_ERROR_INJECT=m |
123 | CONFIG_LATENCYTOP=y | 124 | CONFIG_LATENCYTOP=y |
124 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
125 | CONFIG_DEBUG_PAGEALLOC=y | 125 | CONFIG_DEBUG_PAGEALLOC=y |
126 | # CONFIG_FTRACE is not set | 126 | CONFIG_BLK_DEV_IO_TRACE=y |
127 | # CONFIG_STRICT_DEVMEM is not set | 127 | # CONFIG_STRICT_DEVMEM is not set |
128 | CONFIG_CRYPTO_NULL=m | 128 | CONFIG_CRYPTO_NULL=m |
129 | CONFIG_CRYPTO_CRYPTD=m | 129 | CONFIG_CRYPTO_CRYPTD=m |
@@ -173,4 +173,3 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
173 | CONFIG_CRYPTO_DES_S390=m | 173 | CONFIG_CRYPTO_DES_S390=m |
174 | CONFIG_CRYPTO_AES_S390=m | 174 | CONFIG_CRYPTO_AES_S390=m |
175 | CONFIG_CRC7=m | 175 | CONFIG_CRC7=m |
176 | CONFIG_VIRTIO_BALLOON=y | ||
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index 1e5b27edc0c9..2ee66a65f2d4 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h | |||
@@ -38,12 +38,11 @@ static inline void stfle(u64 *stfle_fac_list, int size) | |||
38 | unsigned long nr; | 38 | unsigned long nr; |
39 | 39 | ||
40 | preempt_disable(); | 40 | preempt_disable(); |
41 | S390_lowcore.stfl_fac_list = 0; | ||
42 | asm volatile( | 41 | asm volatile( |
43 | " .insn s,0xb2b10000,0(0)\n" /* stfl */ | 42 | " .insn s,0xb2b10000,0(0)\n" /* stfl */ |
44 | "0:\n" | 43 | "0:\n" |
45 | EX_TABLE(0b, 0b) | 44 | EX_TABLE(0b, 0b) |
46 | : "=m" (S390_lowcore.stfl_fac_list)); | 45 | : "+m" (S390_lowcore.stfl_fac_list)); |
47 | nr = 4; /* bytes stored by stfl */ | 46 | nr = 4; /* bytes stored by stfl */ |
48 | memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); | 47 | memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); |
49 | if (S390_lowcore.stfl_fac_list & 0x01000000) { | 48 | if (S390_lowcore.stfl_fac_list & 0x01000000) { |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 8eef9b5b3cf4..78e3041919de 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *); | |||
22 | 22 | ||
23 | unsigned long *page_table_alloc(struct mm_struct *, unsigned long); | 23 | unsigned long *page_table_alloc(struct mm_struct *, unsigned long); |
24 | void page_table_free(struct mm_struct *, unsigned long *); | 24 | void page_table_free(struct mm_struct *, unsigned long *); |
25 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
26 | void page_table_free_rcu(struct mmu_gather *, unsigned long *); | 25 | void page_table_free_rcu(struct mmu_gather *, unsigned long *); |
27 | void __tlb_remove_table(void *_table); | ||
28 | #endif | ||
29 | 26 | ||
30 | static inline void clear_table(unsigned long *s, unsigned long val, size_t n) | 27 | static inline void clear_table(unsigned long *s, unsigned long val, size_t n) |
31 | { | 28 | { |
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h index 6bdee21c077e..a3e4ebb32090 100644 --- a/arch/s390/include/asm/swab.h +++ b/arch/s390/include/asm/swab.h | |||
@@ -77,7 +77,7 @@ static inline __u16 __arch_swab16p(const __u16 *x) | |||
77 | 77 | ||
78 | asm volatile( | 78 | asm volatile( |
79 | #ifndef __s390x__ | 79 | #ifndef __s390x__ |
80 | " icm %0,2,%O+1(%R1)\n" | 80 | " icm %0,2,%O1+1(%R1)\n" |
81 | " ic %0,%1\n" | 81 | " ic %0,%1\n" |
82 | : "=&d" (result) : "Q" (*x) : "cc"); | 82 | : "=&d" (result) : "Q" (*x) : "cc"); |
83 | #else /* __s390x__ */ | 83 | #else /* __s390x__ */ |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index c687a2c83462..775a5eea8f9e 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -30,14 +30,10 @@ | |||
30 | 30 | ||
31 | struct mmu_gather { | 31 | struct mmu_gather { |
32 | struct mm_struct *mm; | 32 | struct mm_struct *mm; |
33 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
34 | struct mmu_table_batch *batch; | 33 | struct mmu_table_batch *batch; |
35 | #endif | ||
36 | unsigned int fullmm; | 34 | unsigned int fullmm; |
37 | unsigned int need_flush; | ||
38 | }; | 35 | }; |
39 | 36 | ||
40 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
41 | struct mmu_table_batch { | 37 | struct mmu_table_batch { |
42 | struct rcu_head rcu; | 38 | struct rcu_head rcu; |
43 | unsigned int nr; | 39 | unsigned int nr; |
@@ -49,7 +45,6 @@ struct mmu_table_batch { | |||
49 | 45 | ||
50 | extern void tlb_table_flush(struct mmu_gather *tlb); | 46 | extern void tlb_table_flush(struct mmu_gather *tlb); |
51 | extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | 47 | extern void tlb_remove_table(struct mmu_gather *tlb, void *table); |
52 | #endif | ||
53 | 48 | ||
54 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, | 49 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, |
55 | struct mm_struct *mm, | 50 | struct mm_struct *mm, |
@@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, | |||
57 | { | 52 | { |
58 | tlb->mm = mm; | 53 | tlb->mm = mm; |
59 | tlb->fullmm = full_mm_flush; | 54 | tlb->fullmm = full_mm_flush; |
60 | tlb->need_flush = 0; | ||
61 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
62 | tlb->batch = NULL; | 55 | tlb->batch = NULL; |
63 | #endif | ||
64 | if (tlb->fullmm) | 56 | if (tlb->fullmm) |
65 | __tlb_flush_mm(mm); | 57 | __tlb_flush_mm(mm); |
66 | } | 58 | } |
67 | 59 | ||
68 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) | 60 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) |
69 | { | 61 | { |
70 | if (!tlb->need_flush) | ||
71 | return; | ||
72 | tlb->need_flush = 0; | ||
73 | __tlb_flush_mm(tlb->mm); | ||
74 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
75 | tlb_table_flush(tlb); | 62 | tlb_table_flush(tlb); |
76 | #endif | ||
77 | } | 63 | } |
78 | 64 | ||
79 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, | 65 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, |
80 | unsigned long start, unsigned long end) | 66 | unsigned long start, unsigned long end) |
81 | { | 67 | { |
82 | tlb_flush_mmu(tlb); | 68 | tlb_table_flush(tlb); |
83 | } | 69 | } |
84 | 70 | ||
85 | /* | 71 | /* |
@@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
105 | static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | 91 | static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |
106 | unsigned long address) | 92 | unsigned long address) |
107 | { | 93 | { |
108 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
109 | if (!tlb->fullmm) | 94 | if (!tlb->fullmm) |
110 | return page_table_free_rcu(tlb, (unsigned long *) pte); | 95 | return page_table_free_rcu(tlb, (unsigned long *) pte); |
111 | #endif | ||
112 | page_table_free(tlb->mm, (unsigned long *) pte); | 96 | page_table_free(tlb->mm, (unsigned long *) pte); |
113 | } | 97 | } |
114 | 98 | ||
@@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | |||
125 | #ifdef __s390x__ | 109 | #ifdef __s390x__ |
126 | if (tlb->mm->context.asce_limit <= (1UL << 31)) | 110 | if (tlb->mm->context.asce_limit <= (1UL << 31)) |
127 | return; | 111 | return; |
128 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
129 | if (!tlb->fullmm) | 112 | if (!tlb->fullmm) |
130 | return tlb_remove_table(tlb, pmd); | 113 | return tlb_remove_table(tlb, pmd); |
131 | #endif | ||
132 | crst_table_free(tlb->mm, (unsigned long *) pmd); | 114 | crst_table_free(tlb->mm, (unsigned long *) pmd); |
133 | #endif | 115 | #endif |
134 | } | 116 | } |
@@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | |||
146 | #ifdef __s390x__ | 128 | #ifdef __s390x__ |
147 | if (tlb->mm->context.asce_limit <= (1UL << 42)) | 129 | if (tlb->mm->context.asce_limit <= (1UL << 42)) |
148 | return; | 130 | return; |
149 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
150 | if (!tlb->fullmm) | 131 | if (!tlb->fullmm) |
151 | return tlb_remove_table(tlb, pud); | 132 | return tlb_remove_table(tlb, pud); |
152 | #endif | ||
153 | crst_table_free(tlb->mm, (unsigned long *) pud); | 133 | crst_table_free(tlb->mm, (unsigned long *) pud); |
154 | #endif | 134 | #endif |
155 | } | 135 | } |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index c27a0727f930..adccd908ebc7 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -474,9 +474,9 @@ ENTRY(startup_kdump) | |||
474 | stck __LC_LAST_UPDATE_CLOCK | 474 | stck __LC_LAST_UPDATE_CLOCK |
475 | spt 5f-.LPG0(%r13) | 475 | spt 5f-.LPG0(%r13) |
476 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) | 476 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) |
477 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST | ||
477 | #ifndef CONFIG_MARCH_G5 | 478 | #ifndef CONFIG_MARCH_G5 |
478 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} | 479 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} |
479 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST | ||
480 | .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list | 480 | .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list |
481 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? | 481 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? |
482 | jz 0f | 482 | jz 0f |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1c2cdd59ccd0..8a22c27219dd 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -118,9 +118,10 @@ asmlinkage void do_softirq(void) | |||
118 | "a" (__do_softirq) | 118 | "a" (__do_softirq) |
119 | : "0", "1", "2", "3", "4", "5", "14", | 119 | : "0", "1", "2", "3", "4", "5", "14", |
120 | "cc", "memory" ); | 120 | "cc", "memory" ); |
121 | } else | 121 | } else { |
122 | /* We are already on the async stack. */ | 122 | /* We are already on the async stack. */ |
123 | __do_softirq(); | 123 | __do_softirq(); |
124 | } | ||
124 | } | 125 | } |
125 | 126 | ||
126 | local_irq_restore(flags); | 127 | local_irq_restore(flags); |
@@ -192,11 +193,12 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | |||
192 | int index = ext_hash(code); | 193 | int index = ext_hash(code); |
193 | 194 | ||
194 | spin_lock_irqsave(&ext_int_hash_lock, flags); | 195 | spin_lock_irqsave(&ext_int_hash_lock, flags); |
195 | list_for_each_entry_rcu(p, &ext_int_hash[index], entry) | 196 | list_for_each_entry_rcu(p, &ext_int_hash[index], entry) { |
196 | if (p->code == code && p->handler == handler) { | 197 | if (p->code == code && p->handler == handler) { |
197 | list_del_rcu(&p->entry); | 198 | list_del_rcu(&p->entry); |
198 | kfree_rcu(p, rcu); | 199 | kfree_rcu(p, rcu); |
199 | } | 200 | } |
201 | } | ||
200 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); | 202 | spin_unlock_irqrestore(&ext_int_hash_lock, flags); |
201 | return 0; | 203 | return 0; |
202 | } | 204 | } |
@@ -211,9 +213,10 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code, | |||
211 | 213 | ||
212 | old_regs = set_irq_regs(regs); | 214 | old_regs = set_irq_regs(regs); |
213 | irq_enter(); | 215 | irq_enter(); |
214 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | 216 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) { |
215 | /* Serve timer interrupts first. */ | 217 | /* Serve timer interrupts first. */ |
216 | clock_comparator_work(); | 218 | clock_comparator_work(); |
219 | } | ||
217 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | 220 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; |
218 | if (ext_code.code != 0x1004) | 221 | if (ext_code.code != 0x1004) |
219 | __get_cpu_var(s390_idle).nohz_delay = 1; | 222 | __get_cpu_var(s390_idle).nohz_delay = 1; |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 46405086479c..cb019f429e88 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -178,7 +178,7 @@ static void cpumf_pmu_enable(struct pmu *pmu) | |||
178 | err = lcctl(cpuhw->state); | 178 | err = lcctl(cpuhw->state); |
179 | if (err) { | 179 | if (err) { |
180 | pr_err("Enabling the performance measuring unit " | 180 | pr_err("Enabling the performance measuring unit " |
181 | "failed with rc=%lx\n", err); | 181 | "failed with rc=%x\n", err); |
182 | return; | 182 | return; |
183 | } | 183 | } |
184 | 184 | ||
@@ -203,7 +203,7 @@ static void cpumf_pmu_disable(struct pmu *pmu) | |||
203 | err = lcctl(inactive); | 203 | err = lcctl(inactive); |
204 | if (err) { | 204 | if (err) { |
205 | pr_err("Disabling the performance measuring unit " | 205 | pr_err("Disabling the performance measuring unit " |
206 | "failed with rc=%lx\n", err); | 206 | "failed with rc=%x\n", err); |
207 | return; | 207 | return; |
208 | } | 208 | } |
209 | 209 | ||
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 7bb15fcca75e..e1335dc2b1b7 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c | |||
@@ -61,21 +61,14 @@ long probe_kernel_write(void *dst, const void *src, size_t size) | |||
61 | return copied < 0 ? -EFAULT : 0; | 61 | return copied < 0 ? -EFAULT : 0; |
62 | } | 62 | } |
63 | 63 | ||
64 | /* | 64 | static int __memcpy_real(void *dest, void *src, size_t count) |
65 | * Copy memory in real mode (kernel to kernel) | ||
66 | */ | ||
67 | int memcpy_real(void *dest, void *src, size_t count) | ||
68 | { | 65 | { |
69 | register unsigned long _dest asm("2") = (unsigned long) dest; | 66 | register unsigned long _dest asm("2") = (unsigned long) dest; |
70 | register unsigned long _len1 asm("3") = (unsigned long) count; | 67 | register unsigned long _len1 asm("3") = (unsigned long) count; |
71 | register unsigned long _src asm("4") = (unsigned long) src; | 68 | register unsigned long _src asm("4") = (unsigned long) src; |
72 | register unsigned long _len2 asm("5") = (unsigned long) count; | 69 | register unsigned long _len2 asm("5") = (unsigned long) count; |
73 | unsigned long flags; | ||
74 | int rc = -EFAULT; | 70 | int rc = -EFAULT; |
75 | 71 | ||
76 | if (!count) | ||
77 | return 0; | ||
78 | flags = __arch_local_irq_stnsm(0xf8UL); | ||
79 | asm volatile ( | 72 | asm volatile ( |
80 | "0: mvcle %1,%2,0x0\n" | 73 | "0: mvcle %1,%2,0x0\n" |
81 | "1: jo 0b\n" | 74 | "1: jo 0b\n" |
@@ -86,7 +79,23 @@ int memcpy_real(void *dest, void *src, size_t count) | |||
86 | "+d" (_len2), "=m" (*((long *) dest)) | 79 | "+d" (_len2), "=m" (*((long *) dest)) |
87 | : "m" (*((long *) src)) | 80 | : "m" (*((long *) src)) |
88 | : "cc", "memory"); | 81 | : "cc", "memory"); |
89 | arch_local_irq_restore(flags); | 82 | return rc; |
83 | } | ||
84 | |||
85 | /* | ||
86 | * Copy memory in real mode (kernel to kernel) | ||
87 | */ | ||
88 | int memcpy_real(void *dest, void *src, size_t count) | ||
89 | { | ||
90 | unsigned long flags; | ||
91 | int rc; | ||
92 | |||
93 | if (!count) | ||
94 | return 0; | ||
95 | local_irq_save(flags); | ||
96 | __arch_local_irq_stnsm(0xfbUL); | ||
97 | rc = __memcpy_real(dest, src, count); | ||
98 | local_irq_restore(flags); | ||
90 | return rc; | 99 | return rc; |
91 | } | 100 | } |
92 | 101 | ||
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 373adf69b01c..6e765bf00670 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -678,8 +678,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
678 | } | 678 | } |
679 | } | 679 | } |
680 | 680 | ||
681 | #ifdef CONFIG_HAVE_RCU_TABLE_FREE | ||
682 | |||
683 | static void __page_table_free_rcu(void *table, unsigned bit) | 681 | static void __page_table_free_rcu(void *table, unsigned bit) |
684 | { | 682 | { |
685 | struct page *page; | 683 | struct page *page; |
@@ -733,7 +731,66 @@ void __tlb_remove_table(void *_table) | |||
733 | free_pages((unsigned long) table, ALLOC_ORDER); | 731 | free_pages((unsigned long) table, ALLOC_ORDER); |
734 | } | 732 | } |
735 | 733 | ||
736 | #endif | 734 | static void tlb_remove_table_smp_sync(void *arg) |
735 | { | ||
736 | /* Simply deliver the interrupt */ | ||
737 | } | ||
738 | |||
739 | static void tlb_remove_table_one(void *table) | ||
740 | { | ||
741 | /* | ||
742 | * This isn't an RCU grace period and hence the page-tables cannot be | ||
743 | * assumed to be actually RCU-freed. | ||
744 | * | ||
745 | * It is however sufficient for software page-table walkers that rely | ||
746 | * on IRQ disabling. See the comment near struct mmu_table_batch. | ||
747 | */ | ||
748 | smp_call_function(tlb_remove_table_smp_sync, NULL, 1); | ||
749 | __tlb_remove_table(table); | ||
750 | } | ||
751 | |||
752 | static void tlb_remove_table_rcu(struct rcu_head *head) | ||
753 | { | ||
754 | struct mmu_table_batch *batch; | ||
755 | int i; | ||
756 | |||
757 | batch = container_of(head, struct mmu_table_batch, rcu); | ||
758 | |||
759 | for (i = 0; i < batch->nr; i++) | ||
760 | __tlb_remove_table(batch->tables[i]); | ||
761 | |||
762 | free_page((unsigned long)batch); | ||
763 | } | ||
764 | |||
765 | void tlb_table_flush(struct mmu_gather *tlb) | ||
766 | { | ||
767 | struct mmu_table_batch **batch = &tlb->batch; | ||
768 | |||
769 | if (*batch) { | ||
770 | __tlb_flush_mm(tlb->mm); | ||
771 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); | ||
772 | *batch = NULL; | ||
773 | } | ||
774 | } | ||
775 | |||
776 | void tlb_remove_table(struct mmu_gather *tlb, void *table) | ||
777 | { | ||
778 | struct mmu_table_batch **batch = &tlb->batch; | ||
779 | |||
780 | if (*batch == NULL) { | ||
781 | *batch = (struct mmu_table_batch *) | ||
782 | __get_free_page(GFP_NOWAIT | __GFP_NOWARN); | ||
783 | if (*batch == NULL) { | ||
784 | __tlb_flush_mm(tlb->mm); | ||
785 | tlb_remove_table_one(table); | ||
786 | return; | ||
787 | } | ||
788 | (*batch)->nr = 0; | ||
789 | } | ||
790 | (*batch)->tables[(*batch)->nr++] = table; | ||
791 | if ((*batch)->nr == MAX_TABLE_BATCH) | ||
792 | tlb_table_flush(tlb); | ||
793 | } | ||
737 | 794 | ||
738 | /* | 795 | /* |
739 | * switch on pgstes for its userspace process (for kvm) | 796 | * switch on pgstes for its userspace process (for kvm) |
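The tlb_remove_table_one() comment above leans on a property of lockless software page-table walkers: they run with interrupts disabled, so the smp_call_function() broadcast cannot return until every such walker has finished. A hypothetical walker shape, sketched only to make that argument concrete; do_walk() is an invented placeholder, not a real kernel function.

	/*
	 * Sketch only: keeping interrupts off blocks the IPI sent by
	 * tlb_remove_table_smp_sync(), so the table cannot be freed
	 * while the walk is in progress.
	 */
	static int lockless_walk_sketch(struct mm_struct *mm, unsigned long addr)
	{
		unsigned long flags;
		int ret;

		local_irq_save(flags);		/* holds off the sync IPI */
		ret = do_walk(mm, addr);	/* hypothetical walk body */
		local_irq_restore(flags);

		return ret;
	}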
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index fea13c7b1aee..b93c2c9ccb1d 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c | |||
@@ -1264,4 +1264,4 @@ static int __init ds_init(void) | |||
1264 | return vio_register_driver(&ds_driver); | 1264 | return vio_register_driver(&ds_driver); |
1265 | } | 1265 | } |
1266 | 1266 | ||
1267 | subsys_initcall(ds_init); | 1267 | fs_initcall(ds_init); |
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c index aba6b958b2a5..19f56058742b 100644 --- a/arch/sparc/kernel/leon_pci.c +++ b/arch/sparc/kernel/leon_pci.c | |||
@@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) | |||
45 | 45 | ||
46 | void __devinit pcibios_fixup_bus(struct pci_bus *pbus) | 46 | void __devinit pcibios_fixup_bus(struct pci_bus *pbus) |
47 | { | 47 | { |
48 | struct leon_pci_info *info = pbus->sysdata; | ||
49 | struct pci_dev *dev; | 48 | struct pci_dev *dev; |
50 | int i, has_io, has_mem; | 49 | int i, has_io, has_mem; |
51 | u16 cmd; | 50 | u16 cmd; |
@@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
111 | return pci_enable_resources(dev, mask); | 110 | return pci_enable_resources(dev, mask); |
112 | } | 111 | } |
113 | 112 | ||
114 | struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) | ||
115 | { | ||
116 | /* | ||
117 | * Currently the OpenBoot nodes are not connected with the PCI device, | ||
118 | * this is because the LEON PROM does not create PCI nodes. Eventually | ||
119 | * this will change and the same approach as pcic.c can be used to | ||
120 | * match PROM nodes with pci devices. | ||
121 | */ | ||
122 | return NULL; | ||
123 | } | ||
124 | EXPORT_SYMBOL(pci_device_to_OF_node); | ||
125 | |||
126 | void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) | 113 | void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) |
127 | { | 114 | { |
128 | #ifdef CONFIG_PCI_DEBUG | 115 | #ifdef CONFIG_PCI_DEBUG |
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 77f1b95e0806..9171fc238def 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S | |||
@@ -20,11 +20,6 @@ | |||
20 | 20 | ||
21 | .text | 21 | .text |
22 | .align 32 | 22 | .align 32 |
23 | __handle_softirq: | ||
24 | call do_softirq | ||
25 | nop | ||
26 | ba,a,pt %xcc, __handle_softirq_continue | ||
27 | nop | ||
28 | __handle_preemption: | 23 | __handle_preemption: |
29 | call schedule | 24 | call schedule |
30 | wrpr %g0, RTRAP_PSTATE, %pstate | 25 | wrpr %g0, RTRAP_PSTATE, %pstate |
@@ -89,9 +84,7 @@ rtrap: | |||
89 | cmp %l1, 0 | 84 | cmp %l1, 0 |
90 | 85 | ||
91 | /* mm/ultra.S:xcall_report_regs KNOWS about this load. */ | 86 | /* mm/ultra.S:xcall_report_regs KNOWS about this load. */ |
92 | bne,pn %icc, __handle_softirq | ||
93 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 | 87 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 |
94 | __handle_softirq_continue: | ||
95 | rtrap_xcall: | 88 | rtrap_xcall: |
96 | sethi %hi(0xf << 20), %l4 | 89 | sethi %hi(0xf << 20), %l4 |
97 | and %l1, %l4, %l4 | 90 | and %l1, %l4, %l4 |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 7705c6731e28..df3155a17991 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, | |||
225 | unsigned long g2; | 225 | unsigned long g2; |
226 | int from_user = !(regs->psr & PSR_PS); | 226 | int from_user = !(regs->psr & PSR_PS); |
227 | int fault, code; | 227 | int fault, code; |
228 | unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
229 | (write ? FAULT_FLAG_WRITE : 0)); | ||
228 | 230 | ||
229 | if(text_fault) | 231 | if(text_fault) |
230 | address = regs->pc; | 232 | address = regs->pc; |
@@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, | |||
251 | 253 | ||
252 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | 254 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
253 | 255 | ||
256 | retry: | ||
254 | down_read(&mm->mmap_sem); | 257 | down_read(&mm->mmap_sem); |
255 | 258 | ||
256 | /* | 259 | /* |
@@ -289,7 +292,11 @@ good_area: | |||
289 | * make sure we exit gracefully rather than endlessly redo | 292 | * make sure we exit gracefully rather than endlessly redo |
290 | * the fault. | 293 | * the fault. |
291 | */ | 294 | */ |
292 | fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); | 295 | fault = handle_mm_fault(mm, vma, address, flags); |
296 | |||
297 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
298 | return; | ||
299 | |||
293 | if (unlikely(fault & VM_FAULT_ERROR)) { | 300 | if (unlikely(fault & VM_FAULT_ERROR)) { |
294 | if (fault & VM_FAULT_OOM) | 301 | if (fault & VM_FAULT_OOM) |
295 | goto out_of_memory; | 302 | goto out_of_memory; |
@@ -297,13 +304,29 @@ good_area: | |||
297 | goto do_sigbus; | 304 | goto do_sigbus; |
298 | BUG(); | 305 | BUG(); |
299 | } | 306 | } |
300 | if (fault & VM_FAULT_MAJOR) { | 307 | |
301 | current->maj_flt++; | 308 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
302 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | 309 | if (fault & VM_FAULT_MAJOR) { |
303 | } else { | 310 | current->maj_flt++; |
304 | current->min_flt++; | 311 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, |
305 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | 312 | 1, regs, address); |
313 | } else { | ||
314 | current->min_flt++; | ||
315 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, | ||
316 | 1, regs, address); | ||
317 | } | ||
318 | if (fault & VM_FAULT_RETRY) { | ||
319 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
320 | |||
321 | /* No need to up_read(&mm->mmap_sem) as we would | ||
322 | * have already released it in __lock_page_or_retry | ||
323 | * in mm/filemap.c. | ||
324 | */ | ||
325 | |||
326 | goto retry; | ||
327 | } | ||
306 | } | 328 | } |
329 | |||
307 | up_read(&mm->mmap_sem); | 330 | up_read(&mm->mmap_sem); |
308 | return; | 331 | return; |
309 | 332 | ||
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 504c0622f729..1fe0429b6314 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
279 | unsigned int insn = 0; | 279 | unsigned int insn = 0; |
280 | int si_code, fault_code, fault; | 280 | int si_code, fault_code, fault; |
281 | unsigned long address, mm_rss; | 281 | unsigned long address, mm_rss; |
282 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | ||
282 | 283 | ||
283 | fault_code = get_thread_fault_code(); | 284 | fault_code = get_thread_fault_code(); |
284 | 285 | ||
@@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) | |||
333 | insn = get_fault_insn(regs, insn); | 334 | insn = get_fault_insn(regs, insn); |
334 | goto handle_kernel_fault; | 335 | goto handle_kernel_fault; |
335 | } | 336 | } |
337 | |||
338 | retry: | ||
336 | down_read(&mm->mmap_sem); | 339 | down_read(&mm->mmap_sem); |
337 | } | 340 | } |
338 | 341 | ||
@@ -423,7 +426,12 @@ good_area: | |||
423 | goto bad_area; | 426 | goto bad_area; |
424 | } | 427 | } |
425 | 428 | ||
426 | fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0); | 429 | flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0); |
430 | fault = handle_mm_fault(mm, vma, address, flags); | ||
431 | |||
432 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
433 | return; | ||
434 | |||
427 | if (unlikely(fault & VM_FAULT_ERROR)) { | 435 | if (unlikely(fault & VM_FAULT_ERROR)) { |
428 | if (fault & VM_FAULT_OOM) | 436 | if (fault & VM_FAULT_OOM) |
429 | goto out_of_memory; | 437 | goto out_of_memory; |
@@ -431,12 +439,27 @@ good_area: | |||
431 | goto do_sigbus; | 439 | goto do_sigbus; |
432 | BUG(); | 440 | BUG(); |
433 | } | 441 | } |
434 | if (fault & VM_FAULT_MAJOR) { | 442 | |
435 | current->maj_flt++; | 443 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
436 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | 444 | if (fault & VM_FAULT_MAJOR) { |
437 | } else { | 445 | current->maj_flt++; |
438 | current->min_flt++; | 446 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, |
439 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | 447 | 1, regs, address); |
448 | } else { | ||
449 | current->min_flt++; | ||
450 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, | ||
451 | 1, regs, address); | ||
452 | } | ||
453 | if (fault & VM_FAULT_RETRY) { | ||
454 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
455 | |||
456 | /* No need to up_read(&mm->mmap_sem) as we would | ||
457 | * have already released it in __lock_page_or_retry | ||
458 | * in mm/filemap.c. | ||
459 | */ | ||
460 | |||
461 | goto retry; | ||
462 | } | ||
440 | } | 463 | } |
441 | up_read(&mm->mmap_sem); | 464 | up_read(&mm->mmap_sem); |
442 | 465 | ||
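Both sparc fault handlers above adopt the same retry protocol; distilled into one hedged sketch below (names follow the diff, the vma lookup and error paths are omitted).

	/*
	 * Sketch of the retry protocol used by both handlers above.  If
	 * the core mm returns VM_FAULT_RETRY it has already dropped
	 * mmap_sem, so the handler clears FAULT_FLAG_ALLOW_RETRY (one
	 * retry only) and takes the semaphore again.
	 */
	static void fault_retry_sketch(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long address, int write)
	{
		unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				     (write ? FAULT_FLAG_WRITE : 0);
		int fault;

	retry:
		down_read(&mm->mmap_sem);
		fault = handle_mm_fault(mm, vma, address, flags);
		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
			return;			/* mmap_sem already released */
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry at most once */
			goto retry;		/* mmap_sem was dropped for us */
		}
		up_read(&mm->mmap_sem);
	}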
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c index 7a9327046404..446a7f52cc11 100644 --- a/arch/tile/kernel/proc.c +++ b/arch/tile/kernel/proc.c | |||
@@ -146,7 +146,6 @@ static ctl_table unaligned_table[] = { | |||
146 | }, | 146 | }, |
147 | {} | 147 | {} |
148 | }; | 148 | }; |
149 | #endif | ||
150 | 149 | ||
151 | static struct ctl_path tile_path[] = { | 150 | static struct ctl_path tile_path[] = { |
152 | { .procname = "tile" }, | 151 | { .procname = "tile" }, |
@@ -155,10 +154,9 @@ static struct ctl_path tile_path[] = { | |||
155 | 154 | ||
156 | static int __init proc_sys_tile_init(void) | 155 | static int __init proc_sys_tile_init(void) |
157 | { | 156 | { |
158 | #ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */ | ||
159 | register_sysctl_paths(tile_path, unaligned_table); | 157 | register_sysctl_paths(tile_path, unaligned_table); |
160 | #endif | ||
161 | return 0; | 158 | return 0; |
162 | } | 159 | } |
163 | 160 | ||
164 | arch_initcall(proc_sys_tile_init); | 161 | arch_initcall(proc_sys_tile_init); |
162 | #endif | ||
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index b949edcec200..172aef7d3159 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c | |||
@@ -196,6 +196,8 @@ void __cpuinit online_secondary(void) | |||
196 | /* This must be done before setting cpu_online_mask */ | 196 | /* This must be done before setting cpu_online_mask */ |
197 | wmb(); | 197 | wmb(); |
198 | 198 | ||
199 | notify_cpu_starting(smp_processor_id()); | ||
200 | |||
199 | /* | 201 | /* |
200 | * We need to hold call_lock, so there is no inconsistency | 202 | * We need to hold call_lock, so there is no inconsistency |
201 | * between the time smp_call_function() determines number of | 203 | * between the time smp_call_function() determines number of |
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h index dc36b222100b..6673508f3426 100644 --- a/arch/um/drivers/cow.h +++ b/arch/um/drivers/cow.h | |||
@@ -3,41 +3,6 @@ | |||
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
5 | 5 | ||
6 | #if defined(__KERNEL__) | ||
7 | |||
8 | # include <asm/byteorder.h> | ||
9 | |||
10 | # if defined(__BIG_ENDIAN) | ||
11 | # define ntohll(x) (x) | ||
12 | # define htonll(x) (x) | ||
13 | # elif defined(__LITTLE_ENDIAN) | ||
14 | # define ntohll(x) be64_to_cpu(x) | ||
15 | # define htonll(x) cpu_to_be64(x) | ||
16 | # else | ||
17 | # error "Could not determine byte order" | ||
18 | # endif | ||
19 | |||
20 | #else | ||
21 | /* For the definition of ntohl, htonl and __BYTE_ORDER */ | ||
22 | #include <endian.h> | ||
23 | #include <netinet/in.h> | ||
24 | #if defined(__BYTE_ORDER) | ||
25 | |||
26 | # if __BYTE_ORDER == __BIG_ENDIAN | ||
27 | # define ntohll(x) (x) | ||
28 | # define htonll(x) (x) | ||
29 | # elif __BYTE_ORDER == __LITTLE_ENDIAN | ||
30 | # define ntohll(x) bswap_64(x) | ||
31 | # define htonll(x) bswap_64(x) | ||
32 | # else | ||
33 | # error "Could not determine byte order: __BYTE_ORDER uncorrectly defined" | ||
34 | # endif | ||
35 | |||
36 | #else /* ! defined(__BYTE_ORDER) */ | ||
37 | # error "Could not determine byte order: __BYTE_ORDER not defined" | ||
38 | #endif | ||
39 | #endif /* ! defined(__KERNEL__) */ | ||
40 | |||
41 | extern int init_cow_file(int fd, char *cow_file, char *backing_file, | 6 | extern int init_cow_file(int fd, char *cow_file, char *backing_file, |
42 | int sectorsize, int alignment, int *bitmap_offset_out, | 7 | int sectorsize, int alignment, int *bitmap_offset_out, |
43 | unsigned long *bitmap_len_out, int *data_offset_out); | 8 | unsigned long *bitmap_len_out, int *data_offset_out); |
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c index 9cbb426c0b91..0ee9cc6cc4c7 100644 --- a/arch/um/drivers/cow_user.c +++ b/arch/um/drivers/cow_user.c | |||
@@ -8,11 +8,10 @@ | |||
8 | * that. | 8 | * that. |
9 | */ | 9 | */ |
10 | #include <unistd.h> | 10 | #include <unistd.h> |
11 | #include <byteswap.h> | ||
12 | #include <errno.h> | 11 | #include <errno.h> |
13 | #include <string.h> | 12 | #include <string.h> |
14 | #include <arpa/inet.h> | 13 | #include <arpa/inet.h> |
15 | #include <asm/types.h> | 14 | #include <endian.h> |
16 | #include "cow.h" | 15 | #include "cow.h" |
17 | #include "cow_sys.h" | 16 | #include "cow_sys.h" |
18 | 17 | ||
@@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file, | |||
214 | "header\n"); | 213 | "header\n"); |
215 | goto out; | 214 | goto out; |
216 | } | 215 | } |
217 | header->magic = htonl(COW_MAGIC); | 216 | header->magic = htobe32(COW_MAGIC); |
218 | header->version = htonl(COW_VERSION); | 217 | header->version = htobe32(COW_VERSION); |
219 | 218 | ||
220 | err = -EINVAL; | 219 | err = -EINVAL; |
221 | if (strlen(backing_file) > sizeof(header->backing_file) - 1) { | 220 | if (strlen(backing_file) > sizeof(header->backing_file) - 1) { |
@@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file, | |||
246 | goto out_free; | 245 | goto out_free; |
247 | } | 246 | } |
248 | 247 | ||
249 | header->mtime = htonl(modtime); | 248 | header->mtime = htobe32(modtime); |
250 | header->size = htonll(*size); | 249 | header->size = htobe64(*size); |
251 | header->sectorsize = htonl(sectorsize); | 250 | header->sectorsize = htobe32(sectorsize); |
252 | header->alignment = htonl(alignment); | 251 | header->alignment = htobe32(alignment); |
253 | header->cow_format = COW_BITMAP; | 252 | header->cow_format = COW_BITMAP; |
254 | 253 | ||
255 | err = cow_write_file(fd, header, sizeof(*header)); | 254 | err = cow_write_file(fd, header, sizeof(*header)); |
@@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, | |||
301 | magic = header->v1.magic; | 300 | magic = header->v1.magic; |
302 | if (magic == COW_MAGIC) | 301 | if (magic == COW_MAGIC) |
303 | version = header->v1.version; | 302 | version = header->v1.version; |
304 | else if (magic == ntohl(COW_MAGIC)) | 303 | else if (magic == be32toh(COW_MAGIC)) |
305 | version = ntohl(header->v1.version); | 304 | version = be32toh(header->v1.version); |
306 | /* No error printed because the non-COW case comes through here */ | 305 | /* No error printed because the non-COW case comes through here */ |
307 | else goto out; | 306 | else goto out; |
308 | 307 | ||
@@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, | |||
327 | "header\n"); | 326 | "header\n"); |
328 | goto out; | 327 | goto out; |
329 | } | 328 | } |
330 | *mtime_out = ntohl(header->v2.mtime); | 329 | *mtime_out = be32toh(header->v2.mtime); |
331 | *size_out = ntohll(header->v2.size); | 330 | *size_out = be64toh(header->v2.size); |
332 | *sectorsize_out = ntohl(header->v2.sectorsize); | 331 | *sectorsize_out = be32toh(header->v2.sectorsize); |
333 | *bitmap_offset_out = sizeof(header->v2); | 332 | *bitmap_offset_out = sizeof(header->v2); |
334 | *align_out = *sectorsize_out; | 333 | *align_out = *sectorsize_out; |
335 | file = header->v2.backing_file; | 334 | file = header->v2.backing_file; |
@@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, | |||
341 | "header\n"); | 340 | "header\n"); |
342 | goto out; | 341 | goto out; |
343 | } | 342 | } |
344 | *mtime_out = ntohl(header->v3.mtime); | 343 | *mtime_out = be32toh(header->v3.mtime); |
345 | *size_out = ntohll(header->v3.size); | 344 | *size_out = be64toh(header->v3.size); |
346 | *sectorsize_out = ntohl(header->v3.sectorsize); | 345 | *sectorsize_out = be32toh(header->v3.sectorsize); |
347 | *align_out = ntohl(header->v3.alignment); | 346 | *align_out = be32toh(header->v3.alignment); |
348 | if (*align_out == 0) { | 347 | if (*align_out == 0) { |
349 | cow_printf("read_cow_header - invalid COW header, " | 348 | cow_printf("read_cow_header - invalid COW header, " |
350 | "align == 0\n"); | 349 | "align == 0\n"); |
@@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg, | |||
366 | * this was used until Dec2005 - 64bits are needed to represent | 365 | * this was used until Dec2005 - 64bits are needed to represent |
367 | * 2038+. I.e. we can safely do this truncating cast. | 366 | * 2038+. I.e. we can safely do this truncating cast. |
368 | * | 367 | * |
369 | * Additionally, we must use ntohl() instead of ntohll(), since | 368 | * Additionally, we must use be32toh() instead of be64toh(), since |
370 | * the program used to use the former (tested - I got mtime | 369 | * the program used to use the former (tested - I got mtime |
371 | * mismatch "0 vs whatever"). | 370 | * mismatch "0 vs whatever"). |
372 | * | 371 | * |
373 | * Ever heard about bug-to-bug-compatibility ? ;-) */ | 372 | * Ever heard about bug-to-bug-compatibility ? ;-) */ |
374 | *mtime_out = (time32_t) ntohl(header->v3_b.mtime); | 373 | *mtime_out = (time32_t) be32toh(header->v3_b.mtime); |
375 | 374 | ||
376 | *size_out = ntohll(header->v3_b.size); | 375 | *size_out = be64toh(header->v3_b.size); |
377 | *sectorsize_out = ntohl(header->v3_b.sectorsize); | 376 | *sectorsize_out = be32toh(header->v3_b.sectorsize); |
378 | *align_out = ntohl(header->v3_b.alignment); | 377 | *align_out = be32toh(header->v3_b.alignment); |
379 | if (*align_out == 0) { | 378 | if (*align_out == 0) { |
380 | cow_printf("read_cow_header - invalid COW header, " | 379 | cow_printf("read_cow_header - invalid COW header, " |
381 | "align == 0\n"); | 380 | "align == 0\n"); |
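The cow_user.c conversion above swaps the private htonll()/ntohll() macros (deleted from cow.h) for the <endian.h> helpers, which spell the same operations: htobe32()/be32toh() match htonl()/ntohl(), and htobe64()/be64toh() cover the 64-bit case the old macros hand-rolled. A small standalone sketch of the on-disk conversion, not taken from the patch:

	#include <endian.h>
	#include <stdint.h>

	/* Sketch: COW header fields are stored big endian, as before. */
	static uint64_t cow_size_to_disk(uint64_t size)
	{
		return htobe64(size);	/* was htonll(size) */
	}

	static uint64_t cow_size_from_disk(uint64_t size)
	{
		return be64toh(size);	/* was ntohll(size) */
	}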
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index e672bd6d43e3..43b39d61b538 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/workqueue.h> | 22 | #include <linux/workqueue.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
25 | #include <asm/switch_to.h> | ||
25 | 26 | ||
26 | #include "init.h" | 27 | #include "init.h" |
27 | #include "irq_kern.h" | 28 | #include "irq_kern.h" |
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 8419f5cf2ac7..fff24352255d 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild | |||
@@ -1,3 +1,4 @@ | |||
1 | generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h | 1 | generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h |
2 | generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h | 2 | generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h |
3 | generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h | 3 | generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h |
4 | generic-y += switch_to.h | ||
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 492bc4c1b62b..65a1c3d690ea 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile | |||
@@ -3,9 +3,10 @@ | |||
3 | # Licensed under the GPL | 3 | # Licensed under the GPL |
4 | # | 4 | # |
5 | 5 | ||
6 | CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \ | 6 | CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \ |
7 | -DELF_ARCH=$(LDS_ELF_ARCH) \ | 7 | -DELF_ARCH=$(LDS_ELF_ARCH) \ |
8 | -DELF_FORMAT=$(LDS_ELF_FORMAT) | 8 | -DELF_FORMAT=$(LDS_ELF_FORMAT) \ |
9 | $(LDS_EXTRA) | ||
9 | extra-y := vmlinux.lds | 10 | extra-y := vmlinux.lds |
10 | clean-files := | 11 | clean-files := |
11 | 12 | ||
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index f386d04a84a5..2b73dedb44ca 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task) | |||
88 | 88 | ||
89 | extern void arch_switch_to(struct task_struct *to); | 89 | extern void arch_switch_to(struct task_struct *to); |
90 | 90 | ||
91 | void *_switch_to(void *prev, void *next, void *last) | 91 | void *__switch_to(struct task_struct *from, struct task_struct *to) |
92 | { | 92 | { |
93 | struct task_struct *from = prev; | ||
94 | struct task_struct *to = next; | ||
95 | |||
96 | to->thread.prev_sched = from; | 93 | to->thread.prev_sched = from; |
97 | set_current(to); | 94 | set_current(to); |
98 | 95 | ||
@@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last) | |||
111 | } while (current->thread.saved_task); | 108 | } while (current->thread.saved_task); |
112 | 109 | ||
113 | return current->thread.prev_sched; | 110 | return current->thread.prev_sched; |
114 | |||
115 | } | 111 | } |
116 | 112 | ||
117 | void interrupt_end(void) | 113 | void interrupt_end(void) |
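Renaming _switch_to() to __switch_to() lets UML sit behind the asm-generic switch_to.h wrapper pulled in via the Kbuild change above; that wrapper looks roughly like the following, shown for context only and not part of this diff.

	/* asm-generic/switch_to.h wrapper, sketched for context: */
	extern struct task_struct *__switch_to(struct task_struct *,
					       struct task_struct *);

	#define switch_to(prev, next, last)			\
		do {						\
			((last) = __switch_to((prev), (next)));	\
		} while (0)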
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c index 4947b319f53a..0a49ef0c2bf4 100644 --- a/arch/um/kernel/skas/mmu.c +++ b/arch/um/kernel/skas/mmu.c | |||
@@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm) | |||
103 | 103 | ||
104 | void uml_setup_stubs(struct mm_struct *mm) | 104 | void uml_setup_stubs(struct mm_struct *mm) |
105 | { | 105 | { |
106 | struct page **pages; | ||
107 | int err, ret; | 106 | int err, ret; |
108 | 107 | ||
109 | if (!skas_needs_stub) | 108 | if (!skas_needs_stub) |
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index 4be406abeefd..36b62bc52638 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um | |||
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32) | |||
14 | 14 | ||
15 | export LDFLAGS | 15 | export LDFLAGS |
16 | 16 | ||
17 | LDS_EXTRA := -Ui386 | ||
18 | export LDS_EXTRA | ||
19 | |||
17 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. | 20 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. |
18 | include $(srctree)/arch/x86/Makefile_32.cpu | 21 | include $(srctree)/arch/x86/Makefile_32.cpu |
19 | 22 | ||
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index b3b733262909..99480e55973d 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h | |||
@@ -43,7 +43,7 @@ extern void __add_wrong_size(void) | |||
43 | switch (sizeof(*(ptr))) { \ | 43 | switch (sizeof(*(ptr))) { \ |
44 | case __X86_CASE_B: \ | 44 | case __X86_CASE_B: \ |
45 | asm volatile (lock #op "b %b0, %1\n" \ | 45 | asm volatile (lock #op "b %b0, %1\n" \ |
46 | : "+r" (__ret), "+m" (*(ptr)) \ | 46 | : "+q" (__ret), "+m" (*(ptr)) \ |
47 | : : "memory", "cc"); \ | 47 | : : "memory", "cc"); \ |
48 | break; \ | 48 | break; \ |
49 | case __X86_CASE_W: \ | 49 | case __X86_CASE_W: \ |
@@ -173,7 +173,7 @@ extern void __add_wrong_size(void) | |||
173 | switch (sizeof(*(ptr))) { \ | 173 | switch (sizeof(*(ptr))) { \ |
174 | case __X86_CASE_B: \ | 174 | case __X86_CASE_B: \ |
175 | asm volatile (lock "addb %b1, %0\n" \ | 175 | asm volatile (lock "addb %b1, %0\n" \ |
176 | : "+m" (*(ptr)) : "ri" (inc) \ | 176 | : "+m" (*(ptr)) : "qi" (inc) \ |
177 | : "memory", "cc"); \ | 177 | : "memory", "cc"); \ |
178 | break; \ | 178 | break; \ |
179 | case __X86_CASE_W: \ | 179 | case __X86_CASE_W: \ |
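The constraint change above ("+r" to "+q", "ri" to "qi") matters only for the byte-sized case: on 32-bit x86 only %eax-%edx have addressable low-byte sub-registers, so "r" could hand the compiler %esi or %edi and the "%b0" template would fail to assemble. A standalone sketch of the byte case with the corrected constraint, illustrative rather than taken from the patch:

	/* Sketch: byte-sized exchange with the "q" (a/b/c/d register) constraint. */
	static inline unsigned char xchg_u8_sketch(unsigned char *p, unsigned char val)
	{
		asm volatile("xchgb %b0, %1"
			     : "+q" (val), "+m" (*p)
			     : : "memory");
		return val;
	}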
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8be5f54d9360..e0544597cfe7 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; }; | |||
557 | 557 | ||
558 | extern unsigned long | 558 | extern unsigned long |
559 | copy_from_user_nmi(void *to, const void __user *from, unsigned long n); | 559 | copy_from_user_nmi(void *to, const void __user *from, unsigned long n); |
560 | extern __must_check long | ||
561 | strncpy_from_user(char *dst, const char __user *src, long count); | ||
560 | 562 | ||
561 | /* | 563 | /* |
562 | * movsl can be slow when source and dest are not both 8-byte aligned | 564 | * movsl can be slow when source and dest are not both 8-byte aligned |
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 566e803cc602..8084bc73b18c 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h | |||
@@ -213,11 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to, | |||
213 | return n; | 213 | return n; |
214 | } | 214 | } |
215 | 215 | ||
216 | long __must_check strncpy_from_user(char *dst, const char __user *src, | ||
217 | long count); | ||
218 | long __must_check __strncpy_from_user(char *dst, | ||
219 | const char __user *src, long count); | ||
220 | |||
221 | /** | 216 | /** |
222 | * strlen_user: - Get the size of a string in user space. | 217 | * strlen_user: - Get the size of a string in user space. |
223 | * @str: The string to measure. | 218 | * @str: The string to measure. |
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 1c66d30971ad..fcd4b6f3ef02 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | __must_check long | ||
212 | strncpy_from_user(char *dst, const char __user *src, long count); | ||
213 | __must_check long | ||
214 | __strncpy_from_user(char *dst, const char __user *src, long count); | ||
215 | __must_check long strnlen_user(const char __user *str, long n); | 211 | __must_check long strnlen_user(const char __user *str, long n); |
216 | __must_check long __strnlen_user(const char __user *str, long n); | 212 | __must_check long __strnlen_user(const char __user *str, long n); |
217 | __must_check long strlen_user(const char __user *str); | 213 | __must_check long strlen_user(const char __user *str); |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index f386dc49f988..7515cf0e1805 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -216,9 +216,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) | |||
216 | current_thread_info()->sig_on_uaccess_error = 1; | 216 | current_thread_info()->sig_on_uaccess_error = 1; |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * 0 is a valid user pointer (in the access_ok sense) on 32-bit and | 219 | * NULL is a valid user pointer (in the access_ok sense) on 32-bit and |
220 | * 64-bit, so we don't need to special-case it here. For all the | 220 | * 64-bit, so we don't need to special-case it here. For all the |
221 | * vsyscalls, 0 means "don't write anything" not "write it at | 221 | * vsyscalls, NULL means "don't write anything" not "write it at |
222 | * address 0". | 222 | * address 0". |
223 | */ | 223 | */ |
224 | ret = -EFAULT; | 224 | ret = -EFAULT; |
@@ -247,7 +247,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) | |||
247 | 247 | ||
248 | ret = sys_getcpu((unsigned __user *)regs->di, | 248 | ret = sys_getcpu((unsigned __user *)regs->di, |
249 | (unsigned __user *)regs->si, | 249 | (unsigned __user *)regs->si, |
250 | 0); | 250 | NULL); |
251 | break; | 251 | break; |
252 | } | 252 | } |
253 | 253 | ||
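The 0 → NULL changes above are purely about readability: the comment and the sys_getcpu() call both rely on the convention that a NULL user pointer means "don't write this result". A small userspace illustration of that convention (sketch using the raw syscall, not the vsyscall page):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu;

	/* node and tcache are NULL: the kernel skips those writes entirely */
	if (syscall(SYS_getcpu, &cpu, NULL, NULL) == 0)
		printf("running on cpu %u\n", cpu);
	return 0;
}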
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 173df38dbda5..2e88438ffd83 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c | |||
@@ -459,17 +459,17 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu) | |||
459 | pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1); | 459 | pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1); |
460 | 460 | ||
461 | if (pmu->version == 1) { | 461 | if (pmu->version == 1) { |
462 | pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1; | 462 | pmu->nr_arch_fixed_counters = 0; |
463 | return; | 463 | } else { |
464 | pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), | ||
465 | X86_PMC_MAX_FIXED); | ||
466 | pmu->counter_bitmask[KVM_PMC_FIXED] = | ||
467 | ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; | ||
464 | } | 468 | } |
465 | 469 | ||
466 | pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f), | 470 | pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | |
467 | X86_PMC_MAX_FIXED); | 471 | (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED); |
468 | pmu->counter_bitmask[KVM_PMC_FIXED] = | 472 | pmu->global_ctrl_mask = ~pmu->global_ctrl; |
469 | ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1; | ||
470 | pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1) | ||
471 | | (((1ull << pmu->nr_arch_fixed_counters) - 1) | ||
472 | << X86_PMC_IDX_FIXED)); | ||
473 | } | 473 | } |
474 | 474 | ||
475 | void kvm_pmu_init(struct kvm_vcpu *vcpu) | 475 | void kvm_pmu_init(struct kvm_vcpu *vcpu) |
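A worked example of the consolidated mask computation above (values are illustrative, not taken from the patch): with 4 general-purpose counters, 3 fixed counters and X86_PMC_IDX_FIXED equal to 32, the guest-visible enable mask covers bits 0-3 and 32-34.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int nr_gp = 4, nr_fixed = 3, idx_fixed = 32;	/* assumed values */

	uint64_t global_ctrl = ((1u << nr_gp) - 1) |
			       (((1ull << nr_fixed) - 1) << idx_fixed);
	uint64_t global_ctrl_mask = ~global_ctrl;	/* reserved bits */

	/* prints 0x70000000f and 0xfffffff8fffffff0 */
	printf("global_ctrl      = %#llx\n", (unsigned long long)global_ctrl);
	printf("global_ctrl_mask = %#llx\n", (unsigned long long)global_ctrl_mask);
	return 0;
}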
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 52f685635766..61ebdb6390ee 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2217,9 +2217,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
2217 | msr = find_msr_entry(vmx, msr_index); | 2217 | msr = find_msr_entry(vmx, msr_index); |
2218 | if (msr) { | 2218 | if (msr) { |
2219 | msr->data = data; | 2219 | msr->data = data; |
2220 | if (msr - vmx->guest_msrs < vmx->save_nmsrs) | 2220 | if (msr - vmx->guest_msrs < vmx->save_nmsrs) { |
2221 | preempt_disable(); | ||
2221 | kvm_set_shared_msr(msr->index, msr->data, | 2222 | kvm_set_shared_msr(msr->index, msr->data, |
2222 | msr->mask); | 2223 | msr->mask); |
2224 | preempt_enable(); | ||
2225 | } | ||
2223 | break; | 2226 | break; |
2224 | } | 2227 | } |
2225 | ret = kvm_set_msr_common(vcpu, msr_index, data); | 2228 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
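The added preempt_disable()/preempt_enable() pair above exists because the shared-MSR machinery keeps a per-CPU software copy next to the hardware register; a preemption-driven migration between the two writes would update the copy on one CPU and the MSR on another. The general shape of that pattern (hypothetical struct and helper, not the KVM code):

struct shared_msr {
	u32 index;	/* MSR number */
	u64 value;	/* per-CPU software copy */
};

static void update_shared_msr(struct shared_msr *m, u64 data)
{
	preempt_disable();	/* no migration between the two writes */
	m->value = data;	/* keep the software copy in sync ... */
	wrmsrl(m->index, data);	/* ... with the MSR of the same CPU */
	preempt_enable();
}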
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 25feb1ae71c5..b1e6c4b2e8eb 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c | |||
@@ -379,8 +379,8 @@ err_out: | |||
379 | return; | 379 | return; |
380 | } | 380 | } |
381 | 381 | ||
382 | /* Decode moffset16/32/64 */ | 382 | /* Decode moffset16/32/64. Return 0 if failed */ |
383 | static void __get_moffset(struct insn *insn) | 383 | static int __get_moffset(struct insn *insn) |
384 | { | 384 | { |
385 | switch (insn->addr_bytes) { | 385 | switch (insn->addr_bytes) { |
386 | case 2: | 386 | case 2: |
@@ -397,15 +397,19 @@ static void __get_moffset(struct insn *insn) | |||
397 | insn->moffset2.value = get_next(int, insn); | 397 | insn->moffset2.value = get_next(int, insn); |
398 | insn->moffset2.nbytes = 4; | 398 | insn->moffset2.nbytes = 4; |
399 | break; | 399 | break; |
400 | default: /* opnd_bytes must be modified manually */ | ||
401 | goto err_out; | ||
400 | } | 402 | } |
401 | insn->moffset1.got = insn->moffset2.got = 1; | 403 | insn->moffset1.got = insn->moffset2.got = 1; |
402 | 404 | ||
405 | return 1; | ||
406 | |||
403 | err_out: | 407 | err_out: |
404 | return; | 408 | return 0; |
405 | } | 409 | } |
406 | 410 | ||
407 | /* Decode imm v32(Iz) */ | 411 | /* Decode imm v32(Iz). Return 0 if failed */ |
408 | static void __get_immv32(struct insn *insn) | 412 | static int __get_immv32(struct insn *insn) |
409 | { | 413 | { |
410 | switch (insn->opnd_bytes) { | 414 | switch (insn->opnd_bytes) { |
411 | case 2: | 415 | case 2: |
@@ -417,14 +421,18 @@ static void __get_immv32(struct insn *insn) | |||
417 | insn->immediate.value = get_next(int, insn); | 421 | insn->immediate.value = get_next(int, insn); |
418 | insn->immediate.nbytes = 4; | 422 | insn->immediate.nbytes = 4; |
419 | break; | 423 | break; |
424 | default: /* opnd_bytes must be modified manually */ | ||
425 | goto err_out; | ||
420 | } | 426 | } |
421 | 427 | ||
428 | return 1; | ||
429 | |||
422 | err_out: | 430 | err_out: |
423 | return; | 431 | return 0; |
424 | } | 432 | } |
425 | 433 | ||
426 | /* Decode imm v64(Iv/Ov) */ | 434 | /* Decode imm v64(Iv/Ov). Return 0 if failed */
427 | static void __get_immv(struct insn *insn) | 435 | static int __get_immv(struct insn *insn) |
428 | { | 436 | { |
429 | switch (insn->opnd_bytes) { | 437 | switch (insn->opnd_bytes) { |
430 | case 2: | 438 | case 2: |
@@ -441,15 +449,18 @@ static void __get_immv(struct insn *insn) | |||
441 | insn->immediate2.value = get_next(int, insn); | 449 | insn->immediate2.value = get_next(int, insn); |
442 | insn->immediate2.nbytes = 4; | 450 | insn->immediate2.nbytes = 4; |
443 | break; | 451 | break; |
452 | default: /* opnd_bytes must be modified manually */ | ||
453 | goto err_out; | ||
444 | } | 454 | } |
445 | insn->immediate1.got = insn->immediate2.got = 1; | 455 | insn->immediate1.got = insn->immediate2.got = 1; |
446 | 456 | ||
457 | return 1; | ||
447 | err_out: | 458 | err_out: |
448 | return; | 459 | return 0; |
449 | } | 460 | } |
450 | 461 | ||
451 | /* Decode ptr16:16/32(Ap) */ | 462 | /* Decode ptr16:16/32(Ap) */ |
452 | static void __get_immptr(struct insn *insn) | 463 | static int __get_immptr(struct insn *insn) |
453 | { | 464 | { |
454 | switch (insn->opnd_bytes) { | 465 | switch (insn->opnd_bytes) { |
455 | case 2: | 466 | case 2: |
@@ -462,14 +473,17 @@ static void __get_immptr(struct insn *insn) | |||
462 | break; | 473 | break; |
463 | case 8: | 474 | case 8: |
464 | /* ptr16:64 does not exist (no segment) */ | 475 | /* ptr16:64 does not exist (no segment) */
465 | return; | 476 | return 0; |
477 | default: /* opnd_bytes must be modified manually */ | ||
478 | goto err_out; | ||
466 | } | 479 | } |
467 | insn->immediate2.value = get_next(unsigned short, insn); | 480 | insn->immediate2.value = get_next(unsigned short, insn); |
468 | insn->immediate2.nbytes = 2; | 481 | insn->immediate2.nbytes = 2; |
469 | insn->immediate1.got = insn->immediate2.got = 1; | 482 | insn->immediate1.got = insn->immediate2.got = 1; |
470 | 483 | ||
484 | return 1; | ||
471 | err_out: | 485 | err_out: |
472 | return; | 486 | return 0; |
473 | } | 487 | } |
474 | 488 | ||
475 | /** | 489 | /** |
@@ -489,7 +503,8 @@ void insn_get_immediate(struct insn *insn) | |||
489 | insn_get_displacement(insn); | 503 | insn_get_displacement(insn); |
490 | 504 | ||
491 | if (inat_has_moffset(insn->attr)) { | 505 | if (inat_has_moffset(insn->attr)) { |
492 | __get_moffset(insn); | 506 | if (!__get_moffset(insn)) |
507 | goto err_out; | ||
493 | goto done; | 508 | goto done; |
494 | } | 509 | } |
495 | 510 | ||
@@ -517,16 +532,20 @@ void insn_get_immediate(struct insn *insn) | |||
517 | insn->immediate2.nbytes = 4; | 532 | insn->immediate2.nbytes = 4; |
518 | break; | 533 | break; |
519 | case INAT_IMM_PTR: | 534 | case INAT_IMM_PTR: |
520 | __get_immptr(insn); | 535 | if (!__get_immptr(insn)) |
536 | goto err_out; | ||
521 | break; | 537 | break; |
522 | case INAT_IMM_VWORD32: | 538 | case INAT_IMM_VWORD32: |
523 | __get_immv32(insn); | 539 | if (!__get_immv32(insn)) |
540 | goto err_out; | ||
524 | break; | 541 | break; |
525 | case INAT_IMM_VWORD: | 542 | case INAT_IMM_VWORD: |
526 | __get_immv(insn); | 543 | if (!__get_immv(insn)) |
544 | goto err_out; | ||
527 | break; | 545 | break; |
528 | default: | 546 | default: |
529 | break; | 547 | /* Here, insn must have an immediate, but failed */ |
548 | goto err_out; | ||
530 | } | 549 | } |
531 | if (inat_has_second_immediate(insn->attr)) { | 550 | if (inat_has_second_immediate(insn->attr)) { |
532 | insn->immediate2.value = get_next(char, insn); | 551 | insn->immediate2.value = get_next(char, insn); |
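The insn.c hunks above all apply one refactor: each operand decoder returns 0 when addr_bytes/opnd_bytes holds a value it cannot handle, and insn_get_immediate() turns that into its own err_out path instead of continuing with a half-initialised struct insn. A condensed sketch of the shape (illustrative names, not the real decoder):

/* Decoder reports failure instead of silently returning ... */
static int get_operand(struct insn *insn)
{
	switch (insn->opnd_bytes) {
	case 2:
	case 4:
	case 8:
		/* ... consume the operand bytes ... */
		return 1;
	default:		/* unexpected size: corrupt or unsupported */
		return 0;
	}
}

/* ... and the caller converts the failure into its error path. */
void decode_immediate(struct insn *insn)
{
	if (!get_operand(insn))
		goto err_out;
	/* ... continue decoding ... */
	return;
err_out:
	return;
}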
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index 97be9cb54483..d6ae30bbd7bb 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/highmem.h> | 7 | #include <linux/highmem.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | 9 | ||
10 | #include <asm/word-at-a-time.h> | ||
11 | |||
10 | /* | 12 | /* |
11 | * best effort, GUP based copy_from_user() that is NMI-safe | 13 | * best effort, GUP based copy_from_user() that is NMI-safe |
12 | */ | 14 | */ |
@@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
41 | return len; | 43 | return len; |
42 | } | 44 | } |
43 | EXPORT_SYMBOL_GPL(copy_from_user_nmi); | 45 | EXPORT_SYMBOL_GPL(copy_from_user_nmi); |
46 | |||
47 | static inline unsigned long count_bytes(unsigned long mask) | ||
48 | { | ||
49 | mask = (mask - 1) & ~mask; | ||
50 | mask >>= 7; | ||
51 | return count_masked_bytes(mask); | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Do a strncpy, return length of string without final '\0'. | ||
56 | * 'count' is the user-supplied count (return 'count' if we | ||
57 | * hit it), 'max' is the address space maximum (and we return | ||
58 | * -EFAULT if we hit it). | ||
59 | */ | ||
60 | static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) | ||
61 | { | ||
62 | long res = 0; | ||
63 | |||
64 | /* | ||
65 | * Truncate 'max' to the user-specified limit, so that | ||
66 | * we only have one limit we need to check in the loop | ||
67 | */ | ||
68 | if (max > count) | ||
69 | max = count; | ||
70 | |||
71 | while (max >= sizeof(unsigned long)) { | ||
72 | unsigned long c; | ||
73 | |||
74 | /* Fall back to byte-at-a-time if we get a page fault */ | ||
75 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) | ||
76 | break; | ||
77 | /* This can write a few bytes past the NUL character, but that's ok */ | ||
78 | *(unsigned long *)(dst+res) = c; | ||
79 | c = has_zero(c); | ||
80 | if (c) | ||
81 | return res + count_bytes(c); | ||
82 | res += sizeof(unsigned long); | ||
83 | max -= sizeof(unsigned long); | ||
84 | } | ||
85 | |||
86 | while (max) { | ||
87 | char c; | ||
88 | |||
89 | if (unlikely(__get_user(c,src+res))) | ||
90 | return -EFAULT; | ||
91 | dst[res] = c; | ||
92 | if (!c) | ||
93 | return res; | ||
94 | res++; | ||
95 | max--; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * Uhhuh. We hit 'max'. But was that the user-specified maximum | ||
100 | * too? If so, that's ok - we got as much as the user asked for. | ||
101 | */ | ||
102 | if (res >= count) | ||
103 | return res; | ||
104 | |||
105 | /* | ||
106 | * Nope: we hit the address space limit, and we still had more | ||
107 | * characters the caller would have wanted. That's an EFAULT. | ||
108 | */ | ||
109 | return -EFAULT; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
114 | * @dst: Destination address, in kernel space. This buffer must be at | ||
115 | * least @count bytes long. | ||
116 | * @src: Source address, in user space. | ||
117 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
118 | * | ||
119 | * Copies a NUL-terminated string from userspace to kernel space. | ||
120 | * | ||
121 | * On success, returns the length of the string (not including the trailing | ||
122 | * NUL). | ||
123 | * | ||
124 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
125 | * copied). | ||
126 | * | ||
127 | * If @count is smaller than the length of the string, copies @count bytes | ||
128 | * and returns @count. | ||
129 | */ | ||
130 | long | ||
131 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
132 | { | ||
133 | unsigned long max_addr, src_addr; | ||
134 | |||
135 | if (unlikely(count <= 0)) | ||
136 | return 0; | ||
137 | |||
138 | max_addr = current_thread_info()->addr_limit.seg; | ||
139 | src_addr = (unsigned long)src; | ||
140 | if (likely(src_addr < max_addr)) { | ||
141 | unsigned long max = max_addr - src_addr; | ||
142 | return do_strncpy_from_user(dst, src, count, max); | ||
143 | } | ||
144 | return -EFAULT; | ||
145 | } | ||
146 | EXPORT_SYMBOL(strncpy_from_user); | ||
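The fast path of do_strncpy_from_user() above copies a long at a time and locates the terminating NUL inside the copied word with has_zero()/count_bytes(). A userspace re-derivation of that trick (assumes 64-bit little-endian; the constants mirror the generic word-at-a-time helpers, so treat this as an illustration rather than the kernel's exact implementation):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define REPEAT_BYTE(x) ((uint64_t)(x) * 0x0101010101010101ull)

static uint64_t has_zero(uint64_t v)
{
	/* 0x80 ends up in every byte position that held 0x00 */
	return (v - REPEAT_BYTE(0x01)) & ~v & REPEAT_BYTE(0x80);
}

static unsigned count_bytes(uint64_t mask)
{
	mask = (mask - 1) & ~mask;	/* ones below the first 0x80 marker */
	mask >>= 7;			/* 0xff for each byte before the NUL */
	return (unsigned)(mask * 0x0001020304050608ull >> 56);
}

int main(void)
{
	uint64_t word;

	memcpy(&word, "ab\0xyzzy", 8);	/* byte 2 of the word is the NUL */
	printf("%u\n", count_bytes(has_zero(word)));	/* prints 2 */
	return 0;
}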
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index d9b094ca7aaa..ef2a6a5d78e3 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -33,93 +33,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon | |||
33 | __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) | 33 | __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Copy a null terminated string from userspace. | ||
37 | */ | ||
38 | |||
39 | #define __do_strncpy_from_user(dst, src, count, res) \ | ||
40 | do { \ | ||
41 | int __d0, __d1, __d2; \ | ||
42 | might_fault(); \ | ||
43 | __asm__ __volatile__( \ | ||
44 | " testl %1,%1\n" \ | ||
45 | " jz 2f\n" \ | ||
46 | "0: lodsb\n" \ | ||
47 | " stosb\n" \ | ||
48 | " testb %%al,%%al\n" \ | ||
49 | " jz 1f\n" \ | ||
50 | " decl %1\n" \ | ||
51 | " jnz 0b\n" \ | ||
52 | "1: subl %1,%0\n" \ | ||
53 | "2:\n" \ | ||
54 | ".section .fixup,\"ax\"\n" \ | ||
55 | "3: movl %5,%0\n" \ | ||
56 | " jmp 2b\n" \ | ||
57 | ".previous\n" \ | ||
58 | _ASM_EXTABLE(0b,3b) \ | ||
59 | : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ | ||
60 | "=&D" (__d2) \ | ||
61 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ | ||
62 | : "memory"); \ | ||
63 | } while (0) | ||
64 | |||
65 | /** | ||
66 | * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. | ||
67 | * @dst: Destination address, in kernel space. This buffer must be at | ||
68 | * least @count bytes long. | ||
69 | * @src: Source address, in user space. | ||
70 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
71 | * | ||
72 | * Copies a NUL-terminated string from userspace to kernel space. | ||
73 | * Caller must check the specified block with access_ok() before calling | ||
74 | * this function. | ||
75 | * | ||
76 | * On success, returns the length of the string (not including the trailing | ||
77 | * NUL). | ||
78 | * | ||
79 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
80 | * copied). | ||
81 | * | ||
82 | * If @count is smaller than the length of the string, copies @count bytes | ||
83 | * and returns @count. | ||
84 | */ | ||
85 | long | ||
86 | __strncpy_from_user(char *dst, const char __user *src, long count) | ||
87 | { | ||
88 | long res; | ||
89 | __do_strncpy_from_user(dst, src, count, res); | ||
90 | return res; | ||
91 | } | ||
92 | EXPORT_SYMBOL(__strncpy_from_user); | ||
93 | |||
94 | /** | ||
95 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
96 | * @dst: Destination address, in kernel space. This buffer must be at | ||
97 | * least @count bytes long. | ||
98 | * @src: Source address, in user space. | ||
99 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
100 | * | ||
101 | * Copies a NUL-terminated string from userspace to kernel space. | ||
102 | * | ||
103 | * On success, returns the length of the string (not including the trailing | ||
104 | * NUL). | ||
105 | * | ||
106 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
107 | * copied). | ||
108 | * | ||
109 | * If @count is smaller than the length of the string, copies @count bytes | ||
110 | * and returns @count. | ||
111 | */ | ||
112 | long | ||
113 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
114 | { | ||
115 | long res = -EFAULT; | ||
116 | if (access_ok(VERIFY_READ, src, 1)) | ||
117 | __do_strncpy_from_user(dst, src, count, res); | ||
118 | return res; | ||
119 | } | ||
120 | EXPORT_SYMBOL(strncpy_from_user); | ||
121 | |||
122 | /* | ||
123 | * Zero Userspace | 36 | * Zero Userspace |
124 | */ | 37 | */ |
125 | 38 | ||
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index b7c2849ffb66..0d0326f388c0 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c | |||
@@ -9,55 +9,6 @@ | |||
9 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Copy a null terminated string from userspace. | ||
13 | */ | ||
14 | |||
15 | #define __do_strncpy_from_user(dst,src,count,res) \ | ||
16 | do { \ | ||
17 | long __d0, __d1, __d2; \ | ||
18 | might_fault(); \ | ||
19 | __asm__ __volatile__( \ | ||
20 | " testq %1,%1\n" \ | ||
21 | " jz 2f\n" \ | ||
22 | "0: lodsb\n" \ | ||
23 | " stosb\n" \ | ||
24 | " testb %%al,%%al\n" \ | ||
25 | " jz 1f\n" \ | ||
26 | " decq %1\n" \ | ||
27 | " jnz 0b\n" \ | ||
28 | "1: subq %1,%0\n" \ | ||
29 | "2:\n" \ | ||
30 | ".section .fixup,\"ax\"\n" \ | ||
31 | "3: movq %5,%0\n" \ | ||
32 | " jmp 2b\n" \ | ||
33 | ".previous\n" \ | ||
34 | _ASM_EXTABLE(0b,3b) \ | ||
35 | : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ | ||
36 | "=&D" (__d2) \ | ||
37 | : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ | ||
38 | : "memory"); \ | ||
39 | } while (0) | ||
40 | |||
41 | long | ||
42 | __strncpy_from_user(char *dst, const char __user *src, long count) | ||
43 | { | ||
44 | long res; | ||
45 | __do_strncpy_from_user(dst, src, count, res); | ||
46 | return res; | ||
47 | } | ||
48 | EXPORT_SYMBOL(__strncpy_from_user); | ||
49 | |||
50 | long | ||
51 | strncpy_from_user(char *dst, const char __user *src, long count) | ||
52 | { | ||
53 | long res = -EFAULT; | ||
54 | if (access_ok(VERIFY_READ, src, 1)) | ||
55 | return __strncpy_from_user(dst, src, count); | ||
56 | return res; | ||
57 | } | ||
58 | EXPORT_SYMBOL(strncpy_from_user); | ||
59 | |||
60 | /* | ||
61 | * Zero Userspace | 12 | * Zero Userspace |
62 | */ | 13 | */ |
63 | 14 | ||
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h new file mode 100644 index 000000000000..7d01b8c56c00 --- /dev/null +++ b/arch/x86/um/asm/barrier.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef _ASM_UM_BARRIER_H_ | ||
2 | #define _ASM_UM_BARRIER_H_ | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* | ||
14 | * Force strict CPU ordering. | ||
15 | * And yes, this is required on UP too when we're talking | ||
16 | * to devices. | ||
17 | */ | ||
18 | #ifdef CONFIG_X86_32 | ||
19 | |||
20 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
21 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
22 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
23 | |||
24 | #else /* CONFIG_X86_32 */ | ||
25 | |||
26 | #define mb() asm volatile("mfence" : : : "memory") | ||
27 | #define rmb() asm volatile("lfence" : : : "memory") | ||
28 | #define wmb() asm volatile("sfence" : : : "memory") | ||
29 | |||
30 | #endif /* CONFIG_X86_32 */ | ||
31 | |||
32 | #define read_barrier_depends() do { } while (0) | ||
33 | |||
34 | #ifdef CONFIG_SMP | ||
35 | |||
36 | #define smp_mb() mb() | ||
37 | #ifdef CONFIG_X86_PPRO_FENCE | ||
38 | #define smp_rmb() rmb() | ||
39 | #else /* CONFIG_X86_PPRO_FENCE */ | ||
40 | #define smp_rmb() barrier() | ||
41 | #endif /* CONFIG_X86_PPRO_FENCE */ | ||
42 | |||
43 | #ifdef CONFIG_X86_OOSTORE | ||
44 | #define smp_wmb() wmb() | ||
45 | #else /* CONFIG_X86_OOSTORE */ | ||
46 | #define smp_wmb() barrier() | ||
47 | #endif /* CONFIG_X86_OOSTORE */ | ||
48 | |||
49 | #define smp_read_barrier_depends() read_barrier_depends() | ||
50 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
51 | |||
52 | #else /* CONFIG_SMP */ | ||
53 | |||
54 | #define smp_mb() barrier() | ||
55 | #define smp_rmb() barrier() | ||
56 | #define smp_wmb() barrier() | ||
57 | #define smp_read_barrier_depends() do { } while (0) | ||
58 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
59 | |||
60 | #endif /* CONFIG_SMP */ | ||
61 | |||
62 | /* | ||
63 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
64 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
65 | * code region. | ||
66 | * | ||
67 | * (Could use an alternative three way for this if there was one.) | ||
68 | */ | ||
69 | static inline void rdtsc_barrier(void) | ||
70 | { | ||
71 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
72 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
73 | } | ||
74 | |||
75 | #endif | ||
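For context, the smp_* macros defined in the new barrier.h above are meant for CPU-to-CPU ordering, of which the classic use is a producer/consumer flag. A hedged sketch with hypothetical functions (not code from this patch):

static int payload;
static int ready;

static void producer(int value)
{
	payload = value;
	smp_wmb();		/* publish the payload before setting the flag */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		cpu_relax();	/* spin until the flag becomes visible */
	smp_rmb();		/* order the flag read before the payload read */
	return payload;
}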
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h deleted file mode 100644 index a459fd9b7598..000000000000 --- a/arch/x86/um/asm/system.h +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | #ifndef _ASM_X86_SYSTEM_H_ | ||
2 | #define _ASM_X86_SYSTEM_H_ | ||
3 | |||
4 | #include <asm/asm.h> | ||
5 | #include <asm/segment.h> | ||
6 | #include <asm/cpufeature.h> | ||
7 | #include <asm/cmpxchg.h> | ||
8 | #include <asm/nops.h> | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/irqflags.h> | ||
12 | |||
13 | /* entries in ARCH_DLINFO: */ | ||
14 | #ifdef CONFIG_IA32_EMULATION | ||
15 | # define AT_VECTOR_SIZE_ARCH 2 | ||
16 | #else | ||
17 | # define AT_VECTOR_SIZE_ARCH 1 | ||
18 | #endif | ||
19 | |||
20 | extern unsigned long arch_align_stack(unsigned long sp); | ||
21 | |||
22 | void default_idle(void); | ||
23 | |||
24 | /* | ||
25 | * Force strict CPU ordering. | ||
26 | * And yes, this is required on UP too when we're talking | ||
27 | * to devices. | ||
28 | */ | ||
29 | #ifdef CONFIG_X86_32 | ||
30 | /* | ||
31 | * Some non-Intel clones support out of order store. wmb() ceases to be a | ||
32 | * nop for these. | ||
33 | */ | ||
34 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | ||
35 | #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) | ||
36 | #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) | ||
37 | #else | ||
38 | #define mb() asm volatile("mfence":::"memory") | ||
39 | #define rmb() asm volatile("lfence":::"memory") | ||
40 | #define wmb() asm volatile("sfence" ::: "memory") | ||
41 | #endif | ||
42 | |||
43 | /** | ||
44 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
45 | * depend on. | ||
46 | * | ||
47 | * No data-dependent reads from memory-like regions are ever reordered | ||
48 | * over this barrier. All reads preceding this primitive are guaranteed | ||
49 | * to access memory (but not necessarily other CPUs' caches) before any | ||
50 | * reads following this primitive that depend on the data returned by | ||
51 | * any of the preceding reads. This primitive is much lighter weight than | ||
52 | * rmb() on most CPUs, and is never heavier weight than is | ||
53 | * rmb(). | ||
54 | * | ||
55 | * These ordering constraints are respected by both the local CPU | ||
56 | * and the compiler. | ||
57 | * | ||
58 | * Ordering is not guaranteed by anything other than these primitives, | ||
59 | * not even by data dependencies. See the documentation for | ||
60 | * memory_barrier() for examples and URLs to more information. | ||
61 | * | ||
62 | * For example, the following code would force ordering (the initial | ||
63 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
64 | * | ||
65 | * <programlisting> | ||
66 | * CPU 0 CPU 1 | ||
67 | * | ||
68 | * b = 2; | ||
69 | * memory_barrier(); | ||
70 | * p = &b; q = p; | ||
71 | * read_barrier_depends(); | ||
72 | * d = *q; | ||
73 | * </programlisting> | ||
74 | * | ||
75 | * because the read of "*q" depends on the read of "p" and these | ||
76 | * two reads are separated by a read_barrier_depends(). However, | ||
77 | * the following code, with the same initial values for "a" and "b": | ||
78 | * | ||
79 | * <programlisting> | ||
80 | * CPU 0 CPU 1 | ||
81 | * | ||
82 | * a = 2; | ||
83 | * memory_barrier(); | ||
84 | * b = 3; y = b; | ||
85 | * read_barrier_depends(); | ||
86 | * x = a; | ||
87 | * </programlisting> | ||
88 | * | ||
89 | * does not enforce ordering, since there is no data dependency between | ||
90 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
91 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
92 | * in cases like this where there are no data dependencies. | ||
93 | **/ | ||
94 | |||
95 | #define read_barrier_depends() do { } while (0) | ||
96 | |||
97 | #ifdef CONFIG_SMP | ||
98 | #define smp_mb() mb() | ||
99 | #ifdef CONFIG_X86_PPRO_FENCE | ||
100 | # define smp_rmb() rmb() | ||
101 | #else | ||
102 | # define smp_rmb() barrier() | ||
103 | #endif | ||
104 | #ifdef CONFIG_X86_OOSTORE | ||
105 | # define smp_wmb() wmb() | ||
106 | #else | ||
107 | # define smp_wmb() barrier() | ||
108 | #endif | ||
109 | #define smp_read_barrier_depends() read_barrier_depends() | ||
110 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | ||
111 | #else | ||
112 | #define smp_mb() barrier() | ||
113 | #define smp_rmb() barrier() | ||
114 | #define smp_wmb() barrier() | ||
115 | #define smp_read_barrier_depends() do { } while (0) | ||
116 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
117 | #endif | ||
118 | |||
119 | /* | ||
120 | * Stop RDTSC speculation. This is needed when you need to use RDTSC | ||
121 | * (or get_cycles or vread that possibly accesses the TSC) in a defined | ||
122 | * code region. | ||
123 | * | ||
124 | * (Could use an alternative three way for this if there was one.) | ||
125 | */ | ||
126 | static inline void rdtsc_barrier(void) | ||
127 | { | ||
128 | alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); | ||
129 | alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); | ||
130 | } | ||
131 | |||
132 | extern void *_switch_to(void *prev, void *next, void *last); | ||
133 | #define switch_to(prev, next, last) prev = _switch_to(prev, next, last) | ||
134 | |||
135 | #endif | ||