author     Ingo Molnar <mingo@kernel.org>    2014-01-25 03:16:14 -0500
committer  Ingo Molnar <mingo@kernel.org>    2014-01-25 03:16:14 -0500
commit     2b45e0f9f34f718725e093f4e335600811d7105a (patch)
tree       3c6d594539eb16fc955906da65b9fa7aacbc9145 /arch
parent     a85eba8814631d0d48361c8b9a7ee0984e80c03c (diff)
parent     15c81026204da897a05424c79263aea861a782cc (diff)
Merge branch 'linus' into x86/urgent
Merge in the x86 changes to apply a fix.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
376 files changed, 9389 insertions, 7819 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index f1cf895c040f..80bbb8ccd0d1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -336,6 +336,73 @@ config SECCOMP_FILTER | |||
336 | 336 | ||
337 | See Documentation/prctl/seccomp_filter.txt for details. | 337 | See Documentation/prctl/seccomp_filter.txt for details. |
338 | 338 | ||
339 | config HAVE_CC_STACKPROTECTOR | ||
340 | bool | ||
341 | help | ||
342 | An arch should select this symbol if: | ||
343 | - its compiler supports the -fstack-protector option | ||
344 | - it has implemented a stack canary (e.g. __stack_chk_guard) | ||
345 | |||
346 | config CC_STACKPROTECTOR | ||
347 | def_bool n | ||
348 | help | ||
349 | Set when a stack-protector mode is enabled, so that the build | ||
350 | can enable kernel-side support for the GCC feature. | ||
351 | |||
352 | choice | ||
353 | prompt "Stack Protector buffer overflow detection" | ||
354 | depends on HAVE_CC_STACKPROTECTOR | ||
355 | default CC_STACKPROTECTOR_NONE | ||
356 | help | ||
357 | This option turns on the "stack-protector" GCC feature. This | ||
358 | feature puts, at the beginning of functions, a canary value on | ||
359 | the stack just before the return address, and validates | ||
360 | the value just before actually returning. Stack based buffer | ||
361 | overflows (that need to overwrite this return address) now also | ||
362 | overwrite the canary, which gets detected and the attack is then | ||
363 | neutralized via a kernel panic. | ||
364 | |||
365 | config CC_STACKPROTECTOR_NONE | ||
366 | bool "None" | ||
367 | help | ||
368 | Disable "stack-protector" GCC feature. | ||
369 | |||
370 | config CC_STACKPROTECTOR_REGULAR | ||
371 | bool "Regular" | ||
372 | select CC_STACKPROTECTOR | ||
373 | help | ||
374 | Functions will have the stack-protector canary logic added if they | ||
375 | have an 8-byte or larger character array on the stack. | ||
376 | |||
377 | This feature requires gcc version 4.2 or above, or a distribution | ||
378 | gcc with the feature backported ("-fstack-protector"). | ||
379 | |||
380 | On an x86 "defconfig" build, this feature adds canary checks to | ||
381 | about 3% of all kernel functions, which increases kernel code size | ||
382 | by about 0.3%. | ||
383 | |||
384 | config CC_STACKPROTECTOR_STRONG | ||
385 | bool "Strong" | ||
386 | select CC_STACKPROTECTOR | ||
387 | help | ||
388 | Functions will have the stack-protector canary logic added in any | ||
389 | of the following conditions: | ||
390 | |||
391 | - local variable's address used as part of the right hand side of an | ||
392 | assignment or function argument | ||
393 | - local variable is an array (or union containing an array), | ||
394 | regardless of array type or length | ||
395 | - uses register local variables | ||
396 | |||
397 | This feature requires gcc version 4.9 or above, or a distribution | ||
398 | gcc with the feature backported ("-fstack-protector-strong"). | ||
399 | |||
400 | On an x86 "defconfig" build, this feature adds canary checks to | ||
401 | about 20% of all kernel functions, which increases the kernel code | ||
402 | size by about 2%. | ||
403 | |||
404 | endchoice | ||
405 | |||
339 | config HAVE_CONTEXT_TRACKING | 406 | config HAVE_CONTEXT_TRACKING |
340 | bool | 407 | bool |
341 | help | 408 | help |
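The Kconfig help text above distinguishes the "Regular" heuristic (canary only for functions with an 8-byte-or-larger character array on the stack) from "Strong" (also any local array, address-taken locals, and register locals). The following userspace sketch, which is not part of this commit and uses made-up function names, shows which kinds of functions each mode instruments; compile it with `-fstack-protector` or `-fstack-protector-strong` and look for `__stack_chk_fail` in the generated assembly.

```c
/* Illustrative only -- not part of this commit. */
#include <stdio.h>
#include <string.h>

/* Regular: instrumented -- char buffer of 8 bytes or more. */
void copies_string(const char *src)
{
	char buf[16];

	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	puts(buf);
}

/* Regular: NOT instrumented; Strong: instrumented -- array of non-char type. */
int sums_ints(void)
{
	int vals[4] = { 1, 2, 3, 4 };

	return vals[0] + vals[1] + vals[2] + vals[3];
}

/* Regular: NOT instrumented; Strong: instrumented -- a local's address
 * escapes as a function argument.
 */
int reads_int(void)
{
	int x = 0;

	if (scanf("%d", &x) != 1)
		x = -1;
	return x;
}
```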
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h index ce8860a0b32d..3832bdb794fe 100644 --- a/arch/alpha/include/asm/barrier.h +++ b/arch/alpha/include/asm/barrier.h | |||
@@ -3,33 +3,18 @@ | |||
3 | 3 | ||
4 | #include <asm/compiler.h> | 4 | #include <asm/compiler.h> |
5 | 5 | ||
6 | #define mb() \ | 6 | #define mb() __asm__ __volatile__("mb": : :"memory") |
7 | __asm__ __volatile__("mb": : :"memory") | 7 | #define rmb() __asm__ __volatile__("mb": : :"memory") |
8 | #define wmb() __asm__ __volatile__("wmb": : :"memory") | ||
8 | 9 | ||
9 | #define rmb() \ | 10 | #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") |
10 | __asm__ __volatile__("mb": : :"memory") | ||
11 | |||
12 | #define wmb() \ | ||
13 | __asm__ __volatile__("wmb": : :"memory") | ||
14 | |||
15 | #define read_barrier_depends() \ | ||
16 | __asm__ __volatile__("mb": : :"memory") | ||
17 | 11 | ||
18 | #ifdef CONFIG_SMP | 12 | #ifdef CONFIG_SMP |
19 | #define __ASM_SMP_MB "\tmb\n" | 13 | #define __ASM_SMP_MB "\tmb\n" |
20 | #define smp_mb() mb() | ||
21 | #define smp_rmb() rmb() | ||
22 | #define smp_wmb() wmb() | ||
23 | #define smp_read_barrier_depends() read_barrier_depends() | ||
24 | #else | 14 | #else |
25 | #define __ASM_SMP_MB | 15 | #define __ASM_SMP_MB |
26 | #define smp_mb() barrier() | ||
27 | #define smp_rmb() barrier() | ||
28 | #define smp_wmb() barrier() | ||
29 | #define smp_read_barrier_depends() do { } while (0) | ||
30 | #endif | 16 | #endif |
31 | 17 | ||
32 | #define set_mb(var, value) \ | 18 | #include <asm-generic/barrier.h> |
33 | do { var = value; mb(); } while (0) | ||
34 | 19 | ||
35 | #endif /* __BARRIER_H */ | 20 | #endif /* __BARRIER_H */ |
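Several architectures in this merge (alpha here, and avr32, blackfin, cris, frv, hexagon further down) drop their hand-rolled smp_*() and set_mb() definitions and include <asm-generic/barrier.h>, which supplies defaults for anything the arch header has not already defined. The sketch below is a paraphrase of those fallbacks, not a verbatim copy of include/asm-generic/barrier.h.

```c
/* Paraphrased sketch of the asm-generic fallbacks these headers now rely on. */
#ifndef mb
#define mb()	barrier()		/* arch may override with a real fence */
#endif
#ifndef rmb
#define rmb()	mb()
#endif
#ifndef wmb
#define wmb()	mb()
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else					/* UP: a compiler barrier is enough */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#ifndef set_mb
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#endif
```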
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 5943f7f9d325..9ae21c198007 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | generic-y += auxvec.h | 1 | generic-y += auxvec.h |
2 | generic-y += barrier.h | ||
2 | generic-y += bugs.h | 3 | generic-y += bugs.h |
3 | generic-y += bitsperlong.h | 4 | generic-y += bitsperlong.h |
4 | generic-y += clkdev.h | 5 | generic-y += clkdev.h |
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 83f03ca6caf6..03e494f695d1 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h | |||
@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | |||
190 | 190 | ||
191 | #endif /* !CONFIG_ARC_HAS_LLSC */ | 191 | #endif /* !CONFIG_ARC_HAS_LLSC */ |
192 | 192 | ||
193 | #define smp_mb__before_atomic_dec() barrier() | ||
194 | #define smp_mb__after_atomic_dec() barrier() | ||
195 | #define smp_mb__before_atomic_inc() barrier() | ||
196 | #define smp_mb__after_atomic_inc() barrier() | ||
197 | |||
193 | /** | 198 | /** |
194 | * __atomic_add_unless - add unless the number is a given value | 199 | * __atomic_add_unless - add unless the number is a given value |
195 | * @v: pointer of type atomic_t | 200 | * @v: pointer of type atomic_t |
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h index f6cb7c4ffb35..c32245c3d1e9 100644 --- a/arch/arc/include/asm/barrier.h +++ b/arch/arc/include/asm/barrier.h | |||
@@ -30,11 +30,6 @@ | |||
30 | #define smp_wmb() barrier() | 30 | #define smp_wmb() barrier() |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #define smp_mb__before_atomic_dec() barrier() | ||
34 | #define smp_mb__after_atomic_dec() barrier() | ||
35 | #define smp_mb__before_atomic_inc() barrier() | ||
36 | #define smp_mb__after_atomic_inc() barrier() | ||
37 | |||
38 | #define smp_read_barrier_depends() do { } while (0) | 33 | #define smp_read_barrier_depends() do { } while (0) |
39 | 34 | ||
40 | #endif | 35 | #endif |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c1f1a7eee953..9c909fc29272 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -30,6 +30,7 @@ config ARM | |||
30 | select HAVE_BPF_JIT | 30 | select HAVE_BPF_JIT |
31 | select HAVE_CONTEXT_TRACKING | 31 | select HAVE_CONTEXT_TRACKING |
32 | select HAVE_C_RECORDMCOUNT | 32 | select HAVE_C_RECORDMCOUNT |
33 | select HAVE_CC_STACKPROTECTOR | ||
33 | select HAVE_DEBUG_KMEMLEAK | 34 | select HAVE_DEBUG_KMEMLEAK |
34 | select HAVE_DMA_API_DEBUG | 35 | select HAVE_DMA_API_DEBUG |
35 | select HAVE_DMA_ATTRS | 36 | select HAVE_DMA_ATTRS |
@@ -1856,18 +1857,6 @@ config SECCOMP | |||
1856 | and the task is only allowed to execute a few safe syscalls | 1857 | and the task is only allowed to execute a few safe syscalls |
1857 | defined by each seccomp mode. | 1858 | defined by each seccomp mode. |
1858 | 1859 | ||
1859 | config CC_STACKPROTECTOR | ||
1860 | bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" | ||
1861 | help | ||
1862 | This option turns on the -fstack-protector GCC feature. This | ||
1863 | feature puts, at the beginning of functions, a canary value on | ||
1864 | the stack just before the return address, and validates | ||
1865 | the value just before actually returning. Stack based buffer | ||
1866 | overflows (that need to overwrite this return address) now also | ||
1867 | overwrite the canary, which gets detected and the attack is then | ||
1868 | neutralized via a kernel panic. | ||
1869 | This feature requires gcc version 4.2 or above. | ||
1870 | |||
1871 | config SWIOTLB | 1860 | config SWIOTLB |
1872 | def_bool y | 1861 | def_bool y |
1873 | 1862 | ||
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c99b1086d83d..55b4255ad6ed 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -40,10 +40,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y) | |||
40 | KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog | 40 | KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog |
41 | endif | 41 | endif |
42 | 42 | ||
43 | ifeq ($(CONFIG_CC_STACKPROTECTOR),y) | ||
44 | KBUILD_CFLAGS +=-fstack-protector | ||
45 | endif | ||
46 | |||
47 | ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) | 43 | ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) |
48 | KBUILD_CPPFLAGS += -mbig-endian | 44 | KBUILD_CPPFLAGS += -mbig-endian |
49 | AS += -EB | 45 | AS += -EB |
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index 31bd43b82095..d4f891f56996 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c | |||
@@ -127,6 +127,18 @@ asmlinkage void __div0(void) | |||
127 | error("Attempting division by 0!"); | 127 | error("Attempting division by 0!"); |
128 | } | 128 | } |
129 | 129 | ||
130 | unsigned long __stack_chk_guard; | ||
131 | |||
132 | void __stack_chk_guard_setup(void) | ||
133 | { | ||
134 | __stack_chk_guard = 0x000a0dff; | ||
135 | } | ||
136 | |||
137 | void __stack_chk_fail(void) | ||
138 | { | ||
139 | error("stack-protector: Kernel stack is corrupted\n"); | ||
140 | } | ||
141 | |||
130 | extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); | 142 | extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)); |
131 | 143 | ||
132 | 144 | ||
@@ -137,6 +149,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p, | |||
137 | { | 149 | { |
138 | int ret; | 150 | int ret; |
139 | 151 | ||
152 | __stack_chk_guard_setup(); | ||
153 | |||
140 | output_data = (unsigned char *)output_start; | 154 | output_data = (unsigned char *)output_start; |
141 | free_mem_ptr = free_mem_ptr_p; | 155 | free_mem_ptr = free_mem_ptr_p; |
142 | free_mem_end_ptr = free_mem_ptr_end_p; | 156 | free_mem_end_ptr = free_mem_ptr_end_p; |
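The decompressor has to provide its own __stack_chk_guard and __stack_chk_fail because it cannot link against the kernel's. When a function is built with -fstack-protector, the compiler conceptually wraps it as in the sketch below; this is hand-written C for illustration only, since the real check is emitted as prologue/epilogue code rather than a source transformation.

```c
/* Conceptual shape of a stack-protected function. Illustrative only. */
extern unsigned long __stack_chk_guard;	/* provided above by misc.c   */
void __stack_chk_fail(void);		/* called on canary mismatch   */

void protected_function(const char *src)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash canary */
	char buf[64];

	__builtin_strcpy(buf, src);	/* an overflow of buf runs into the canary
					 * before it reaches the return address */

	if (canary != __stack_chk_guard)		/* epilogue: verify */
		__stack_chk_fail();			/* does not return  */
}
```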
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 52476742a104..e674c94c7206 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi | |||
@@ -332,5 +332,12 @@ | |||
332 | clock-frequency = <100000>; | 332 | clock-frequency = <100000>; |
333 | status = "disabled"; | 333 | status = "disabled"; |
334 | }; | 334 | }; |
335 | |||
336 | timer@01c60000 { | ||
337 | compatible = "allwinner,sun5i-a13-hstimer"; | ||
338 | reg = <0x01c60000 0x1000>; | ||
339 | interrupts = <82>, <83>; | ||
340 | clocks = <&ahb_gates 28>; | ||
341 | }; | ||
335 | }; | 342 | }; |
336 | }; | 343 | }; |
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index ce8ef2a45be0..1ccd75d37f49 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
@@ -273,5 +273,12 @@ | |||
273 | clock-frequency = <100000>; | 273 | clock-frequency = <100000>; |
274 | status = "disabled"; | 274 | status = "disabled"; |
275 | }; | 275 | }; |
276 | |||
277 | timer@01c60000 { | ||
278 | compatible = "allwinner,sun5i-a13-hstimer"; | ||
279 | reg = <0x01c60000 0x1000>; | ||
280 | interrupts = <82>, <83>; | ||
281 | clocks = <&ahb_gates 28>; | ||
282 | }; | ||
276 | }; | 283 | }; |
277 | }; | 284 | }; |
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 367611a0730b..0135039eff96 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -395,6 +395,16 @@ | |||
395 | status = "disabled"; | 395 | status = "disabled"; |
396 | }; | 396 | }; |
397 | 397 | ||
398 | hstimer@01c60000 { | ||
399 | compatible = "allwinner,sun7i-a20-hstimer"; | ||
400 | reg = <0x01c60000 0x1000>; | ||
401 | interrupts = <0 81 1>, | ||
402 | <0 82 1>, | ||
403 | <0 83 1>, | ||
404 | <0 84 1>; | ||
405 | clocks = <&ahb_gates 28>; | ||
406 | }; | ||
407 | |||
398 | gic: interrupt-controller@01c81000 { | 408 | gic: interrupt-controller@01c81000 { |
399 | compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; | 409 | compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic"; |
400 | reg = <0x01c81000 0x1000>, | 410 | reg = <0x01c81000 0x1000>, |
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 60f15e274e6d..2f59f7443396 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h | |||
@@ -59,6 +59,21 @@ | |||
59 | #define smp_wmb() dmb(ishst) | 59 | #define smp_wmb() dmb(ishst) |
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | #define smp_store_release(p, v) \ | ||
63 | do { \ | ||
64 | compiletime_assert_atomic_type(*p); \ | ||
65 | smp_mb(); \ | ||
66 | ACCESS_ONCE(*p) = (v); \ | ||
67 | } while (0) | ||
68 | |||
69 | #define smp_load_acquire(p) \ | ||
70 | ({ \ | ||
71 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
72 | compiletime_assert_atomic_type(*p); \ | ||
73 | smp_mb(); \ | ||
74 | ___p1; \ | ||
75 | }) | ||
76 | |||
62 | #define read_barrier_depends() do { } while(0) | 77 | #define read_barrier_depends() do { } while(0) |
63 | #define smp_read_barrier_depends() do { } while(0) | 78 | #define smp_read_barrier_depends() do { } while(0) |
64 | 79 | ||
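The smp_store_release()/smp_load_acquire() pair added here for ARM (and below for arm64, ia64 and others in this merge) replaces open-coded smp_wmb()/smp_rmb() pairs in publish/consume patterns. A minimal kernel-style usage sketch, with made-up data structures:

```c
/* Hypothetical producer/consumer using the new primitives. */
struct msg {
	int payload;
};

static struct msg shared_msg;
static int msg_ready;			/* 0 = empty, 1 = published */

/* Producer: fill in the data, then publish the flag with release
 * semantics so the payload store cannot be reordered after it.
 */
void publish(int value)
{
	shared_msg.payload = value;
	smp_store_release(&msg_ready, 1);
}

/* Consumer: an acquire load of the flag guarantees that, once we
 * observe msg_ready == 1, the payload written before the release
 * store is also visible.
 */
int consume(int *value)
{
	if (!smp_load_acquire(&msg_ready))
		return 0;		/* nothing published yet */
	*value = shared_msg.payload;
	return 1;
}
```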
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 141baa3f9a72..acabef1a75df 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <uapi/asm/unistd.h> | 16 | #include <uapi/asm/unistd.h> |
17 | 17 | ||
18 | #define __NR_syscalls (380) | 18 | #define __NR_syscalls (384) |
19 | #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) | 19 | #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) |
20 | 20 | ||
21 | #define __ARCH_WANT_STAT64 | 21 | #define __ARCH_WANT_STAT64 |
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index af33b44990ed..fb5584d0cc05 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h | |||
@@ -406,6 +406,8 @@ | |||
406 | #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) | 406 | #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) |
407 | #define __NR_kcmp (__NR_SYSCALL_BASE+378) | 407 | #define __NR_kcmp (__NR_SYSCALL_BASE+378) |
408 | #define __NR_finit_module (__NR_SYSCALL_BASE+379) | 408 | #define __NR_finit_module (__NR_SYSCALL_BASE+379) |
409 | #define __NR_sched_setattr (__NR_SYSCALL_BASE+380) | ||
410 | #define __NR_sched_getattr (__NR_SYSCALL_BASE+381) | ||
409 | 411 | ||
410 | /* | 412 | /* |
411 | * This may need to be greater than __NR_last_syscall+1 in order to | 413 | * This may need to be greater than __NR_last_syscall+1 in order to |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index c6ca7e376773..166e945de832 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
@@ -389,6 +389,8 @@ | |||
389 | CALL(sys_process_vm_writev) | 389 | CALL(sys_process_vm_writev) |
390 | CALL(sys_kcmp) | 390 | CALL(sys_kcmp) |
391 | CALL(sys_finit_module) | 391 | CALL(sys_finit_module) |
392 | /* 380 */ CALL(sys_sched_setattr) | ||
393 | CALL(sys_sched_getattr) | ||
392 | #ifndef syscalls_counted | 394 | #ifndef syscalls_counted |
393 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 395 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
394 | #define syscalls_counted | 396 | #define syscalls_counted |
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index 739c3dfc1da2..34d5fd585bbb 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c | |||
@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void) | |||
171 | 171 | ||
172 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) | 172 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) |
173 | { | 173 | { |
174 | return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); | 174 | return phys_id == cpu_logical_map(cpu); |
175 | } | 175 | } |
176 | 176 | ||
177 | static const void * __init arch_get_next_mach(const char *const **match) | 177 | static const void * __init arch_get_next_mach(const char *const **match) |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index bc3f2efa0d86..789d846a9184 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -99,10 +99,6 @@ int armpmu_event_set_period(struct perf_event *event) | |||
99 | s64 period = hwc->sample_period; | 99 | s64 period = hwc->sample_period; |
100 | int ret = 0; | 100 | int ret = 0; |
101 | 101 | ||
102 | /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ | ||
103 | if (unlikely(period != hwc->last_period)) | ||
104 | left = period - (hwc->last_period - left); | ||
105 | |||
106 | if (unlikely(left <= -period)) { | 102 | if (unlikely(left <= -period)) { |
107 | left = period; | 103 | left = period; |
108 | local64_set(&hwc->period_left, left); | 104 | local64_set(&hwc->period_left, left); |
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index d85055cd24ba..20d553c9f5e2 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c | |||
@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu) | |||
254 | static int cpu_pmu_device_probe(struct platform_device *pdev) | 254 | static int cpu_pmu_device_probe(struct platform_device *pdev) |
255 | { | 255 | { |
256 | const struct of_device_id *of_id; | 256 | const struct of_device_id *of_id; |
257 | int (*init_fn)(struct arm_pmu *); | 257 | const int (*init_fn)(struct arm_pmu *); |
258 | struct device_node *node = pdev->dev.of_node; | 258 | struct device_node *node = pdev->dev.of_node; |
259 | struct arm_pmu *pmu; | 259 | struct arm_pmu *pmu; |
260 | int ret = -ENODEV; | 260 | int ret = -ENODEV; |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 6eda3bf85c52..4636d56af2db 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -431,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |||
431 | instr2 = __mem_to_opcode_thumb16(instr2); | 431 | instr2 = __mem_to_opcode_thumb16(instr2); |
432 | instr = __opcode_thumb32_compose(instr, instr2); | 432 | instr = __opcode_thumb32_compose(instr, instr2); |
433 | } | 433 | } |
434 | } else if (get_user(instr, (u32 __user *)pc)) { | 434 | } else { |
435 | if (get_user(instr, (u32 __user *)pc)) | ||
436 | goto die_sig; | ||
435 | instr = __mem_to_opcode_arm(instr); | 437 | instr = __mem_to_opcode_arm(instr); |
436 | goto die_sig; | ||
437 | } | 438 | } |
438 | 439 | ||
439 | if (call_undef_hook(regs, instr) == 0) | 440 | if (call_undef_hook(regs, instr) == 0) |
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index bd3bf66ce344..c7de89b263dd 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void) | |||
53 | 53 | ||
54 | static void highbank_l2x0_disable(void) | 54 | static void highbank_l2x0_disable(void) |
55 | { | 55 | { |
56 | outer_flush_all(); | ||
56 | /* Disable PL310 L2 Cache controller */ | 57 | /* Disable PL310 L2 Cache controller */ |
57 | highbank_smc1(0x102, 0x0); | 58 | highbank_smc1(0x102, 0x0); |
58 | } | 59 | } |
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index b39efd46abf9..c0ab9b26be3d 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c | |||
@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void) | |||
162 | 162 | ||
163 | static void omap4_l2x0_disable(void) | 163 | static void omap4_l2x0_disable(void) |
164 | { | 164 | { |
165 | outer_flush_all(); | ||
165 | /* Disable PL310 L2 Cache controller */ | 166 | /* Disable PL310 L2 Cache controller */ |
166 | omap_smc1(0x102, 0x0); | 167 | omap_smc1(0x102, 0x0); |
167 | } | 168 | } |
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index c9e72c89066a..bce0d4277f71 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig | |||
@@ -12,3 +12,4 @@ config ARCH_SUNXI | |||
12 | select PINCTRL_SUNXI | 12 | select PINCTRL_SUNXI |
13 | select SPARSE_IRQ | 13 | select SPARSE_IRQ |
14 | select SUN4I_TIMER | 14 | select SUN4I_TIMER |
15 | select SUN5I_HSTIMER | ||
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 1f7b19a47060..3e8f106ee5fe 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc) | |||
229 | #ifdef CONFIG_ZONE_DMA | 229 | #ifdef CONFIG_ZONE_DMA |
230 | if (mdesc->dma_zone_size) { | 230 | if (mdesc->dma_zone_size) { |
231 | arm_dma_zone_size = mdesc->dma_zone_size; | 231 | arm_dma_zone_size = mdesc->dma_zone_size; |
232 | arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1; | 232 | arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; |
233 | } else | 233 | } else |
234 | arm_dma_limit = 0xffffffff; | 234 | arm_dma_limit = 0xffffffff; |
235 | arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; | 235 | arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; |
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 9ed155ad0f97..271b5e971568 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c | |||
@@ -641,10 +641,10 @@ load_ind: | |||
641 | emit(ARM_MUL(r_A, r_A, r_X), ctx); | 641 | emit(ARM_MUL(r_A, r_A, r_X), ctx); |
642 | break; | 642 | break; |
643 | case BPF_S_ALU_DIV_K: | 643 | case BPF_S_ALU_DIV_K: |
644 | /* current k == reciprocal_value(userspace k) */ | 644 | if (k == 1) |
645 | break; | ||
645 | emit_mov_i(r_scratch, k, ctx); | 646 | emit_mov_i(r_scratch, k, ctx); |
646 | /* A = top 32 bits of the product */ | 647 | emit_udiv(r_A, r_A, r_scratch, ctx); |
647 | emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); | ||
648 | break; | 648 | break; |
649 | case BPF_S_ALU_DIV_X: | 649 | case BPF_S_ALU_DIV_X: |
650 | update_on_xread(ctx); | 650 | update_on_xread(ctx); |
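The BPF_S_ALU_DIV_K change above drops the reciprocal-multiply trick: the JIT used to be handed a pre-computed reciprocal_value() of k and take the top 32 bits of a 64-bit multiply, whereas it now emits a hardware divide (emit_udiv()) and skips k == 1 entirely. The two strategies, sketched in plain C with illustrative names:

```c
#include <stdint.h>

/* Old scheme: userspace k was replaced by reciprocal_value(k), and the
 * JIT emitted UMULL, keeping the high 32 bits of the product.
 */
static uint32_t div_by_reciprocal(uint32_t a, uint32_t reciprocal_of_k)
{
	return (uint32_t)(((uint64_t)a * reciprocal_of_k) >> 32);
}

/* New scheme: divide directly; dividing by 1 is a no-op. */
static uint32_t div_direct(uint32_t a, uint32_t k)
{
	if (k == 1)
		return a;
	return a / k;
}
```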
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index d4a63338a53c..78e20ba8806b 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h | |||
@@ -35,10 +35,60 @@ | |||
35 | #define smp_mb() barrier() | 35 | #define smp_mb() barrier() |
36 | #define smp_rmb() barrier() | 36 | #define smp_rmb() barrier() |
37 | #define smp_wmb() barrier() | 37 | #define smp_wmb() barrier() |
38 | |||
39 | #define smp_store_release(p, v) \ | ||
40 | do { \ | ||
41 | compiletime_assert_atomic_type(*p); \ | ||
42 | smp_mb(); \ | ||
43 | ACCESS_ONCE(*p) = (v); \ | ||
44 | } while (0) | ||
45 | |||
46 | #define smp_load_acquire(p) \ | ||
47 | ({ \ | ||
48 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
49 | compiletime_assert_atomic_type(*p); \ | ||
50 | smp_mb(); \ | ||
51 | ___p1; \ | ||
52 | }) | ||
53 | |||
38 | #else | 54 | #else |
55 | |||
39 | #define smp_mb() asm volatile("dmb ish" : : : "memory") | 56 | #define smp_mb() asm volatile("dmb ish" : : : "memory") |
40 | #define smp_rmb() asm volatile("dmb ishld" : : : "memory") | 57 | #define smp_rmb() asm volatile("dmb ishld" : : : "memory") |
41 | #define smp_wmb() asm volatile("dmb ishst" : : : "memory") | 58 | #define smp_wmb() asm volatile("dmb ishst" : : : "memory") |
59 | |||
60 | #define smp_store_release(p, v) \ | ||
61 | do { \ | ||
62 | compiletime_assert_atomic_type(*p); \ | ||
63 | switch (sizeof(*p)) { \ | ||
64 | case 4: \ | ||
65 | asm volatile ("stlr %w1, %0" \ | ||
66 | : "=Q" (*p) : "r" (v) : "memory"); \ | ||
67 | break; \ | ||
68 | case 8: \ | ||
69 | asm volatile ("stlr %1, %0" \ | ||
70 | : "=Q" (*p) : "r" (v) : "memory"); \ | ||
71 | break; \ | ||
72 | } \ | ||
73 | } while (0) | ||
74 | |||
75 | #define smp_load_acquire(p) \ | ||
76 | ({ \ | ||
77 | typeof(*p) ___p1; \ | ||
78 | compiletime_assert_atomic_type(*p); \ | ||
79 | switch (sizeof(*p)) { \ | ||
80 | case 4: \ | ||
81 | asm volatile ("ldar %w0, %1" \ | ||
82 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | ||
83 | break; \ | ||
84 | case 8: \ | ||
85 | asm volatile ("ldar %0, %1" \ | ||
86 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | ||
87 | break; \ | ||
88 | } \ | ||
89 | ___p1; \ | ||
90 | }) | ||
91 | |||
42 | #endif | 92 | #endif |
43 | 93 | ||
44 | #define read_barrier_depends() do { } while(0) | 94 | #define read_barrier_depends() do { } while(0) |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 572769727227..4cc813eddacb 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -229,7 +229,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot | |||
229 | extern void __iounmap(volatile void __iomem *addr); | 229 | extern void __iounmap(volatile void __iomem *addr); |
230 | extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); | 230 | extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); |
231 | 231 | ||
232 | #define PROT_DEFAULT (pgprot_default | PTE_DIRTY) | 232 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) |
233 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 233 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
234 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) | 234 | #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) |
235 | #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) | 235 | #define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) |
diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h index 0961275373db..715100790fd0 100644 --- a/arch/avr32/include/asm/barrier.h +++ b/arch/avr32/include/asm/barrier.h | |||
@@ -8,22 +8,15 @@ | |||
8 | #ifndef __ASM_AVR32_BARRIER_H | 8 | #ifndef __ASM_AVR32_BARRIER_H |
9 | #define __ASM_AVR32_BARRIER_H | 9 | #define __ASM_AVR32_BARRIER_H |
10 | 10 | ||
11 | #define nop() asm volatile("nop") | 11 | /* |
12 | 12 | * Weirdest thing ever.. no full barrier, but it has a write barrier! | |
13 | #define mb() asm volatile("" : : : "memory") | 13 | */ |
14 | #define rmb() mb() | 14 | #define wmb() asm volatile("sync 0" : : : "memory") |
15 | #define wmb() asm volatile("sync 0" : : : "memory") | ||
16 | #define read_barrier_depends() do { } while(0) | ||
17 | #define set_mb(var, value) do { var = value; mb(); } while(0) | ||
18 | 15 | ||
19 | #ifdef CONFIG_SMP | 16 | #ifdef CONFIG_SMP |
20 | # error "The AVR32 port does not support SMP" | 17 | # error "The AVR32 port does not support SMP" |
21 | #else | ||
22 | # define smp_mb() barrier() | ||
23 | # define smp_rmb() barrier() | ||
24 | # define smp_wmb() barrier() | ||
25 | # define smp_read_barrier_depends() do { } while(0) | ||
26 | #endif | 18 | #endif |
27 | 19 | ||
20 | #include <asm-generic/barrier.h> | ||
28 | 21 | ||
29 | #endif /* __ASM_AVR32_BARRIER_H */ | 22 | #endif /* __ASM_AVR32_BARRIER_H */ |
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index ebb189507dd7..19283a16ac08 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h | |||
@@ -23,26 +23,10 @@ | |||
23 | # define rmb() do { barrier(); smp_check_barrier(); } while (0) | 23 | # define rmb() do { barrier(); smp_check_barrier(); } while (0) |
24 | # define wmb() do { barrier(); smp_mark_barrier(); } while (0) | 24 | # define wmb() do { barrier(); smp_mark_barrier(); } while (0) |
25 | # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) | 25 | # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) |
26 | #else | ||
27 | # define mb() barrier() | ||
28 | # define rmb() barrier() | ||
29 | # define wmb() barrier() | ||
30 | # define read_barrier_depends() do { } while (0) | ||
31 | #endif | 26 | #endif |
32 | 27 | ||
33 | #else /* !CONFIG_SMP */ | ||
34 | |||
35 | #define mb() barrier() | ||
36 | #define rmb() barrier() | ||
37 | #define wmb() barrier() | ||
38 | #define read_barrier_depends() do { } while (0) | ||
39 | |||
40 | #endif /* !CONFIG_SMP */ | 28 | #endif /* !CONFIG_SMP */ |
41 | 29 | ||
42 | #define smp_mb() mb() | 30 | #include <asm-generic/barrier.h> |
43 | #define smp_rmb() rmb() | ||
44 | #define smp_wmb() wmb() | ||
45 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
46 | #define smp_read_barrier_depends() read_barrier_depends() | ||
47 | 31 | ||
48 | #endif /* _BLACKFIN_BARRIER_H */ | 32 | #endif /* _BLACKFIN_BARRIER_H */ |
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b06caf649a95..199b1a9dab89 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild | |||
@@ -3,6 +3,7 @@ header-y += arch-v10/ | |||
3 | header-y += arch-v32/ | 3 | header-y += arch-v32/ |
4 | 4 | ||
5 | 5 | ||
6 | generic-y += barrier.h | ||
6 | generic-y += clkdev.h | 7 | generic-y += clkdev.h |
7 | generic-y += exec.h | 8 | generic-y += exec.h |
8 | generic-y += kvm_para.h | 9 | generic-y += kvm_para.h |
diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h deleted file mode 100644 index 198ad7fa6b25..000000000000 --- a/arch/cris/include/asm/barrier.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | #ifndef __ASM_CRIS_BARRIER_H | ||
2 | #define __ASM_CRIS_BARRIER_H | ||
3 | |||
4 | #define nop() __asm__ __volatile__ ("nop"); | ||
5 | |||
6 | #define barrier() __asm__ __volatile__("": : :"memory") | ||
7 | #define mb() barrier() | ||
8 | #define rmb() mb() | ||
9 | #define wmb() mb() | ||
10 | #define read_barrier_depends() do { } while(0) | ||
11 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
12 | |||
13 | #ifdef CONFIG_SMP | ||
14 | #define smp_mb() mb() | ||
15 | #define smp_rmb() rmb() | ||
16 | #define smp_wmb() wmb() | ||
17 | #define smp_read_barrier_depends() read_barrier_depends() | ||
18 | #else | ||
19 | #define smp_mb() barrier() | ||
20 | #define smp_rmb() barrier() | ||
21 | #define smp_wmb() barrier() | ||
22 | #define smp_read_barrier_depends() do { } while(0) | ||
23 | #endif | ||
24 | |||
25 | #endif /* __ASM_CRIS_BARRIER_H */ | ||
diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h index 06776ad9f5e9..abbef470154c 100644 --- a/arch/frv/include/asm/barrier.h +++ b/arch/frv/include/asm/barrier.h | |||
@@ -17,13 +17,7 @@ | |||
17 | #define mb() asm volatile ("membar" : : :"memory") | 17 | #define mb() asm volatile ("membar" : : :"memory") |
18 | #define rmb() asm volatile ("membar" : : :"memory") | 18 | #define rmb() asm volatile ("membar" : : :"memory") |
19 | #define wmb() asm volatile ("membar" : : :"memory") | 19 | #define wmb() asm volatile ("membar" : : :"memory") |
20 | #define read_barrier_depends() do { } while (0) | ||
21 | 20 | ||
22 | #define smp_mb() barrier() | 21 | #include <asm-generic/barrier.h> |
23 | #define smp_rmb() barrier() | ||
24 | #define smp_wmb() barrier() | ||
25 | #define smp_read_barrier_depends() do {} while(0) | ||
26 | #define set_mb(var, value) \ | ||
27 | do { var = (value); barrier(); } while (0) | ||
28 | 22 | ||
29 | #endif /* _ASM_BARRIER_H */ | 23 | #endif /* _ASM_BARRIER_H */ |
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 67c3450309b7..ada843c701ef 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild | |||
@@ -2,6 +2,7 @@ | |||
2 | header-y += ucontext.h | 2 | header-y += ucontext.h |
3 | 3 | ||
4 | generic-y += auxvec.h | 4 | generic-y += auxvec.h |
5 | generic-y += barrier.h | ||
5 | generic-y += bug.h | 6 | generic-y += bug.h |
6 | generic-y += bugs.h | 7 | generic-y += bugs.h |
7 | generic-y += clkdev.h | 8 | generic-y += clkdev.h |
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 8a64ff2337f6..7aae4cb2a29a 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h | |||
@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |||
160 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0) | 160 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0) |
161 | #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) | 161 | #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) |
162 | 162 | ||
163 | |||
164 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | 163 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
165 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | 164 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
166 | 165 | ||
166 | #define smp_mb__before_atomic_dec() barrier() | ||
167 | #define smp_mb__after_atomic_dec() barrier() | ||
168 | #define smp_mb__before_atomic_inc() barrier() | ||
169 | #define smp_mb__after_atomic_inc() barrier() | ||
170 | |||
167 | #endif | 171 | #endif |
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h index 1041a8e70ce8..4e863daea25b 100644 --- a/arch/hexagon/include/asm/barrier.h +++ b/arch/hexagon/include/asm/barrier.h | |||
@@ -29,10 +29,6 @@ | |||
29 | #define smp_read_barrier_depends() barrier() | 29 | #define smp_read_barrier_depends() barrier() |
30 | #define smp_wmb() barrier() | 30 | #define smp_wmb() barrier() |
31 | #define smp_mb() barrier() | 31 | #define smp_mb() barrier() |
32 | #define smp_mb__before_atomic_dec() barrier() | ||
33 | #define smp_mb__after_atomic_dec() barrier() | ||
34 | #define smp_mb__before_atomic_inc() barrier() | ||
35 | #define smp_mb__after_atomic_inc() barrier() | ||
36 | 32 | ||
37 | /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ | 33 | /* Set a value and use a memory barrier. Used by the scheduler somewhere. */ |
38 | #define set_mb(var, value) \ | 34 | #define set_mb(var, value) \ |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 4e4119b0e691..a8c3a11dc5ab 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -147,9 +147,6 @@ config PARAVIRT | |||
147 | over full virtualization. However, when run without a hypervisor | 147 | over full virtualization. However, when run without a hypervisor |
148 | the kernel is theoretically slower and slightly larger. | 148 | the kernel is theoretically slower and slightly larger. |
149 | 149 | ||
150 | |||
151 | source "arch/ia64/xen/Kconfig" | ||
152 | |||
153 | endif | 150 | endif |
154 | 151 | ||
155 | choice | 152 | choice |
@@ -175,7 +172,6 @@ config IA64_GENERIC | |||
175 | SGI-SN2 For SGI Altix systems | 172 | SGI-SN2 For SGI Altix systems |
176 | SGI-UV For SGI UV systems | 173 | SGI-UV For SGI UV systems |
177 | Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> | 174 | Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> |
178 | Xen-domU For xen domU system | ||
179 | 175 | ||
180 | If you don't know what to do, choose "generic". | 176 | If you don't know what to do, choose "generic". |
181 | 177 | ||
@@ -231,14 +227,6 @@ config IA64_HP_SIM | |||
231 | bool "Ski-simulator" | 227 | bool "Ski-simulator" |
232 | select SWIOTLB | 228 | select SWIOTLB |
233 | 229 | ||
234 | config IA64_XEN_GUEST | ||
235 | bool "Xen guest" | ||
236 | select SWIOTLB | ||
237 | depends on XEN | ||
238 | help | ||
239 | Build a kernel that runs on Xen guest domain. At this moment only | ||
240 | 16KB page size in supported. | ||
241 | |||
242 | endchoice | 230 | endchoice |
243 | 231 | ||
244 | choice | 232 | choice |
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index be7bfa12b705..f37238f45bcd 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile | |||
@@ -51,11 +51,9 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/ | |||
51 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ | 51 | core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ |
52 | core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ | 52 | core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ |
53 | core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ | 53 | core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ |
54 | core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/ | ||
55 | core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ | 54 | core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ |
56 | core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ | 55 | core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ |
57 | core-$(CONFIG_KVM) += arch/ia64/kvm/ | 56 | core-$(CONFIG_KVM) += arch/ia64/kvm/ |
58 | core-$(CONFIG_XEN) += arch/ia64/xen/ | ||
59 | 57 | ||
60 | drivers-$(CONFIG_PCI) += arch/ia64/pci/ | 58 | drivers-$(CONFIG_PCI) += arch/ia64/pci/ |
61 | drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ | 59 | drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ |
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig deleted file mode 100644 index b025acfde5c1..000000000000 --- a/arch/ia64/configs/xen_domu_defconfig +++ /dev/null | |||
@@ -1,199 +0,0 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | ||
2 | CONFIG_SYSVIPC=y | ||
3 | CONFIG_POSIX_MQUEUE=y | ||
4 | CONFIG_IKCONFIG=y | ||
5 | CONFIG_IKCONFIG_PROC=y | ||
6 | CONFIG_LOG_BUF_SHIFT=20 | ||
7 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
8 | CONFIG_BLK_DEV_INITRD=y | ||
9 | CONFIG_KALLSYMS_ALL=y | ||
10 | CONFIG_MODULES=y | ||
11 | CONFIG_MODULE_UNLOAD=y | ||
12 | CONFIG_MODVERSIONS=y | ||
13 | CONFIG_MODULE_SRCVERSION_ALL=y | ||
14 | # CONFIG_BLK_DEV_BSG is not set | ||
15 | CONFIG_PARAVIRT_GUEST=y | ||
16 | CONFIG_IA64_XEN_GUEST=y | ||
17 | CONFIG_MCKINLEY=y | ||
18 | CONFIG_IA64_CYCLONE=y | ||
19 | CONFIG_SMP=y | ||
20 | CONFIG_NR_CPUS=16 | ||
21 | CONFIG_HOTPLUG_CPU=y | ||
22 | CONFIG_PERMIT_BSP_REMOVE=y | ||
23 | CONFIG_FORCE_CPEI_RETARGET=y | ||
24 | CONFIG_IA64_MCA_RECOVERY=y | ||
25 | CONFIG_PERFMON=y | ||
26 | CONFIG_IA64_PALINFO=y | ||
27 | CONFIG_KEXEC=y | ||
28 | CONFIG_EFI_VARS=y | ||
29 | CONFIG_BINFMT_MISC=m | ||
30 | CONFIG_ACPI_PROCFS=y | ||
31 | CONFIG_ACPI_BUTTON=m | ||
32 | CONFIG_ACPI_FAN=m | ||
33 | CONFIG_ACPI_PROCESSOR=m | ||
34 | CONFIG_ACPI_CONTAINER=m | ||
35 | CONFIG_HOTPLUG_PCI=y | ||
36 | CONFIG_HOTPLUG_PCI_ACPI=m | ||
37 | CONFIG_PACKET=y | ||
38 | CONFIG_UNIX=y | ||
39 | CONFIG_INET=y | ||
40 | CONFIG_IP_MULTICAST=y | ||
41 | CONFIG_ARPD=y | ||
42 | CONFIG_SYN_COOKIES=y | ||
43 | # CONFIG_INET_LRO is not set | ||
44 | # CONFIG_IPV6 is not set | ||
45 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
46 | CONFIG_BLK_DEV_LOOP=m | ||
47 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
48 | CONFIG_BLK_DEV_NBD=m | ||
49 | CONFIG_BLK_DEV_RAM=y | ||
50 | CONFIG_IDE=y | ||
51 | CONFIG_BLK_DEV_IDECD=y | ||
52 | CONFIG_BLK_DEV_GENERIC=y | ||
53 | CONFIG_BLK_DEV_CMD64X=y | ||
54 | CONFIG_BLK_DEV_PIIX=y | ||
55 | CONFIG_SCSI=y | ||
56 | CONFIG_BLK_DEV_SD=y | ||
57 | CONFIG_CHR_DEV_ST=m | ||
58 | CONFIG_BLK_DEV_SR=m | ||
59 | CONFIG_CHR_DEV_SG=m | ||
60 | CONFIG_SCSI_SYM53C8XX_2=y | ||
61 | CONFIG_SCSI_QLOGIC_1280=y | ||
62 | CONFIG_MD=y | ||
63 | CONFIG_BLK_DEV_MD=m | ||
64 | CONFIG_MD_LINEAR=m | ||
65 | CONFIG_MD_RAID0=m | ||
66 | CONFIG_MD_RAID1=m | ||
67 | CONFIG_MD_MULTIPATH=m | ||
68 | CONFIG_BLK_DEV_DM=m | ||
69 | CONFIG_DM_CRYPT=m | ||
70 | CONFIG_DM_SNAPSHOT=m | ||
71 | CONFIG_DM_MIRROR=m | ||
72 | CONFIG_DM_ZERO=m | ||
73 | CONFIG_FUSION=y | ||
74 | CONFIG_FUSION_SPI=y | ||
75 | CONFIG_FUSION_FC=y | ||
76 | CONFIG_FUSION_CTL=y | ||
77 | CONFIG_NETDEVICES=y | ||
78 | CONFIG_DUMMY=m | ||
79 | CONFIG_NET_ETHERNET=y | ||
80 | CONFIG_NET_TULIP=y | ||
81 | CONFIG_TULIP=m | ||
82 | CONFIG_NET_PCI=y | ||
83 | CONFIG_NET_VENDOR_INTEL=y | ||
84 | CONFIG_E100=m | ||
85 | CONFIG_E1000=y | ||
86 | CONFIG_TIGON3=y | ||
87 | CONFIG_NETCONSOLE=y | ||
88 | # CONFIG_SERIO_SERPORT is not set | ||
89 | CONFIG_GAMEPORT=m | ||
90 | CONFIG_SERIAL_NONSTANDARD=y | ||
91 | CONFIG_SERIAL_8250=y | ||
92 | CONFIG_SERIAL_8250_CONSOLE=y | ||
93 | CONFIG_SERIAL_8250_NR_UARTS=6 | ||
94 | CONFIG_SERIAL_8250_EXTENDED=y | ||
95 | CONFIG_SERIAL_8250_SHARE_IRQ=y | ||
96 | # CONFIG_HW_RANDOM is not set | ||
97 | CONFIG_EFI_RTC=y | ||
98 | CONFIG_RAW_DRIVER=m | ||
99 | CONFIG_HPET=y | ||
100 | CONFIG_AGP=m | ||
101 | CONFIG_DRM=m | ||
102 | CONFIG_DRM_TDFX=m | ||
103 | CONFIG_DRM_R128=m | ||
104 | CONFIG_DRM_RADEON=m | ||
105 | CONFIG_DRM_MGA=m | ||
106 | CONFIG_DRM_SIS=m | ||
107 | CONFIG_HID_GYRATION=y | ||
108 | CONFIG_HID_NTRIG=y | ||
109 | CONFIG_HID_PANTHERLORD=y | ||
110 | CONFIG_HID_PETALYNX=y | ||
111 | CONFIG_HID_SAMSUNG=y | ||
112 | CONFIG_HID_SONY=y | ||
113 | CONFIG_HID_SUNPLUS=y | ||
114 | CONFIG_HID_TOPSEED=y | ||
115 | CONFIG_USB=y | ||
116 | CONFIG_USB_DEVICEFS=y | ||
117 | CONFIG_USB_EHCI_HCD=m | ||
118 | CONFIG_USB_OHCI_HCD=m | ||
119 | CONFIG_USB_UHCI_HCD=y | ||
120 | CONFIG_USB_STORAGE=m | ||
121 | CONFIG_EXT2_FS=y | ||
122 | CONFIG_EXT2_FS_XATTR=y | ||
123 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
124 | CONFIG_EXT2_FS_SECURITY=y | ||
125 | CONFIG_EXT3_FS=y | ||
126 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
127 | CONFIG_EXT3_FS_SECURITY=y | ||
128 | CONFIG_REISERFS_FS=y | ||
129 | CONFIG_REISERFS_FS_XATTR=y | ||
130 | CONFIG_REISERFS_FS_POSIX_ACL=y | ||
131 | CONFIG_REISERFS_FS_SECURITY=y | ||
132 | CONFIG_XFS_FS=y | ||
133 | CONFIG_AUTOFS_FS=y | ||
134 | CONFIG_AUTOFS4_FS=y | ||
135 | CONFIG_ISO9660_FS=m | ||
136 | CONFIG_JOLIET=y | ||
137 | CONFIG_UDF_FS=m | ||
138 | CONFIG_VFAT_FS=y | ||
139 | CONFIG_NTFS_FS=m | ||
140 | CONFIG_PROC_KCORE=y | ||
141 | CONFIG_TMPFS=y | ||
142 | CONFIG_HUGETLBFS=y | ||
143 | CONFIG_NFS_FS=m | ||
144 | CONFIG_NFS_V3=y | ||
145 | CONFIG_NFS_V4=y | ||
146 | CONFIG_NFSD=m | ||
147 | CONFIG_NFSD_V4=y | ||
148 | CONFIG_SMB_FS=m | ||
149 | CONFIG_SMB_NLS_DEFAULT=y | ||
150 | CONFIG_CIFS=m | ||
151 | CONFIG_PARTITION_ADVANCED=y | ||
152 | CONFIG_SGI_PARTITION=y | ||
153 | CONFIG_EFI_PARTITION=y | ||
154 | CONFIG_NLS_CODEPAGE_437=y | ||
155 | CONFIG_NLS_CODEPAGE_737=m | ||
156 | CONFIG_NLS_CODEPAGE_775=m | ||
157 | CONFIG_NLS_CODEPAGE_850=m | ||
158 | CONFIG_NLS_CODEPAGE_852=m | ||
159 | CONFIG_NLS_CODEPAGE_855=m | ||
160 | CONFIG_NLS_CODEPAGE_857=m | ||
161 | CONFIG_NLS_CODEPAGE_860=m | ||
162 | CONFIG_NLS_CODEPAGE_861=m | ||
163 | CONFIG_NLS_CODEPAGE_862=m | ||
164 | CONFIG_NLS_CODEPAGE_863=m | ||
165 | CONFIG_NLS_CODEPAGE_864=m | ||
166 | CONFIG_NLS_CODEPAGE_865=m | ||
167 | CONFIG_NLS_CODEPAGE_866=m | ||
168 | CONFIG_NLS_CODEPAGE_869=m | ||
169 | CONFIG_NLS_CODEPAGE_936=m | ||
170 | CONFIG_NLS_CODEPAGE_950=m | ||
171 | CONFIG_NLS_CODEPAGE_932=m | ||
172 | CONFIG_NLS_CODEPAGE_949=m | ||
173 | CONFIG_NLS_CODEPAGE_874=m | ||
174 | CONFIG_NLS_ISO8859_8=m | ||
175 | CONFIG_NLS_CODEPAGE_1250=m | ||
176 | CONFIG_NLS_CODEPAGE_1251=m | ||
177 | CONFIG_NLS_ISO8859_1=y | ||
178 | CONFIG_NLS_ISO8859_2=m | ||
179 | CONFIG_NLS_ISO8859_3=m | ||
180 | CONFIG_NLS_ISO8859_4=m | ||
181 | CONFIG_NLS_ISO8859_5=m | ||
182 | CONFIG_NLS_ISO8859_6=m | ||
183 | CONFIG_NLS_ISO8859_7=m | ||
184 | CONFIG_NLS_ISO8859_9=m | ||
185 | CONFIG_NLS_ISO8859_13=m | ||
186 | CONFIG_NLS_ISO8859_14=m | ||
187 | CONFIG_NLS_ISO8859_15=m | ||
188 | CONFIG_NLS_KOI8_R=m | ||
189 | CONFIG_NLS_KOI8_U=m | ||
190 | CONFIG_NLS_UTF8=m | ||
191 | CONFIG_MAGIC_SYSRQ=y | ||
192 | CONFIG_DEBUG_KERNEL=y | ||
193 | CONFIG_DEBUG_MUTEXES=y | ||
194 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
195 | CONFIG_IA64_GRANULE_16MB=y | ||
196 | CONFIG_CRYPTO_ECB=m | ||
197 | CONFIG_CRYPTO_PCBC=m | ||
198 | CONFIG_CRYPTO_MD5=y | ||
199 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index faa1bf0da815..d651102a4d45 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h | |||
@@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void) | |||
111 | return "uv"; | 111 | return "uv"; |
112 | # elif defined (CONFIG_IA64_DIG) | 112 | # elif defined (CONFIG_IA64_DIG) |
113 | return "dig"; | 113 | return "dig"; |
114 | # elif defined (CONFIG_IA64_XEN_GUEST) | ||
115 | return "xen"; | ||
116 | # elif defined(CONFIG_IA64_DIG_VTD) | 114 | # elif defined(CONFIG_IA64_DIG_VTD) |
117 | return "dig_vtd"; | 115 | return "dig_vtd"; |
118 | # else | 116 | # else |
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 60576e06b6fb..d0a69aa35e27 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h | |||
@@ -45,14 +45,37 @@ | |||
45 | # define smp_rmb() rmb() | 45 | # define smp_rmb() rmb() |
46 | # define smp_wmb() wmb() | 46 | # define smp_wmb() wmb() |
47 | # define smp_read_barrier_depends() read_barrier_depends() | 47 | # define smp_read_barrier_depends() read_barrier_depends() |
48 | |||
48 | #else | 49 | #else |
50 | |||
49 | # define smp_mb() barrier() | 51 | # define smp_mb() barrier() |
50 | # define smp_rmb() barrier() | 52 | # define smp_rmb() barrier() |
51 | # define smp_wmb() barrier() | 53 | # define smp_wmb() barrier() |
52 | # define smp_read_barrier_depends() do { } while(0) | 54 | # define smp_read_barrier_depends() do { } while(0) |
55 | |||
53 | #endif | 56 | #endif |
54 | 57 | ||
55 | /* | 58 | /* |
59 | * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no | ||
60 | * need for asm trickery! | ||
61 | */ | ||
62 | |||
63 | #define smp_store_release(p, v) \ | ||
64 | do { \ | ||
65 | compiletime_assert_atomic_type(*p); \ | ||
66 | barrier(); \ | ||
67 | ACCESS_ONCE(*p) = (v); \ | ||
68 | } while (0) | ||
69 | |||
70 | #define smp_load_acquire(p) \ | ||
71 | ({ \ | ||
72 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
73 | compiletime_assert_atomic_type(*p); \ | ||
74 | barrier(); \ | ||
75 | ___p1; \ | ||
76 | }) | ||
77 | |||
78 | /* | ||
56 | * XXX check on this ---I suspect what Linus really wants here is | 79 | * XXX check on this ---I suspect what Linus really wants here is |
57 | * acquire vs release semantics but we can't discuss this stuff with | 80 | * acquire vs release semantics but we can't discuss this stuff with |
58 | * Linus just yet. Grrr... | 81 | * Linus just yet. Grrr... |
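As the comment added above notes, ia64 GCC compiles volatile stores to st.rel and volatile loads to ld.acq, so barrier() plus ACCESS_ONCE() already yields release/acquire ordering with no fence instruction. A conceptual, hand-written illustration (not kernel code):

```c
/* On ia64 a volatile store becomes st.rel and a volatile load becomes
 * ld.acq, so these expand to a plain access plus a compiler barrier.
 * Illustrative only.
 */
static inline void example_store_release(int *p, int v)
{
	__asm__ __volatile__("" : : : "memory");	/* barrier()  */
	*(volatile int *)p = v;				/* -> st.rel  */
}

static inline int example_load_acquire(int *p)
{
	int v = *(volatile int *)p;			/* -> ld.acq  */

	__asm__ __volatile__("" : : : "memory");	/* barrier()  */
	return v;
}
```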
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 2d1ad4b11a85..9c39bdfc2da8 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h | |||
@@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); | |||
113 | # include <asm/machvec_sn2.h> | 113 | # include <asm/machvec_sn2.h> |
114 | # elif defined (CONFIG_IA64_SGI_UV) | 114 | # elif defined (CONFIG_IA64_SGI_UV) |
115 | # include <asm/machvec_uv.h> | 115 | # include <asm/machvec_uv.h> |
116 | # elif defined (CONFIG_IA64_XEN_GUEST) | ||
117 | # include <asm/machvec_xen.h> | ||
118 | # elif defined (CONFIG_IA64_GENERIC) | 116 | # elif defined (CONFIG_IA64_GENERIC) |
119 | 117 | ||
120 | # ifdef MACHVEC_PLATFORM_HEADER | 118 | # ifdef MACHVEC_PLATFORM_HEADER |
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h deleted file mode 100644 index 8b8bd0eb3923..000000000000 --- a/arch/ia64/include/asm/machvec_xen.h +++ /dev/null | |||
@@ -1,22 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_MACHVEC_XEN_h | ||
2 | #define _ASM_IA64_MACHVEC_XEN_h | ||
3 | |||
4 | extern ia64_mv_setup_t dig_setup; | ||
5 | extern ia64_mv_cpu_init_t xen_cpu_init; | ||
6 | extern ia64_mv_irq_init_t xen_irq_init; | ||
7 | extern ia64_mv_send_ipi_t xen_platform_send_ipi; | ||
8 | |||
9 | /* | ||
10 | * This stuff has dual use! | ||
11 | * | ||
12 | * For a generic kernel, the macros are used to initialize the | ||
13 | * platform's machvec structure. When compiling a non-generic kernel, | ||
14 | * the macros are used directly. | ||
15 | */ | ||
16 | #define ia64_platform_name "xen" | ||
17 | #define platform_setup dig_setup | ||
18 | #define platform_cpu_init xen_cpu_init | ||
19 | #define platform_irq_init xen_irq_init | ||
20 | #define platform_send_ipi xen_platform_send_ipi | ||
21 | |||
22 | #endif /* _ASM_IA64_MACHVEC_XEN_h */ | ||
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index 61c7b1750b16..092f1c91b36c 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h | |||
@@ -18,7 +18,6 @@ | |||
18 | * - crash dumping code reserved region | 18 | * - crash dumping code reserved region |
19 | * - Kernel memory map built from EFI memory map | 19 | * - Kernel memory map built from EFI memory map |
20 | * - ELF core header | 20 | * - ELF core header |
21 | * - xen start info if CONFIG_XEN | ||
22 | * | 21 | * |
23 | * More could be added if necessary | 22 | * More could be added if necessary |
24 | */ | 23 | */ |
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h index b149b88ea795..b53518a98026 100644 --- a/arch/ia64/include/asm/paravirt.h +++ b/arch/ia64/include/asm/paravirt.h | |||
@@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void); | |||
75 | #ifdef CONFIG_PARAVIRT_GUEST | 75 | #ifdef CONFIG_PARAVIRT_GUEST |
76 | 76 | ||
77 | #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 | 77 | #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 |
78 | #define PARAVIRT_HYPERVISOR_TYPE_XEN 1 | ||
79 | 78 | ||
80 | #ifndef __ASSEMBLY__ | 79 | #ifndef __ASSEMBLY__ |
81 | 80 | ||
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h index 44ef9ef8f5b3..42b233bedeb5 100644 --- a/arch/ia64/include/asm/pvclock-abi.h +++ b/arch/ia64/include/asm/pvclock-abi.h | |||
@@ -11,7 +11,7 @@ | |||
11 | /* | 11 | /* |
12 | * These structs MUST NOT be changed. | 12 | * These structs MUST NOT be changed. |
13 | * They are the ABI between hypervisor and guest OS. | 13 | * They are the ABI between hypervisor and guest OS. |
14 | * Both Xen and KVM are using this. | 14 | * KVM is using this. |
15 | * | 15 | * |
16 | * pvclock_vcpu_time_info holds the system time and the tsc timestamp | 16 | * pvclock_vcpu_time_info holds the system time and the tsc timestamp |
17 | * of the last update. So the guest can use the tsc delta to get a | 17 | * of the last update. So the guest can use the tsc delta to get a |
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h deleted file mode 100644 index 593c12eeb270..000000000000 --- a/arch/ia64/include/asm/sync_bitops.h +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_SYNC_BITOPS_H | ||
2 | #define _ASM_IA64_SYNC_BITOPS_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
6 | * | ||
7 | * Based on synch_bitops.h which Dan Magenhaimer wrote. | ||
8 | * | ||
9 | * bit operations which provide guaranteed strong synchronisation | ||
10 | * when communicating with Xen or other guest OSes running on other CPUs. | ||
11 | */ | ||
12 | |||
13 | static inline void sync_set_bit(int nr, volatile void *addr) | ||
14 | { | ||
15 | set_bit(nr, addr); | ||
16 | } | ||
17 | |||
18 | static inline void sync_clear_bit(int nr, volatile void *addr) | ||
19 | { | ||
20 | clear_bit(nr, addr); | ||
21 | } | ||
22 | |||
23 | static inline void sync_change_bit(int nr, volatile void *addr) | ||
24 | { | ||
25 | change_bit(nr, addr); | ||
26 | } | ||
27 | |||
28 | static inline int sync_test_and_set_bit(int nr, volatile void *addr) | ||
29 | { | ||
30 | return test_and_set_bit(nr, addr); | ||
31 | } | ||
32 | |||
33 | static inline int sync_test_and_clear_bit(int nr, volatile void *addr) | ||
34 | { | ||
35 | return test_and_clear_bit(nr, addr); | ||
36 | } | ||
37 | |||
38 | static inline int sync_test_and_change_bit(int nr, volatile void *addr) | ||
39 | { | ||
40 | return test_and_change_bit(nr, addr); | ||
41 | } | ||
42 | |||
43 | static inline int sync_test_bit(int nr, const volatile void *addr) | ||
44 | { | ||
45 | return test_bit(nr, addr); | ||
46 | } | ||
47 | |||
48 | #define sync_cmpxchg(ptr, old, new) \ | ||
49 | ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new))) | ||
50 | |||
51 | #endif /* _ASM_IA64_SYNC_BITOPS_H */ | ||
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h deleted file mode 100644 index baa74c82aa71..000000000000 --- a/arch/ia64/include/asm/xen/events.h +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/events.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | #ifndef _ASM_IA64_XEN_EVENTS_H | ||
23 | #define _ASM_IA64_XEN_EVENTS_H | ||
24 | |||
25 | enum ipi_vector { | ||
26 | XEN_RESCHEDULE_VECTOR, | ||
27 | XEN_IPI_VECTOR, | ||
28 | XEN_CMCP_VECTOR, | ||
29 | XEN_CPEP_VECTOR, | ||
30 | |||
31 | XEN_NR_IPIS, | ||
32 | }; | ||
33 | |||
34 | static inline int xen_irqs_disabled(struct pt_regs *regs) | ||
35 | { | ||
36 | return !(ia64_psr(regs)->i); | ||
37 | } | ||
38 | |||
39 | #define irq_ctx_init(cpu) do { } while (0) | ||
40 | |||
41 | #endif /* _ASM_IA64_XEN_EVENTS_H */ | ||
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h deleted file mode 100644 index ed28bcd5bb85..000000000000 --- a/arch/ia64/include/asm/xen/hypercall.h +++ /dev/null | |||
@@ -1,265 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * hypercall.h | ||
3 | * | ||
4 | * Linux-specific hypervisor handling. | ||
5 | * | ||
6 | * Copyright (c) 2002-2004, K A Fraser | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef _ASM_IA64_XEN_HYPERCALL_H | ||
34 | #define _ASM_IA64_XEN_HYPERCALL_H | ||
35 | |||
36 | #include <xen/interface/xen.h> | ||
37 | #include <xen/interface/physdev.h> | ||
38 | #include <xen/interface/sched.h> | ||
39 | #include <asm/xen/xcom_hcall.h> | ||
40 | struct xencomm_handle; | ||
41 | extern unsigned long __hypercall(unsigned long a1, unsigned long a2, | ||
42 | unsigned long a3, unsigned long a4, | ||
43 | unsigned long a5, unsigned long cmd); | ||
44 | |||
45 | /* | ||
46 | * Assembler stubs for hyper-calls. | ||
47 | */ | ||
48 | |||
49 | #define _hypercall0(type, name) \ | ||
50 | ({ \ | ||
51 | long __res; \ | ||
52 | __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\ | ||
53 | (type)__res; \ | ||
54 | }) | ||
55 | |||
56 | #define _hypercall1(type, name, a1) \ | ||
57 | ({ \ | ||
58 | long __res; \ | ||
59 | __res = __hypercall((unsigned long)a1, \ | ||
60 | 0, 0, 0, 0, __HYPERVISOR_##name); \ | ||
61 | (type)__res; \ | ||
62 | }) | ||
63 | |||
64 | #define _hypercall2(type, name, a1, a2) \ | ||
65 | ({ \ | ||
66 | long __res; \ | ||
67 | __res = __hypercall((unsigned long)a1, \ | ||
68 | (unsigned long)a2, \ | ||
69 | 0, 0, 0, __HYPERVISOR_##name); \ | ||
70 | (type)__res; \ | ||
71 | }) | ||
72 | |||
73 | #define _hypercall3(type, name, a1, a2, a3) \ | ||
74 | ({ \ | ||
75 | long __res; \ | ||
76 | __res = __hypercall((unsigned long)a1, \ | ||
77 | (unsigned long)a2, \ | ||
78 | (unsigned long)a3, \ | ||
79 | 0, 0, __HYPERVISOR_##name); \ | ||
80 | (type)__res; \ | ||
81 | }) | ||
82 | |||
83 | #define _hypercall4(type, name, a1, a2, a3, a4) \ | ||
84 | ({ \ | ||
85 | long __res; \ | ||
86 | __res = __hypercall((unsigned long)a1, \ | ||
87 | (unsigned long)a2, \ | ||
88 | (unsigned long)a3, \ | ||
89 | (unsigned long)a4, \ | ||
90 | 0, __HYPERVISOR_##name); \ | ||
91 | (type)__res; \ | ||
92 | }) | ||
93 | |||
94 | #define _hypercall5(type, name, a1, a2, a3, a4, a5) \ | ||
95 | ({ \ | ||
96 | long __res; \ | ||
97 | __res = __hypercall((unsigned long)a1, \ | ||
98 | (unsigned long)a2, \ | ||
99 | (unsigned long)a3, \ | ||
100 | (unsigned long)a4, \ | ||
101 | (unsigned long)a5, \ | ||
102 | __HYPERVISOR_##name); \ | ||
103 | (type)__res; \ | ||
104 | }) | ||
105 | |||
106 | |||
107 | static inline int | ||
108 | xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) | ||
109 | { | ||
110 | return _hypercall2(int, sched_op, cmd, arg); | ||
111 | } | ||
112 | |||
113 | static inline long | ||
114 | HYPERVISOR_set_timer_op(u64 timeout) | ||
115 | { | ||
116 | unsigned long timeout_hi = (unsigned long)(timeout >> 32); | ||
117 | unsigned long timeout_lo = (unsigned long)timeout; | ||
118 | return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); | ||
119 | } | ||
120 | |||
121 | static inline int | ||
122 | xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list, | ||
123 | int nr_calls) | ||
124 | { | ||
125 | return _hypercall2(int, multicall, call_list, nr_calls); | ||
126 | } | ||
127 | |||
128 | static inline int | ||
129 | xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg) | ||
130 | { | ||
131 | return _hypercall2(int, memory_op, cmd, arg); | ||
132 | } | ||
133 | |||
134 | static inline int | ||
135 | xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg) | ||
136 | { | ||
137 | return _hypercall2(int, event_channel_op, cmd, arg); | ||
138 | } | ||
139 | |||
140 | static inline int | ||
141 | xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg) | ||
142 | { | ||
143 | return _hypercall2(int, xen_version, cmd, arg); | ||
144 | } | ||
145 | |||
146 | static inline int | ||
147 | xencomm_arch_hypercall_console_io(int cmd, int count, | ||
148 | struct xencomm_handle *str) | ||
149 | { | ||
150 | return _hypercall3(int, console_io, cmd, count, str); | ||
151 | } | ||
152 | |||
153 | static inline int | ||
154 | xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg) | ||
155 | { | ||
156 | return _hypercall2(int, physdev_op, cmd, arg); | ||
157 | } | ||
158 | |||
159 | static inline int | ||
160 | xencomm_arch_hypercall_grant_table_op(unsigned int cmd, | ||
161 | struct xencomm_handle *uop, | ||
162 | unsigned int count) | ||
163 | { | ||
164 | return _hypercall3(int, grant_table_op, cmd, uop, count); | ||
165 | } | ||
166 | |||
167 | int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); | ||
168 | |||
169 | extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg); | ||
170 | |||
171 | static inline int | ||
172 | xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg) | ||
173 | { | ||
174 | return _hypercall2(int, callback_op, cmd, arg); | ||
175 | } | ||
176 | |||
177 | static inline long | ||
178 | xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg) | ||
179 | { | ||
180 | return _hypercall3(long, vcpu_op, cmd, cpu, arg); | ||
181 | } | ||
182 | |||
183 | static inline int | ||
184 | HYPERVISOR_physdev_op(int cmd, void *arg) | ||
185 | { | ||
186 | switch (cmd) { | ||
187 | case PHYSDEVOP_eoi: | ||
188 | return _hypercall1(int, ia64_fast_eoi, | ||
189 | ((struct physdev_eoi *)arg)->irq); | ||
190 | default: | ||
191 | return xencomm_hypercall_physdev_op(cmd, arg); | ||
192 | } | ||
193 | } | ||
194 | |||
195 | static inline long | ||
196 | xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg) | ||
197 | { | ||
198 | return _hypercall1(long, opt_feature, arg); | ||
199 | } | ||
200 | |||
201 | /* for balloon driver */ | ||
202 | #define HYPERVISOR_update_va_mapping(va, new_val, flags) (0) | ||
203 | |||
204 | /* Use xencomm to do hypercalls. */ | ||
205 | #define HYPERVISOR_sched_op xencomm_hypercall_sched_op | ||
206 | #define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op | ||
207 | #define HYPERVISOR_callback_op xencomm_hypercall_callback_op | ||
208 | #define HYPERVISOR_multicall xencomm_hypercall_multicall | ||
209 | #define HYPERVISOR_xen_version xencomm_hypercall_xen_version | ||
210 | #define HYPERVISOR_console_io xencomm_hypercall_console_io | ||
211 | #define HYPERVISOR_memory_op xencomm_hypercall_memory_op | ||
212 | #define HYPERVISOR_suspend xencomm_hypercall_suspend | ||
213 | #define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op | ||
214 | #define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature | ||
215 | |||
216 | /* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */ | ||
217 | #define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; }) | ||
218 | |||
219 | static inline int | ||
220 | HYPERVISOR_shutdown( | ||
221 | unsigned int reason) | ||
222 | { | ||
223 | struct sched_shutdown sched_shutdown = { | ||
224 | .reason = reason | ||
225 | }; | ||
226 | |||
227 | int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown); | ||
228 | |||
229 | return rc; | ||
230 | } | ||
231 | |||
232 | /* for netfront.c, netback.c */ | ||
233 | #define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */ | ||
234 | |||
235 | static inline void | ||
236 | MULTI_update_va_mapping( | ||
237 | struct multicall_entry *mcl, unsigned long va, | ||
238 | pte_t new_val, unsigned long flags) | ||
239 | { | ||
240 | mcl->op = __HYPERVISOR_update_va_mapping; | ||
241 | mcl->result = 0; | ||
242 | } | ||
243 | |||
244 | static inline void | ||
245 | MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, | ||
246 | void *uop, unsigned int count) | ||
247 | { | ||
248 | mcl->op = __HYPERVISOR_grant_table_op; | ||
249 | mcl->args[0] = cmd; | ||
250 | mcl->args[1] = (unsigned long)uop; | ||
251 | mcl->args[2] = count; | ||
252 | } | ||
253 | |||
254 | static inline void | ||
255 | MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, | ||
256 | int count, int *success_count, domid_t domid) | ||
257 | { | ||
258 | mcl->op = __HYPERVISOR_mmu_update; | ||
259 | mcl->args[0] = (unsigned long)req; | ||
260 | mcl->args[1] = count; | ||
261 | mcl->args[2] = (unsigned long)success_count; | ||
262 | mcl->args[3] = domid; | ||
263 | } | ||
264 | |||
265 | #endif /* _ASM_IA64_XEN_HYPERCALL_H */ | ||
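For reference, every _hypercallN wrapper above funnels into the single __hypercall assembler stub, zero-filling the unused argument slots and passing the hypercall number last. As a sketch, _hypercall2(int, sched_op, cmd, arg) expands to:

	({
		long __res;
		/* unused slots are zero-filled; the hypercall number goes last */
		__res = __hypercall((unsigned long)cmd, (unsigned long)arg,
				    0, 0, 0, __HYPERVISOR_sched_op);
		(int)__res;
	})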
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h deleted file mode 100644 index 67455c2ed2b1..000000000000 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * hypervisor.h | ||
3 | * | ||
4 | * Linux-specific hypervisor handling. | ||
5 | * | ||
6 | * Copyright (c) 2002-2004, K A Fraser | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef _ASM_IA64_XEN_HYPERVISOR_H | ||
34 | #define _ASM_IA64_XEN_HYPERVISOR_H | ||
35 | |||
36 | #include <linux/err.h> | ||
37 | #include <xen/interface/xen.h> | ||
38 | #include <xen/interface/version.h> /* to compile feature.c */ | ||
39 | #include <xen/features.h> /* to compile xen-netfront.c */ | ||
40 | #include <xen/xen.h> | ||
41 | #include <asm/xen/hypercall.h> | ||
42 | |||
43 | #ifdef CONFIG_XEN | ||
44 | extern struct shared_info *HYPERVISOR_shared_info; | ||
45 | extern struct start_info *xen_start_info; | ||
46 | |||
47 | void __init xen_setup_vcpu_info_placement(void); | ||
48 | void force_evtchn_callback(void); | ||
49 | |||
50 | /* for drivers/xen/balloon/balloon.c */ | ||
51 | #ifdef CONFIG_XEN_SCRUB_PAGES | ||
52 | #define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT) | ||
53 | #else | ||
54 | #define scrub_pages(_p, _n) ((void)0) | ||
55 | #endif | ||
56 | |||
57 | /* For setup_arch() in arch/ia64/kernel/setup.c */ | ||
58 | void xen_ia64_enable_opt_feature(void); | ||
59 | #endif | ||
60 | |||
61 | #endif /* _ASM_IA64_XEN_HYPERVISOR_H */ | ||
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h deleted file mode 100644 index c53a47611208..000000000000 --- a/arch/ia64/include/asm/xen/inst.h +++ /dev/null | |||
@@ -1,486 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/inst.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <asm/xen/privop.h> | ||
24 | |||
25 | #define ia64_ivt xen_ivt | ||
26 | #define DO_SAVE_MIN XEN_DO_SAVE_MIN | ||
27 | |||
28 | #define __paravirt_switch_to xen_switch_to | ||
29 | #define __paravirt_leave_syscall xen_leave_syscall | ||
30 | #define __paravirt_work_processed_syscall xen_work_processed_syscall | ||
31 | #define __paravirt_leave_kernel xen_leave_kernel | ||
32 | #define __paravirt_pending_syscall_end xen_work_pending_syscall_end | ||
33 | #define __paravirt_work_processed_syscall_target \ | ||
34 | xen_work_processed_syscall | ||
35 | |||
36 | #define paravirt_fsyscall_table xen_fsyscall_table | ||
37 | #define paravirt_fsys_bubble_down xen_fsys_bubble_down | ||
38 | |||
39 | #define MOV_FROM_IFA(reg) \ | ||
40 | movl reg = XSI_IFA; \ | ||
41 | ;; \ | ||
42 | ld8 reg = [reg] | ||
43 | |||
44 | #define MOV_FROM_ITIR(reg) \ | ||
45 | movl reg = XSI_ITIR; \ | ||
46 | ;; \ | ||
47 | ld8 reg = [reg] | ||
48 | |||
49 | #define MOV_FROM_ISR(reg) \ | ||
50 | movl reg = XSI_ISR; \ | ||
51 | ;; \ | ||
52 | ld8 reg = [reg] | ||
53 | |||
54 | #define MOV_FROM_IHA(reg) \ | ||
55 | movl reg = XSI_IHA; \ | ||
56 | ;; \ | ||
57 | ld8 reg = [reg] | ||
58 | |||
59 | #define MOV_FROM_IPSR(pred, reg) \ | ||
60 | (pred) movl reg = XSI_IPSR; \ | ||
61 | ;; \ | ||
62 | (pred) ld8 reg = [reg] | ||
63 | |||
64 | #define MOV_FROM_IIM(reg) \ | ||
65 | movl reg = XSI_IIM; \ | ||
66 | ;; \ | ||
67 | ld8 reg = [reg] | ||
68 | |||
69 | #define MOV_FROM_IIP(reg) \ | ||
70 | movl reg = XSI_IIP; \ | ||
71 | ;; \ | ||
72 | ld8 reg = [reg] | ||
73 | |||
74 | .macro __MOV_FROM_IVR reg, clob | ||
75 | .ifc "\reg", "r8" | ||
76 | XEN_HYPER_GET_IVR | ||
77 | .exitm | ||
78 | .endif | ||
79 | .ifc "\clob", "r8" | ||
80 | XEN_HYPER_GET_IVR | ||
81 | ;; | ||
82 | mov \reg = r8 | ||
83 | .exitm | ||
84 | .endif | ||
85 | |||
86 | mov \clob = r8 | ||
87 | ;; | ||
88 | XEN_HYPER_GET_IVR | ||
89 | ;; | ||
90 | mov \reg = r8 | ||
91 | ;; | ||
92 | mov r8 = \clob | ||
93 | .endm | ||
94 | #define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob | ||
95 | |||
96 | .macro __MOV_FROM_PSR pred, reg, clob | ||
97 | .ifc "\reg", "r8" | ||
98 | (\pred) XEN_HYPER_GET_PSR; | ||
99 | .exitm | ||
100 | .endif | ||
101 | .ifc "\clob", "r8" | ||
102 | (\pred) XEN_HYPER_GET_PSR | ||
103 | ;; | ||
104 | (\pred) mov \reg = r8 | ||
105 | .exitm | ||
106 | .endif | ||
107 | |||
108 | (\pred) mov \clob = r8 | ||
109 | (\pred) XEN_HYPER_GET_PSR | ||
110 | ;; | ||
111 | (\pred) mov \reg = r8 | ||
112 | (\pred) mov r8 = \clob | ||
113 | .endm | ||
114 | #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob | ||
115 | |||
116 | /* assuming ar.itc is read with interrupts disabled. */ | ||
117 | #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ | ||
118 | (pred) movl clob = XSI_ITC_OFFSET; \ | ||
119 | ;; \ | ||
120 | (pred) ld8 clob = [clob]; \ | ||
121 | (pred) mov reg = ar.itc; \ | ||
122 | ;; \ | ||
123 | (pred) add reg = reg, clob; \ | ||
124 | ;; \ | ||
125 | (pred) movl clob = XSI_ITC_LAST; \ | ||
126 | ;; \ | ||
127 | (pred) ld8 clob = [clob]; \ | ||
128 | ;; \ | ||
129 | (pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ | ||
130 | ;; \ | ||
131 | (pred_clob) add reg = 1, clob; \ | ||
132 | ;; \ | ||
133 | (pred) movl clob = XSI_ITC_LAST; \ | ||
134 | ;; \ | ||
135 | (pred) st8 [clob] = reg | ||
136 | |||
137 | |||
138 | #define MOV_TO_IFA(reg, clob) \ | ||
139 | movl clob = XSI_IFA; \ | ||
140 | ;; \ | ||
141 | st8 [clob] = reg \ | ||
142 | |||
143 | #define MOV_TO_ITIR(pred, reg, clob) \ | ||
144 | (pred) movl clob = XSI_ITIR; \ | ||
145 | ;; \ | ||
146 | (pred) st8 [clob] = reg | ||
147 | |||
148 | #define MOV_TO_IHA(pred, reg, clob) \ | ||
149 | (pred) movl clob = XSI_IHA; \ | ||
150 | ;; \ | ||
151 | (pred) st8 [clob] = reg | ||
152 | |||
153 | #define MOV_TO_IPSR(pred, reg, clob) \ | ||
154 | (pred) movl clob = XSI_IPSR; \ | ||
155 | ;; \ | ||
156 | (pred) st8 [clob] = reg; \ | ||
157 | ;; | ||
158 | |||
159 | #define MOV_TO_IFS(pred, reg, clob) \ | ||
160 | (pred) movl clob = XSI_IFS; \ | ||
161 | ;; \ | ||
162 | (pred) st8 [clob] = reg; \ | ||
163 | ;; | ||
164 | |||
165 | #define MOV_TO_IIP(reg, clob) \ | ||
166 | movl clob = XSI_IIP; \ | ||
167 | ;; \ | ||
168 | st8 [clob] = reg | ||
169 | |||
170 | .macro ____MOV_TO_KR kr, reg, clob0, clob1 | ||
171 | .ifc "\clob0", "r9" | ||
172 | .error "clob0 \clob0 must not be r9" | ||
173 | .endif | ||
174 | .ifc "\clob1", "r8" | ||
175 | .error "clob1 \clob1 must not be r8" | ||
176 | .endif | ||
177 | |||
178 | .ifnc "\reg", "r9" | ||
179 | .ifnc "\clob1", "r9" | ||
180 | mov \clob1 = r9 | ||
181 | .endif | ||
182 | mov r9 = \reg | ||
183 | .endif | ||
184 | .ifnc "\clob0", "r8" | ||
185 | mov \clob0 = r8 | ||
186 | .endif | ||
187 | mov r8 = \kr | ||
188 | ;; | ||
189 | XEN_HYPER_SET_KR | ||
190 | |||
191 | .ifnc "\reg", "r9" | ||
192 | .ifnc "\clob1", "r9" | ||
193 | mov r9 = \clob1 | ||
194 | .endif | ||
195 | .endif | ||
196 | .ifnc "\clob0", "r8" | ||
197 | mov r8 = \clob0 | ||
198 | .endif | ||
199 | .endm | ||
200 | |||
201 | .macro __MOV_TO_KR kr, reg, clob0, clob1 | ||
202 | .ifc "\clob0", "r9" | ||
203 | ____MOV_TO_KR \kr, \reg, \clob1, \clob0 | ||
204 | .exitm | ||
205 | .endif | ||
206 | .ifc "\clob1", "r8" | ||
207 | ____MOV_TO_KR \kr, \reg, \clob1, \clob0 | ||
208 | .exitm | ||
209 | .endif | ||
210 | |||
211 | ____MOV_TO_KR \kr, \reg, \clob0, \clob1 | ||
212 | .endm | ||
213 | |||
214 | #define MOV_TO_KR(kr, reg, clob0, clob1) \ | ||
215 | __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1 | ||
216 | |||
217 | |||
218 | .macro __ITC_I pred, reg, clob | ||
219 | .ifc "\reg", "r8" | ||
220 | (\pred) XEN_HYPER_ITC_I | ||
221 | .exitm | ||
222 | .endif | ||
223 | .ifc "\clob", "r8" | ||
224 | (\pred) mov r8 = \reg | ||
225 | ;; | ||
226 | (\pred) XEN_HYPER_ITC_I | ||
227 | .exitm | ||
228 | .endif | ||
229 | |||
230 | (\pred) mov \clob = r8 | ||
231 | (\pred) mov r8 = \reg | ||
232 | ;; | ||
233 | (\pred) XEN_HYPER_ITC_I | ||
234 | ;; | ||
235 | (\pred) mov r8 = \clob | ||
236 | ;; | ||
237 | .endm | ||
238 | #define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob | ||
239 | |||
240 | .macro __ITC_D pred, reg, clob | ||
241 | .ifc "\reg", "r8" | ||
242 | (\pred) XEN_HYPER_ITC_D | ||
243 | ;; | ||
244 | .exitm | ||
245 | .endif | ||
246 | .ifc "\clob", "r8" | ||
247 | (\pred) mov r8 = \reg | ||
248 | ;; | ||
249 | (\pred) XEN_HYPER_ITC_D | ||
250 | ;; | ||
251 | .exitm | ||
252 | .endif | ||
253 | |||
254 | (\pred) mov \clob = r8 | ||
255 | (\pred) mov r8 = \reg | ||
256 | ;; | ||
257 | (\pred) XEN_HYPER_ITC_D | ||
258 | ;; | ||
259 | (\pred) mov r8 = \clob | ||
260 | ;; | ||
261 | .endm | ||
262 | #define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob | ||
263 | |||
264 | .macro __ITC_I_AND_D pred_i, pred_d, reg, clob | ||
265 | .ifc "\reg", "r8" | ||
266 | (\pred_i)XEN_HYPER_ITC_I | ||
267 | ;; | ||
268 | (\pred_d)XEN_HYPER_ITC_D | ||
269 | ;; | ||
270 | .exitm | ||
271 | .endif | ||
272 | .ifc "\clob", "r8" | ||
273 | mov r8 = \reg | ||
274 | ;; | ||
275 | (\pred_i)XEN_HYPER_ITC_I | ||
276 | ;; | ||
277 | (\pred_d)XEN_HYPER_ITC_D | ||
278 | ;; | ||
279 | .exitm | ||
280 | .endif | ||
281 | |||
282 | mov \clob = r8 | ||
283 | mov r8 = \reg | ||
284 | ;; | ||
285 | (\pred_i)XEN_HYPER_ITC_I | ||
286 | ;; | ||
287 | (\pred_d)XEN_HYPER_ITC_D | ||
288 | ;; | ||
289 | mov r8 = \clob | ||
290 | ;; | ||
291 | .endm | ||
292 | #define ITC_I_AND_D(pred_i, pred_d, reg, clob) \ | ||
293 | __ITC_I_AND_D pred_i, pred_d, reg, clob | ||
294 | |||
295 | .macro __THASH pred, reg0, reg1, clob | ||
296 | .ifc "\reg0", "r8" | ||
297 | (\pred) mov r8 = \reg1 | ||
298 | (\pred) XEN_HYPER_THASH | ||
299 | .exitm | ||
300 | .endif | ||
301 | .ifc "\reg1", "r8" | ||
302 | (\pred) XEN_HYPER_THASH | ||
303 | ;; | ||
304 | (\pred) mov \reg0 = r8 | ||
305 | ;; | ||
306 | .exitm | ||
307 | .endif | ||
308 | .ifc "\clob", "r8" | ||
309 | (\pred) mov r8 = \reg1 | ||
310 | (\pred) XEN_HYPER_THASH | ||
311 | ;; | ||
312 | (\pred) mov \reg0 = r8 | ||
313 | ;; | ||
314 | .exitm | ||
315 | .endif | ||
316 | |||
317 | (\pred) mov \clob = r8 | ||
318 | (\pred) mov r8 = \reg1 | ||
319 | (\pred) XEN_HYPER_THASH | ||
320 | ;; | ||
321 | (\pred) mov \reg0 = r8 | ||
322 | (\pred) mov r8 = \clob | ||
323 | ;; | ||
324 | .endm | ||
325 | #define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob | ||
326 | |||
327 | #define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \ | ||
328 | mov clob0 = 1; \ | ||
329 | movl clob1 = XSI_PSR_IC; \ | ||
330 | ;; \ | ||
331 | st4 [clob1] = clob0 \ | ||
332 | ;; | ||
333 | |||
334 | #define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \ | ||
335 | ;; \ | ||
336 | srlz.d; \ | ||
337 | mov clob1 = 1; \ | ||
338 | movl clob0 = XSI_PSR_IC; \ | ||
339 | ;; \ | ||
340 | st4 [clob0] = clob1 | ||
341 | |||
342 | #define RSM_PSR_IC(clob) \ | ||
343 | movl clob = XSI_PSR_IC; \ | ||
344 | ;; \ | ||
345 | st4 [clob] = r0; \ | ||
346 | ;; | ||
347 | |||
348 | /* pred will be clobbered */ | ||
349 | #define MASK_TO_PEND_OFS (-1) | ||
350 | #define SSM_PSR_I(pred, pred_clob, clob) \ | ||
351 | (pred) movl clob = XSI_PSR_I_ADDR \ | ||
352 | ;; \ | ||
353 | (pred) ld8 clob = [clob] \ | ||
354 | ;; \ | ||
355 | /* if (pred) vpsr.i = 1 */ \ | ||
356 | /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \ | ||
357 | (pred) st1 [clob] = r0, MASK_TO_PEND_OFS \ | ||
358 | ;; \ | ||
359 | /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \ | ||
360 | (pred) ld1 clob = [clob] \ | ||
361 | ;; \ | ||
362 | (pred) cmp.ne.unc pred_clob, p0 = clob, r0 \ | ||
363 | ;; \ | ||
364 | (pred_clob)XEN_HYPER_SSM_I /* do a real ssm psr.i */ | ||
365 | |||
366 | #define RSM_PSR_I(pred, clob0, clob1) \ | ||
367 | movl clob0 = XSI_PSR_I_ADDR; \ | ||
368 | mov clob1 = 1; \ | ||
369 | ;; \ | ||
370 | ld8 clob0 = [clob0]; \ | ||
371 | ;; \ | ||
372 | (pred) st1 [clob0] = clob1 | ||
373 | |||
374 | #define RSM_PSR_I_IC(clob0, clob1, clob2) \ | ||
375 | movl clob0 = XSI_PSR_I_ADDR; \ | ||
376 | movl clob1 = XSI_PSR_IC; \ | ||
377 | ;; \ | ||
378 | ld8 clob0 = [clob0]; \ | ||
379 | mov clob2 = 1; \ | ||
380 | ;; \ | ||
381 | /* note: clears both vpsr.i and vpsr.ic! */ \ | ||
382 | st1 [clob0] = clob2; \ | ||
383 | st4 [clob1] = r0; \ | ||
384 | ;; | ||
385 | |||
386 | #define RSM_PSR_DT \ | ||
387 | XEN_HYPER_RSM_PSR_DT | ||
388 | |||
389 | #define RSM_PSR_BE_I(clob0, clob1) \ | ||
390 | RSM_PSR_I(p0, clob0, clob1); \ | ||
391 | rum psr.be | ||
392 | |||
393 | #define SSM_PSR_DT_AND_SRLZ_I \ | ||
394 | XEN_HYPER_SSM_PSR_DT | ||
395 | |||
396 | #define BSW_0(clob0, clob1, clob2) \ | ||
397 | ;; \ | ||
398 | /* r16-r31 all now hold bank1 values */ \ | ||
399 | mov clob2 = ar.unat; \ | ||
400 | movl clob0 = XSI_BANK1_R16; \ | ||
401 | movl clob1 = XSI_BANK1_R16 + 8; \ | ||
402 | ;; \ | ||
403 | .mem.offset 0, 0; st8.spill [clob0] = r16, 16; \ | ||
404 | .mem.offset 8, 0; st8.spill [clob1] = r17, 16; \ | ||
405 | ;; \ | ||
406 | .mem.offset 0, 0; st8.spill [clob0] = r18, 16; \ | ||
407 | .mem.offset 8, 0; st8.spill [clob1] = r19, 16; \ | ||
408 | ;; \ | ||
409 | .mem.offset 0, 0; st8.spill [clob0] = r20, 16; \ | ||
410 | .mem.offset 8, 0; st8.spill [clob1] = r21, 16; \ | ||
411 | ;; \ | ||
412 | .mem.offset 0, 0; st8.spill [clob0] = r22, 16; \ | ||
413 | .mem.offset 8, 0; st8.spill [clob1] = r23, 16; \ | ||
414 | ;; \ | ||
415 | .mem.offset 0, 0; st8.spill [clob0] = r24, 16; \ | ||
416 | .mem.offset 8, 0; st8.spill [clob1] = r25, 16; \ | ||
417 | ;; \ | ||
418 | .mem.offset 0, 0; st8.spill [clob0] = r26, 16; \ | ||
419 | .mem.offset 8, 0; st8.spill [clob1] = r27, 16; \ | ||
420 | ;; \ | ||
421 | .mem.offset 0, 0; st8.spill [clob0] = r28, 16; \ | ||
422 | .mem.offset 8, 0; st8.spill [clob1] = r29, 16; \ | ||
423 | ;; \ | ||
424 | .mem.offset 0, 0; st8.spill [clob0] = r30, 16; \ | ||
425 | .mem.offset 8, 0; st8.spill [clob1] = r31, 16; \ | ||
426 | ;; \ | ||
427 | mov clob1 = ar.unat; \ | ||
428 | movl clob0 = XSI_B1NAT; \ | ||
429 | ;; \ | ||
430 | st8 [clob0] = clob1; \ | ||
431 | mov ar.unat = clob2; \ | ||
432 | movl clob0 = XSI_BANKNUM; \ | ||
433 | ;; \ | ||
434 | st4 [clob0] = r0 | ||
435 | |||
436 | |||
437 | /* FIXME: THIS CODE IS NOT NaT SAFE! */ | ||
438 | #define XEN_BSW_1(clob) \ | ||
439 | mov clob = ar.unat; \ | ||
440 | movl r30 = XSI_B1NAT; \ | ||
441 | ;; \ | ||
442 | ld8 r30 = [r30]; \ | ||
443 | mov r31 = 1; \ | ||
444 | ;; \ | ||
445 | mov ar.unat = r30; \ | ||
446 | movl r30 = XSI_BANKNUM; \ | ||
447 | ;; \ | ||
448 | st4 [r30] = r31; \ | ||
449 | movl r30 = XSI_BANK1_R16; \ | ||
450 | movl r31 = XSI_BANK1_R16+8; \ | ||
451 | ;; \ | ||
452 | ld8.fill r16 = [r30], 16; \ | ||
453 | ld8.fill r17 = [r31], 16; \ | ||
454 | ;; \ | ||
455 | ld8.fill r18 = [r30], 16; \ | ||
456 | ld8.fill r19 = [r31], 16; \ | ||
457 | ;; \ | ||
458 | ld8.fill r20 = [r30], 16; \ | ||
459 | ld8.fill r21 = [r31], 16; \ | ||
460 | ;; \ | ||
461 | ld8.fill r22 = [r30], 16; \ | ||
462 | ld8.fill r23 = [r31], 16; \ | ||
463 | ;; \ | ||
464 | ld8.fill r24 = [r30], 16; \ | ||
465 | ld8.fill r25 = [r31], 16; \ | ||
466 | ;; \ | ||
467 | ld8.fill r26 = [r30], 16; \ | ||
468 | ld8.fill r27 = [r31], 16; \ | ||
469 | ;; \ | ||
470 | ld8.fill r28 = [r30], 16; \ | ||
471 | ld8.fill r29 = [r31], 16; \ | ||
472 | ;; \ | ||
473 | ld8.fill r30 = [r30]; \ | ||
474 | ld8.fill r31 = [r31]; \ | ||
475 | ;; \ | ||
476 | mov ar.unat = clob | ||
477 | |||
478 | #define BSW_1(clob0, clob1) XEN_BSW_1(clob1) | ||
479 | |||
480 | |||
481 | #define COVER \ | ||
482 | XEN_HYPER_COVER | ||
483 | |||
484 | #define RFI \ | ||
485 | XEN_HYPER_RFI; \ | ||
486 | dv_serialize_data | ||
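Most of these macros simply route privileged-register accesses through the XSI_* slots of the shared area, but MOV_FROM_ITC above also enforces monotonicity: the guest-visible ITC is the physical counter plus a per-domain offset, clamped so it never moves backwards relative to the last value handed out. A C rendering of that logic, as a sketch only (the xsi_* globals are hypothetical stand-ins for the XSI_ITC_OFFSET and XSI_ITC_LAST slots):

	/* hypothetical stand-ins for the shared-area slots */
	static unsigned long xsi_itc_offset;
	static unsigned long xsi_itc_last;

	static unsigned long xen_virtual_itc(unsigned long physical_itc)
	{
		unsigned long itc = physical_itc + xsi_itc_offset;

		if (xsi_itc_last >= itc)	/* would appear to go backwards? */
			itc = xsi_itc_last + 1;	/* step just past the last value */
		xsi_itc_last = itc;
		return itc;
	}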
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h deleted file mode 100644 index e88c5de27410..000000000000 --- a/arch/ia64/include/asm/xen/interface.h +++ /dev/null | |||
@@ -1,363 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch-ia64/hypervisor-if.h | ||
3 | * | ||
4 | * Guest OS interface to IA64 Xen. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | * of this software and associated documentation files (the "Software"), to | ||
8 | * deal in the Software without restriction, including without limitation the | ||
9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||
10 | * sell copies of the Software, and to permit persons to whom the Software is | ||
11 | * furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Copyright by those who contributed. (in alphabetical order) | ||
25 | * | ||
26 | * Anthony Xu <anthony.xu@intel.com> | ||
27 | * Eddie Dong <eddie.dong@intel.com> | ||
28 | * Fred Yang <fred.yang@intel.com> | ||
29 | * Kevin Tian <kevin.tian@intel.com> | ||
30 | * Alex Williamson <alex.williamson@hp.com> | ||
31 | * Chris Wright <chrisw@sous-sol.org> | ||
32 | * Christian Limpach <Christian.Limpach@cl.cam.ac.uk> | ||
33 | * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com> | ||
34 | * Hollis Blanchard <hollisb@us.ibm.com> | ||
35 | * Isaku Yamahata <yamahata@valinux.co.jp> | ||
36 | * Jan Beulich <jbeulich@novell.com> | ||
37 | * John Levon <john.levon@sun.com> | ||
38 | * Kazuhiro Suzuki <kaz@jp.fujitsu.com> | ||
39 | * Keir Fraser <keir.fraser@citrix.com> | ||
40 | * Kouya Shimura <kouya@jp.fujitsu.com> | ||
41 | * Masaki Kanno <kanno.masaki@jp.fujitsu.com> | ||
42 | * Matt Chapman <matthewc@hp.com> | ||
43 | * Matthew Chapman <matthewc@hp.com> | ||
44 | * Samuel Thibault <samuel.thibault@eu.citrix.com> | ||
45 | * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com> | ||
46 | * Tristan Gingold <tgingold@free.fr> | ||
47 | * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com> | ||
48 | * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com> | ||
49 | * Zhang Xin <xing.z.zhang@intel.com> | ||
50 | * Zhang xiantao <xiantao.zhang@intel.com> | ||
51 | * dan.magenheimer@hp.com | ||
52 | * ian.pratt@cl.cam.ac.uk | ||
53 | * michael.fetterman@cl.cam.ac.uk | ||
54 | */ | ||
55 | |||
56 | #ifndef _ASM_IA64_XEN_INTERFACE_H | ||
57 | #define _ASM_IA64_XEN_INTERFACE_H | ||
58 | |||
59 | #define __DEFINE_GUEST_HANDLE(name, type) \ | ||
60 | typedef struct { type *p; } __guest_handle_ ## name | ||
61 | |||
62 | #define DEFINE_GUEST_HANDLE_STRUCT(name) \ | ||
63 | __DEFINE_GUEST_HANDLE(name, struct name) | ||
64 | #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) | ||
65 | #define GUEST_HANDLE(name) __guest_handle_ ## name | ||
66 | #define GUEST_HANDLE_64(name) GUEST_HANDLE(name) | ||
67 | #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0) | ||
68 | |||
69 | #ifndef __ASSEMBLY__ | ||
70 | /* Explicitly size integers that represent pfns in the public interface | ||
71 | * with Xen so that we could have one ABI that works for 32 and 64 bit | ||
72 | * guests. */ | ||
73 | typedef unsigned long xen_pfn_t; | ||
74 | typedef unsigned long xen_ulong_t; | ||
75 | /* Guest handles for primitive C types. */ | ||
76 | __DEFINE_GUEST_HANDLE(uchar, unsigned char); | ||
77 | __DEFINE_GUEST_HANDLE(uint, unsigned int); | ||
78 | __DEFINE_GUEST_HANDLE(ulong, unsigned long); | ||
79 | |||
80 | DEFINE_GUEST_HANDLE(char); | ||
81 | DEFINE_GUEST_HANDLE(int); | ||
82 | DEFINE_GUEST_HANDLE(long); | ||
83 | DEFINE_GUEST_HANDLE(void); | ||
84 | DEFINE_GUEST_HANDLE(uint64_t); | ||
85 | DEFINE_GUEST_HANDLE(uint32_t); | ||
86 | |||
87 | DEFINE_GUEST_HANDLE(xen_pfn_t); | ||
88 | #define PRI_xen_pfn "lx" | ||
89 | #endif | ||
90 | |||
91 | /* Arch specific VIRQs definition */ | ||
92 | #define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */ | ||
93 | #define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */ | ||
94 | #define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */ | ||
95 | |||
96 | /* Maximum number of virtual CPUs in multi-processor guests. */ | ||
97 | /* keep sizeof(struct shared_page) <= PAGE_SIZE. | ||
98 | * this is checked in arch/ia64/xen/hypervisor.c. */ | ||
99 | #define MAX_VIRT_CPUS 64 | ||
100 | |||
101 | #ifndef __ASSEMBLY__ | ||
102 | |||
103 | #define INVALID_MFN (~0UL) | ||
104 | |||
105 | union vac { | ||
106 | unsigned long value; | ||
107 | struct { | ||
108 | int a_int:1; | ||
109 | int a_from_int_cr:1; | ||
110 | int a_to_int_cr:1; | ||
111 | int a_from_psr:1; | ||
112 | int a_from_cpuid:1; | ||
113 | int a_cover:1; | ||
114 | int a_bsw:1; | ||
115 | long reserved:57; | ||
116 | }; | ||
117 | }; | ||
118 | |||
119 | union vdc { | ||
120 | unsigned long value; | ||
121 | struct { | ||
122 | int d_vmsw:1; | ||
123 | int d_extint:1; | ||
124 | int d_ibr_dbr:1; | ||
125 | int d_pmc:1; | ||
126 | int d_to_pmd:1; | ||
127 | int d_itm:1; | ||
128 | long reserved:58; | ||
129 | }; | ||
130 | }; | ||
131 | |||
132 | struct mapped_regs { | ||
133 | union vac vac; | ||
134 | union vdc vdc; | ||
135 | unsigned long virt_env_vaddr; | ||
136 | unsigned long reserved1[29]; | ||
137 | unsigned long vhpi; | ||
138 | unsigned long reserved2[95]; | ||
139 | union { | ||
140 | unsigned long vgr[16]; | ||
141 | unsigned long bank1_regs[16]; /* bank1 regs (r16-r31) | ||
142 | when bank0 active */ | ||
143 | }; | ||
144 | union { | ||
145 | unsigned long vbgr[16]; | ||
146 | unsigned long bank0_regs[16]; /* bank0 regs (r16-r31) | ||
147 | when bank1 active */ | ||
148 | }; | ||
149 | unsigned long vnat; | ||
150 | unsigned long vbnat; | ||
151 | unsigned long vcpuid[5]; | ||
152 | unsigned long reserved3[11]; | ||
153 | unsigned long vpsr; | ||
154 | unsigned long vpr; | ||
155 | unsigned long reserved4[76]; | ||
156 | union { | ||
157 | unsigned long vcr[128]; | ||
158 | struct { | ||
159 | unsigned long dcr; /* CR0 */ | ||
160 | unsigned long itm; | ||
161 | unsigned long iva; | ||
162 | unsigned long rsv1[5]; | ||
163 | unsigned long pta; /* CR8 */ | ||
164 | unsigned long rsv2[7]; | ||
165 | unsigned long ipsr; /* CR16 */ | ||
166 | unsigned long isr; | ||
167 | unsigned long rsv3; | ||
168 | unsigned long iip; | ||
169 | unsigned long ifa; | ||
170 | unsigned long itir; | ||
171 | unsigned long iipa; | ||
172 | unsigned long ifs; | ||
173 | unsigned long iim; /* CR24 */ | ||
174 | unsigned long iha; | ||
175 | unsigned long rsv4[38]; | ||
176 | unsigned long lid; /* CR64 */ | ||
177 | unsigned long ivr; | ||
178 | unsigned long tpr; | ||
179 | unsigned long eoi; | ||
180 | unsigned long irr[4]; | ||
181 | unsigned long itv; /* CR72 */ | ||
182 | unsigned long pmv; | ||
183 | unsigned long cmcv; | ||
184 | unsigned long rsv5[5]; | ||
185 | unsigned long lrr0; /* CR80 */ | ||
186 | unsigned long lrr1; | ||
187 | unsigned long rsv6[46]; | ||
188 | }; | ||
189 | }; | ||
190 | union { | ||
191 | unsigned long reserved5[128]; | ||
192 | struct { | ||
193 | unsigned long precover_ifs; | ||
194 | unsigned long unat; /* not sure if this is needed | ||
195 | until NaT arch is done */ | ||
196 | int interrupt_collection_enabled; /* virtual psr.ic */ | ||
197 | |||
198 | /* virtual interrupt deliverable flag is | ||
199 | * evtchn_upcall_mask in shared info area now. | ||
200 | * interrupt_mask_addr is the address | ||
201 | * of evtchn_upcall_mask for current vcpu | ||
202 | */ | ||
203 | unsigned char *interrupt_mask_addr; | ||
204 | int pending_interruption; | ||
205 | unsigned char vpsr_pp; | ||
206 | unsigned char vpsr_dfh; | ||
207 | unsigned char hpsr_dfh; | ||
208 | unsigned char hpsr_mfh; | ||
209 | unsigned long reserved5_1[4]; | ||
210 | int metaphysical_mode; /* 1 = use metaphys mapping | ||
211 | 0 = use virtual */ | ||
212 | int banknum; /* 0 or 1, which virtual | ||
213 | register bank is active */ | ||
214 | unsigned long rrs[8]; /* region registers */ | ||
215 | unsigned long krs[8]; /* kernel registers */ | ||
216 | unsigned long tmp[16]; /* temp registers | ||
217 | (e.g. for hyperprivops) */ | ||
218 | |||
219 | /* itc paravirtualization | ||
220 | * vAR.ITC = mAR.ITC + itc_offset | ||
221 | * itc_last is the value most recently passed to | ||
222 | * the guest OS, in order to prevent it from | ||
223 | * going backwards. | ||
224 | */ | ||
225 | unsigned long itc_offset; | ||
226 | unsigned long itc_last; | ||
227 | }; | ||
228 | }; | ||
229 | }; | ||
230 | |||
231 | struct arch_vcpu_info { | ||
232 | /* nothing */ | ||
233 | }; | ||
234 | |||
235 | /* | ||
236 | * This structure is used for the magic page in the domain's pseudo-physical | ||
237 | * address space, and for the result of XENMEM_machine_memory_map. | ||
238 | * As the XENMEM_machine_memory_map result, | ||
239 | * xen_memory_map::nr_entries indicates the size in bytes, | ||
240 | * including struct xen_ia64_memmap_info, not the number of entries. | ||
241 | */ | ||
242 | struct xen_ia64_memmap_info { | ||
243 | uint64_t efi_memmap_size; /* size of EFI memory map */ | ||
244 | uint64_t efi_memdesc_size; /* size of an EFI memory map | ||
245 | * descriptor */ | ||
246 | uint32_t efi_memdesc_version; /* memory descriptor version */ | ||
247 | void *memdesc[0]; /* array of efi_memory_desc_t */ | ||
248 | }; | ||
249 | |||
250 | struct arch_shared_info { | ||
251 | /* PFN of the start_info page. */ | ||
252 | unsigned long start_info_pfn; | ||
253 | |||
254 | /* Interrupt vector for event channel. */ | ||
255 | int evtchn_vector; | ||
256 | |||
257 | /* PFN of memmap_info page */ | ||
258 | unsigned int memmap_info_num_pages; /* currently only = 1 case is | ||
259 | supported. */ | ||
260 | unsigned long memmap_info_pfn; | ||
261 | |||
262 | uint64_t pad[31]; | ||
263 | }; | ||
264 | |||
265 | struct xen_callback { | ||
266 | unsigned long ip; | ||
267 | }; | ||
268 | typedef struct xen_callback xen_callback_t; | ||
269 | |||
270 | #endif /* !__ASSEMBLY__ */ | ||
271 | |||
272 | #include <asm/pvclock-abi.h> | ||
273 | |||
274 | /* Size of the shared_info area (this is not related to page size). */ | ||
275 | #define XSI_SHIFT 14 | ||
276 | #define XSI_SIZE (1 << XSI_SHIFT) | ||
277 | /* Log size of mapped_regs area (64 KB - only 4KB is used). */ | ||
278 | #define XMAPPEDREGS_SHIFT 12 | ||
279 | #define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT) | ||
280 | /* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */ | ||
281 | #define XMAPPEDREGS_OFS XSI_SIZE | ||
282 | |||
283 | /* Hyperprivops. */ | ||
284 | #define HYPERPRIVOP_START 0x1 | ||
285 | #define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0) | ||
286 | #define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1) | ||
287 | #define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2) | ||
288 | #define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3) | ||
289 | #define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4) | ||
290 | #define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5) | ||
291 | #define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6) | ||
292 | #define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7) | ||
293 | #define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8) | ||
294 | #define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9) | ||
295 | #define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa) | ||
296 | #define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb) | ||
297 | #define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc) | ||
298 | #define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd) | ||
299 | #define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe) | ||
300 | #define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf) | ||
301 | #define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10) | ||
302 | #define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11) | ||
303 | #define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12) | ||
304 | #define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13) | ||
305 | #define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14) | ||
306 | #define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15) | ||
307 | #define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16) | ||
308 | #define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17) | ||
309 | #define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18) | ||
310 | #define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19) | ||
311 | #define HYPERPRIVOP_MAX (0x1a) | ||
312 | |||
313 | /* Fast and light hypercalls. */ | ||
314 | #define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1 | ||
315 | |||
316 | /* Xencomm macros. */ | ||
317 | #define XENCOMM_INLINE_MASK 0xf800000000000000UL | ||
318 | #define XENCOMM_INLINE_FLAG 0x8000000000000000UL | ||
319 | |||
320 | #ifndef __ASSEMBLY__ | ||
321 | |||
322 | /* | ||
323 | * Optimization features. | ||
324 | * The hypervisor may do some special optimizations for guests. This hypercall | ||
325 | * can be used to switch on/off these special optimizations. | ||
326 | */ | ||
327 | #define __HYPERVISOR_opt_feature 0x700UL | ||
328 | |||
329 | #define XEN_IA64_OPTF_OFF 0x0 | ||
330 | #define XEN_IA64_OPTF_ON 0x1 | ||
331 | |||
332 | /* | ||
333 | * If this feature is switched on, the hypervisor inserts the | ||
334 | * tlb entries without calling the guest's trap handler. | ||
335 | * This is useful in guests using region 7 for identity mapping | ||
336 | * like the linux kernel does. | ||
337 | */ | ||
338 | #define XEN_IA64_OPTF_IDENT_MAP_REG7 1 | ||
339 | |||
340 | /* Identity mapping of region 4 addresses in HVM. */ | ||
341 | #define XEN_IA64_OPTF_IDENT_MAP_REG4 2 | ||
342 | |||
343 | /* Identity mapping of region 5 addresses in HVM. */ | ||
344 | #define XEN_IA64_OPTF_IDENT_MAP_REG5 3 | ||
345 | |||
346 | #define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0) | ||
347 | |||
348 | struct xen_ia64_opt_feature { | ||
349 | unsigned long cmd; /* Which feature */ | ||
350 | unsigned char on; /* Switch feature on/off */ | ||
351 | union { | ||
352 | struct { | ||
353 | /* The page protection bit mask of the pte. | ||
354 | * This will be or'ed with the pte. */ | ||
355 | unsigned long pgprot; | ||
356 | unsigned long key; /* A protection key for itir.*/ | ||
357 | }; | ||
358 | }; | ||
359 | }; | ||
360 | |||
361 | #endif /* __ASSEMBLY__ */ | ||
362 | |||
363 | #endif /* _ASM_IA64_XEN_INTERFACE_H */ | ||
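As a short sketch of how the guest-handle macros at the top of this header compose (struct foo and example() are invented purely for illustration):

	struct foo { int x; };
	DEFINE_GUEST_HANDLE_STRUCT(foo);	/* typedef struct { struct foo *p; } __guest_handle_foo */

	static void example(struct foo *f)
	{
		GUEST_HANDLE(foo) h;		/* i.e. __guest_handle_foo */
		set_xen_guest_handle(h, f);	/* h.p = f */
	}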
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h deleted file mode 100644 index a90450983003..000000000000 --- a/arch/ia64/include/asm/xen/irq.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/irq.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_IA64_XEN_IRQ_H | ||
24 | #define _ASM_IA64_XEN_IRQ_H | ||
25 | |||
26 | /* | ||
27 | * The flat IRQ space is divided into two regions: | ||
28 | * 1. A one-to-one mapping of real physical IRQs. This space is only used | ||
29 | * if we have physical device-access privilege. This region is at the | ||
30 | * start of the IRQ space so that existing device drivers do not need | ||
31 | * to be modified to translate physical IRQ numbers into our IRQ space. | ||
32 | * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These | ||
33 | * are bound using the provided bind/unbind functions. | ||
34 | */ | ||
35 | |||
36 | #define XEN_PIRQ_BASE 0 | ||
37 | #define XEN_NR_PIRQS 256 | ||
38 | |||
39 | #define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS) | ||
40 | #define XEN_NR_DYNIRQS (NR_CPUS * 8) | ||
41 | |||
42 | #define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS) | ||
43 | |||
44 | #endif /* _ASM_IA64_XEN_IRQ_H */ | ||
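To make the split concrete, a worked example of the constants above (NR_CPUS = 4 is assumed purely for the arithmetic):

	/*
	 * XEN_PIRQ_BASE   = 0,   XEN_NR_PIRQS   = 256    ->  PIRQs     0..255
	 * XEN_DYNIRQ_BASE = 256, XEN_NR_DYNIRQS = 4 * 8  ->  dyn IRQs  256..287
	 * XEN_NR_IRQS     = 256 + 32 = 288
	 */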
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h deleted file mode 100644 index 00cf03e0cb82..000000000000 --- a/arch/ia64/include/asm/xen/minstate.h +++ /dev/null | |||
@@ -1,143 +0,0 @@ | |||
1 | |||
2 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE | ||
3 | /* read ar.itc in advance, and use it before leaving bank 0 */ | ||
4 | #define XEN_ACCOUNT_GET_STAMP \ | ||
5 | MOV_FROM_ITC(pUStk, p6, r20, r2); | ||
6 | #else | ||
7 | #define XEN_ACCOUNT_GET_STAMP | ||
8 | #endif | ||
9 | |||
10 | /* | ||
11 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | ||
12 | * the minimum state necessary that allows us to turn psr.ic back | ||
13 | * on. | ||
14 | * | ||
15 | * Assumed state upon entry: | ||
16 | * psr.ic: off | ||
17 | * r31: contains saved predicates (pr) | ||
18 | * | ||
19 | * Upon exit, the state is as follows: | ||
20 | * psr.ic: off | ||
21 | * r2 = points to &pt_regs.r16 | ||
22 | * r8 = contents of ar.ccv | ||
23 | * r9 = contents of ar.csd | ||
24 | * r10 = contents of ar.ssd | ||
25 | * r11 = FPSR_DEFAULT | ||
26 | * r12 = kernel sp (kernel virtual address) | ||
27 | * r13 = points to current task_struct (kernel virtual address) | ||
28 | * p15 = TRUE if psr.i is set in cr.ipsr | ||
29 | * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15: | ||
30 | * preserved | ||
31 | * CONFIG_XEN note: p6/p7 are not preserved | ||
32 | * | ||
33 | * Note that psr.ic is NOT turned on by this macro. This is so that | ||
34 | * we can pass interruption state as arguments to a handler. | ||
35 | */ | ||
36 | #define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \ | ||
37 | mov r16=IA64_KR(CURRENT); /* M */ \ | ||
38 | mov r27=ar.rsc; /* M */ \ | ||
39 | mov r20=r1; /* A */ \ | ||
40 | mov r25=ar.unat; /* M */ \ | ||
41 | MOV_FROM_IPSR(p0,r29); /* M */ \ | ||
42 | MOV_FROM_IIP(r28); /* M */ \ | ||
43 | mov r21=ar.fpsr; /* M */ \ | ||
44 | mov r26=ar.pfs; /* I */ \ | ||
45 | __COVER; /* B;; (or nothing) */ \ | ||
46 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \ | ||
47 | ;; \ | ||
48 | ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \ | ||
49 | st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \ | ||
50 | adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \ | ||
51 | /* switch from user to kernel RBS: */ \ | ||
52 | ;; \ | ||
53 | invala; /* M */ \ | ||
54 | /* SAVE_IFS;*/ /* see xen special handling below */ \ | ||
55 | cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ | ||
56 | ;; \ | ||
57 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ | ||
58 | ;; \ | ||
59 | (pUStk) mov.m r24=ar.rnat; \ | ||
60 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
61 | (pKStk) mov r1=sp; /* get sp */ \ | ||
62 | ;; \ | ||
63 | (pUStk) lfetch.fault.excl.nt1 [r22]; \ | ||
64 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
65 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
66 | ;; \ | ||
67 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
68 | (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ | ||
69 | ;; \ | ||
70 | (pUStk) mov r18=ar.bsp; \ | ||
71 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ | ||
72 | adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ | ||
73 | adds r16=PT(CR_IPSR),r1; \ | ||
74 | ;; \ | ||
75 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \ | ||
76 | st8 [r16]=r29; /* save cr.ipsr */ \ | ||
77 | ;; \ | ||
78 | lfetch.fault.excl.nt1 [r17]; \ | ||
79 | tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ | ||
80 | mov r29=b0 \ | ||
81 | ;; \ | ||
82 | WORKAROUND; \ | ||
83 | adds r16=PT(R8),r1; /* initialize first base pointer */ \ | ||
84 | adds r17=PT(R9),r1; /* initialize second base pointer */ \ | ||
85 | (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ | ||
86 | ;; \ | ||
87 | .mem.offset 0,0; st8.spill [r16]=r8,16; \ | ||
88 | .mem.offset 8,0; st8.spill [r17]=r9,16; \ | ||
89 | ;; \ | ||
90 | .mem.offset 0,0; st8.spill [r16]=r10,24; \ | ||
91 | movl r8=XSI_PRECOVER_IFS; \ | ||
92 | .mem.offset 8,0; st8.spill [r17]=r11,24; \ | ||
93 | ;; \ | ||
94 | /* xen special handling for possibly lazy cover */ \ | ||
95 | /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \ | ||
96 | ld8 r30=[r8]; \ | ||
97 | (pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \ | ||
98 | st8 [r16]=r28,16; /* save cr.iip */ \ | ||
99 | ;; \ | ||
100 | st8 [r17]=r30,16; /* save cr.ifs */ \ | ||
101 | mov r8=ar.ccv; \ | ||
102 | mov r9=ar.csd; \ | ||
103 | mov r10=ar.ssd; \ | ||
104 | movl r11=FPSR_DEFAULT; /* L-unit */ \ | ||
105 | ;; \ | ||
106 | st8 [r16]=r25,16; /* save ar.unat */ \ | ||
107 | st8 [r17]=r26,16; /* save ar.pfs */ \ | ||
108 | shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \ | ||
109 | ;; \ | ||
110 | st8 [r16]=r27,16; /* save ar.rsc */ \ | ||
111 | (pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \ | ||
112 | (pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \ | ||
113 | ;; /* avoid RAW on r16 & r17 */ \ | ||
114 | (pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \ | ||
115 | st8 [r17]=r31,16; /* save predicates */ \ | ||
116 | (pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \ | ||
117 | ;; \ | ||
118 | st8 [r16]=r29,16; /* save b0 */ \ | ||
119 | st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \ | ||
120 | cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \ | ||
121 | ;; \ | ||
122 | .mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \ | ||
123 | .mem.offset 8,0; st8.spill [r17]=r12,16; \ | ||
124 | adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \ | ||
125 | ;; \ | ||
126 | .mem.offset 0,0; st8.spill [r16]=r13,16; \ | ||
127 | .mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \ | ||
128 | mov r13=IA64_KR(CURRENT); /* establish `current' */ \ | ||
129 | ;; \ | ||
130 | .mem.offset 0,0; st8.spill [r16]=r15,16; \ | ||
131 | .mem.offset 8,0; st8.spill [r17]=r14,16; \ | ||
132 | ;; \ | ||
133 | .mem.offset 0,0; st8.spill [r16]=r2,16; \ | ||
134 | .mem.offset 8,0; st8.spill [r17]=r3,16; \ | ||
135 | XEN_ACCOUNT_GET_STAMP \ | ||
136 | adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ | ||
137 | ;; \ | ||
138 | EXTRA; \ | ||
139 | movl r1=__gp; /* establish kernel global pointer */ \ | ||
140 | ;; \ | ||
141 | ACCOUNT_SYS_ENTER \ | ||
142 | BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \ | ||
143 | ;; | ||
diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h deleted file mode 100644 index 96e42f97fa1f..000000000000 --- a/arch/ia64/include/asm/xen/page-coherent.h +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_XEN_PAGE_COHERENT_H | ||
2 | #define _ASM_IA64_XEN_PAGE_COHERENT_H | ||
3 | |||
4 | #include <asm/page.h> | ||
5 | #include <linux/dma-attrs.h> | ||
6 | #include <linux/dma-mapping.h> | ||
7 | |||
8 | static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, | ||
9 | dma_addr_t *dma_handle, gfp_t flags, | ||
10 | struct dma_attrs *attrs) | ||
11 | { | ||
12 | void *vstart = (void*)__get_free_pages(flags, get_order(size)); | ||
13 | *dma_handle = virt_to_phys(vstart); | ||
14 | return vstart; | ||
15 | } | ||
16 | |||
17 | static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, | ||
18 | void *cpu_addr, dma_addr_t dma_handle, | ||
19 | struct dma_attrs *attrs) | ||
20 | { | ||
21 | free_pages((unsigned long) cpu_addr, get_order(size)); | ||
22 | } | ||
23 | |||
24 | static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | ||
25 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
26 | struct dma_attrs *attrs) { } | ||
27 | |||
28 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
29 | size_t size, enum dma_data_direction dir, | ||
30 | struct dma_attrs *attrs) { } | ||
31 | |||
32 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
33 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } | ||
34 | |||
35 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | ||
36 | dma_addr_t handle, size_t size, enum dma_data_direction dir) { } | ||
37 | |||
38 | #endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */ | ||
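Consistent with the identity pfn/mfn mapping in asm/xen/page.h, "coherent" allocation here is just an ordinary page allocation whose physical address doubles as the DMA address, and the map/sync hooks are intentionally empty. A hedged usage sketch (the wrapper below is hypothetical):

	static void *example_dma_alloc(struct device *dev, size_t size,
				       dma_addr_t *dma_handle)
	{
		/* returns a kernel virtual address; *dma_handle is the
		 * matching physical address, used directly as the bus address */
		return xen_alloc_coherent_pages(dev, size, dma_handle,
						GFP_KERNEL, NULL);
	}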
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h deleted file mode 100644 index 03441a780b5b..000000000000 --- a/arch/ia64/include/asm/xen/page.h +++ /dev/null | |||
@@ -1,65 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/page.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_IA64_XEN_PAGE_H | ||
24 | #define _ASM_IA64_XEN_PAGE_H | ||
25 | |||
26 | #define INVALID_P2M_ENTRY (~0UL) | ||
27 | |||
28 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | ||
29 | { | ||
30 | return mfn; | ||
31 | } | ||
32 | |||
33 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | ||
34 | { | ||
35 | return pfn; | ||
36 | } | ||
37 | |||
38 | #define phys_to_machine_mapping_valid(_x) (1) | ||
39 | |||
40 | static inline void *mfn_to_virt(unsigned long mfn) | ||
41 | { | ||
42 | return __va(mfn << PAGE_SHIFT); | ||
43 | } | ||
44 | |||
45 | static inline unsigned long virt_to_mfn(void *virt) | ||
46 | { | ||
47 | return __pa(virt) >> PAGE_SHIFT; | ||
48 | } | ||
49 | |||
50 | /* for tpmfront.c */ | ||
51 | static inline unsigned long virt_to_machine(void *virt) | ||
52 | { | ||
53 | return __pa(virt); | ||
54 | } | ||
55 | |||
56 | static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
57 | { | ||
58 | /* nothing */ | ||
59 | } | ||
60 | |||
61 | #define pte_mfn(_x) pte_pfn(_x) | ||
62 | #define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */ | ||
63 | #define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */ | ||
64 | |||
65 | #endif /* _ASM_IA64_XEN_PAGE_H */ | ||
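Every conversion above is an identity (or a plain __pa()/__va()) round trip, since no separate machine-to-physical table is kept here. A small sketch, wrapped in a hypothetical helper:

	static void example_p2m_roundtrip(void)
	{
		unsigned long pfn = 0x1234;	/* arbitrary frame number */

		BUG_ON(mfn_to_pfn(pfn_to_mfn(pfn)) != pfn);	/* identity both ways */
		BUG_ON(virt_to_mfn(mfn_to_virt(pfn)) != pfn);	/* __va()/__pa() round trip */
	}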
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h deleted file mode 100644 index eae944e88846..000000000000 --- a/arch/ia64/include/asm/xen/patchlist.h +++ /dev/null | |||
@@ -1,38 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/include/asm/xen/patchlist.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #define __paravirt_start_gate_fsyscall_patchlist \ | ||
24 | __xen_start_gate_fsyscall_patchlist | ||
25 | #define __paravirt_end_gate_fsyscall_patchlist \ | ||
26 | __xen_end_gate_fsyscall_patchlist | ||
27 | #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ | ||
28 | __xen_start_gate_brl_fsys_bubble_down_patchlist | ||
29 | #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ | ||
30 | __xen_end_gate_brl_fsys_bubble_down_patchlist | ||
31 | #define __paravirt_start_gate_vtop_patchlist \ | ||
32 | __xen_start_gate_vtop_patchlist | ||
33 | #define __paravirt_end_gate_vtop_patchlist \ | ||
34 | __xen_end_gate_vtop_patchlist | ||
35 | #define __paravirt_start_gate_mckinley_e9_patchlist \ | ||
36 | __xen_start_gate_mckinley_e9_patchlist | ||
37 | #define __paravirt_end_gate_mckinley_e9_patchlist \ | ||
38 | __xen_end_gate_mckinley_e9_patchlist | ||
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h deleted file mode 100644 index fb4ec5e0b066..000000000000 --- a/arch/ia64/include/asm/xen/privop.h +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | #ifndef _ASM_IA64_XEN_PRIVOP_H | ||
2 | #define _ASM_IA64_XEN_PRIVOP_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2005 Hewlett-Packard Co | ||
6 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
7 | * | ||
8 | * Paravirtualizations of privileged operations for Xen/ia64 | ||
9 | * | ||
10 | * | ||
11 | * inline privop and paravirt_alt support | ||
12 | * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp> | ||
13 | * VA Linux Systems Japan K.K. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef __ASSEMBLY__ | ||
18 | #include <linux/types.h> /* arch-ia64.h requires uint64_t */ | ||
19 | #endif | ||
20 | #include <asm/xen/interface.h> | ||
21 | |||
22 | /* At 1 MB, before per-cpu space but still addressable using addl instead | ||
23 | of movl. */ | ||
24 | #define XSI_BASE 0xfffffffffff00000 | ||
25 | |||
26 | /* Address of mapped regs. */ | ||
27 | #define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE) | ||
28 | |||
29 | #ifdef __ASSEMBLY__ | ||
30 | #define XEN_HYPER_RFI break HYPERPRIVOP_RFI | ||
31 | #define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT | ||
32 | #define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT | ||
33 | #define XEN_HYPER_COVER break HYPERPRIVOP_COVER | ||
34 | #define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D | ||
35 | #define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I | ||
36 | #define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I | ||
37 | #define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR | ||
38 | #define XEN_HYPER_THASH break HYPERPRIVOP_THASH | ||
39 | #define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D | ||
40 | #define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR | ||
41 | #define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR | ||
42 | #define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4 | ||
43 | |||
44 | #define XSI_IFS (XSI_BASE + XSI_IFS_OFS) | ||
45 | #define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS) | ||
46 | #define XSI_IFA (XSI_BASE + XSI_IFA_OFS) | ||
47 | #define XSI_ISR (XSI_BASE + XSI_ISR_OFS) | ||
48 | #define XSI_IIM (XSI_BASE + XSI_IIM_OFS) | ||
49 | #define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS) | ||
50 | #define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) | ||
51 | #define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS) | ||
52 | #define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS) | ||
53 | #define XSI_IIP (XSI_BASE + XSI_IIP_OFS) | ||
54 | #define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS) | ||
55 | #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) | ||
56 | #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) | ||
57 | #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) | ||
58 | #define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) | ||
59 | #define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) | ||
60 | #endif | ||
61 | |||
62 | #ifndef __ASSEMBLY__ | ||
63 | |||
64 | /************************************************/ | ||
65 | /* Instructions paravirtualized for correctness */ | ||
66 | /************************************************/ | ||
67 | |||
68 | /* "fc" and "thash" are privilege-sensitive instructions, meaning they | ||
69 | * may have different semantics depending on whether they are executed | ||
70 | * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't | ||
71 | * be allowed to execute directly, lest incorrect semantics result. */ | ||
72 | extern void xen_fc(void *addr); | ||
73 | extern unsigned long xen_thash(unsigned long addr); | ||
74 | |||
75 | /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" | ||
76 | * is not currently used (though it may be in a long-format VHPT system!) | ||
77 | * and the semantics of cover only change if psr.ic is off which is very | ||
78 | * rare (and currently non-existent outside of assembly code). */ | ||
79 | |||
80 | /* There are also privilege-sensitive registers. These registers are | ||
81 | * readable at any privilege level but only writable at PL0. */ | ||
82 | extern unsigned long xen_get_cpuid(int index); | ||
83 | extern unsigned long xen_get_pmd(int index); | ||
84 | |||
85 | #ifndef ASM_SUPPORTED | ||
86 | extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ | ||
87 | extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ | ||
88 | #endif | ||
89 | |||
90 | /************************************************/ | ||
91 | /* Instructions paravirtualized for performance */ | ||
92 | /************************************************/ | ||
93 | |||
94 | /* Xen uses memory-mapped virtual privileged registers for access to many | ||
95 | * performance-sensitive privileged registers. Some, like the processor | ||
96 | * status register (psr), are broken up into multiple memory locations. | ||
97 | * Others, like "pend", are abstractions based on privileged registers. | ||
98 | * "Pend" is guaranteed to be set if reading cr.ivr would return a | ||
99 | * (non-spurious) interrupt. */ | ||
100 | #define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE) | ||
101 | |||
102 | #define XSI_PSR_I \ | ||
103 | (*XEN_MAPPEDREGS->interrupt_mask_addr) | ||
104 | #define xen_get_virtual_psr_i() \ | ||
105 | (!XSI_PSR_I) | ||
106 | #define xen_set_virtual_psr_i(_val) \ | ||
107 | ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; }) | ||
108 | #define xen_set_virtual_psr_ic(_val) \ | ||
109 | ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; }) | ||
110 | #define xen_get_virtual_pend() \ | ||
111 | (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) | ||
112 | |||
113 | #ifndef ASM_SUPPORTED | ||
114 | /* Although all privileged operations can be left to trap and will | ||
115 | * be properly handled by Xen, some are frequent enough that we use | ||
116 | * hyperprivops for performance. */ | ||
117 | extern unsigned long xen_get_psr(void); | ||
118 | extern unsigned long xen_get_ivr(void); | ||
119 | extern unsigned long xen_get_tpr(void); | ||
120 | extern void xen_hyper_ssm_i(void); | ||
121 | extern void xen_set_itm(unsigned long); | ||
122 | extern void xen_set_tpr(unsigned long); | ||
123 | extern void xen_eoi(unsigned long); | ||
124 | extern unsigned long xen_get_rr(unsigned long index); | ||
125 | extern void xen_set_rr(unsigned long index, unsigned long val); | ||
126 | extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | ||
127 | unsigned long val2, unsigned long val3, | ||
128 | unsigned long val4); | ||
129 | extern void xen_set_kr(unsigned long index, unsigned long val); | ||
130 | extern void xen_ptcga(unsigned long addr, unsigned long size); | ||
131 | #endif /* !ASM_SUPPORTED */ | ||
132 | |||
133 | #endif /* !__ASSEMBLY__ */ | ||
134 | |||
135 | #endif /* _ASM_IA64_XEN_PRIVOP_H */ | ||
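One detail worth noting in the removed header: xen_get_virtual_psr_i() reads an interrupt *mask* byte from the shared mapped-regs page and negates it, so "interrupts enabled" is represented as mask == 0. A small userspace sketch of just that inversion (the plain variable below stands in for *XEN_MAPPEDREGS->interrupt_mask_addr and is not the real shared-page layout):

#include <assert.h>
#include <stdint.h>

static uint8_t interrupt_mask;	/* stand-in for *XEN_MAPPEDREGS->interrupt_mask_addr */

static int get_virtual_psr_i(void)
{
	return !interrupt_mask;		/* enabled when the mask byte is clear */
}

static void set_virtual_psr_i(int val)
{
	interrupt_mask = val ? 0 : 1;	/* note the inversion, as in the macro above */
}

int main(void)
{
	set_virtual_psr_i(1);
	assert(get_virtual_psr_i() == 1);
	set_virtual_psr_i(0);
	assert(get_virtual_psr_i() == 0);
	return 0;
}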
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h deleted file mode 100644 index 20b2950c71b6..000000000000 --- a/arch/ia64/include/asm/xen/xcom_hcall.h +++ /dev/null | |||
@@ -1,51 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_IA64_XEN_XCOM_HCALL_H | ||
20 | #define _ASM_IA64_XEN_XCOM_HCALL_H | ||
21 | |||
22 | /* These functions create an inline or mini descriptor for the parameters and | ||
23 | call the corresponding xencomm_arch_hypercall_X. | ||
24 | Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless | ||
25 | they want to use their own wrapper. */ | ||
26 | extern int xencomm_hypercall_console_io(int cmd, int count, char *str); | ||
27 | |||
28 | extern int xencomm_hypercall_event_channel_op(int cmd, void *op); | ||
29 | |||
30 | extern int xencomm_hypercall_xen_version(int cmd, void *arg); | ||
31 | |||
32 | extern int xencomm_hypercall_physdev_op(int cmd, void *op); | ||
33 | |||
34 | extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, | ||
35 | unsigned int count); | ||
36 | |||
37 | extern int xencomm_hypercall_sched_op(int cmd, void *arg); | ||
38 | |||
39 | extern int xencomm_hypercall_multicall(void *call_list, int nr_calls); | ||
40 | |||
41 | extern int xencomm_hypercall_callback_op(int cmd, void *arg); | ||
42 | |||
43 | extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg); | ||
44 | |||
45 | extern int xencomm_hypercall_suspend(unsigned long srec); | ||
46 | |||
47 | extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg); | ||
48 | |||
49 | extern long xencomm_hypercall_opt_feature(void *arg); | ||
50 | |||
51 | #endif /* _ASM_IA64_XEN_XCOM_HCALL_H */ | ||
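The xencomm_hypercall_* wrappers declared above exist because the hypervisor cannot follow guest virtual pointers on ia64; each hypercall argument has to be re-described as a list of physical addresses first. A rough userspace sketch of that idea (the descriptor layout and the fake virt-to-phys conversion are invented for illustration, not the real xencomm ABI):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_xencomm_desc {
	uint32_t nr_addrs;
	uint64_t address[4];	/* "machine" addresses of each chunk */
};

/* Pretend virt-to-phys conversion, good enough for the sketch. */
static uint64_t fake_virt_to_phys(const void *v)
{
	return (uint64_t)(uintptr_t)v & 0x0000ffffffffffffULL;
}

/* Real code would split the buffer at page boundaries; one chunk
 * suffices for a small argument here. */
static void fake_xencomm_create(const void *buf, struct fake_xencomm_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->nr_addrs = 1;
	desc->address[0] = fake_virt_to_phys(buf);
}

int main(void)
{
	char arg[64] = "hypercall argument";
	struct fake_xencomm_desc d;

	fake_xencomm_create(arg, &d);
	printf("argument described as %u chunk(s), first at %#llx\n",
	       (unsigned)d.nr_addrs, (unsigned long long)d.address[0]);
	return 0;
}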
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h deleted file mode 100644 index cded677bebf2..000000000000 --- a/arch/ia64/include/asm/xen/xencomm.h +++ /dev/null | |||
@@ -1,42 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_IA64_XEN_XENCOMM_H | ||
20 | #define _ASM_IA64_XEN_XENCOMM_H | ||
21 | |||
22 | #include <xen/xencomm.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | |||
25 | /* Must be called before any hypercall. */ | ||
26 | extern void xencomm_initialize(void); | ||
27 | extern int xencomm_is_initialized(void); | ||
28 | |||
29 | /* Check whether virtual contiguity implies physical contiguity | ||
30 | * for the passed virtual address. | ||
31 | * On ia64 this holds for the identity-mapped area in region 7 and for | ||
32 | * the piece of region 5 mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]. | ||
33 | */ | ||
34 | static inline int xencomm_is_phys_contiguous(unsigned long addr) | ||
35 | { | ||
36 | return (PAGE_OFFSET <= addr && | ||
37 | addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) || | ||
38 | (KERNEL_START <= addr && | ||
39 | addr < KERNEL_START + KERNEL_TR_PAGE_SIZE); | ||
40 | } | ||
41 | |||
42 | #endif /* _ASM_IA64_XEN_XENCOMM_H */ | ||
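xencomm_is_phys_contiguous() above is just a pair of range checks: an address can be handed to the hypervisor as a single chunk only if it lies in one of the two regions that are identity- or TR-mapped. A tiny sketch of the same two-range test with made-up bounds (the constants below are placeholders, not the real ia64 values):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bounds standing in for PAGE_OFFSET/IA64_MAX_PHYS_BITS
 * and KERNEL_START/KERNEL_TR_PAGE_SIZE; not the real ia64 values. */
#define FAKE_IDENT_START  0xe000000000000000ULL
#define FAKE_IDENT_END    0xe000100000000000ULL
#define FAKE_KERNEL_START 0xa000000100000000ULL
#define FAKE_KERNEL_END   0xa000000104000000ULL

static bool is_phys_contiguous(unsigned long long addr)
{
	return (FAKE_IDENT_START <= addr && addr < FAKE_IDENT_END) ||
	       (FAKE_KERNEL_START <= addr && addr < FAKE_KERNEL_END);
}

int main(void)
{
	printf("%d\n", is_phys_contiguous(0xe000000000001000ULL));	/* 1 */
	printf("%d\n", is_phys_contiguous(0x2000000000001000ULL));	/* 0 */
	return 0;
}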
diff --git a/arch/ia64/include/uapi/asm/break.h b/arch/ia64/include/uapi/asm/break.h index e90c40ec9edf..f03402039896 100644 --- a/arch/ia64/include/uapi/asm/break.h +++ b/arch/ia64/include/uapi/asm/break.h | |||
@@ -20,13 +20,4 @@ | |||
20 | */ | 20 | */ |
21 | #define __IA64_BREAK_SYSCALL 0x100000 | 21 | #define __IA64_BREAK_SYSCALL 0x100000 |
22 | 22 | ||
23 | /* | ||
24 | * Xen specific break numbers: | ||
25 | */ | ||
26 | #define __IA64_XEN_HYPERCALL 0x1000 | ||
27 | /* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used | ||
28 | for xen hyperprivops */ | ||
29 | #define __IA64_XEN_HYPERPRIVOP_START 0x1 | ||
30 | #define __IA64_XEN_HYPERPRIVOP_MAX 0x1a | ||
31 | |||
32 | #endif /* _ASM_IA64_BREAK_H */ | 23 | #endif /* _ASM_IA64_BREAK_H */ |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 59d52e3aef12..bfa19311e09c 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -53,7 +53,6 @@ | |||
53 | #include <asm/numa.h> | 53 | #include <asm/numa.h> |
54 | #include <asm/sal.h> | 54 | #include <asm/sal.h> |
55 | #include <asm/cyclone.h> | 55 | #include <asm/cyclone.h> |
56 | #include <asm/xen/hypervisor.h> | ||
57 | 56 | ||
58 | #define BAD_MADT_ENTRY(entry, end) ( \ | 57 | #define BAD_MADT_ENTRY(entry, end) ( \ |
59 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ | 58 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ |
@@ -120,8 +119,6 @@ acpi_get_sysname(void) | |||
120 | return "uv"; | 119 | return "uv"; |
121 | else | 120 | else |
122 | return "sn2"; | 121 | return "sn2"; |
123 | } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { | ||
124 | return "xen"; | ||
125 | } | 122 | } |
126 | 123 | ||
127 | #ifdef CONFIG_INTEL_IOMMU | 124 | #ifdef CONFIG_INTEL_IOMMU |
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 46c9e3007315..60ef83e6db71 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -16,9 +16,6 @@ | |||
16 | #include <asm/sigcontext.h> | 16 | #include <asm/sigcontext.h> |
17 | #include <asm/mca.h> | 17 | #include <asm/mca.h> |
18 | 18 | ||
19 | #include <asm/xen/interface.h> | ||
20 | #include <asm/xen/hypervisor.h> | ||
21 | |||
22 | #include "../kernel/sigframe.h" | 19 | #include "../kernel/sigframe.h" |
23 | #include "../kernel/fsyscall_gtod_data.h" | 20 | #include "../kernel/fsyscall_gtod_data.h" |
24 | 21 | ||
@@ -290,33 +287,4 @@ void foo(void) | |||
290 | DEFINE(IA64_ITC_LASTCYCLE_OFFSET, | 287 | DEFINE(IA64_ITC_LASTCYCLE_OFFSET, |
291 | offsetof (struct itc_jitter_data_t, itc_lastcycle)); | 288 | offsetof (struct itc_jitter_data_t, itc_lastcycle)); |
292 | 289 | ||
293 | #ifdef CONFIG_XEN | ||
294 | BLANK(); | ||
295 | |||
296 | DEFINE(XEN_NATIVE_ASM, XEN_NATIVE); | ||
297 | DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN); | ||
298 | |||
299 | #define DEFINE_MAPPED_REG_OFS(sym, field) \ | ||
300 | DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field))) | ||
301 | |||
302 | DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr); | ||
303 | DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr); | ||
304 | DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip); | ||
305 | DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs); | ||
306 | DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs); | ||
307 | DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr); | ||
308 | DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa); | ||
309 | DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa); | ||
310 | DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim); | ||
311 | DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha); | ||
312 | DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir); | ||
313 | DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled); | ||
314 | DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum); | ||
315 | DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]); | ||
316 | DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); | ||
317 | DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); | ||
318 | DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); | ||
319 | DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); | ||
320 | DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); | ||
321 | #endif /* CONFIG_XEN */ | ||
322 | } | 290 | } |
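The DEFINE_MAPPED_REG_OFS() lines deleted above follow the usual asm-offsets pattern: C code computes offsetof() values and emits them in a form the build can scrape into a generated header that assembly sources include. A self-contained userspace sketch of that pattern (the struct and the EMIT macro are illustrative; the real kernel uses the kbuild DEFINE() helper and an intermediate assembly file):

#include <stddef.h>
#include <stdio.h>

struct fake_mapped_regs {
	unsigned long ipsr;
	unsigned long iip;
	unsigned char interrupt_mask_addr;
};

/* Emit "#define NAME value" lines, which a build step would redirect
 * into a generated header for assembly code to include. */
#define EMIT(name, value) \
	printf("#define %s %zu\n", #name, (size_t)(value))

int main(void)
{
	EMIT(XSI_IPSR_OFS, offsetof(struct fake_mapped_regs, ipsr));
	EMIT(XSI_IIP_OFS, offsetof(struct fake_mapped_regs, iip));
	EMIT(XSI_PSR_I_ADDR_OFS,
	     offsetof(struct fake_mapped_regs, interrupt_mask_addr));
	return 0;
}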
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 991ca336b8a2..e6f80fcf013b 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -416,8 +416,6 @@ start_ap: | |||
416 | 416 | ||
417 | default_setup_hook = 0 // Currently nothing needs to be done. | 417 | default_setup_hook = 0 // Currently nothing needs to be done. |
418 | 418 | ||
419 | .weak xen_setup_hook | ||
420 | |||
421 | .global hypervisor_type | 419 | .global hypervisor_type |
422 | hypervisor_type: | 420 | hypervisor_type: |
423 | data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT | 421 | data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT |
@@ -426,7 +424,6 @@ hypervisor_type: | |||
426 | 424 | ||
427 | hypervisor_setup_hooks: | 425 | hypervisor_setup_hooks: |
428 | data8 default_setup_hook | 426 | data8 default_setup_hook |
429 | data8 xen_setup_hook | ||
430 | num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 | 427 | num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8 |
431 | .previous | 428 | .previous |
432 | 429 | ||
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c index ee564575148e..f6769cd54bd9 100644 --- a/arch/ia64/kernel/nr-irqs.c +++ b/arch/ia64/kernel/nr-irqs.c | |||
@@ -10,15 +10,11 @@ | |||
10 | #include <linux/kbuild.h> | 10 | #include <linux/kbuild.h> |
11 | #include <linux/threads.h> | 11 | #include <linux/threads.h> |
12 | #include <asm/native/irq.h> | 12 | #include <asm/native/irq.h> |
13 | #include <asm/xen/irq.h> | ||
14 | 13 | ||
15 | void foo(void) | 14 | void foo(void) |
16 | { | 15 | { |
17 | union paravirt_nr_irqs_max { | 16 | union paravirt_nr_irqs_max { |
18 | char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; | 17 | char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS]; |
19 | #ifdef CONFIG_XEN | ||
20 | char xen_nr_irqs[XEN_NR_IRQS]; | ||
21 | #endif | ||
22 | }; | 18 | }; |
23 | 19 | ||
24 | DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); | 20 | DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max)); |
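The hunk above also shows a neat trick that survives in the native-only version: NR_IRQS is sized by taking sizeof() of a union of char arrays, which yields the maximum of the candidate counts at compile time without any preprocessor arithmetic. A standalone sketch of the same trick (the counts are made up):

#include <stdio.h>

#define NATIVE_NR_IRQS 256
#define OTHER_NR_IRQS  300	/* e.g. what a hypervisor port might need */

/* sizeof of the union is the size of its largest member, so this is
 * max(NATIVE_NR_IRQS, OTHER_NR_IRQS), evaluated entirely at compile time. */
union nr_irqs_max {
	char native[NATIVE_NR_IRQS];
	char other[OTHER_NR_IRQS];
};

enum { NR_IRQS = sizeof(union nr_irqs_max) };

int main(void)
{
	printf("NR_IRQS = %d\n", NR_IRQS);	/* prints 300 */
	return 0;
}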
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h index 64d6d810c64b..1ad7512b5f65 100644 --- a/arch/ia64/kernel/paravirt_inst.h +++ b/arch/ia64/kernel/paravirt_inst.h | |||
@@ -22,9 +22,6 @@ | |||
22 | 22 | ||
23 | #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK | 23 | #ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK |
24 | #include <asm/native/pvchk_inst.h> | 24 | #include <asm/native/pvchk_inst.h> |
25 | #elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN) | ||
26 | #include <asm/xen/inst.h> | ||
27 | #include <asm/xen/minstate.h> | ||
28 | #else | 25 | #else |
29 | #include <asm/native/inst.h> | 26 | #include <asm/native/inst.h> |
30 | #endif | 27 | #endif |
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h index 0684aa6c6507..67cffc3643a3 100644 --- a/arch/ia64/kernel/paravirt_patchlist.h +++ b/arch/ia64/kernel/paravirt_patchlist.h | |||
@@ -20,9 +20,5 @@ | |||
20 | * | 20 | * |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) | ||
24 | #include <asm/xen/patchlist.h> | ||
25 | #else | ||
26 | #include <asm/native/patchlist.h> | 23 | #include <asm/native/patchlist.h> |
27 | #endif | ||
28 | 24 | ||
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0ccb28fab27e..84f8a52ac5ae 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -182,12 +182,6 @@ SECTIONS { | |||
182 | __start_gate_section = .; | 182 | __start_gate_section = .; |
183 | *(.data..gate) | 183 | *(.data..gate) |
184 | __stop_gate_section = .; | 184 | __stop_gate_section = .; |
185 | #ifdef CONFIG_XEN | ||
186 | . = ALIGN(PAGE_SIZE); | ||
187 | __xen_start_gate_section = .; | ||
188 | *(.data..gate.xen) | ||
189 | __xen_stop_gate_section = .; | ||
190 | #endif | ||
191 | } | 185 | } |
192 | /* | 186 | /* |
193 | * make sure the gate page doesn't expose | 187 | * make sure the gate page doesn't expose |
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig deleted file mode 100644 index 5d8a06b0ddf7..000000000000 --- a/arch/ia64/xen/Kconfig +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | # | ||
2 | # This Kconfig describes xen/ia64 options | ||
3 | # | ||
4 | |||
5 | config XEN | ||
6 | bool "Xen hypervisor support" | ||
7 | default y | ||
8 | depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB | ||
9 | select XEN_XENCOMM | ||
10 | select NO_IDLE_HZ | ||
11 | # the following are required for save/restore. | ||
12 | select ARCH_SUSPEND_POSSIBLE | ||
13 | select SUSPEND | ||
14 | select PM_SLEEP | ||
15 | help | ||
16 | Enable Xen hypervisor support. The resulting kernel runs | ||
17 | both as a guest OS on Xen and natively on hardware. | ||
18 | |||
19 | config XEN_XENCOMM | ||
20 | depends on XEN | ||
21 | bool | ||
22 | |||
23 | config NO_IDLE_HZ | ||
24 | depends on XEN | ||
25 | bool | ||
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile deleted file mode 100644 index e6f4a0a74228..000000000000 --- a/arch/ia64/xen/Makefile +++ /dev/null | |||
@@ -1,37 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for Xen components | ||
3 | # | ||
4 | |||
5 | obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ | ||
6 | hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ | ||
7 | gate-data.o | ||
8 | |||
9 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | ||
10 | |||
11 | # The gate DSO image is built using a special linker script. | ||
12 | include $(srctree)/arch/ia64/kernel/Makefile.gate | ||
13 | |||
14 | # tell the build these files are compiled for Xen | ||
15 | CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN | ||
16 | AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN | ||
17 | |||
18 | # use the same files as native. | ||
19 | $(obj)/gate.o: $(src)/../kernel/gate.S FORCE | ||
20 | $(call if_changed_dep,as_o_S) | ||
21 | $(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE | ||
22 | $(call if_changed_dep,cpp_lds_S) | ||
23 | |||
24 | |||
25 | AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN | ||
26 | |||
27 | # xen multi compile | ||
28 | ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S | ||
29 | ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) | ||
30 | obj-y += $(ASM_PARAVIRT_OBJS) | ||
31 | define paravirtualized_xen | ||
32 | AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN | ||
33 | endef | ||
34 | $(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o)))) | ||
35 | |||
36 | $(obj)/xen-%.o: $(src)/../kernel/%.S FORCE | ||
37 | $(call if_changed_dep,as_o_S) | ||
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S deleted file mode 100644 index 6f95b6b32a4e..000000000000 --- a/arch/ia64/xen/gate-data.S +++ /dev/null | |||
@@ -1,3 +0,0 @@ | |||
1 | .section .data..gate.xen, "aw" | ||
2 | |||
3 | .incbin "arch/ia64/xen/gate.so" | ||
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c deleted file mode 100644 index c18281332f84..000000000000 --- a/arch/ia64/xen/grant-table.c +++ /dev/null | |||
@@ -1,94 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/grant-table.c | ||
3 | * | ||
4 | * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/mm.h> | ||
27 | |||
28 | #include <xen/interface/xen.h> | ||
29 | #include <xen/interface/memory.h> | ||
30 | #include <xen/grant_table.h> | ||
31 | |||
32 | #include <asm/xen/hypervisor.h> | ||
33 | |||
34 | /**************************************************************************** | ||
35 | * grant table hack | ||
36 | * cmd: GNTTABOP_xxx | ||
37 | */ | ||
38 | |||
39 | int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, | ||
40 | unsigned long max_nr_gframes, | ||
41 | struct grant_entry **__shared) | ||
42 | { | ||
43 | *__shared = __va(frames[0] << PAGE_SHIFT); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | void arch_gnttab_unmap_shared(struct grant_entry *shared, | ||
48 | unsigned long nr_gframes) | ||
49 | { | ||
50 | /* nothing */ | ||
51 | } | ||
52 | |||
53 | static void | ||
54 | gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop) | ||
55 | { | ||
56 | uint32_t flags; | ||
57 | |||
58 | flags = uop->flags; | ||
59 | |||
60 | if (flags & GNTMAP_host_map) { | ||
61 | if (flags & GNTMAP_application_map) { | ||
62 | printk(KERN_DEBUG | ||
63 | "GNTMAP_application_map is not supported yet: " | ||
64 | "flags 0x%x\n", flags); | ||
65 | BUG(); | ||
66 | } | ||
67 | if (flags & GNTMAP_contains_pte) { | ||
68 | printk(KERN_DEBUG | ||
69 | "GNTMAP_contains_pte is not supported yet: " | ||
70 | "flags 0x%x\n", flags); | ||
71 | BUG(); | ||
72 | } | ||
73 | } else if (flags & GNTMAP_device_map) { | ||
74 | printk("GNTMAP_device_map is not supported yet 0x%x\n", flags); | ||
75 | BUG(); /* not yet. actually this flag is not used. */ | ||
76 | } else { | ||
77 | BUG(); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | int | ||
82 | HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) | ||
83 | { | ||
84 | if (cmd == GNTTABOP_map_grant_ref) { | ||
85 | unsigned int i; | ||
86 | for (i = 0; i < count; i++) { | ||
87 | gnttab_map_grant_ref_pre( | ||
88 | (struct gnttab_map_grant_ref *)uop + i); | ||
89 | } | ||
90 | } | ||
91 | return xencomm_hypercall_grant_table_op(cmd, uop, count); | ||
92 | } | ||
93 | |||
94 | EXPORT_SYMBOL(HYPERVISOR_grant_table_op); | ||
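HYPERVISOR_grant_table_op() above is a thin validating shim: it walks the array of map_grant_ref requests, BUGs on flag combinations the ia64 port never implemented, and only then forwards the batch through xencomm. A simplified userspace sketch of that validate-then-forward shape (the types, flag values and forwarding function are stand-ins, not the real grant-table API):

#include <stdio.h>
#include <stdlib.h>

#define FAKE_GNTMAP_host_map   0x1
#define FAKE_GNTMAP_device_map 0x2

struct fake_map_grant_ref {
	unsigned int flags;
};

static int forward_to_hypervisor(struct fake_map_grant_ref *ops, unsigned int count)
{
	printf("forwarding %u map requests\n", count);
	return 0;
}

static int fake_grant_table_op(struct fake_map_grant_ref *ops, unsigned int count)
{
	unsigned int i;

	/* Reject anything the (hypothetical) port does not support
	 * before the batch ever reaches the hypervisor. */
	for (i = 0; i < count; i++) {
		if (!(ops[i].flags & FAKE_GNTMAP_host_map)) {
			fprintf(stderr, "unsupported flags 0x%x\n", ops[i].flags);
			abort();
		}
	}
	return forward_to_hypervisor(ops, count);
}

int main(void)
{
	struct fake_map_grant_ref ops[2] = {
		{ .flags = FAKE_GNTMAP_host_map },
		{ .flags = FAKE_GNTMAP_host_map },
	};
	return fake_grant_table_op(ops, 2);
}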
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S deleted file mode 100644 index 08847aa12583..000000000000 --- a/arch/ia64/xen/hypercall.S +++ /dev/null | |||
@@ -1,88 +0,0 @@ | |||
1 | /* | ||
2 | * Support routines for Xen hypercalls | ||
3 | * | ||
4 | * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> | ||
5 | * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com> | ||
6 | */ | ||
7 | |||
8 | #include <asm/asmmacro.h> | ||
9 | #include <asm/intrinsics.h> | ||
10 | #include <asm/xen/privop.h> | ||
11 | |||
12 | #ifdef __INTEL_COMPILER | ||
13 | /* | ||
14 | * Hypercalls without parameter. | ||
15 | */ | ||
16 | #define __HCALL0(name,hcall) \ | ||
17 | GLOBAL_ENTRY(name); \ | ||
18 | break hcall; \ | ||
19 | br.ret.sptk.many rp; \ | ||
20 | END(name) | ||
21 | |||
22 | /* | ||
23 | * Hypercalls with 1 parameter. | ||
24 | */ | ||
25 | #define __HCALL1(name,hcall) \ | ||
26 | GLOBAL_ENTRY(name); \ | ||
27 | mov r8=r32; \ | ||
28 | break hcall; \ | ||
29 | br.ret.sptk.many rp; \ | ||
30 | END(name) | ||
31 | |||
32 | /* | ||
33 | * Hypercalls with 2 parameters. | ||
34 | */ | ||
35 | #define __HCALL2(name,hcall) \ | ||
36 | GLOBAL_ENTRY(name); \ | ||
37 | mov r8=r32; \ | ||
38 | mov r9=r33; \ | ||
39 | break hcall; \ | ||
40 | br.ret.sptk.many rp; \ | ||
41 | END(name) | ||
42 | |||
43 | __HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR) | ||
44 | __HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR) | ||
45 | __HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR) | ||
46 | __HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I) | ||
47 | |||
48 | __HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR) | ||
49 | __HCALL1(xen_eoi, HYPERPRIVOP_EOI) | ||
50 | __HCALL1(xen_thash, HYPERPRIVOP_THASH) | ||
51 | __HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM) | ||
52 | __HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR) | ||
53 | __HCALL1(xen_fc, HYPERPRIVOP_FC) | ||
54 | __HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID) | ||
55 | __HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD) | ||
56 | |||
57 | __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) | ||
58 | __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) | ||
59 | __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) | ||
60 | |||
61 | GLOBAL_ENTRY(xen_set_rr0_to_rr4) | ||
62 | mov r8=r32 | ||
63 | mov r9=r33 | ||
64 | mov r10=r34 | ||
65 | mov r11=r35 | ||
66 | mov r14=r36 | ||
67 | XEN_HYPER_SET_RR0_TO_RR4 | ||
68 | br.ret.sptk.many rp | ||
69 | ;; | ||
70 | END(xen_set_rr0_to_rr4) | ||
71 | #endif | ||
72 | |||
73 | GLOBAL_ENTRY(xen_send_ipi) | ||
74 | mov r14=r32 | ||
75 | mov r15=r33 | ||
76 | mov r2=0x400 | ||
77 | break 0x1000 | ||
78 | ;; | ||
79 | br.ret.sptk.many rp | ||
80 | ;; | ||
81 | END(xen_send_ipi) | ||
82 | |||
83 | GLOBAL_ENTRY(__hypercall) | ||
84 | mov r2=r37 | ||
85 | break 0x1000 | ||
86 | br.ret.sptk.many b0 | ||
87 | ;; | ||
88 | END(__hypercall) | ||
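The __HCALL0/__HCALL1/__HCALL2 macros above stamp out one tiny assembly stub per hyperprivop, so the argument-marshalling boilerplate is written only once. The same generate-a-family-of-wrappers idea can be sketched in plain C with macros (the hypercall numbers and the dispatch function below are invented for illustration):

#include <stdio.h>

/* Pretend low-level dispatcher standing in for the "break" instruction. */
static unsigned long fake_dispatch(int nr, unsigned long a0, unsigned long a1)
{
	printf("hypercall %d(%lu, %lu)\n", nr, a0, a1);
	return 0;
}

/* One macro per arity, each expanding to a complete wrapper function,
 * mirroring how __HCALL0/__HCALL1/__HCALL2 expand to assembly stubs. */
#define HCALL0(name, nr) \
	static unsigned long name(void) { return fake_dispatch(nr, 0, 0); }
#define HCALL1(name, nr) \
	static unsigned long name(unsigned long a0) { return fake_dispatch(nr, a0, 0); }
#define HCALL2(name, nr) \
	static unsigned long name(unsigned long a0, unsigned long a1) \
	{ return fake_dispatch(nr, a0, a1); }

HCALL0(fake_get_psr, 0x19)
HCALL1(fake_set_tpr, 0x12)
HCALL2(fake_set_rr, 0x0e)

int main(void)
{
	fake_get_psr();
	fake_set_tpr(5);
	fake_set_rr(0, 0x1234);
	return 0;
}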
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c deleted file mode 100644 index fab62528a80b..000000000000 --- a/arch/ia64/xen/hypervisor.c +++ /dev/null | |||
@@ -1,97 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/hypervisor.c | ||
3 | * | ||
4 | * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/efi.h> | ||
24 | #include <linux/export.h> | ||
25 | #include <asm/xen/hypervisor.h> | ||
26 | #include <asm/xen/privop.h> | ||
27 | |||
28 | #include "irq_xen.h" | ||
29 | |||
30 | struct shared_info *HYPERVISOR_shared_info __read_mostly = | ||
31 | (struct shared_info *)XSI_BASE; | ||
32 | EXPORT_SYMBOL(HYPERVISOR_shared_info); | ||
33 | |||
34 | DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); | ||
35 | |||
36 | struct start_info *xen_start_info; | ||
37 | EXPORT_SYMBOL(xen_start_info); | ||
38 | |||
39 | EXPORT_SYMBOL(xen_domain_type); | ||
40 | |||
41 | EXPORT_SYMBOL(__hypercall); | ||
42 | |||
43 | /* Stolen from arch/x86/xen/enlighten.c */ | ||
44 | /* | ||
45 | * Flag to determine whether vcpu info placement is available on all | ||
46 | * VCPUs. We assume it is to start with, and then set it to zero on | ||
47 | * the first failure. This is because it can succeed on some VCPUs | ||
48 | * and not others, since it can involve hypervisor memory allocation, | ||
49 | * or because the guest failed to guarantee all the appropriate | ||
50 | * constraints on all VCPUs (ie buffer can't cross a page boundary). | ||
51 | * | ||
52 | * Note that any particular CPU may be using a placed vcpu structure, | ||
53 | * but we can only optimise if they all are. | ||
54 | * | ||
55 | * 0: not available, 1: available | ||
56 | */ | ||
57 | |||
58 | static void __init xen_vcpu_setup(int cpu) | ||
59 | { | ||
60 | /* | ||
61 | * WARNING: | ||
62 | * before changing MAX_VIRT_CPUS, | ||
63 | * check that shared_info fits on a page | ||
64 | */ | ||
65 | BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE); | ||
66 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | ||
67 | } | ||
68 | |||
69 | void __init xen_setup_vcpu_info_placement(void) | ||
70 | { | ||
71 | int cpu; | ||
72 | |||
73 | for_each_possible_cpu(cpu) | ||
74 | xen_vcpu_setup(cpu); | ||
75 | } | ||
76 | |||
77 | void | ||
78 | xen_cpu_init(void) | ||
79 | { | ||
80 | xen_smp_intr_init(); | ||
81 | } | ||
82 | |||
83 | /************************************************************************** | ||
84 | * opt feature | ||
85 | */ | ||
86 | void | ||
87 | xen_ia64_enable_opt_feature(void) | ||
88 | { | ||
89 | /* Enable region 7 identity map optimizations in Xen */ | ||
90 | struct xen_ia64_opt_feature optf; | ||
91 | |||
92 | optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7; | ||
93 | optf.on = XEN_IA64_OPTF_ON; | ||
94 | optf.pgprot = pgprot_val(PAGE_KERNEL); | ||
95 | optf.key = 0; /* No key on linux. */ | ||
96 | HYPERVISOR_opt_feature(&optf); | ||
97 | } | ||
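xen_vcpu_setup() above simply points each CPU's xen_vcpu pointer at the corresponding slot of the shared_info page's vcpu_info array. Stripped of the per-cpu machinery, the pattern is just an array of per-CPU pointers initialised in a loop, as in this userspace sketch (struct layout and CPU count are placeholders):

#include <stdio.h>

#define FAKE_MAX_VIRT_CPUS 4

struct fake_vcpu_info { int pending; };

struct fake_shared_info {
	struct fake_vcpu_info vcpu_info[FAKE_MAX_VIRT_CPUS];
};

static struct fake_shared_info shared_info;			/* stand-in for the shared page */
static struct fake_vcpu_info *xen_vcpu[FAKE_MAX_VIRT_CPUS];	/* stand-in for the per-cpu var */

static void vcpu_setup(int cpu)
{
	xen_vcpu[cpu] = &shared_info.vcpu_info[cpu];
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < FAKE_MAX_VIRT_CPUS; cpu++)
		vcpu_setup(cpu);

	printf("cpu 2 vcpu_info at %p\n", (void *)xen_vcpu[2]);
	return 0;
}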
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c deleted file mode 100644 index efb74dafec4d..000000000000 --- a/arch/ia64/xen/irq_xen.c +++ /dev/null | |||
@@ -1,443 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/irq_xen.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/cpu.h> | ||
24 | |||
25 | #include <xen/interface/xen.h> | ||
26 | #include <xen/interface/callback.h> | ||
27 | #include <xen/events.h> | ||
28 | |||
29 | #include <asm/xen/privop.h> | ||
30 | |||
31 | #include "irq_xen.h" | ||
32 | |||
33 | /*************************************************************************** | ||
34 | * pv_irq_ops | ||
35 | * irq operations | ||
36 | */ | ||
37 | |||
38 | static int | ||
39 | xen_assign_irq_vector(int irq) | ||
40 | { | ||
41 | struct physdev_irq irq_op; | ||
42 | |||
43 | irq_op.irq = irq; | ||
44 | if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) | ||
45 | return -ENOSPC; | ||
46 | |||
47 | return irq_op.vector; | ||
48 | } | ||
49 | |||
50 | static void | ||
51 | xen_free_irq_vector(int vector) | ||
52 | { | ||
53 | struct physdev_irq irq_op; | ||
54 | |||
55 | if (vector < IA64_FIRST_DEVICE_VECTOR || | ||
56 | vector > IA64_LAST_DEVICE_VECTOR) | ||
57 | return; | ||
58 | |||
59 | irq_op.vector = vector; | ||
60 | if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op)) | ||
61 | printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n", | ||
62 | __func__, vector); | ||
63 | } | ||
64 | |||
65 | |||
66 | static DEFINE_PER_CPU(int, xen_timer_irq) = -1; | ||
67 | static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; | ||
68 | static DEFINE_PER_CPU(int, xen_resched_irq) = -1; | ||
69 | static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; | ||
70 | static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; | ||
71 | static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; | ||
72 | #define NAME_SIZE 15 | ||
73 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); | ||
74 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); | ||
75 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); | ||
76 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); | ||
77 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); | ||
78 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); | ||
79 | #undef NAME_SIZE | ||
80 | |||
81 | struct saved_irq { | ||
82 | unsigned int irq; | ||
83 | struct irqaction *action; | ||
84 | }; | ||
85 | /* 16 should be a generously large value, since only a few percpu irqs | ||
86 | * are registered early. | ||
87 | */ | ||
88 | #define MAX_LATE_IRQ 16 | ||
89 | static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ]; | ||
90 | static unsigned short late_irq_cnt; | ||
91 | static unsigned short saved_irq_cnt; | ||
92 | static int xen_slab_ready; | ||
93 | |||
94 | #ifdef CONFIG_SMP | ||
95 | #include <linux/sched.h> | ||
96 | |||
97 | /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ, | ||
98 | * it ends up issuing several memory accesses on percpu data and | ||
99 | * thus adds unnecessary traffic to other paths. | ||
100 | */ | ||
101 | static irqreturn_t | ||
102 | xen_dummy_handler(int irq, void *dev_id) | ||
103 | { | ||
104 | return IRQ_HANDLED; | ||
105 | } | ||
106 | |||
107 | static irqreturn_t | ||
108 | xen_resched_handler(int irq, void *dev_id) | ||
109 | { | ||
110 | scheduler_ipi(); | ||
111 | return IRQ_HANDLED; | ||
112 | } | ||
113 | |||
114 | static struct irqaction xen_ipi_irqaction = { | ||
115 | .handler = handle_IPI, | ||
116 | .flags = IRQF_DISABLED, | ||
117 | .name = "IPI" | ||
118 | }; | ||
119 | |||
120 | static struct irqaction xen_resched_irqaction = { | ||
121 | .handler = xen_resched_handler, | ||
122 | .flags = IRQF_DISABLED, | ||
123 | .name = "resched" | ||
124 | }; | ||
125 | |||
126 | static struct irqaction xen_tlb_irqaction = { | ||
127 | .handler = xen_dummy_handler, | ||
128 | .flags = IRQF_DISABLED, | ||
129 | .name = "tlb_flush" | ||
130 | }; | ||
131 | #endif | ||
132 | |||
133 | /* | ||
134 | * This is the Xen version of percpu irq registration, which needs to bind | ||
135 | * to the Xen-specific evtchn sub-system. One trick here is that the Xen | ||
136 | * evtchn binding interface depends on kmalloc, because the related | ||
137 | * port needs to be freed on device/cpu teardown. So we cache | ||
138 | * registrations made on the BSP before slab is ready and deal with them | ||
139 | * later. Registrations that happen after slab is ready are hooked | ||
140 | * to Xen evtchns immediately. | ||
141 | * | ||
142 | * FIXME: MCA is not supported so far, and thus the "nomca" boot param is | ||
143 | * required. | ||
144 | */ | ||
145 | static void | ||
146 | __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, | ||
147 | struct irqaction *action, int save) | ||
148 | { | ||
149 | int irq = 0; | ||
150 | |||
151 | if (xen_slab_ready) { | ||
152 | switch (vec) { | ||
153 | case IA64_TIMER_VECTOR: | ||
154 | snprintf(per_cpu(xen_timer_name, cpu), | ||
155 | sizeof(per_cpu(xen_timer_name, cpu)), | ||
156 | "%s%d", action->name, cpu); | ||
157 | irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, | ||
158 | action->handler, action->flags, | ||
159 | per_cpu(xen_timer_name, cpu), action->dev_id); | ||
160 | per_cpu(xen_timer_irq, cpu) = irq; | ||
161 | break; | ||
162 | case IA64_IPI_RESCHEDULE: | ||
163 | snprintf(per_cpu(xen_resched_name, cpu), | ||
164 | sizeof(per_cpu(xen_resched_name, cpu)), | ||
165 | "%s%d", action->name, cpu); | ||
166 | irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, | ||
167 | action->handler, action->flags, | ||
168 | per_cpu(xen_resched_name, cpu), action->dev_id); | ||
169 | per_cpu(xen_resched_irq, cpu) = irq; | ||
170 | break; | ||
171 | case IA64_IPI_VECTOR: | ||
172 | snprintf(per_cpu(xen_ipi_name, cpu), | ||
173 | sizeof(per_cpu(xen_ipi_name, cpu)), | ||
174 | "%s%d", action->name, cpu); | ||
175 | irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, | ||
176 | action->handler, action->flags, | ||
177 | per_cpu(xen_ipi_name, cpu), action->dev_id); | ||
178 | per_cpu(xen_ipi_irq, cpu) = irq; | ||
179 | break; | ||
180 | case IA64_CMC_VECTOR: | ||
181 | snprintf(per_cpu(xen_cmc_name, cpu), | ||
182 | sizeof(per_cpu(xen_cmc_name, cpu)), | ||
183 | "%s%d", action->name, cpu); | ||
184 | irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, | ||
185 | action->handler, | ||
186 | action->flags, | ||
187 | per_cpu(xen_cmc_name, cpu), | ||
188 | action->dev_id); | ||
189 | per_cpu(xen_cmc_irq, cpu) = irq; | ||
190 | break; | ||
191 | case IA64_CMCP_VECTOR: | ||
192 | snprintf(per_cpu(xen_cmcp_name, cpu), | ||
193 | sizeof(per_cpu(xen_cmcp_name, cpu)), | ||
194 | "%s%d", action->name, cpu); | ||
195 | irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, | ||
196 | action->handler, | ||
197 | action->flags, | ||
198 | per_cpu(xen_cmcp_name, cpu), | ||
199 | action->dev_id); | ||
200 | per_cpu(xen_cmcp_irq, cpu) = irq; | ||
201 | break; | ||
202 | case IA64_CPEP_VECTOR: | ||
203 | snprintf(per_cpu(xen_cpep_name, cpu), | ||
204 | sizeof(per_cpu(xen_cpep_name, cpu)), | ||
205 | "%s%d", action->name, cpu); | ||
206 | irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, | ||
207 | action->handler, | ||
208 | action->flags, | ||
209 | per_cpu(xen_cpep_name, cpu), | ||
210 | action->dev_id); | ||
211 | per_cpu(xen_cpep_irq, cpu) = irq; | ||
212 | break; | ||
213 | case IA64_CPE_VECTOR: | ||
214 | case IA64_MCA_RENDEZ_VECTOR: | ||
215 | case IA64_PERFMON_VECTOR: | ||
216 | case IA64_MCA_WAKEUP_VECTOR: | ||
217 | case IA64_SPURIOUS_INT_VECTOR: | ||
218 | /* No need to complain, these aren't supported. */ | ||
219 | break; | ||
220 | default: | ||
221 | printk(KERN_WARNING "Percpu irq %d is unsupported " | ||
222 | "by xen!\n", vec); | ||
223 | break; | ||
224 | } | ||
225 | BUG_ON(irq < 0); | ||
226 | |||
227 | if (irq > 0) { | ||
228 | /* | ||
229 | * Mark percpu. Without this, migrate_irqs() will | ||
230 | * mark the interrupt for migrations and trigger it | ||
231 | * on cpu hotplug. | ||
232 | */ | ||
233 | irq_set_status_flags(irq, IRQ_PER_CPU); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | /* For BSP, we cache registered percpu irqs, and then re-walk | ||
238 | * them when initializing APs | ||
239 | */ | ||
240 | if (!cpu && save) { | ||
241 | BUG_ON(saved_irq_cnt == MAX_LATE_IRQ); | ||
242 | saved_percpu_irqs[saved_irq_cnt].irq = vec; | ||
243 | saved_percpu_irqs[saved_irq_cnt].action = action; | ||
244 | saved_irq_cnt++; | ||
245 | if (!xen_slab_ready) | ||
246 | late_irq_cnt++; | ||
247 | } | ||
248 | } | ||
249 | |||
250 | static void | ||
251 | xen_register_percpu_irq(ia64_vector vec, struct irqaction *action) | ||
252 | { | ||
253 | __xen_register_percpu_irq(smp_processor_id(), vec, action, 1); | ||
254 | } | ||
255 | |||
256 | static void | ||
257 | xen_bind_early_percpu_irq(void) | ||
258 | { | ||
259 | int i; | ||
260 | |||
261 | xen_slab_ready = 1; | ||
262 | /* There's no race when accessing this cached array, since only | ||
263 | * the BSP goes through this step, and only briefly at boot | ||
264 | */ | ||
265 | for (i = 0; i < late_irq_cnt; i++) | ||
266 | __xen_register_percpu_irq(smp_processor_id(), | ||
267 | saved_percpu_irqs[i].irq, | ||
268 | saved_percpu_irqs[i].action, 0); | ||
269 | } | ||
270 | |||
271 | /* FIXME: There's no obvious point at which to check whether slab is ready, | ||
272 | * so a hack is used here: a late_time_init hook. | ||
273 | */ | ||
274 | |||
275 | #ifdef CONFIG_HOTPLUG_CPU | ||
276 | static int unbind_evtchn_callback(struct notifier_block *nfb, | ||
277 | unsigned long action, void *hcpu) | ||
278 | { | ||
279 | unsigned int cpu = (unsigned long)hcpu; | ||
280 | |||
281 | if (action == CPU_DEAD) { | ||
282 | /* Unregister evtchn. */ | ||
283 | if (per_cpu(xen_cpep_irq, cpu) >= 0) { | ||
284 | unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), | ||
285 | NULL); | ||
286 | per_cpu(xen_cpep_irq, cpu) = -1; | ||
287 | } | ||
288 | if (per_cpu(xen_cmcp_irq, cpu) >= 0) { | ||
289 | unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), | ||
290 | NULL); | ||
291 | per_cpu(xen_cmcp_irq, cpu) = -1; | ||
292 | } | ||
293 | if (per_cpu(xen_cmc_irq, cpu) >= 0) { | ||
294 | unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); | ||
295 | per_cpu(xen_cmc_irq, cpu) = -1; | ||
296 | } | ||
297 | if (per_cpu(xen_ipi_irq, cpu) >= 0) { | ||
298 | unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); | ||
299 | per_cpu(xen_ipi_irq, cpu) = -1; | ||
300 | } | ||
301 | if (per_cpu(xen_resched_irq, cpu) >= 0) { | ||
302 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), | ||
303 | NULL); | ||
304 | per_cpu(xen_resched_irq, cpu) = -1; | ||
305 | } | ||
306 | if (per_cpu(xen_timer_irq, cpu) >= 0) { | ||
307 | unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), | ||
308 | NULL); | ||
309 | per_cpu(xen_timer_irq, cpu) = -1; | ||
310 | } | ||
311 | } | ||
312 | return NOTIFY_OK; | ||
313 | } | ||
314 | |||
315 | static struct notifier_block unbind_evtchn_notifier = { | ||
316 | .notifier_call = unbind_evtchn_callback, | ||
317 | .priority = 0 | ||
318 | }; | ||
319 | #endif | ||
320 | |||
321 | void xen_smp_intr_init_early(unsigned int cpu) | ||
322 | { | ||
323 | #ifdef CONFIG_SMP | ||
324 | unsigned int i; | ||
325 | |||
326 | for (i = 0; i < saved_irq_cnt; i++) | ||
327 | __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq, | ||
328 | saved_percpu_irqs[i].action, 0); | ||
329 | #endif | ||
330 | } | ||
331 | |||
332 | void xen_smp_intr_init(void) | ||
333 | { | ||
334 | #ifdef CONFIG_SMP | ||
335 | unsigned int cpu = smp_processor_id(); | ||
336 | struct callback_register event = { | ||
337 | .type = CALLBACKTYPE_event, | ||
338 | .address = { .ip = (unsigned long)&xen_event_callback }, | ||
339 | }; | ||
340 | |||
341 | if (cpu == 0) { | ||
342 | /* Initialization was already done for boot cpu. */ | ||
343 | #ifdef CONFIG_HOTPLUG_CPU | ||
344 | /* Register the notifier only once. */ | ||
345 | register_cpu_notifier(&unbind_evtchn_notifier); | ||
346 | #endif | ||
347 | return; | ||
348 | } | ||
349 | |||
350 | /* This should be piggybacked onto vcpu guest context setup */ | ||
351 | BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); | ||
352 | #endif /* CONFIG_SMP */ | ||
353 | } | ||
354 | |||
355 | void __init | ||
356 | xen_irq_init(void) | ||
357 | { | ||
358 | struct callback_register event = { | ||
359 | .type = CALLBACKTYPE_event, | ||
360 | .address = { .ip = (unsigned long)&xen_event_callback }, | ||
361 | }; | ||
362 | |||
363 | xen_init_IRQ(); | ||
364 | BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event)); | ||
365 | late_time_init = xen_bind_early_percpu_irq; | ||
366 | } | ||
367 | |||
368 | void | ||
369 | xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect) | ||
370 | { | ||
371 | #ifdef CONFIG_SMP | ||
372 | /* TODO: we need to call vcpu_up here */ | ||
373 | if (unlikely(vector == ap_wakeup_vector)) { | ||
374 | /* XXX | ||
375 | * This should be in __cpu_up(cpu) in ia64 smpboot.c | ||
376 | * like x86, but we don't want to modify it, | ||
377 | * so keep it untouched. | ||
378 | */ | ||
379 | xen_smp_intr_init_early(cpu); | ||
380 | |||
381 | xen_send_ipi(cpu, vector); | ||
382 | /* vcpu_prepare_and_up(cpu); */ | ||
383 | return; | ||
384 | } | ||
385 | #endif | ||
386 | |||
387 | switch (vector) { | ||
388 | case IA64_IPI_VECTOR: | ||
389 | xen_send_IPI_one(cpu, XEN_IPI_VECTOR); | ||
390 | break; | ||
391 | case IA64_IPI_RESCHEDULE: | ||
392 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | ||
393 | break; | ||
394 | case IA64_CMCP_VECTOR: | ||
395 | xen_send_IPI_one(cpu, XEN_CMCP_VECTOR); | ||
396 | break; | ||
397 | case IA64_CPEP_VECTOR: | ||
398 | xen_send_IPI_one(cpu, XEN_CPEP_VECTOR); | ||
399 | break; | ||
400 | case IA64_TIMER_VECTOR: { | ||
401 | /* this is used only once by check_sal_cache_flush() | ||
402 | at boot time */ | ||
403 | static int used = 0; | ||
404 | if (!used) { | ||
405 | xen_send_ipi(cpu, IA64_TIMER_VECTOR); | ||
406 | used = 1; | ||
407 | break; | ||
408 | } | ||
409 | /* fallthrough */ | ||
410 | } | ||
411 | default: | ||
412 | printk(KERN_WARNING "Unsupported IPI type 0x%x\n", | ||
413 | vector); | ||
414 | notify_remote_via_irq(0); /* defaults to 0 irq */ | ||
415 | break; | ||
416 | } | ||
417 | } | ||
418 | |||
419 | static void __init | ||
420 | xen_register_ipi(void) | ||
421 | { | ||
422 | #ifdef CONFIG_SMP | ||
423 | register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction); | ||
424 | register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction); | ||
425 | register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction); | ||
426 | #endif | ||
427 | } | ||
428 | |||
429 | static void | ||
430 | xen_resend_irq(unsigned int vector) | ||
431 | { | ||
432 | (void)resend_irq_on_evtchn(vector); | ||
433 | } | ||
434 | |||
435 | const struct pv_irq_ops xen_irq_ops __initconst = { | ||
436 | .register_ipi = xen_register_ipi, | ||
437 | |||
438 | .assign_irq_vector = xen_assign_irq_vector, | ||
439 | .free_irq_vector = xen_free_irq_vector, | ||
440 | .register_percpu_irq = xen_register_percpu_irq, | ||
441 | |||
442 | .resend_irq = xen_resend_irq, | ||
443 | }; | ||
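The bookkeeping in __xen_register_percpu_irq() boils down to: if the allocator the evtchn layer needs is not ready yet, stash the registration in a fixed-size array and replay it from a late init hook. That deferred-registration pattern is easy to show in isolation (the callback type and the "allocator ready" flag below are stand-ins):

#include <stdio.h>

#define MAX_LATE 16

struct saved_reg {
	int vec;
	void (*handler)(int vec);
};

static struct saved_reg saved[MAX_LATE];
static int nr_saved;
static int allocator_ready;	/* stand-in for xen_slab_ready */

static void do_bind(int vec, void (*handler)(int vec))
{
	printf("binding vector %d\n", vec);
	handler(vec);
}

static void register_percpu_irq(int vec, void (*handler)(int vec))
{
	if (allocator_ready) {
		do_bind(vec, handler);
		return;
	}
	/* Too early: remember it and replay once binding is possible. */
	if (nr_saved < MAX_LATE)
		saved[nr_saved++] = (struct saved_reg){ vec, handler };
}

static void late_init(void)
{
	int i;

	allocator_ready = 1;
	for (i = 0; i < nr_saved; i++)
		do_bind(saved[i].vec, saved[i].handler);
}

static void demo_handler(int vec)
{
	printf("handler for vector %d\n", vec);
}

int main(void)
{
	register_percpu_irq(239, demo_handler);	/* too early, gets cached */
	register_percpu_irq(254, demo_handler);
	late_init();				/* replays the cached ones */
	return 0;
}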
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h deleted file mode 100644 index 1778517b90fe..000000000000 --- a/arch/ia64/xen/irq_xen.h +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/irq_xen.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef IRQ_XEN_H | ||
24 | #define IRQ_XEN_H | ||
25 | |||
26 | extern void (*late_time_init)(void); | ||
27 | extern char xen_event_callback; | ||
28 | void __init xen_init_IRQ(void); | ||
29 | |||
30 | extern const struct pv_irq_ops xen_irq_ops __initconst; | ||
31 | extern void xen_smp_intr_init(void); | ||
32 | extern void xen_send_ipi(int cpu, int vec); | ||
33 | |||
34 | #endif /* IRQ_XEN_H */ | ||
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c deleted file mode 100644 index 4ad588a7c279..000000000000 --- a/arch/ia64/xen/machvec.c +++ /dev/null | |||
@@ -1,4 +0,0 @@ | |||
1 | #define MACHVEC_PLATFORM_NAME xen | ||
2 | #define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h> | ||
3 | #include <asm/machvec_init.h> | ||
4 | |||
diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c deleted file mode 100644 index 419c8620945a..000000000000 --- a/arch/ia64/xen/suspend.c +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/suspend.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | * suspend/resume | ||
22 | */ | ||
23 | |||
24 | #include <xen/xen-ops.h> | ||
25 | #include <asm/xen/hypervisor.h> | ||
26 | #include "time.h" | ||
27 | |||
28 | void | ||
29 | xen_mm_pin_all(void) | ||
30 | { | ||
31 | /* nothing */ | ||
32 | } | ||
33 | |||
34 | void | ||
35 | xen_mm_unpin_all(void) | ||
36 | { | ||
37 | /* nothing */ | ||
38 | } | ||
39 | |||
40 | void | ||
41 | xen_arch_pre_suspend() | ||
42 | { | ||
43 | /* nothing */ | ||
44 | } | ||
45 | |||
46 | void | ||
47 | xen_arch_post_suspend(int suspend_cancelled) | ||
48 | { | ||
49 | if (suspend_cancelled) | ||
50 | return; | ||
51 | |||
52 | xen_ia64_enable_opt_feature(); | ||
53 | /* add more if necessary */ | ||
54 | } | ||
55 | |||
56 | void xen_arch_resume(void) | ||
57 | { | ||
58 | xen_timer_resume_on_aps(); | ||
59 | } | ||
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c deleted file mode 100644 index 1f8244a78bee..000000000000 --- a/arch/ia64/xen/time.c +++ /dev/null | |||
@@ -1,257 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/time.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/delay.h> | ||
24 | #include <linux/kernel_stat.h> | ||
25 | #include <linux/posix-timers.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/clocksource.h> | ||
28 | |||
29 | #include <asm/timex.h> | ||
30 | |||
31 | #include <asm/xen/hypervisor.h> | ||
32 | |||
33 | #include <xen/interface/vcpu.h> | ||
34 | |||
35 | #include "../kernel/fsyscall_gtod_data.h" | ||
36 | |||
37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); | ||
38 | static DEFINE_PER_CPU(unsigned long, xen_stolen_time); | ||
39 | static DEFINE_PER_CPU(unsigned long, xen_blocked_time); | ||
40 | |||
41 | /* taken from i386/kernel/time-xen.c */ | ||
42 | static void xen_init_missing_ticks_accounting(int cpu) | ||
43 | { | ||
44 | struct vcpu_register_runstate_memory_area area; | ||
45 | struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); | ||
46 | int rc; | ||
47 | |||
48 | memset(runstate, 0, sizeof(*runstate)); | ||
49 | |||
50 | area.addr.v = runstate; | ||
51 | rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, | ||
52 | &area); | ||
53 | WARN_ON(rc && rc != -ENOSYS); | ||
54 | |||
55 | per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; | ||
56 | per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] | ||
57 | + runstate->time[RUNSTATE_offline]; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Runstate accounting | ||
62 | */ | ||
63 | /* stolen from arch/x86/xen/time.c */ | ||
64 | static void get_runstate_snapshot(struct vcpu_runstate_info *res) | ||
65 | { | ||
66 | u64 state_time; | ||
67 | struct vcpu_runstate_info *state; | ||
68 | |||
69 | BUG_ON(preemptible()); | ||
70 | |||
71 | state = &__get_cpu_var(xen_runstate); | ||
72 | |||
73 | /* | ||
74 | * The runstate info is always updated by the hypervisor on | ||
75 | * the current CPU, so there's no need to use anything | ||
76 | * stronger than a compiler barrier when fetching it. | ||
77 | */ | ||
78 | do { | ||
79 | state_time = state->state_entry_time; | ||
80 | rmb(); | ||
81 | *res = *state; | ||
82 | rmb(); | ||
83 | } while (state->state_entry_time != state_time); | ||
84 | } | ||
85 | |||
86 | #define NS_PER_TICK (1000000000LL/HZ) | ||
87 | |||
88 | static unsigned long | ||
89 | consider_steal_time(unsigned long new_itm) | ||
90 | { | ||
91 | unsigned long stolen, blocked; | ||
92 | unsigned long delta_itm = 0, stolentick = 0; | ||
93 | int cpu = smp_processor_id(); | ||
94 | struct vcpu_runstate_info runstate; | ||
95 | struct task_struct *p = current; | ||
96 | |||
97 | get_runstate_snapshot(&runstate); | ||
98 | |||
99 | /* | ||
100 | * Check for the vcpu migration effect. | ||
101 | * In that case the itc value goes backwards, | ||
102 | * which would produce a huge stolen-time value. | ||
103 | * This function just detects and rejects that effect. | ||
104 | */ | ||
105 | if (!time_after_eq(runstate.time[RUNSTATE_blocked], | ||
106 | per_cpu(xen_blocked_time, cpu))) | ||
107 | blocked = 0; | ||
108 | |||
109 | if (!time_after_eq(runstate.time[RUNSTATE_runnable] + | ||
110 | runstate.time[RUNSTATE_offline], | ||
111 | per_cpu(xen_stolen_time, cpu))) | ||
112 | stolen = 0; | ||
113 | |||
114 | if (!time_after(delta_itm + new_itm, ia64_get_itc())) | ||
115 | stolentick = ia64_get_itc() - new_itm; | ||
116 | |||
117 | do_div(stolentick, NS_PER_TICK); | ||
118 | stolentick++; | ||
119 | |||
120 | do_div(stolen, NS_PER_TICK); | ||
121 | |||
122 | if (stolen > stolentick) | ||
123 | stolen = stolentick; | ||
124 | |||
125 | stolentick -= stolen; | ||
126 | do_div(blocked, NS_PER_TICK); | ||
127 | |||
128 | if (blocked > stolentick) | ||
129 | blocked = stolentick; | ||
130 | |||
131 | if (stolen > 0 || blocked > 0) { | ||
132 | account_steal_ticks(stolen); | ||
133 | account_idle_ticks(blocked); | ||
134 | run_local_timers(); | ||
135 | |||
136 | rcu_check_callbacks(cpu, user_mode(get_irq_regs())); | ||
137 | |||
138 | scheduler_tick(); | ||
139 | run_posix_cpu_timers(p); | ||
140 | delta_itm += local_cpu_data->itm_delta * (stolen + blocked); | ||
141 | |||
142 | if (cpu == time_keeper_id) | ||
143 | xtime_update(stolen + blocked); | ||
144 | |||
145 | local_cpu_data->itm_next = delta_itm + new_itm; | ||
146 | |||
147 | per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; | ||
148 | per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; | ||
149 | } | ||
150 | return delta_itm; | ||
151 | } | ||
152 | |||
153 | static int xen_do_steal_accounting(unsigned long *new_itm) | ||
154 | { | ||
155 | unsigned long delta_itm; | ||
156 | delta_itm = consider_steal_time(*new_itm); | ||
157 | *new_itm += delta_itm; | ||
158 | if (time_after(*new_itm, ia64_get_itc()) && delta_itm) | ||
159 | return 1; | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static void xen_itc_jitter_data_reset(void) | ||
165 | { | ||
166 | u64 lcycle, ret; | ||
167 | |||
168 | do { | ||
169 | lcycle = itc_jitter_data.itc_lastcycle; | ||
170 | ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0); | ||
171 | } while (unlikely(ret != lcycle)); | ||
172 | } | ||
173 | |||
174 | /* based on xen_sched_clock() in arch/x86/xen/time.c. */ | ||
175 | /* | ||
176 | * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If that can't be defined, | ||
177 | * similar logic should be implemented here. | ||
178 | */ | ||
179 | /* | ||
180 | * Xen sched_clock implementation. Returns the number of unstolen | ||
181 | * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED | ||
182 | * states. | ||
183 | */ | ||
184 | static unsigned long long xen_sched_clock(void) | ||
185 | { | ||
186 | struct vcpu_runstate_info runstate; | ||
187 | |||
188 | unsigned long long now; | ||
189 | unsigned long long offset; | ||
190 | unsigned long long ret; | ||
191 | |||
192 | /* | ||
193 | * Ideally sched_clock should be called on a per-cpu basis | ||
194 | * anyway, so preempt should already be disabled, but that's | ||
195 | * not current practice at the moment. | ||
196 | */ | ||
197 | preempt_disable(); | ||
198 | |||
199 | /* | ||
200 | * Both ia64_native_sched_clock() and xen's runstate are | ||
201 | * based on the machine AR.ITC, so the difference between them is meaningful. | ||
202 | */ | ||
203 | now = ia64_native_sched_clock(); | ||
204 | |||
205 | get_runstate_snapshot(&runstate); | ||
206 | |||
207 | WARN_ON(runstate.state != RUNSTATE_running); | ||
208 | |||
209 | offset = 0; | ||
210 | if (now > runstate.state_entry_time) | ||
211 | offset = now - runstate.state_entry_time; | ||
212 | ret = runstate.time[RUNSTATE_blocked] + | ||
213 | runstate.time[RUNSTATE_running] + | ||
214 | offset; | ||
215 | |||
216 | preempt_enable(); | ||
217 | |||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | struct pv_time_ops xen_time_ops __initdata = { | ||
222 | .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, | ||
223 | .do_steal_accounting = xen_do_steal_accounting, | ||
224 | .clocksource_resume = xen_itc_jitter_data_reset, | ||
225 | .sched_clock = xen_sched_clock, | ||
226 | }; | ||
227 | |||
228 | /* Called after suspend, to resume time. */ | ||
229 | static void xen_local_tick_resume(void) | ||
230 | { | ||
231 | /* Just trigger a tick. */ | ||
232 | ia64_cpu_local_tick(); | ||
233 | touch_softlockup_watchdog(); | ||
234 | } | ||
235 | |||
236 | void | ||
237 | xen_timer_resume(void) | ||
238 | { | ||
239 | unsigned int cpu; | ||
240 | |||
241 | xen_local_tick_resume(); | ||
242 | |||
243 | for_each_online_cpu(cpu) | ||
244 | xen_init_missing_ticks_accounting(cpu); | ||
245 | } | ||
246 | |||
247 | static void ia64_cpu_local_tick_fn(void *unused) | ||
248 | { | ||
249 | xen_local_tick_resume(); | ||
250 | xen_init_missing_ticks_accounting(smp_processor_id()); | ||
251 | } | ||
252 | |||
253 | void | ||
254 | xen_timer_resume_on_aps(void) | ||
255 | { | ||
256 | smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1); | ||
257 | } | ||
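
The deleted get_runstate_snapshot() above reads the shared per-VCPU runstate with a retry loop: record state_entry_time, copy the structure between read barriers, and retry if the hypervisor bumped the timestamp in between. Below is a minimal, self-contained sketch of that pattern only, with hypothetical names, a plain GCC-style compiler barrier standing in for rmb(), and no Xen or kernel APIs.

#include <stdint.h>
#include <stdio.h>

struct runstate_like {
	uint64_t state_entry_time;	/* bumped by the producer on every update */
	uint64_t time[4];		/* accumulated nanoseconds per state */
};

/* Compiler barrier only; the kernel code uses real rmb()s here. */
#define barrier()	__asm__ __volatile__("" ::: "memory")

/* Seqlock-style snapshot: retry until the timestamp is stable across the copy. */
static void snapshot(struct runstate_like *dst, const struct runstate_like *src)
{
	uint64_t entry;

	do {
		entry = src->state_entry_time;
		barrier();		/* plays the role of the first rmb() */
		*dst = *src;		/* struct copy, like "*res = *state" */
		barrier();		/* and of the second rmb() */
	} while (src->state_entry_time != entry);
}

int main(void)
{
	struct runstate_like shared = {
		.state_entry_time = 42,
		.time = { 100, 200, 300, 400 },
	};
	struct runstate_like snap;

	snapshot(&snap, &shared);
	printf("entry=%llu time2=%llu\n",
	       (unsigned long long)snap.state_entry_time,
	       (unsigned long long)snap.time[2]);
	return 0;
}

In the deleted code the source is the per-CPU area registered via VCPUOP_register_runstate_memory_area and the barriers are real rmb()s, but the loop structure is the same.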
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h deleted file mode 100644 index f98d7e1a42f0..000000000000 --- a/arch/ia64/xen/time.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/time.h | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | extern struct pv_time_ops xen_time_ops __initdata; | ||
24 | void xen_timer_resume_on_aps(void); | ||
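
The header above only exports the xen_time_ops table and the AP resume helper; the table itself is installed into the global pv_time_ops during setup (see xen_setup_pv_ops() further down in this commit). A minimal sketch of that ops-table indirection, with hypothetical names and trivial backends, might look like this:

#include <stdio.h>

struct time_ops {
	unsigned long long (*sched_clock)(void);
	void (*clocksource_resume)(void);
};

/* "native" backend */
static unsigned long long native_sched_clock(void) { return 1000; }
static void native_resume(void) { }

/* alternative backend, playing the role of xen_time_ops */
static unsigned long long alt_sched_clock(void) { return 2000; }
static void alt_resume(void) { puts("resume hook"); }

static struct time_ops pv_time_ops = {		/* default: native */
	.sched_clock        = native_sched_clock,
	.clocksource_resume = native_resume,
};

static const struct time_ops alt_time_ops = {
	.sched_clock        = alt_sched_clock,
	.clocksource_resume = alt_resume,
};

static void setup_pv_ops(int use_alt)
{
	if (use_alt)
		pv_time_ops = alt_time_ops;	/* struct copy, like pv_time_ops = xen_time_ops */
}

int main(void)
{
	setup_pv_ops(1);
	printf("sched_clock() = %llu\n", pv_time_ops.sched_clock());
	pv_time_ops.clocksource_resume();
	return 0;
}

Callers always go through the global table, so switching backends is a single struct assignment performed once at early init.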
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c deleted file mode 100644 index ccaf7431f7c8..000000000000 --- a/arch/ia64/xen/xcom_hcall.c +++ /dev/null | |||
@@ -1,441 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
15 | * | ||
16 | * Tristan Gingold <tristan.gingold@bull.net> | ||
17 | * | ||
18 | * Copyright (c) 2007 | ||
19 | * Isaku Yamahata <yamahata at valinux co jp> | ||
20 | * VA Linux Systems Japan K.K. | ||
21 | * consolidate mini and inline version. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <xen/interface/xen.h> | ||
26 | #include <xen/interface/memory.h> | ||
27 | #include <xen/interface/grant_table.h> | ||
28 | #include <xen/interface/callback.h> | ||
29 | #include <xen/interface/vcpu.h> | ||
30 | #include <asm/xen/hypervisor.h> | ||
31 | #include <asm/xen/xencomm.h> | ||
32 | |||
33 | /* Xencomm notes: | ||
34 | * This file defines hypercalls to be used by xencomm. The hypercalls simply | ||
35 | * create inlines or mini descriptors for pointers and then call the raw arch | ||
36 | * hypercall xencomm_arch_hypercall_XXX | ||
37 | * | ||
38 | * If the arch wants to directly use these hypercalls, simply define macros | ||
39 | * in asm/xen/hypercall.h, eg: | ||
40 | * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op | ||
41 | * | ||
42 | * The arch may also define HYPERVISOR_xxx as a function and do more operations | ||
43 | * before/after doing the hypercall. | ||
44 | * | ||
45 | * Note: because only inline or mini descriptors are created, these functions | ||
46 | * must only be called with parameters that reside in kernel memory. | ||
47 | */ | ||
48 | |||
49 | int | ||
50 | xencomm_hypercall_console_io(int cmd, int count, char *str) | ||
51 | { | ||
52 | /* xen early printk uses console io hypercall before | ||
53 | * xencomm initialization. In that case, we just ignore it. | ||
54 | */ | ||
55 | if (!xencomm_is_initialized()) | ||
56 | return 0; | ||
57 | |||
58 | return xencomm_arch_hypercall_console_io | ||
59 | (cmd, count, xencomm_map_no_alloc(str, count)); | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io); | ||
62 | |||
63 | int | ||
64 | xencomm_hypercall_event_channel_op(int cmd, void *op) | ||
65 | { | ||
66 | struct xencomm_handle *desc; | ||
67 | desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op)); | ||
68 | if (desc == NULL) | ||
69 | return -EINVAL; | ||
70 | |||
71 | return xencomm_arch_hypercall_event_channel_op(cmd, desc); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op); | ||
74 | |||
75 | int | ||
76 | xencomm_hypercall_xen_version(int cmd, void *arg) | ||
77 | { | ||
78 | struct xencomm_handle *desc; | ||
79 | unsigned int argsize; | ||
80 | |||
81 | switch (cmd) { | ||
82 | case XENVER_version: | ||
83 | /* do not actually pass an argument */ | ||
84 | return xencomm_arch_hypercall_xen_version(cmd, 0); | ||
85 | case XENVER_extraversion: | ||
86 | argsize = sizeof(struct xen_extraversion); | ||
87 | break; | ||
88 | case XENVER_compile_info: | ||
89 | argsize = sizeof(struct xen_compile_info); | ||
90 | break; | ||
91 | case XENVER_capabilities: | ||
92 | argsize = sizeof(struct xen_capabilities_info); | ||
93 | break; | ||
94 | case XENVER_changeset: | ||
95 | argsize = sizeof(struct xen_changeset_info); | ||
96 | break; | ||
97 | case XENVER_platform_parameters: | ||
98 | argsize = sizeof(struct xen_platform_parameters); | ||
99 | break; | ||
100 | case XENVER_get_features: | ||
101 | argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info); | ||
102 | break; | ||
103 | |||
104 | default: | ||
105 | printk(KERN_DEBUG | ||
106 | "%s: unknown version op %d\n", __func__, cmd); | ||
107 | return -ENOSYS; | ||
108 | } | ||
109 | |||
110 | desc = xencomm_map_no_alloc(arg, argsize); | ||
111 | if (desc == NULL) | ||
112 | return -EINVAL; | ||
113 | |||
114 | return xencomm_arch_hypercall_xen_version(cmd, desc); | ||
115 | } | ||
116 | EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version); | ||
117 | |||
118 | int | ||
119 | xencomm_hypercall_physdev_op(int cmd, void *op) | ||
120 | { | ||
121 | unsigned int argsize; | ||
122 | |||
123 | switch (cmd) { | ||
124 | case PHYSDEVOP_apic_read: | ||
125 | case PHYSDEVOP_apic_write: | ||
126 | argsize = sizeof(struct physdev_apic); | ||
127 | break; | ||
128 | case PHYSDEVOP_alloc_irq_vector: | ||
129 | case PHYSDEVOP_free_irq_vector: | ||
130 | argsize = sizeof(struct physdev_irq); | ||
131 | break; | ||
132 | case PHYSDEVOP_irq_status_query: | ||
133 | argsize = sizeof(struct physdev_irq_status_query); | ||
134 | break; | ||
135 | |||
136 | default: | ||
137 | printk(KERN_DEBUG | ||
138 | "%s: unknown physdev op %d\n", __func__, cmd); | ||
139 | return -ENOSYS; | ||
140 | } | ||
141 | |||
142 | return xencomm_arch_hypercall_physdev_op | ||
143 | (cmd, xencomm_map_no_alloc(op, argsize)); | ||
144 | } | ||
145 | |||
146 | static int | ||
147 | xencommize_grant_table_op(struct xencomm_mini **xc_area, | ||
148 | unsigned int cmd, void *op, unsigned int count, | ||
149 | struct xencomm_handle **desc) | ||
150 | { | ||
151 | struct xencomm_handle *desc1; | ||
152 | unsigned int argsize; | ||
153 | |||
154 | switch (cmd) { | ||
155 | case GNTTABOP_map_grant_ref: | ||
156 | argsize = sizeof(struct gnttab_map_grant_ref); | ||
157 | break; | ||
158 | case GNTTABOP_unmap_grant_ref: | ||
159 | argsize = sizeof(struct gnttab_unmap_grant_ref); | ||
160 | break; | ||
161 | case GNTTABOP_setup_table: | ||
162 | { | ||
163 | struct gnttab_setup_table *setup = op; | ||
164 | |||
165 | argsize = sizeof(*setup); | ||
166 | |||
167 | if (count != 1) | ||
168 | return -EINVAL; | ||
169 | desc1 = __xencomm_map_no_alloc | ||
170 | (xen_guest_handle(setup->frame_list), | ||
171 | setup->nr_frames * | ||
172 | sizeof(*xen_guest_handle(setup->frame_list)), | ||
173 | *xc_area); | ||
174 | if (desc1 == NULL) | ||
175 | return -EINVAL; | ||
176 | (*xc_area)++; | ||
177 | set_xen_guest_handle(setup->frame_list, (void *)desc1); | ||
178 | break; | ||
179 | } | ||
180 | case GNTTABOP_dump_table: | ||
181 | argsize = sizeof(struct gnttab_dump_table); | ||
182 | break; | ||
183 | case GNTTABOP_transfer: | ||
184 | argsize = sizeof(struct gnttab_transfer); | ||
185 | break; | ||
186 | case GNTTABOP_copy: | ||
187 | argsize = sizeof(struct gnttab_copy); | ||
188 | break; | ||
189 | case GNTTABOP_query_size: | ||
190 | argsize = sizeof(struct gnttab_query_size); | ||
191 | break; | ||
192 | default: | ||
193 | printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n", | ||
194 | __func__, cmd); | ||
195 | BUG(); | ||
196 | } | ||
197 | |||
198 | *desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area); | ||
199 | if (*desc == NULL) | ||
200 | return -EINVAL; | ||
201 | (*xc_area)++; | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | int | ||
207 | xencomm_hypercall_grant_table_op(unsigned int cmd, void *op, | ||
208 | unsigned int count) | ||
209 | { | ||
210 | int rc; | ||
211 | struct xencomm_handle *desc; | ||
212 | XENCOMM_MINI_ALIGNED(xc_area, 2); | ||
213 | |||
214 | rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc); | ||
215 | if (rc) | ||
216 | return rc; | ||
217 | |||
218 | return xencomm_arch_hypercall_grant_table_op(cmd, desc, count); | ||
219 | } | ||
220 | EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op); | ||
221 | |||
222 | int | ||
223 | xencomm_hypercall_sched_op(int cmd, void *arg) | ||
224 | { | ||
225 | struct xencomm_handle *desc; | ||
226 | unsigned int argsize; | ||
227 | |||
228 | switch (cmd) { | ||
229 | case SCHEDOP_yield: | ||
230 | case SCHEDOP_block: | ||
231 | argsize = 0; | ||
232 | break; | ||
233 | case SCHEDOP_shutdown: | ||
234 | argsize = sizeof(struct sched_shutdown); | ||
235 | break; | ||
236 | case SCHEDOP_poll: | ||
237 | { | ||
238 | struct sched_poll *poll = arg; | ||
239 | struct xencomm_handle *ports; | ||
240 | |||
241 | argsize = sizeof(struct sched_poll); | ||
242 | ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports), | ||
243 | sizeof(*xen_guest_handle(poll->ports))); | ||
244 | |||
245 | set_xen_guest_handle(poll->ports, (void *)ports); | ||
246 | break; | ||
247 | } | ||
248 | default: | ||
249 | printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd); | ||
250 | return -ENOSYS; | ||
251 | } | ||
252 | |||
253 | desc = xencomm_map_no_alloc(arg, argsize); | ||
254 | if (desc == NULL) | ||
255 | return -EINVAL; | ||
256 | |||
257 | return xencomm_arch_hypercall_sched_op(cmd, desc); | ||
258 | } | ||
259 | EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op); | ||
260 | |||
261 | int | ||
262 | xencomm_hypercall_multicall(void *call_list, int nr_calls) | ||
263 | { | ||
264 | int rc; | ||
265 | int i; | ||
266 | struct multicall_entry *mce; | ||
267 | struct xencomm_handle *desc; | ||
268 | XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2); | ||
269 | |||
270 | for (i = 0; i < nr_calls; i++) { | ||
271 | mce = (struct multicall_entry *)call_list + i; | ||
272 | |||
273 | switch (mce->op) { | ||
274 | case __HYPERVISOR_update_va_mapping: | ||
275 | case __HYPERVISOR_mmu_update: | ||
276 | /* No-op on ia64. */ | ||
277 | break; | ||
278 | case __HYPERVISOR_grant_table_op: | ||
279 | rc = xencommize_grant_table_op | ||
280 | (&xc_area, | ||
281 | mce->args[0], (void *)mce->args[1], | ||
282 | mce->args[2], &desc); | ||
283 | if (rc) | ||
284 | return rc; | ||
285 | mce->args[1] = (unsigned long)desc; | ||
286 | break; | ||
287 | case __HYPERVISOR_memory_op: | ||
288 | default: | ||
289 | printk(KERN_DEBUG | ||
290 | "%s: unhandled multicall op entry op %lu\n", | ||
291 | __func__, mce->op); | ||
292 | return -ENOSYS; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | desc = xencomm_map_no_alloc(call_list, | ||
297 | nr_calls * sizeof(struct multicall_entry)); | ||
298 | if (desc == NULL) | ||
299 | return -EINVAL; | ||
300 | |||
301 | return xencomm_arch_hypercall_multicall(desc, nr_calls); | ||
302 | } | ||
303 | EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall); | ||
304 | |||
305 | int | ||
306 | xencomm_hypercall_callback_op(int cmd, void *arg) | ||
307 | { | ||
308 | unsigned int argsize; | ||
309 | switch (cmd) { | ||
310 | case CALLBACKOP_register: | ||
311 | argsize = sizeof(struct callback_register); | ||
312 | break; | ||
313 | case CALLBACKOP_unregister: | ||
314 | argsize = sizeof(struct callback_unregister); | ||
315 | break; | ||
316 | default: | ||
317 | printk(KERN_DEBUG | ||
318 | "%s: unknown callback op %d\n", __func__, cmd); | ||
319 | return -ENOSYS; | ||
320 | } | ||
321 | |||
322 | return xencomm_arch_hypercall_callback_op | ||
323 | (cmd, xencomm_map_no_alloc(arg, argsize)); | ||
324 | } | ||
325 | |||
326 | static int | ||
327 | xencommize_memory_reservation(struct xencomm_mini *xc_area, | ||
328 | struct xen_memory_reservation *mop) | ||
329 | { | ||
330 | struct xencomm_handle *desc; | ||
331 | |||
332 | desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start), | ||
333 | mop->nr_extents * | ||
334 | sizeof(*xen_guest_handle(mop->extent_start)), | ||
335 | xc_area); | ||
336 | if (desc == NULL) | ||
337 | return -EINVAL; | ||
338 | |||
339 | set_xen_guest_handle(mop->extent_start, (void *)desc); | ||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | int | ||
344 | xencomm_hypercall_memory_op(unsigned int cmd, void *arg) | ||
345 | { | ||
346 | GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} }; | ||
347 | struct xen_memory_reservation *xmr = NULL; | ||
348 | int rc; | ||
349 | struct xencomm_handle *desc; | ||
350 | unsigned int argsize; | ||
351 | XENCOMM_MINI_ALIGNED(xc_area, 2); | ||
352 | |||
353 | switch (cmd) { | ||
354 | case XENMEM_increase_reservation: | ||
355 | case XENMEM_decrease_reservation: | ||
356 | case XENMEM_populate_physmap: | ||
357 | xmr = (struct xen_memory_reservation *)arg; | ||
358 | set_xen_guest_handle(extent_start_va[0], | ||
359 | xen_guest_handle(xmr->extent_start)); | ||
360 | |||
361 | argsize = sizeof(*xmr); | ||
362 | rc = xencommize_memory_reservation(xc_area, xmr); | ||
363 | if (rc) | ||
364 | return rc; | ||
365 | xc_area++; | ||
366 | break; | ||
367 | |||
368 | case XENMEM_maximum_ram_page: | ||
369 | argsize = 0; | ||
370 | break; | ||
371 | |||
372 | case XENMEM_add_to_physmap: | ||
373 | argsize = sizeof(struct xen_add_to_physmap); | ||
374 | break; | ||
375 | |||
376 | default: | ||
377 | printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd); | ||
378 | return -ENOSYS; | ||
379 | } | ||
380 | |||
381 | desc = xencomm_map_no_alloc(arg, argsize); | ||
382 | if (desc == NULL) | ||
383 | return -EINVAL; | ||
384 | |||
385 | rc = xencomm_arch_hypercall_memory_op(cmd, desc); | ||
386 | |||
387 | switch (cmd) { | ||
388 | case XENMEM_increase_reservation: | ||
389 | case XENMEM_decrease_reservation: | ||
390 | case XENMEM_populate_physmap: | ||
391 | set_xen_guest_handle(xmr->extent_start, | ||
392 | xen_guest_handle(extent_start_va[0])); | ||
393 | break; | ||
394 | } | ||
395 | |||
396 | return rc; | ||
397 | } | ||
398 | EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op); | ||
399 | |||
400 | int | ||
401 | xencomm_hypercall_suspend(unsigned long srec) | ||
402 | { | ||
403 | struct sched_shutdown arg; | ||
404 | |||
405 | arg.reason = SHUTDOWN_suspend; | ||
406 | |||
407 | return xencomm_arch_hypercall_sched_op( | ||
408 | SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg))); | ||
409 | } | ||
410 | |||
411 | long | ||
412 | xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg) | ||
413 | { | ||
414 | unsigned int argsize; | ||
415 | switch (cmd) { | ||
416 | case VCPUOP_register_runstate_memory_area: { | ||
417 | struct vcpu_register_runstate_memory_area *area = | ||
418 | (struct vcpu_register_runstate_memory_area *)arg; | ||
419 | argsize = sizeof(*arg); | ||
420 | set_xen_guest_handle(area->addr.h, | ||
421 | (void *)xencomm_map_no_alloc(area->addr.v, | ||
422 | sizeof(area->addr.v))); | ||
423 | break; | ||
424 | } | ||
425 | |||
426 | default: | ||
427 | printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd); | ||
428 | return -ENOSYS; | ||
429 | } | ||
430 | |||
431 | return xencomm_arch_hypercall_vcpu_op(cmd, cpu, | ||
432 | xencomm_map_no_alloc(arg, argsize)); | ||
433 | } | ||
434 | |||
435 | long | ||
436 | xencomm_hypercall_opt_feature(void *arg) | ||
437 | { | ||
438 | return xencomm_arch_hypercall_opt_feature( | ||
439 | xencomm_map_no_alloc(arg, | ||
440 | sizeof(struct xen_ia64_opt_feature))); | ||
441 | } | ||
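
The wrappers above all follow one shape: pick the argument size from the command code, wrap the in-kernel pointer in a descriptor via xencomm_map_no_alloc(), and hand the descriptor to the raw arch hypercall; unknown commands fall through to -ENOSYS. A stripped-down sketch of that shape, using hypothetical stand-ins rather than the real xencomm/hypercall API, is:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct desc { uintptr_t addr; size_t len; };	/* stand-in for a xencomm handle */

struct op_a { int x; };
struct op_b { long y[4]; };

enum { CMD_A = 1, CMD_B = 2 };

/* Stand-in for xencomm_map_no_alloc(): describe the buffer, don't copy it. */
static struct desc wrap(void *ptr, size_t len)
{
	struct desc d = { (uintptr_t)ptr, len };
	return d;
}

/* Stand-in for the raw arch hypercall that takes a descriptor. */
static int raw_hypercall(int cmd, struct desc d)
{
	printf("cmd=%d addr=%#lx len=%zu\n", cmd, (unsigned long)d.addr, d.len);
	return 0;
}

static int hypercall(int cmd, void *arg)
{
	size_t argsize;

	switch (cmd) {
	case CMD_A: argsize = sizeof(struct op_a); break;
	case CMD_B: argsize = sizeof(struct op_b); break;
	default:    return -ENOSYS;		/* unknown command */
	}
	return raw_hypercall(cmd, wrap(arg, argsize));
}

int main(void)
{
	struct op_b b = { { 1, 2, 3, 4 } };
	return hypercall(CMD_B, &b);
}

The descriptor gives the hypervisor something it can resolve (here just an address/length pair) instead of a raw pointer, and nothing is copied or allocated along the way, which is also why the note above restricts these helpers to in-kernel parameters.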
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c deleted file mode 100644 index 3e8d350fdf39..000000000000 --- a/arch/ia64/xen/xen_pv_ops.c +++ /dev/null | |||
@@ -1,1141 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * arch/ia64/xen/xen_pv_ops.c | ||
3 | * | ||
4 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
5 | * VA Linux Systems Japan K.K. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/console.h> | ||
24 | #include <linux/irq.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/pm.h> | ||
27 | #include <linux/unistd.h> | ||
28 | |||
29 | #include <asm/xen/hypervisor.h> | ||
30 | #include <asm/xen/xencomm.h> | ||
31 | #include <asm/xen/privop.h> | ||
32 | |||
33 | #include "irq_xen.h" | ||
34 | #include "time.h" | ||
35 | |||
36 | /*************************************************************************** | ||
37 | * general info | ||
38 | */ | ||
39 | static struct pv_info xen_info __initdata = { | ||
40 | .kernel_rpl = 2, /* or 1: determined at runtime */ | ||
41 | .paravirt_enabled = 1, | ||
42 | .name = "Xen/ia64", | ||
43 | }; | ||
44 | |||
45 | #define IA64_RSC_PL_SHIFT 2 | ||
46 | #define IA64_RSC_PL_BIT_SIZE 2 | ||
47 | #define IA64_RSC_PL_MASK \ | ||
48 | (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT) | ||
49 | |||
50 | static void __init | ||
51 | xen_info_init(void) | ||
52 | { | ||
53 | /* Xenified Linux/ia64 may run at pl = 1 or 2; | ||
54 | * determine this at run time. */ | ||
55 | unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC); | ||
56 | unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT; | ||
57 | xen_info.kernel_rpl = rpl; | ||
58 | } | ||
59 | |||
60 | /*************************************************************************** | ||
61 | * pv_init_ops | ||
62 | * initialization hooks. | ||
63 | */ | ||
64 | |||
65 | static void | ||
66 | xen_panic_hypercall(struct unw_frame_info *info, void *arg) | ||
67 | { | ||
68 | current->thread.ksp = (__u64)info->sw - 16; | ||
69 | HYPERVISOR_shutdown(SHUTDOWN_crash); | ||
70 | /* we're never actually going to get here... */ | ||
71 | } | ||
72 | |||
73 | static int | ||
74 | xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) | ||
75 | { | ||
76 | unw_init_running(xen_panic_hypercall, NULL); | ||
77 | /* we're never actually going to get here... */ | ||
78 | return NOTIFY_DONE; | ||
79 | } | ||
80 | |||
81 | static struct notifier_block xen_panic_block = { | ||
82 | xen_panic_event, NULL, 0 /* try to go last */ | ||
83 | }; | ||
84 | |||
85 | static void xen_pm_power_off(void) | ||
86 | { | ||
87 | local_irq_disable(); | ||
88 | HYPERVISOR_shutdown(SHUTDOWN_poweroff); | ||
89 | } | ||
90 | |||
91 | static void __init | ||
92 | xen_banner(void) | ||
93 | { | ||
94 | printk(KERN_INFO | ||
95 | "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld " | ||
96 | "flags=0x%x\n", | ||
97 | xen_info.kernel_rpl, | ||
98 | HYPERVISOR_shared_info->arch.start_info_pfn, | ||
99 | xen_start_info->nr_pages, xen_start_info->flags); | ||
100 | } | ||
101 | |||
102 | static int __init | ||
103 | xen_reserve_memory(struct rsvd_region *region) | ||
104 | { | ||
105 | region->start = (unsigned long)__va( | ||
106 | (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT)); | ||
107 | region->end = region->start + PAGE_SIZE; | ||
108 | return 1; | ||
109 | } | ||
110 | |||
111 | static void __init | ||
112 | xen_arch_setup_early(void) | ||
113 | { | ||
114 | struct shared_info *s; | ||
115 | BUG_ON(!xen_pv_domain()); | ||
116 | |||
117 | s = HYPERVISOR_shared_info; | ||
118 | xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT); | ||
119 | |||
120 | /* Must be done before any hypercall. */ | ||
121 | xencomm_initialize(); | ||
122 | |||
123 | xen_setup_features(); | ||
124 | /* Register a call for panic conditions. */ | ||
125 | atomic_notifier_chain_register(&panic_notifier_list, | ||
126 | &xen_panic_block); | ||
127 | pm_power_off = xen_pm_power_off; | ||
128 | |||
129 | xen_ia64_enable_opt_feature(); | ||
130 | } | ||
131 | |||
132 | static void __init | ||
133 | xen_arch_setup_console(char **cmdline_p) | ||
134 | { | ||
135 | add_preferred_console("xenboot", 0, NULL); | ||
136 | add_preferred_console("tty", 0, NULL); | ||
137 | /* use hvc_xen */ | ||
138 | add_preferred_console("hvc", 0, NULL); | ||
139 | |||
140 | #if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE) | ||
141 | conswitchp = NULL; | ||
142 | #endif | ||
143 | } | ||
144 | |||
145 | static int __init | ||
146 | xen_arch_setup_nomca(void) | ||
147 | { | ||
148 | return 1; | ||
149 | } | ||
150 | |||
151 | static void __init | ||
152 | xen_post_smp_prepare_boot_cpu(void) | ||
153 | { | ||
154 | xen_setup_vcpu_info_placement(); | ||
155 | } | ||
156 | |||
157 | #ifdef ASM_SUPPORTED | ||
158 | static unsigned long __init_or_module | ||
159 | xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); | ||
160 | #endif | ||
161 | static void __init | ||
162 | xen_patch_branch(unsigned long tag, unsigned long type); | ||
163 | |||
164 | static const struct pv_init_ops xen_init_ops __initconst = { | ||
165 | .banner = xen_banner, | ||
166 | |||
167 | .reserve_memory = xen_reserve_memory, | ||
168 | |||
169 | .arch_setup_early = xen_arch_setup_early, | ||
170 | .arch_setup_console = xen_arch_setup_console, | ||
171 | .arch_setup_nomca = xen_arch_setup_nomca, | ||
172 | |||
173 | .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, | ||
174 | #ifdef ASM_SUPPORTED | ||
175 | .patch_bundle = xen_patch_bundle, | ||
176 | #endif | ||
177 | .patch_branch = xen_patch_branch, | ||
178 | }; | ||
179 | |||
180 | /*************************************************************************** | ||
181 | * pv_fsys_data | ||
182 | * addresses for fsys | ||
183 | */ | ||
184 | |||
185 | extern unsigned long xen_fsyscall_table[NR_syscalls]; | ||
186 | extern char xen_fsys_bubble_down[]; | ||
187 | struct pv_fsys_data xen_fsys_data __initdata = { | ||
188 | .fsyscall_table = (unsigned long *)xen_fsyscall_table, | ||
189 | .fsys_bubble_down = (void *)xen_fsys_bubble_down, | ||
190 | }; | ||
191 | |||
192 | /*************************************************************************** | ||
193 | * pv_patchdata | ||
194 | * patchdata addresses | ||
195 | */ | ||
196 | |||
197 | #define DECLARE(name) \ | ||
198 | extern unsigned long __xen_start_gate_##name##_patchlist[]; \ | ||
199 | extern unsigned long __xen_end_gate_##name##_patchlist[] | ||
200 | |||
201 | DECLARE(fsyscall); | ||
202 | DECLARE(brl_fsys_bubble_down); | ||
203 | DECLARE(vtop); | ||
204 | DECLARE(mckinley_e9); | ||
205 | |||
206 | extern unsigned long __xen_start_gate_section[]; | ||
207 | |||
208 | #define ASSIGN(name) \ | ||
209 | .start_##name##_patchlist = \ | ||
210 | (unsigned long)__xen_start_gate_##name##_patchlist, \ | ||
211 | .end_##name##_patchlist = \ | ||
212 | (unsigned long)__xen_end_gate_##name##_patchlist | ||
213 | |||
214 | static struct pv_patchdata xen_patchdata __initdata = { | ||
215 | ASSIGN(fsyscall), | ||
216 | ASSIGN(brl_fsys_bubble_down), | ||
217 | ASSIGN(vtop), | ||
218 | ASSIGN(mckinley_e9), | ||
219 | |||
220 | .gate_section = (void*)__xen_start_gate_section, | ||
221 | }; | ||
222 | |||
223 | /*************************************************************************** | ||
224 | * pv_cpu_ops | ||
225 | * intrinsics hooks. | ||
226 | */ | ||
227 | |||
228 | #ifndef ASM_SUPPORTED | ||
229 | static void | ||
230 | xen_set_itm_with_offset(unsigned long val) | ||
231 | { | ||
232 | /* ia64_cpu_local_tick() calls this with interrupt enabled. */ | ||
233 | /* WARN_ON(!irqs_disabled()); */ | ||
234 | xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); | ||
235 | } | ||
236 | |||
237 | static unsigned long | ||
238 | xen_get_itm_with_offset(void) | ||
239 | { | ||
240 | /* unused at this moment */ | ||
241 | printk(KERN_DEBUG "%s is called.\n", __func__); | ||
242 | |||
243 | WARN_ON(!irqs_disabled()); | ||
244 | return ia64_native_getreg(_IA64_REG_CR_ITM) + | ||
245 | XEN_MAPPEDREGS->itc_offset; | ||
246 | } | ||
247 | |||
248 | /* ia64_set_itc() is only called by | ||
249 | * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). | ||
250 | * So XEN_MAPPEDREGS->itc_offset can be considered almost constant. | ||
251 | */ | ||
252 | static void | ||
253 | xen_set_itc(unsigned long val) | ||
254 | { | ||
255 | unsigned long mitc; | ||
256 | |||
257 | WARN_ON(!irqs_disabled()); | ||
258 | mitc = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
259 | XEN_MAPPEDREGS->itc_offset = val - mitc; | ||
260 | XEN_MAPPEDREGS->itc_last = val; | ||
261 | } | ||
262 | |||
263 | static unsigned long | ||
264 | xen_get_itc(void) | ||
265 | { | ||
266 | unsigned long res; | ||
267 | unsigned long itc_offset; | ||
268 | unsigned long itc_last; | ||
269 | unsigned long ret_itc_last; | ||
270 | |||
271 | itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
272 | do { | ||
273 | itc_last = XEN_MAPPEDREGS->itc_last; | ||
274 | res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
275 | res += itc_offset; | ||
276 | if (itc_last >= res) | ||
277 | res = itc_last + 1; | ||
278 | ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, | ||
279 | itc_last, res); | ||
280 | } while (unlikely(ret_itc_last != itc_last)); | ||
281 | return res; | ||
282 | |||
283 | #if 0 | ||
284 | /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. | ||
285 | Should it be paravirtualized instead? */ | ||
286 | WARN_ON(!irqs_disabled()); | ||
287 | itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
288 | itc_last = XEN_MAPPEDREGS->itc_last; | ||
289 | res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
290 | res += itc_offset; | ||
291 | if (itc_last >= res) | ||
292 | res = itc_last + 1; | ||
293 | XEN_MAPPEDREGS->itc_last = res; | ||
294 | return res; | ||
295 | #endif | ||
296 | } | ||
297 | |||
298 | static void xen_setreg(int regnum, unsigned long val) | ||
299 | { | ||
300 | switch (regnum) { | ||
301 | case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: | ||
302 | xen_set_kr(regnum - _IA64_REG_AR_KR0, val); | ||
303 | break; | ||
304 | case _IA64_REG_AR_ITC: | ||
305 | xen_set_itc(val); | ||
306 | break; | ||
307 | case _IA64_REG_CR_TPR: | ||
308 | xen_set_tpr(val); | ||
309 | break; | ||
310 | case _IA64_REG_CR_ITM: | ||
311 | xen_set_itm_with_offset(val); | ||
312 | break; | ||
313 | case _IA64_REG_CR_EOI: | ||
314 | xen_eoi(val); | ||
315 | break; | ||
316 | default: | ||
317 | ia64_native_setreg_func(regnum, val); | ||
318 | break; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | static unsigned long xen_getreg(int regnum) | ||
323 | { | ||
324 | unsigned long res; | ||
325 | |||
326 | switch (regnum) { | ||
327 | case _IA64_REG_PSR: | ||
328 | res = xen_get_psr(); | ||
329 | break; | ||
330 | case _IA64_REG_AR_ITC: | ||
331 | res = xen_get_itc(); | ||
332 | break; | ||
333 | case _IA64_REG_CR_ITM: | ||
334 | res = xen_get_itm_with_offset(); | ||
335 | break; | ||
336 | case _IA64_REG_CR_IVR: | ||
337 | res = xen_get_ivr(); | ||
338 | break; | ||
339 | case _IA64_REG_CR_TPR: | ||
340 | res = xen_get_tpr(); | ||
341 | break; | ||
342 | default: | ||
343 | res = ia64_native_getreg_func(regnum); | ||
344 | break; | ||
345 | } | ||
346 | return res; | ||
347 | } | ||
348 | |||
349 | /* Turning on interrupts is a bit more complicated: write to the | ||
350 | * memory-mapped virtual psr.i bit first (to avoid a race condition), | ||
351 | * then, if any interrupts were pending, execute a hyperprivop | ||
352 | * to ensure the pending interrupt gets delivered; otherwise we're done. */ | ||
353 | static void | ||
354 | xen_ssm_i(void) | ||
355 | { | ||
356 | int old = xen_get_virtual_psr_i(); | ||
357 | xen_set_virtual_psr_i(1); | ||
358 | barrier(); | ||
359 | if (!old && xen_get_virtual_pend()) | ||
360 | xen_hyper_ssm_i(); | ||
361 | } | ||
362 | |||
363 | /* turning off interrupts can be paravirtualized simply by writing | ||
364 | * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */ | ||
365 | static void | ||
366 | xen_rsm_i(void) | ||
367 | { | ||
368 | xen_set_virtual_psr_i(0); | ||
369 | barrier(); | ||
370 | } | ||
371 | |||
372 | static unsigned long | ||
373 | xen_get_psr_i(void) | ||
374 | { | ||
375 | return xen_get_virtual_psr_i() ? IA64_PSR_I : 0; | ||
376 | } | ||
377 | |||
378 | static void | ||
379 | xen_intrin_local_irq_restore(unsigned long mask) | ||
380 | { | ||
381 | if (mask & IA64_PSR_I) | ||
382 | xen_ssm_i(); | ||
383 | else | ||
384 | xen_rsm_i(); | ||
385 | } | ||
386 | #else | ||
387 | #define __DEFINE_FUNC(name, code) \ | ||
388 | extern const char xen_ ## name ## _direct_start[]; \ | ||
389 | extern const char xen_ ## name ## _direct_end[]; \ | ||
390 | asm (".align 32\n" \ | ||
391 | ".proc xen_" #name "\n" \ | ||
392 | "xen_" #name ":\n" \ | ||
393 | "xen_" #name "_direct_start:\n" \ | ||
394 | code \ | ||
395 | "xen_" #name "_direct_end:\n" \ | ||
396 | "br.cond.sptk.many b6\n" \ | ||
397 | ".endp xen_" #name "\n") | ||
398 | |||
399 | #define DEFINE_VOID_FUNC0(name, code) \ | ||
400 | extern void \ | ||
401 | xen_ ## name (void); \ | ||
402 | __DEFINE_FUNC(name, code) | ||
403 | |||
404 | #define DEFINE_VOID_FUNC1(name, code) \ | ||
405 | extern void \ | ||
406 | xen_ ## name (unsigned long arg); \ | ||
407 | __DEFINE_FUNC(name, code) | ||
408 | |||
409 | #define DEFINE_VOID_FUNC1_VOID(name, code) \ | ||
410 | extern void \ | ||
411 | xen_ ## name (void *arg); \ | ||
412 | __DEFINE_FUNC(name, code) | ||
413 | |||
414 | #define DEFINE_VOID_FUNC2(name, code) \ | ||
415 | extern void \ | ||
416 | xen_ ## name (unsigned long arg0, \ | ||
417 | unsigned long arg1); \ | ||
418 | __DEFINE_FUNC(name, code) | ||
419 | |||
420 | #define DEFINE_FUNC0(name, code) \ | ||
421 | extern unsigned long \ | ||
422 | xen_ ## name (void); \ | ||
423 | __DEFINE_FUNC(name, code) | ||
424 | |||
425 | #define DEFINE_FUNC1(name, type, code) \ | ||
426 | extern unsigned long \ | ||
427 | xen_ ## name (type arg); \ | ||
428 | __DEFINE_FUNC(name, code) | ||
429 | |||
430 | #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) | ||
431 | |||
432 | /* | ||
433 | * static void xen_set_itm_with_offset(unsigned long val) | ||
434 | * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); | ||
435 | */ | ||
436 | /* 2 bundles */ | ||
437 | DEFINE_VOID_FUNC1(set_itm_with_offset, | ||
438 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
439 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
440 | ";;\n" | ||
441 | "ld8 r3 = [r2]\n" | ||
442 | ";;\n" | ||
443 | "sub r8 = r8, r3\n" | ||
444 | "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); | ||
445 | |||
446 | /* | ||
447 | * static unsigned long xen_get_itm_with_offset(void) | ||
448 | * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; | ||
449 | */ | ||
450 | /* 2 bundles */ | ||
451 | DEFINE_FUNC0(get_itm_with_offset, | ||
452 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
453 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
454 | ";;\n" | ||
455 | "ld8 r3 = [r2]\n" | ||
456 | "mov r8 = cr.itm\n" | ||
457 | ";;\n" | ||
458 | "add r8 = r8, r2\n"); | ||
459 | |||
460 | /* | ||
461 | * static void xen_set_itc(unsigned long val) | ||
462 | * unsigned long mitc; | ||
463 | * | ||
464 | * WARN_ON(!irqs_disabled()); | ||
465 | * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
466 | * XEN_MAPPEDREGS->itc_offset = val - mitc; | ||
467 | * XEN_MAPPEDREGS->itc_last = val; | ||
468 | */ | ||
469 | /* 2 bundles */ | ||
470 | DEFINE_VOID_FUNC1(set_itc, | ||
471 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
472 | __stringify(XSI_ITC_LAST_OFS) "\n" | ||
473 | "mov r3 = ar.itc\n" | ||
474 | ";;\n" | ||
475 | "sub r3 = r8, r3\n" | ||
476 | "st8 [r2] = r8, " | ||
477 | __stringify(XSI_ITC_LAST_OFS) " - " | ||
478 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
479 | ";;\n" | ||
480 | "st8 [r2] = r3\n"); | ||
481 | |||
482 | /* | ||
483 | * static unsigned long xen_get_itc(void) | ||
484 | * unsigned long res; | ||
485 | * unsigned long itc_offset; | ||
486 | * unsigned long itc_last; | ||
487 | * unsigned long ret_itc_last; | ||
488 | * | ||
489 | * itc_offset = XEN_MAPPEDREGS->itc_offset; | ||
490 | * do { | ||
491 | * itc_last = XEN_MAPPEDREGS->itc_last; | ||
492 | * res = ia64_native_getreg(_IA64_REG_AR_ITC); | ||
493 | * res += itc_offset; | ||
494 | * if (itc_last >= res) | ||
495 | * res = itc_last + 1; | ||
496 | * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, | ||
497 | * itc_last, res); | ||
498 | * } while (unlikely(ret_itc_last != itc_last)); | ||
499 | * return res; | ||
500 | */ | ||
501 | /* 5 bundles */ | ||
502 | DEFINE_FUNC0(get_itc, | ||
503 | "mov r2 = " __stringify(XSI_BASE) " + " | ||
504 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
505 | ";;\n" | ||
506 | "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " | ||
507 | __stringify(XSI_ITC_OFFSET_OFS) "\n" | ||
508 | /* r9 = itc_offset */ | ||
509 | /* r2 = XSI_ITC_OFFSET */ | ||
510 | "888:\n" | ||
511 | "mov r8 = ar.itc\n" /* res = ar.itc */ | ||
512 | ";;\n" | ||
513 | "ld8 r3 = [r2]\n" /* r3 = itc_last */ | ||
514 | "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ | ||
515 | ";;\n" | ||
516 | "cmp.gtu p6, p0 = r3, r8\n" | ||
517 | ";;\n" | ||
518 | "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ | ||
519 | ";;\n" | ||
520 | "mov ar.ccv = r8\n" | ||
521 | ";;\n" | ||
522 | "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" | ||
523 | ";;\n" | ||
524 | "cmp.ne p6, p0 = r10, r3\n" | ||
525 | "(p6) hint @pause\n" | ||
526 | "(p6) br.cond.spnt 888b\n"); | ||
527 | |||
528 | DEFINE_VOID_FUNC1_VOID(fc, | ||
529 | "break " __stringify(HYPERPRIVOP_FC) "\n"); | ||
530 | |||
531 | /* | ||
532 | * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR | ||
533 | * masked_addr = *psr_i_addr_addr | ||
534 | * pending_intr_addr = masked_addr - 1 | ||
535 | * if (val & IA64_PSR_I) { | ||
536 | * masked = *masked_addr | ||
537 | * *masked_addr = 0:xen_set_virtual_psr_i(1) | ||
538 | * compiler barrier | ||
539 | * if (masked) { | ||
540 | * uint8_t pending = *pending_intr_addr; | ||
541 | * if (pending) | ||
542 | * XEN_HYPER_SSM_I | ||
543 | * } | ||
544 | * } else { | ||
545 | * *masked_addr = 1:xen_set_virtual_psr_i(0) | ||
546 | * } | ||
547 | */ | ||
548 | /* 6 bundles */ | ||
549 | DEFINE_VOID_FUNC1(intrin_local_irq_restore, | ||
550 | /* r8 = input value: 0 or IA64_PSR_I | ||
551 | * p6 = (flags & IA64_PSR_I) | ||
552 | * = if clause | ||
553 | * p7 = !(flags & IA64_PSR_I) | ||
554 | * = else clause | ||
555 | */ | ||
556 | "cmp.ne p6, p7 = r8, r0\n" | ||
557 | "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
558 | ";;\n" | ||
559 | /* r9 = XEN_PSR_I_ADDR */ | ||
560 | "ld8 r9 = [r9]\n" | ||
561 | ";;\n" | ||
562 | |||
563 | /* r10 = masked previous value */ | ||
564 | "(p6) ld1.acq r10 = [r9]\n" | ||
565 | ";;\n" | ||
566 | |||
567 | /* p8 = !masked interrupt masked previously? */ | ||
568 | "(p6) cmp.ne.unc p8, p0 = r10, r0\n" | ||
569 | |||
570 | /* p7 = else clause */ | ||
571 | "(p7) mov r11 = 1\n" | ||
572 | ";;\n" | ||
573 | /* masked = 1 */ | ||
574 | "(p7) st1.rel [r9] = r11\n" | ||
575 | |||
576 | /* p6 = if clause */ | ||
577 | /* masked = 0 | ||
578 | * r9 = masked_addr - 1 | ||
579 | * = pending_intr_addr | ||
580 | */ | ||
581 | "(p8) st1.rel [r9] = r0, -1\n" | ||
582 | ";;\n" | ||
583 | /* r8 = pending_intr */ | ||
584 | "(p8) ld1.acq r11 = [r9]\n" | ||
585 | ";;\n" | ||
586 | /* p9 = interrupt pending? */ | ||
587 | "(p8) cmp.ne.unc p9, p10 = r11, r0\n" | ||
588 | ";;\n" | ||
589 | "(p10) mf\n" | ||
590 | /* issue hypercall to trigger interrupt */ | ||
591 | "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); | ||
592 | |||
593 | DEFINE_VOID_FUNC2(ptcga, | ||
594 | "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); | ||
595 | DEFINE_VOID_FUNC2(set_rr, | ||
596 | "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); | ||
597 | |||
598 | /* | ||
599 | * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; | ||
600 | * tmp = *tmp | ||
601 | * tmp = *tmp; | ||
602 | * psr_i = tmp? 0: IA64_PSR_I; | ||
603 | */ | ||
604 | /* 4 bundles */ | ||
605 | DEFINE_FUNC0(get_psr_i, | ||
606 | "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
607 | ";;\n" | ||
608 | "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ | ||
609 | "mov r8 = 0\n" /* psr_i = 0 */ | ||
610 | ";;\n" | ||
611 | "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ | ||
612 | ";;\n" | ||
613 | "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ | ||
614 | ";;\n" | ||
615 | "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); | ||
616 | |||
617 | DEFINE_FUNC1(thash, unsigned long, | ||
618 | "break " __stringify(HYPERPRIVOP_THASH) "\n"); | ||
619 | DEFINE_FUNC1(get_cpuid, int, | ||
620 | "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); | ||
621 | DEFINE_FUNC1(get_pmd, int, | ||
622 | "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); | ||
623 | DEFINE_FUNC1(get_rr, unsigned long, | ||
624 | "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); | ||
625 | |||
626 | /* | ||
627 | * void xen_privop_ssm_i(void) | ||
628 | * | ||
629 | * int masked = !xen_get_virtual_psr_i(); | ||
630 | * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) | ||
631 | * xen_set_virtual_psr_i(1) | ||
632 | * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 | ||
633 | * // compiler barrier | ||
634 | * if (masked) { | ||
635 | * uint8_t* pend_int_addr = | ||
636 | * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; | ||
637 | * uint8_t pending = *pend_int_addr; | ||
638 | * if (pending) | ||
639 | * XEN_HYPER_SSM_I | ||
640 | * } | ||
641 | */ | ||
642 | /* 4 bundles */ | ||
643 | DEFINE_VOID_FUNC0(ssm_i, | ||
644 | "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
645 | ";;\n" | ||
646 | "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ | ||
647 | ";;\n" | ||
648 | "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ | ||
649 | ";;\n" | ||
650 | "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt | ||
651 | * r8 = XEN_PSR_I_ADDR - 1 | ||
652 | * = pend_int_addr | ||
653 | */ | ||
654 | "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I | ||
655 | * previously interrupt | ||
656 | * masked? | ||
657 | */ | ||
658 | ";;\n" | ||
659 | "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ | ||
660 | ";;\n" | ||
661 | "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ | ||
662 | ";;\n" | ||
663 | /* issue hypercall to get interrupt */ | ||
664 | "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" | ||
665 | ";;\n"); | ||
666 | |||
667 | /* | ||
668 | * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr | ||
669 | * = XEN_PSR_I_ADDR_ADDR; | ||
670 | * psr_i_addr = *psr_i_addr_addr; | ||
671 | * *psr_i_addr = 1; | ||
672 | */ | ||
673 | /* 2 bundles */ | ||
674 | DEFINE_VOID_FUNC0(rsm_i, | ||
675 | "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
676 | /* r8 = XEN_PSR_I_ADDR */ | ||
677 | "mov r9 = 1\n" | ||
678 | ";;\n" | ||
679 | "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ | ||
680 | ";;\n" | ||
681 | "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ | ||
682 | |||
683 | extern void | ||
684 | xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, | ||
685 | unsigned long val2, unsigned long val3, | ||
686 | unsigned long val4); | ||
687 | __DEFINE_FUNC(set_rr0_to_rr4, | ||
688 | "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); | ||
689 | |||
690 | |||
691 | extern unsigned long xen_getreg(int regnum); | ||
692 | #define __DEFINE_GET_REG(id, privop) \ | ||
693 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
694 | ";;\n" \ | ||
695 | "cmp.eq p6, p0 = r2, r8\n" \ | ||
696 | ";;\n" \ | ||
697 | "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ | ||
698 | "(p6) br.cond.sptk.many b6\n" \ | ||
699 | ";;\n" | ||
700 | |||
701 | __DEFINE_FUNC(getreg, | ||
702 | __DEFINE_GET_REG(PSR, PSR) | ||
703 | |||
704 | /* get_itc */ | ||
705 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | ||
706 | ";;\n" | ||
707 | "cmp.eq p6, p0 = r2, r8\n" | ||
708 | ";;\n" | ||
709 | "(p6) br.cond.spnt xen_get_itc\n" | ||
710 | ";;\n" | ||
711 | |||
712 | /* get itm */ | ||
713 | "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" | ||
714 | ";;\n" | ||
715 | "cmp.eq p6, p0 = r2, r8\n" | ||
716 | ";;\n" | ||
717 | "(p6) br.cond.spnt xen_get_itm_with_offset\n" | ||
718 | ";;\n" | ||
719 | |||
720 | __DEFINE_GET_REG(CR_IVR, IVR) | ||
721 | __DEFINE_GET_REG(CR_TPR, TPR) | ||
722 | |||
723 | /* fall back */ | ||
724 | "movl r2 = ia64_native_getreg_func\n" | ||
725 | ";;\n" | ||
726 | "mov b7 = r2\n" | ||
727 | ";;\n" | ||
728 | "br.cond.sptk.many b7\n"); | ||
729 | |||
730 | extern void xen_setreg(int regnum, unsigned long val); | ||
731 | #define __DEFINE_SET_REG(id, privop) \ | ||
732 | "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ | ||
733 | ";;\n" \ | ||
734 | "cmp.eq p6, p0 = r2, r9\n" \ | ||
735 | ";;\n" \ | ||
736 | "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ | ||
737 | "(p6) br.cond.sptk.many b6\n" \ | ||
738 | ";;\n" | ||
739 | |||
740 | __DEFINE_FUNC(setreg, | ||
741 | /* kr0 .. kr 7*/ | ||
742 | /* | ||
743 | * if (_IA64_REG_AR_KR0 <= regnum && | ||
744 | * regnum <= _IA64_REG_AR_KR7) { | ||
745 | * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 | ||
746 | * register __val asm ("r9") = val | ||
747 | * "break HYPERPRIVOP_SET_KR" | ||
748 | * } | ||
749 | */ | ||
750 | "mov r17 = r9\n" | ||
751 | "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" | ||
752 | ";;\n" | ||
753 | "cmp.ge p6, p0 = r9, r2\n" | ||
754 | "sub r17 = r17, r2\n" | ||
755 | ";;\n" | ||
756 | "(p6) cmp.ge.unc p7, p0 = " | ||
757 | __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) | ||
758 | ", r17\n" | ||
759 | ";;\n" | ||
760 | "(p7) mov r9 = r8\n" | ||
761 | ";;\n" | ||
762 | "(p7) mov r8 = r17\n" | ||
763 | "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" | ||
764 | |||
765 | /* set itm */ | ||
766 | "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" | ||
767 | ";;\n" | ||
768 | "cmp.eq p6, p0 = r2, r8\n" | ||
769 | ";;\n" | ||
770 | "(p6) br.cond.spnt xen_set_itm_with_offset\n" | ||
771 | |||
772 | /* set itc */ | ||
773 | "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" | ||
774 | ";;\n" | ||
775 | "cmp.eq p6, p0 = r2, r8\n" | ||
776 | ";;\n" | ||
777 | "(p6) br.cond.spnt xen_set_itc\n" | ||
778 | |||
779 | __DEFINE_SET_REG(CR_TPR, SET_TPR) | ||
780 | __DEFINE_SET_REG(CR_EOI, EOI) | ||
781 | |||
782 | /* fall back */ | ||
783 | "movl r2 = ia64_native_setreg_func\n" | ||
784 | ";;\n" | ||
785 | "mov b7 = r2\n" | ||
786 | ";;\n" | ||
787 | "br.cond.sptk.many b7\n"); | ||
788 | #endif | ||
789 | |||
790 | static const struct pv_cpu_ops xen_cpu_ops __initconst = { | ||
791 | .fc = xen_fc, | ||
792 | .thash = xen_thash, | ||
793 | .get_cpuid = xen_get_cpuid, | ||
794 | .get_pmd = xen_get_pmd, | ||
795 | .getreg = xen_getreg, | ||
796 | .setreg = xen_setreg, | ||
797 | .ptcga = xen_ptcga, | ||
798 | .get_rr = xen_get_rr, | ||
799 | .set_rr = xen_set_rr, | ||
800 | .set_rr0_to_rr4 = xen_set_rr0_to_rr4, | ||
801 | .ssm_i = xen_ssm_i, | ||
802 | .rsm_i = xen_rsm_i, | ||
803 | .get_psr_i = xen_get_psr_i, | ||
804 | .intrin_local_irq_restore | ||
805 | = xen_intrin_local_irq_restore, | ||
806 | }; | ||
807 | |||
808 | /****************************************************************************** | ||
809 | * replacement of hand-written assembly code. | ||
810 | */ | ||
811 | |||
812 | extern char xen_switch_to; | ||
813 | extern char xen_leave_syscall; | ||
814 | extern char xen_work_processed_syscall; | ||
815 | extern char xen_leave_kernel; | ||
816 | |||
817 | const struct pv_cpu_asm_switch xen_cpu_asm_switch = { | ||
818 | .switch_to = (unsigned long)&xen_switch_to, | ||
819 | .leave_syscall = (unsigned long)&xen_leave_syscall, | ||
820 | .work_processed_syscall = (unsigned long)&xen_work_processed_syscall, | ||
821 | .leave_kernel = (unsigned long)&xen_leave_kernel, | ||
822 | }; | ||
823 | |||
824 | /*************************************************************************** | ||
825 | * pv_iosapic_ops | ||
826 | * iosapic read/write hooks. | ||
827 | */ | ||
828 | static void | ||
829 | xen_pcat_compat_init(void) | ||
830 | { | ||
831 | /* nothing */ | ||
832 | } | ||
833 | |||
834 | static struct irq_chip* | ||
835 | xen_iosapic_get_irq_chip(unsigned long trigger) | ||
836 | { | ||
837 | return NULL; | ||
838 | } | ||
839 | |||
840 | static unsigned int | ||
841 | xen_iosapic_read(char __iomem *iosapic, unsigned int reg) | ||
842 | { | ||
843 | struct physdev_apic apic_op; | ||
844 | int ret; | ||
845 | |||
846 | apic_op.apic_physbase = (unsigned long)iosapic - | ||
847 | __IA64_UNCACHED_OFFSET; | ||
848 | apic_op.reg = reg; | ||
849 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); | ||
850 | if (ret) | ||
851 | return ret; | ||
852 | return apic_op.value; | ||
853 | } | ||
854 | |||
855 | static void | ||
856 | xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) | ||
857 | { | ||
858 | struct physdev_apic apic_op; | ||
859 | |||
860 | apic_op.apic_physbase = (unsigned long)iosapic - | ||
861 | __IA64_UNCACHED_OFFSET; | ||
862 | apic_op.reg = reg; | ||
863 | apic_op.value = val; | ||
864 | HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); | ||
865 | } | ||
866 | |||
867 | static struct pv_iosapic_ops xen_iosapic_ops __initdata = { | ||
868 | .pcat_compat_init = xen_pcat_compat_init, | ||
869 | .__get_irq_chip = xen_iosapic_get_irq_chip, | ||
870 | |||
871 | .__read = xen_iosapic_read, | ||
872 | .__write = xen_iosapic_write, | ||
873 | }; | ||
874 | |||
875 | /*************************************************************************** | ||
876 | * pv_ops initialization | ||
877 | */ | ||
878 | |||
879 | void __init | ||
880 | xen_setup_pv_ops(void) | ||
881 | { | ||
882 | xen_info_init(); | ||
883 | pv_info = xen_info; | ||
884 | pv_init_ops = xen_init_ops; | ||
885 | pv_fsys_data = xen_fsys_data; | ||
886 | pv_patchdata = xen_patchdata; | ||
887 | pv_cpu_ops = xen_cpu_ops; | ||
888 | pv_iosapic_ops = xen_iosapic_ops; | ||
889 | pv_irq_ops = xen_irq_ops; | ||
890 | pv_time_ops = xen_time_ops; | ||
891 | |||
892 | paravirt_cpu_asm_init(&xen_cpu_asm_switch); | ||
893 | } | ||
894 | |||
895 | #ifdef ASM_SUPPORTED | ||
896 | /*************************************************************************** | ||
897 | * binary patching | ||
898 | * pv_init_ops.patch_bundle | ||
899 | */ | ||
900 | |||
901 | #define DEFINE_FUNC_GETREG(name, privop) \ | ||
902 | DEFINE_FUNC0(get_ ## name, \ | ||
903 | "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") | ||
904 | |||
905 | DEFINE_FUNC_GETREG(psr, PSR); | ||
906 | DEFINE_FUNC_GETREG(eflag, EFLAG); | ||
907 | DEFINE_FUNC_GETREG(ivr, IVR); | ||
908 | DEFINE_FUNC_GETREG(tpr, TPR); | ||
909 | |||
910 | #define DEFINE_FUNC_SET_KR(n) \ | ||
911 | DEFINE_VOID_FUNC0(set_kr ## n, \ | ||
912 | ";;\n" \ | ||
913 | "mov r9 = r8\n" \ | ||
914 | "mov r8 = " #n "\n" \ | ||
915 | "break " __stringify(HYPERPRIVOP_SET_KR) "\n") | ||
916 | |||
917 | DEFINE_FUNC_SET_KR(0); | ||
918 | DEFINE_FUNC_SET_KR(1); | ||
919 | DEFINE_FUNC_SET_KR(2); | ||
920 | DEFINE_FUNC_SET_KR(3); | ||
921 | DEFINE_FUNC_SET_KR(4); | ||
922 | DEFINE_FUNC_SET_KR(5); | ||
923 | DEFINE_FUNC_SET_KR(6); | ||
924 | DEFINE_FUNC_SET_KR(7); | ||
925 | |||
926 | #define __DEFINE_FUNC_SETREG(name, privop) \ | ||
927 | DEFINE_VOID_FUNC0(name, \ | ||
928 | "break "__stringify(HYPERPRIVOP_ ## privop) "\n") | ||
929 | |||
930 | #define DEFINE_FUNC_SETREG(name, privop) \ | ||
931 | __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) | ||
932 | |||
933 | DEFINE_FUNC_SETREG(eflag, EFLAG); | ||
934 | DEFINE_FUNC_SETREG(tpr, TPR); | ||
935 | __DEFINE_FUNC_SETREG(eoi, EOI); | ||
936 | |||
937 | extern const char xen_check_events[]; | ||
938 | extern const char __xen_intrin_local_irq_restore_direct_start[]; | ||
939 | extern const char __xen_intrin_local_irq_restore_direct_end[]; | ||
940 | extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; | ||
941 | |||
942 | asm ( | ||
943 | ".align 32\n" | ||
944 | ".proc xen_check_events\n" | ||
945 | "xen_check_events:\n" | ||
946 | /* masked = 0 | ||
947 | * r9 = masked_addr - 1 | ||
948 | * = pending_intr_addr | ||
949 | */ | ||
950 | "st1.rel [r9] = r0, -1\n" | ||
951 | ";;\n" | ||
952 | /* r8 = pending_intr */ | ||
953 | "ld1.acq r11 = [r9]\n" | ||
954 | ";;\n" | ||
955 | /* p9 = interrupt pending? */ | ||
956 | "cmp.ne p9, p10 = r11, r0\n" | ||
957 | ";;\n" | ||
958 | "(p10) mf\n" | ||
959 | /* issue hypercall to trigger interrupt */ | ||
960 | "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" | ||
961 | "br.cond.sptk.many b6\n" | ||
962 | ".endp xen_check_events\n" | ||
963 | "\n" | ||
964 | ".align 32\n" | ||
965 | ".proc __xen_intrin_local_irq_restore_direct\n" | ||
966 | "__xen_intrin_local_irq_restore_direct:\n" | ||
967 | "__xen_intrin_local_irq_restore_direct_start:\n" | ||
968 | "1:\n" | ||
969 | "{\n" | ||
970 | "cmp.ne p6, p7 = r8, r0\n" | ||
971 | "mov r17 = ip\n" /* get ip to calc return address */ | ||
972 | "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" | ||
973 | ";;\n" | ||
974 | "}\n" | ||
975 | "{\n" | ||
976 | /* r9 = XEN_PSR_I_ADDR */ | ||
977 | "ld8 r9 = [r9]\n" | ||
978 | ";;\n" | ||
979 | /* r10 = masked previous value */ | ||
980 | "(p6) ld1.acq r10 = [r9]\n" | ||
981 | "adds r17 = 1f - 1b, r17\n" /* calculate return address */ | ||
982 | ";;\n" | ||
983 | "}\n" | ||
984 | "{\n" | ||
985 | /* p8 = !masked interrupt masked previously? */ | ||
986 | "(p6) cmp.ne.unc p8, p0 = r10, r0\n" | ||
987 | "\n" | ||
988 | /* p7 = else clause */ | ||
989 | "(p7) mov r11 = 1\n" | ||
990 | ";;\n" | ||
991 | "(p8) mov b6 = r17\n" /* set return address */ | ||
992 | "}\n" | ||
993 | "{\n" | ||
994 | /* masked = 1 */ | ||
995 | "(p7) st1.rel [r9] = r11\n" | ||
996 | "\n" | ||
997 | "[99:]\n" | ||
998 | "(p8) brl.cond.dptk.few xen_check_events\n" | ||
999 | "}\n" | ||
1000 | /* pv calling stub is 5 bundles. fill nop to adjust return address */ | ||
1001 | "{\n" | ||
1002 | "nop 0\n" | ||
1003 | "nop 0\n" | ||
1004 | "nop 0\n" | ||
1005 | "}\n" | ||
1006 | "1:\n" | ||
1007 | "__xen_intrin_local_irq_restore_direct_end:\n" | ||
1008 | ".endp __xen_intrin_local_irq_restore_direct\n" | ||
1009 | "\n" | ||
1010 | ".align 8\n" | ||
1011 | "__xen_intrin_local_irq_restore_direct_reloc:\n" | ||
1012 | "data8 99b\n" | ||
1013 | ); | ||
1014 | |||
1015 | static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] | ||
1016 | __initdata_or_module = | ||
1017 | { | ||
1018 | #define XEN_PATCH_BUNDLE_ELEM(name, type) \ | ||
1019 | { \ | ||
1020 | (void*)xen_ ## name ## _direct_start, \ | ||
1021 | (void*)xen_ ## name ## _direct_end, \ | ||
1022 | PARAVIRT_PATCH_TYPE_ ## type, \ | ||
1023 | } | ||
1024 | |||
1025 | XEN_PATCH_BUNDLE_ELEM(fc, FC), | ||
1026 | XEN_PATCH_BUNDLE_ELEM(thash, THASH), | ||
1027 | XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), | ||
1028 | XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), | ||
1029 | XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), | ||
1030 | XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), | ||
1031 | XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), | ||
1032 | XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), | ||
1033 | XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), | ||
1034 | XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), | ||
1035 | XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), | ||
1036 | { | ||
1037 | (void*)__xen_intrin_local_irq_restore_direct_start, | ||
1038 | (void*)__xen_intrin_local_irq_restore_direct_end, | ||
1039 | PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, | ||
1040 | }, | ||
1041 | |||
1042 | #define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ | ||
1043 | { \ | ||
1044 | xen_get_ ## name ## _direct_start, \ | ||
1045 | xen_get_ ## name ## _direct_end, \ | ||
1046 | PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ | ||
1047 | } | ||
1048 | |||
1049 | XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), | ||
1050 | XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), | ||
1051 | |||
1052 | XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), | ||
1053 | XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), | ||
1054 | |||
1055 | XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), | ||
1056 | XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), | ||
1057 | |||
1058 | |||
1059 | #define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
1060 | { \ | ||
1061 | xen_ ## name ## _direct_start, \ | ||
1062 | xen_ ## name ## _direct_end, \ | ||
1063 | PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ | ||
1064 | } | ||
1065 | |||
1066 | #define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ | ||
1067 | __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) | ||
1068 | |||
1069 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), | ||
1070 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), | ||
1071 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), | ||
1072 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), | ||
1073 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), | ||
1074 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), | ||
1075 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), | ||
1076 | XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), | ||
1077 | |||
1078 | XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), | ||
1079 | XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR), | ||
1080 | __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), | ||
1081 | |||
1082 | XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), | ||
1083 | XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), | ||
1084 | }; | ||
1085 | |||
1086 | static unsigned long __init_or_module | ||
1087 | xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) | ||
1088 | { | ||
1089 | const unsigned long nelems = sizeof(xen_patch_bundle_elems) / | ||
1090 | sizeof(xen_patch_bundle_elems[0]); | ||
1091 | unsigned long used; | ||
1092 | const struct paravirt_patch_bundle_elem *found; | ||
1093 | |||
1094 | used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, | ||
1095 | xen_patch_bundle_elems, nelems, | ||
1096 | &found); | ||
1097 | |||
1098 | if (found == NULL) | ||
1099 | /* fallback */ | ||
1100 | return ia64_native_patch_bundle(sbundle, ebundle, type); | ||
1101 | if (used == 0) | ||
1102 | return used; | ||
1103 | |||
1104 | /* relocation */ | ||
1105 | switch (type) { | ||
1106 | case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { | ||
1107 | unsigned long reloc = | ||
1108 | __xen_intrin_local_irq_restore_direct_reloc; | ||
1109 | unsigned long reloc_offset = reloc - (unsigned long) | ||
1110 | __xen_intrin_local_irq_restore_direct_start; | ||
1111 | unsigned long tag = (unsigned long)sbundle + reloc_offset; | ||
1112 | paravirt_patch_reloc_brl(tag, xen_check_events); | ||
1113 | break; | ||
1114 | } | ||
1115 | default: | ||
1116 | /* nothing */ | ||
1117 | break; | ||
1118 | } | ||
1119 | return used; | ||
1120 | } | ||
1121 | #endif /* ASM_SUPPORTED */ | ||
1122 | |||
1123 | const struct paravirt_patch_branch_target xen_branch_target[] | ||
1124 | __initconst = { | ||
1125 | #define PARAVIRT_BR_TARGET(name, type) \ | ||
1126 | { \ | ||
1127 | &xen_ ## name, \ | ||
1128 | PARAVIRT_PATCH_TYPE_BR_ ## type, \ | ||
1129 | } | ||
1130 | PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), | ||
1131 | PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), | ||
1132 | PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), | ||
1133 | PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), | ||
1134 | }; | ||
1135 | |||
1136 | static void __init | ||
1137 | xen_patch_branch(unsigned long tag, unsigned long type) | ||
1138 | { | ||
1139 | __paravirt_patch_apply_branch(tag, type, xen_branch_target, | ||
1140 | ARRAY_SIZE(xen_branch_target)); | ||
1141 | } | ||
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c deleted file mode 100644 index 73d903ca2d64..000000000000 --- a/arch/ia64/xen/xencomm.c +++ /dev/null | |||
@@ -1,106 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/mm.h> | ||
20 | #include <linux/err.h> | ||
21 | |||
22 | static unsigned long kernel_virtual_offset; | ||
23 | static int is_xencomm_initialized; | ||
24 | |||
25 | /* for xen early printk. It uses console io hypercall which uses xencomm. | ||
26 | * However early printk may use it before xencomm initialization. | ||
27 | */ | ||
28 | int | ||
29 | xencomm_is_initialized(void) | ||
30 | { | ||
31 | return is_xencomm_initialized; | ||
32 | } | ||
33 | |||
34 | void | ||
35 | xencomm_initialize(void) | ||
36 | { | ||
37 | kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START); | ||
38 | is_xencomm_initialized = 1; | ||
39 | } | ||
40 | |||
41 | /* Translate virtual address to physical address. */ | ||
42 | unsigned long | ||
43 | xencomm_vtop(unsigned long vaddr) | ||
44 | { | ||
45 | struct page *page; | ||
46 | struct vm_area_struct *vma; | ||
47 | |||
48 | if (vaddr == 0) | ||
49 | return 0UL; | ||
50 | |||
51 | if (REGION_NUMBER(vaddr) == 5) { | ||
52 | pgd_t *pgd; | ||
53 | pud_t *pud; | ||
54 | pmd_t *pmd; | ||
55 | pte_t *ptep; | ||
56 | |||
57 | /* On ia64, TASK_SIZE refers to current. It is not initialized | ||
58 | during boot. | ||
59 | Furthermore the kernel is relocatable and __pa() doesn't | ||
60 | work on addresses. */ | ||
61 | if (vaddr >= KERNEL_START | ||
62 | && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE)) | ||
63 | return vaddr - kernel_virtual_offset; | ||
64 | |||
65 | /* In kernel area -- virtually mapped. */ | ||
66 | pgd = pgd_offset_k(vaddr); | ||
67 | if (pgd_none(*pgd) || pgd_bad(*pgd)) | ||
68 | return ~0UL; | ||
69 | |||
70 | pud = pud_offset(pgd, vaddr); | ||
71 | if (pud_none(*pud) || pud_bad(*pud)) | ||
72 | return ~0UL; | ||
73 | |||
74 | pmd = pmd_offset(pud, vaddr); | ||
75 | if (pmd_none(*pmd) || pmd_bad(*pmd)) | ||
76 | return ~0UL; | ||
77 | |||
78 | ptep = pte_offset_kernel(pmd, vaddr); | ||
79 | if (!ptep) | ||
80 | return ~0UL; | ||
81 | |||
82 | return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK); | ||
83 | } | ||
84 | |||
85 | if (vaddr > TASK_SIZE) { | ||
86 | /* percpu variables */ | ||
87 | if (REGION_NUMBER(vaddr) == 7 && | ||
88 | REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS)) | ||
89 | ia64_tpa(vaddr); | ||
90 | |||
91 | /* kernel address */ | ||
92 | return __pa(vaddr); | ||
93 | } | ||
94 | |||
95 | /* XXX double-check (lack of) locking */ | ||
96 | vma = find_extend_vma(current->mm, vaddr); | ||
97 | if (!vma) | ||
98 | return ~0UL; | ||
99 | |||
100 | /* We assume the page is modified. */ | ||
101 | page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH); | ||
102 | if (IS_ERR_OR_NULL(page)) | ||
103 | return ~0UL; | ||
104 | |||
105 | return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); | ||
106 | } | ||
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S deleted file mode 100644 index 3e71d50584d9..000000000000 --- a/arch/ia64/xen/xenivt.S +++ /dev/null | |||
@@ -1,52 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ia64/xen/ivt.S | ||
3 | * | ||
4 | * Copyright (C) 2005 Hewlett-Packard Co | ||
5 | * Dan Magenheimer <dan.magenheimer@hp.com> | ||
6 | * | ||
7 | * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> | ||
8 | * VA Linux Systems Japan K.K. | ||
9 | * pv_ops. | ||
10 | */ | ||
11 | |||
12 | #include <asm/asmmacro.h> | ||
13 | #include <asm/kregs.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | |||
16 | #include "../kernel/minstate.h" | ||
17 | |||
18 | .section .text,"ax" | ||
19 | GLOBAL_ENTRY(xen_event_callback) | ||
20 | mov r31=pr // prepare to save predicates | ||
21 | ;; | ||
22 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 | ||
23 | ;; | ||
24 | movl r3=XSI_PSR_IC | ||
25 | mov r14=1 | ||
26 | ;; | ||
27 | st4 [r3]=r14 | ||
28 | ;; | ||
29 | adds r3=8,r2 // set up second base pointer for SAVE_REST | ||
30 | srlz.i // ensure everybody knows psr.ic is back on | ||
31 | ;; | ||
32 | SAVE_REST | ||
33 | ;; | ||
34 | 1: | ||
35 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | ||
36 | add out0=16,sp // pass pointer to pt_regs as first arg | ||
37 | ;; | ||
38 | br.call.sptk.many b0=xen_evtchn_do_upcall | ||
39 | ;; | ||
40 | movl r20=XSI_PSR_I_ADDR | ||
41 | ;; | ||
42 | ld8 r20=[r20] | ||
43 | ;; | ||
44 | adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending | ||
45 | ;; | ||
46 | ld1 r20=[r20] | ||
47 | ;; | ||
48 | cmp.ne p6,p0=r20,r0 // if there are pending events, | ||
49 | (p6) br.spnt.few 1b // call evtchn_do_upcall again. | ||
50 | br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is | ||
51 | // paravirtualized as xen_leave_kernel | ||
52 | END(xen_event_callback) | ||
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S deleted file mode 100644 index e29519ebe2d2..000000000000 --- a/arch/ia64/xen/xensetup.S +++ /dev/null | |||
@@ -1,80 +0,0 @@ | |||
1 | /* | ||
2 | * Support routines for Xen | ||
3 | * | ||
4 | * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com> | ||
5 | */ | ||
6 | |||
7 | #include <asm/processor.h> | ||
8 | #include <asm/asmmacro.h> | ||
9 | #include <asm/pgtable.h> | ||
10 | #include <asm/paravirt.h> | ||
11 | #include <asm/xen/privop.h> | ||
12 | #include <linux/elfnote.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <xen/interface/elfnote.h> | ||
15 | |||
16 | .section .data..read_mostly | ||
17 | .align 8 | ||
18 | .global xen_domain_type | ||
19 | xen_domain_type: | ||
20 | data4 XEN_NATIVE_ASM | ||
21 | .previous | ||
22 | |||
23 | __INIT | ||
24 | ENTRY(startup_xen) | ||
25 | // Calculate load offset. | ||
26 | // The constant, LOAD_OFFSET, can't be used because the boot | ||
27 | // loader doesn't always load to the LMA specified by the vmlinux.lds. | ||
28 | mov r9=ip // must be the first instruction to make sure | ||
29 | // that r9 = the physical address of startup_xen. | ||
30 | // Usually r9 = startup_xen - LOAD_OFFSET | ||
31 | movl r8=startup_xen | ||
32 | ;; | ||
33 | sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET. | ||
34 | |||
35 | mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN | ||
36 | movl r11=_start | ||
37 | ;; | ||
38 | add r11=r11,r9 | ||
39 | movl r8=hypervisor_type | ||
40 | ;; | ||
41 | add r8=r8,r9 | ||
42 | mov b0=r11 | ||
43 | ;; | ||
44 | st8 [r8]=r10 | ||
45 | br.cond.sptk.many b0 | ||
46 | ;; | ||
47 | END(startup_xen) | ||
48 | |||
49 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") | ||
50 | ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") | ||
51 | ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") | ||
52 | ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET) | ||
53 | |||
54 | #define isBP p3 // are we the Bootstrap Processor? | ||
55 | |||
56 | GLOBAL_ENTRY(xen_setup_hook) | ||
57 | mov r8=XEN_PV_DOMAIN_ASM | ||
58 | (isBP) movl r9=xen_domain_type;; | ||
59 | (isBP) st4 [r9]=r8 | ||
60 | movl r10=xen_ivt;; | ||
61 | |||
62 | mov cr.iva=r10 | ||
63 | |||
64 | /* Set xsi base. */ | ||
65 | #define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600 | ||
66 | (isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA | ||
67 | (isBP) movl r28=XSI_BASE;; | ||
68 | (isBP) break 0x1000;; | ||
69 | |||
70 | /* setup pv_ops */ | ||
71 | (isBP) mov r4=rp | ||
72 | ;; | ||
73 | (isBP) br.call.sptk.many rp=xen_setup_pv_ops | ||
74 | ;; | ||
75 | (isBP) mov rp=r4 | ||
76 | ;; | ||
77 | |||
78 | br.ret.sptk.many rp | ||
79 | ;; | ||
80 | END(xen_setup_hook) | ||
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h index 6976621efd3f..1a40265e8d88 100644 --- a/arch/m32r/include/asm/barrier.h +++ b/arch/m32r/include/asm/barrier.h | |||
@@ -11,84 +11,6 @@ | |||
11 | 11 | ||
12 | #define nop() __asm__ __volatile__ ("nop" : : ) | 12 | #define nop() __asm__ __volatile__ ("nop" : : ) |
13 | 13 | ||
14 | /* | 14 | #include <asm-generic/barrier.h> |
15 | * Memory barrier. | ||
16 | * | ||
17 | * mb() prevents loads and stores being reordered across this point. | ||
18 | * rmb() prevents loads being reordered across this point. | ||
19 | * wmb() prevents stores being reordered across this point. | ||
20 | */ | ||
21 | #define mb() barrier() | ||
22 | #define rmb() mb() | ||
23 | #define wmb() mb() | ||
24 | |||
25 | /** | ||
26 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
27 | * depend on. | ||
28 | * | ||
29 | * No data-dependent reads from memory-like regions are ever reordered | ||
30 | * over this barrier. All reads preceding this primitive are guaranteed | ||
31 | * to access memory (but not necessarily other CPUs' caches) before any | ||
32 | * reads following this primitive that depend on the data returned by | ||
33 | * any of the preceding reads. This primitive is much lighter weight than | ||
34 | * rmb() on most CPUs, and is never heavier weight than is | ||
35 | * rmb(). | ||
36 | * | ||
37 | * These ordering constraints are respected by both the local CPU | ||
38 | * and the compiler. | ||
39 | * | ||
40 | * Ordering is not guaranteed by anything other than these primitives, | ||
41 | * not even by data dependencies. See the documentation for | ||
42 | * memory_barrier() for examples and URLs to more information. | ||
43 | * | ||
44 | * For example, the following code would force ordering (the initial | ||
45 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
46 | * | ||
47 | * <programlisting> | ||
48 | * CPU 0 CPU 1 | ||
49 | * | ||
50 | * b = 2; | ||
51 | * memory_barrier(); | ||
52 | * p = &b; q = p; | ||
53 | * read_barrier_depends(); | ||
54 | * d = *q; | ||
55 | * </programlisting> | ||
56 | * | ||
57 | * | ||
58 | * because the read of "*q" depends on the read of "p" and these | ||
59 | * two reads are separated by a read_barrier_depends(). However, | ||
60 | * the following code, with the same initial values for "a" and "b": | ||
61 | * | ||
62 | * <programlisting> | ||
63 | * CPU 0 CPU 1 | ||
64 | * | ||
65 | * a = 2; | ||
66 | * memory_barrier(); | ||
67 | * b = 3; y = b; | ||
68 | * read_barrier_depends(); | ||
69 | * x = a; | ||
70 | * </programlisting> | ||
71 | * | ||
72 | * does not enforce ordering, since there is no data dependency between | ||
73 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
74 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
75 | * in cases like this where there are no data dependencies. | ||
76 | **/ | ||
77 | |||
78 | #define read_barrier_depends() do { } while (0) | ||
79 | |||
80 | #ifdef CONFIG_SMP | ||
81 | #define smp_mb() mb() | ||
82 | #define smp_rmb() rmb() | ||
83 | #define smp_wmb() wmb() | ||
84 | #define smp_read_barrier_depends() read_barrier_depends() | ||
85 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | ||
86 | #else | ||
87 | #define smp_mb() barrier() | ||
88 | #define smp_rmb() barrier() | ||
89 | #define smp_wmb() barrier() | ||
90 | #define smp_read_barrier_depends() do { } while (0) | ||
91 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | ||
92 | #endif | ||
93 | 15 | ||
94 | #endif /* _ASM_M32R_BARRIER_H */ | 16 | #endif /* _ASM_M32R_BARRIER_H */ |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 75f25a8e3001..dbdd2231c75d 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -87,6 +87,30 @@ config MMU_SUN3 | |||
87 | bool | 87 | bool |
88 | depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE | 88 | depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE |
89 | 89 | ||
90 | config KEXEC | ||
91 | bool "kexec system call" | ||
92 | depends on M68KCLASSIC | ||
93 | help | ||
94 | kexec is a system call that implements the ability to shut down your | ||
95 | current kernel, and to start another kernel. It is like a reboot | ||
96 | but it is independent of the system firmware. And like a reboot | ||
97 | you can start any kernel with it, not just Linux. | ||
98 | |||
99 | The name comes from the similarity to the exec system call. | ||
100 | |||
101 | It is an ongoing process to be certain the hardware in a machine | ||
102 | is properly shut down, so do not be surprised if this code does not | ||
103 | initially work for you. As of this writing the exact hardware | ||
104 | interface is strongly in flux, so no good recommendation can be | ||
105 | made. | ||
106 | |||
107 | config BOOTINFO_PROC | ||
108 | bool "Export bootinfo in procfs" | ||
109 | depends on KEXEC && M68KCLASSIC | ||
110 | help | ||
111 | Say Y to export the bootinfo used to boot the kernel in a | ||
112 | "bootinfo" file in procfs. This is useful with kexec. | ||
113 | |||
90 | menu "Platform setup" | 114 | menu "Platform setup" |
91 | 115 | ||
92 | source arch/m68k/Kconfig.cpu | 116 | source arch/m68k/Kconfig.cpu |
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c index 99449fbf9a72..ba03cec3f711 100644 --- a/arch/m68k/amiga/chipram.c +++ b/arch/m68k/amiga/chipram.c | |||
@@ -87,7 +87,7 @@ void *amiga_chip_alloc_res(unsigned long size, struct resource *res) | |||
87 | 87 | ||
88 | atomic_sub(size, &chipavail); | 88 | atomic_sub(size, &chipavail); |
89 | pr_debug("amiga_chip_alloc_res: returning %pR\n", res); | 89 | pr_debug("amiga_chip_alloc_res: returning %pR\n", res); |
90 | return (void *)ZTWO_VADDR(res->start); | 90 | return ZTWO_VADDR(res->start); |
91 | } | 91 | } |
92 | 92 | ||
93 | void amiga_chip_free(void *ptr) | 93 | void amiga_chip_free(void *ptr) |
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index b819390e29cd..9625b7132227 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/keyboard.h> | 28 | #include <linux/keyboard.h> |
29 | 29 | ||
30 | #include <asm/bootinfo.h> | 30 | #include <asm/bootinfo.h> |
31 | #include <asm/bootinfo-amiga.h> | ||
32 | #include <asm/byteorder.h> | ||
31 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
32 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
33 | #include <asm/amigahw.h> | 35 | #include <asm/amigahw.h> |
@@ -140,46 +142,46 @@ static struct resource ram_resource[NUM_MEMINFO]; | |||
140 | * Parse an Amiga-specific record in the bootinfo | 142 | * Parse an Amiga-specific record in the bootinfo |
141 | */ | 143 | */ |
142 | 144 | ||
143 | int amiga_parse_bootinfo(const struct bi_record *record) | 145 | int __init amiga_parse_bootinfo(const struct bi_record *record) |
144 | { | 146 | { |
145 | int unknown = 0; | 147 | int unknown = 0; |
146 | const unsigned long *data = record->data; | 148 | const void *data = record->data; |
147 | 149 | ||
148 | switch (record->tag) { | 150 | switch (be16_to_cpu(record->tag)) { |
149 | case BI_AMIGA_MODEL: | 151 | case BI_AMIGA_MODEL: |
150 | amiga_model = *data; | 152 | amiga_model = be32_to_cpup(data); |
151 | break; | 153 | break; |
152 | 154 | ||
153 | case BI_AMIGA_ECLOCK: | 155 | case BI_AMIGA_ECLOCK: |
154 | amiga_eclock = *data; | 156 | amiga_eclock = be32_to_cpup(data); |
155 | break; | 157 | break; |
156 | 158 | ||
157 | case BI_AMIGA_CHIPSET: | 159 | case BI_AMIGA_CHIPSET: |
158 | amiga_chipset = *data; | 160 | amiga_chipset = be32_to_cpup(data); |
159 | break; | 161 | break; |
160 | 162 | ||
161 | case BI_AMIGA_CHIP_SIZE: | 163 | case BI_AMIGA_CHIP_SIZE: |
162 | amiga_chip_size = *(const int *)data; | 164 | amiga_chip_size = be32_to_cpup(data); |
163 | break; | 165 | break; |
164 | 166 | ||
165 | case BI_AMIGA_VBLANK: | 167 | case BI_AMIGA_VBLANK: |
166 | amiga_vblank = *(const unsigned char *)data; | 168 | amiga_vblank = *(const __u8 *)data; |
167 | break; | 169 | break; |
168 | 170 | ||
169 | case BI_AMIGA_PSFREQ: | 171 | case BI_AMIGA_PSFREQ: |
170 | amiga_psfreq = *(const unsigned char *)data; | 172 | amiga_psfreq = *(const __u8 *)data; |
171 | break; | 173 | break; |
172 | 174 | ||
173 | case BI_AMIGA_AUTOCON: | 175 | case BI_AMIGA_AUTOCON: |
174 | #ifdef CONFIG_ZORRO | 176 | #ifdef CONFIG_ZORRO |
175 | if (zorro_num_autocon < ZORRO_NUM_AUTO) { | 177 | if (zorro_num_autocon < ZORRO_NUM_AUTO) { |
176 | const struct ConfigDev *cd = (struct ConfigDev *)data; | 178 | const struct ConfigDev *cd = data; |
177 | struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++]; | 179 | struct zorro_dev_init *dev = &zorro_autocon_init[zorro_num_autocon++]; |
178 | dev->rom = cd->cd_Rom; | 180 | dev->rom = cd->cd_Rom; |
179 | dev->slotaddr = cd->cd_SlotAddr; | 181 | dev->slotaddr = be16_to_cpu(cd->cd_SlotAddr); |
180 | dev->slotsize = cd->cd_SlotSize; | 182 | dev->slotsize = be16_to_cpu(cd->cd_SlotSize); |
181 | dev->resource.start = (unsigned long)cd->cd_BoardAddr; | 183 | dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr); |
182 | dev->resource.end = dev->resource.start + cd->cd_BoardSize - 1; | 184 | dev->boardsize = be32_to_cpu(cd->cd_BoardSize); |
183 | } else | 185 | } else |
184 | printk("amiga_parse_bootinfo: too many AutoConfig devices\n"); | 186 | printk("amiga_parse_bootinfo: too many AutoConfig devices\n"); |
185 | #endif /* CONFIG_ZORRO */ | 187 | #endif /* CONFIG_ZORRO */ |
@@ -358,6 +360,14 @@ static void __init amiga_identify(void) | |||
358 | #undef AMIGAHW_ANNOUNCE | 360 | #undef AMIGAHW_ANNOUNCE |
359 | } | 361 | } |
360 | 362 | ||
363 | |||
364 | static unsigned long amiga_random_get_entropy(void) | ||
365 | { | ||
366 | /* VPOSR/VHPOSR provide at least 17 bits of data changing at 1.79 MHz */ | ||
367 | return *(unsigned long *)&amiga_custom.vposr; | ||
368 | } | ||
369 | |||
370 | |||
361 | /* | 371 | /* |
362 | * Setup the Amiga configuration info | 372 | * Setup the Amiga configuration info |
363 | */ | 373 | */ |
@@ -395,6 +405,8 @@ void __init config_amiga(void) | |||
395 | mach_heartbeat = amiga_heartbeat; | 405 | mach_heartbeat = amiga_heartbeat; |
396 | #endif | 406 | #endif |
397 | 407 | ||
408 | mach_random_get_entropy = amiga_random_get_entropy; | ||
409 | |||
398 | /* Fill in the clock value (based on the 700 kHz E-Clock) */ | 410 | /* Fill in the clock value (based on the 700 kHz E-Clock) */ |
399 | amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */ | 411 | amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */ |
400 | 412 | ||
@@ -608,6 +620,8 @@ static void amiga_mem_console_write(struct console *co, const char *s, | |||
608 | 620 | ||
609 | static int __init amiga_savekmsg_setup(char *arg) | 621 | static int __init amiga_savekmsg_setup(char *arg) |
610 | { | 622 | { |
623 | bool registered; | ||
624 | |||
611 | if (!MACH_IS_AMIGA || strcmp(arg, "mem")) | 625 | if (!MACH_IS_AMIGA || strcmp(arg, "mem")) |
612 | return 0; | 626 | return 0; |
613 | 627 | ||
@@ -618,14 +632,16 @@ static int __init amiga_savekmsg_setup(char *arg) | |||
618 | 632 | ||
619 | /* Just steal the block, the chipram allocator isn't functional yet */ | 633 | /* Just steal the block, the chipram allocator isn't functional yet */ |
620 | amiga_chip_size -= SAVEKMSG_MAXMEM; | 634 | amiga_chip_size -= SAVEKMSG_MAXMEM; |
621 | savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); | 635 | savekmsg = ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size); |
622 | savekmsg->magic1 = SAVEKMSG_MAGIC1; | 636 | savekmsg->magic1 = SAVEKMSG_MAGIC1; |
623 | savekmsg->magic2 = SAVEKMSG_MAGIC2; | 637 | savekmsg->magic2 = SAVEKMSG_MAGIC2; |
624 | savekmsg->magicptr = ZTWO_PADDR(savekmsg); | 638 | savekmsg->magicptr = ZTWO_PADDR(savekmsg); |
625 | savekmsg->size = 0; | 639 | savekmsg->size = 0; |
626 | 640 | ||
641 | registered = !!amiga_console_driver.write; | ||
627 | amiga_console_driver.write = amiga_mem_console_write; | 642 | amiga_console_driver.write = amiga_mem_console_write; |
628 | register_console(&amiga_console_driver); | 643 | if (!registered) |
644 | register_console(&amiga_console_driver); | ||
629 | return 0; | 645 | return 0; |
630 | } | 646 | } |
631 | 647 | ||
@@ -707,11 +723,16 @@ void amiga_serial_gets(struct console *co, char *s, int len) | |||
707 | 723 | ||
708 | static int __init amiga_debug_setup(char *arg) | 724 | static int __init amiga_debug_setup(char *arg) |
709 | { | 725 | { |
710 | if (MACH_IS_AMIGA && !strcmp(arg, "ser")) { | 726 | bool registered; |
711 | /* no initialization required (?) */ | 727 | |
712 | amiga_console_driver.write = amiga_serial_console_write; | 728 | if (!MACH_IS_AMIGA || strcmp(arg, "ser")) |
729 | return 0; | ||
730 | |||
731 | /* no initialization required (?) */ | ||
732 | registered = !!amiga_console_driver.write; | ||
733 | amiga_console_driver.write = amiga_serial_console_write; | ||
734 | if (!registered) | ||
713 | register_console(&amiga_console_driver); | 735 | register_console(&amiga_console_driver); |
714 | } | ||
715 | return 0; | 736 | return 0; |
716 | } | 737 | } |
717 | 738 | ||
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c index dacd9f911f71..d34029d7b058 100644 --- a/arch/m68k/amiga/platform.c +++ b/arch/m68k/amiga/platform.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <asm/amigahw.h> | 14 | #include <asm/amigahw.h> |
15 | #include <asm/amigayle.h> | 15 | #include <asm/amigayle.h> |
16 | #include <asm/byteorder.h> | ||
16 | 17 | ||
17 | 18 | ||
18 | #ifdef CONFIG_ZORRO | 19 | #ifdef CONFIG_ZORRO |
@@ -66,10 +67,12 @@ static int __init z_dev_present(zorro_id id) | |||
66 | { | 67 | { |
67 | unsigned int i; | 68 | unsigned int i; |
68 | 69 | ||
69 | for (i = 0; i < zorro_num_autocon; i++) | 70 | for (i = 0; i < zorro_num_autocon; i++) { |
70 | if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) && | 71 | const struct ExpansionRom *rom = &zorro_autocon_init[i].rom; |
71 | zorro_autocon[i].rom.er_Product == ZORRO_PROD(id)) | 72 | if (be16_to_cpu(rom->er_Manufacturer) == ZORRO_MANUF(id) && |
73 | rom->er_Product == ZORRO_PROD(id)) | ||
72 | return 1; | 74 | return 1; |
75 | } | ||
73 | 76 | ||
74 | return 0; | 77 | return 0; |
75 | } | 78 | } |
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 3ea56b90e718..9268c0f96376 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/init.h> | ||
1 | #include <linux/types.h> | 2 | #include <linux/types.h> |
2 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
3 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
@@ -9,6 +10,8 @@ | |||
9 | 10 | ||
10 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
11 | #include <asm/bootinfo.h> | 12 | #include <asm/bootinfo.h> |
13 | #include <asm/bootinfo-apollo.h> | ||
14 | #include <asm/byteorder.h> | ||
12 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
13 | #include <asm/apollohw.h> | 16 | #include <asm/apollohw.h> |
14 | #include <asm/irq.h> | 17 | #include <asm/irq.h> |
@@ -43,26 +46,25 @@ static const char *apollo_models[] = { | |||
43 | [APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)" | 46 | [APOLLO_DN4500-APOLLO_DN3000] = "DN4500 (Roadrunner)" |
44 | }; | 47 | }; |
45 | 48 | ||
46 | int apollo_parse_bootinfo(const struct bi_record *record) { | 49 | int __init apollo_parse_bootinfo(const struct bi_record *record) |
47 | 50 | { | |
48 | int unknown = 0; | 51 | int unknown = 0; |
49 | const unsigned long *data = record->data; | 52 | const void *data = record->data; |
50 | 53 | ||
51 | switch(record->tag) { | 54 | switch (be16_to_cpu(record->tag)) { |
52 | case BI_APOLLO_MODEL: | 55 | case BI_APOLLO_MODEL: |
53 | apollo_model=*data; | 56 | apollo_model = be32_to_cpup(data); |
54 | break; | 57 | break; |
55 | 58 | ||
56 | default: | 59 | default: |
57 | unknown=1; | 60 | unknown=1; |
58 | } | 61 | } |
59 | 62 | ||
60 | return unknown; | 63 | return unknown; |
61 | } | 64 | } |
62 | 65 | ||
63 | void dn_setup_model(void) { | 66 | static void __init dn_setup_model(void) |
64 | 67 | { | |
65 | |||
66 | printk("Apollo hardware found: "); | 68 | printk("Apollo hardware found: "); |
67 | printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); | 69 | printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); |
68 | 70 | ||
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 20cde4e9fc77..3e73a63c066f 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c | |||
@@ -333,6 +333,9 @@ void __init atari_init_IRQ(void) | |||
333 | m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, | 333 | m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, |
334 | IRQ_MFP_TIMER1, 8); | 334 | IRQ_MFP_TIMER1, 8); |
335 | 335 | ||
336 | irq_set_status_flags(IRQ_MFP_TIMER1, IRQ_IS_POLLED); | ||
337 | irq_set_status_flags(IRQ_MFP_TIMER2, IRQ_IS_POLLED); | ||
338 | |||
336 | /* prepare timer D data for use as poll interrupt */ | 339 | /* prepare timer D data for use as poll interrupt */ |
337 | /* set Timer D data Register - needs to be > 0 */ | 340 | /* set Timer D data Register - needs to be > 0 */ |
338 | st_mfp.tim_dt_d = 254; /* < 100 Hz */ | 341 | st_mfp.tim_dt_d = 254; /* < 100 Hz */ |
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index fb2d0bd9b3ad..01a62161b08a 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c | |||
@@ -37,6 +37,8 @@ | |||
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
38 | 38 | ||
39 | #include <asm/bootinfo.h> | 39 | #include <asm/bootinfo.h> |
40 | #include <asm/bootinfo-atari.h> | ||
41 | #include <asm/byteorder.h> | ||
40 | #include <asm/setup.h> | 42 | #include <asm/setup.h> |
41 | #include <asm/atarihw.h> | 43 | #include <asm/atarihw.h> |
42 | #include <asm/atariints.h> | 44 | #include <asm/atariints.h> |
@@ -129,14 +131,14 @@ static int __init scc_test(volatile char *ctla) | |||
129 | int __init atari_parse_bootinfo(const struct bi_record *record) | 131 | int __init atari_parse_bootinfo(const struct bi_record *record) |
130 | { | 132 | { |
131 | int unknown = 0; | 133 | int unknown = 0; |
132 | const u_long *data = record->data; | 134 | const void *data = record->data; |
133 | 135 | ||
134 | switch (record->tag) { | 136 | switch (be16_to_cpu(record->tag)) { |
135 | case BI_ATARI_MCH_COOKIE: | 137 | case BI_ATARI_MCH_COOKIE: |
136 | atari_mch_cookie = *data; | 138 | atari_mch_cookie = be32_to_cpup(data); |
137 | break; | 139 | break; |
138 | case BI_ATARI_MCH_TYPE: | 140 | case BI_ATARI_MCH_TYPE: |
139 | atari_mch_type = *data; | 141 | atari_mch_type = be32_to_cpup(data); |
140 | break; | 142 | break; |
141 | default: | 143 | default: |
142 | unknown = 1; | 144 | unknown = 1; |
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c index a547ba9683d1..03cb5e08d7cf 100644 --- a/arch/m68k/atari/debug.c +++ b/arch/m68k/atari/debug.c | |||
@@ -287,6 +287,8 @@ static void __init atari_init_midi_port(int cflag) | |||
287 | 287 | ||
288 | static int __init atari_debug_setup(char *arg) | 288 | static int __init atari_debug_setup(char *arg) |
289 | { | 289 | { |
290 | bool registered; | ||
291 | |||
290 | if (!MACH_IS_ATARI) | 292 | if (!MACH_IS_ATARI) |
291 | return 0; | 293 | return 0; |
292 | 294 | ||
@@ -294,6 +296,7 @@ static int __init atari_debug_setup(char *arg) | |||
294 | /* defaults to ser2 for a Falcon and ser1 otherwise */ | 296 | /* defaults to ser2 for a Falcon and ser1 otherwise */ |
295 | arg = MACH_IS_FALCON ? "ser2" : "ser1"; | 297 | arg = MACH_IS_FALCON ? "ser2" : "ser1"; |
296 | 298 | ||
299 | registered = !!atari_console_driver.write; | ||
297 | if (!strcmp(arg, "ser1")) { | 300 | if (!strcmp(arg, "ser1")) { |
298 | /* ST-MFP Modem1 serial port */ | 301 | /* ST-MFP Modem1 serial port */ |
299 | atari_init_mfp_port(B9600|CS8); | 302 | atari_init_mfp_port(B9600|CS8); |
@@ -317,7 +320,7 @@ static int __init atari_debug_setup(char *arg) | |||
317 | sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */ | 320 | sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */ |
318 | atari_console_driver.write = atari_par_console_write; | 321 | atari_console_driver.write = atari_par_console_write; |
319 | } | 322 | } |
320 | if (atari_console_driver.write) | 323 | if (atari_console_driver.write && !registered) |
321 | register_console(&atari_console_driver); | 324 | register_console(&atari_console_driver); |
322 | 325 | ||
323 | return 0; | 326 | return 0; |
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 8943aa4c18e6..478623dbb209 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/bcd.h> | 28 | #include <linux/bcd.h> |
29 | 29 | ||
30 | #include <asm/bootinfo.h> | 30 | #include <asm/bootinfo.h> |
31 | #include <asm/bootinfo-vme.h> | ||
32 | #include <asm/byteorder.h> | ||
31 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
32 | #include <asm/setup.h> | 34 | #include <asm/setup.h> |
33 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
@@ -50,9 +52,9 @@ void bvme6000_set_vectors (void); | |||
50 | static irq_handler_t tick_handler; | 52 | static irq_handler_t tick_handler; |
51 | 53 | ||
52 | 54 | ||
53 | int bvme6000_parse_bootinfo(const struct bi_record *bi) | 55 | int __init bvme6000_parse_bootinfo(const struct bi_record *bi) |
54 | { | 56 | { |
55 | if (bi->tag == BI_VME_TYPE) | 57 | if (be16_to_cpu(bi->tag) == BI_VME_TYPE) |
56 | return 0; | 58 | return 0; |
57 | else | 59 | else |
58 | return 1; | 60 | return 1; |
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 19325e117eea..559ff3af8ff7 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig | |||
@@ -52,7 +52,6 @@ CONFIG_IP_PNP_RARP=y | |||
52 | CONFIG_NET_IPIP=m | 52 | CONFIG_NET_IPIP=m |
53 | CONFIG_NET_IPGRE_DEMUX=m | 53 | CONFIG_NET_IPGRE_DEMUX=m |
54 | CONFIG_NET_IPGRE=m | 54 | CONFIG_NET_IPGRE=m |
55 | CONFIG_SYN_COOKIES=y | ||
56 | CONFIG_NET_IPVTI=m | 55 | CONFIG_NET_IPVTI=m |
57 | CONFIG_INET_AH=m | 56 | CONFIG_INET_AH=m |
58 | CONFIG_INET_ESP=m | 57 | CONFIG_INET_ESP=m |
@@ -63,11 +62,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
63 | # CONFIG_INET_LRO is not set | 62 | # CONFIG_INET_LRO is not set |
64 | CONFIG_INET_DIAG=m | 63 | CONFIG_INET_DIAG=m |
65 | CONFIG_INET_UDP_DIAG=m | 64 | CONFIG_INET_UDP_DIAG=m |
66 | CONFIG_IPV6_PRIVACY=y | ||
67 | CONFIG_IPV6_ROUTER_PREF=y | 65 | CONFIG_IPV6_ROUTER_PREF=y |
68 | CONFIG_INET6_AH=m | 66 | CONFIG_INET6_AH=m |
69 | CONFIG_INET6_ESP=m | 67 | CONFIG_INET6_ESP=m |
70 | CONFIG_INET6_IPCOMP=m | 68 | CONFIG_INET6_IPCOMP=m |
69 | CONFIG_IPV6_VTI=m | ||
71 | CONFIG_IPV6_GRE=m | 70 | CONFIG_IPV6_GRE=m |
72 | CONFIG_NETFILTER=y | 71 | CONFIG_NETFILTER=y |
73 | CONFIG_NF_CONNTRACK=m | 72 | CONFIG_NF_CONNTRACK=m |
@@ -85,6 +84,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
85 | CONFIG_NF_CONNTRACK_SANE=m | 84 | CONFIG_NF_CONNTRACK_SANE=m |
86 | CONFIG_NF_CONNTRACK_SIP=m | 85 | CONFIG_NF_CONNTRACK_SIP=m |
87 | CONFIG_NF_CONNTRACK_TFTP=m | 86 | CONFIG_NF_CONNTRACK_TFTP=m |
87 | CONFIG_NF_TABLES=m | ||
88 | CONFIG_NFT_EXTHDR=m | ||
89 | CONFIG_NFT_META=m | ||
90 | CONFIG_NFT_CT=m | ||
91 | CONFIG_NFT_RBTREE=m | ||
92 | CONFIG_NFT_HASH=m | ||
93 | CONFIG_NFT_COUNTER=m | ||
94 | CONFIG_NFT_LOG=m | ||
95 | CONFIG_NFT_LIMIT=m | ||
96 | CONFIG_NFT_NAT=m | ||
97 | CONFIG_NFT_COMPAT=m | ||
88 | CONFIG_NETFILTER_XT_SET=m | 98 | CONFIG_NETFILTER_XT_SET=m |
89 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 99 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
90 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 100 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -98,6 +108,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
98 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 108 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
99 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 109 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
100 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 110 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
111 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
101 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 112 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
102 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 113 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
103 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 114 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -130,6 +141,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
130 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 141 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
131 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 142 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
132 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 143 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
144 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
133 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 145 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
134 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 146 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
135 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 147 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -144,11 +156,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
144 | CONFIG_IP_SET_HASH_IPPORT=m | 156 | CONFIG_IP_SET_HASH_IPPORT=m |
145 | CONFIG_IP_SET_HASH_IPPORTIP=m | 157 | CONFIG_IP_SET_HASH_IPPORTIP=m |
146 | CONFIG_IP_SET_HASH_IPPORTNET=m | 158 | CONFIG_IP_SET_HASH_IPPORTNET=m |
159 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
147 | CONFIG_IP_SET_HASH_NET=m | 160 | CONFIG_IP_SET_HASH_NET=m |
161 | CONFIG_IP_SET_HASH_NETNET=m | ||
148 | CONFIG_IP_SET_HASH_NETPORT=m | 162 | CONFIG_IP_SET_HASH_NETPORT=m |
149 | CONFIG_IP_SET_HASH_NETIFACE=m | 163 | CONFIG_IP_SET_HASH_NETIFACE=m |
150 | CONFIG_IP_SET_LIST_SET=m | 164 | CONFIG_IP_SET_LIST_SET=m |
151 | CONFIG_NF_CONNTRACK_IPV4=m | 165 | CONFIG_NF_CONNTRACK_IPV4=m |
166 | CONFIG_NF_TABLES_IPV4=m | ||
167 | CONFIG_NFT_REJECT_IPV4=m | ||
168 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
169 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
170 | CONFIG_NF_TABLES_ARP=m | ||
152 | CONFIG_IP_NF_IPTABLES=m | 171 | CONFIG_IP_NF_IPTABLES=m |
153 | CONFIG_IP_NF_MATCH_AH=m | 172 | CONFIG_IP_NF_MATCH_AH=m |
154 | CONFIG_IP_NF_MATCH_ECN=m | 173 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -156,6 +175,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
156 | CONFIG_IP_NF_MATCH_TTL=m | 175 | CONFIG_IP_NF_MATCH_TTL=m |
157 | CONFIG_IP_NF_FILTER=m | 176 | CONFIG_IP_NF_FILTER=m |
158 | CONFIG_IP_NF_TARGET_REJECT=m | 177 | CONFIG_IP_NF_TARGET_REJECT=m |
178 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
159 | CONFIG_IP_NF_TARGET_ULOG=m | 179 | CONFIG_IP_NF_TARGET_ULOG=m |
160 | CONFIG_NF_NAT_IPV4=m | 180 | CONFIG_NF_NAT_IPV4=m |
161 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 181 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -170,6 +190,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
170 | CONFIG_IP_NF_ARPFILTER=m | 190 | CONFIG_IP_NF_ARPFILTER=m |
171 | CONFIG_IP_NF_ARP_MANGLE=m | 191 | CONFIG_IP_NF_ARP_MANGLE=m |
172 | CONFIG_NF_CONNTRACK_IPV6=m | 192 | CONFIG_NF_CONNTRACK_IPV6=m |
193 | CONFIG_NF_TABLES_IPV6=m | ||
194 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
195 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
173 | CONFIG_IP6_NF_IPTABLES=m | 196 | CONFIG_IP6_NF_IPTABLES=m |
174 | CONFIG_IP6_NF_MATCH_AH=m | 197 | CONFIG_IP6_NF_MATCH_AH=m |
175 | CONFIG_IP6_NF_MATCH_EUI64=m | 198 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -183,11 +206,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
183 | CONFIG_IP6_NF_TARGET_HL=m | 206 | CONFIG_IP6_NF_TARGET_HL=m |
184 | CONFIG_IP6_NF_FILTER=m | 207 | CONFIG_IP6_NF_FILTER=m |
185 | CONFIG_IP6_NF_TARGET_REJECT=m | 208 | CONFIG_IP6_NF_TARGET_REJECT=m |
209 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
186 | CONFIG_IP6_NF_MANGLE=m | 210 | CONFIG_IP6_NF_MANGLE=m |
187 | CONFIG_IP6_NF_RAW=m | 211 | CONFIG_IP6_NF_RAW=m |
188 | CONFIG_NF_NAT_IPV6=m | 212 | CONFIG_NF_NAT_IPV6=m |
189 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 213 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
190 | CONFIG_IP6_NF_TARGET_NPT=m | 214 | CONFIG_IP6_NF_TARGET_NPT=m |
215 | CONFIG_NF_TABLES_BRIDGE=m | ||
191 | CONFIG_IP_DCCP=m | 216 | CONFIG_IP_DCCP=m |
192 | # CONFIG_IP_DCCP_CCID3 is not set | 217 | # CONFIG_IP_DCCP_CCID3 is not set |
193 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 218 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -195,10 +220,13 @@ CONFIG_RDS=m | |||
195 | CONFIG_RDS_TCP=m | 220 | CONFIG_RDS_TCP=m |
196 | CONFIG_L2TP=m | 221 | CONFIG_L2TP=m |
197 | CONFIG_ATALK=m | 222 | CONFIG_ATALK=m |
223 | CONFIG_DNS_RESOLVER=y | ||
198 | CONFIG_BATMAN_ADV=m | 224 | CONFIG_BATMAN_ADV=m |
199 | CONFIG_BATMAN_ADV_DAT=y | 225 | CONFIG_BATMAN_ADV_DAT=y |
226 | CONFIG_BATMAN_ADV_NC=y | ||
227 | CONFIG_NETLINK_DIAG=m | ||
228 | CONFIG_NET_MPLS_GSO=m | ||
200 | # CONFIG_WIRELESS is not set | 229 | # CONFIG_WIRELESS is not set |
201 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
202 | CONFIG_DEVTMPFS=y | 230 | CONFIG_DEVTMPFS=y |
203 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 231 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
204 | # CONFIG_FW_LOADER_USER_HELPER is not set | 232 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -216,6 +244,7 @@ CONFIG_BLK_DEV_NBD=m | |||
216 | CONFIG_BLK_DEV_RAM=y | 244 | CONFIG_BLK_DEV_RAM=y |
217 | CONFIG_CDROM_PKTCDVD=m | 245 | CONFIG_CDROM_PKTCDVD=m |
218 | CONFIG_ATA_OVER_ETH=m | 246 | CONFIG_ATA_OVER_ETH=m |
247 | CONFIG_DUMMY_IRQ=m | ||
219 | CONFIG_IDE=y | 248 | CONFIG_IDE=y |
220 | CONFIG_IDE_GD_ATAPI=y | 249 | CONFIG_IDE_GD_ATAPI=y |
221 | CONFIG_BLK_DEV_IDECD=y | 250 | CONFIG_BLK_DEV_IDECD=y |
@@ -262,6 +291,7 @@ CONFIG_EQUALIZER=m | |||
262 | CONFIG_NET_TEAM=m | 291 | CONFIG_NET_TEAM=m |
263 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 292 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
264 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 293 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
294 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
265 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 295 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
266 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 296 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
267 | CONFIG_VXLAN=m | 297 | CONFIG_VXLAN=m |
@@ -271,10 +301,10 @@ CONFIG_VETH=m | |||
271 | # CONFIG_NET_VENDOR_3COM is not set | 301 | # CONFIG_NET_VENDOR_3COM is not set |
272 | CONFIG_A2065=y | 302 | CONFIG_A2065=y |
273 | CONFIG_ARIADNE=y | 303 | CONFIG_ARIADNE=y |
304 | # CONFIG_NET_VENDOR_ARC is not set | ||
274 | # CONFIG_NET_CADENCE is not set | 305 | # CONFIG_NET_CADENCE is not set |
275 | # CONFIG_NET_VENDOR_BROADCOM is not set | 306 | # CONFIG_NET_VENDOR_BROADCOM is not set |
276 | # CONFIG_NET_VENDOR_CIRRUS is not set | 307 | # CONFIG_NET_VENDOR_CIRRUS is not set |
277 | # CONFIG_NET_VENDOR_FUJITSU is not set | ||
278 | # CONFIG_NET_VENDOR_HP is not set | 308 | # CONFIG_NET_VENDOR_HP is not set |
279 | # CONFIG_NET_VENDOR_INTEL is not set | 309 | # CONFIG_NET_VENDOR_INTEL is not set |
280 | # CONFIG_NET_VENDOR_MARVELL is not set | 310 | # CONFIG_NET_VENDOR_MARVELL is not set |
@@ -285,6 +315,7 @@ CONFIG_ZORRO8390=y | |||
285 | # CONFIG_NET_VENDOR_SEEQ is not set | 315 | # CONFIG_NET_VENDOR_SEEQ is not set |
286 | # CONFIG_NET_VENDOR_SMSC is not set | 316 | # CONFIG_NET_VENDOR_SMSC is not set |
287 | # CONFIG_NET_VENDOR_STMICRO is not set | 317 | # CONFIG_NET_VENDOR_STMICRO is not set |
318 | # CONFIG_NET_VENDOR_VIA is not set | ||
288 | # CONFIG_NET_VENDOR_WIZNET is not set | 319 | # CONFIG_NET_VENDOR_WIZNET is not set |
289 | CONFIG_PPP=m | 320 | CONFIG_PPP=m |
290 | CONFIG_PPP_BSDCOMP=m | 321 | CONFIG_PPP_BSDCOMP=m |
@@ -311,7 +342,6 @@ CONFIG_JOYSTICK_AMIGA=m | |||
311 | CONFIG_INPUT_MISC=y | 342 | CONFIG_INPUT_MISC=y |
312 | CONFIG_INPUT_M68K_BEEP=m | 343 | CONFIG_INPUT_M68K_BEEP=m |
313 | # CONFIG_SERIO is not set | 344 | # CONFIG_SERIO is not set |
314 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
315 | # CONFIG_LEGACY_PTYS is not set | 345 | # CONFIG_LEGACY_PTYS is not set |
316 | # CONFIG_DEVKMEM is not set | 346 | # CONFIG_DEVKMEM is not set |
317 | CONFIG_PRINTER=m | 347 | CONFIG_PRINTER=m |
@@ -345,10 +375,6 @@ CONFIG_HEARTBEAT=y | |||
345 | CONFIG_PROC_HARDWARE=y | 375 | CONFIG_PROC_HARDWARE=y |
346 | CONFIG_AMIGA_BUILTIN_SERIAL=y | 376 | CONFIG_AMIGA_BUILTIN_SERIAL=y |
347 | CONFIG_SERIAL_CONSOLE=y | 377 | CONFIG_SERIAL_CONSOLE=y |
348 | CONFIG_EXT2_FS=y | ||
349 | CONFIG_EXT3_FS=y | ||
350 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
351 | # CONFIG_EXT3_FS_XATTR is not set | ||
352 | CONFIG_EXT4_FS=y | 378 | CONFIG_EXT4_FS=y |
353 | CONFIG_REISERFS_FS=m | 379 | CONFIG_REISERFS_FS=m |
354 | CONFIG_JFS_FS=m | 380 | CONFIG_JFS_FS=m |
@@ -385,7 +411,7 @@ CONFIG_QNX6FS_FS=m | |||
385 | CONFIG_SYSV_FS=m | 411 | CONFIG_SYSV_FS=m |
386 | CONFIG_UFS_FS=m | 412 | CONFIG_UFS_FS=m |
387 | CONFIG_NFS_FS=y | 413 | CONFIG_NFS_FS=y |
388 | CONFIG_NFS_V4=y | 414 | CONFIG_NFS_V4=m |
389 | CONFIG_NFS_SWAP=y | 415 | CONFIG_NFS_SWAP=y |
390 | CONFIG_ROOT_NFS=y | 416 | CONFIG_ROOT_NFS=y |
391 | CONFIG_NFSD=m | 417 | CONFIG_NFSD=m |
@@ -444,10 +470,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
444 | CONFIG_DLM=m | 470 | CONFIG_DLM=m |
445 | CONFIG_MAGIC_SYSRQ=y | 471 | CONFIG_MAGIC_SYSRQ=y |
446 | CONFIG_ASYNC_RAID6_TEST=m | 472 | CONFIG_ASYNC_RAID6_TEST=m |
473 | CONFIG_TEST_STRING_HELPERS=m | ||
447 | CONFIG_ENCRYPTED_KEYS=m | 474 | CONFIG_ENCRYPTED_KEYS=m |
448 | CONFIG_CRYPTO_MANAGER=y | 475 | CONFIG_CRYPTO_MANAGER=y |
449 | CONFIG_CRYPTO_USER=m | 476 | CONFIG_CRYPTO_USER=m |
450 | CONFIG_CRYPTO_NULL=m | ||
451 | CONFIG_CRYPTO_CRYPTD=m | 477 | CONFIG_CRYPTO_CRYPTD=m |
452 | CONFIG_CRYPTO_TEST=m | 478 | CONFIG_CRYPTO_TEST=m |
453 | CONFIG_CRYPTO_CCM=m | 479 | CONFIG_CRYPTO_CCM=m |
@@ -480,6 +506,8 @@ CONFIG_CRYPTO_TEA=m | |||
480 | CONFIG_CRYPTO_TWOFISH=m | 506 | CONFIG_CRYPTO_TWOFISH=m |
481 | CONFIG_CRYPTO_ZLIB=m | 507 | CONFIG_CRYPTO_ZLIB=m |
482 | CONFIG_CRYPTO_LZO=m | 508 | CONFIG_CRYPTO_LZO=m |
509 | CONFIG_CRYPTO_LZ4=m | ||
510 | CONFIG_CRYPTO_LZ4HC=m | ||
483 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 511 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
484 | CONFIG_CRYPTO_USER_API_HASH=m | 512 | CONFIG_CRYPTO_USER_API_HASH=m |
485 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 513 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 14dc6ccda7f4..cb1f55df69b6 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig | |||
@@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y | |||
50 | CONFIG_NET_IPIP=m | 50 | CONFIG_NET_IPIP=m |
51 | CONFIG_NET_IPGRE_DEMUX=m | 51 | CONFIG_NET_IPGRE_DEMUX=m |
52 | CONFIG_NET_IPGRE=m | 52 | CONFIG_NET_IPGRE=m |
53 | CONFIG_SYN_COOKIES=y | ||
54 | CONFIG_NET_IPVTI=m | 53 | CONFIG_NET_IPVTI=m |
55 | CONFIG_INET_AH=m | 54 | CONFIG_INET_AH=m |
56 | CONFIG_INET_ESP=m | 55 | CONFIG_INET_ESP=m |
@@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
61 | # CONFIG_INET_LRO is not set | 60 | # CONFIG_INET_LRO is not set |
62 | CONFIG_INET_DIAG=m | 61 | CONFIG_INET_DIAG=m |
63 | CONFIG_INET_UDP_DIAG=m | 62 | CONFIG_INET_UDP_DIAG=m |
64 | CONFIG_IPV6_PRIVACY=y | ||
65 | CONFIG_IPV6_ROUTER_PREF=y | 63 | CONFIG_IPV6_ROUTER_PREF=y |
66 | CONFIG_INET6_AH=m | 64 | CONFIG_INET6_AH=m |
67 | CONFIG_INET6_ESP=m | 65 | CONFIG_INET6_ESP=m |
68 | CONFIG_INET6_IPCOMP=m | 66 | CONFIG_INET6_IPCOMP=m |
67 | CONFIG_IPV6_VTI=m | ||
69 | CONFIG_IPV6_GRE=m | 68 | CONFIG_IPV6_GRE=m |
70 | CONFIG_NETFILTER=y | 69 | CONFIG_NETFILTER=y |
71 | CONFIG_NF_CONNTRACK=m | 70 | CONFIG_NF_CONNTRACK=m |
@@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
83 | CONFIG_NF_CONNTRACK_SANE=m | 82 | CONFIG_NF_CONNTRACK_SANE=m |
84 | CONFIG_NF_CONNTRACK_SIP=m | 83 | CONFIG_NF_CONNTRACK_SIP=m |
85 | CONFIG_NF_CONNTRACK_TFTP=m | 84 | CONFIG_NF_CONNTRACK_TFTP=m |
85 | CONFIG_NF_TABLES=m | ||
86 | CONFIG_NFT_EXTHDR=m | ||
87 | CONFIG_NFT_META=m | ||
88 | CONFIG_NFT_CT=m | ||
89 | CONFIG_NFT_RBTREE=m | ||
90 | CONFIG_NFT_HASH=m | ||
91 | CONFIG_NFT_COUNTER=m | ||
92 | CONFIG_NFT_LOG=m | ||
93 | CONFIG_NFT_LIMIT=m | ||
94 | CONFIG_NFT_NAT=m | ||
95 | CONFIG_NFT_COMPAT=m | ||
86 | CONFIG_NETFILTER_XT_SET=m | 96 | CONFIG_NETFILTER_XT_SET=m |
87 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 97 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
88 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 98 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
96 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 106 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
97 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 107 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
98 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 108 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
109 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
99 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 110 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
100 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 111 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
101 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 112 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
128 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 139 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
129 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 140 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
130 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 141 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
142 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
131 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 143 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
132 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 144 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
133 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 145 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
142 | CONFIG_IP_SET_HASH_IPPORT=m | 154 | CONFIG_IP_SET_HASH_IPPORT=m |
143 | CONFIG_IP_SET_HASH_IPPORTIP=m | 155 | CONFIG_IP_SET_HASH_IPPORTIP=m |
144 | CONFIG_IP_SET_HASH_IPPORTNET=m | 156 | CONFIG_IP_SET_HASH_IPPORTNET=m |
157 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
145 | CONFIG_IP_SET_HASH_NET=m | 158 | CONFIG_IP_SET_HASH_NET=m |
159 | CONFIG_IP_SET_HASH_NETNET=m | ||
146 | CONFIG_IP_SET_HASH_NETPORT=m | 160 | CONFIG_IP_SET_HASH_NETPORT=m |
147 | CONFIG_IP_SET_HASH_NETIFACE=m | 161 | CONFIG_IP_SET_HASH_NETIFACE=m |
148 | CONFIG_IP_SET_LIST_SET=m | 162 | CONFIG_IP_SET_LIST_SET=m |
149 | CONFIG_NF_CONNTRACK_IPV4=m | 163 | CONFIG_NF_CONNTRACK_IPV4=m |
164 | CONFIG_NF_TABLES_IPV4=m | ||
165 | CONFIG_NFT_REJECT_IPV4=m | ||
166 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
167 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
168 | CONFIG_NF_TABLES_ARP=m | ||
150 | CONFIG_IP_NF_IPTABLES=m | 169 | CONFIG_IP_NF_IPTABLES=m |
151 | CONFIG_IP_NF_MATCH_AH=m | 170 | CONFIG_IP_NF_MATCH_AH=m |
152 | CONFIG_IP_NF_MATCH_ECN=m | 171 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
154 | CONFIG_IP_NF_MATCH_TTL=m | 173 | CONFIG_IP_NF_MATCH_TTL=m |
155 | CONFIG_IP_NF_FILTER=m | 174 | CONFIG_IP_NF_FILTER=m |
156 | CONFIG_IP_NF_TARGET_REJECT=m | 175 | CONFIG_IP_NF_TARGET_REJECT=m |
176 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
157 | CONFIG_IP_NF_TARGET_ULOG=m | 177 | CONFIG_IP_NF_TARGET_ULOG=m |
158 | CONFIG_NF_NAT_IPV4=m | 178 | CONFIG_NF_NAT_IPV4=m |
159 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 179 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
168 | CONFIG_IP_NF_ARPFILTER=m | 188 | CONFIG_IP_NF_ARPFILTER=m |
169 | CONFIG_IP_NF_ARP_MANGLE=m | 189 | CONFIG_IP_NF_ARP_MANGLE=m |
170 | CONFIG_NF_CONNTRACK_IPV6=m | 190 | CONFIG_NF_CONNTRACK_IPV6=m |
191 | CONFIG_NF_TABLES_IPV6=m | ||
192 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
193 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
171 | CONFIG_IP6_NF_IPTABLES=m | 194 | CONFIG_IP6_NF_IPTABLES=m |
172 | CONFIG_IP6_NF_MATCH_AH=m | 195 | CONFIG_IP6_NF_MATCH_AH=m |
173 | CONFIG_IP6_NF_MATCH_EUI64=m | 196 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
181 | CONFIG_IP6_NF_TARGET_HL=m | 204 | CONFIG_IP6_NF_TARGET_HL=m |
182 | CONFIG_IP6_NF_FILTER=m | 205 | CONFIG_IP6_NF_FILTER=m |
183 | CONFIG_IP6_NF_TARGET_REJECT=m | 206 | CONFIG_IP6_NF_TARGET_REJECT=m |
207 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
184 | CONFIG_IP6_NF_MANGLE=m | 208 | CONFIG_IP6_NF_MANGLE=m |
185 | CONFIG_IP6_NF_RAW=m | 209 | CONFIG_IP6_NF_RAW=m |
186 | CONFIG_NF_NAT_IPV6=m | 210 | CONFIG_NF_NAT_IPV6=m |
187 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 211 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
188 | CONFIG_IP6_NF_TARGET_NPT=m | 212 | CONFIG_IP6_NF_TARGET_NPT=m |
213 | CONFIG_NF_TABLES_BRIDGE=m | ||
189 | CONFIG_IP_DCCP=m | 214 | CONFIG_IP_DCCP=m |
190 | # CONFIG_IP_DCCP_CCID3 is not set | 215 | # CONFIG_IP_DCCP_CCID3 is not set |
191 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 216 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -193,10 +218,13 @@ CONFIG_RDS=m | |||
193 | CONFIG_RDS_TCP=m | 218 | CONFIG_RDS_TCP=m |
194 | CONFIG_L2TP=m | 219 | CONFIG_L2TP=m |
195 | CONFIG_ATALK=m | 220 | CONFIG_ATALK=m |
221 | CONFIG_DNS_RESOLVER=y | ||
196 | CONFIG_BATMAN_ADV=m | 222 | CONFIG_BATMAN_ADV=m |
197 | CONFIG_BATMAN_ADV_DAT=y | 223 | CONFIG_BATMAN_ADV_DAT=y |
224 | CONFIG_BATMAN_ADV_NC=y | ||
225 | CONFIG_NETLINK_DIAG=m | ||
226 | CONFIG_NET_MPLS_GSO=m | ||
198 | # CONFIG_WIRELESS is not set | 227 | # CONFIG_WIRELESS is not set |
199 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
200 | CONFIG_DEVTMPFS=y | 228 | CONFIG_DEVTMPFS=y |
201 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 229 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
202 | # CONFIG_FW_LOADER_USER_HELPER is not set | 230 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m | |||
208 | CONFIG_BLK_DEV_RAM=y | 236 | CONFIG_BLK_DEV_RAM=y |
209 | CONFIG_CDROM_PKTCDVD=m | 237 | CONFIG_CDROM_PKTCDVD=m |
210 | CONFIG_ATA_OVER_ETH=m | 238 | CONFIG_ATA_OVER_ETH=m |
239 | CONFIG_DUMMY_IRQ=m | ||
211 | CONFIG_RAID_ATTRS=m | 240 | CONFIG_RAID_ATTRS=m |
212 | CONFIG_SCSI=y | 241 | CONFIG_SCSI=y |
213 | CONFIG_SCSI_TGT=m | 242 | CONFIG_SCSI_TGT=m |
@@ -244,12 +273,14 @@ CONFIG_EQUALIZER=m | |||
244 | CONFIG_NET_TEAM=m | 273 | CONFIG_NET_TEAM=m |
245 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 274 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
246 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 275 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
276 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
247 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 277 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
248 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 278 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
249 | CONFIG_VXLAN=m | 279 | CONFIG_VXLAN=m |
250 | CONFIG_NETCONSOLE=m | 280 | CONFIG_NETCONSOLE=m |
251 | CONFIG_NETCONSOLE_DYNAMIC=y | 281 | CONFIG_NETCONSOLE_DYNAMIC=y |
252 | CONFIG_VETH=m | 282 | CONFIG_VETH=m |
283 | # CONFIG_NET_VENDOR_ARC is not set | ||
253 | # CONFIG_NET_CADENCE is not set | 284 | # CONFIG_NET_CADENCE is not set |
254 | # CONFIG_NET_VENDOR_BROADCOM is not set | 285 | # CONFIG_NET_VENDOR_BROADCOM is not set |
255 | # CONFIG_NET_VENDOR_INTEL is not set | 286 | # CONFIG_NET_VENDOR_INTEL is not set |
@@ -258,6 +289,7 @@ CONFIG_VETH=m | |||
258 | # CONFIG_NET_VENDOR_NATSEMI is not set | 289 | # CONFIG_NET_VENDOR_NATSEMI is not set |
259 | # CONFIG_NET_VENDOR_SEEQ is not set | 290 | # CONFIG_NET_VENDOR_SEEQ is not set |
260 | # CONFIG_NET_VENDOR_STMICRO is not set | 291 | # CONFIG_NET_VENDOR_STMICRO is not set |
292 | # CONFIG_NET_VENDOR_VIA is not set | ||
261 | # CONFIG_NET_VENDOR_WIZNET is not set | 293 | # CONFIG_NET_VENDOR_WIZNET is not set |
262 | CONFIG_PPP=m | 294 | CONFIG_PPP=m |
263 | CONFIG_PPP_BSDCOMP=m | 295 | CONFIG_PPP_BSDCOMP=m |
@@ -279,7 +311,6 @@ CONFIG_INPUT_EVDEV=m | |||
279 | # CONFIG_MOUSE_PS2 is not set | 311 | # CONFIG_MOUSE_PS2 is not set |
280 | CONFIG_MOUSE_SERIAL=m | 312 | CONFIG_MOUSE_SERIAL=m |
281 | CONFIG_SERIO=m | 313 | CONFIG_SERIO=m |
282 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
283 | # CONFIG_LEGACY_PTYS is not set | 314 | # CONFIG_LEGACY_PTYS is not set |
284 | # CONFIG_DEVKMEM is not set | 315 | # CONFIG_DEVKMEM is not set |
285 | # CONFIG_HW_RANDOM is not set | 316 | # CONFIG_HW_RANDOM is not set |
@@ -302,10 +333,6 @@ CONFIG_RTC_DRV_GENERIC=m | |||
302 | # CONFIG_IOMMU_SUPPORT is not set | 333 | # CONFIG_IOMMU_SUPPORT is not set |
303 | CONFIG_HEARTBEAT=y | 334 | CONFIG_HEARTBEAT=y |
304 | CONFIG_PROC_HARDWARE=y | 335 | CONFIG_PROC_HARDWARE=y |
305 | CONFIG_EXT2_FS=y | ||
306 | CONFIG_EXT3_FS=y | ||
307 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
308 | # CONFIG_EXT3_FS_XATTR is not set | ||
309 | CONFIG_EXT4_FS=y | 336 | CONFIG_EXT4_FS=y |
310 | CONFIG_REISERFS_FS=m | 337 | CONFIG_REISERFS_FS=m |
311 | CONFIG_JFS_FS=m | 338 | CONFIG_JFS_FS=m |
@@ -342,7 +369,7 @@ CONFIG_QNX6FS_FS=m | |||
342 | CONFIG_SYSV_FS=m | 369 | CONFIG_SYSV_FS=m |
343 | CONFIG_UFS_FS=m | 370 | CONFIG_UFS_FS=m |
344 | CONFIG_NFS_FS=y | 371 | CONFIG_NFS_FS=y |
345 | CONFIG_NFS_V4=y | 372 | CONFIG_NFS_V4=m |
346 | CONFIG_NFS_SWAP=y | 373 | CONFIG_NFS_SWAP=y |
347 | CONFIG_ROOT_NFS=y | 374 | CONFIG_ROOT_NFS=y |
348 | CONFIG_NFSD=m | 375 | CONFIG_NFSD=m |
@@ -401,10 +428,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
401 | CONFIG_DLM=m | 428 | CONFIG_DLM=m |
402 | CONFIG_MAGIC_SYSRQ=y | 429 | CONFIG_MAGIC_SYSRQ=y |
403 | CONFIG_ASYNC_RAID6_TEST=m | 430 | CONFIG_ASYNC_RAID6_TEST=m |
431 | CONFIG_TEST_STRING_HELPERS=m | ||
404 | CONFIG_ENCRYPTED_KEYS=m | 432 | CONFIG_ENCRYPTED_KEYS=m |
405 | CONFIG_CRYPTO_MANAGER=y | 433 | CONFIG_CRYPTO_MANAGER=y |
406 | CONFIG_CRYPTO_USER=m | 434 | CONFIG_CRYPTO_USER=m |
407 | CONFIG_CRYPTO_NULL=m | ||
408 | CONFIG_CRYPTO_CRYPTD=m | 435 | CONFIG_CRYPTO_CRYPTD=m |
409 | CONFIG_CRYPTO_TEST=m | 436 | CONFIG_CRYPTO_TEST=m |
410 | CONFIG_CRYPTO_CCM=m | 437 | CONFIG_CRYPTO_CCM=m |
@@ -437,6 +464,8 @@ CONFIG_CRYPTO_TEA=m | |||
437 | CONFIG_CRYPTO_TWOFISH=m | 464 | CONFIG_CRYPTO_TWOFISH=m |
438 | CONFIG_CRYPTO_ZLIB=m | 465 | CONFIG_CRYPTO_ZLIB=m |
439 | CONFIG_CRYPTO_LZO=m | 466 | CONFIG_CRYPTO_LZO=m |
467 | CONFIG_CRYPTO_LZ4=m | ||
468 | CONFIG_CRYPTO_LZ4HC=m | ||
440 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 469 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
441 | CONFIG_CRYPTO_USER_API_HASH=m | 470 | CONFIG_CRYPTO_USER_API_HASH=m |
442 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 471 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 6d5370c914b2..e880cfbb62d9 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y | |||
49 | CONFIG_NET_IPIP=m | 49 | CONFIG_NET_IPIP=m |
50 | CONFIG_NET_IPGRE_DEMUX=m | 50 | CONFIG_NET_IPGRE_DEMUX=m |
51 | CONFIG_NET_IPGRE=m | 51 | CONFIG_NET_IPGRE=m |
52 | CONFIG_SYN_COOKIES=y | ||
53 | CONFIG_NET_IPVTI=m | 52 | CONFIG_NET_IPVTI=m |
54 | CONFIG_INET_AH=m | 53 | CONFIG_INET_AH=m |
55 | CONFIG_INET_ESP=m | 54 | CONFIG_INET_ESP=m |
@@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
60 | # CONFIG_INET_LRO is not set | 59 | # CONFIG_INET_LRO is not set |
61 | CONFIG_INET_DIAG=m | 60 | CONFIG_INET_DIAG=m |
62 | CONFIG_INET_UDP_DIAG=m | 61 | CONFIG_INET_UDP_DIAG=m |
63 | CONFIG_IPV6_PRIVACY=y | ||
64 | CONFIG_IPV6_ROUTER_PREF=y | 62 | CONFIG_IPV6_ROUTER_PREF=y |
65 | CONFIG_INET6_AH=m | 63 | CONFIG_INET6_AH=m |
66 | CONFIG_INET6_ESP=m | 64 | CONFIG_INET6_ESP=m |
67 | CONFIG_INET6_IPCOMP=m | 65 | CONFIG_INET6_IPCOMP=m |
66 | CONFIG_IPV6_VTI=m | ||
68 | CONFIG_IPV6_GRE=m | 67 | CONFIG_IPV6_GRE=m |
69 | CONFIG_NETFILTER=y | 68 | CONFIG_NETFILTER=y |
70 | CONFIG_NF_CONNTRACK=m | 69 | CONFIG_NF_CONNTRACK=m |
@@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
82 | CONFIG_NF_CONNTRACK_SANE=m | 81 | CONFIG_NF_CONNTRACK_SANE=m |
83 | CONFIG_NF_CONNTRACK_SIP=m | 82 | CONFIG_NF_CONNTRACK_SIP=m |
84 | CONFIG_NF_CONNTRACK_TFTP=m | 83 | CONFIG_NF_CONNTRACK_TFTP=m |
84 | CONFIG_NF_TABLES=m | ||
85 | CONFIG_NFT_EXTHDR=m | ||
86 | CONFIG_NFT_META=m | ||
87 | CONFIG_NFT_CT=m | ||
88 | CONFIG_NFT_RBTREE=m | ||
89 | CONFIG_NFT_HASH=m | ||
90 | CONFIG_NFT_COUNTER=m | ||
91 | CONFIG_NFT_LOG=m | ||
92 | CONFIG_NFT_LIMIT=m | ||
93 | CONFIG_NFT_NAT=m | ||
94 | CONFIG_NFT_COMPAT=m | ||
85 | CONFIG_NETFILTER_XT_SET=m | 95 | CONFIG_NETFILTER_XT_SET=m |
86 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 96 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
87 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 97 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
95 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 105 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
96 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 106 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
97 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 107 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
108 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
98 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 109 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
99 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 110 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
100 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 111 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
127 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 138 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
128 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 139 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
129 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 140 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
141 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
130 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 142 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
131 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 143 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
132 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 144 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
141 | CONFIG_IP_SET_HASH_IPPORT=m | 153 | CONFIG_IP_SET_HASH_IPPORT=m |
142 | CONFIG_IP_SET_HASH_IPPORTIP=m | 154 | CONFIG_IP_SET_HASH_IPPORTIP=m |
143 | CONFIG_IP_SET_HASH_IPPORTNET=m | 155 | CONFIG_IP_SET_HASH_IPPORTNET=m |
156 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
144 | CONFIG_IP_SET_HASH_NET=m | 157 | CONFIG_IP_SET_HASH_NET=m |
158 | CONFIG_IP_SET_HASH_NETNET=m | ||
145 | CONFIG_IP_SET_HASH_NETPORT=m | 159 | CONFIG_IP_SET_HASH_NETPORT=m |
146 | CONFIG_IP_SET_HASH_NETIFACE=m | 160 | CONFIG_IP_SET_HASH_NETIFACE=m |
147 | CONFIG_IP_SET_LIST_SET=m | 161 | CONFIG_IP_SET_LIST_SET=m |
148 | CONFIG_NF_CONNTRACK_IPV4=m | 162 | CONFIG_NF_CONNTRACK_IPV4=m |
163 | CONFIG_NF_TABLES_IPV4=m | ||
164 | CONFIG_NFT_REJECT_IPV4=m | ||
165 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
166 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
167 | CONFIG_NF_TABLES_ARP=m | ||
149 | CONFIG_IP_NF_IPTABLES=m | 168 | CONFIG_IP_NF_IPTABLES=m |
150 | CONFIG_IP_NF_MATCH_AH=m | 169 | CONFIG_IP_NF_MATCH_AH=m |
151 | CONFIG_IP_NF_MATCH_ECN=m | 170 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
153 | CONFIG_IP_NF_MATCH_TTL=m | 172 | CONFIG_IP_NF_MATCH_TTL=m |
154 | CONFIG_IP_NF_FILTER=m | 173 | CONFIG_IP_NF_FILTER=m |
155 | CONFIG_IP_NF_TARGET_REJECT=m | 174 | CONFIG_IP_NF_TARGET_REJECT=m |
175 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
156 | CONFIG_IP_NF_TARGET_ULOG=m | 176 | CONFIG_IP_NF_TARGET_ULOG=m |
157 | CONFIG_NF_NAT_IPV4=m | 177 | CONFIG_NF_NAT_IPV4=m |
158 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 178 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
167 | CONFIG_IP_NF_ARPFILTER=m | 187 | CONFIG_IP_NF_ARPFILTER=m |
168 | CONFIG_IP_NF_ARP_MANGLE=m | 188 | CONFIG_IP_NF_ARP_MANGLE=m |
169 | CONFIG_NF_CONNTRACK_IPV6=m | 189 | CONFIG_NF_CONNTRACK_IPV6=m |
190 | CONFIG_NF_TABLES_IPV6=m | ||
191 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
192 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
170 | CONFIG_IP6_NF_IPTABLES=m | 193 | CONFIG_IP6_NF_IPTABLES=m |
171 | CONFIG_IP6_NF_MATCH_AH=m | 194 | CONFIG_IP6_NF_MATCH_AH=m |
172 | CONFIG_IP6_NF_MATCH_EUI64=m | 195 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
180 | CONFIG_IP6_NF_TARGET_HL=m | 203 | CONFIG_IP6_NF_TARGET_HL=m |
181 | CONFIG_IP6_NF_FILTER=m | 204 | CONFIG_IP6_NF_FILTER=m |
182 | CONFIG_IP6_NF_TARGET_REJECT=m | 205 | CONFIG_IP6_NF_TARGET_REJECT=m |
206 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
183 | CONFIG_IP6_NF_MANGLE=m | 207 | CONFIG_IP6_NF_MANGLE=m |
184 | CONFIG_IP6_NF_RAW=m | 208 | CONFIG_IP6_NF_RAW=m |
185 | CONFIG_NF_NAT_IPV6=m | 209 | CONFIG_NF_NAT_IPV6=m |
186 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 210 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
187 | CONFIG_IP6_NF_TARGET_NPT=m | 211 | CONFIG_IP6_NF_TARGET_NPT=m |
212 | CONFIG_NF_TABLES_BRIDGE=m | ||
188 | CONFIG_IP_DCCP=m | 213 | CONFIG_IP_DCCP=m |
189 | # CONFIG_IP_DCCP_CCID3 is not set | 214 | # CONFIG_IP_DCCP_CCID3 is not set |
190 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 215 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -192,10 +217,13 @@ CONFIG_RDS=m | |||
192 | CONFIG_RDS_TCP=m | 217 | CONFIG_RDS_TCP=m |
193 | CONFIG_L2TP=m | 218 | CONFIG_L2TP=m |
194 | CONFIG_ATALK=m | 219 | CONFIG_ATALK=m |
220 | CONFIG_DNS_RESOLVER=y | ||
195 | CONFIG_BATMAN_ADV=m | 221 | CONFIG_BATMAN_ADV=m |
196 | CONFIG_BATMAN_ADV_DAT=y | 222 | CONFIG_BATMAN_ADV_DAT=y |
223 | CONFIG_BATMAN_ADV_NC=y | ||
224 | CONFIG_NETLINK_DIAG=m | ||
225 | CONFIG_NET_MPLS_GSO=m | ||
197 | # CONFIG_WIRELESS is not set | 226 | # CONFIG_WIRELESS is not set |
198 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
199 | CONFIG_DEVTMPFS=y | 227 | CONFIG_DEVTMPFS=y |
200 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 228 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
201 | # CONFIG_FW_LOADER_USER_HELPER is not set | 229 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -211,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m | |||
211 | CONFIG_BLK_DEV_RAM=y | 239 | CONFIG_BLK_DEV_RAM=y |
212 | CONFIG_CDROM_PKTCDVD=m | 240 | CONFIG_CDROM_PKTCDVD=m |
213 | CONFIG_ATA_OVER_ETH=m | 241 | CONFIG_ATA_OVER_ETH=m |
242 | CONFIG_DUMMY_IRQ=m | ||
214 | CONFIG_IDE=y | 243 | CONFIG_IDE=y |
215 | CONFIG_IDE_GD_ATAPI=y | 244 | CONFIG_IDE_GD_ATAPI=y |
216 | CONFIG_BLK_DEV_IDECD=y | 245 | CONFIG_BLK_DEV_IDECD=y |
@@ -249,10 +278,10 @@ CONFIG_TCM_PSCSI=m | |||
249 | CONFIG_NETDEVICES=y | 278 | CONFIG_NETDEVICES=y |
250 | CONFIG_DUMMY=m | 279 | CONFIG_DUMMY=m |
251 | CONFIG_EQUALIZER=m | 280 | CONFIG_EQUALIZER=m |
252 | CONFIG_MII=y | ||
253 | CONFIG_NET_TEAM=m | 281 | CONFIG_NET_TEAM=m |
254 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 282 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
255 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 283 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
284 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
256 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 285 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
257 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 286 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
258 | CONFIG_VXLAN=m | 287 | CONFIG_VXLAN=m |
@@ -260,6 +289,7 @@ CONFIG_NETCONSOLE=m | |||
260 | CONFIG_NETCONSOLE_DYNAMIC=y | 289 | CONFIG_NETCONSOLE_DYNAMIC=y |
261 | CONFIG_VETH=m | 290 | CONFIG_VETH=m |
262 | CONFIG_ATARILANCE=y | 291 | CONFIG_ATARILANCE=y |
292 | # CONFIG_NET_VENDOR_ARC is not set | ||
263 | # CONFIG_NET_CADENCE is not set | 293 | # CONFIG_NET_CADENCE is not set |
264 | # CONFIG_NET_VENDOR_BROADCOM is not set | 294 | # CONFIG_NET_VENDOR_BROADCOM is not set |
265 | # CONFIG_NET_VENDOR_INTEL is not set | 295 | # CONFIG_NET_VENDOR_INTEL is not set |
@@ -267,6 +297,7 @@ CONFIG_ATARILANCE=y | |||
267 | # CONFIG_NET_VENDOR_MICREL is not set | 297 | # CONFIG_NET_VENDOR_MICREL is not set |
268 | # CONFIG_NET_VENDOR_SEEQ is not set | 298 | # CONFIG_NET_VENDOR_SEEQ is not set |
269 | # CONFIG_NET_VENDOR_STMICRO is not set | 299 | # CONFIG_NET_VENDOR_STMICRO is not set |
300 | # CONFIG_NET_VENDOR_VIA is not set | ||
270 | # CONFIG_NET_VENDOR_WIZNET is not set | 301 | # CONFIG_NET_VENDOR_WIZNET is not set |
271 | CONFIG_PPP=m | 302 | CONFIG_PPP=m |
272 | CONFIG_PPP_BSDCOMP=m | 303 | CONFIG_PPP_BSDCOMP=m |
@@ -291,7 +322,6 @@ CONFIG_MOUSE_ATARI=m | |||
291 | CONFIG_INPUT_MISC=y | 322 | CONFIG_INPUT_MISC=y |
292 | CONFIG_INPUT_M68K_BEEP=m | 323 | CONFIG_INPUT_M68K_BEEP=m |
293 | # CONFIG_SERIO is not set | 324 | # CONFIG_SERIO is not set |
294 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
295 | # CONFIG_LEGACY_PTYS is not set | 325 | # CONFIG_LEGACY_PTYS is not set |
296 | # CONFIG_DEVKMEM is not set | 326 | # CONFIG_DEVKMEM is not set |
297 | CONFIG_PRINTER=m | 327 | CONFIG_PRINTER=m |
@@ -320,10 +350,6 @@ CONFIG_NFBLOCK=y | |||
320 | CONFIG_NFCON=y | 350 | CONFIG_NFCON=y |
321 | CONFIG_NFETH=y | 351 | CONFIG_NFETH=y |
322 | CONFIG_ATARI_DSP56K=m | 352 | CONFIG_ATARI_DSP56K=m |
323 | CONFIG_EXT2_FS=y | ||
324 | CONFIG_EXT3_FS=y | ||
325 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
326 | # CONFIG_EXT3_FS_XATTR is not set | ||
327 | CONFIG_EXT4_FS=y | 353 | CONFIG_EXT4_FS=y |
328 | CONFIG_REISERFS_FS=m | 354 | CONFIG_REISERFS_FS=m |
329 | CONFIG_JFS_FS=m | 355 | CONFIG_JFS_FS=m |
@@ -360,7 +386,7 @@ CONFIG_QNX6FS_FS=m | |||
360 | CONFIG_SYSV_FS=m | 386 | CONFIG_SYSV_FS=m |
361 | CONFIG_UFS_FS=m | 387 | CONFIG_UFS_FS=m |
362 | CONFIG_NFS_FS=y | 388 | CONFIG_NFS_FS=y |
363 | CONFIG_NFS_V4=y | 389 | CONFIG_NFS_V4=m |
364 | CONFIG_NFS_SWAP=y | 390 | CONFIG_NFS_SWAP=y |
365 | CONFIG_ROOT_NFS=y | 391 | CONFIG_ROOT_NFS=y |
366 | CONFIG_NFSD=m | 392 | CONFIG_NFSD=m |
@@ -419,10 +445,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
419 | CONFIG_DLM=m | 445 | CONFIG_DLM=m |
420 | CONFIG_MAGIC_SYSRQ=y | 446 | CONFIG_MAGIC_SYSRQ=y |
421 | CONFIG_ASYNC_RAID6_TEST=m | 447 | CONFIG_ASYNC_RAID6_TEST=m |
448 | CONFIG_TEST_STRING_HELPERS=m | ||
422 | CONFIG_ENCRYPTED_KEYS=m | 449 | CONFIG_ENCRYPTED_KEYS=m |
423 | CONFIG_CRYPTO_MANAGER=y | 450 | CONFIG_CRYPTO_MANAGER=y |
424 | CONFIG_CRYPTO_USER=m | 451 | CONFIG_CRYPTO_USER=m |
425 | CONFIG_CRYPTO_NULL=m | ||
426 | CONFIG_CRYPTO_CRYPTD=m | 452 | CONFIG_CRYPTO_CRYPTD=m |
427 | CONFIG_CRYPTO_TEST=m | 453 | CONFIG_CRYPTO_TEST=m |
428 | CONFIG_CRYPTO_CCM=m | 454 | CONFIG_CRYPTO_CCM=m |
@@ -455,6 +481,8 @@ CONFIG_CRYPTO_TEA=m | |||
455 | CONFIG_CRYPTO_TWOFISH=m | 481 | CONFIG_CRYPTO_TWOFISH=m |
456 | CONFIG_CRYPTO_ZLIB=m | 482 | CONFIG_CRYPTO_ZLIB=m |
457 | CONFIG_CRYPTO_LZO=m | 483 | CONFIG_CRYPTO_LZO=m |
484 | CONFIG_CRYPTO_LZ4=m | ||
485 | CONFIG_CRYPTO_LZ4HC=m | ||
458 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 486 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
459 | CONFIG_CRYPTO_USER_API_HASH=m | 487 | CONFIG_CRYPTO_USER_API_HASH=m |
460 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 488 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c015ddb6fd80..4aa4f45e52a8 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y | |||
48 | CONFIG_NET_IPIP=m | 48 | CONFIG_NET_IPIP=m |
49 | CONFIG_NET_IPGRE_DEMUX=m | 49 | CONFIG_NET_IPGRE_DEMUX=m |
50 | CONFIG_NET_IPGRE=m | 50 | CONFIG_NET_IPGRE=m |
51 | CONFIG_SYN_COOKIES=y | ||
52 | CONFIG_NET_IPVTI=m | 51 | CONFIG_NET_IPVTI=m |
53 | CONFIG_INET_AH=m | 52 | CONFIG_INET_AH=m |
54 | CONFIG_INET_ESP=m | 53 | CONFIG_INET_ESP=m |
@@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
59 | # CONFIG_INET_LRO is not set | 58 | # CONFIG_INET_LRO is not set |
60 | CONFIG_INET_DIAG=m | 59 | CONFIG_INET_DIAG=m |
61 | CONFIG_INET_UDP_DIAG=m | 60 | CONFIG_INET_UDP_DIAG=m |
62 | CONFIG_IPV6_PRIVACY=y | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 61 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 62 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 63 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 64 | CONFIG_INET6_IPCOMP=m |
65 | CONFIG_IPV6_VTI=m | ||
67 | CONFIG_IPV6_GRE=m | 66 | CONFIG_IPV6_GRE=m |
68 | CONFIG_NETFILTER=y | 67 | CONFIG_NETFILTER=y |
69 | CONFIG_NF_CONNTRACK=m | 68 | CONFIG_NF_CONNTRACK=m |
@@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
81 | CONFIG_NF_CONNTRACK_SANE=m | 80 | CONFIG_NF_CONNTRACK_SANE=m |
82 | CONFIG_NF_CONNTRACK_SIP=m | 81 | CONFIG_NF_CONNTRACK_SIP=m |
83 | CONFIG_NF_CONNTRACK_TFTP=m | 82 | CONFIG_NF_CONNTRACK_TFTP=m |
83 | CONFIG_NF_TABLES=m | ||
84 | CONFIG_NFT_EXTHDR=m | ||
85 | CONFIG_NFT_META=m | ||
86 | CONFIG_NFT_CT=m | ||
87 | CONFIG_NFT_RBTREE=m | ||
88 | CONFIG_NFT_HASH=m | ||
89 | CONFIG_NFT_COUNTER=m | ||
90 | CONFIG_NFT_LOG=m | ||
91 | CONFIG_NFT_LIMIT=m | ||
92 | CONFIG_NFT_NAT=m | ||
93 | CONFIG_NFT_COMPAT=m | ||
84 | CONFIG_NETFILTER_XT_SET=m | 94 | CONFIG_NETFILTER_XT_SET=m |
85 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 95 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
86 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 96 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
94 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 104 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
95 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 105 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
96 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 106 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
107 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
97 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 108 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
98 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 109 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
99 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 110 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
126 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 137 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
127 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 138 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
128 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 139 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
140 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
129 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 141 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
130 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 142 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
131 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 143 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
140 | CONFIG_IP_SET_HASH_IPPORT=m | 152 | CONFIG_IP_SET_HASH_IPPORT=m |
141 | CONFIG_IP_SET_HASH_IPPORTIP=m | 153 | CONFIG_IP_SET_HASH_IPPORTIP=m |
142 | CONFIG_IP_SET_HASH_IPPORTNET=m | 154 | CONFIG_IP_SET_HASH_IPPORTNET=m |
155 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
143 | CONFIG_IP_SET_HASH_NET=m | 156 | CONFIG_IP_SET_HASH_NET=m |
157 | CONFIG_IP_SET_HASH_NETNET=m | ||
144 | CONFIG_IP_SET_HASH_NETPORT=m | 158 | CONFIG_IP_SET_HASH_NETPORT=m |
145 | CONFIG_IP_SET_HASH_NETIFACE=m | 159 | CONFIG_IP_SET_HASH_NETIFACE=m |
146 | CONFIG_IP_SET_LIST_SET=m | 160 | CONFIG_IP_SET_LIST_SET=m |
147 | CONFIG_NF_CONNTRACK_IPV4=m | 161 | CONFIG_NF_CONNTRACK_IPV4=m |
162 | CONFIG_NF_TABLES_IPV4=m | ||
163 | CONFIG_NFT_REJECT_IPV4=m | ||
164 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
165 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
166 | CONFIG_NF_TABLES_ARP=m | ||
148 | CONFIG_IP_NF_IPTABLES=m | 167 | CONFIG_IP_NF_IPTABLES=m |
149 | CONFIG_IP_NF_MATCH_AH=m | 168 | CONFIG_IP_NF_MATCH_AH=m |
150 | CONFIG_IP_NF_MATCH_ECN=m | 169 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
152 | CONFIG_IP_NF_MATCH_TTL=m | 171 | CONFIG_IP_NF_MATCH_TTL=m |
153 | CONFIG_IP_NF_FILTER=m | 172 | CONFIG_IP_NF_FILTER=m |
154 | CONFIG_IP_NF_TARGET_REJECT=m | 173 | CONFIG_IP_NF_TARGET_REJECT=m |
174 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
155 | CONFIG_IP_NF_TARGET_ULOG=m | 175 | CONFIG_IP_NF_TARGET_ULOG=m |
156 | CONFIG_NF_NAT_IPV4=m | 176 | CONFIG_NF_NAT_IPV4=m |
157 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 177 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
166 | CONFIG_IP_NF_ARPFILTER=m | 186 | CONFIG_IP_NF_ARPFILTER=m |
167 | CONFIG_IP_NF_ARP_MANGLE=m | 187 | CONFIG_IP_NF_ARP_MANGLE=m |
168 | CONFIG_NF_CONNTRACK_IPV6=m | 188 | CONFIG_NF_CONNTRACK_IPV6=m |
189 | CONFIG_NF_TABLES_IPV6=m | ||
190 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
191 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
169 | CONFIG_IP6_NF_IPTABLES=m | 192 | CONFIG_IP6_NF_IPTABLES=m |
170 | CONFIG_IP6_NF_MATCH_AH=m | 193 | CONFIG_IP6_NF_MATCH_AH=m |
171 | CONFIG_IP6_NF_MATCH_EUI64=m | 194 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
179 | CONFIG_IP6_NF_TARGET_HL=m | 202 | CONFIG_IP6_NF_TARGET_HL=m |
180 | CONFIG_IP6_NF_FILTER=m | 203 | CONFIG_IP6_NF_FILTER=m |
181 | CONFIG_IP6_NF_TARGET_REJECT=m | 204 | CONFIG_IP6_NF_TARGET_REJECT=m |
205 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
182 | CONFIG_IP6_NF_MANGLE=m | 206 | CONFIG_IP6_NF_MANGLE=m |
183 | CONFIG_IP6_NF_RAW=m | 207 | CONFIG_IP6_NF_RAW=m |
184 | CONFIG_NF_NAT_IPV6=m | 208 | CONFIG_NF_NAT_IPV6=m |
185 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 209 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
186 | CONFIG_IP6_NF_TARGET_NPT=m | 210 | CONFIG_IP6_NF_TARGET_NPT=m |
211 | CONFIG_NF_TABLES_BRIDGE=m | ||
187 | CONFIG_IP_DCCP=m | 212 | CONFIG_IP_DCCP=m |
188 | # CONFIG_IP_DCCP_CCID3 is not set | 213 | # CONFIG_IP_DCCP_CCID3 is not set |
189 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 214 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -191,10 +216,13 @@ CONFIG_RDS=m | |||
191 | CONFIG_RDS_TCP=m | 216 | CONFIG_RDS_TCP=m |
192 | CONFIG_L2TP=m | 217 | CONFIG_L2TP=m |
193 | CONFIG_ATALK=m | 218 | CONFIG_ATALK=m |
219 | CONFIG_DNS_RESOLVER=y | ||
194 | CONFIG_BATMAN_ADV=m | 220 | CONFIG_BATMAN_ADV=m |
195 | CONFIG_BATMAN_ADV_DAT=y | 221 | CONFIG_BATMAN_ADV_DAT=y |
222 | CONFIG_BATMAN_ADV_NC=y | ||
223 | CONFIG_NETLINK_DIAG=m | ||
224 | CONFIG_NET_MPLS_GSO=m | ||
196 | # CONFIG_WIRELESS is not set | 225 | # CONFIG_WIRELESS is not set |
197 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
198 | CONFIG_DEVTMPFS=y | 226 | CONFIG_DEVTMPFS=y |
199 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 227 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
200 | # CONFIG_FW_LOADER_USER_HELPER is not set | 228 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m | |||
206 | CONFIG_BLK_DEV_RAM=y | 234 | CONFIG_BLK_DEV_RAM=y |
207 | CONFIG_CDROM_PKTCDVD=m | 235 | CONFIG_CDROM_PKTCDVD=m |
208 | CONFIG_ATA_OVER_ETH=m | 236 | CONFIG_ATA_OVER_ETH=m |
237 | CONFIG_DUMMY_IRQ=m | ||
209 | CONFIG_RAID_ATTRS=m | 238 | CONFIG_RAID_ATTRS=m |
210 | CONFIG_SCSI=y | 239 | CONFIG_SCSI=y |
211 | CONFIG_SCSI_TGT=m | 240 | CONFIG_SCSI_TGT=m |
@@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m | |||
243 | CONFIG_NET_TEAM=m | 272 | CONFIG_NET_TEAM=m |
244 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 273 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
245 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 274 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
275 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
246 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 276 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
247 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 277 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
248 | CONFIG_VXLAN=m | 278 | CONFIG_VXLAN=m |
249 | CONFIG_NETCONSOLE=m | 279 | CONFIG_NETCONSOLE=m |
250 | CONFIG_NETCONSOLE_DYNAMIC=y | 280 | CONFIG_NETCONSOLE_DYNAMIC=y |
251 | CONFIG_VETH=m | 281 | CONFIG_VETH=m |
282 | # CONFIG_NET_VENDOR_ARC is not set | ||
252 | # CONFIG_NET_CADENCE is not set | 283 | # CONFIG_NET_CADENCE is not set |
253 | # CONFIG_NET_VENDOR_BROADCOM is not set | 284 | # CONFIG_NET_VENDOR_BROADCOM is not set |
254 | CONFIG_BVME6000_NET=y | 285 | CONFIG_BVME6000_NET=y |
@@ -257,6 +288,7 @@ CONFIG_BVME6000_NET=y | |||
257 | # CONFIG_NET_VENDOR_NATSEMI is not set | 288 | # CONFIG_NET_VENDOR_NATSEMI is not set |
258 | # CONFIG_NET_VENDOR_SEEQ is not set | 289 | # CONFIG_NET_VENDOR_SEEQ is not set |
259 | # CONFIG_NET_VENDOR_STMICRO is not set | 290 | # CONFIG_NET_VENDOR_STMICRO is not set |
291 | # CONFIG_NET_VENDOR_VIA is not set | ||
260 | # CONFIG_NET_VENDOR_WIZNET is not set | 292 | # CONFIG_NET_VENDOR_WIZNET is not set |
261 | CONFIG_PPP=m | 293 | CONFIG_PPP=m |
262 | CONFIG_PPP_BSDCOMP=m | 294 | CONFIG_PPP_BSDCOMP=m |
@@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y | |||
294 | CONFIG_RTC_DRV_GENERIC=m | 326 | CONFIG_RTC_DRV_GENERIC=m |
295 | # CONFIG_IOMMU_SUPPORT is not set | 327 | # CONFIG_IOMMU_SUPPORT is not set |
296 | CONFIG_PROC_HARDWARE=y | 328 | CONFIG_PROC_HARDWARE=y |
297 | CONFIG_EXT2_FS=y | ||
298 | CONFIG_EXT3_FS=y | ||
299 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
300 | # CONFIG_EXT3_FS_XATTR is not set | ||
301 | CONFIG_EXT4_FS=y | 329 | CONFIG_EXT4_FS=y |
302 | CONFIG_REISERFS_FS=m | 330 | CONFIG_REISERFS_FS=m |
303 | CONFIG_JFS_FS=m | 331 | CONFIG_JFS_FS=m |
@@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m | |||
334 | CONFIG_SYSV_FS=m | 362 | CONFIG_SYSV_FS=m |
335 | CONFIG_UFS_FS=m | 363 | CONFIG_UFS_FS=m |
336 | CONFIG_NFS_FS=y | 364 | CONFIG_NFS_FS=y |
337 | CONFIG_NFS_V4=y | 365 | CONFIG_NFS_V4=m |
338 | CONFIG_NFS_SWAP=y | 366 | CONFIG_NFS_SWAP=y |
339 | CONFIG_ROOT_NFS=y | 367 | CONFIG_ROOT_NFS=y |
340 | CONFIG_NFSD=m | 368 | CONFIG_NFSD=m |
@@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
393 | CONFIG_DLM=m | 421 | CONFIG_DLM=m |
394 | CONFIG_MAGIC_SYSRQ=y | 422 | CONFIG_MAGIC_SYSRQ=y |
395 | CONFIG_ASYNC_RAID6_TEST=m | 423 | CONFIG_ASYNC_RAID6_TEST=m |
424 | CONFIG_TEST_STRING_HELPERS=m | ||
396 | CONFIG_ENCRYPTED_KEYS=m | 425 | CONFIG_ENCRYPTED_KEYS=m |
397 | CONFIG_CRYPTO_MANAGER=y | 426 | CONFIG_CRYPTO_MANAGER=y |
398 | CONFIG_CRYPTO_USER=m | 427 | CONFIG_CRYPTO_USER=m |
399 | CONFIG_CRYPTO_NULL=m | ||
400 | CONFIG_CRYPTO_CRYPTD=m | 428 | CONFIG_CRYPTO_CRYPTD=m |
401 | CONFIG_CRYPTO_TEST=m | 429 | CONFIG_CRYPTO_TEST=m |
402 | CONFIG_CRYPTO_CCM=m | 430 | CONFIG_CRYPTO_CCM=m |
@@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m | |||
429 | CONFIG_CRYPTO_TWOFISH=m | 457 | CONFIG_CRYPTO_TWOFISH=m |
430 | CONFIG_CRYPTO_ZLIB=m | 458 | CONFIG_CRYPTO_ZLIB=m |
431 | CONFIG_CRYPTO_LZO=m | 459 | CONFIG_CRYPTO_LZO=m |
460 | CONFIG_CRYPTO_LZ4=m | ||
461 | CONFIG_CRYPTO_LZ4HC=m | ||
432 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 462 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
433 | CONFIG_CRYPTO_USER_API_HASH=m | 463 | CONFIG_CRYPTO_USER_API_HASH=m |
434 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 464 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ec7382d8afff..7cd9d9f456fb 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -50,7 +50,6 @@ CONFIG_IP_PNP_RARP=y | |||
50 | CONFIG_NET_IPIP=m | 50 | CONFIG_NET_IPIP=m |
51 | CONFIG_NET_IPGRE_DEMUX=m | 51 | CONFIG_NET_IPGRE_DEMUX=m |
52 | CONFIG_NET_IPGRE=m | 52 | CONFIG_NET_IPGRE=m |
53 | CONFIG_SYN_COOKIES=y | ||
54 | CONFIG_NET_IPVTI=m | 53 | CONFIG_NET_IPVTI=m |
55 | CONFIG_INET_AH=m | 54 | CONFIG_INET_AH=m |
56 | CONFIG_INET_ESP=m | 55 | CONFIG_INET_ESP=m |
@@ -61,11 +60,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
61 | # CONFIG_INET_LRO is not set | 60 | # CONFIG_INET_LRO is not set |
62 | CONFIG_INET_DIAG=m | 61 | CONFIG_INET_DIAG=m |
63 | CONFIG_INET_UDP_DIAG=m | 62 | CONFIG_INET_UDP_DIAG=m |
64 | CONFIG_IPV6_PRIVACY=y | ||
65 | CONFIG_IPV6_ROUTER_PREF=y | 63 | CONFIG_IPV6_ROUTER_PREF=y |
66 | CONFIG_INET6_AH=m | 64 | CONFIG_INET6_AH=m |
67 | CONFIG_INET6_ESP=m | 65 | CONFIG_INET6_ESP=m |
68 | CONFIG_INET6_IPCOMP=m | 66 | CONFIG_INET6_IPCOMP=m |
67 | CONFIG_IPV6_VTI=m | ||
69 | CONFIG_IPV6_GRE=m | 68 | CONFIG_IPV6_GRE=m |
70 | CONFIG_NETFILTER=y | 69 | CONFIG_NETFILTER=y |
71 | CONFIG_NF_CONNTRACK=m | 70 | CONFIG_NF_CONNTRACK=m |
@@ -83,6 +82,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
83 | CONFIG_NF_CONNTRACK_SANE=m | 82 | CONFIG_NF_CONNTRACK_SANE=m |
84 | CONFIG_NF_CONNTRACK_SIP=m | 83 | CONFIG_NF_CONNTRACK_SIP=m |
85 | CONFIG_NF_CONNTRACK_TFTP=m | 84 | CONFIG_NF_CONNTRACK_TFTP=m |
85 | CONFIG_NF_TABLES=m | ||
86 | CONFIG_NFT_EXTHDR=m | ||
87 | CONFIG_NFT_META=m | ||
88 | CONFIG_NFT_CT=m | ||
89 | CONFIG_NFT_RBTREE=m | ||
90 | CONFIG_NFT_HASH=m | ||
91 | CONFIG_NFT_COUNTER=m | ||
92 | CONFIG_NFT_LOG=m | ||
93 | CONFIG_NFT_LIMIT=m | ||
94 | CONFIG_NFT_NAT=m | ||
95 | CONFIG_NFT_COMPAT=m | ||
86 | CONFIG_NETFILTER_XT_SET=m | 96 | CONFIG_NETFILTER_XT_SET=m |
87 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 97 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
88 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 98 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -96,6 +106,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
96 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 106 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
97 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 107 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
98 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 108 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
109 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
99 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 110 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
100 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 111 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
101 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 112 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -128,6 +139,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
128 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 139 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
129 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 140 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
130 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 141 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
142 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
131 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 143 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
132 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 144 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
133 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 145 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -142,11 +154,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
142 | CONFIG_IP_SET_HASH_IPPORT=m | 154 | CONFIG_IP_SET_HASH_IPPORT=m |
143 | CONFIG_IP_SET_HASH_IPPORTIP=m | 155 | CONFIG_IP_SET_HASH_IPPORTIP=m |
144 | CONFIG_IP_SET_HASH_IPPORTNET=m | 156 | CONFIG_IP_SET_HASH_IPPORTNET=m |
157 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
145 | CONFIG_IP_SET_HASH_NET=m | 158 | CONFIG_IP_SET_HASH_NET=m |
159 | CONFIG_IP_SET_HASH_NETNET=m | ||
146 | CONFIG_IP_SET_HASH_NETPORT=m | 160 | CONFIG_IP_SET_HASH_NETPORT=m |
147 | CONFIG_IP_SET_HASH_NETIFACE=m | 161 | CONFIG_IP_SET_HASH_NETIFACE=m |
148 | CONFIG_IP_SET_LIST_SET=m | 162 | CONFIG_IP_SET_LIST_SET=m |
149 | CONFIG_NF_CONNTRACK_IPV4=m | 163 | CONFIG_NF_CONNTRACK_IPV4=m |
164 | CONFIG_NF_TABLES_IPV4=m | ||
165 | CONFIG_NFT_REJECT_IPV4=m | ||
166 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
167 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
168 | CONFIG_NF_TABLES_ARP=m | ||
150 | CONFIG_IP_NF_IPTABLES=m | 169 | CONFIG_IP_NF_IPTABLES=m |
151 | CONFIG_IP_NF_MATCH_AH=m | 170 | CONFIG_IP_NF_MATCH_AH=m |
152 | CONFIG_IP_NF_MATCH_ECN=m | 171 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -154,6 +173,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
154 | CONFIG_IP_NF_MATCH_TTL=m | 173 | CONFIG_IP_NF_MATCH_TTL=m |
155 | CONFIG_IP_NF_FILTER=m | 174 | CONFIG_IP_NF_FILTER=m |
156 | CONFIG_IP_NF_TARGET_REJECT=m | 175 | CONFIG_IP_NF_TARGET_REJECT=m |
176 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
157 | CONFIG_IP_NF_TARGET_ULOG=m | 177 | CONFIG_IP_NF_TARGET_ULOG=m |
158 | CONFIG_NF_NAT_IPV4=m | 178 | CONFIG_NF_NAT_IPV4=m |
159 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 179 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -168,6 +188,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
168 | CONFIG_IP_NF_ARPFILTER=m | 188 | CONFIG_IP_NF_ARPFILTER=m |
169 | CONFIG_IP_NF_ARP_MANGLE=m | 189 | CONFIG_IP_NF_ARP_MANGLE=m |
170 | CONFIG_NF_CONNTRACK_IPV6=m | 190 | CONFIG_NF_CONNTRACK_IPV6=m |
191 | CONFIG_NF_TABLES_IPV6=m | ||
192 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
193 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
171 | CONFIG_IP6_NF_IPTABLES=m | 194 | CONFIG_IP6_NF_IPTABLES=m |
172 | CONFIG_IP6_NF_MATCH_AH=m | 195 | CONFIG_IP6_NF_MATCH_AH=m |
173 | CONFIG_IP6_NF_MATCH_EUI64=m | 196 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -181,11 +204,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
181 | CONFIG_IP6_NF_TARGET_HL=m | 204 | CONFIG_IP6_NF_TARGET_HL=m |
182 | CONFIG_IP6_NF_FILTER=m | 205 | CONFIG_IP6_NF_FILTER=m |
183 | CONFIG_IP6_NF_TARGET_REJECT=m | 206 | CONFIG_IP6_NF_TARGET_REJECT=m |
207 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
184 | CONFIG_IP6_NF_MANGLE=m | 208 | CONFIG_IP6_NF_MANGLE=m |
185 | CONFIG_IP6_NF_RAW=m | 209 | CONFIG_IP6_NF_RAW=m |
186 | CONFIG_NF_NAT_IPV6=m | 210 | CONFIG_NF_NAT_IPV6=m |
187 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 211 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
188 | CONFIG_IP6_NF_TARGET_NPT=m | 212 | CONFIG_IP6_NF_TARGET_NPT=m |
213 | CONFIG_NF_TABLES_BRIDGE=m | ||
189 | CONFIG_IP_DCCP=m | 214 | CONFIG_IP_DCCP=m |
190 | # CONFIG_IP_DCCP_CCID3 is not set | 215 | # CONFIG_IP_DCCP_CCID3 is not set |
191 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 216 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -193,10 +218,13 @@ CONFIG_RDS=m | |||
193 | CONFIG_RDS_TCP=m | 218 | CONFIG_RDS_TCP=m |
194 | CONFIG_L2TP=m | 219 | CONFIG_L2TP=m |
195 | CONFIG_ATALK=m | 220 | CONFIG_ATALK=m |
221 | CONFIG_DNS_RESOLVER=y | ||
196 | CONFIG_BATMAN_ADV=m | 222 | CONFIG_BATMAN_ADV=m |
197 | CONFIG_BATMAN_ADV_DAT=y | 223 | CONFIG_BATMAN_ADV_DAT=y |
224 | CONFIG_BATMAN_ADV_NC=y | ||
225 | CONFIG_NETLINK_DIAG=m | ||
226 | CONFIG_NET_MPLS_GSO=m | ||
198 | # CONFIG_WIRELESS is not set | 227 | # CONFIG_WIRELESS is not set |
199 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
200 | CONFIG_DEVTMPFS=y | 228 | CONFIG_DEVTMPFS=y |
201 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 229 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
202 | # CONFIG_FW_LOADER_USER_HELPER is not set | 230 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -208,6 +236,7 @@ CONFIG_BLK_DEV_NBD=m | |||
208 | CONFIG_BLK_DEV_RAM=y | 236 | CONFIG_BLK_DEV_RAM=y |
209 | CONFIG_CDROM_PKTCDVD=m | 237 | CONFIG_CDROM_PKTCDVD=m |
210 | CONFIG_ATA_OVER_ETH=m | 238 | CONFIG_ATA_OVER_ETH=m |
239 | CONFIG_DUMMY_IRQ=m | ||
211 | CONFIG_RAID_ATTRS=m | 240 | CONFIG_RAID_ATTRS=m |
212 | CONFIG_SCSI=y | 241 | CONFIG_SCSI=y |
213 | CONFIG_SCSI_TGT=m | 242 | CONFIG_SCSI_TGT=m |
@@ -244,6 +273,7 @@ CONFIG_EQUALIZER=m | |||
244 | CONFIG_NET_TEAM=m | 273 | CONFIG_NET_TEAM=m |
245 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 274 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
246 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 275 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
276 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
247 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 277 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
248 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 278 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
249 | CONFIG_VXLAN=m | 279 | CONFIG_VXLAN=m |
@@ -251,6 +281,7 @@ CONFIG_NETCONSOLE=m | |||
251 | CONFIG_NETCONSOLE_DYNAMIC=y | 281 | CONFIG_NETCONSOLE_DYNAMIC=y |
252 | CONFIG_VETH=m | 282 | CONFIG_VETH=m |
253 | CONFIG_HPLANCE=y | 283 | CONFIG_HPLANCE=y |
284 | # CONFIG_NET_VENDOR_ARC is not set | ||
254 | # CONFIG_NET_CADENCE is not set | 285 | # CONFIG_NET_CADENCE is not set |
255 | # CONFIG_NET_VENDOR_BROADCOM is not set | 286 | # CONFIG_NET_VENDOR_BROADCOM is not set |
256 | # CONFIG_NET_VENDOR_INTEL is not set | 287 | # CONFIG_NET_VENDOR_INTEL is not set |
@@ -259,6 +290,7 @@ CONFIG_HPLANCE=y | |||
259 | # CONFIG_NET_VENDOR_NATSEMI is not set | 290 | # CONFIG_NET_VENDOR_NATSEMI is not set |
260 | # CONFIG_NET_VENDOR_SEEQ is not set | 291 | # CONFIG_NET_VENDOR_SEEQ is not set |
261 | # CONFIG_NET_VENDOR_STMICRO is not set | 292 | # CONFIG_NET_VENDOR_STMICRO is not set |
293 | # CONFIG_NET_VENDOR_VIA is not set | ||
262 | # CONFIG_NET_VENDOR_WIZNET is not set | 294 | # CONFIG_NET_VENDOR_WIZNET is not set |
263 | CONFIG_PPP=m | 295 | CONFIG_PPP=m |
264 | CONFIG_PPP_BSDCOMP=m | 296 | CONFIG_PPP_BSDCOMP=m |
@@ -282,7 +314,6 @@ CONFIG_MOUSE_SERIAL=m | |||
282 | CONFIG_INPUT_MISC=y | 314 | CONFIG_INPUT_MISC=y |
283 | CONFIG_HP_SDC_RTC=m | 315 | CONFIG_HP_SDC_RTC=m |
284 | CONFIG_SERIO_SERPORT=m | 316 | CONFIG_SERIO_SERPORT=m |
285 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
286 | # CONFIG_LEGACY_PTYS is not set | 317 | # CONFIG_LEGACY_PTYS is not set |
287 | # CONFIG_DEVKMEM is not set | 318 | # CONFIG_DEVKMEM is not set |
288 | # CONFIG_HW_RANDOM is not set | 319 | # CONFIG_HW_RANDOM is not set |
@@ -304,10 +335,6 @@ CONFIG_RTC_CLASS=y | |||
304 | CONFIG_RTC_DRV_GENERIC=m | 335 | CONFIG_RTC_DRV_GENERIC=m |
305 | # CONFIG_IOMMU_SUPPORT is not set | 336 | # CONFIG_IOMMU_SUPPORT is not set |
306 | CONFIG_PROC_HARDWARE=y | 337 | CONFIG_PROC_HARDWARE=y |
307 | CONFIG_EXT2_FS=y | ||
308 | CONFIG_EXT3_FS=y | ||
309 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
310 | # CONFIG_EXT3_FS_XATTR is not set | ||
311 | CONFIG_EXT4_FS=y | 338 | CONFIG_EXT4_FS=y |
312 | CONFIG_REISERFS_FS=m | 339 | CONFIG_REISERFS_FS=m |
313 | CONFIG_JFS_FS=m | 340 | CONFIG_JFS_FS=m |
@@ -344,7 +371,7 @@ CONFIG_QNX6FS_FS=m | |||
344 | CONFIG_SYSV_FS=m | 371 | CONFIG_SYSV_FS=m |
345 | CONFIG_UFS_FS=m | 372 | CONFIG_UFS_FS=m |
346 | CONFIG_NFS_FS=y | 373 | CONFIG_NFS_FS=y |
347 | CONFIG_NFS_V4=y | 374 | CONFIG_NFS_V4=m |
348 | CONFIG_NFS_SWAP=y | 375 | CONFIG_NFS_SWAP=y |
349 | CONFIG_ROOT_NFS=y | 376 | CONFIG_ROOT_NFS=y |
350 | CONFIG_NFSD=m | 377 | CONFIG_NFSD=m |
@@ -403,10 +430,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
403 | CONFIG_DLM=m | 430 | CONFIG_DLM=m |
404 | CONFIG_MAGIC_SYSRQ=y | 431 | CONFIG_MAGIC_SYSRQ=y |
405 | CONFIG_ASYNC_RAID6_TEST=m | 432 | CONFIG_ASYNC_RAID6_TEST=m |
433 | CONFIG_TEST_STRING_HELPERS=m | ||
406 | CONFIG_ENCRYPTED_KEYS=m | 434 | CONFIG_ENCRYPTED_KEYS=m |
407 | CONFIG_CRYPTO_MANAGER=y | 435 | CONFIG_CRYPTO_MANAGER=y |
408 | CONFIG_CRYPTO_USER=m | 436 | CONFIG_CRYPTO_USER=m |
409 | CONFIG_CRYPTO_NULL=m | ||
410 | CONFIG_CRYPTO_CRYPTD=m | 437 | CONFIG_CRYPTO_CRYPTD=m |
411 | CONFIG_CRYPTO_TEST=m | 438 | CONFIG_CRYPTO_TEST=m |
412 | CONFIG_CRYPTO_CCM=m | 439 | CONFIG_CRYPTO_CCM=m |
@@ -439,6 +466,8 @@ CONFIG_CRYPTO_TEA=m | |||
439 | CONFIG_CRYPTO_TWOFISH=m | 466 | CONFIG_CRYPTO_TWOFISH=m |
440 | CONFIG_CRYPTO_ZLIB=m | 467 | CONFIG_CRYPTO_ZLIB=m |
441 | CONFIG_CRYPTO_LZO=m | 468 | CONFIG_CRYPTO_LZO=m |
469 | CONFIG_CRYPTO_LZ4=m | ||
470 | CONFIG_CRYPTO_LZ4HC=m | ||
442 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 471 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
443 | CONFIG_CRYPTO_USER_API_HASH=m | 472 | CONFIG_CRYPTO_USER_API_HASH=m |
444 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 473 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 7d46fbec7042..31f5bd061d14 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -49,7 +49,6 @@ CONFIG_IP_PNP_RARP=y | |||
49 | CONFIG_NET_IPIP=m | 49 | CONFIG_NET_IPIP=m |
50 | CONFIG_NET_IPGRE_DEMUX=m | 50 | CONFIG_NET_IPGRE_DEMUX=m |
51 | CONFIG_NET_IPGRE=m | 51 | CONFIG_NET_IPGRE=m |
52 | CONFIG_SYN_COOKIES=y | ||
53 | CONFIG_NET_IPVTI=m | 52 | CONFIG_NET_IPVTI=m |
54 | CONFIG_INET_AH=m | 53 | CONFIG_INET_AH=m |
55 | CONFIG_INET_ESP=m | 54 | CONFIG_INET_ESP=m |
@@ -60,11 +59,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
60 | # CONFIG_INET_LRO is not set | 59 | # CONFIG_INET_LRO is not set |
61 | CONFIG_INET_DIAG=m | 60 | CONFIG_INET_DIAG=m |
62 | CONFIG_INET_UDP_DIAG=m | 61 | CONFIG_INET_UDP_DIAG=m |
63 | CONFIG_IPV6_PRIVACY=y | ||
64 | CONFIG_IPV6_ROUTER_PREF=y | 62 | CONFIG_IPV6_ROUTER_PREF=y |
65 | CONFIG_INET6_AH=m | 63 | CONFIG_INET6_AH=m |
66 | CONFIG_INET6_ESP=m | 64 | CONFIG_INET6_ESP=m |
67 | CONFIG_INET6_IPCOMP=m | 65 | CONFIG_INET6_IPCOMP=m |
66 | CONFIG_IPV6_VTI=m | ||
68 | CONFIG_IPV6_GRE=m | 67 | CONFIG_IPV6_GRE=m |
69 | CONFIG_NETFILTER=y | 68 | CONFIG_NETFILTER=y |
70 | CONFIG_NF_CONNTRACK=m | 69 | CONFIG_NF_CONNTRACK=m |
@@ -82,6 +81,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
82 | CONFIG_NF_CONNTRACK_SANE=m | 81 | CONFIG_NF_CONNTRACK_SANE=m |
83 | CONFIG_NF_CONNTRACK_SIP=m | 82 | CONFIG_NF_CONNTRACK_SIP=m |
84 | CONFIG_NF_CONNTRACK_TFTP=m | 83 | CONFIG_NF_CONNTRACK_TFTP=m |
84 | CONFIG_NF_TABLES=m | ||
85 | CONFIG_NFT_EXTHDR=m | ||
86 | CONFIG_NFT_META=m | ||
87 | CONFIG_NFT_CT=m | ||
88 | CONFIG_NFT_RBTREE=m | ||
89 | CONFIG_NFT_HASH=m | ||
90 | CONFIG_NFT_COUNTER=m | ||
91 | CONFIG_NFT_LOG=m | ||
92 | CONFIG_NFT_LIMIT=m | ||
93 | CONFIG_NFT_NAT=m | ||
94 | CONFIG_NFT_COMPAT=m | ||
85 | CONFIG_NETFILTER_XT_SET=m | 95 | CONFIG_NETFILTER_XT_SET=m |
86 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 96 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
87 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 97 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -95,6 +105,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
95 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 105 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
96 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 106 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
97 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 107 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
108 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
98 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 109 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
99 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 110 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
100 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 111 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -127,6 +138,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
127 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 138 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
128 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 139 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
129 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 140 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
141 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
130 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 142 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
131 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 143 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
132 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 144 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -141,11 +153,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
141 | CONFIG_IP_SET_HASH_IPPORT=m | 153 | CONFIG_IP_SET_HASH_IPPORT=m |
142 | CONFIG_IP_SET_HASH_IPPORTIP=m | 154 | CONFIG_IP_SET_HASH_IPPORTIP=m |
143 | CONFIG_IP_SET_HASH_IPPORTNET=m | 155 | CONFIG_IP_SET_HASH_IPPORTNET=m |
156 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
144 | CONFIG_IP_SET_HASH_NET=m | 157 | CONFIG_IP_SET_HASH_NET=m |
158 | CONFIG_IP_SET_HASH_NETNET=m | ||
145 | CONFIG_IP_SET_HASH_NETPORT=m | 159 | CONFIG_IP_SET_HASH_NETPORT=m |
146 | CONFIG_IP_SET_HASH_NETIFACE=m | 160 | CONFIG_IP_SET_HASH_NETIFACE=m |
147 | CONFIG_IP_SET_LIST_SET=m | 161 | CONFIG_IP_SET_LIST_SET=m |
148 | CONFIG_NF_CONNTRACK_IPV4=m | 162 | CONFIG_NF_CONNTRACK_IPV4=m |
163 | CONFIG_NF_TABLES_IPV4=m | ||
164 | CONFIG_NFT_REJECT_IPV4=m | ||
165 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
166 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
167 | CONFIG_NF_TABLES_ARP=m | ||
149 | CONFIG_IP_NF_IPTABLES=m | 168 | CONFIG_IP_NF_IPTABLES=m |
150 | CONFIG_IP_NF_MATCH_AH=m | 169 | CONFIG_IP_NF_MATCH_AH=m |
151 | CONFIG_IP_NF_MATCH_ECN=m | 170 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -153,6 +172,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
153 | CONFIG_IP_NF_MATCH_TTL=m | 172 | CONFIG_IP_NF_MATCH_TTL=m |
154 | CONFIG_IP_NF_FILTER=m | 173 | CONFIG_IP_NF_FILTER=m |
155 | CONFIG_IP_NF_TARGET_REJECT=m | 174 | CONFIG_IP_NF_TARGET_REJECT=m |
175 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
156 | CONFIG_IP_NF_TARGET_ULOG=m | 176 | CONFIG_IP_NF_TARGET_ULOG=m |
157 | CONFIG_NF_NAT_IPV4=m | 177 | CONFIG_NF_NAT_IPV4=m |
158 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 178 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -167,6 +187,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
167 | CONFIG_IP_NF_ARPFILTER=m | 187 | CONFIG_IP_NF_ARPFILTER=m |
168 | CONFIG_IP_NF_ARP_MANGLE=m | 188 | CONFIG_IP_NF_ARP_MANGLE=m |
169 | CONFIG_NF_CONNTRACK_IPV6=m | 189 | CONFIG_NF_CONNTRACK_IPV6=m |
190 | CONFIG_NF_TABLES_IPV6=m | ||
191 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
192 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
170 | CONFIG_IP6_NF_IPTABLES=m | 193 | CONFIG_IP6_NF_IPTABLES=m |
171 | CONFIG_IP6_NF_MATCH_AH=m | 194 | CONFIG_IP6_NF_MATCH_AH=m |
172 | CONFIG_IP6_NF_MATCH_EUI64=m | 195 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -180,11 +203,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
180 | CONFIG_IP6_NF_TARGET_HL=m | 203 | CONFIG_IP6_NF_TARGET_HL=m |
181 | CONFIG_IP6_NF_FILTER=m | 204 | CONFIG_IP6_NF_FILTER=m |
182 | CONFIG_IP6_NF_TARGET_REJECT=m | 205 | CONFIG_IP6_NF_TARGET_REJECT=m |
206 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
183 | CONFIG_IP6_NF_MANGLE=m | 207 | CONFIG_IP6_NF_MANGLE=m |
184 | CONFIG_IP6_NF_RAW=m | 208 | CONFIG_IP6_NF_RAW=m |
185 | CONFIG_NF_NAT_IPV6=m | 209 | CONFIG_NF_NAT_IPV6=m |
186 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 210 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
187 | CONFIG_IP6_NF_TARGET_NPT=m | 211 | CONFIG_IP6_NF_TARGET_NPT=m |
212 | CONFIG_NF_TABLES_BRIDGE=m | ||
188 | CONFIG_IP_DCCP=m | 213 | CONFIG_IP_DCCP=m |
189 | # CONFIG_IP_DCCP_CCID3 is not set | 214 | # CONFIG_IP_DCCP_CCID3 is not set |
190 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 215 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -195,11 +220,13 @@ CONFIG_ATALK=m | |||
195 | CONFIG_DEV_APPLETALK=m | 220 | CONFIG_DEV_APPLETALK=m |
196 | CONFIG_IPDDP=m | 221 | CONFIG_IPDDP=m |
197 | CONFIG_IPDDP_ENCAP=y | 222 | CONFIG_IPDDP_ENCAP=y |
198 | CONFIG_IPDDP_DECAP=y | 223 | CONFIG_DNS_RESOLVER=y |
199 | CONFIG_BATMAN_ADV=m | 224 | CONFIG_BATMAN_ADV=m |
200 | CONFIG_BATMAN_ADV_DAT=y | 225 | CONFIG_BATMAN_ADV_DAT=y |
226 | CONFIG_BATMAN_ADV_NC=y | ||
227 | CONFIG_NETLINK_DIAG=m | ||
228 | CONFIG_NET_MPLS_GSO=m | ||
201 | # CONFIG_WIRELESS is not set | 229 | # CONFIG_WIRELESS is not set |
202 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
203 | CONFIG_DEVTMPFS=y | 230 | CONFIG_DEVTMPFS=y |
204 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 231 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
205 | # CONFIG_FW_LOADER_USER_HELPER is not set | 232 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -212,6 +239,7 @@ CONFIG_BLK_DEV_NBD=m | |||
212 | CONFIG_BLK_DEV_RAM=y | 239 | CONFIG_BLK_DEV_RAM=y |
213 | CONFIG_CDROM_PKTCDVD=m | 240 | CONFIG_CDROM_PKTCDVD=m |
214 | CONFIG_ATA_OVER_ETH=m | 241 | CONFIG_ATA_OVER_ETH=m |
242 | CONFIG_DUMMY_IRQ=m | ||
215 | CONFIG_IDE=y | 243 | CONFIG_IDE=y |
216 | CONFIG_IDE_GD_ATAPI=y | 244 | CONFIG_IDE_GD_ATAPI=y |
217 | CONFIG_BLK_DEV_IDECD=y | 245 | CONFIG_BLK_DEV_IDECD=y |
@@ -261,6 +289,7 @@ CONFIG_EQUALIZER=m | |||
261 | CONFIG_NET_TEAM=m | 289 | CONFIG_NET_TEAM=m |
262 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 290 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
263 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 291 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
292 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
264 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 293 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
265 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 294 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
266 | CONFIG_VXLAN=m | 295 | CONFIG_VXLAN=m |
@@ -268,6 +297,7 @@ CONFIG_NETCONSOLE=m | |||
268 | CONFIG_NETCONSOLE_DYNAMIC=y | 297 | CONFIG_NETCONSOLE_DYNAMIC=y |
269 | CONFIG_VETH=m | 298 | CONFIG_VETH=m |
270 | CONFIG_MACMACE=y | 299 | CONFIG_MACMACE=y |
300 | # CONFIG_NET_VENDOR_ARC is not set | ||
271 | # CONFIG_NET_CADENCE is not set | 301 | # CONFIG_NET_CADENCE is not set |
272 | # CONFIG_NET_VENDOR_BROADCOM is not set | 302 | # CONFIG_NET_VENDOR_BROADCOM is not set |
273 | CONFIG_MAC89x0=y | 303 | CONFIG_MAC89x0=y |
@@ -279,6 +309,7 @@ CONFIG_MAC8390=y | |||
279 | # CONFIG_NET_VENDOR_SEEQ is not set | 309 | # CONFIG_NET_VENDOR_SEEQ is not set |
280 | # CONFIG_NET_VENDOR_SMSC is not set | 310 | # CONFIG_NET_VENDOR_SMSC is not set |
281 | # CONFIG_NET_VENDOR_STMICRO is not set | 311 | # CONFIG_NET_VENDOR_STMICRO is not set |
312 | # CONFIG_NET_VENDOR_VIA is not set | ||
282 | # CONFIG_NET_VENDOR_WIZNET is not set | 313 | # CONFIG_NET_VENDOR_WIZNET is not set |
283 | CONFIG_PPP=m | 314 | CONFIG_PPP=m |
284 | CONFIG_PPP_BSDCOMP=m | 315 | CONFIG_PPP_BSDCOMP=m |
@@ -302,7 +333,6 @@ CONFIG_MOUSE_SERIAL=m | |||
302 | CONFIG_INPUT_MISC=y | 333 | CONFIG_INPUT_MISC=y |
303 | CONFIG_INPUT_M68K_BEEP=m | 334 | CONFIG_INPUT_M68K_BEEP=m |
304 | CONFIG_SERIO=m | 335 | CONFIG_SERIO=m |
305 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
306 | # CONFIG_LEGACY_PTYS is not set | 336 | # CONFIG_LEGACY_PTYS is not set |
307 | # CONFIG_DEVKMEM is not set | 337 | # CONFIG_DEVKMEM is not set |
308 | CONFIG_SERIAL_PMACZILOG=y | 338 | CONFIG_SERIAL_PMACZILOG=y |
@@ -327,10 +357,6 @@ CONFIG_RTC_CLASS=y | |||
327 | CONFIG_RTC_DRV_GENERIC=m | 357 | CONFIG_RTC_DRV_GENERIC=m |
328 | # CONFIG_IOMMU_SUPPORT is not set | 358 | # CONFIG_IOMMU_SUPPORT is not set |
329 | CONFIG_PROC_HARDWARE=y | 359 | CONFIG_PROC_HARDWARE=y |
330 | CONFIG_EXT2_FS=y | ||
331 | CONFIG_EXT3_FS=y | ||
332 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
333 | # CONFIG_EXT3_FS_XATTR is not set | ||
334 | CONFIG_EXT4_FS=y | 360 | CONFIG_EXT4_FS=y |
335 | CONFIG_REISERFS_FS=m | 361 | CONFIG_REISERFS_FS=m |
336 | CONFIG_JFS_FS=m | 362 | CONFIG_JFS_FS=m |
@@ -367,7 +393,7 @@ CONFIG_QNX6FS_FS=m | |||
367 | CONFIG_SYSV_FS=m | 393 | CONFIG_SYSV_FS=m |
368 | CONFIG_UFS_FS=m | 394 | CONFIG_UFS_FS=m |
369 | CONFIG_NFS_FS=y | 395 | CONFIG_NFS_FS=y |
370 | CONFIG_NFS_V4=y | 396 | CONFIG_NFS_V4=m |
371 | CONFIG_NFS_SWAP=y | 397 | CONFIG_NFS_SWAP=y |
372 | CONFIG_ROOT_NFS=y | 398 | CONFIG_ROOT_NFS=y |
373 | CONFIG_NFSD=m | 399 | CONFIG_NFSD=m |
@@ -426,10 +452,11 @@ CONFIG_NLS_MAC_TURKISH=m | |||
426 | CONFIG_DLM=m | 452 | CONFIG_DLM=m |
427 | CONFIG_MAGIC_SYSRQ=y | 453 | CONFIG_MAGIC_SYSRQ=y |
428 | CONFIG_ASYNC_RAID6_TEST=m | 454 | CONFIG_ASYNC_RAID6_TEST=m |
455 | CONFIG_TEST_STRING_HELPERS=m | ||
456 | CONFIG_EARLY_PRINTK=y | ||
429 | CONFIG_ENCRYPTED_KEYS=m | 457 | CONFIG_ENCRYPTED_KEYS=m |
430 | CONFIG_CRYPTO_MANAGER=y | 458 | CONFIG_CRYPTO_MANAGER=y |
431 | CONFIG_CRYPTO_USER=m | 459 | CONFIG_CRYPTO_USER=m |
432 | CONFIG_CRYPTO_NULL=m | ||
433 | CONFIG_CRYPTO_CRYPTD=m | 460 | CONFIG_CRYPTO_CRYPTD=m |
434 | CONFIG_CRYPTO_TEST=m | 461 | CONFIG_CRYPTO_TEST=m |
435 | CONFIG_CRYPTO_CCM=m | 462 | CONFIG_CRYPTO_CCM=m |
@@ -462,6 +489,8 @@ CONFIG_CRYPTO_TEA=m | |||
462 | CONFIG_CRYPTO_TWOFISH=m | 489 | CONFIG_CRYPTO_TWOFISH=m |
463 | CONFIG_CRYPTO_ZLIB=m | 490 | CONFIG_CRYPTO_ZLIB=m |
464 | CONFIG_CRYPTO_LZO=m | 491 | CONFIG_CRYPTO_LZO=m |
492 | CONFIG_CRYPTO_LZ4=m | ||
493 | CONFIG_CRYPTO_LZ4HC=m | ||
465 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 494 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
466 | CONFIG_CRYPTO_USER_API_HASH=m | 495 | CONFIG_CRYPTO_USER_API_HASH=m |
467 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 496 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b17a8837f0e1..4e5adff326ee 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -58,7 +58,6 @@ CONFIG_IP_PNP_RARP=y | |||
58 | CONFIG_NET_IPIP=m | 58 | CONFIG_NET_IPIP=m |
59 | CONFIG_NET_IPGRE_DEMUX=m | 59 | CONFIG_NET_IPGRE_DEMUX=m |
60 | CONFIG_NET_IPGRE=m | 60 | CONFIG_NET_IPGRE=m |
61 | CONFIG_SYN_COOKIES=y | ||
62 | CONFIG_NET_IPVTI=m | 61 | CONFIG_NET_IPVTI=m |
63 | CONFIG_INET_AH=m | 62 | CONFIG_INET_AH=m |
64 | CONFIG_INET_ESP=m | 63 | CONFIG_INET_ESP=m |
@@ -69,11 +68,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
69 | # CONFIG_INET_LRO is not set | 68 | # CONFIG_INET_LRO is not set |
70 | CONFIG_INET_DIAG=m | 69 | CONFIG_INET_DIAG=m |
71 | CONFIG_INET_UDP_DIAG=m | 70 | CONFIG_INET_UDP_DIAG=m |
72 | CONFIG_IPV6_PRIVACY=y | ||
73 | CONFIG_IPV6_ROUTER_PREF=y | 71 | CONFIG_IPV6_ROUTER_PREF=y |
74 | CONFIG_INET6_AH=m | 72 | CONFIG_INET6_AH=m |
75 | CONFIG_INET6_ESP=m | 73 | CONFIG_INET6_ESP=m |
76 | CONFIG_INET6_IPCOMP=m | 74 | CONFIG_INET6_IPCOMP=m |
75 | CONFIG_IPV6_VTI=m | ||
77 | CONFIG_IPV6_GRE=m | 76 | CONFIG_IPV6_GRE=m |
78 | CONFIG_NETFILTER=y | 77 | CONFIG_NETFILTER=y |
79 | CONFIG_NF_CONNTRACK=m | 78 | CONFIG_NF_CONNTRACK=m |
@@ -91,6 +90,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
91 | CONFIG_NF_CONNTRACK_SANE=m | 90 | CONFIG_NF_CONNTRACK_SANE=m |
92 | CONFIG_NF_CONNTRACK_SIP=m | 91 | CONFIG_NF_CONNTRACK_SIP=m |
93 | CONFIG_NF_CONNTRACK_TFTP=m | 92 | CONFIG_NF_CONNTRACK_TFTP=m |
93 | CONFIG_NF_TABLES=m | ||
94 | CONFIG_NFT_EXTHDR=m | ||
95 | CONFIG_NFT_META=m | ||
96 | CONFIG_NFT_CT=m | ||
97 | CONFIG_NFT_RBTREE=m | ||
98 | CONFIG_NFT_HASH=m | ||
99 | CONFIG_NFT_COUNTER=m | ||
100 | CONFIG_NFT_LOG=m | ||
101 | CONFIG_NFT_LIMIT=m | ||
102 | CONFIG_NFT_NAT=m | ||
103 | CONFIG_NFT_COMPAT=m | ||
94 | CONFIG_NETFILTER_XT_SET=m | 104 | CONFIG_NETFILTER_XT_SET=m |
95 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 105 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
96 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 106 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -104,6 +114,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
104 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 114 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
105 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 115 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
106 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 116 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
117 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
107 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 118 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
108 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 119 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
109 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 120 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -136,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
136 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 147 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
137 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 148 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
138 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 149 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
150 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
139 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 151 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
140 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 152 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
141 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 153 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -150,11 +162,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
150 | CONFIG_IP_SET_HASH_IPPORT=m | 162 | CONFIG_IP_SET_HASH_IPPORT=m |
151 | CONFIG_IP_SET_HASH_IPPORTIP=m | 163 | CONFIG_IP_SET_HASH_IPPORTIP=m |
152 | CONFIG_IP_SET_HASH_IPPORTNET=m | 164 | CONFIG_IP_SET_HASH_IPPORTNET=m |
165 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
153 | CONFIG_IP_SET_HASH_NET=m | 166 | CONFIG_IP_SET_HASH_NET=m |
167 | CONFIG_IP_SET_HASH_NETNET=m | ||
154 | CONFIG_IP_SET_HASH_NETPORT=m | 168 | CONFIG_IP_SET_HASH_NETPORT=m |
155 | CONFIG_IP_SET_HASH_NETIFACE=m | 169 | CONFIG_IP_SET_HASH_NETIFACE=m |
156 | CONFIG_IP_SET_LIST_SET=m | 170 | CONFIG_IP_SET_LIST_SET=m |
157 | CONFIG_NF_CONNTRACK_IPV4=m | 171 | CONFIG_NF_CONNTRACK_IPV4=m |
172 | CONFIG_NF_TABLES_IPV4=m | ||
173 | CONFIG_NFT_REJECT_IPV4=m | ||
174 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
175 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
176 | CONFIG_NF_TABLES_ARP=m | ||
158 | CONFIG_IP_NF_IPTABLES=m | 177 | CONFIG_IP_NF_IPTABLES=m |
159 | CONFIG_IP_NF_MATCH_AH=m | 178 | CONFIG_IP_NF_MATCH_AH=m |
160 | CONFIG_IP_NF_MATCH_ECN=m | 179 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -162,6 +181,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
162 | CONFIG_IP_NF_MATCH_TTL=m | 181 | CONFIG_IP_NF_MATCH_TTL=m |
163 | CONFIG_IP_NF_FILTER=m | 182 | CONFIG_IP_NF_FILTER=m |
164 | CONFIG_IP_NF_TARGET_REJECT=m | 183 | CONFIG_IP_NF_TARGET_REJECT=m |
184 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
165 | CONFIG_IP_NF_TARGET_ULOG=m | 185 | CONFIG_IP_NF_TARGET_ULOG=m |
166 | CONFIG_NF_NAT_IPV4=m | 186 | CONFIG_NF_NAT_IPV4=m |
167 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 187 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -176,6 +196,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
176 | CONFIG_IP_NF_ARPFILTER=m | 196 | CONFIG_IP_NF_ARPFILTER=m |
177 | CONFIG_IP_NF_ARP_MANGLE=m | 197 | CONFIG_IP_NF_ARP_MANGLE=m |
178 | CONFIG_NF_CONNTRACK_IPV6=m | 198 | CONFIG_NF_CONNTRACK_IPV6=m |
199 | CONFIG_NF_TABLES_IPV6=m | ||
200 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
201 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
179 | CONFIG_IP6_NF_IPTABLES=m | 202 | CONFIG_IP6_NF_IPTABLES=m |
180 | CONFIG_IP6_NF_MATCH_AH=m | 203 | CONFIG_IP6_NF_MATCH_AH=m |
181 | CONFIG_IP6_NF_MATCH_EUI64=m | 204 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -189,11 +212,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
189 | CONFIG_IP6_NF_TARGET_HL=m | 212 | CONFIG_IP6_NF_TARGET_HL=m |
190 | CONFIG_IP6_NF_FILTER=m | 213 | CONFIG_IP6_NF_FILTER=m |
191 | CONFIG_IP6_NF_TARGET_REJECT=m | 214 | CONFIG_IP6_NF_TARGET_REJECT=m |
215 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
192 | CONFIG_IP6_NF_MANGLE=m | 216 | CONFIG_IP6_NF_MANGLE=m |
193 | CONFIG_IP6_NF_RAW=m | 217 | CONFIG_IP6_NF_RAW=m |
194 | CONFIG_NF_NAT_IPV6=m | 218 | CONFIG_NF_NAT_IPV6=m |
195 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 219 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
196 | CONFIG_IP6_NF_TARGET_NPT=m | 220 | CONFIG_IP6_NF_TARGET_NPT=m |
221 | CONFIG_NF_TABLES_BRIDGE=m | ||
197 | CONFIG_IP_DCCP=m | 222 | CONFIG_IP_DCCP=m |
198 | # CONFIG_IP_DCCP_CCID3 is not set | 223 | # CONFIG_IP_DCCP_CCID3 is not set |
199 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 224 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -204,11 +229,13 @@ CONFIG_ATALK=m | |||
204 | CONFIG_DEV_APPLETALK=m | 229 | CONFIG_DEV_APPLETALK=m |
205 | CONFIG_IPDDP=m | 230 | CONFIG_IPDDP=m |
206 | CONFIG_IPDDP_ENCAP=y | 231 | CONFIG_IPDDP_ENCAP=y |
207 | CONFIG_IPDDP_DECAP=y | 232 | CONFIG_DNS_RESOLVER=y |
208 | CONFIG_BATMAN_ADV=m | 233 | CONFIG_BATMAN_ADV=m |
209 | CONFIG_BATMAN_ADV_DAT=y | 234 | CONFIG_BATMAN_ADV_DAT=y |
235 | CONFIG_BATMAN_ADV_NC=y | ||
236 | CONFIG_NETLINK_DIAG=m | ||
237 | CONFIG_NET_MPLS_GSO=m | ||
210 | # CONFIG_WIRELESS is not set | 238 | # CONFIG_WIRELESS is not set |
211 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
212 | CONFIG_DEVTMPFS=y | 239 | CONFIG_DEVTMPFS=y |
213 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 240 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
214 | # CONFIG_FW_LOADER_USER_HELPER is not set | 241 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -230,6 +257,7 @@ CONFIG_BLK_DEV_NBD=m | |||
230 | CONFIG_BLK_DEV_RAM=y | 257 | CONFIG_BLK_DEV_RAM=y |
231 | CONFIG_CDROM_PKTCDVD=m | 258 | CONFIG_CDROM_PKTCDVD=m |
232 | CONFIG_ATA_OVER_ETH=m | 259 | CONFIG_ATA_OVER_ETH=m |
260 | CONFIG_DUMMY_IRQ=m | ||
233 | CONFIG_IDE=y | 261 | CONFIG_IDE=y |
234 | CONFIG_IDE_GD_ATAPI=y | 262 | CONFIG_IDE_GD_ATAPI=y |
235 | CONFIG_BLK_DEV_IDECD=y | 263 | CONFIG_BLK_DEV_IDECD=y |
@@ -290,10 +318,10 @@ CONFIG_MAC_EMUMOUSEBTN=y | |||
290 | CONFIG_NETDEVICES=y | 318 | CONFIG_NETDEVICES=y |
291 | CONFIG_DUMMY=m | 319 | CONFIG_DUMMY=m |
292 | CONFIG_EQUALIZER=m | 320 | CONFIG_EQUALIZER=m |
293 | CONFIG_MII=y | ||
294 | CONFIG_NET_TEAM=m | 321 | CONFIG_NET_TEAM=m |
295 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 322 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
296 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 323 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
324 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
297 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 325 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
298 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 326 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
299 | CONFIG_VXLAN=m | 327 | CONFIG_VXLAN=m |
@@ -308,10 +336,10 @@ CONFIG_HPLANCE=y | |||
308 | CONFIG_MVME147_NET=y | 336 | CONFIG_MVME147_NET=y |
309 | CONFIG_SUN3LANCE=y | 337 | CONFIG_SUN3LANCE=y |
310 | CONFIG_MACMACE=y | 338 | CONFIG_MACMACE=y |
339 | # CONFIG_NET_VENDOR_ARC is not set | ||
311 | # CONFIG_NET_CADENCE is not set | 340 | # CONFIG_NET_CADENCE is not set |
312 | # CONFIG_NET_VENDOR_BROADCOM is not set | 341 | # CONFIG_NET_VENDOR_BROADCOM is not set |
313 | CONFIG_MAC89x0=y | 342 | CONFIG_MAC89x0=y |
314 | # CONFIG_NET_VENDOR_FUJITSU is not set | ||
315 | # CONFIG_NET_VENDOR_HP is not set | 343 | # CONFIG_NET_VENDOR_HP is not set |
316 | CONFIG_BVME6000_NET=y | 344 | CONFIG_BVME6000_NET=y |
317 | CONFIG_MVME16x_NET=y | 345 | CONFIG_MVME16x_NET=y |
@@ -325,6 +353,7 @@ CONFIG_APNE=y | |||
325 | CONFIG_ZORRO8390=y | 353 | CONFIG_ZORRO8390=y |
326 | # CONFIG_NET_VENDOR_SEEQ is not set | 354 | # CONFIG_NET_VENDOR_SEEQ is not set |
327 | # CONFIG_NET_VENDOR_STMICRO is not set | 355 | # CONFIG_NET_VENDOR_STMICRO is not set |
356 | # CONFIG_NET_VENDOR_VIA is not set | ||
328 | # CONFIG_NET_VENDOR_WIZNET is not set | 357 | # CONFIG_NET_VENDOR_WIZNET is not set |
329 | CONFIG_PLIP=m | 358 | CONFIG_PLIP=m |
330 | CONFIG_PPP=m | 359 | CONFIG_PPP=m |
@@ -357,7 +386,6 @@ CONFIG_INPUT_MISC=y | |||
357 | CONFIG_INPUT_M68K_BEEP=m | 386 | CONFIG_INPUT_M68K_BEEP=m |
358 | CONFIG_HP_SDC_RTC=m | 387 | CONFIG_HP_SDC_RTC=m |
359 | CONFIG_SERIO_Q40KBD=y | 388 | CONFIG_SERIO_Q40KBD=y |
360 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
361 | # CONFIG_LEGACY_PTYS is not set | 389 | # CONFIG_LEGACY_PTYS is not set |
362 | # CONFIG_DEVKMEM is not set | 390 | # CONFIG_DEVKMEM is not set |
363 | CONFIG_SERIAL_PMACZILOG=y | 391 | CONFIG_SERIAL_PMACZILOG=y |
@@ -405,10 +433,6 @@ CONFIG_NFETH=y | |||
405 | CONFIG_ATARI_DSP56K=m | 433 | CONFIG_ATARI_DSP56K=m |
406 | CONFIG_AMIGA_BUILTIN_SERIAL=y | 434 | CONFIG_AMIGA_BUILTIN_SERIAL=y |
407 | CONFIG_SERIAL_CONSOLE=y | 435 | CONFIG_SERIAL_CONSOLE=y |
408 | CONFIG_EXT2_FS=y | ||
409 | CONFIG_EXT3_FS=y | ||
410 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
411 | # CONFIG_EXT3_FS_XATTR is not set | ||
412 | CONFIG_EXT4_FS=y | 436 | CONFIG_EXT4_FS=y |
413 | CONFIG_REISERFS_FS=m | 437 | CONFIG_REISERFS_FS=m |
414 | CONFIG_JFS_FS=m | 438 | CONFIG_JFS_FS=m |
@@ -445,7 +469,7 @@ CONFIG_QNX6FS_FS=m | |||
445 | CONFIG_SYSV_FS=m | 469 | CONFIG_SYSV_FS=m |
446 | CONFIG_UFS_FS=m | 470 | CONFIG_UFS_FS=m |
447 | CONFIG_NFS_FS=y | 471 | CONFIG_NFS_FS=y |
448 | CONFIG_NFS_V4=y | 472 | CONFIG_NFS_V4=m |
449 | CONFIG_NFS_SWAP=y | 473 | CONFIG_NFS_SWAP=y |
450 | CONFIG_ROOT_NFS=y | 474 | CONFIG_ROOT_NFS=y |
451 | CONFIG_NFSD=m | 475 | CONFIG_NFSD=m |
@@ -504,10 +528,11 @@ CONFIG_NLS_MAC_TURKISH=m | |||
504 | CONFIG_DLM=m | 528 | CONFIG_DLM=m |
505 | CONFIG_MAGIC_SYSRQ=y | 529 | CONFIG_MAGIC_SYSRQ=y |
506 | CONFIG_ASYNC_RAID6_TEST=m | 530 | CONFIG_ASYNC_RAID6_TEST=m |
531 | CONFIG_TEST_STRING_HELPERS=m | ||
532 | CONFIG_EARLY_PRINTK=y | ||
507 | CONFIG_ENCRYPTED_KEYS=m | 533 | CONFIG_ENCRYPTED_KEYS=m |
508 | CONFIG_CRYPTO_MANAGER=y | 534 | CONFIG_CRYPTO_MANAGER=y |
509 | CONFIG_CRYPTO_USER=m | 535 | CONFIG_CRYPTO_USER=m |
510 | CONFIG_CRYPTO_NULL=m | ||
511 | CONFIG_CRYPTO_CRYPTD=m | 536 | CONFIG_CRYPTO_CRYPTD=m |
512 | CONFIG_CRYPTO_TEST=m | 537 | CONFIG_CRYPTO_TEST=m |
513 | CONFIG_CRYPTO_CCM=m | 538 | CONFIG_CRYPTO_CCM=m |
@@ -540,6 +565,8 @@ CONFIG_CRYPTO_TEA=m | |||
540 | CONFIG_CRYPTO_TWOFISH=m | 565 | CONFIG_CRYPTO_TWOFISH=m |
541 | CONFIG_CRYPTO_ZLIB=m | 566 | CONFIG_CRYPTO_ZLIB=m |
542 | CONFIG_CRYPTO_LZO=m | 567 | CONFIG_CRYPTO_LZO=m |
568 | CONFIG_CRYPTO_LZ4=m | ||
569 | CONFIG_CRYPTO_LZ4HC=m | ||
543 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 570 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
544 | CONFIG_CRYPTO_USER_API_HASH=m | 571 | CONFIG_CRYPTO_USER_API_HASH=m |
545 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 572 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 5586c6529fce..02cdbac5565e 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig | |||
@@ -47,7 +47,6 @@ CONFIG_IP_PNP_RARP=y | |||
47 | CONFIG_NET_IPIP=m | 47 | CONFIG_NET_IPIP=m |
48 | CONFIG_NET_IPGRE_DEMUX=m | 48 | CONFIG_NET_IPGRE_DEMUX=m |
49 | CONFIG_NET_IPGRE=m | 49 | CONFIG_NET_IPGRE=m |
50 | CONFIG_SYN_COOKIES=y | ||
51 | CONFIG_NET_IPVTI=m | 50 | CONFIG_NET_IPVTI=m |
52 | CONFIG_INET_AH=m | 51 | CONFIG_INET_AH=m |
53 | CONFIG_INET_ESP=m | 52 | CONFIG_INET_ESP=m |
@@ -58,11 +57,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
58 | # CONFIG_INET_LRO is not set | 57 | # CONFIG_INET_LRO is not set |
59 | CONFIG_INET_DIAG=m | 58 | CONFIG_INET_DIAG=m |
60 | CONFIG_INET_UDP_DIAG=m | 59 | CONFIG_INET_UDP_DIAG=m |
61 | CONFIG_IPV6_PRIVACY=y | ||
62 | CONFIG_IPV6_ROUTER_PREF=y | 60 | CONFIG_IPV6_ROUTER_PREF=y |
63 | CONFIG_INET6_AH=m | 61 | CONFIG_INET6_AH=m |
64 | CONFIG_INET6_ESP=m | 62 | CONFIG_INET6_ESP=m |
65 | CONFIG_INET6_IPCOMP=m | 63 | CONFIG_INET6_IPCOMP=m |
64 | CONFIG_IPV6_VTI=m | ||
66 | CONFIG_IPV6_GRE=m | 65 | CONFIG_IPV6_GRE=m |
67 | CONFIG_NETFILTER=y | 66 | CONFIG_NETFILTER=y |
68 | CONFIG_NF_CONNTRACK=m | 67 | CONFIG_NF_CONNTRACK=m |
@@ -80,6 +79,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
80 | CONFIG_NF_CONNTRACK_SANE=m | 79 | CONFIG_NF_CONNTRACK_SANE=m |
81 | CONFIG_NF_CONNTRACK_SIP=m | 80 | CONFIG_NF_CONNTRACK_SIP=m |
82 | CONFIG_NF_CONNTRACK_TFTP=m | 81 | CONFIG_NF_CONNTRACK_TFTP=m |
82 | CONFIG_NF_TABLES=m | ||
83 | CONFIG_NFT_EXTHDR=m | ||
84 | CONFIG_NFT_META=m | ||
85 | CONFIG_NFT_CT=m | ||
86 | CONFIG_NFT_RBTREE=m | ||
87 | CONFIG_NFT_HASH=m | ||
88 | CONFIG_NFT_COUNTER=m | ||
89 | CONFIG_NFT_LOG=m | ||
90 | CONFIG_NFT_LIMIT=m | ||
91 | CONFIG_NFT_NAT=m | ||
92 | CONFIG_NFT_COMPAT=m | ||
83 | CONFIG_NETFILTER_XT_SET=m | 93 | CONFIG_NETFILTER_XT_SET=m |
84 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 94 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
85 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 95 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -93,6 +103,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
93 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 103 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
94 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 104 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
95 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 105 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
106 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
96 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 107 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
97 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 108 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
98 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 109 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -125,6 +136,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
125 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 136 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
126 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 137 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
127 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 138 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
139 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
128 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 140 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
129 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 141 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
130 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 142 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -139,11 +151,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
139 | CONFIG_IP_SET_HASH_IPPORT=m | 151 | CONFIG_IP_SET_HASH_IPPORT=m |
140 | CONFIG_IP_SET_HASH_IPPORTIP=m | 152 | CONFIG_IP_SET_HASH_IPPORTIP=m |
141 | CONFIG_IP_SET_HASH_IPPORTNET=m | 153 | CONFIG_IP_SET_HASH_IPPORTNET=m |
154 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
142 | CONFIG_IP_SET_HASH_NET=m | 155 | CONFIG_IP_SET_HASH_NET=m |
156 | CONFIG_IP_SET_HASH_NETNET=m | ||
143 | CONFIG_IP_SET_HASH_NETPORT=m | 157 | CONFIG_IP_SET_HASH_NETPORT=m |
144 | CONFIG_IP_SET_HASH_NETIFACE=m | 158 | CONFIG_IP_SET_HASH_NETIFACE=m |
145 | CONFIG_IP_SET_LIST_SET=m | 159 | CONFIG_IP_SET_LIST_SET=m |
146 | CONFIG_NF_CONNTRACK_IPV4=m | 160 | CONFIG_NF_CONNTRACK_IPV4=m |
161 | CONFIG_NF_TABLES_IPV4=m | ||
162 | CONFIG_NFT_REJECT_IPV4=m | ||
163 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
164 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
165 | CONFIG_NF_TABLES_ARP=m | ||
147 | CONFIG_IP_NF_IPTABLES=m | 166 | CONFIG_IP_NF_IPTABLES=m |
148 | CONFIG_IP_NF_MATCH_AH=m | 167 | CONFIG_IP_NF_MATCH_AH=m |
149 | CONFIG_IP_NF_MATCH_ECN=m | 168 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -151,6 +170,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
151 | CONFIG_IP_NF_MATCH_TTL=m | 170 | CONFIG_IP_NF_MATCH_TTL=m |
152 | CONFIG_IP_NF_FILTER=m | 171 | CONFIG_IP_NF_FILTER=m |
153 | CONFIG_IP_NF_TARGET_REJECT=m | 172 | CONFIG_IP_NF_TARGET_REJECT=m |
173 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
154 | CONFIG_IP_NF_TARGET_ULOG=m | 174 | CONFIG_IP_NF_TARGET_ULOG=m |
155 | CONFIG_NF_NAT_IPV4=m | 175 | CONFIG_NF_NAT_IPV4=m |
156 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 176 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -165,6 +185,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
165 | CONFIG_IP_NF_ARPFILTER=m | 185 | CONFIG_IP_NF_ARPFILTER=m |
166 | CONFIG_IP_NF_ARP_MANGLE=m | 186 | CONFIG_IP_NF_ARP_MANGLE=m |
167 | CONFIG_NF_CONNTRACK_IPV6=m | 187 | CONFIG_NF_CONNTRACK_IPV6=m |
188 | CONFIG_NF_TABLES_IPV6=m | ||
189 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
190 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
168 | CONFIG_IP6_NF_IPTABLES=m | 191 | CONFIG_IP6_NF_IPTABLES=m |
169 | CONFIG_IP6_NF_MATCH_AH=m | 192 | CONFIG_IP6_NF_MATCH_AH=m |
170 | CONFIG_IP6_NF_MATCH_EUI64=m | 193 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -178,11 +201,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
178 | CONFIG_IP6_NF_TARGET_HL=m | 201 | CONFIG_IP6_NF_TARGET_HL=m |
179 | CONFIG_IP6_NF_FILTER=m | 202 | CONFIG_IP6_NF_FILTER=m |
180 | CONFIG_IP6_NF_TARGET_REJECT=m | 203 | CONFIG_IP6_NF_TARGET_REJECT=m |
204 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
181 | CONFIG_IP6_NF_MANGLE=m | 205 | CONFIG_IP6_NF_MANGLE=m |
182 | CONFIG_IP6_NF_RAW=m | 206 | CONFIG_IP6_NF_RAW=m |
183 | CONFIG_NF_NAT_IPV6=m | 207 | CONFIG_NF_NAT_IPV6=m |
184 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 208 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
185 | CONFIG_IP6_NF_TARGET_NPT=m | 209 | CONFIG_IP6_NF_TARGET_NPT=m |
210 | CONFIG_NF_TABLES_BRIDGE=m | ||
186 | CONFIG_IP_DCCP=m | 211 | CONFIG_IP_DCCP=m |
187 | # CONFIG_IP_DCCP_CCID3 is not set | 212 | # CONFIG_IP_DCCP_CCID3 is not set |
188 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 213 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -190,10 +215,13 @@ CONFIG_RDS=m | |||
190 | CONFIG_RDS_TCP=m | 215 | CONFIG_RDS_TCP=m |
191 | CONFIG_L2TP=m | 216 | CONFIG_L2TP=m |
192 | CONFIG_ATALK=m | 217 | CONFIG_ATALK=m |
218 | CONFIG_DNS_RESOLVER=y | ||
193 | CONFIG_BATMAN_ADV=m | 219 | CONFIG_BATMAN_ADV=m |
194 | CONFIG_BATMAN_ADV_DAT=y | 220 | CONFIG_BATMAN_ADV_DAT=y |
221 | CONFIG_BATMAN_ADV_NC=y | ||
222 | CONFIG_NETLINK_DIAG=m | ||
223 | CONFIG_NET_MPLS_GSO=m | ||
195 | # CONFIG_WIRELESS is not set | 224 | # CONFIG_WIRELESS is not set |
196 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
197 | CONFIG_DEVTMPFS=y | 225 | CONFIG_DEVTMPFS=y |
198 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 226 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
199 | # CONFIG_FW_LOADER_USER_HELPER is not set | 227 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -205,6 +233,7 @@ CONFIG_BLK_DEV_NBD=m | |||
205 | CONFIG_BLK_DEV_RAM=y | 233 | CONFIG_BLK_DEV_RAM=y |
206 | CONFIG_CDROM_PKTCDVD=m | 234 | CONFIG_CDROM_PKTCDVD=m |
207 | CONFIG_ATA_OVER_ETH=m | 235 | CONFIG_ATA_OVER_ETH=m |
236 | CONFIG_DUMMY_IRQ=m | ||
208 | CONFIG_RAID_ATTRS=m | 237 | CONFIG_RAID_ATTRS=m |
209 | CONFIG_SCSI=y | 238 | CONFIG_SCSI=y |
210 | CONFIG_SCSI_TGT=m | 239 | CONFIG_SCSI_TGT=m |
@@ -242,6 +271,7 @@ CONFIG_EQUALIZER=m | |||
242 | CONFIG_NET_TEAM=m | 271 | CONFIG_NET_TEAM=m |
243 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 272 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
244 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 273 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
274 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
245 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 275 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
246 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 276 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
247 | CONFIG_VXLAN=m | 277 | CONFIG_VXLAN=m |
@@ -249,6 +279,7 @@ CONFIG_NETCONSOLE=m | |||
249 | CONFIG_NETCONSOLE_DYNAMIC=y | 279 | CONFIG_NETCONSOLE_DYNAMIC=y |
250 | CONFIG_VETH=m | 280 | CONFIG_VETH=m |
251 | CONFIG_MVME147_NET=y | 281 | CONFIG_MVME147_NET=y |
282 | # CONFIG_NET_VENDOR_ARC is not set | ||
252 | # CONFIG_NET_CADENCE is not set | 283 | # CONFIG_NET_CADENCE is not set |
253 | # CONFIG_NET_VENDOR_BROADCOM is not set | 284 | # CONFIG_NET_VENDOR_BROADCOM is not set |
254 | # CONFIG_NET_VENDOR_INTEL is not set | 285 | # CONFIG_NET_VENDOR_INTEL is not set |
@@ -257,6 +288,7 @@ CONFIG_MVME147_NET=y | |||
257 | # CONFIG_NET_VENDOR_NATSEMI is not set | 288 | # CONFIG_NET_VENDOR_NATSEMI is not set |
258 | # CONFIG_NET_VENDOR_SEEQ is not set | 289 | # CONFIG_NET_VENDOR_SEEQ is not set |
259 | # CONFIG_NET_VENDOR_STMICRO is not set | 290 | # CONFIG_NET_VENDOR_STMICRO is not set |
291 | # CONFIG_NET_VENDOR_VIA is not set | ||
260 | # CONFIG_NET_VENDOR_WIZNET is not set | 292 | # CONFIG_NET_VENDOR_WIZNET is not set |
261 | CONFIG_PPP=m | 293 | CONFIG_PPP=m |
262 | CONFIG_PPP_BSDCOMP=m | 294 | CONFIG_PPP_BSDCOMP=m |
@@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y | |||
294 | CONFIG_RTC_DRV_GENERIC=m | 326 | CONFIG_RTC_DRV_GENERIC=m |
295 | # CONFIG_IOMMU_SUPPORT is not set | 327 | # CONFIG_IOMMU_SUPPORT is not set |
296 | CONFIG_PROC_HARDWARE=y | 328 | CONFIG_PROC_HARDWARE=y |
297 | CONFIG_EXT2_FS=y | ||
298 | CONFIG_EXT3_FS=y | ||
299 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
300 | # CONFIG_EXT3_FS_XATTR is not set | ||
301 | CONFIG_EXT4_FS=y | 329 | CONFIG_EXT4_FS=y |
302 | CONFIG_REISERFS_FS=m | 330 | CONFIG_REISERFS_FS=m |
303 | CONFIG_JFS_FS=m | 331 | CONFIG_JFS_FS=m |
@@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m | |||
334 | CONFIG_SYSV_FS=m | 362 | CONFIG_SYSV_FS=m |
335 | CONFIG_UFS_FS=m | 363 | CONFIG_UFS_FS=m |
336 | CONFIG_NFS_FS=y | 364 | CONFIG_NFS_FS=y |
337 | CONFIG_NFS_V4=y | 365 | CONFIG_NFS_V4=m |
338 | CONFIG_NFS_SWAP=y | 366 | CONFIG_NFS_SWAP=y |
339 | CONFIG_ROOT_NFS=y | 367 | CONFIG_ROOT_NFS=y |
340 | CONFIG_NFSD=m | 368 | CONFIG_NFSD=m |
@@ -393,10 +421,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
393 | CONFIG_DLM=m | 421 | CONFIG_DLM=m |
394 | CONFIG_MAGIC_SYSRQ=y | 422 | CONFIG_MAGIC_SYSRQ=y |
395 | CONFIG_ASYNC_RAID6_TEST=m | 423 | CONFIG_ASYNC_RAID6_TEST=m |
424 | CONFIG_TEST_STRING_HELPERS=m | ||
396 | CONFIG_ENCRYPTED_KEYS=m | 425 | CONFIG_ENCRYPTED_KEYS=m |
397 | CONFIG_CRYPTO_MANAGER=y | 426 | CONFIG_CRYPTO_MANAGER=y |
398 | CONFIG_CRYPTO_USER=m | 427 | CONFIG_CRYPTO_USER=m |
399 | CONFIG_CRYPTO_NULL=m | ||
400 | CONFIG_CRYPTO_CRYPTD=m | 428 | CONFIG_CRYPTO_CRYPTD=m |
401 | CONFIG_CRYPTO_TEST=m | 429 | CONFIG_CRYPTO_TEST=m |
402 | CONFIG_CRYPTO_CCM=m | 430 | CONFIG_CRYPTO_CCM=m |
@@ -429,6 +457,8 @@ CONFIG_CRYPTO_TEA=m | |||
429 | CONFIG_CRYPTO_TWOFISH=m | 457 | CONFIG_CRYPTO_TWOFISH=m |
430 | CONFIG_CRYPTO_ZLIB=m | 458 | CONFIG_CRYPTO_ZLIB=m |
431 | CONFIG_CRYPTO_LZO=m | 459 | CONFIG_CRYPTO_LZO=m |
460 | CONFIG_CRYPTO_LZ4=m | ||
461 | CONFIG_CRYPTO_LZ4HC=m | ||
432 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 462 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
433 | CONFIG_CRYPTO_USER_API_HASH=m | 463 | CONFIG_CRYPTO_USER_API_HASH=m |
434 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 464 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index e5e8262bbacd..05a990a9dbd4 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y | |||
48 | CONFIG_NET_IPIP=m | 48 | CONFIG_NET_IPIP=m |
49 | CONFIG_NET_IPGRE_DEMUX=m | 49 | CONFIG_NET_IPGRE_DEMUX=m |
50 | CONFIG_NET_IPGRE=m | 50 | CONFIG_NET_IPGRE=m |
51 | CONFIG_SYN_COOKIES=y | ||
52 | CONFIG_NET_IPVTI=m | 51 | CONFIG_NET_IPVTI=m |
53 | CONFIG_INET_AH=m | 52 | CONFIG_INET_AH=m |
54 | CONFIG_INET_ESP=m | 53 | CONFIG_INET_ESP=m |
@@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
59 | # CONFIG_INET_LRO is not set | 58 | # CONFIG_INET_LRO is not set |
60 | CONFIG_INET_DIAG=m | 59 | CONFIG_INET_DIAG=m |
61 | CONFIG_INET_UDP_DIAG=m | 60 | CONFIG_INET_UDP_DIAG=m |
62 | CONFIG_IPV6_PRIVACY=y | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 61 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 62 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 63 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 64 | CONFIG_INET6_IPCOMP=m |
65 | CONFIG_IPV6_VTI=m | ||
67 | CONFIG_IPV6_GRE=m | 66 | CONFIG_IPV6_GRE=m |
68 | CONFIG_NETFILTER=y | 67 | CONFIG_NETFILTER=y |
69 | CONFIG_NF_CONNTRACK=m | 68 | CONFIG_NF_CONNTRACK=m |
@@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
81 | CONFIG_NF_CONNTRACK_SANE=m | 80 | CONFIG_NF_CONNTRACK_SANE=m |
82 | CONFIG_NF_CONNTRACK_SIP=m | 81 | CONFIG_NF_CONNTRACK_SIP=m |
83 | CONFIG_NF_CONNTRACK_TFTP=m | 82 | CONFIG_NF_CONNTRACK_TFTP=m |
83 | CONFIG_NF_TABLES=m | ||
84 | CONFIG_NFT_EXTHDR=m | ||
85 | CONFIG_NFT_META=m | ||
86 | CONFIG_NFT_CT=m | ||
87 | CONFIG_NFT_RBTREE=m | ||
88 | CONFIG_NFT_HASH=m | ||
89 | CONFIG_NFT_COUNTER=m | ||
90 | CONFIG_NFT_LOG=m | ||
91 | CONFIG_NFT_LIMIT=m | ||
92 | CONFIG_NFT_NAT=m | ||
93 | CONFIG_NFT_COMPAT=m | ||
84 | CONFIG_NETFILTER_XT_SET=m | 94 | CONFIG_NETFILTER_XT_SET=m |
85 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 95 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
86 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 96 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
94 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 104 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
95 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 105 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
96 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 106 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
107 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
97 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 108 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
98 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 109 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
99 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 110 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
126 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 137 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
127 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 138 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
128 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 139 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
140 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
129 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 141 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
130 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 142 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
131 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 143 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
140 | CONFIG_IP_SET_HASH_IPPORT=m | 152 | CONFIG_IP_SET_HASH_IPPORT=m |
141 | CONFIG_IP_SET_HASH_IPPORTIP=m | 153 | CONFIG_IP_SET_HASH_IPPORTIP=m |
142 | CONFIG_IP_SET_HASH_IPPORTNET=m | 154 | CONFIG_IP_SET_HASH_IPPORTNET=m |
155 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
143 | CONFIG_IP_SET_HASH_NET=m | 156 | CONFIG_IP_SET_HASH_NET=m |
157 | CONFIG_IP_SET_HASH_NETNET=m | ||
144 | CONFIG_IP_SET_HASH_NETPORT=m | 158 | CONFIG_IP_SET_HASH_NETPORT=m |
145 | CONFIG_IP_SET_HASH_NETIFACE=m | 159 | CONFIG_IP_SET_HASH_NETIFACE=m |
146 | CONFIG_IP_SET_LIST_SET=m | 160 | CONFIG_IP_SET_LIST_SET=m |
147 | CONFIG_NF_CONNTRACK_IPV4=m | 161 | CONFIG_NF_CONNTRACK_IPV4=m |
162 | CONFIG_NF_TABLES_IPV4=m | ||
163 | CONFIG_NFT_REJECT_IPV4=m | ||
164 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
165 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
166 | CONFIG_NF_TABLES_ARP=m | ||
148 | CONFIG_IP_NF_IPTABLES=m | 167 | CONFIG_IP_NF_IPTABLES=m |
149 | CONFIG_IP_NF_MATCH_AH=m | 168 | CONFIG_IP_NF_MATCH_AH=m |
150 | CONFIG_IP_NF_MATCH_ECN=m | 169 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
152 | CONFIG_IP_NF_MATCH_TTL=m | 171 | CONFIG_IP_NF_MATCH_TTL=m |
153 | CONFIG_IP_NF_FILTER=m | 172 | CONFIG_IP_NF_FILTER=m |
154 | CONFIG_IP_NF_TARGET_REJECT=m | 173 | CONFIG_IP_NF_TARGET_REJECT=m |
174 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
155 | CONFIG_IP_NF_TARGET_ULOG=m | 175 | CONFIG_IP_NF_TARGET_ULOG=m |
156 | CONFIG_NF_NAT_IPV4=m | 176 | CONFIG_NF_NAT_IPV4=m |
157 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 177 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
166 | CONFIG_IP_NF_ARPFILTER=m | 186 | CONFIG_IP_NF_ARPFILTER=m |
167 | CONFIG_IP_NF_ARP_MANGLE=m | 187 | CONFIG_IP_NF_ARP_MANGLE=m |
168 | CONFIG_NF_CONNTRACK_IPV6=m | 188 | CONFIG_NF_CONNTRACK_IPV6=m |
189 | CONFIG_NF_TABLES_IPV6=m | ||
190 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
191 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
169 | CONFIG_IP6_NF_IPTABLES=m | 192 | CONFIG_IP6_NF_IPTABLES=m |
170 | CONFIG_IP6_NF_MATCH_AH=m | 193 | CONFIG_IP6_NF_MATCH_AH=m |
171 | CONFIG_IP6_NF_MATCH_EUI64=m | 194 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
179 | CONFIG_IP6_NF_TARGET_HL=m | 202 | CONFIG_IP6_NF_TARGET_HL=m |
180 | CONFIG_IP6_NF_FILTER=m | 203 | CONFIG_IP6_NF_FILTER=m |
181 | CONFIG_IP6_NF_TARGET_REJECT=m | 204 | CONFIG_IP6_NF_TARGET_REJECT=m |
205 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
182 | CONFIG_IP6_NF_MANGLE=m | 206 | CONFIG_IP6_NF_MANGLE=m |
183 | CONFIG_IP6_NF_RAW=m | 207 | CONFIG_IP6_NF_RAW=m |
184 | CONFIG_NF_NAT_IPV6=m | 208 | CONFIG_NF_NAT_IPV6=m |
185 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 209 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
186 | CONFIG_IP6_NF_TARGET_NPT=m | 210 | CONFIG_IP6_NF_TARGET_NPT=m |
211 | CONFIG_NF_TABLES_BRIDGE=m | ||
187 | CONFIG_IP_DCCP=m | 212 | CONFIG_IP_DCCP=m |
188 | # CONFIG_IP_DCCP_CCID3 is not set | 213 | # CONFIG_IP_DCCP_CCID3 is not set |
189 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 214 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -191,10 +216,13 @@ CONFIG_RDS=m | |||
191 | CONFIG_RDS_TCP=m | 216 | CONFIG_RDS_TCP=m |
192 | CONFIG_L2TP=m | 217 | CONFIG_L2TP=m |
193 | CONFIG_ATALK=m | 218 | CONFIG_ATALK=m |
219 | CONFIG_DNS_RESOLVER=y | ||
194 | CONFIG_BATMAN_ADV=m | 220 | CONFIG_BATMAN_ADV=m |
195 | CONFIG_BATMAN_ADV_DAT=y | 221 | CONFIG_BATMAN_ADV_DAT=y |
222 | CONFIG_BATMAN_ADV_NC=y | ||
223 | CONFIG_NETLINK_DIAG=m | ||
224 | CONFIG_NET_MPLS_GSO=m | ||
196 | # CONFIG_WIRELESS is not set | 225 | # CONFIG_WIRELESS is not set |
197 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
198 | CONFIG_DEVTMPFS=y | 226 | CONFIG_DEVTMPFS=y |
199 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 227 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
200 | # CONFIG_FW_LOADER_USER_HELPER is not set | 228 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -206,6 +234,7 @@ CONFIG_BLK_DEV_NBD=m | |||
206 | CONFIG_BLK_DEV_RAM=y | 234 | CONFIG_BLK_DEV_RAM=y |
207 | CONFIG_CDROM_PKTCDVD=m | 235 | CONFIG_CDROM_PKTCDVD=m |
208 | CONFIG_ATA_OVER_ETH=m | 236 | CONFIG_ATA_OVER_ETH=m |
237 | CONFIG_DUMMY_IRQ=m | ||
209 | CONFIG_RAID_ATTRS=m | 238 | CONFIG_RAID_ATTRS=m |
210 | CONFIG_SCSI=y | 239 | CONFIG_SCSI=y |
211 | CONFIG_SCSI_TGT=m | 240 | CONFIG_SCSI_TGT=m |
@@ -243,12 +272,14 @@ CONFIG_EQUALIZER=m | |||
243 | CONFIG_NET_TEAM=m | 272 | CONFIG_NET_TEAM=m |
244 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 273 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
245 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 274 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
275 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
246 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 276 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
247 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 277 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
248 | CONFIG_VXLAN=m | 278 | CONFIG_VXLAN=m |
249 | CONFIG_NETCONSOLE=m | 279 | CONFIG_NETCONSOLE=m |
250 | CONFIG_NETCONSOLE_DYNAMIC=y | 280 | CONFIG_NETCONSOLE_DYNAMIC=y |
251 | CONFIG_VETH=m | 281 | CONFIG_VETH=m |
282 | # CONFIG_NET_VENDOR_ARC is not set | ||
252 | # CONFIG_NET_CADENCE is not set | 283 | # CONFIG_NET_CADENCE is not set |
253 | # CONFIG_NET_VENDOR_BROADCOM is not set | 284 | # CONFIG_NET_VENDOR_BROADCOM is not set |
254 | CONFIG_MVME16x_NET=y | 285 | CONFIG_MVME16x_NET=y |
@@ -257,6 +288,7 @@ CONFIG_MVME16x_NET=y | |||
257 | # CONFIG_NET_VENDOR_NATSEMI is not set | 288 | # CONFIG_NET_VENDOR_NATSEMI is not set |
258 | # CONFIG_NET_VENDOR_SEEQ is not set | 289 | # CONFIG_NET_VENDOR_SEEQ is not set |
259 | # CONFIG_NET_VENDOR_STMICRO is not set | 290 | # CONFIG_NET_VENDOR_STMICRO is not set |
291 | # CONFIG_NET_VENDOR_VIA is not set | ||
260 | # CONFIG_NET_VENDOR_WIZNET is not set | 292 | # CONFIG_NET_VENDOR_WIZNET is not set |
261 | CONFIG_PPP=m | 293 | CONFIG_PPP=m |
262 | CONFIG_PPP_BSDCOMP=m | 294 | CONFIG_PPP_BSDCOMP=m |
@@ -294,10 +326,6 @@ CONFIG_RTC_CLASS=y | |||
294 | CONFIG_RTC_DRV_GENERIC=m | 326 | CONFIG_RTC_DRV_GENERIC=m |
295 | # CONFIG_IOMMU_SUPPORT is not set | 327 | # CONFIG_IOMMU_SUPPORT is not set |
296 | CONFIG_PROC_HARDWARE=y | 328 | CONFIG_PROC_HARDWARE=y |
297 | CONFIG_EXT2_FS=y | ||
298 | CONFIG_EXT3_FS=y | ||
299 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
300 | # CONFIG_EXT3_FS_XATTR is not set | ||
301 | CONFIG_EXT4_FS=y | 329 | CONFIG_EXT4_FS=y |
302 | CONFIG_REISERFS_FS=m | 330 | CONFIG_REISERFS_FS=m |
303 | CONFIG_JFS_FS=m | 331 | CONFIG_JFS_FS=m |
@@ -334,7 +362,7 @@ CONFIG_QNX6FS_FS=m | |||
334 | CONFIG_SYSV_FS=m | 362 | CONFIG_SYSV_FS=m |
335 | CONFIG_UFS_FS=m | 363 | CONFIG_UFS_FS=m |
336 | CONFIG_NFS_FS=y | 364 | CONFIG_NFS_FS=y |
337 | CONFIG_NFS_V4=y | 365 | CONFIG_NFS_V4=m |
338 | CONFIG_NFS_SWAP=y | 366 | CONFIG_NFS_SWAP=y |
339 | CONFIG_ROOT_NFS=y | 367 | CONFIG_ROOT_NFS=y |
340 | CONFIG_NFSD=m | 368 | CONFIG_NFSD=m |
@@ -393,10 +421,11 @@ CONFIG_NLS_MAC_TURKISH=m | |||
393 | CONFIG_DLM=m | 421 | CONFIG_DLM=m |
394 | CONFIG_MAGIC_SYSRQ=y | 422 | CONFIG_MAGIC_SYSRQ=y |
395 | CONFIG_ASYNC_RAID6_TEST=m | 423 | CONFIG_ASYNC_RAID6_TEST=m |
424 | CONFIG_TEST_STRING_HELPERS=m | ||
425 | CONFIG_EARLY_PRINTK=y | ||
396 | CONFIG_ENCRYPTED_KEYS=m | 426 | CONFIG_ENCRYPTED_KEYS=m |
397 | CONFIG_CRYPTO_MANAGER=y | 427 | CONFIG_CRYPTO_MANAGER=y |
398 | CONFIG_CRYPTO_USER=m | 428 | CONFIG_CRYPTO_USER=m |
399 | CONFIG_CRYPTO_NULL=m | ||
400 | CONFIG_CRYPTO_CRYPTD=m | 429 | CONFIG_CRYPTO_CRYPTD=m |
401 | CONFIG_CRYPTO_TEST=m | 430 | CONFIG_CRYPTO_TEST=m |
402 | CONFIG_CRYPTO_CCM=m | 431 | CONFIG_CRYPTO_CCM=m |
@@ -429,6 +458,8 @@ CONFIG_CRYPTO_TEA=m | |||
429 | CONFIG_CRYPTO_TWOFISH=m | 458 | CONFIG_CRYPTO_TWOFISH=m |
430 | CONFIG_CRYPTO_ZLIB=m | 459 | CONFIG_CRYPTO_ZLIB=m |
431 | CONFIG_CRYPTO_LZO=m | 460 | CONFIG_CRYPTO_LZO=m |
461 | CONFIG_CRYPTO_LZ4=m | ||
462 | CONFIG_CRYPTO_LZ4HC=m | ||
432 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 463 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
433 | CONFIG_CRYPTO_USER_API_HASH=m | 464 | CONFIG_CRYPTO_USER_API_HASH=m |
434 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 465 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index be1496ed9b66..568e2a98f976 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_IP_PNP_RARP=y | |||
48 | CONFIG_NET_IPIP=m | 48 | CONFIG_NET_IPIP=m |
49 | CONFIG_NET_IPGRE_DEMUX=m | 49 | CONFIG_NET_IPGRE_DEMUX=m |
50 | CONFIG_NET_IPGRE=m | 50 | CONFIG_NET_IPGRE=m |
51 | CONFIG_SYN_COOKIES=y | ||
52 | CONFIG_NET_IPVTI=m | 51 | CONFIG_NET_IPVTI=m |
53 | CONFIG_INET_AH=m | 52 | CONFIG_INET_AH=m |
54 | CONFIG_INET_ESP=m | 53 | CONFIG_INET_ESP=m |
@@ -59,11 +58,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
59 | # CONFIG_INET_LRO is not set | 58 | # CONFIG_INET_LRO is not set |
60 | CONFIG_INET_DIAG=m | 59 | CONFIG_INET_DIAG=m |
61 | CONFIG_INET_UDP_DIAG=m | 60 | CONFIG_INET_UDP_DIAG=m |
62 | CONFIG_IPV6_PRIVACY=y | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 61 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 62 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 63 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 64 | CONFIG_INET6_IPCOMP=m |
65 | CONFIG_IPV6_VTI=m | ||
67 | CONFIG_IPV6_GRE=m | 66 | CONFIG_IPV6_GRE=m |
68 | CONFIG_NETFILTER=y | 67 | CONFIG_NETFILTER=y |
69 | CONFIG_NF_CONNTRACK=m | 68 | CONFIG_NF_CONNTRACK=m |
@@ -81,6 +80,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
81 | CONFIG_NF_CONNTRACK_SANE=m | 80 | CONFIG_NF_CONNTRACK_SANE=m |
82 | CONFIG_NF_CONNTRACK_SIP=m | 81 | CONFIG_NF_CONNTRACK_SIP=m |
83 | CONFIG_NF_CONNTRACK_TFTP=m | 82 | CONFIG_NF_CONNTRACK_TFTP=m |
83 | CONFIG_NF_TABLES=m | ||
84 | CONFIG_NFT_EXTHDR=m | ||
85 | CONFIG_NFT_META=m | ||
86 | CONFIG_NFT_CT=m | ||
87 | CONFIG_NFT_RBTREE=m | ||
88 | CONFIG_NFT_HASH=m | ||
89 | CONFIG_NFT_COUNTER=m | ||
90 | CONFIG_NFT_LOG=m | ||
91 | CONFIG_NFT_LIMIT=m | ||
92 | CONFIG_NFT_NAT=m | ||
93 | CONFIG_NFT_COMPAT=m | ||
84 | CONFIG_NETFILTER_XT_SET=m | 94 | CONFIG_NETFILTER_XT_SET=m |
85 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 95 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
86 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 96 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -94,6 +104,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
94 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 104 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
95 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 105 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
96 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 106 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
107 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
97 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 108 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
98 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 109 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
99 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 110 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -126,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
126 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 137 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
127 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 138 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
128 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 139 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
140 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
129 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 141 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
130 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 142 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
131 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 143 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -140,11 +152,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
140 | CONFIG_IP_SET_HASH_IPPORT=m | 152 | CONFIG_IP_SET_HASH_IPPORT=m |
141 | CONFIG_IP_SET_HASH_IPPORTIP=m | 153 | CONFIG_IP_SET_HASH_IPPORTIP=m |
142 | CONFIG_IP_SET_HASH_IPPORTNET=m | 154 | CONFIG_IP_SET_HASH_IPPORTNET=m |
155 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
143 | CONFIG_IP_SET_HASH_NET=m | 156 | CONFIG_IP_SET_HASH_NET=m |
157 | CONFIG_IP_SET_HASH_NETNET=m | ||
144 | CONFIG_IP_SET_HASH_NETPORT=m | 158 | CONFIG_IP_SET_HASH_NETPORT=m |
145 | CONFIG_IP_SET_HASH_NETIFACE=m | 159 | CONFIG_IP_SET_HASH_NETIFACE=m |
146 | CONFIG_IP_SET_LIST_SET=m | 160 | CONFIG_IP_SET_LIST_SET=m |
147 | CONFIG_NF_CONNTRACK_IPV4=m | 161 | CONFIG_NF_CONNTRACK_IPV4=m |
162 | CONFIG_NF_TABLES_IPV4=m | ||
163 | CONFIG_NFT_REJECT_IPV4=m | ||
164 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
165 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
166 | CONFIG_NF_TABLES_ARP=m | ||
148 | CONFIG_IP_NF_IPTABLES=m | 167 | CONFIG_IP_NF_IPTABLES=m |
149 | CONFIG_IP_NF_MATCH_AH=m | 168 | CONFIG_IP_NF_MATCH_AH=m |
150 | CONFIG_IP_NF_MATCH_ECN=m | 169 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -152,6 +171,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
152 | CONFIG_IP_NF_MATCH_TTL=m | 171 | CONFIG_IP_NF_MATCH_TTL=m |
153 | CONFIG_IP_NF_FILTER=m | 172 | CONFIG_IP_NF_FILTER=m |
154 | CONFIG_IP_NF_TARGET_REJECT=m | 173 | CONFIG_IP_NF_TARGET_REJECT=m |
174 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
155 | CONFIG_IP_NF_TARGET_ULOG=m | 175 | CONFIG_IP_NF_TARGET_ULOG=m |
156 | CONFIG_NF_NAT_IPV4=m | 176 | CONFIG_NF_NAT_IPV4=m |
157 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 177 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -166,6 +186,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
166 | CONFIG_IP_NF_ARPFILTER=m | 186 | CONFIG_IP_NF_ARPFILTER=m |
167 | CONFIG_IP_NF_ARP_MANGLE=m | 187 | CONFIG_IP_NF_ARP_MANGLE=m |
168 | CONFIG_NF_CONNTRACK_IPV6=m | 188 | CONFIG_NF_CONNTRACK_IPV6=m |
189 | CONFIG_NF_TABLES_IPV6=m | ||
190 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
191 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
169 | CONFIG_IP6_NF_IPTABLES=m | 192 | CONFIG_IP6_NF_IPTABLES=m |
170 | CONFIG_IP6_NF_MATCH_AH=m | 193 | CONFIG_IP6_NF_MATCH_AH=m |
171 | CONFIG_IP6_NF_MATCH_EUI64=m | 194 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -179,11 +202,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
179 | CONFIG_IP6_NF_TARGET_HL=m | 202 | CONFIG_IP6_NF_TARGET_HL=m |
180 | CONFIG_IP6_NF_FILTER=m | 203 | CONFIG_IP6_NF_FILTER=m |
181 | CONFIG_IP6_NF_TARGET_REJECT=m | 204 | CONFIG_IP6_NF_TARGET_REJECT=m |
205 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
182 | CONFIG_IP6_NF_MANGLE=m | 206 | CONFIG_IP6_NF_MANGLE=m |
183 | CONFIG_IP6_NF_RAW=m | 207 | CONFIG_IP6_NF_RAW=m |
184 | CONFIG_NF_NAT_IPV6=m | 208 | CONFIG_NF_NAT_IPV6=m |
185 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 209 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
186 | CONFIG_IP6_NF_TARGET_NPT=m | 210 | CONFIG_IP6_NF_TARGET_NPT=m |
211 | CONFIG_NF_TABLES_BRIDGE=m | ||
187 | CONFIG_IP_DCCP=m | 212 | CONFIG_IP_DCCP=m |
188 | # CONFIG_IP_DCCP_CCID3 is not set | 213 | # CONFIG_IP_DCCP_CCID3 is not set |
189 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 214 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -191,10 +216,13 @@ CONFIG_RDS=m | |||
191 | CONFIG_RDS_TCP=m | 216 | CONFIG_RDS_TCP=m |
192 | CONFIG_L2TP=m | 217 | CONFIG_L2TP=m |
193 | CONFIG_ATALK=m | 218 | CONFIG_ATALK=m |
219 | CONFIG_DNS_RESOLVER=y | ||
194 | CONFIG_BATMAN_ADV=m | 220 | CONFIG_BATMAN_ADV=m |
195 | CONFIG_BATMAN_ADV_DAT=y | 221 | CONFIG_BATMAN_ADV_DAT=y |
222 | CONFIG_BATMAN_ADV_NC=y | ||
223 | CONFIG_NETLINK_DIAG=m | ||
224 | CONFIG_NET_MPLS_GSO=m | ||
196 | # CONFIG_WIRELESS is not set | 225 | # CONFIG_WIRELESS is not set |
197 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
198 | CONFIG_DEVTMPFS=y | 226 | CONFIG_DEVTMPFS=y |
199 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 227 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
200 | # CONFIG_FW_LOADER_USER_HELPER is not set | 228 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -209,6 +237,7 @@ CONFIG_BLK_DEV_NBD=m | |||
209 | CONFIG_BLK_DEV_RAM=y | 237 | CONFIG_BLK_DEV_RAM=y |
210 | CONFIG_CDROM_PKTCDVD=m | 238 | CONFIG_CDROM_PKTCDVD=m |
211 | CONFIG_ATA_OVER_ETH=m | 239 | CONFIG_ATA_OVER_ETH=m |
240 | CONFIG_DUMMY_IRQ=m | ||
212 | CONFIG_IDE=y | 241 | CONFIG_IDE=y |
213 | CONFIG_IDE_GD_ATAPI=y | 242 | CONFIG_IDE_GD_ATAPI=y |
214 | CONFIG_BLK_DEV_IDECD=y | 243 | CONFIG_BLK_DEV_IDECD=y |
@@ -249,6 +278,7 @@ CONFIG_EQUALIZER=m | |||
249 | CONFIG_NET_TEAM=m | 278 | CONFIG_NET_TEAM=m |
250 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 279 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
251 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 280 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
281 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
252 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 282 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
253 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 283 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
254 | CONFIG_VXLAN=m | 284 | CONFIG_VXLAN=m |
@@ -257,10 +287,10 @@ CONFIG_NETCONSOLE_DYNAMIC=y | |||
257 | CONFIG_VETH=m | 287 | CONFIG_VETH=m |
258 | # CONFIG_NET_VENDOR_3COM is not set | 288 | # CONFIG_NET_VENDOR_3COM is not set |
259 | # CONFIG_NET_VENDOR_AMD is not set | 289 | # CONFIG_NET_VENDOR_AMD is not set |
290 | # CONFIG_NET_VENDOR_ARC is not set | ||
260 | # CONFIG_NET_CADENCE is not set | 291 | # CONFIG_NET_CADENCE is not set |
261 | # CONFIG_NET_VENDOR_BROADCOM is not set | 292 | # CONFIG_NET_VENDOR_BROADCOM is not set |
262 | # CONFIG_NET_VENDOR_CIRRUS is not set | 293 | # CONFIG_NET_VENDOR_CIRRUS is not set |
263 | # CONFIG_NET_VENDOR_FUJITSU is not set | ||
264 | # CONFIG_NET_VENDOR_HP is not set | 294 | # CONFIG_NET_VENDOR_HP is not set |
265 | # CONFIG_NET_VENDOR_INTEL is not set | 295 | # CONFIG_NET_VENDOR_INTEL is not set |
266 | # CONFIG_NET_VENDOR_MARVELL is not set | 296 | # CONFIG_NET_VENDOR_MARVELL is not set |
@@ -269,6 +299,7 @@ CONFIG_NE2000=m | |||
269 | # CONFIG_NET_VENDOR_SEEQ is not set | 299 | # CONFIG_NET_VENDOR_SEEQ is not set |
270 | # CONFIG_NET_VENDOR_SMSC is not set | 300 | # CONFIG_NET_VENDOR_SMSC is not set |
271 | # CONFIG_NET_VENDOR_STMICRO is not set | 301 | # CONFIG_NET_VENDOR_STMICRO is not set |
302 | # CONFIG_NET_VENDOR_VIA is not set | ||
272 | # CONFIG_NET_VENDOR_WIZNET is not set | 303 | # CONFIG_NET_VENDOR_WIZNET is not set |
273 | CONFIG_PLIP=m | 304 | CONFIG_PLIP=m |
274 | CONFIG_PPP=m | 305 | CONFIG_PPP=m |
@@ -293,7 +324,6 @@ CONFIG_MOUSE_SERIAL=m | |||
293 | CONFIG_INPUT_MISC=y | 324 | CONFIG_INPUT_MISC=y |
294 | CONFIG_INPUT_M68K_BEEP=m | 325 | CONFIG_INPUT_M68K_BEEP=m |
295 | CONFIG_SERIO_Q40KBD=y | 326 | CONFIG_SERIO_Q40KBD=y |
296 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
297 | # CONFIG_LEGACY_PTYS is not set | 327 | # CONFIG_LEGACY_PTYS is not set |
298 | # CONFIG_DEVKMEM is not set | 328 | # CONFIG_DEVKMEM is not set |
299 | CONFIG_PRINTER=m | 329 | CONFIG_PRINTER=m |
@@ -318,10 +348,6 @@ CONFIG_RTC_DRV_GENERIC=m | |||
318 | # CONFIG_IOMMU_SUPPORT is not set | 348 | # CONFIG_IOMMU_SUPPORT is not set |
319 | CONFIG_HEARTBEAT=y | 349 | CONFIG_HEARTBEAT=y |
320 | CONFIG_PROC_HARDWARE=y | 350 | CONFIG_PROC_HARDWARE=y |
321 | CONFIG_EXT2_FS=y | ||
322 | CONFIG_EXT3_FS=y | ||
323 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
324 | # CONFIG_EXT3_FS_XATTR is not set | ||
325 | CONFIG_EXT4_FS=y | 351 | CONFIG_EXT4_FS=y |
326 | CONFIG_REISERFS_FS=m | 352 | CONFIG_REISERFS_FS=m |
327 | CONFIG_JFS_FS=m | 353 | CONFIG_JFS_FS=m |
@@ -358,7 +384,7 @@ CONFIG_QNX6FS_FS=m | |||
358 | CONFIG_SYSV_FS=m | 384 | CONFIG_SYSV_FS=m |
359 | CONFIG_UFS_FS=m | 385 | CONFIG_UFS_FS=m |
360 | CONFIG_NFS_FS=y | 386 | CONFIG_NFS_FS=y |
361 | CONFIG_NFS_V4=y | 387 | CONFIG_NFS_V4=m |
362 | CONFIG_NFS_SWAP=y | 388 | CONFIG_NFS_SWAP=y |
363 | CONFIG_ROOT_NFS=y | 389 | CONFIG_ROOT_NFS=y |
364 | CONFIG_NFSD=m | 390 | CONFIG_NFSD=m |
@@ -417,10 +443,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
417 | CONFIG_DLM=m | 443 | CONFIG_DLM=m |
418 | CONFIG_MAGIC_SYSRQ=y | 444 | CONFIG_MAGIC_SYSRQ=y |
419 | CONFIG_ASYNC_RAID6_TEST=m | 445 | CONFIG_ASYNC_RAID6_TEST=m |
446 | CONFIG_TEST_STRING_HELPERS=m | ||
420 | CONFIG_ENCRYPTED_KEYS=m | 447 | CONFIG_ENCRYPTED_KEYS=m |
421 | CONFIG_CRYPTO_MANAGER=y | 448 | CONFIG_CRYPTO_MANAGER=y |
422 | CONFIG_CRYPTO_USER=m | 449 | CONFIG_CRYPTO_USER=m |
423 | CONFIG_CRYPTO_NULL=m | ||
424 | CONFIG_CRYPTO_CRYPTD=m | 450 | CONFIG_CRYPTO_CRYPTD=m |
425 | CONFIG_CRYPTO_TEST=m | 451 | CONFIG_CRYPTO_TEST=m |
426 | CONFIG_CRYPTO_CCM=m | 452 | CONFIG_CRYPTO_CCM=m |
@@ -453,6 +479,8 @@ CONFIG_CRYPTO_TEA=m | |||
453 | CONFIG_CRYPTO_TWOFISH=m | 479 | CONFIG_CRYPTO_TWOFISH=m |
454 | CONFIG_CRYPTO_ZLIB=m | 480 | CONFIG_CRYPTO_ZLIB=m |
455 | CONFIG_CRYPTO_LZO=m | 481 | CONFIG_CRYPTO_LZO=m |
482 | CONFIG_CRYPTO_LZ4=m | ||
483 | CONFIG_CRYPTO_LZ4HC=m | ||
456 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 484 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
457 | CONFIG_CRYPTO_USER_API_HASH=m | 485 | CONFIG_CRYPTO_USER_API_HASH=m |
458 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 486 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 54674d61e001..60b0aeac5742 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig | |||
@@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y | |||
45 | CONFIG_NET_IPIP=m | 45 | CONFIG_NET_IPIP=m |
46 | CONFIG_NET_IPGRE_DEMUX=m | 46 | CONFIG_NET_IPGRE_DEMUX=m |
47 | CONFIG_NET_IPGRE=m | 47 | CONFIG_NET_IPGRE=m |
48 | CONFIG_SYN_COOKIES=y | ||
49 | CONFIG_NET_IPVTI=m | 48 | CONFIG_NET_IPVTI=m |
50 | CONFIG_INET_AH=m | 49 | CONFIG_INET_AH=m |
51 | CONFIG_INET_ESP=m | 50 | CONFIG_INET_ESP=m |
@@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
56 | # CONFIG_INET_LRO is not set | 55 | # CONFIG_INET_LRO is not set |
57 | CONFIG_INET_DIAG=m | 56 | CONFIG_INET_DIAG=m |
58 | CONFIG_INET_UDP_DIAG=m | 57 | CONFIG_INET_UDP_DIAG=m |
59 | CONFIG_IPV6_PRIVACY=y | ||
60 | CONFIG_IPV6_ROUTER_PREF=y | 58 | CONFIG_IPV6_ROUTER_PREF=y |
61 | CONFIG_INET6_AH=m | 59 | CONFIG_INET6_AH=m |
62 | CONFIG_INET6_ESP=m | 60 | CONFIG_INET6_ESP=m |
63 | CONFIG_INET6_IPCOMP=m | 61 | CONFIG_INET6_IPCOMP=m |
62 | CONFIG_IPV6_VTI=m | ||
64 | CONFIG_IPV6_GRE=m | 63 | CONFIG_IPV6_GRE=m |
65 | CONFIG_NETFILTER=y | 64 | CONFIG_NETFILTER=y |
66 | CONFIG_NF_CONNTRACK=m | 65 | CONFIG_NF_CONNTRACK=m |
@@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
78 | CONFIG_NF_CONNTRACK_SANE=m | 77 | CONFIG_NF_CONNTRACK_SANE=m |
79 | CONFIG_NF_CONNTRACK_SIP=m | 78 | CONFIG_NF_CONNTRACK_SIP=m |
80 | CONFIG_NF_CONNTRACK_TFTP=m | 79 | CONFIG_NF_CONNTRACK_TFTP=m |
80 | CONFIG_NF_TABLES=m | ||
81 | CONFIG_NFT_EXTHDR=m | ||
82 | CONFIG_NFT_META=m | ||
83 | CONFIG_NFT_CT=m | ||
84 | CONFIG_NFT_RBTREE=m | ||
85 | CONFIG_NFT_HASH=m | ||
86 | CONFIG_NFT_COUNTER=m | ||
87 | CONFIG_NFT_LOG=m | ||
88 | CONFIG_NFT_LIMIT=m | ||
89 | CONFIG_NFT_NAT=m | ||
90 | CONFIG_NFT_COMPAT=m | ||
81 | CONFIG_NETFILTER_XT_SET=m | 91 | CONFIG_NETFILTER_XT_SET=m |
82 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 92 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
83 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 93 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
91 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 101 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
92 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 102 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
93 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 103 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
104 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
94 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 105 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
95 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 106 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
96 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 107 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
123 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 134 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
124 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 135 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
125 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 136 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
137 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
126 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 138 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
127 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 139 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
128 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 140 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
137 | CONFIG_IP_SET_HASH_IPPORT=m | 149 | CONFIG_IP_SET_HASH_IPPORT=m |
138 | CONFIG_IP_SET_HASH_IPPORTIP=m | 150 | CONFIG_IP_SET_HASH_IPPORTIP=m |
139 | CONFIG_IP_SET_HASH_IPPORTNET=m | 151 | CONFIG_IP_SET_HASH_IPPORTNET=m |
152 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
140 | CONFIG_IP_SET_HASH_NET=m | 153 | CONFIG_IP_SET_HASH_NET=m |
154 | CONFIG_IP_SET_HASH_NETNET=m | ||
141 | CONFIG_IP_SET_HASH_NETPORT=m | 155 | CONFIG_IP_SET_HASH_NETPORT=m |
142 | CONFIG_IP_SET_HASH_NETIFACE=m | 156 | CONFIG_IP_SET_HASH_NETIFACE=m |
143 | CONFIG_IP_SET_LIST_SET=m | 157 | CONFIG_IP_SET_LIST_SET=m |
144 | CONFIG_NF_CONNTRACK_IPV4=m | 158 | CONFIG_NF_CONNTRACK_IPV4=m |
159 | CONFIG_NF_TABLES_IPV4=m | ||
160 | CONFIG_NFT_REJECT_IPV4=m | ||
161 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
162 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
163 | CONFIG_NF_TABLES_ARP=m | ||
145 | CONFIG_IP_NF_IPTABLES=m | 164 | CONFIG_IP_NF_IPTABLES=m |
146 | CONFIG_IP_NF_MATCH_AH=m | 165 | CONFIG_IP_NF_MATCH_AH=m |
147 | CONFIG_IP_NF_MATCH_ECN=m | 166 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
149 | CONFIG_IP_NF_MATCH_TTL=m | 168 | CONFIG_IP_NF_MATCH_TTL=m |
150 | CONFIG_IP_NF_FILTER=m | 169 | CONFIG_IP_NF_FILTER=m |
151 | CONFIG_IP_NF_TARGET_REJECT=m | 170 | CONFIG_IP_NF_TARGET_REJECT=m |
171 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
152 | CONFIG_IP_NF_TARGET_ULOG=m | 172 | CONFIG_IP_NF_TARGET_ULOG=m |
153 | CONFIG_NF_NAT_IPV4=m | 173 | CONFIG_NF_NAT_IPV4=m |
154 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 174 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
163 | CONFIG_IP_NF_ARPFILTER=m | 183 | CONFIG_IP_NF_ARPFILTER=m |
164 | CONFIG_IP_NF_ARP_MANGLE=m | 184 | CONFIG_IP_NF_ARP_MANGLE=m |
165 | CONFIG_NF_CONNTRACK_IPV6=m | 185 | CONFIG_NF_CONNTRACK_IPV6=m |
186 | CONFIG_NF_TABLES_IPV6=m | ||
187 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
188 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
166 | CONFIG_IP6_NF_IPTABLES=m | 189 | CONFIG_IP6_NF_IPTABLES=m |
167 | CONFIG_IP6_NF_MATCH_AH=m | 190 | CONFIG_IP6_NF_MATCH_AH=m |
168 | CONFIG_IP6_NF_MATCH_EUI64=m | 191 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
176 | CONFIG_IP6_NF_TARGET_HL=m | 199 | CONFIG_IP6_NF_TARGET_HL=m |
177 | CONFIG_IP6_NF_FILTER=m | 200 | CONFIG_IP6_NF_FILTER=m |
178 | CONFIG_IP6_NF_TARGET_REJECT=m | 201 | CONFIG_IP6_NF_TARGET_REJECT=m |
202 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
179 | CONFIG_IP6_NF_MANGLE=m | 203 | CONFIG_IP6_NF_MANGLE=m |
180 | CONFIG_IP6_NF_RAW=m | 204 | CONFIG_IP6_NF_RAW=m |
181 | CONFIG_NF_NAT_IPV6=m | 205 | CONFIG_NF_NAT_IPV6=m |
182 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 206 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
183 | CONFIG_IP6_NF_TARGET_NPT=m | 207 | CONFIG_IP6_NF_TARGET_NPT=m |
208 | CONFIG_NF_TABLES_BRIDGE=m | ||
184 | CONFIG_IP_DCCP=m | 209 | CONFIG_IP_DCCP=m |
185 | # CONFIG_IP_DCCP_CCID3 is not set | 210 | # CONFIG_IP_DCCP_CCID3 is not set |
186 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 211 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -188,10 +213,13 @@ CONFIG_RDS=m | |||
188 | CONFIG_RDS_TCP=m | 213 | CONFIG_RDS_TCP=m |
189 | CONFIG_L2TP=m | 214 | CONFIG_L2TP=m |
190 | CONFIG_ATALK=m | 215 | CONFIG_ATALK=m |
216 | CONFIG_DNS_RESOLVER=y | ||
191 | CONFIG_BATMAN_ADV=m | 217 | CONFIG_BATMAN_ADV=m |
192 | CONFIG_BATMAN_ADV_DAT=y | 218 | CONFIG_BATMAN_ADV_DAT=y |
219 | CONFIG_BATMAN_ADV_NC=y | ||
220 | CONFIG_NETLINK_DIAG=m | ||
221 | CONFIG_NET_MPLS_GSO=m | ||
193 | # CONFIG_WIRELESS is not set | 222 | # CONFIG_WIRELESS is not set |
194 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
195 | CONFIG_DEVTMPFS=y | 223 | CONFIG_DEVTMPFS=y |
196 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 224 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
197 | # CONFIG_FW_LOADER_USER_HELPER is not set | 225 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m | |||
203 | CONFIG_BLK_DEV_RAM=y | 231 | CONFIG_BLK_DEV_RAM=y |
204 | CONFIG_CDROM_PKTCDVD=m | 232 | CONFIG_CDROM_PKTCDVD=m |
205 | CONFIG_ATA_OVER_ETH=m | 233 | CONFIG_ATA_OVER_ETH=m |
234 | CONFIG_DUMMY_IRQ=m | ||
206 | CONFIG_RAID_ATTRS=m | 235 | CONFIG_RAID_ATTRS=m |
207 | CONFIG_SCSI=y | 236 | CONFIG_SCSI=y |
208 | CONFIG_SCSI_TGT=m | 237 | CONFIG_SCSI_TGT=m |
@@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m | |||
240 | CONFIG_NET_TEAM=m | 269 | CONFIG_NET_TEAM=m |
241 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 270 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
242 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 271 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
272 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
243 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 273 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
244 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 274 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
245 | CONFIG_VXLAN=m | 275 | CONFIG_VXLAN=m |
@@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m | |||
247 | CONFIG_NETCONSOLE_DYNAMIC=y | 277 | CONFIG_NETCONSOLE_DYNAMIC=y |
248 | CONFIG_VETH=m | 278 | CONFIG_VETH=m |
249 | CONFIG_SUN3LANCE=y | 279 | CONFIG_SUN3LANCE=y |
280 | # CONFIG_NET_VENDOR_ARC is not set | ||
250 | # CONFIG_NET_CADENCE is not set | 281 | # CONFIG_NET_CADENCE is not set |
251 | CONFIG_SUN3_82586=y | 282 | CONFIG_SUN3_82586=y |
252 | # CONFIG_NET_VENDOR_MARVELL is not set | 283 | # CONFIG_NET_VENDOR_MARVELL is not set |
@@ -255,6 +286,7 @@ CONFIG_SUN3_82586=y | |||
255 | # CONFIG_NET_VENDOR_SEEQ is not set | 286 | # CONFIG_NET_VENDOR_SEEQ is not set |
256 | # CONFIG_NET_VENDOR_STMICRO is not set | 287 | # CONFIG_NET_VENDOR_STMICRO is not set |
257 | # CONFIG_NET_VENDOR_SUN is not set | 288 | # CONFIG_NET_VENDOR_SUN is not set |
289 | # CONFIG_NET_VENDOR_VIA is not set | ||
258 | # CONFIG_NET_VENDOR_WIZNET is not set | 290 | # CONFIG_NET_VENDOR_WIZNET is not set |
259 | CONFIG_PPP=m | 291 | CONFIG_PPP=m |
260 | CONFIG_PPP_BSDCOMP=m | 292 | CONFIG_PPP_BSDCOMP=m |
@@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m | |||
276 | CONFIG_KEYBOARD_SUNKBD=y | 308 | CONFIG_KEYBOARD_SUNKBD=y |
277 | # CONFIG_MOUSE_PS2 is not set | 309 | # CONFIG_MOUSE_PS2 is not set |
278 | CONFIG_MOUSE_SERIAL=m | 310 | CONFIG_MOUSE_SERIAL=m |
279 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
280 | # CONFIG_LEGACY_PTYS is not set | 311 | # CONFIG_LEGACY_PTYS is not set |
281 | # CONFIG_DEVKMEM is not set | 312 | # CONFIG_DEVKMEM is not set |
282 | # CONFIG_HW_RANDOM is not set | 313 | # CONFIG_HW_RANDOM is not set |
@@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y | |||
296 | CONFIG_RTC_DRV_GENERIC=m | 327 | CONFIG_RTC_DRV_GENERIC=m |
297 | # CONFIG_IOMMU_SUPPORT is not set | 328 | # CONFIG_IOMMU_SUPPORT is not set |
298 | CONFIG_PROC_HARDWARE=y | 329 | CONFIG_PROC_HARDWARE=y |
299 | CONFIG_EXT2_FS=y | ||
300 | CONFIG_EXT3_FS=y | ||
301 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
302 | # CONFIG_EXT3_FS_XATTR is not set | ||
303 | CONFIG_EXT4_FS=y | 330 | CONFIG_EXT4_FS=y |
304 | CONFIG_REISERFS_FS=m | 331 | CONFIG_REISERFS_FS=m |
305 | CONFIG_JFS_FS=m | 332 | CONFIG_JFS_FS=m |
@@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m | |||
336 | CONFIG_SYSV_FS=m | 363 | CONFIG_SYSV_FS=m |
337 | CONFIG_UFS_FS=m | 364 | CONFIG_UFS_FS=m |
338 | CONFIG_NFS_FS=y | 365 | CONFIG_NFS_FS=y |
339 | CONFIG_NFS_V4=y | 366 | CONFIG_NFS_V4=m |
340 | CONFIG_NFS_SWAP=y | 367 | CONFIG_NFS_SWAP=y |
341 | CONFIG_ROOT_NFS=y | 368 | CONFIG_ROOT_NFS=y |
342 | CONFIG_NFSD=m | 369 | CONFIG_NFSD=m |
@@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
395 | CONFIG_DLM=m | 422 | CONFIG_DLM=m |
396 | CONFIG_MAGIC_SYSRQ=y | 423 | CONFIG_MAGIC_SYSRQ=y |
397 | CONFIG_ASYNC_RAID6_TEST=m | 424 | CONFIG_ASYNC_RAID6_TEST=m |
425 | CONFIG_TEST_STRING_HELPERS=m | ||
398 | CONFIG_ENCRYPTED_KEYS=m | 426 | CONFIG_ENCRYPTED_KEYS=m |
399 | CONFIG_CRYPTO_MANAGER=y | 427 | CONFIG_CRYPTO_MANAGER=y |
400 | CONFIG_CRYPTO_USER=m | 428 | CONFIG_CRYPTO_USER=m |
401 | CONFIG_CRYPTO_NULL=m | ||
402 | CONFIG_CRYPTO_CRYPTD=m | 429 | CONFIG_CRYPTO_CRYPTD=m |
403 | CONFIG_CRYPTO_TEST=m | 430 | CONFIG_CRYPTO_TEST=m |
404 | CONFIG_CRYPTO_CCM=m | 431 | CONFIG_CRYPTO_CCM=m |
@@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m | |||
431 | CONFIG_CRYPTO_TWOFISH=m | 458 | CONFIG_CRYPTO_TWOFISH=m |
432 | CONFIG_CRYPTO_ZLIB=m | 459 | CONFIG_CRYPTO_ZLIB=m |
433 | CONFIG_CRYPTO_LZO=m | 460 | CONFIG_CRYPTO_LZO=m |
461 | CONFIG_CRYPTO_LZ4=m | ||
462 | CONFIG_CRYPTO_LZ4HC=m | ||
434 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 463 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
435 | CONFIG_CRYPTO_USER_API_HASH=m | 464 | CONFIG_CRYPTO_USER_API_HASH=m |
436 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 465 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 832d9539f441..21bda331eebb 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig | |||
@@ -45,7 +45,6 @@ CONFIG_IP_PNP_RARP=y | |||
45 | CONFIG_NET_IPIP=m | 45 | CONFIG_NET_IPIP=m |
46 | CONFIG_NET_IPGRE_DEMUX=m | 46 | CONFIG_NET_IPGRE_DEMUX=m |
47 | CONFIG_NET_IPGRE=m | 47 | CONFIG_NET_IPGRE=m |
48 | CONFIG_SYN_COOKIES=y | ||
49 | CONFIG_NET_IPVTI=m | 48 | CONFIG_NET_IPVTI=m |
50 | CONFIG_INET_AH=m | 49 | CONFIG_INET_AH=m |
51 | CONFIG_INET_ESP=m | 50 | CONFIG_INET_ESP=m |
@@ -56,11 +55,11 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
56 | # CONFIG_INET_LRO is not set | 55 | # CONFIG_INET_LRO is not set |
57 | CONFIG_INET_DIAG=m | 56 | CONFIG_INET_DIAG=m |
58 | CONFIG_INET_UDP_DIAG=m | 57 | CONFIG_INET_UDP_DIAG=m |
59 | CONFIG_IPV6_PRIVACY=y | ||
60 | CONFIG_IPV6_ROUTER_PREF=y | 58 | CONFIG_IPV6_ROUTER_PREF=y |
61 | CONFIG_INET6_AH=m | 59 | CONFIG_INET6_AH=m |
62 | CONFIG_INET6_ESP=m | 60 | CONFIG_INET6_ESP=m |
63 | CONFIG_INET6_IPCOMP=m | 61 | CONFIG_INET6_IPCOMP=m |
62 | CONFIG_IPV6_VTI=m | ||
64 | CONFIG_IPV6_GRE=m | 63 | CONFIG_IPV6_GRE=m |
65 | CONFIG_NETFILTER=y | 64 | CONFIG_NETFILTER=y |
66 | CONFIG_NF_CONNTRACK=m | 65 | CONFIG_NF_CONNTRACK=m |
@@ -78,6 +77,17 @@ CONFIG_NF_CONNTRACK_PPTP=m | |||
78 | CONFIG_NF_CONNTRACK_SANE=m | 77 | CONFIG_NF_CONNTRACK_SANE=m |
79 | CONFIG_NF_CONNTRACK_SIP=m | 78 | CONFIG_NF_CONNTRACK_SIP=m |
80 | CONFIG_NF_CONNTRACK_TFTP=m | 79 | CONFIG_NF_CONNTRACK_TFTP=m |
80 | CONFIG_NF_TABLES=m | ||
81 | CONFIG_NFT_EXTHDR=m | ||
82 | CONFIG_NFT_META=m | ||
83 | CONFIG_NFT_CT=m | ||
84 | CONFIG_NFT_RBTREE=m | ||
85 | CONFIG_NFT_HASH=m | ||
86 | CONFIG_NFT_COUNTER=m | ||
87 | CONFIG_NFT_LOG=m | ||
88 | CONFIG_NFT_LIMIT=m | ||
89 | CONFIG_NFT_NAT=m | ||
90 | CONFIG_NFT_COMPAT=m | ||
81 | CONFIG_NETFILTER_XT_SET=m | 91 | CONFIG_NETFILTER_XT_SET=m |
82 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m | 92 | CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m |
83 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m | 93 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m |
@@ -91,6 +101,7 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m | |||
91 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m | 101 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m |
92 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m | 102 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=m |
93 | CONFIG_NETFILTER_XT_TARGET_TEE=m | 103 | CONFIG_NETFILTER_XT_TARGET_TEE=m |
104 | CONFIG_NETFILTER_XT_TARGET_TPROXY=m | ||
94 | CONFIG_NETFILTER_XT_TARGET_TRACE=m | 105 | CONFIG_NETFILTER_XT_TARGET_TRACE=m |
95 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m | 106 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=m |
96 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m | 107 | CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m |
@@ -123,6 +134,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m | |||
123 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m | 134 | CONFIG_NETFILTER_XT_MATCH_RATEEST=m |
124 | CONFIG_NETFILTER_XT_MATCH_REALM=m | 135 | CONFIG_NETFILTER_XT_MATCH_REALM=m |
125 | CONFIG_NETFILTER_XT_MATCH_RECENT=m | 136 | CONFIG_NETFILTER_XT_MATCH_RECENT=m |
137 | CONFIG_NETFILTER_XT_MATCH_SOCKET=m | ||
126 | CONFIG_NETFILTER_XT_MATCH_STATE=m | 138 | CONFIG_NETFILTER_XT_MATCH_STATE=m |
127 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m | 139 | CONFIG_NETFILTER_XT_MATCH_STATISTIC=m |
128 | CONFIG_NETFILTER_XT_MATCH_STRING=m | 140 | CONFIG_NETFILTER_XT_MATCH_STRING=m |
@@ -137,11 +149,18 @@ CONFIG_IP_SET_HASH_IP=m | |||
137 | CONFIG_IP_SET_HASH_IPPORT=m | 149 | CONFIG_IP_SET_HASH_IPPORT=m |
138 | CONFIG_IP_SET_HASH_IPPORTIP=m | 150 | CONFIG_IP_SET_HASH_IPPORTIP=m |
139 | CONFIG_IP_SET_HASH_IPPORTNET=m | 151 | CONFIG_IP_SET_HASH_IPPORTNET=m |
152 | CONFIG_IP_SET_HASH_NETPORTNET=m | ||
140 | CONFIG_IP_SET_HASH_NET=m | 153 | CONFIG_IP_SET_HASH_NET=m |
154 | CONFIG_IP_SET_HASH_NETNET=m | ||
141 | CONFIG_IP_SET_HASH_NETPORT=m | 155 | CONFIG_IP_SET_HASH_NETPORT=m |
142 | CONFIG_IP_SET_HASH_NETIFACE=m | 156 | CONFIG_IP_SET_HASH_NETIFACE=m |
143 | CONFIG_IP_SET_LIST_SET=m | 157 | CONFIG_IP_SET_LIST_SET=m |
144 | CONFIG_NF_CONNTRACK_IPV4=m | 158 | CONFIG_NF_CONNTRACK_IPV4=m |
159 | CONFIG_NF_TABLES_IPV4=m | ||
160 | CONFIG_NFT_REJECT_IPV4=m | ||
161 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | ||
162 | CONFIG_NFT_CHAIN_NAT_IPV4=m | ||
163 | CONFIG_NF_TABLES_ARP=m | ||
145 | CONFIG_IP_NF_IPTABLES=m | 164 | CONFIG_IP_NF_IPTABLES=m |
146 | CONFIG_IP_NF_MATCH_AH=m | 165 | CONFIG_IP_NF_MATCH_AH=m |
147 | CONFIG_IP_NF_MATCH_ECN=m | 166 | CONFIG_IP_NF_MATCH_ECN=m |
@@ -149,6 +168,7 @@ CONFIG_IP_NF_MATCH_RPFILTER=m | |||
149 | CONFIG_IP_NF_MATCH_TTL=m | 168 | CONFIG_IP_NF_MATCH_TTL=m |
150 | CONFIG_IP_NF_FILTER=m | 169 | CONFIG_IP_NF_FILTER=m |
151 | CONFIG_IP_NF_TARGET_REJECT=m | 170 | CONFIG_IP_NF_TARGET_REJECT=m |
171 | CONFIG_IP_NF_TARGET_SYNPROXY=m | ||
152 | CONFIG_IP_NF_TARGET_ULOG=m | 172 | CONFIG_IP_NF_TARGET_ULOG=m |
153 | CONFIG_NF_NAT_IPV4=m | 173 | CONFIG_NF_NAT_IPV4=m |
154 | CONFIG_IP_NF_TARGET_MASQUERADE=m | 174 | CONFIG_IP_NF_TARGET_MASQUERADE=m |
@@ -163,6 +183,9 @@ CONFIG_IP_NF_ARPTABLES=m | |||
163 | CONFIG_IP_NF_ARPFILTER=m | 183 | CONFIG_IP_NF_ARPFILTER=m |
164 | CONFIG_IP_NF_ARP_MANGLE=m | 184 | CONFIG_IP_NF_ARP_MANGLE=m |
165 | CONFIG_NF_CONNTRACK_IPV6=m | 185 | CONFIG_NF_CONNTRACK_IPV6=m |
186 | CONFIG_NF_TABLES_IPV6=m | ||
187 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | ||
188 | CONFIG_NFT_CHAIN_NAT_IPV6=m | ||
166 | CONFIG_IP6_NF_IPTABLES=m | 189 | CONFIG_IP6_NF_IPTABLES=m |
167 | CONFIG_IP6_NF_MATCH_AH=m | 190 | CONFIG_IP6_NF_MATCH_AH=m |
168 | CONFIG_IP6_NF_MATCH_EUI64=m | 191 | CONFIG_IP6_NF_MATCH_EUI64=m |
@@ -176,11 +199,13 @@ CONFIG_IP6_NF_MATCH_RT=m | |||
176 | CONFIG_IP6_NF_TARGET_HL=m | 199 | CONFIG_IP6_NF_TARGET_HL=m |
177 | CONFIG_IP6_NF_FILTER=m | 200 | CONFIG_IP6_NF_FILTER=m |
178 | CONFIG_IP6_NF_TARGET_REJECT=m | 201 | CONFIG_IP6_NF_TARGET_REJECT=m |
202 | CONFIG_IP6_NF_TARGET_SYNPROXY=m | ||
179 | CONFIG_IP6_NF_MANGLE=m | 203 | CONFIG_IP6_NF_MANGLE=m |
180 | CONFIG_IP6_NF_RAW=m | 204 | CONFIG_IP6_NF_RAW=m |
181 | CONFIG_NF_NAT_IPV6=m | 205 | CONFIG_NF_NAT_IPV6=m |
182 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 206 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
183 | CONFIG_IP6_NF_TARGET_NPT=m | 207 | CONFIG_IP6_NF_TARGET_NPT=m |
208 | CONFIG_NF_TABLES_BRIDGE=m | ||
184 | CONFIG_IP_DCCP=m | 209 | CONFIG_IP_DCCP=m |
185 | # CONFIG_IP_DCCP_CCID3 is not set | 210 | # CONFIG_IP_DCCP_CCID3 is not set |
186 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y | 211 | CONFIG_SCTP_COOKIE_HMAC_SHA1=y |
@@ -188,10 +213,13 @@ CONFIG_RDS=m | |||
188 | CONFIG_RDS_TCP=m | 213 | CONFIG_RDS_TCP=m |
189 | CONFIG_L2TP=m | 214 | CONFIG_L2TP=m |
190 | CONFIG_ATALK=m | 215 | CONFIG_ATALK=m |
216 | CONFIG_DNS_RESOLVER=y | ||
191 | CONFIG_BATMAN_ADV=m | 217 | CONFIG_BATMAN_ADV=m |
192 | CONFIG_BATMAN_ADV_DAT=y | 218 | CONFIG_BATMAN_ADV_DAT=y |
219 | CONFIG_BATMAN_ADV_NC=y | ||
220 | CONFIG_NETLINK_DIAG=m | ||
221 | CONFIG_NET_MPLS_GSO=m | ||
193 | # CONFIG_WIRELESS is not set | 222 | # CONFIG_WIRELESS is not set |
194 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
195 | CONFIG_DEVTMPFS=y | 223 | CONFIG_DEVTMPFS=y |
196 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 224 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
197 | # CONFIG_FW_LOADER_USER_HELPER is not set | 225 | # CONFIG_FW_LOADER_USER_HELPER is not set |
@@ -203,6 +231,7 @@ CONFIG_BLK_DEV_NBD=m | |||
203 | CONFIG_BLK_DEV_RAM=y | 231 | CONFIG_BLK_DEV_RAM=y |
204 | CONFIG_CDROM_PKTCDVD=m | 232 | CONFIG_CDROM_PKTCDVD=m |
205 | CONFIG_ATA_OVER_ETH=m | 233 | CONFIG_ATA_OVER_ETH=m |
234 | CONFIG_DUMMY_IRQ=m | ||
206 | CONFIG_RAID_ATTRS=m | 235 | CONFIG_RAID_ATTRS=m |
207 | CONFIG_SCSI=y | 236 | CONFIG_SCSI=y |
208 | CONFIG_SCSI_TGT=m | 237 | CONFIG_SCSI_TGT=m |
@@ -240,6 +269,7 @@ CONFIG_EQUALIZER=m | |||
240 | CONFIG_NET_TEAM=m | 269 | CONFIG_NET_TEAM=m |
241 | CONFIG_NET_TEAM_MODE_BROADCAST=m | 270 | CONFIG_NET_TEAM_MODE_BROADCAST=m |
242 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m | 271 | CONFIG_NET_TEAM_MODE_ROUNDROBIN=m |
272 | CONFIG_NET_TEAM_MODE_RANDOM=m | ||
243 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m | 273 | CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m |
244 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m | 274 | CONFIG_NET_TEAM_MODE_LOADBALANCE=m |
245 | CONFIG_VXLAN=m | 275 | CONFIG_VXLAN=m |
@@ -247,6 +277,7 @@ CONFIG_NETCONSOLE=m | |||
247 | CONFIG_NETCONSOLE_DYNAMIC=y | 277 | CONFIG_NETCONSOLE_DYNAMIC=y |
248 | CONFIG_VETH=m | 278 | CONFIG_VETH=m |
249 | CONFIG_SUN3LANCE=y | 279 | CONFIG_SUN3LANCE=y |
280 | # CONFIG_NET_VENDOR_ARC is not set | ||
250 | # CONFIG_NET_CADENCE is not set | 281 | # CONFIG_NET_CADENCE is not set |
251 | # CONFIG_NET_VENDOR_BROADCOM is not set | 282 | # CONFIG_NET_VENDOR_BROADCOM is not set |
252 | # CONFIG_NET_VENDOR_INTEL is not set | 283 | # CONFIG_NET_VENDOR_INTEL is not set |
@@ -255,6 +286,7 @@ CONFIG_SUN3LANCE=y | |||
255 | # CONFIG_NET_VENDOR_NATSEMI is not set | 286 | # CONFIG_NET_VENDOR_NATSEMI is not set |
256 | # CONFIG_NET_VENDOR_SEEQ is not set | 287 | # CONFIG_NET_VENDOR_SEEQ is not set |
257 | # CONFIG_NET_VENDOR_STMICRO is not set | 288 | # CONFIG_NET_VENDOR_STMICRO is not set |
289 | # CONFIG_NET_VENDOR_VIA is not set | ||
258 | # CONFIG_NET_VENDOR_WIZNET is not set | 290 | # CONFIG_NET_VENDOR_WIZNET is not set |
259 | CONFIG_PPP=m | 291 | CONFIG_PPP=m |
260 | CONFIG_PPP_BSDCOMP=m | 292 | CONFIG_PPP_BSDCOMP=m |
@@ -276,7 +308,6 @@ CONFIG_INPUT_EVDEV=m | |||
276 | CONFIG_KEYBOARD_SUNKBD=y | 308 | CONFIG_KEYBOARD_SUNKBD=y |
277 | # CONFIG_MOUSE_PS2 is not set | 309 | # CONFIG_MOUSE_PS2 is not set |
278 | CONFIG_MOUSE_SERIAL=m | 310 | CONFIG_MOUSE_SERIAL=m |
279 | CONFIG_VT_HW_CONSOLE_BINDING=y | ||
280 | # CONFIG_LEGACY_PTYS is not set | 311 | # CONFIG_LEGACY_PTYS is not set |
281 | # CONFIG_DEVKMEM is not set | 312 | # CONFIG_DEVKMEM is not set |
282 | # CONFIG_HW_RANDOM is not set | 313 | # CONFIG_HW_RANDOM is not set |
@@ -296,10 +327,6 @@ CONFIG_RTC_CLASS=y | |||
296 | CONFIG_RTC_DRV_GENERIC=m | 327 | CONFIG_RTC_DRV_GENERIC=m |
297 | # CONFIG_IOMMU_SUPPORT is not set | 328 | # CONFIG_IOMMU_SUPPORT is not set |
298 | CONFIG_PROC_HARDWARE=y | 329 | CONFIG_PROC_HARDWARE=y |
299 | CONFIG_EXT2_FS=y | ||
300 | CONFIG_EXT3_FS=y | ||
301 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
302 | # CONFIG_EXT3_FS_XATTR is not set | ||
303 | CONFIG_EXT4_FS=y | 330 | CONFIG_EXT4_FS=y |
304 | CONFIG_REISERFS_FS=m | 331 | CONFIG_REISERFS_FS=m |
305 | CONFIG_JFS_FS=m | 332 | CONFIG_JFS_FS=m |
@@ -336,7 +363,7 @@ CONFIG_QNX6FS_FS=m | |||
336 | CONFIG_SYSV_FS=m | 363 | CONFIG_SYSV_FS=m |
337 | CONFIG_UFS_FS=m | 364 | CONFIG_UFS_FS=m |
338 | CONFIG_NFS_FS=y | 365 | CONFIG_NFS_FS=y |
339 | CONFIG_NFS_V4=y | 366 | CONFIG_NFS_V4=m |
340 | CONFIG_NFS_SWAP=y | 367 | CONFIG_NFS_SWAP=y |
341 | CONFIG_ROOT_NFS=y | 368 | CONFIG_ROOT_NFS=y |
342 | CONFIG_NFSD=m | 369 | CONFIG_NFSD=m |
@@ -395,10 +422,10 @@ CONFIG_NLS_MAC_TURKISH=m | |||
395 | CONFIG_DLM=m | 422 | CONFIG_DLM=m |
396 | CONFIG_MAGIC_SYSRQ=y | 423 | CONFIG_MAGIC_SYSRQ=y |
397 | CONFIG_ASYNC_RAID6_TEST=m | 424 | CONFIG_ASYNC_RAID6_TEST=m |
425 | CONFIG_TEST_STRING_HELPERS=m | ||
398 | CONFIG_ENCRYPTED_KEYS=m | 426 | CONFIG_ENCRYPTED_KEYS=m |
399 | CONFIG_CRYPTO_MANAGER=y | 427 | CONFIG_CRYPTO_MANAGER=y |
400 | CONFIG_CRYPTO_USER=m | 428 | CONFIG_CRYPTO_USER=m |
401 | CONFIG_CRYPTO_NULL=m | ||
402 | CONFIG_CRYPTO_CRYPTD=m | 429 | CONFIG_CRYPTO_CRYPTD=m |
403 | CONFIG_CRYPTO_TEST=m | 430 | CONFIG_CRYPTO_TEST=m |
404 | CONFIG_CRYPTO_CCM=m | 431 | CONFIG_CRYPTO_CCM=m |
@@ -431,6 +458,8 @@ CONFIG_CRYPTO_TEA=m | |||
431 | CONFIG_CRYPTO_TWOFISH=m | 458 | CONFIG_CRYPTO_TWOFISH=m |
432 | CONFIG_CRYPTO_ZLIB=m | 459 | CONFIG_CRYPTO_ZLIB=m |
433 | CONFIG_CRYPTO_LZO=m | 460 | CONFIG_CRYPTO_LZO=m |
461 | CONFIG_CRYPTO_LZ4=m | ||
462 | CONFIG_CRYPTO_LZ4HC=m | ||
434 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 463 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
435 | CONFIG_CRYPTO_USER_API_HASH=m | 464 | CONFIG_CRYPTO_USER_API_HASH=m |
436 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 465 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 121a6660ad4e..71b78ecee75c 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * the GNU General Public License (GPL), incorporated herein by reference. | 9 | * the GNU General Public License (GPL), incorporated herein by reference. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/init.h> | ||
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | #include <linux/console.h> | 14 | #include <linux/console.h> |
14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
@@ -70,7 +71,7 @@ static void nf_poweroff(void) | |||
70 | nf_call(id); | 71 | nf_call(id); |
71 | } | 72 | } |
72 | 73 | ||
73 | void nf_init(void) | 74 | void __init nf_init(void) |
74 | { | 75 | { |
75 | unsigned long id, version; | 76 | unsigned long id, version; |
76 | char buf[256]; | 77 | char buf[256]; |
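For context, the __init annotation added here (together with the new <linux/init.h> include) moves nf_init() into the section the kernel discards once booting has finished. A minimal sketch of the same pattern, with hypothetical names, not taken from this patch:

	#include <linux/init.h>
	#include <linux/printk.h>

	static int example_probe_value __initdata = 42;	/* lives in .init.data, freed after boot */

	static int __init example_early_setup(void)		/* hypothetical function, illustration only */
	{
		pr_info("example: %d\n", example_probe_value);
		return 0;
	}
	early_initcall(example_early_setup);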
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c index b7609f791522..2e5a787ea11b 100644 --- a/arch/m68k/hp300/config.c +++ b/arch/m68k/hp300/config.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/console.h> | 14 | #include <linux/console.h> |
15 | 15 | ||
16 | #include <asm/bootinfo.h> | 16 | #include <asm/bootinfo.h> |
17 | #include <asm/bootinfo-hp300.h> | ||
18 | #include <asm/byteorder.h> | ||
17 | #include <asm/machdep.h> | 19 | #include <asm/machdep.h> |
18 | #include <asm/blinken.h> | 20 | #include <asm/blinken.h> |
19 | #include <asm/io.h> /* readb() and writeb() */ | 21 | #include <asm/io.h> /* readb() and writeb() */ |
@@ -70,15 +72,15 @@ extern int hp300_setup_serial_console(void) __init; | |||
70 | int __init hp300_parse_bootinfo(const struct bi_record *record) | 72 | int __init hp300_parse_bootinfo(const struct bi_record *record) |
71 | { | 73 | { |
72 | int unknown = 0; | 74 | int unknown = 0; |
73 | const unsigned long *data = record->data; | 75 | const void *data = record->data; |
74 | 76 | ||
75 | switch (record->tag) { | 77 | switch (be16_to_cpu(record->tag)) { |
76 | case BI_HP300_MODEL: | 78 | case BI_HP300_MODEL: |
77 | hp300_model = *data; | 79 | hp300_model = be32_to_cpup(data); |
78 | break; | 80 | break; |
79 | 81 | ||
80 | case BI_HP300_UART_SCODE: | 82 | case BI_HP300_UART_SCODE: |
81 | hp300_uart_scode = *data; | 83 | hp300_uart_scode = be32_to_cpup(data); |
82 | break; | 84 | break; |
83 | 85 | ||
84 | case BI_HP300_UART_ADDR: | 86 | case BI_HP300_UART_ADDR: |
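The switch to be16_to_cpu()/be32_to_cpup() (with the new <asm/byteorder.h> include) makes the big-endian layout of the bootinfo records explicit, so the same parsing code reads correctly on any host. A small self-contained sketch of the conversion idiom, with a hypothetical value and function name:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* illustration only: the value and the function are hypothetical */
	static u32 example_read_be32(void)
	{
		__be32 raw = cpu_to_be32(10);	/* as the bootstrap stores it in a record */

		return be32_to_cpup(&raw);	/* 10 again, regardless of host endianness */
	}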
diff --git a/arch/m68k/include/asm/amigahw.h b/arch/m68k/include/asm/amigahw.h index 7a19b5686a4a..5ad568110f17 100644 --- a/arch/m68k/include/asm/amigahw.h +++ b/arch/m68k/include/asm/amigahw.h | |||
@@ -18,26 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/ioport.h> | 19 | #include <linux/ioport.h> |
20 | 20 | ||
21 | /* | 21 | #include <asm/bootinfo-amiga.h> |
22 | * Different Amiga models | ||
23 | */ | ||
24 | |||
25 | #define AMI_UNKNOWN (0) | ||
26 | #define AMI_500 (1) | ||
27 | #define AMI_500PLUS (2) | ||
28 | #define AMI_600 (3) | ||
29 | #define AMI_1000 (4) | ||
30 | #define AMI_1200 (5) | ||
31 | #define AMI_2000 (6) | ||
32 | #define AMI_2500 (7) | ||
33 | #define AMI_3000 (8) | ||
34 | #define AMI_3000T (9) | ||
35 | #define AMI_3000PLUS (10) | ||
36 | #define AMI_4000 (11) | ||
37 | #define AMI_4000T (12) | ||
38 | #define AMI_CDTV (13) | ||
39 | #define AMI_CD32 (14) | ||
40 | #define AMI_DRACO (15) | ||
41 | 22 | ||
42 | 23 | ||
43 | /* | 24 | /* |
@@ -46,11 +27,6 @@ | |||
46 | 27 | ||
47 | extern unsigned long amiga_chipset; | 28 | extern unsigned long amiga_chipset; |
48 | 29 | ||
49 | #define CS_STONEAGE (0) | ||
50 | #define CS_OCS (1) | ||
51 | #define CS_ECS (2) | ||
52 | #define CS_AGA (3) | ||
53 | |||
54 | 30 | ||
55 | /* | 31 | /* |
56 | * Miscellaneous | 32 | * Miscellaneous |
@@ -266,7 +242,7 @@ struct CIA { | |||
266 | 242 | ||
267 | #define zTwoBase (0x80000000) | 243 | #define zTwoBase (0x80000000) |
268 | #define ZTWO_PADDR(x) (((unsigned long)(x))-zTwoBase) | 244 | #define ZTWO_PADDR(x) (((unsigned long)(x))-zTwoBase) |
269 | #define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase) | 245 | #define ZTWO_VADDR(x) ((void __iomem *)(((unsigned long)(x))+zTwoBase)) |
270 | 246 | ||
271 | #define CUSTOM_PHYSADDR (0xdff000) | 247 | #define CUSTOM_PHYSADDR (0xdff000) |
272 | #define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR))) | 248 | #define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR))) |
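Because ZTWO_VADDR() now returns a void __iomem * cookie rather than an unsigned long, callers are expected to go through the MMIO accessors instead of dereferencing the result directly. A hedged sketch, using CUSTOM_PHYSADDR purely as an example offset:

	#include <linux/types.h>
	#include <asm/amigahw.h>
	#include <asm/io.h>

	/* sketch only: real drivers know their own register offsets */
	static u8 example_peek_custom(void)
	{
		void __iomem *base = ZTWO_VADDR(CUSTOM_PHYSADDR);

		return readb(base);	/* MMIO accessor instead of a plain pointer dereference */
	}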
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h index 6c19e0c22411..87fc899d32ee 100644 --- a/arch/m68k/include/asm/apollohw.h +++ b/arch/m68k/include/asm/apollohw.h | |||
@@ -5,18 +5,11 @@ | |||
5 | 5 | ||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | 7 | ||
8 | /* | 8 | #include <asm/bootinfo-apollo.h> |
9 | apollo models | 9 | |
10 | */ | ||
11 | 10 | ||
12 | extern u_long apollo_model; | 11 | extern u_long apollo_model; |
13 | 12 | ||
14 | #define APOLLO_UNKNOWN (0) | ||
15 | #define APOLLO_DN3000 (1) | ||
16 | #define APOLLO_DN3010 (2) | ||
17 | #define APOLLO_DN3500 (3) | ||
18 | #define APOLLO_DN4000 (4) | ||
19 | #define APOLLO_DN4500 (5) | ||
20 | 13 | ||
21 | /* | 14 | /* |
22 | see scn2681 data sheet for more info. | 15 | see scn2681 data sheet for more info. |
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index d887050e6da6..972c8f33f055 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #define _LINUX_ATARIHW_H_ | 21 | #define _LINUX_ATARIHW_H_ |
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <asm/bootinfo.h> | 24 | #include <asm/bootinfo-atari.h> |
25 | #include <asm/raw_io.h> | 25 | #include <asm/raw_io.h> |
26 | 26 | ||
27 | extern u_long atari_mch_cookie; | 27 | extern u_long atari_mch_cookie; |
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h index 445ce22c23cb..15c5f77c1614 100644 --- a/arch/m68k/include/asm/barrier.h +++ b/arch/m68k/include/asm/barrier.h | |||
@@ -1,20 +1,8 @@ | |||
1 | #ifndef _M68K_BARRIER_H | 1 | #ifndef _M68K_BARRIER_H |
2 | #define _M68K_BARRIER_H | 2 | #define _M68K_BARRIER_H |
3 | 3 | ||
4 | /* | ||
5 | * Force strict CPU ordering. | ||
6 | * Not really required on m68k... | ||
7 | */ | ||
8 | #define nop() do { asm volatile ("nop"); barrier(); } while (0) | 4 | #define nop() do { asm volatile ("nop"); barrier(); } while (0) |
9 | #define mb() barrier() | ||
10 | #define rmb() barrier() | ||
11 | #define wmb() barrier() | ||
12 | #define read_barrier_depends() ((void)0) | ||
13 | #define set_mb(var, value) ({ (var) = (value); wmb(); }) | ||
14 | 5 | ||
15 | #define smp_mb() barrier() | 6 | #include <asm-generic/barrier.h> |
16 | #define smp_rmb() barrier() | ||
17 | #define smp_wmb() barrier() | ||
18 | #define smp_read_barrier_depends() ((void)0) | ||
19 | 7 | ||
20 | #endif /* _M68K_BARRIER_H */ | 8 | #endif /* _M68K_BARRIER_H */ |
diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h index 67e7a78ad96b..8e213267f8e7 100644 --- a/arch/m68k/include/asm/bootinfo.h +++ b/arch/m68k/include/asm/bootinfo.h | |||
@@ -6,373 +6,23 @@ | |||
6 | ** This file is subject to the terms and conditions of the GNU General Public | 6 | ** This file is subject to the terms and conditions of the GNU General Public |
7 | ** License. See the file COPYING in the main directory of this archive | 7 | ** License. See the file COPYING in the main directory of this archive |
8 | ** for more details. | 8 | ** for more details. |
9 | ** | ||
10 | ** Created 09/29/92 by Greg Harp | ||
11 | ** | ||
12 | ** 5/2/94 Roman Hodek: | ||
13 | ** Added bi_atari part of the machine dependent union bi_un; for now it | ||
14 | ** contains just a model field to distinguish between TT and Falcon. | ||
15 | ** 26/7/96 Roman Zippel: | ||
16 | ** Renamed to setup.h; added some useful macros to allow gcc some | ||
17 | ** optimizations if possible. | ||
18 | ** 5/10/96 Geert Uytterhoeven: | ||
19 | ** Redesign of the boot information structure; renamed to bootinfo.h again | ||
20 | ** 27/11/96 Geert Uytterhoeven: | ||
21 | ** Backwards compatibility with bootinfo interface version 1.0 | ||
22 | */ | 9 | */ |
23 | 10 | ||
24 | #ifndef _M68K_BOOTINFO_H | 11 | #ifndef _M68K_BOOTINFO_H |
25 | #define _M68K_BOOTINFO_H | 12 | #define _M68K_BOOTINFO_H |
26 | 13 | ||
14 | #include <uapi/asm/bootinfo.h> | ||
27 | 15 | ||
28 | /* | ||
29 | * Bootinfo definitions | ||
30 | * | ||
31 | * This is an easily parsable and extendable structure containing all | ||
32 | * information to be passed from the bootstrap to the kernel. | ||
33 | * | ||
34 | * This way I hope to keep all future changes back/forewards compatible. | ||
35 | * Thus, keep your fingers crossed... | ||
36 | * | ||
37 | * This structure is copied right after the kernel bss by the bootstrap | ||
38 | * routine. | ||
39 | */ | ||
40 | 16 | ||
41 | #ifndef __ASSEMBLY__ | 17 | #ifndef __ASSEMBLY__ |
42 | 18 | ||
43 | struct bi_record { | 19 | #ifdef CONFIG_BOOTINFO_PROC |
44 | unsigned short tag; /* tag ID */ | 20 | extern void save_bootinfo(const struct bi_record *bi); |
45 | unsigned short size; /* size of record (in bytes) */ | 21 | #else |
46 | unsigned long data[0]; /* data */ | 22 | static inline void save_bootinfo(const struct bi_record *bi) {} |
47 | }; | ||
48 | |||
49 | #endif /* __ASSEMBLY__ */ | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Tag Definitions | ||
54 | * | ||
55 | * Machine independent tags start counting from 0x0000 | ||
56 | * Machine dependent tags start counting from 0x8000 | ||
57 | */ | ||
58 | |||
59 | #define BI_LAST 0x0000 /* last record (sentinel) */ | ||
60 | #define BI_MACHTYPE 0x0001 /* machine type (u_long) */ | ||
61 | #define BI_CPUTYPE 0x0002 /* cpu type (u_long) */ | ||
62 | #define BI_FPUTYPE 0x0003 /* fpu type (u_long) */ | ||
63 | #define BI_MMUTYPE 0x0004 /* mmu type (u_long) */ | ||
64 | #define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ | ||
65 | /* (struct mem_info) */ | ||
66 | #define BI_RAMDISK 0x0006 /* ramdisk address and size */ | ||
67 | /* (struct mem_info) */ | ||
68 | #define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ | ||
69 | /* (string) */ | ||
70 | |||
71 | /* | ||
72 | * Amiga-specific tags | ||
73 | */ | ||
74 | |||
75 | #define BI_AMIGA_MODEL 0x8000 /* model (u_long) */ | ||
76 | #define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ | ||
77 | /* (struct ConfigDev) */ | ||
78 | #define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (u_long) */ | ||
79 | #define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (u_char) */ | ||
80 | #define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (u_char) */ | ||
81 | #define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (u_long) */ | ||
82 | #define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (u_long) */ | ||
83 | #define BI_AMIGA_SERPER 0x8007 /* serial port period (u_short) */ | ||
84 | |||
85 | /* | ||
86 | * Atari-specific tags | ||
87 | */ | ||
88 | |||
89 | #define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (u_long) */ | ||
90 | #define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (u_long) */ | ||
91 | /* (values are ATARI_MACH_* defines */ | ||
92 | |||
93 | /* mch_cookie values (upper word) */ | ||
94 | #define ATARI_MCH_ST 0 | ||
95 | #define ATARI_MCH_STE 1 | ||
96 | #define ATARI_MCH_TT 2 | ||
97 | #define ATARI_MCH_FALCON 3 | ||
98 | |||
99 | /* mch_type values */ | ||
100 | #define ATARI_MACH_NORMAL 0 /* no special machine type */ | ||
101 | #define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ | ||
102 | #define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ | ||
103 | #define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ | ||
104 | |||
105 | /* | ||
106 | * VME-specific tags | ||
107 | */ | ||
108 | |||
109 | #define BI_VME_TYPE 0x8000 /* VME sub-architecture (u_long) */ | ||
110 | #define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ | ||
111 | |||
112 | /* BI_VME_TYPE codes */ | ||
113 | #define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ | ||
114 | #define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ | ||
115 | #define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */ | ||
116 | #define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ | ||
117 | #define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ | ||
118 | #define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ | ||
119 | #define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ | ||
120 | #define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ | ||
121 | #define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ | ||
122 | |||
123 | /* BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on | ||
124 | * Motorola VME boards. Contains board number, Bug version, board | ||
125 | * configuration options, etc. See include/asm/mvme16xhw.h for details. | ||
126 | */ | ||
127 | |||
128 | |||
129 | /* | ||
130 | * Macintosh-specific tags (all u_long) | ||
131 | */ | ||
132 | |||
133 | #define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ | ||
134 | #define BI_MAC_VADDR 0x8001 /* Mac video base address */ | ||
135 | #define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ | ||
136 | #define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ | ||
137 | #define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ | ||
138 | #define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ | ||
139 | #define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ | ||
140 | #define BI_MAC_BTIME 0x8007 /* Mac boot time */ | ||
141 | #define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ | ||
142 | #define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ | ||
143 | #define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ | ||
144 | #define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ | ||
145 | |||
146 | /* | ||
147 | * Macintosh hardware profile data - unused, see macintosh.h for | ||
148 | * reasonable type values | ||
149 | */ | ||
150 | |||
151 | #define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ | ||
152 | #define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ | ||
153 | #define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ | ||
154 | #define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ | ||
155 | #define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ | ||
156 | #define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ | ||
157 | #define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ | ||
158 | #define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ | ||
159 | #define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ | ||
160 | #define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ | ||
161 | #define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ | ||
162 | #define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ | ||
163 | #define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ | ||
164 | #define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE */ | ||
165 | #define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ | ||
166 | #define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ | ||
167 | #define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ | ||
168 | #define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ | ||
169 | |||
170 | /* | ||
171 | * Mac: compatibility with old booter data format (temporarily) | ||
172 | * Fields unused with the new bootinfo can be deleted now; instead of | ||
173 | * adding new fields the struct might be splitted into a hardware address | ||
174 | * part and a hardware type part | ||
175 | */ | ||
176 | |||
177 | #ifndef __ASSEMBLY__ | ||
178 | |||
179 | struct mac_booter_data | ||
180 | { | ||
181 | unsigned long videoaddr; | ||
182 | unsigned long videorow; | ||
183 | unsigned long videodepth; | ||
184 | unsigned long dimensions; | ||
185 | unsigned long args; | ||
186 | unsigned long boottime; | ||
187 | unsigned long gmtbias; | ||
188 | unsigned long bootver; | ||
189 | unsigned long videological; | ||
190 | unsigned long sccbase; | ||
191 | unsigned long id; | ||
192 | unsigned long memsize; | ||
193 | unsigned long serialmf; | ||
194 | unsigned long serialhsk; | ||
195 | unsigned long serialgpi; | ||
196 | unsigned long printmf; | ||
197 | unsigned long printhsk; | ||
198 | unsigned long printgpi; | ||
199 | unsigned long cpuid; | ||
200 | unsigned long rombase; | ||
201 | unsigned long adbdelay; | ||
202 | unsigned long timedbra; | ||
203 | }; | ||
204 | |||
205 | extern struct mac_booter_data | ||
206 | mac_bi_data; | ||
207 | |||
208 | #endif | 23 | #endif |
209 | 24 | ||
210 | /* | ||
211 | * Apollo-specific tags | ||
212 | */ | ||
213 | |||
214 | #define BI_APOLLO_MODEL 0x8000 /* model (u_long) */ | ||
215 | |||
216 | /* | ||
217 | * HP300-specific tags | ||
218 | */ | ||
219 | |||
220 | #define BI_HP300_MODEL 0x8000 /* model (u_long) */ | ||
221 | #define BI_HP300_UART_SCODE 0x8001 /* UART select code (u_long) */ | ||
222 | #define BI_HP300_UART_ADDR 0x8002 /* phys. addr of UART (u_long) */ | ||
223 | |||
224 | /* | ||
225 | * Stuff for bootinfo interface versioning | ||
226 | * | ||
227 | * At the start of kernel code, a 'struct bootversion' is located. | ||
228 | * bootstrap checks for a matching version of the interface before booting | ||
229 | * a kernel, to avoid user confusion if kernel and bootstrap don't work | ||
230 | * together :-) | ||
231 | * | ||
232 | * If incompatible changes are made to the bootinfo interface, the major | ||
233 | * number below should be stepped (and the minor reset to 0) for the | ||
234 | * appropriate machine. If a change is backward-compatible, the minor | ||
235 | * should be stepped. "Backwards-compatible" means that booting will work, | ||
236 | * but certain features may not. | ||
237 | */ | ||
238 | |||
239 | #define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ | ||
240 | #define MK_BI_VERSION(major,minor) (((major)<<16)+(minor)) | ||
241 | #define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) | ||
242 | #define BI_VERSION_MINOR(v) ((v) & 0xffff) | ||
243 | |||
244 | #ifndef __ASSEMBLY__ | ||
245 | |||
246 | struct bootversion { | ||
247 | unsigned short branch; | ||
248 | unsigned long magic; | ||
249 | struct { | ||
250 | unsigned long machtype; | ||
251 | unsigned long version; | ||
252 | } machversions[0]; | ||
253 | }; | ||
254 | |||
255 | #endif /* __ASSEMBLY__ */ | 25 | #endif /* __ASSEMBLY__ */ |
256 | 26 | ||
257 | #define AMIGA_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
258 | #define ATARI_BOOTI_VERSION MK_BI_VERSION( 2, 1 ) | ||
259 | #define MAC_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
260 | #define MVME147_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
261 | #define MVME16x_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
262 | #define BVME6000_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
263 | #define Q40_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
264 | #define HP300_BOOTI_VERSION MK_BI_VERSION( 2, 0 ) | ||
265 | |||
266 | #ifdef BOOTINFO_COMPAT_1_0 | ||
267 | |||
268 | /* | ||
269 | * Backwards compatibility with bootinfo interface version 1.0 | ||
270 | */ | ||
271 | |||
272 | #define COMPAT_AMIGA_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) | ||
273 | #define COMPAT_ATARI_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) | ||
274 | #define COMPAT_MAC_BOOTI_VERSION MK_BI_VERSION( 1, 0 ) | ||
275 | |||
276 | #include <linux/zorro.h> | ||
277 | |||
278 | #define COMPAT_NUM_AUTO 16 | ||
279 | |||
280 | struct compat_bi_Amiga { | ||
281 | int model; | ||
282 | int num_autocon; | ||
283 | struct ConfigDev autocon[COMPAT_NUM_AUTO]; | ||
284 | unsigned long chip_size; | ||
285 | unsigned char vblank; | ||
286 | unsigned char psfreq; | ||
287 | unsigned long eclock; | ||
288 | unsigned long chipset; | ||
289 | unsigned long hw_present; | ||
290 | }; | ||
291 | |||
292 | struct compat_bi_Atari { | ||
293 | unsigned long hw_present; | ||
294 | unsigned long mch_cookie; | ||
295 | }; | ||
296 | |||
297 | #ifndef __ASSEMBLY__ | ||
298 | |||
299 | struct compat_bi_Macintosh | ||
300 | { | ||
301 | unsigned long videoaddr; | ||
302 | unsigned long videorow; | ||
303 | unsigned long videodepth; | ||
304 | unsigned long dimensions; | ||
305 | unsigned long args; | ||
306 | unsigned long boottime; | ||
307 | unsigned long gmtbias; | ||
308 | unsigned long bootver; | ||
309 | unsigned long videological; | ||
310 | unsigned long sccbase; | ||
311 | unsigned long id; | ||
312 | unsigned long memsize; | ||
313 | unsigned long serialmf; | ||
314 | unsigned long serialhsk; | ||
315 | unsigned long serialgpi; | ||
316 | unsigned long printmf; | ||
317 | unsigned long printhsk; | ||
318 | unsigned long printgpi; | ||
319 | unsigned long cpuid; | ||
320 | unsigned long rombase; | ||
321 | unsigned long adbdelay; | ||
322 | unsigned long timedbra; | ||
323 | }; | ||
324 | |||
325 | #endif | ||
326 | |||
327 | struct compat_mem_info { | ||
328 | unsigned long addr; | ||
329 | unsigned long size; | ||
330 | }; | ||
331 | |||
332 | #define COMPAT_NUM_MEMINFO 4 | ||
333 | |||
334 | #define COMPAT_CPUB_68020 0 | ||
335 | #define COMPAT_CPUB_68030 1 | ||
336 | #define COMPAT_CPUB_68040 2 | ||
337 | #define COMPAT_CPUB_68060 3 | ||
338 | #define COMPAT_FPUB_68881 5 | ||
339 | #define COMPAT_FPUB_68882 6 | ||
340 | #define COMPAT_FPUB_68040 7 | ||
341 | #define COMPAT_FPUB_68060 8 | ||
342 | |||
343 | #define COMPAT_CPU_68020 (1<<COMPAT_CPUB_68020) | ||
344 | #define COMPAT_CPU_68030 (1<<COMPAT_CPUB_68030) | ||
345 | #define COMPAT_CPU_68040 (1<<COMPAT_CPUB_68040) | ||
346 | #define COMPAT_CPU_68060 (1<<COMPAT_CPUB_68060) | ||
347 | #define COMPAT_CPU_MASK (31) | ||
348 | #define COMPAT_FPU_68881 (1<<COMPAT_FPUB_68881) | ||
349 | #define COMPAT_FPU_68882 (1<<COMPAT_FPUB_68882) | ||
350 | #define COMPAT_FPU_68040 (1<<COMPAT_FPUB_68040) | ||
351 | #define COMPAT_FPU_68060 (1<<COMPAT_FPUB_68060) | ||
352 | #define COMPAT_FPU_MASK (0xfe0) | ||
353 | |||
354 | #define COMPAT_CL_SIZE (256) | ||
355 | |||
356 | struct compat_bootinfo { | ||
357 | unsigned long machtype; | ||
358 | unsigned long cputype; | ||
359 | struct compat_mem_info memory[COMPAT_NUM_MEMINFO]; | ||
360 | int num_memory; | ||
361 | unsigned long ramdisk_size; | ||
362 | unsigned long ramdisk_addr; | ||
363 | char command_line[COMPAT_CL_SIZE]; | ||
364 | union { | ||
365 | struct compat_bi_Amiga bi_ami; | ||
366 | struct compat_bi_Atari bi_ata; | ||
367 | struct compat_bi_Macintosh bi_mac; | ||
368 | } bi_un; | ||
369 | }; | ||
370 | |||
371 | #define bi_amiga bi_un.bi_ami | ||
372 | #define bi_atari bi_un.bi_ata | ||
373 | #define bi_mac bi_un.bi_mac | ||
374 | |||
375 | #endif /* BOOTINFO_COMPAT_1_0 */ | ||
376 | |||
377 | 27 | ||
378 | #endif /* _M68K_BOOTINFO_H */ | 28 | #endif /* _M68K_BOOTINFO_H */ |
diff --git a/arch/m68k/include/asm/hp300hw.h b/arch/m68k/include/asm/hp300hw.h index d998ea67c19c..64f5271dd7be 100644 --- a/arch/m68k/include/asm/hp300hw.h +++ b/arch/m68k/include/asm/hp300hw.h | |||
@@ -1,25 +1,9 @@ | |||
1 | #ifndef _M68K_HP300HW_H | 1 | #ifndef _M68K_HP300HW_H |
2 | #define _M68K_HP300HW_H | 2 | #define _M68K_HP300HW_H |
3 | 3 | ||
4 | extern unsigned long hp300_model; | 4 | #include <asm/bootinfo-hp300.h> |
5 | 5 | ||
6 | /* This information was taken from NetBSD */ | ||
7 | #define HP_320 (0) /* 16MHz 68020+HP MMU+16K external cache */ | ||
8 | #define HP_330 (1) /* 16MHz 68020+68851 MMU */ | ||
9 | #define HP_340 (2) /* 16MHz 68030 */ | ||
10 | #define HP_345 (3) /* 50MHz 68030+32K external cache */ | ||
11 | #define HP_350 (4) /* 25MHz 68020+HP MMU+32K external cache */ | ||
12 | #define HP_360 (5) /* 25MHz 68030 */ | ||
13 | #define HP_370 (6) /* 33MHz 68030+64K external cache */ | ||
14 | #define HP_375 (7) /* 50MHz 68030+32K external cache */ | ||
15 | #define HP_380 (8) /* 25MHz 68040 */ | ||
16 | #define HP_385 (9) /* 33MHz 68040 */ | ||
17 | 6 | ||
18 | #define HP_400 (10) /* 50MHz 68030+32K external cache */ | 7 | extern unsigned long hp300_model; |
19 | #define HP_425T (11) /* 25MHz 68040 - model 425t */ | ||
20 | #define HP_425S (12) /* 25MHz 68040 - model 425s */ | ||
21 | #define HP_425E (13) /* 25MHz 68040 - model 425e */ | ||
22 | #define HP_433T (14) /* 33MHz 68040 - model 433t */ | ||
23 | #define HP_433S (15) /* 33MHz 68040 - model 433s */ | ||
24 | 8 | ||
25 | #endif /* _M68K_HP300HW_H */ | 9 | #endif /* _M68K_HP300HW_H */ |
diff --git a/arch/m68k/include/asm/kexec.h b/arch/m68k/include/asm/kexec.h new file mode 100644 index 000000000000..3df97abac147 --- /dev/null +++ b/arch/m68k/include/asm/kexec.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _ASM_M68K_KEXEC_H | ||
2 | #define _ASM_M68K_KEXEC_H | ||
3 | |||
4 | #ifdef CONFIG_KEXEC | ||
5 | |||
6 | /* Maximum physical address we can use pages from */ | ||
7 | #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) | ||
8 | /* Maximum address we can reach in physical address mode */ | ||
9 | #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) | ||
10 | /* Maximum address we can use for the control code buffer */ | ||
11 | #define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) | ||
12 | |||
13 | #define KEXEC_CONTROL_PAGE_SIZE 4096 | ||
14 | |||
15 | #define KEXEC_ARCH KEXEC_ARCH_68K | ||
16 | |||
17 | #ifndef __ASSEMBLY__ | ||
18 | |||
19 | static inline void crash_setup_regs(struct pt_regs *newregs, | ||
20 | struct pt_regs *oldregs) | ||
21 | { | ||
22 | /* Dummy implementation for now */ | ||
23 | } | ||
24 | |||
25 | #endif /* __ASSEMBLY__ */ | ||
26 | |||
27 | #endif /* CONFIG_KEXEC */ | ||
28 | |||
29 | #endif /* _ASM_M68K_KEXEC_H */ | ||
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h index aeeedf8b2d25..fe3fc9ae1b69 100644 --- a/arch/m68k/include/asm/mac_via.h +++ b/arch/m68k/include/asm/mac_via.h | |||
@@ -254,6 +254,8 @@ | |||
254 | extern volatile __u8 *via1,*via2; | 254 | extern volatile __u8 *via1,*via2; |
255 | extern int rbv_present,via_alt_mapping; | 255 | extern int rbv_present,via_alt_mapping; |
256 | 256 | ||
257 | struct irq_desc; | ||
258 | |||
257 | extern void via_register_interrupts(void); | 259 | extern void via_register_interrupts(void); |
258 | extern void via_irq_enable(int); | 260 | extern void via_irq_enable(int); |
259 | extern void via_irq_disable(int); | 261 | extern void via_irq_disable(int); |
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 682a1a2ff55f..d323b2c2d07d 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h | |||
@@ -4,6 +4,9 @@ | |||
4 | #include <linux/seq_file.h> | 4 | #include <linux/seq_file.h> |
5 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | 6 | ||
7 | #include <asm/bootinfo-mac.h> | ||
8 | |||
9 | |||
7 | /* | 10 | /* |
8 | * Apple Macintoshisms | 11 | * Apple Macintoshisms |
9 | */ | 12 | */ |
@@ -74,65 +77,29 @@ struct mac_model | |||
74 | #define MAC_FLOPPY_SWIM_IOP 3 | 77 | #define MAC_FLOPPY_SWIM_IOP 3 |
75 | #define MAC_FLOPPY_AV 4 | 78 | #define MAC_FLOPPY_AV 4 |
76 | 79 | ||
77 | /* | 80 | extern struct mac_model *macintosh_config; |
78 | * Gestalt numbers | ||
79 | */ | ||
80 | 81 | ||
81 | #define MAC_MODEL_II 6 | ||
82 | #define MAC_MODEL_IIX 7 | ||
83 | #define MAC_MODEL_IICX 8 | ||
84 | #define MAC_MODEL_SE30 9 | ||
85 | #define MAC_MODEL_IICI 11 | ||
86 | #define MAC_MODEL_IIFX 13 /* And well numbered it is too */ | ||
87 | #define MAC_MODEL_IISI 18 | ||
88 | #define MAC_MODEL_LC 19 | ||
89 | #define MAC_MODEL_Q900 20 | ||
90 | #define MAC_MODEL_PB170 21 | ||
91 | #define MAC_MODEL_Q700 22 | ||
92 | #define MAC_MODEL_CLII 23 /* aka: P200 */ | ||
93 | #define MAC_MODEL_PB140 25 | ||
94 | #define MAC_MODEL_Q950 26 /* aka: WGS95 */ | ||
95 | #define MAC_MODEL_LCIII 27 /* aka: P450 */ | ||
96 | #define MAC_MODEL_PB210 29 | ||
97 | #define MAC_MODEL_C650 30 | ||
98 | #define MAC_MODEL_PB230 32 | ||
99 | #define MAC_MODEL_PB180 33 | ||
100 | #define MAC_MODEL_PB160 34 | ||
101 | #define MAC_MODEL_Q800 35 /* aka: WGS80 */ | ||
102 | #define MAC_MODEL_Q650 36 | ||
103 | #define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ | ||
104 | #define MAC_MODEL_PB250 38 | ||
105 | #define MAC_MODEL_IIVI 44 | ||
106 | #define MAC_MODEL_P600 45 /* aka: P600CD */ | ||
107 | #define MAC_MODEL_IIVX 48 | ||
108 | #define MAC_MODEL_CCL 49 /* aka: P250 */ | ||
109 | #define MAC_MODEL_PB165C 50 | ||
110 | #define MAC_MODEL_C610 52 /* aka: WGS60 */ | ||
111 | #define MAC_MODEL_Q610 53 | ||
112 | #define MAC_MODEL_PB145 54 /* aka: PB145B */ | ||
113 | #define MAC_MODEL_P520 56 /* aka: LC520 */ | ||
114 | #define MAC_MODEL_C660 60 | ||
115 | #define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ | ||
116 | #define MAC_MODEL_PB180C 71 | ||
117 | #define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ | ||
118 | #define MAC_MODEL_PB270C 77 | ||
119 | #define MAC_MODEL_Q840 78 | ||
120 | #define MAC_MODEL_P550 80 /* aka: LC550, P560 */ | ||
121 | #define MAC_MODEL_CCLII 83 /* aka: P275 */ | ||
122 | #define MAC_MODEL_PB165 84 | ||
123 | #define MAC_MODEL_PB190 85 /* aka: PB190CS */ | ||
124 | #define MAC_MODEL_TV 88 | ||
125 | #define MAC_MODEL_P475 89 /* aka: LC475, P476 */ | ||
126 | #define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ | ||
127 | #define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ | ||
128 | #define MAC_MODEL_Q605 94 | ||
129 | #define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ | ||
130 | #define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ | ||
131 | #define MAC_MODEL_P588 99 /* aka: LC580, P580 */ | ||
132 | #define MAC_MODEL_PB280 102 | ||
133 | #define MAC_MODEL_PB280C 103 | ||
134 | #define MAC_MODEL_PB150 115 | ||
135 | 82 | ||
136 | extern struct mac_model *macintosh_config; | 83 | /* |
84 | * Internal representation of the Mac hardware, filled in from bootinfo | ||
85 | */ | ||
86 | |||
87 | struct mac_booter_data | ||
88 | { | ||
89 | unsigned long videoaddr; | ||
90 | unsigned long videorow; | ||
91 | unsigned long videodepth; | ||
92 | unsigned long dimensions; | ||
93 | unsigned long boottime; | ||
94 | unsigned long gmtbias; | ||
95 | unsigned long videological; | ||
96 | unsigned long sccbase; | ||
97 | unsigned long id; | ||
98 | unsigned long memsize; | ||
99 | unsigned long cpuid; | ||
100 | unsigned long rombase; | ||
101 | }; | ||
102 | |||
103 | extern struct mac_booter_data mac_bi_data; | ||
137 | 104 | ||
138 | #endif | 105 | #endif |
diff --git a/arch/m68k/include/asm/mc146818rtc.h b/arch/m68k/include/asm/mc146818rtc.h index 9f70a01f73dc..05b43bf5cdf3 100644 --- a/arch/m68k/include/asm/mc146818rtc.h +++ b/arch/m68k/include/asm/mc146818rtc.h | |||
@@ -10,16 +10,16 @@ | |||
10 | 10 | ||
11 | #include <asm/atarihw.h> | 11 | #include <asm/atarihw.h> |
12 | 12 | ||
13 | #define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) | 13 | #define ATARI_RTC_PORT(x) (TT_RTC_BAS + 2*(x)) |
14 | #define RTC_ALWAYS_BCD 0 | 14 | #define RTC_ALWAYS_BCD 0 |
15 | 15 | ||
16 | #define CMOS_READ(addr) ({ \ | 16 | #define CMOS_READ(addr) ({ \ |
17 | atari_outb_p((addr),RTC_PORT(0)); \ | 17 | atari_outb_p((addr), ATARI_RTC_PORT(0)); \ |
18 | atari_inb_p(RTC_PORT(1)); \ | 18 | atari_inb_p(ATARI_RTC_PORT(1)); \ |
19 | }) | 19 | }) |
20 | #define CMOS_WRITE(val, addr) ({ \ | 20 | #define CMOS_WRITE(val, addr) ({ \ |
21 | atari_outb_p((addr),RTC_PORT(0)); \ | 21 | atari_outb_p((addr), ATARI_RTC_PORT(0)); \ |
22 | atari_outb_p((val),RTC_PORT(1)); \ | 22 | atari_outb_p((val), ATARI_RTC_PORT(1)); \ |
23 | }) | 23 | }) |
24 | #endif /* CONFIG_ATARI */ | 24 | #endif /* CONFIG_ATARI */ |
25 | 25 | ||
diff --git a/arch/m68k/include/asm/mvme16xhw.h b/arch/m68k/include/asm/mvme16xhw.h index 6117f56653d2..1eb89de631e5 100644 --- a/arch/m68k/include/asm/mvme16xhw.h +++ b/arch/m68k/include/asm/mvme16xhw.h | |||
@@ -3,23 +3,6 @@ | |||
3 | 3 | ||
4 | #include <asm/irq.h> | 4 | #include <asm/irq.h> |
5 | 5 | ||
6 | /* Board ID data structure - pointer to this retrieved from Bug by head.S */ | ||
7 | |||
8 | /* Note, bytes 12 and 13 are board no in BCD (0162,0166,0167,0177,etc) */ | ||
9 | |||
10 | extern long mvme_bdid_ptr; | ||
11 | |||
12 | typedef struct { | ||
13 | char bdid[4]; | ||
14 | u_char rev, mth, day, yr; | ||
15 | u_short size, reserved; | ||
16 | u_short brdno; | ||
17 | char brdsuffix[2]; | ||
18 | u_long options; | ||
19 | u_short clun, dlun, ctype, dnum; | ||
20 | u_long option2; | ||
21 | } t_bdid, *p_bdid; | ||
22 | |||
23 | 6 | ||
24 | typedef struct { | 7 | typedef struct { |
25 | u_char ack_icr, | 8 | u_char ack_icr, |
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h index 65e78a2dad64..8f2023f8c1c4 100644 --- a/arch/m68k/include/asm/setup.h +++ b/arch/m68k/include/asm/setup.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #ifndef _M68K_SETUP_H | 22 | #ifndef _M68K_SETUP_H |
23 | #define _M68K_SETUP_H | 23 | #define _M68K_SETUP_H |
24 | 24 | ||
25 | #include <uapi/asm/bootinfo.h> | ||
25 | #include <uapi/asm/setup.h> | 26 | #include <uapi/asm/setup.h> |
26 | 27 | ||
27 | 28 | ||
@@ -297,14 +298,14 @@ extern int m68k_is040or060; | |||
297 | #define NUM_MEMINFO 4 | 298 | #define NUM_MEMINFO 4 |
298 | 299 | ||
299 | #ifndef __ASSEMBLY__ | 300 | #ifndef __ASSEMBLY__ |
300 | struct mem_info { | 301 | struct m68k_mem_info { |
301 | unsigned long addr; /* physical address of memory chunk */ | 302 | unsigned long addr; /* physical address of memory chunk */ |
302 | unsigned long size; /* length of memory chunk (in bytes) */ | 303 | unsigned long size; /* length of memory chunk (in bytes) */ |
303 | }; | 304 | }; |
304 | 305 | ||
305 | extern int m68k_num_memory; /* # of memory blocks found (and used) */ | 306 | extern int m68k_num_memory; /* # of memory blocks found (and used) */ |
306 | extern int m68k_realnum_memory; /* real # of memory blocks found */ | 307 | extern int m68k_realnum_memory; /* real # of memory blocks found */ |
307 | extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */ | 308 | extern struct m68k_mem_info m68k_memory[NUM_MEMINFO];/* memory description */ |
308 | #endif | 309 | #endif |
309 | 310 | ||
310 | #endif /* _M68K_SETUP_H */ | 311 | #endif /* _M68K_SETUP_H */ |
diff --git a/arch/m68k/include/asm/timex.h b/arch/m68k/include/asm/timex.h index 6759dad954f6..efc1f4892357 100644 --- a/arch/m68k/include/asm/timex.h +++ b/arch/m68k/include/asm/timex.h | |||
@@ -28,4 +28,14 @@ static inline cycles_t get_cycles(void) | |||
28 | return 0; | 28 | return 0; |
29 | } | 29 | } |
30 | 30 | ||
31 | extern unsigned long (*mach_random_get_entropy)(void); | ||
32 | |||
33 | static inline unsigned long random_get_entropy(void) | ||
34 | { | ||
35 | if (mach_random_get_entropy) | ||
36 | return mach_random_get_entropy(); | ||
37 | return 0; | ||
38 | } | ||
39 | #define random_get_entropy random_get_entropy | ||
40 | |||
31 | #endif | 41 | #endif |
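The new hook lets a machine type supply a better entropy source than the default of 0: platform setup assigns mach_random_get_entropy, and random_get_entropy() forwards to it. A sketch under the assumption that a cheap, fast-changing value such as jiffies stands in for a real free-running hardware counter:

	#include <linux/init.h>
	#include <linux/jiffies.h>
	#include <linux/timex.h>

	/* hypothetical platform hook; jiffies is only a stand-in here */
	static unsigned long example_mach_entropy(void)
	{
		return jiffies;
	}

	static void __init example_platform_config(void)
	{
		mach_random_get_entropy = example_mach_entropy;
	}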
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index 1fef45ada097..6a2d257bdfb2 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild | |||
@@ -11,6 +11,14 @@ generic-y += termbits.h | |||
11 | generic-y += termios.h | 11 | generic-y += termios.h |
12 | 12 | ||
13 | header-y += a.out.h | 13 | header-y += a.out.h |
14 | header-y += bootinfo.h | ||
15 | header-y += bootinfo-amiga.h | ||
16 | header-y += bootinfo-apollo.h | ||
17 | header-y += bootinfo-atari.h | ||
18 | header-y += bootinfo-hp300.h | ||
19 | header-y += bootinfo-mac.h | ||
20 | header-y += bootinfo-q40.h | ||
21 | header-y += bootinfo-vme.h | ||
14 | header-y += byteorder.h | 22 | header-y += byteorder.h |
15 | header-y += cachectl.h | 23 | header-y += cachectl.h |
16 | header-y += fcntl.h | 24 | header-y += fcntl.h |
diff --git a/arch/m68k/include/uapi/asm/bootinfo-amiga.h b/arch/m68k/include/uapi/asm/bootinfo-amiga.h new file mode 100644 index 000000000000..daad3c58d2da --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-amiga.h | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-amiga.h -- Amiga-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_AMIGA_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_AMIGA_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * Amiga-specific tags | ||
11 | */ | ||
12 | |||
13 | #define BI_AMIGA_MODEL 0x8000 /* model (__be32) */ | ||
14 | #define BI_AMIGA_AUTOCON 0x8001 /* AutoConfig device */ | ||
15 | /* (AmigaOS struct ConfigDev) */ | ||
16 | #define BI_AMIGA_CHIP_SIZE 0x8002 /* size of Chip RAM (__be32) */ | ||
17 | #define BI_AMIGA_VBLANK 0x8003 /* VBLANK frequency (__u8) */ | ||
18 | #define BI_AMIGA_PSFREQ 0x8004 /* power supply frequency (__u8) */ | ||
19 | #define BI_AMIGA_ECLOCK 0x8005 /* EClock frequency (__be32) */ | ||
20 | #define BI_AMIGA_CHIPSET 0x8006 /* native chipset present (__be32) */ | ||
21 | #define BI_AMIGA_SERPER 0x8007 /* serial port period (__be16) */ | ||
22 | |||
23 | |||
24 | /* | ||
25 | * Amiga models (BI_AMIGA_MODEL) | ||
26 | */ | ||
27 | |||
28 | #define AMI_UNKNOWN 0 | ||
29 | #define AMI_500 1 | ||
30 | #define AMI_500PLUS 2 | ||
31 | #define AMI_600 3 | ||
32 | #define AMI_1000 4 | ||
33 | #define AMI_1200 5 | ||
34 | #define AMI_2000 6 | ||
35 | #define AMI_2500 7 | ||
36 | #define AMI_3000 8 | ||
37 | #define AMI_3000T 9 | ||
38 | #define AMI_3000PLUS 10 | ||
39 | #define AMI_4000 11 | ||
40 | #define AMI_4000T 12 | ||
41 | #define AMI_CDTV 13 | ||
42 | #define AMI_CD32 14 | ||
43 | #define AMI_DRACO 15 | ||
44 | |||
45 | |||
46 | /* | ||
47 | * Amiga chipsets (BI_AMIGA_CHIPSET) | ||
48 | */ | ||
49 | |||
50 | #define CS_STONEAGE 0 | ||
51 | #define CS_OCS 1 | ||
52 | #define CS_ECS 2 | ||
53 | #define CS_AGA 3 | ||
54 | |||
55 | |||
56 | /* | ||
57 | * Latest Amiga bootinfo version | ||
58 | */ | ||
59 | |||
60 | #define AMIGA_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
61 | |||
62 | |||
63 | #endif /* _UAPI_ASM_M68K_BOOTINFO_AMIGA_H */ | ||
diff --git a/arch/m68k/include/uapi/asm/bootinfo-apollo.h b/arch/m68k/include/uapi/asm/bootinfo-apollo.h new file mode 100644 index 000000000000..a93e0af1c6fe --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-apollo.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-apollo.h -- Apollo-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_APOLLO_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_APOLLO_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * Apollo-specific tags | ||
11 | */ | ||
12 | |||
13 | #define BI_APOLLO_MODEL 0x8000 /* model (__be32) */ | ||
14 | |||
15 | |||
16 | /* | ||
17 | * Apollo models (BI_APOLLO_MODEL) | ||
18 | */ | ||
19 | |||
20 | #define APOLLO_UNKNOWN 0 | ||
21 | #define APOLLO_DN3000 1 | ||
22 | #define APOLLO_DN3010 2 | ||
23 | #define APOLLO_DN3500 3 | ||
24 | #define APOLLO_DN4000 4 | ||
25 | #define APOLLO_DN4500 5 | ||
26 | |||
27 | |||
28 | #endif /* _UAPI_ASM_M68K_BOOTINFO_APOLLO_H */ | ||
diff --git a/arch/m68k/include/uapi/asm/bootinfo-atari.h b/arch/m68k/include/uapi/asm/bootinfo-atari.h new file mode 100644 index 000000000000..a817854049bb --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-atari.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-atari.h -- Atari-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_ATARI_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_ATARI_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * Atari-specific tags | ||
11 | */ | ||
12 | |||
13 | #define BI_ATARI_MCH_COOKIE 0x8000 /* _MCH cookie from TOS (__be32) */ | ||
14 | #define BI_ATARI_MCH_TYPE 0x8001 /* special machine type (__be32) */ | ||
15 | |||
16 | |||
17 | /* | ||
18 | * mch_cookie values (upper word of BI_ATARI_MCH_COOKIE) | ||
19 | */ | ||
20 | |||
21 | #define ATARI_MCH_ST 0 | ||
22 | #define ATARI_MCH_STE 1 | ||
23 | #define ATARI_MCH_TT 2 | ||
24 | #define ATARI_MCH_FALCON 3 | ||
25 | |||
26 | |||
27 | /* | ||
28 | * Atari machine types (BI_ATARI_MCH_TYPE) | ||
29 | */ | ||
30 | |||
31 | #define ATARI_MACH_NORMAL 0 /* no special machine type */ | ||
32 | #define ATARI_MACH_MEDUSA 1 /* Medusa 040 */ | ||
33 | #define ATARI_MACH_HADES 2 /* Hades 040 or 060 */ | ||
34 | #define ATARI_MACH_AB40 3 /* Afterburner040 on Falcon */ | ||
35 | |||
36 | |||
37 | /* | ||
38 | * Latest Atari bootinfo version | ||
39 | */ | ||
40 | |||
41 | #define ATARI_BOOTI_VERSION MK_BI_VERSION(2, 1) | ||
42 | |||
43 | |||
44 | #endif /* _UAPI_ASM_M68K_BOOTINFO_ATARI_H */ | ||
diff --git a/arch/m68k/include/uapi/asm/bootinfo-hp300.h b/arch/m68k/include/uapi/asm/bootinfo-hp300.h new file mode 100644 index 000000000000..c90cb71ed89a --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-hp300.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-hp300.h -- HP9000/300-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_HP300_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_HP300_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * HP9000/300-specific tags | ||
11 | */ | ||
12 | |||
13 | #define BI_HP300_MODEL 0x8000 /* model (__be32) */ | ||
14 | #define BI_HP300_UART_SCODE 0x8001 /* UART select code (__be32) */ | ||
15 | #define BI_HP300_UART_ADDR 0x8002 /* phys. addr of UART (__be32) */ | ||
16 | |||
17 | |||
18 | /* | ||
19 | * HP9000/300 and /400 models (BI_HP300_MODEL) | ||
20 | * | ||
21 | * This information was taken from NetBSD | ||
22 | */ | ||
23 | |||
24 | #define HP_320 0 /* 16MHz 68020+HP MMU+16K external cache */ | ||
25 | #define HP_330 1 /* 16MHz 68020+68851 MMU */ | ||
26 | #define HP_340 2 /* 16MHz 68030 */ | ||
27 | #define HP_345 3 /* 50MHz 68030+32K external cache */ | ||
28 | #define HP_350 4 /* 25MHz 68020+HP MMU+32K external cache */ | ||
29 | #define HP_360 5 /* 25MHz 68030 */ | ||
30 | #define HP_370 6 /* 33MHz 68030+64K external cache */ | ||
31 | #define HP_375 7 /* 50MHz 68030+32K external cache */ | ||
32 | #define HP_380 8 /* 25MHz 68040 */ | ||
33 | #define HP_385 9 /* 33MHz 68040 */ | ||
34 | |||
35 | #define HP_400 10 /* 50MHz 68030+32K external cache */ | ||
36 | #define HP_425T 11 /* 25MHz 68040 - model 425t */ | ||
37 | #define HP_425S 12 /* 25MHz 68040 - model 425s */ | ||
38 | #define HP_425E 13 /* 25MHz 68040 - model 425e */ | ||
39 | #define HP_433T 14 /* 33MHz 68040 - model 433t */ | ||
40 | #define HP_433S 15 /* 33MHz 68040 - model 433s */ | ||
41 | |||
42 | |||
43 | /* | ||
44 | * Latest HP9000/300 bootinfo version | ||
45 | */ | ||
46 | |||
47 | #define HP300_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
48 | |||
49 | |||
50 | #endif /* _UAPI_ASM_M68K_BOOTINFO_HP300_H */ | ||
diff --git a/arch/m68k/include/uapi/asm/bootinfo-mac.h b/arch/m68k/include/uapi/asm/bootinfo-mac.h new file mode 100644 index 000000000000..b44ff73898a9 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-mac.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-mac.h -- Macintosh-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_MAC_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_MAC_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * Macintosh-specific tags (all __be32) | ||
11 | */ | ||
12 | |||
13 | #define BI_MAC_MODEL 0x8000 /* Mac Gestalt ID (model type) */ | ||
14 | #define BI_MAC_VADDR 0x8001 /* Mac video base address */ | ||
15 | #define BI_MAC_VDEPTH 0x8002 /* Mac video depth */ | ||
16 | #define BI_MAC_VROW 0x8003 /* Mac video rowbytes */ | ||
17 | #define BI_MAC_VDIM 0x8004 /* Mac video dimensions */ | ||
18 | #define BI_MAC_VLOGICAL 0x8005 /* Mac video logical base */ | ||
19 | #define BI_MAC_SCCBASE 0x8006 /* Mac SCC base address */ | ||
20 | #define BI_MAC_BTIME 0x8007 /* Mac boot time */ | ||
21 | #define BI_MAC_GMTBIAS 0x8008 /* Mac GMT timezone offset */ | ||
22 | #define BI_MAC_MEMSIZE 0x8009 /* Mac RAM size (sanity check) */ | ||
23 | #define BI_MAC_CPUID 0x800a /* Mac CPU type (sanity check) */ | ||
24 | #define BI_MAC_ROMBASE 0x800b /* Mac system ROM base address */ | ||
25 | |||
26 | |||
27 | /* | ||
28 | * Macintosh hardware profile data - unused, see macintosh.h for | ||
29 | * reasonable type values | ||
30 | */ | ||
31 | |||
32 | #define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ | ||
33 | #define BI_MAC_VIA2BASE 0x8011 /* Mac VIA2 base address (type varies) */ | ||
34 | #define BI_MAC_VIA2TYPE 0x8012 /* Mac VIA2 type (VIA, RBV, OSS) */ | ||
35 | #define BI_MAC_ADBTYPE 0x8013 /* Mac ADB interface type */ | ||
36 | #define BI_MAC_ASCBASE 0x8014 /* Mac Apple Sound Chip base address */ | ||
37 | #define BI_MAC_SCSI5380 0x8015 /* Mac NCR 5380 SCSI (base address, multi) */ | ||
38 | #define BI_MAC_SCSIDMA 0x8016 /* Mac SCSI DMA (base address) */ | ||
39 | #define BI_MAC_SCSI5396 0x8017 /* Mac NCR 53C96 SCSI (base address, multi) */ | ||
40 | #define BI_MAC_IDETYPE 0x8018 /* Mac IDE interface type */ | ||
41 | #define BI_MAC_IDEBASE 0x8019 /* Mac IDE interface base address */ | ||
42 | #define BI_MAC_NUBUS 0x801a /* Mac Nubus type (none, regular, pseudo) */ | ||
43 | #define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */ | ||
44 | #define BI_MAC_SCCTYPE 0x801c /* Mac SCC serial type (normal, IOP) */ | ||
45 | #define BI_MAC_ETHTYPE 0x801d /* Mac builtin ethernet type (Sonic, MACE) */ | ||

46 | #define BI_MAC_ETHBASE 0x801e /* Mac builtin ethernet base address */ | ||
47 | #define BI_MAC_PMU 0x801f /* Mac power management / poweroff hardware */ | ||
48 | #define BI_MAC_IOP_SWIM 0x8020 /* Mac SWIM floppy IOP */ | ||
49 | #define BI_MAC_IOP_ADB 0x8021 /* Mac ADB IOP */ | ||
50 | |||
51 | |||
52 | /* | ||
53 | * Macintosh Gestalt numbers (BI_MAC_MODEL) | ||
54 | */ | ||
55 | |||
56 | #define MAC_MODEL_II 6 | ||
57 | #define MAC_MODEL_IIX 7 | ||
58 | #define MAC_MODEL_IICX 8 | ||
59 | #define MAC_MODEL_SE30 9 | ||
60 | #define MAC_MODEL_IICI 11 | ||
61 | #define MAC_MODEL_IIFX 13 /* And well numbered it is too */ | ||
62 | #define MAC_MODEL_IISI 18 | ||
63 | #define MAC_MODEL_LC 19 | ||
64 | #define MAC_MODEL_Q900 20 | ||
65 | #define MAC_MODEL_PB170 21 | ||
66 | #define MAC_MODEL_Q700 22 | ||
67 | #define MAC_MODEL_CLII 23 /* aka: P200 */ | ||
68 | #define MAC_MODEL_PB140 25 | ||
69 | #define MAC_MODEL_Q950 26 /* aka: WGS95 */ | ||
70 | #define MAC_MODEL_LCIII 27 /* aka: P450 */ | ||
71 | #define MAC_MODEL_PB210 29 | ||
72 | #define MAC_MODEL_C650 30 | ||
73 | #define MAC_MODEL_PB230 32 | ||
74 | #define MAC_MODEL_PB180 33 | ||
75 | #define MAC_MODEL_PB160 34 | ||
76 | #define MAC_MODEL_Q800 35 /* aka: WGS80 */ | ||
77 | #define MAC_MODEL_Q650 36 | ||
78 | #define MAC_MODEL_LCII 37 /* aka: P400/405/410/430 */ | ||
79 | #define MAC_MODEL_PB250 38 | ||
80 | #define MAC_MODEL_IIVI 44 | ||
81 | #define MAC_MODEL_P600 45 /* aka: P600CD */ | ||
82 | #define MAC_MODEL_IIVX 48 | ||
83 | #define MAC_MODEL_CCL 49 /* aka: P250 */ | ||
84 | #define MAC_MODEL_PB165C 50 | ||
85 | #define MAC_MODEL_C610 52 /* aka: WGS60 */ | ||
86 | #define MAC_MODEL_Q610 53 | ||
87 | #define MAC_MODEL_PB145 54 /* aka: PB145B */ | ||
88 | #define MAC_MODEL_P520 56 /* aka: LC520 */ | ||
89 | #define MAC_MODEL_C660 60 | ||
90 | #define MAC_MODEL_P460 62 /* aka: LCIII+, P466/P467 */ | ||
91 | #define MAC_MODEL_PB180C 71 | ||
92 | #define MAC_MODEL_PB520 72 /* aka: PB520C, PB540, PB540C, PB550C */ | ||
93 | #define MAC_MODEL_PB270C 77 | ||
94 | #define MAC_MODEL_Q840 78 | ||
95 | #define MAC_MODEL_P550 80 /* aka: LC550, P560 */ | ||
96 | #define MAC_MODEL_CCLII 83 /* aka: P275 */ | ||
97 | #define MAC_MODEL_PB165 84 | ||
98 | #define MAC_MODEL_PB190 85 /* aka: PB190CS */ | ||
99 | #define MAC_MODEL_TV 88 | ||
100 | #define MAC_MODEL_P475 89 /* aka: LC475, P476 */ | ||
101 | #define MAC_MODEL_P475F 90 /* aka: P475 w/ FPU (no LC040) */ | ||
102 | #define MAC_MODEL_P575 92 /* aka: LC575, P577/P578 */ | ||
103 | #define MAC_MODEL_Q605 94 | ||
104 | #define MAC_MODEL_Q605_ACC 95 /* Q605 accelerated to 33 MHz */ | ||
105 | #define MAC_MODEL_Q630 98 /* aka: LC630, P630/631/635/636/637/638/640 */ | ||
106 | #define MAC_MODEL_P588 99 /* aka: LC580, P580 */ | ||
107 | #define MAC_MODEL_PB280 102 | ||
108 | #define MAC_MODEL_PB280C 103 | ||
109 | #define MAC_MODEL_PB150 115 | ||
110 | |||
111 | |||
112 | /* | ||
113 | * Latest Macintosh bootinfo version | ||
114 | */ | ||
115 | |||
116 | #define MAC_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
117 | |||
118 | |||
119 | #endif /* _UAPI_ASM_M68K_BOOTINFO_MAC_H */ | ||
diff --git a/arch/m68k/include/uapi/asm/bootinfo-q40.h b/arch/m68k/include/uapi/asm/bootinfo-q40.h new file mode 100644 index 000000000000..c79fea7e555b --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-q40.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-q40.h -- Q40-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_Q40_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_Q40_H | ||
7 | |||
8 | |||
9 | /* | ||
10 | * Latest Q40 bootinfo version | ||
11 | */ | ||
12 | |||
13 | #define Q40_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
14 | |||
15 | |||
16 | #endif /* _UAPI_ASM_M68K_BOOTINFO_Q40_H */ | ||
diff --git a/arch/m68k/include/uapi/asm/bootinfo-vme.h b/arch/m68k/include/uapi/asm/bootinfo-vme.h new file mode 100644 index 000000000000..a135eb41d672 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo-vme.h | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | ** asm/bootinfo-vme.h -- VME-specific boot information definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef _UAPI_ASM_M68K_BOOTINFO_VME_H | ||
6 | #define _UAPI_ASM_M68K_BOOTINFO_VME_H | ||
7 | |||
8 | |||
9 | #include <linux/types.h> | ||
10 | |||
11 | |||
12 | /* | ||
13 | * VME-specific tags | ||
14 | */ | ||
15 | |||
16 | #define BI_VME_TYPE 0x8000 /* VME sub-architecture (__be32) */ | ||
17 | #define BI_VME_BRDINFO 0x8001 /* VME board information (struct) */ | ||
18 | |||
19 | |||
20 | /* | ||
21 | * VME models (BI_VME_TYPE) | ||
22 | */ | ||
23 | |||
24 | #define VME_TYPE_TP34V 0x0034 /* Tadpole TP34V */ | ||
25 | #define VME_TYPE_MVME147 0x0147 /* Motorola MVME147 */ | ||
26 | #define VME_TYPE_MVME162 0x0162 /* Motorola MVME162 */ | ||
27 | #define VME_TYPE_MVME166 0x0166 /* Motorola MVME166 */ | ||
28 | #define VME_TYPE_MVME167 0x0167 /* Motorola MVME167 */ | ||
29 | #define VME_TYPE_MVME172 0x0172 /* Motorola MVME172 */ | ||
30 | #define VME_TYPE_MVME177 0x0177 /* Motorola MVME177 */ | ||
31 | #define VME_TYPE_BVME4000 0x4000 /* BVM Ltd. BVME4000 */ | ||
32 | #define VME_TYPE_BVME6000 0x6000 /* BVM Ltd. BVME6000 */ | ||
33 | |||
34 | |||
35 | #ifndef __ASSEMBLY__ | ||
36 | |||
37 | /* | ||
38 | * Board ID data structure - pointer to this retrieved from Bug by head.S | ||
39 | * | ||
40 | * BI_VME_BRDINFO is a 32 byte struct as returned by the Bug code on | ||
41 | * Motorola VME boards. Contains board number, Bug version, board | ||
42 | * configuration options, etc. | ||
43 | * | ||
44 | * Note: bytes 12 and 13 are the board number in BCD (0162, 0166, 0167, 0177, etc.) | ||
45 | */ | ||
46 | |||
47 | typedef struct { | ||
48 | char bdid[4]; | ||
49 | __u8 rev, mth, day, yr; | ||
50 | __be16 size, reserved; | ||
51 | __be16 brdno; | ||
52 | char brdsuffix[2]; | ||
53 | __be32 options; | ||
54 | __be16 clun, dlun, ctype, dnum; | ||
55 | __be32 option2; | ||
56 | } t_bdid, *p_bdid; | ||
57 | |||
58 | #endif /* __ASSEMBLY__ */ | ||
59 | |||
60 | |||
61 | /* | ||
62 | * Latest VME bootinfo versions | ||
63 | */ | ||
64 | |||
65 | #define MVME147_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
66 | #define MVME16x_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
67 | #define BVME6000_BOOTI_VERSION MK_BI_VERSION(2, 0) | ||
68 | |||
69 | |||
70 | #endif /* _UAPI_ASM_M68K_BOOTINFO_VME_H */ | ||
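Since the brdno field above is stored in BCD, a consumer of BI_VME_BRDINFO has to convert it before comparing it against a decimal board number. A minimal sketch of such a decode, assuming the t_bdid layout above (the helper name is illustrative and not part of this patch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Convert the big-endian BCD board number (e.g. 0x0167 for an MVME167)
 * into its decimal value (167). Illustrative helper only. */
static unsigned int vme_brdno_to_dec(__be16 brdno)
{
	unsigned int bcd = be16_to_cpu(brdno);

	return ((bcd >> 12) & 0xf) * 1000 +
	       ((bcd >>  8) & 0xf) * 100 +
	       ((bcd >>  4) & 0xf) * 10 +
	       (bcd & 0xf);
}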
diff --git a/arch/m68k/include/uapi/asm/bootinfo.h b/arch/m68k/include/uapi/asm/bootinfo.h new file mode 100644 index 000000000000..cdeb26a015b0 --- /dev/null +++ b/arch/m68k/include/uapi/asm/bootinfo.h | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * asm/bootinfo.h -- Definition of the Linux/m68k boot information structure | ||
3 | * | ||
4 | * Copyright 1992 by Greg Harp | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file COPYING in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef _UAPI_ASM_M68K_BOOTINFO_H | ||
12 | #define _UAPI_ASM_M68K_BOOTINFO_H | ||
13 | |||
14 | |||
15 | #include <linux/types.h> | ||
16 | |||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | /* | ||
21 | * Bootinfo definitions | ||
22 | * | ||
23 | * This is an easily parsable and extendable structure containing all | ||
24 | * information to be passed from the bootstrap to the kernel. | ||
25 | * | ||
26 | * This way I hope to keep all future changes back/forwards compatible. | ||
27 | * Thus, keep your fingers crossed... | ||
28 | * | ||
29 | * This structure is copied right after the kernel by the bootstrap | ||
30 | * routine. | ||
31 | */ | ||
32 | |||
33 | struct bi_record { | ||
34 | __be16 tag; /* tag ID */ | ||
35 | __be16 size; /* size of record (in bytes) */ | ||
36 | __be32 data[0]; /* tag-specific payload */ | ||
37 | }; | ||
38 | |||
39 | |||
40 | struct mem_info { | ||
41 | __be32 addr; /* physical address of memory chunk */ | ||
42 | __be32 size; /* length of memory chunk (in bytes) */ | ||
43 | }; | ||
44 | |||
45 | #endif /* __ASSEMBLY__ */ | ||
46 | |||
47 | |||
48 | /* | ||
49 | * Tag Definitions | ||
50 | * | ||
51 | * Machine independent tags start counting from 0x0000 | ||
52 | * Machine dependent tags start counting from 0x8000 | ||
53 | */ | ||
54 | |||
55 | #define BI_LAST 0x0000 /* last record (sentinel) */ | ||
56 | #define BI_MACHTYPE 0x0001 /* machine type (__be32) */ | ||
57 | #define BI_CPUTYPE 0x0002 /* cpu type (__be32) */ | ||
58 | #define BI_FPUTYPE 0x0003 /* fpu type (__be32) */ | ||
59 | #define BI_MMUTYPE 0x0004 /* mmu type (__be32) */ | ||
60 | #define BI_MEMCHUNK 0x0005 /* memory chunk address and size */ | ||
61 | /* (struct mem_info) */ | ||
62 | #define BI_RAMDISK 0x0006 /* ramdisk address and size */ | ||
63 | /* (struct mem_info) */ | ||
64 | #define BI_COMMAND_LINE 0x0007 /* kernel command line parameters */ | ||
65 | /* (string) */ | ||
66 | |||
67 | |||
68 | /* | ||
69 | * Linux/m68k Architectures (BI_MACHTYPE) | ||
70 | */ | ||
71 | |||
72 | #define MACH_AMIGA 1 | ||
73 | #define MACH_ATARI 2 | ||
74 | #define MACH_MAC 3 | ||
75 | #define MACH_APOLLO 4 | ||
76 | #define MACH_SUN3 5 | ||
77 | #define MACH_MVME147 6 | ||
78 | #define MACH_MVME16x 7 | ||
79 | #define MACH_BVME6000 8 | ||
80 | #define MACH_HP300 9 | ||
81 | #define MACH_Q40 10 | ||
82 | #define MACH_SUN3X 11 | ||
83 | #define MACH_M54XX 12 | ||
84 | |||
85 | |||
86 | /* | ||
87 | * CPU, FPU and MMU types (BI_CPUTYPE, BI_FPUTYPE, BI_MMUTYPE) | ||
88 | * | ||
89 | * Note: we may rely on the following equalities: | ||
90 | * | ||
91 | * CPU_68020 == MMU_68851 | ||
92 | * CPU_68030 == MMU_68030 | ||
93 | * CPU_68040 == FPU_68040 == MMU_68040 | ||
94 | * CPU_68060 == FPU_68060 == MMU_68060 | ||
95 | */ | ||
96 | |||
97 | #define CPUB_68020 0 | ||
98 | #define CPUB_68030 1 | ||
99 | #define CPUB_68040 2 | ||
100 | #define CPUB_68060 3 | ||
101 | #define CPUB_COLDFIRE 4 | ||
102 | |||
103 | #define CPU_68020 (1 << CPUB_68020) | ||
104 | #define CPU_68030 (1 << CPUB_68030) | ||
105 | #define CPU_68040 (1 << CPUB_68040) | ||
106 | #define CPU_68060 (1 << CPUB_68060) | ||
107 | #define CPU_COLDFIRE (1 << CPUB_COLDFIRE) | ||
108 | |||
109 | #define FPUB_68881 0 | ||
110 | #define FPUB_68882 1 | ||
111 | #define FPUB_68040 2 /* Internal FPU */ | ||
112 | #define FPUB_68060 3 /* Internal FPU */ | ||
113 | #define FPUB_SUNFPA 4 /* Sun-3 FPA */ | ||
114 | #define FPUB_COLDFIRE 5 /* ColdFire FPU */ | ||
115 | |||
116 | #define FPU_68881 (1 << FPUB_68881) | ||
117 | #define FPU_68882 (1 << FPUB_68882) | ||
118 | #define FPU_68040 (1 << FPUB_68040) | ||
119 | #define FPU_68060 (1 << FPUB_68060) | ||
120 | #define FPU_SUNFPA (1 << FPUB_SUNFPA) | ||
121 | #define FPU_COLDFIRE (1 << FPUB_COLDFIRE) | ||
122 | |||
123 | #define MMUB_68851 0 | ||
124 | #define MMUB_68030 1 /* Internal MMU */ | ||
125 | #define MMUB_68040 2 /* Internal MMU */ | ||
126 | #define MMUB_68060 3 /* Internal MMU */ | ||
127 | #define MMUB_APOLLO 4 /* Custom Apollo */ | ||
128 | #define MMUB_SUN3 5 /* Custom Sun-3 */ | ||
129 | #define MMUB_COLDFIRE 6 /* Internal MMU */ | ||
130 | |||
131 | #define MMU_68851 (1 << MMUB_68851) | ||
132 | #define MMU_68030 (1 << MMUB_68030) | ||
133 | #define MMU_68040 (1 << MMUB_68040) | ||
134 | #define MMU_68060 (1 << MMUB_68060) | ||
135 | #define MMU_SUN3 (1 << MMUB_SUN3) | ||
136 | #define MMU_APOLLO (1 << MMUB_APOLLO) | ||
137 | #define MMU_COLDFIRE (1 << MMUB_COLDFIRE) | ||
138 | |||
139 | |||
140 | /* | ||
141 | * Stuff for bootinfo interface versioning | ||
142 | * | ||
143 | * At the start of kernel code, a 'struct bootversion' is located. | ||
144 | * The bootstrap checks for a matching version of the interface before booting | ||
145 | * a kernel, to avoid user confusion if the kernel and bootstrap don't work | ||
146 | * together :-) | ||
147 | * | ||
148 | * If incompatible changes are made to the bootinfo interface, the major | ||
149 | * number below should be stepped (and the minor reset to 0) for the | ||
150 | * appropriate machine. If a change is backward-compatible, the minor | ||
151 | * should be stepped. "Backwards-compatible" means that booting will work, | ||
152 | * but certain features may not. | ||
153 | */ | ||
154 | |||
155 | #define BOOTINFOV_MAGIC 0x4249561A /* 'BIV^Z' */ | ||
156 | #define MK_BI_VERSION(major, minor) (((major) << 16) + (minor)) | ||
157 | #define BI_VERSION_MAJOR(v) (((v) >> 16) & 0xffff) | ||
158 | #define BI_VERSION_MINOR(v) ((v) & 0xffff) | ||
159 | |||
160 | #ifndef __ASSEMBLY__ | ||
161 | |||
162 | struct bootversion { | ||
163 | __be16 branch; | ||
164 | __be32 magic; | ||
165 | struct { | ||
166 | __be32 machtype; | ||
167 | __be32 version; | ||
168 | } machversions[0]; | ||
169 | } __packed; | ||
170 | |||
171 | #endif /* __ASSEMBLY__ */ | ||
172 | |||
173 | |||
174 | #endif /* _UAPI_ASM_M68K_BOOTINFO_H */ | ||
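The record layout above (a __be16 tag, a __be16 total record size in bytes, then the payload, terminated by a BI_LAST tag) makes the chain easy to walk. A minimal sketch of such a walk, using only the definitions from this header (the function itself is illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/bootinfo.h>

/* Return the first record with the wanted tag, or NULL if the chain
 * ends (BI_LAST) without finding it. 'size' covers the whole record,
 * header included, so it is also the offset to the next record. */
static const struct bi_record *bi_find(const struct bi_record *record,
				       uint16_t wanted)
{
	uint16_t tag;

	while ((tag = be16_to_cpu(record->tag)) != BI_LAST) {
		if (tag == wanted)
			return record;
		record = (const void *)record + be16_to_cpu(record->size);
	}
	return NULL;
}

For the version macros, MK_BI_VERSION(2, 0) evaluates to 0x00020000, so BI_VERSION_MAJOR() recovers 2 and BI_VERSION_MINOR() recovers 0; as the comment above describes, a bootstrap compares these against the machversions[] entry matching its machtype before booting a kernel.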
diff --git a/arch/m68k/include/uapi/asm/setup.h b/arch/m68k/include/uapi/asm/setup.h index 85579bff455c..6a6dc636761e 100644 --- a/arch/m68k/include/uapi/asm/setup.h +++ b/arch/m68k/include/uapi/asm/setup.h | |||
@@ -6,98 +6,11 @@ | |||
6 | ** This file is subject to the terms and conditions of the GNU General Public | 6 | ** This file is subject to the terms and conditions of the GNU General Public |
7 | ** License. See the file COPYING in the main directory of this archive | 7 | ** License. See the file COPYING in the main directory of this archive |
8 | ** for more details. | 8 | ** for more details. |
9 | ** | ||
10 | ** Created 09/29/92 by Greg Harp | ||
11 | ** | ||
12 | ** 5/2/94 Roman Hodek: | ||
13 | ** Added bi_atari part of the machine dependent union bi_un; for now it | ||
14 | ** contains just a model field to distinguish between TT and Falcon. | ||
15 | ** 26/7/96 Roman Zippel: | ||
16 | ** Renamed to setup.h; added some useful macros to allow gcc some | ||
17 | ** optimizations if possible. | ||
18 | ** 5/10/96 Geert Uytterhoeven: | ||
19 | ** Redesign of the boot information structure; moved boot information | ||
20 | ** structure to bootinfo.h | ||
21 | */ | 9 | */ |
22 | 10 | ||
23 | #ifndef _UAPI_M68K_SETUP_H | 11 | #ifndef _UAPI_M68K_SETUP_H |
24 | #define _UAPI_M68K_SETUP_H | 12 | #define _UAPI_M68K_SETUP_H |
25 | 13 | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Linux/m68k Architectures | ||
30 | */ | ||
31 | |||
32 | #define MACH_AMIGA 1 | ||
33 | #define MACH_ATARI 2 | ||
34 | #define MACH_MAC 3 | ||
35 | #define MACH_APOLLO 4 | ||
36 | #define MACH_SUN3 5 | ||
37 | #define MACH_MVME147 6 | ||
38 | #define MACH_MVME16x 7 | ||
39 | #define MACH_BVME6000 8 | ||
40 | #define MACH_HP300 9 | ||
41 | #define MACH_Q40 10 | ||
42 | #define MACH_SUN3X 11 | ||
43 | #define MACH_M54XX 12 | ||
44 | |||
45 | #define COMMAND_LINE_SIZE 256 | 14 | #define COMMAND_LINE_SIZE 256 |
46 | 15 | ||
47 | |||
48 | |||
49 | /* | ||
50 | * CPU, FPU and MMU types | ||
51 | * | ||
52 | * Note: we may rely on the following equalities: | ||
53 | * | ||
54 | * CPU_68020 == MMU_68851 | ||
55 | * CPU_68030 == MMU_68030 | ||
56 | * CPU_68040 == FPU_68040 == MMU_68040 | ||
57 | * CPU_68060 == FPU_68060 == MMU_68060 | ||
58 | */ | ||
59 | |||
60 | #define CPUB_68020 0 | ||
61 | #define CPUB_68030 1 | ||
62 | #define CPUB_68040 2 | ||
63 | #define CPUB_68060 3 | ||
64 | #define CPUB_COLDFIRE 4 | ||
65 | |||
66 | #define CPU_68020 (1<<CPUB_68020) | ||
67 | #define CPU_68030 (1<<CPUB_68030) | ||
68 | #define CPU_68040 (1<<CPUB_68040) | ||
69 | #define CPU_68060 (1<<CPUB_68060) | ||
70 | #define CPU_COLDFIRE (1<<CPUB_COLDFIRE) | ||
71 | |||
72 | #define FPUB_68881 0 | ||
73 | #define FPUB_68882 1 | ||
74 | #define FPUB_68040 2 /* Internal FPU */ | ||
75 | #define FPUB_68060 3 /* Internal FPU */ | ||
76 | #define FPUB_SUNFPA 4 /* Sun-3 FPA */ | ||
77 | #define FPUB_COLDFIRE 5 /* ColdFire FPU */ | ||
78 | |||
79 | #define FPU_68881 (1<<FPUB_68881) | ||
80 | #define FPU_68882 (1<<FPUB_68882) | ||
81 | #define FPU_68040 (1<<FPUB_68040) | ||
82 | #define FPU_68060 (1<<FPUB_68060) | ||
83 | #define FPU_SUNFPA (1<<FPUB_SUNFPA) | ||
84 | #define FPU_COLDFIRE (1<<FPUB_COLDFIRE) | ||
85 | |||
86 | #define MMUB_68851 0 | ||
87 | #define MMUB_68030 1 /* Internal MMU */ | ||
88 | #define MMUB_68040 2 /* Internal MMU */ | ||
89 | #define MMUB_68060 3 /* Internal MMU */ | ||
90 | #define MMUB_APOLLO 4 /* Custom Apollo */ | ||
91 | #define MMUB_SUN3 5 /* Custom Sun-3 */ | ||
92 | #define MMUB_COLDFIRE 6 /* Internal MMU */ | ||
93 | |||
94 | #define MMU_68851 (1<<MMUB_68851) | ||
95 | #define MMU_68030 (1<<MMUB_68030) | ||
96 | #define MMU_68040 (1<<MMUB_68040) | ||
97 | #define MMU_68060 (1<<MMUB_68060) | ||
98 | #define MMU_SUN3 (1<<MMUB_SUN3) | ||
99 | #define MMU_APOLLO (1<<MMUB_APOLLO) | ||
100 | #define MMU_COLDFIRE (1<<MMUB_COLDFIRE) | ||
101 | |||
102 | |||
103 | #endif /* _UAPI_M68K_SETUP_H */ | 16 | #endif /* _UAPI_M68K_SETUP_H */ |
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index 655347d80780..2d5d9be16273 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile | |||
@@ -22,3 +22,6 @@ obj-$(CONFIG_PCI) += pcibios.o | |||
22 | 22 | ||
23 | obj-$(CONFIG_HAS_DMA) += dma.o | 23 | obj-$(CONFIG_HAS_DMA) += dma.o |
24 | 24 | ||
25 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | ||
26 | obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o | ||
27 | |||
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index 8b7b22846366..3a386341aa6e 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c | |||
@@ -98,6 +98,9 @@ int main(void) | |||
98 | DEFINE(CIABBASE, &ciab); | 98 | DEFINE(CIABBASE, &ciab); |
99 | DEFINE(C_PRA, offsetof(struct CIA, pra)); | 99 | DEFINE(C_PRA, offsetof(struct CIA, pra)); |
100 | DEFINE(ZTWOBASE, zTwoBase); | 100 | DEFINE(ZTWOBASE, zTwoBase); |
101 | |||
102 | /* enum m68k_fixup_type */ | ||
103 | DEFINE(M68K_FIXUP_MEMOFFSET, m68k_fixup_memoffset); | ||
101 | #endif | 104 | #endif |
102 | 105 | ||
103 | return 0; | 106 | return 0; |
diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c new file mode 100644 index 000000000000..7ee853e1432b --- /dev/null +++ b/arch/m68k/kernel/bootinfo_proc.c | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * Based on arch/arm/kernel/atags_proc.c | ||
3 | */ | ||
4 | |||
5 | #include <linux/fs.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/printk.h> | ||
8 | #include <linux/proc_fs.h> | ||
9 | #include <linux/slab.h> | ||
10 | #include <linux/string.h> | ||
11 | |||
12 | #include <asm/bootinfo.h> | ||
13 | #include <asm/byteorder.h> | ||
14 | |||
15 | |||
16 | static char bootinfo_tmp[1536] __initdata; | ||
17 | |||
18 | static void *bootinfo_copy; | ||
19 | static size_t bootinfo_size; | ||
20 | |||
21 | static ssize_t bootinfo_read(struct file *file, char __user *buf, | ||
22 | size_t count, loff_t *ppos) | ||
23 | { | ||
24 | return simple_read_from_buffer(buf, count, ppos, bootinfo_copy, | ||
25 | bootinfo_size); | ||
26 | } | ||
27 | |||
28 | static const struct file_operations bootinfo_fops = { | ||
29 | .read = bootinfo_read, | ||
30 | .llseek = default_llseek, | ||
31 | }; | ||
32 | |||
33 | void __init save_bootinfo(const struct bi_record *bi) | ||
34 | { | ||
35 | const void *start = bi; | ||
36 | size_t size = sizeof(bi->tag); | ||
37 | |||
38 | while (be16_to_cpu(bi->tag) != BI_LAST) { | ||
39 | uint16_t n = be16_to_cpu(bi->size); | ||
40 | size += n; | ||
41 | bi = (struct bi_record *)((unsigned long)bi + n); | ||
42 | } | ||
43 | |||
44 | if (size > sizeof(bootinfo_tmp)) { | ||
45 | pr_err("Cannot save %zu bytes of bootinfo\n", size); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | pr_info("Saving %zu bytes of bootinfo\n", size); | ||
50 | memcpy(bootinfo_tmp, start, size); | ||
51 | bootinfo_size = size; | ||
52 | } | ||
53 | |||
54 | static int __init init_bootinfo_procfs(void) | ||
55 | { | ||
56 | /* | ||
57 | * This cannot go into save_bootinfo() because kmalloc and proc don't | ||
58 | * work yet when it is called. | ||
59 | */ | ||
60 | struct proc_dir_entry *pde; | ||
61 | |||
62 | if (!bootinfo_size) | ||
63 | return -EINVAL; | ||
64 | |||
65 | bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL); | ||
66 | if (!bootinfo_copy) | ||
67 | return -ENOMEM; | ||
68 | |||
69 | memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size); | ||
70 | |||
71 | pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL); | ||
72 | if (!pde) { | ||
73 | kfree(bootinfo_copy); | ||
74 | return -ENOMEM; | ||
75 | } | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | arch_initcall(init_bootinfo_procfs); | ||
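The resulting /proc/bootinfo file exposes the raw record chain to user space. A minimal user-space sketch of reading it (error handling kept to the essentials; the buffer size matches the 1536-byte kernel staging buffer above):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned char buf[1536];
	size_t n;
	FILE *f = fopen("/proc/bootinfo", "rb");

	if (!f) {
		perror("/proc/bootinfo");
		return EXIT_FAILURE;
	}
	n = fread(buf, 1, sizeof(buf), f);
	printf("read %zu bytes of raw bootinfo records\n", n);
	fclose(f);
	return EXIT_SUCCESS;
}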
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index ac85f16534af..4c99bab7e664 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S | |||
@@ -23,7 +23,7 @@ | |||
23 | ** 98/04/25 Phil Blundell: added HP300 support | 23 | ** 98/04/25 Phil Blundell: added HP300 support |
24 | ** 1998/08/30 David Kilzer: Added support for font_desc structures | 24 | ** 1998/08/30 David Kilzer: Added support for font_desc structures |
25 | ** for linux-2.1.115 | 25 | ** for linux-2.1.115 |
26 | ** 9/02/11 Richard Zidlicky: added Q40 support (initial vesion 99/01/01) | 26 | ** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01) |
27 | ** 2004/05/13 Kars de Jong: Finalised HP300 support | 27 | ** 2004/05/13 Kars de Jong: Finalised HP300 support |
28 | ** | 28 | ** |
29 | ** This file is subject to the terms and conditions of the GNU General Public | 29 | ** This file is subject to the terms and conditions of the GNU General Public |
@@ -257,6 +257,12 @@ | |||
257 | #include <linux/linkage.h> | 257 | #include <linux/linkage.h> |
258 | #include <linux/init.h> | 258 | #include <linux/init.h> |
259 | #include <asm/bootinfo.h> | 259 | #include <asm/bootinfo.h> |
260 | #include <asm/bootinfo-amiga.h> | ||
261 | #include <asm/bootinfo-atari.h> | ||
262 | #include <asm/bootinfo-hp300.h> | ||
263 | #include <asm/bootinfo-mac.h> | ||
264 | #include <asm/bootinfo-q40.h> | ||
265 | #include <asm/bootinfo-vme.h> | ||
260 | #include <asm/setup.h> | 266 | #include <asm/setup.h> |
261 | #include <asm/entry.h> | 267 | #include <asm/entry.h> |
262 | #include <asm/pgtable.h> | 268 | #include <asm/pgtable.h> |
@@ -1532,7 +1538,7 @@ L(cache_done): | |||
1532 | 1538 | ||
1533 | /* | 1539 | /* |
1534 | * Find a tag record in the bootinfo structure | 1540 | * Find a tag record in the bootinfo structure |
1535 | * The bootinfo structure is located right after the kernel bss | 1541 | * The bootinfo structure is located right after the kernel |
1536 | * Returns: d0: size (-1 if not found) | 1542 | * Returns: d0: size (-1 if not found) |
1537 | * a0: data pointer (end-of-records if not found) | 1543 | * a0: data pointer (end-of-records if not found) |
1538 | */ | 1544 | */ |
@@ -2909,7 +2915,9 @@ func_start serial_init,%d0/%d1/%a0/%a1 | |||
2909 | 2915 | ||
2910 | #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) | 2916 | #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) |
2911 | movel %pc@(L(mac_sccbase)),%a0 | 2917 | movel %pc@(L(mac_sccbase)),%a0 |
2912 | /* Reset SCC device */ | 2918 | /* Reset SCC register pointer */ |
2919 | moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0 | ||
2920 | /* Reset SCC device: write register pointer then register value */ | ||
2913 | moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) | 2921 | moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) |
2914 | moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) | 2922 | moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) |
2915 | /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ | 2923 | /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ |
@@ -3896,8 +3904,6 @@ BVME_SCC_DATA_A = 0xffb0000f | |||
3896 | #endif | 3904 | #endif |
3897 | 3905 | ||
3898 | #if defined(CONFIG_MAC) | 3906 | #if defined(CONFIG_MAC) |
3899 | L(mac_booter_data): | ||
3900 | .long 0 | ||
3901 | L(mac_videobase): | 3907 | L(mac_videobase): |
3902 | .long 0 | 3908 | .long 0 |
3903 | L(mac_videodepth): | 3909 | L(mac_videodepth): |
diff --git a/arch/m68k/kernel/machine_kexec.c b/arch/m68k/kernel/machine_kexec.c new file mode 100644 index 000000000000..d4affc917d9d --- /dev/null +++ b/arch/m68k/kernel/machine_kexec.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * machine_kexec.c - handle transition of Linux booting another kernel | ||
3 | */ | ||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/kexec.h> | ||
6 | #include <linux/mm.h> | ||
7 | #include <linux/delay.h> | ||
8 | |||
9 | #include <asm/cacheflush.h> | ||
10 | #include <asm/page.h> | ||
11 | #include <asm/setup.h> | ||
12 | |||
13 | extern const unsigned char relocate_new_kernel[]; | ||
14 | extern const size_t relocate_new_kernel_size; | ||
15 | |||
16 | int machine_kexec_prepare(struct kimage *kimage) | ||
17 | { | ||
18 | return 0; | ||
19 | } | ||
20 | |||
21 | void machine_kexec_cleanup(struct kimage *kimage) | ||
22 | { | ||
23 | } | ||
24 | |||
25 | void machine_shutdown(void) | ||
26 | { | ||
27 | } | ||
28 | |||
29 | void machine_crash_shutdown(struct pt_regs *regs) | ||
30 | { | ||
31 | } | ||
32 | |||
33 | typedef void (*relocate_kernel_t)(unsigned long ptr, | ||
34 | unsigned long start, | ||
35 | unsigned long cpu_mmu_flags) __noreturn; | ||
36 | |||
37 | void machine_kexec(struct kimage *image) | ||
38 | { | ||
39 | void *reboot_code_buffer; | ||
40 | unsigned long cpu_mmu_flags; | ||
41 | |||
42 | reboot_code_buffer = page_address(image->control_code_page); | ||
43 | |||
44 | memcpy(reboot_code_buffer, relocate_new_kernel, | ||
45 | relocate_new_kernel_size); | ||
46 | |||
47 | /* | ||
48 | * We do not want to be interrupted while handing control to the new kernel. | ||
49 | */ | ||
50 | local_irq_disable(); | ||
51 | |||
52 | pr_info("Will call new kernel at 0x%08lx. Bye...\n", image->start); | ||
53 | __flush_cache_all(); | ||
54 | cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8; | ||
55 | ((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK, | ||
56 | image->start, | ||
57 | cpu_mmu_flags); | ||
58 | } | ||
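The cpu_mmu_flags word passed to the relocation stub packs m68k_cputype into bits 0-7 and m68k_mmutype into bits 8-15, which is what the MMU_BASE = 8 offset in relocate_kernel.S below relies on. A worked example under that assumption, using the bit definitions from bootinfo.h:

/* On a 68040: m68k_cputype == CPU_68040 == (1 << 2) and
 * m68k_mmutype == MMU_68040 == (1 << 2), so
 * cpu_mmu_flags == 0x00000404 -- bit CPUB_68040 (2) set for the CPU and
 * bit MMU_BASE + MMUB_68040 (10) set for the MMU. */
unsigned long cpu_mmu_flags = CPU_68040 | MMU_68040 << 8;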
diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S new file mode 100644 index 000000000000..3e09a89067ad --- /dev/null +++ b/arch/m68k/kernel/relocate_kernel.S | |||
@@ -0,0 +1,159 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | |||
3 | #include <asm/asm-offsets.h> | ||
4 | #include <asm/page.h> | ||
5 | #include <asm/setup.h> | ||
6 | |||
7 | |||
8 | #define MMU_BASE 8 /* MMU flags base in cpu_mmu_flags */ | ||
9 | |||
10 | .text | ||
11 | |||
12 | ENTRY(relocate_new_kernel) | ||
13 | movel %sp@(4),%a0 /* a0 = ptr */ | ||
14 | movel %sp@(8),%a1 /* a1 = start */ | ||
15 | movel %sp@(12),%d1 /* d1 = cpu_mmu_flags */ | ||
16 | movew #PAGE_MASK,%d2 /* d2 = PAGE_MASK */ | ||
17 | |||
18 | /* Disable MMU */ | ||
19 | |||
20 | btst #MMU_BASE + MMUB_68851,%d1 | ||
21 | jeq 3f | ||
22 | |||
23 | 1: /* 68851 or 68030 */ | ||
24 | |||
25 | lea %pc@(.Lcopy),%a4 | ||
26 | 2: addl #0x00000000,%a4 /* virt_to_phys() */ | ||
27 | |||
28 | .section ".m68k_fixup","aw" | ||
29 | .long M68K_FIXUP_MEMOFFSET, 2b+2 | ||
30 | .previous | ||
31 | |||
32 | .chip 68030 | ||
33 | pmove %tc,%d0 /* Disable MMU */ | ||
34 | bclr #7,%d0 | ||
35 | pmove %d0,%tc | ||
36 | jmp %a4@ /* Jump to physical .Lcopy */ | ||
37 | .chip 68k | ||
38 | |||
39 | 3: | ||
40 | btst #MMU_BASE + MMUB_68030,%d1 | ||
41 | jne 1b | ||
42 | |||
43 | btst #MMU_BASE + MMUB_68040,%d1 | ||
44 | jeq 6f | ||
45 | |||
46 | 4: /* 68040 or 68060 */ | ||
47 | |||
48 | lea %pc@(.Lcont040),%a4 | ||
49 | 5: addl #0x00000000,%a4 /* virt_to_phys() */ | ||
50 | |||
51 | .section ".m68k_fixup","aw" | ||
52 | .long M68K_FIXUP_MEMOFFSET, 5b+2 | ||
53 | .previous | ||
54 | |||
55 | movel %a4,%d0 | ||
56 | andl #0xff000000,%d0 | ||
57 | orw #0xe020,%d0 /* Map 16 MiB, enable, cacheable */ | ||
58 | .chip 68040 | ||
59 | movec %d0,%itt0 | ||
60 | movec %d0,%dtt0 | ||
61 | .chip 68k | ||
62 | jmp %a4@ /* Jump to physical .Lcont040 */ | ||
63 | |||
64 | .Lcont040: | ||
65 | moveq #0,%d0 | ||
66 | .chip 68040 | ||
67 | movec %d0,%tc /* Disable MMU */ | ||
68 | movec %d0,%itt0 | ||
69 | movec %d0,%itt1 | ||
70 | movec %d0,%dtt0 | ||
71 | movec %d0,%dtt1 | ||
72 | .chip 68k | ||
73 | jra .Lcopy | ||
74 | |||
75 | 6: | ||
76 | btst #MMU_BASE + MMUB_68060,%d1 | ||
77 | jne 4b | ||
78 | |||
79 | .Lcopy: | ||
80 | movel %a0@+,%d0 /* d0 = entry = *ptr */ | ||
81 | jeq .Lflush | ||
82 | |||
83 | btst #2,%d0 /* entry & IND_DONE? */ | ||
84 | jne .Lflush | ||
85 | |||
86 | btst #1,%d0 /* entry & IND_INDIRECTION? */ | ||
87 | jeq 1f | ||
88 | andw %d2,%d0 | ||
89 | movel %d0,%a0 /* ptr = entry & PAGE_MASK */ | ||
90 | jra .Lcopy | ||
91 | |||
92 | 1: | ||
93 | btst #0,%d0 /* entry & IND_DESTINATION? */ | ||
94 | jeq 2f | ||
95 | andw %d2,%d0 | ||
96 | movel %d0,%a2 /* a2 = dst = entry & PAGE_MASK */ | ||
97 | jra .Lcopy | ||
98 | |||
99 | 2: | ||
100 | btst #3,%d0 /* entry & IND_SOURCE? */ | ||
101 | jeq .Lcopy | ||
102 | |||
103 | andw %d2,%d0 | ||
104 | movel %d0,%a3 /* a3 = src = entry & PAGE_MASK */ | ||
105 | movew #PAGE_SIZE/32 - 1,%d0 /* d0 = PAGE_SIZE/32 - 1 */ | ||
106 | 3: | ||
107 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
108 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
109 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
110 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
111 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
112 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
113 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
114 | movel %a3@+,%a2@+ /* *dst++ = *src++ */ | ||
115 | dbf %d0, 3b | ||
116 | jra .Lcopy | ||
117 | |||
118 | .Lflush: | ||
119 | /* Flush all caches */ | ||
120 | |||
121 | btst #CPUB_68020,%d1 | ||
122 | jeq 2f | ||
123 | |||
124 | 1: /* 68020 or 68030 */ | ||
125 | .chip 68030 | ||
126 | movec %cacr,%d0 | ||
127 | orw #0x808,%d0 | ||
128 | movec %d0,%cacr | ||
129 | .chip 68k | ||
130 | jra .Lreincarnate | ||
131 | |||
132 | 2: | ||
133 | btst #CPUB_68030,%d1 | ||
134 | jne 1b | ||
135 | |||
136 | btst #CPUB_68040,%d1 | ||
137 | jeq 4f | ||
138 | |||
139 | 3: /* 68040 or 68060 */ | ||
140 | .chip 68040 | ||
141 | nop | ||
142 | cpusha %bc | ||
143 | nop | ||
144 | cinva %bc | ||
145 | nop | ||
146 | .chip 68k | ||
147 | jra .Lreincarnate | ||
148 | |||
149 | 4: | ||
150 | btst #CPUB_68060,%d1 | ||
151 | jne 3b | ||
152 | |||
153 | .Lreincarnate: | ||
154 | jmp %a1@ | ||
155 | |||
156 | relocate_new_kernel_end: | ||
157 | |||
158 | ENTRY(relocate_new_kernel_size) | ||
159 | .long relocate_new_kernel_end - relocate_new_kernel | ||
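The .Lcopy loop above walks the kimage entry list prepared by kexec. In C, under the standard kexec entry flags (IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8 — assumed here, they are not defined in this file) and a 4 KiB page size, the same walk looks roughly like this:

#include <string.h>

#define IND_DESTINATION 0x1UL
#define IND_INDIRECTION 0x2UL
#define IND_DONE        0x4UL
#define IND_SOURCE      0x8UL
#define KPAGE_SIZE      4096UL
#define KPAGE_MASK      (~(KPAGE_SIZE - 1))

/* Sketch of the assembly copy loop: 'ptr' is the first kimage entry,
 * already masked to a page boundary by machine_kexec(). */
static void copy_new_kernel(const unsigned long *ptr)
{
	unsigned char *dst = NULL;

	for (;;) {
		unsigned long entry = *ptr++;

		if (!entry || (entry & IND_DONE))
			return;		/* proceed to the cache flush */
		if (entry & IND_INDIRECTION)
			ptr = (const unsigned long *)(entry & KPAGE_MASK);
		else if (entry & IND_DESTINATION)
			dst = (unsigned char *)(entry & KPAGE_MASK);
		else if (entry & IND_SOURCE) {
			memcpy(dst, (const void *)(entry & KPAGE_MASK),
			       KPAGE_SIZE);
			dst += KPAGE_SIZE;
		}
	}
}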
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index e67e53159573..5b8ec4d5f8e8 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/initrd.h> | 26 | #include <linux/initrd.h> |
27 | 27 | ||
28 | #include <asm/bootinfo.h> | 28 | #include <asm/bootinfo.h> |
29 | #include <asm/byteorder.h> | ||
29 | #include <asm/sections.h> | 30 | #include <asm/sections.h> |
30 | #include <asm/setup.h> | 31 | #include <asm/setup.h> |
31 | #include <asm/fpu.h> | 32 | #include <asm/fpu.h> |
@@ -71,12 +72,12 @@ EXPORT_SYMBOL(m68k_num_memory); | |||
71 | int m68k_realnum_memory; | 72 | int m68k_realnum_memory; |
72 | EXPORT_SYMBOL(m68k_realnum_memory); | 73 | EXPORT_SYMBOL(m68k_realnum_memory); |
73 | unsigned long m68k_memoffset; | 74 | unsigned long m68k_memoffset; |
74 | struct mem_info m68k_memory[NUM_MEMINFO]; | 75 | struct m68k_mem_info m68k_memory[NUM_MEMINFO]; |
75 | EXPORT_SYMBOL(m68k_memory); | 76 | EXPORT_SYMBOL(m68k_memory); |
76 | 77 | ||
77 | struct mem_info m68k_ramdisk; | 78 | static struct m68k_mem_info m68k_ramdisk __initdata; |
78 | 79 | ||
79 | static char m68k_command_line[CL_SIZE]; | 80 | static char m68k_command_line[CL_SIZE] __initdata; |
80 | 81 | ||
81 | void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL; | 82 | void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL; |
82 | /* machine dependent irq functions */ | 83 | /* machine dependent irq functions */ |
@@ -143,11 +144,16 @@ extern void paging_init(void); | |||
143 | 144 | ||
144 | static void __init m68k_parse_bootinfo(const struct bi_record *record) | 145 | static void __init m68k_parse_bootinfo(const struct bi_record *record) |
145 | { | 146 | { |
146 | while (record->tag != BI_LAST) { | 147 | uint16_t tag; |
148 | |||
149 | save_bootinfo(record); | ||
150 | |||
151 | while ((tag = be16_to_cpu(record->tag)) != BI_LAST) { | ||
147 | int unknown = 0; | 152 | int unknown = 0; |
148 | const unsigned long *data = record->data; | 153 | const void *data = record->data; |
154 | uint16_t size = be16_to_cpu(record->size); | ||
149 | 155 | ||
150 | switch (record->tag) { | 156 | switch (tag) { |
151 | case BI_MACHTYPE: | 157 | case BI_MACHTYPE: |
152 | case BI_CPUTYPE: | 158 | case BI_CPUTYPE: |
153 | case BI_FPUTYPE: | 159 | case BI_FPUTYPE: |
@@ -157,20 +163,27 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) | |||
157 | 163 | ||
158 | case BI_MEMCHUNK: | 164 | case BI_MEMCHUNK: |
159 | if (m68k_num_memory < NUM_MEMINFO) { | 165 | if (m68k_num_memory < NUM_MEMINFO) { |
160 | m68k_memory[m68k_num_memory].addr = data[0]; | 166 | const struct mem_info *m = data; |
161 | m68k_memory[m68k_num_memory].size = data[1]; | 167 | m68k_memory[m68k_num_memory].addr = |
168 | be32_to_cpu(m->addr); | ||
169 | m68k_memory[m68k_num_memory].size = | ||
170 | be32_to_cpu(m->size); | ||
162 | m68k_num_memory++; | 171 | m68k_num_memory++; |
163 | } else | 172 | } else |
164 | printk("m68k_parse_bootinfo: too many memory chunks\n"); | 173 | pr_warn("%s: too many memory chunks\n", |
174 | __func__); | ||
165 | break; | 175 | break; |
166 | 176 | ||
167 | case BI_RAMDISK: | 177 | case BI_RAMDISK: |
168 | m68k_ramdisk.addr = data[0]; | 178 | { |
169 | m68k_ramdisk.size = data[1]; | 179 | const struct mem_info *m = data; |
180 | m68k_ramdisk.addr = be32_to_cpu(m->addr); | ||
181 | m68k_ramdisk.size = be32_to_cpu(m->size); | ||
182 | } | ||
170 | break; | 183 | break; |
171 | 184 | ||
172 | case BI_COMMAND_LINE: | 185 | case BI_COMMAND_LINE: |
173 | strlcpy(m68k_command_line, (const char *)data, | 186 | strlcpy(m68k_command_line, data, |
174 | sizeof(m68k_command_line)); | 187 | sizeof(m68k_command_line)); |
175 | break; | 188 | break; |
176 | 189 | ||
@@ -197,17 +210,16 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) | |||
197 | unknown = 1; | 210 | unknown = 1; |
198 | } | 211 | } |
199 | if (unknown) | 212 | if (unknown) |
200 | printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n", | 213 | pr_warn("%s: unknown tag 0x%04x ignored\n", __func__, |
201 | record->tag); | 214 | tag); |
202 | record = (struct bi_record *)((unsigned long)record + | 215 | record = (struct bi_record *)((unsigned long)record + size); |
203 | record->size); | ||
204 | } | 216 | } |
205 | 217 | ||
206 | m68k_realnum_memory = m68k_num_memory; | 218 | m68k_realnum_memory = m68k_num_memory; |
207 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK | 219 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK |
208 | if (m68k_num_memory > 1) { | 220 | if (m68k_num_memory > 1) { |
209 | printk("Ignoring last %i chunks of physical memory\n", | 221 | pr_warn("%s: ignoring last %i chunks of physical memory\n", |
210 | (m68k_num_memory - 1)); | 222 | __func__, (m68k_num_memory - 1)); |
211 | m68k_num_memory = 1; | 223 | m68k_num_memory = 1; |
212 | } | 224 | } |
213 | #endif | 225 | #endif |
@@ -219,7 +231,7 @@ void __init setup_arch(char **cmdline_p) | |||
219 | int i; | 231 | int i; |
220 | #endif | 232 | #endif |
221 | 233 | ||
222 | /* The bootinfo is located right after the kernel bss */ | 234 | /* The bootinfo is located right after the kernel */ |
223 | if (!CPU_IS_COLDFIRE) | 235 | if (!CPU_IS_COLDFIRE) |
224 | m68k_parse_bootinfo((const struct bi_record *)_end); | 236 | m68k_parse_bootinfo((const struct bi_record *)_end); |
225 | 237 | ||
@@ -247,7 +259,7 @@ void __init setup_arch(char **cmdline_p) | |||
247 | asm (".chip 68060; movec %%pcr,%0; .chip 68k" | 259 | asm (".chip 68060; movec %%pcr,%0; .chip 68k" |
248 | : "=d" (pcr)); | 260 | : "=d" (pcr)); |
249 | if (((pcr >> 8) & 0xff) <= 5) { | 261 | if (((pcr >> 8) & 0xff) <= 5) { |
250 | printk("Enabling workaround for errata I14\n"); | 262 | pr_warn("Enabling workaround for errata I14\n"); |
251 | asm (".chip 68060; movec %0,%%pcr; .chip 68k" | 263 | asm (".chip 68060; movec %0,%%pcr; .chip 68k" |
252 | : : "d" (pcr | 0x20)); | 264 | : : "d" (pcr | 0x20)); |
253 | } | 265 | } |
@@ -336,12 +348,12 @@ void __init setup_arch(char **cmdline_p) | |||
336 | panic("No configuration setup"); | 348 | panic("No configuration setup"); |
337 | } | 349 | } |
338 | 350 | ||
351 | paging_init(); | ||
352 | |||
339 | #ifdef CONFIG_NATFEAT | 353 | #ifdef CONFIG_NATFEAT |
340 | nf_init(); | 354 | nf_init(); |
341 | #endif | 355 | #endif |
342 | 356 | ||
343 | paging_init(); | ||
344 | |||
345 | #ifndef CONFIG_SUN3 | 357 | #ifndef CONFIG_SUN3 |
346 | for (i = 1; i < m68k_num_memory; i++) | 358 | for (i = 1; i < m68k_num_memory; i++) |
347 | free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, | 359 | free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, |
@@ -353,7 +365,7 @@ void __init setup_arch(char **cmdline_p) | |||
353 | BOOTMEM_DEFAULT); | 365 | BOOTMEM_DEFAULT); |
354 | initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); | 366 | initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); |
355 | initrd_end = initrd_start + m68k_ramdisk.size; | 367 | initrd_end = initrd_start + m68k_ramdisk.size; |
356 | printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); | 368 | pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end); |
357 | } | 369 | } |
358 | #endif | 370 | #endif |
359 | 371 | ||
@@ -538,9 +550,9 @@ void check_bugs(void) | |||
538 | { | 550 | { |
539 | #ifndef CONFIG_M68KFPU_EMU | 551 | #ifndef CONFIG_M68KFPU_EMU |
540 | if (m68k_fputype == 0) { | 552 | if (m68k_fputype == 0) { |
541 | printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, " | 553 | pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, " |
542 | "WHICH IS REQUIRED BY LINUX/M68K ***\n"); | 554 | "WHICH IS REQUIRED BY LINUX/M68K ***\n"); |
543 | printk(KERN_EMERG "Upgrade your hardware or join the FPU " | 555 | pr_emerg("Upgrade your hardware or join the FPU " |
544 | "emulation project\n"); | 556 | "emulation project\n"); |
545 | panic("no FPU"); | 557 | panic("no FPU"); |
546 | } | 558 | } |
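Bootinfo payloads are now declared big-endian (__be32), so every consumer converts on access, as the BI_MEMCHUNK and BI_RAMDISK cases above do. A condensed sketch of that decode, reusing struct mem_info from bootinfo.h (the helper itself is illustrative only):

/* Copy one big-endian mem_info payload into native-endian fields. */
static void decode_mem_info(const struct bi_record *record,
			    unsigned long *addr, unsigned long *size)
{
	const struct mem_info *m = (const void *)record->data;

	*addr = be32_to_cpu(m->addr);
	*size = be32_to_cpu(m->size);
}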
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index 7eb9792009f8..958f1adb9d0c 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c | |||
@@ -28,6 +28,10 @@ | |||
28 | #include <linux/timex.h> | 28 | #include <linux/timex.h> |
29 | #include <linux/profile.h> | 29 | #include <linux/profile.h> |
30 | 30 | ||
31 | |||
32 | unsigned long (*mach_random_get_entropy)(void); | ||
33 | |||
34 | |||
31 | /* | 35 | /* |
32 | * timer_interrupt() needs to keep up the real-time clock, | 36 | * timer_interrupt() needs to keep up the real-time clock, |
33 | * as well as call the "xtime_update()" routine every clocktick | 37 | * as well as call the "xtime_update()" routine every clocktick |
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 88fcd8c70e7b..6c9ca24830e9 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c | |||
@@ -133,9 +133,7 @@ static inline void access_error060 (struct frame *fp) | |||
133 | { | 133 | { |
134 | unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ | 134 | unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ |
135 | 135 | ||
136 | #ifdef DEBUG | 136 | pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); |
137 | printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); | ||
138 | #endif | ||
139 | 137 | ||
140 | if (fslw & MMU060_BPE) { | 138 | if (fslw & MMU060_BPE) { |
141 | /* branch prediction error -> clear branch cache */ | 139 | /* branch prediction error -> clear branch cache */ |
@@ -162,9 +160,7 @@ static inline void access_error060 (struct frame *fp) | |||
162 | } | 160 | } |
163 | if (fslw & MMU060_W) | 161 | if (fslw & MMU060_W) |
164 | errorcode |= 2; | 162 | errorcode |= 2; |
165 | #ifdef DEBUG | 163 | pr_debug("errorcode = %ld\n", errorcode); |
166 | printk("errorcode = %d\n", errorcode ); | ||
167 | #endif | ||
168 | do_page_fault(&fp->ptregs, addr, errorcode); | 164 | do_page_fault(&fp->ptregs, addr, errorcode); |
169 | } else if (fslw & (MMU060_SEE)){ | 165 | } else if (fslw & (MMU060_SEE)){ |
170 | /* Software Emulation Error. | 166 | /* Software Emulation Error. |
@@ -173,8 +169,9 @@ static inline void access_error060 (struct frame *fp) | |||
173 | send_fault_sig(&fp->ptregs); | 169 | send_fault_sig(&fp->ptregs); |
174 | } else if (!(fslw & (MMU060_RE|MMU060_WE)) || | 170 | } else if (!(fslw & (MMU060_RE|MMU060_WE)) || |
175 | send_fault_sig(&fp->ptregs) > 0) { | 171 | send_fault_sig(&fp->ptregs) > 0) { |
176 | printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr); | 172 | pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, |
177 | printk( "68060 access error, fslw=%lx\n", fslw ); | 173 | fp->un.fmt4.effaddr); |
174 | pr_err("68060 access error, fslw=%lx\n", fslw); | ||
178 | trap_c( fp ); | 175 | trap_c( fp ); |
179 | } | 176 | } |
180 | } | 177 | } |
@@ -225,9 +222,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba, | |||
225 | set_fs(old_fs); | 222 | set_fs(old_fs); |
226 | 223 | ||
227 | 224 | ||
228 | #ifdef DEBUG | 225 | pr_debug("do_040writeback1, res=%d\n", res); |
229 | printk("do_040writeback1, res=%d\n",res); | ||
230 | #endif | ||
231 | 226 | ||
232 | return res; | 227 | return res; |
233 | } | 228 | } |
@@ -249,7 +244,7 @@ static inline void do_040writebacks(struct frame *fp) | |||
249 | int res = 0; | 244 | int res = 0; |
250 | #if 0 | 245 | #if 0 |
251 | if (fp->un.fmt7.wb1s & WBV_040) | 246 | if (fp->un.fmt7.wb1s & WBV_040) |
252 | printk("access_error040: cannot handle 1st writeback. oops.\n"); | 247 | pr_err("access_error040: cannot handle 1st writeback. oops.\n"); |
253 | #endif | 248 | #endif |
254 | 249 | ||
255 | if ((fp->un.fmt7.wb2s & WBV_040) && | 250 | if ((fp->un.fmt7.wb2s & WBV_040) && |
@@ -302,14 +297,12 @@ static inline void access_error040(struct frame *fp) | |||
302 | unsigned short ssw = fp->un.fmt7.ssw; | 297 | unsigned short ssw = fp->un.fmt7.ssw; |
303 | unsigned long mmusr; | 298 | unsigned long mmusr; |
304 | 299 | ||
305 | #ifdef DEBUG | 300 | pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); |
306 | printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); | 301 | pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, |
307 | printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, | ||
308 | fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); | 302 | fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); |
309 | printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", | 303 | pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", |
310 | fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, | 304 | fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, |
311 | fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); | 305 | fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); |
312 | #endif | ||
313 | 306 | ||
314 | if (ssw & ATC_040) { | 307 | if (ssw & ATC_040) { |
315 | unsigned long addr = fp->un.fmt7.faddr; | 308 | unsigned long addr = fp->un.fmt7.faddr; |
@@ -324,9 +317,7 @@ static inline void access_error040(struct frame *fp) | |||
324 | 317 | ||
325 | /* MMU error, get the MMUSR info for this access */ | 318 | /* MMU error, get the MMUSR info for this access */ |
326 | mmusr = probe040(!(ssw & RW_040), addr, ssw); | 319 | mmusr = probe040(!(ssw & RW_040), addr, ssw); |
327 | #ifdef DEBUG | 320 | pr_debug("mmusr = %lx\n", mmusr); |
328 | printk("mmusr = %lx\n", mmusr); | ||
329 | #endif | ||
330 | errorcode = 1; | 321 | errorcode = 1; |
331 | if (!(mmusr & MMU_R_040)) { | 322 | if (!(mmusr & MMU_R_040)) { |
332 | /* clear the invalid atc entry */ | 323 | /* clear the invalid atc entry */ |
@@ -340,14 +331,10 @@ static inline void access_error040(struct frame *fp) | |||
340 | errorcode |= 2; | 331 | errorcode |= 2; |
341 | 332 | ||
342 | if (do_page_fault(&fp->ptregs, addr, errorcode)) { | 333 | if (do_page_fault(&fp->ptregs, addr, errorcode)) { |
343 | #ifdef DEBUG | 334 | pr_debug("do_page_fault() !=0\n"); |
344 | printk("do_page_fault() !=0\n"); | ||
345 | #endif | ||
346 | if (user_mode(&fp->ptregs)){ | 335 | if (user_mode(&fp->ptregs)){ |
347 | /* delay writebacks after signal delivery */ | 336 | /* delay writebacks after signal delivery */ |
348 | #ifdef DEBUG | 337 | pr_debug(".. was usermode - return\n"); |
349 | printk(".. was usermode - return\n"); | ||
350 | #endif | ||
351 | return; | 338 | return; |
352 | } | 339 | } |
353 | /* disable writeback into user space from kernel | 340 | /* disable writeback into user space from kernel |
@@ -355,9 +342,7 @@ static inline void access_error040(struct frame *fp) | |||
355 | * the writeback won't do good) | 342 | * the writeback won't do good) |
356 | */ | 343 | */ |
357 | disable_wb: | 344 | disable_wb: |
358 | #ifdef DEBUG | 345 | pr_debug(".. disabling wb2\n"); |
359 | printk(".. disabling wb2\n"); | ||
360 | #endif | ||
361 | if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) | 346 | if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) |
362 | fp->un.fmt7.wb2s &= ~WBV_040; | 347 | fp->un.fmt7.wb2s &= ~WBV_040; |
363 | if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) | 348 | if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) |
@@ -371,7 +356,7 @@ disable_wb: | |||
371 | current->thread.signo = SIGBUS; | 356 | current->thread.signo = SIGBUS; |
372 | current->thread.faddr = fp->un.fmt7.faddr; | 357 | current->thread.faddr = fp->un.fmt7.faddr; |
373 | if (send_fault_sig(&fp->ptregs) >= 0) | 358 | if (send_fault_sig(&fp->ptregs) >= 0) |
374 | printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, | 359 | pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, |
375 | fp->un.fmt7.faddr); | 360 | fp->un.fmt7.faddr); |
376 | goto disable_wb; | 361 | goto disable_wb; |
377 | } | 362 | } |
@@ -394,19 +379,17 @@ static inline void bus_error030 (struct frame *fp) | |||
394 | unsigned short ssw = fp->un.fmtb.ssw; | 379 | unsigned short ssw = fp->un.fmtb.ssw; |
395 | extern unsigned long _sun3_map_test_start, _sun3_map_test_end; | 380 | extern unsigned long _sun3_map_test_start, _sun3_map_test_end; |
396 | 381 | ||
397 | #ifdef DEBUG | ||
398 | if (ssw & (FC | FB)) | 382 | if (ssw & (FC | FB)) |
399 | printk ("Instruction fault at %#010lx\n", | 383 | pr_debug("Instruction fault at %#010lx\n", |
400 | ssw & FC ? | 384 | ssw & FC ? |
401 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 | 385 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 |
402 | : | 386 | : |
403 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); | 387 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); |
404 | if (ssw & DF) | 388 | if (ssw & DF) |
405 | printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", | 389 | pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", |
406 | ssw & RW ? "read" : "write", | 390 | ssw & RW ? "read" : "write", |
407 | fp->un.fmtb.daddr, | 391 | fp->un.fmtb.daddr, |
408 | space_names[ssw & DFC], fp->ptregs.pc); | 392 | space_names[ssw & DFC], fp->ptregs.pc); |
409 | #endif | ||
410 | 393 | ||
411 | /* | 394 | /* |
412 | * Check if this page should be demand-mapped. This needs to go before | 395 | * Check if this page should be demand-mapped. This needs to go before |
@@ -429,7 +412,7 @@ static inline void bus_error030 (struct frame *fp) | |||
429 | return; | 412 | return; |
430 | /* instruction fault or kernel data fault! */ | 413 | /* instruction fault or kernel data fault! */ |
431 | if (ssw & (FC | FB)) | 414 | if (ssw & (FC | FB)) |
432 | printk ("Instruction fault at %#010lx\n", | 415 | pr_err("Instruction fault at %#010lx\n", |
433 | fp->ptregs.pc); | 416 | fp->ptregs.pc); |
434 | if (ssw & DF) { | 417 | if (ssw & DF) { |
435 | /* was this fault incurred testing bus mappings? */ | 418 | /* was this fault incurred testing bus mappings? */ |
@@ -439,12 +422,12 @@ static inline void bus_error030 (struct frame *fp) | |||
439 | return; | 422 | return; |
440 | } | 423 | } |
441 | 424 | ||
442 | printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", | 425 | pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", |
443 | ssw & RW ? "read" : "write", | 426 | ssw & RW ? "read" : "write", |
444 | fp->un.fmtb.daddr, | 427 | fp->un.fmtb.daddr, |
445 | space_names[ssw & DFC], fp->ptregs.pc); | 428 | space_names[ssw & DFC], fp->ptregs.pc); |
446 | } | 429 | } |
447 | printk ("BAD KERNEL BUSERR\n"); | 430 | pr_err("BAD KERNEL BUSERR\n"); |
448 | 431 | ||
449 | die_if_kernel("Oops", &fp->ptregs,0); | 432 | die_if_kernel("Oops", &fp->ptregs,0); |
450 | force_sig(SIGKILL, current); | 433 | force_sig(SIGKILL, current); |
@@ -473,12 +456,11 @@ static inline void bus_error030 (struct frame *fp) | |||
473 | else if (buserr_type & SUN3_BUSERR_INVALID) | 456 | else if (buserr_type & SUN3_BUSERR_INVALID) |
474 | errorcode = 0x00; | 457 | errorcode = 0x00; |
475 | else { | 458 | else { |
476 | #ifdef DEBUG | 459 | pr_debug("*** unexpected busfault type=%#04x\n", |
477 | printk ("*** unexpected busfault type=%#04x\n", buserr_type); | 460 | buserr_type); |
478 | printk ("invalid %s access at %#lx from pc %#lx\n", | 461 | pr_debug("invalid %s access at %#lx from pc %#lx\n", |
479 | !(ssw & RW) ? "write" : "read", addr, | 462 | !(ssw & RW) ? "write" : "read", addr, |
480 | fp->ptregs.pc); | 463 | fp->ptregs.pc); |
481 | #endif | ||
482 | die_if_kernel ("Oops", &fp->ptregs, buserr_type); | 464 | die_if_kernel ("Oops", &fp->ptregs, buserr_type); |
483 | force_sig (SIGBUS, current); | 465 | force_sig (SIGBUS, current); |
484 | return; | 466 | return; |
@@ -509,9 +491,7 @@ static inline void bus_error030 (struct frame *fp) | |||
509 | if (!mmu_emu_handle_fault(addr, 1, 0)) | 491 | if (!mmu_emu_handle_fault(addr, 1, 0)) |
510 | do_page_fault (&fp->ptregs, addr, 0); | 492 | do_page_fault (&fp->ptregs, addr, 0); |
511 | } else { | 493 | } else { |
512 | #ifdef DEBUG | 494 | pr_debug("protection fault on insn access (segv).\n"); |
513 | printk ("protection fault on insn access (segv).\n"); | ||
514 | #endif | ||
515 | force_sig (SIGSEGV, current); | 495 | force_sig (SIGSEGV, current); |
516 | } | 496 | } |
517 | } | 497 | } |
@@ -525,22 +505,22 @@ static inline void bus_error030 (struct frame *fp) | |||
525 | unsigned short ssw = fp->un.fmtb.ssw; | 505 | unsigned short ssw = fp->un.fmtb.ssw; |
526 | #ifdef DEBUG | 506 | #ifdef DEBUG |
527 | unsigned long desc; | 507 | unsigned long desc; |
508 | #endif | ||
528 | 509 | ||
529 | printk ("pid = %x ", current->pid); | 510 | pr_debug("pid = %x ", current->pid); |
530 | printk ("SSW=%#06x ", ssw); | 511 | pr_debug("SSW=%#06x ", ssw); |
531 | 512 | ||
532 | if (ssw & (FC | FB)) | 513 | if (ssw & (FC | FB)) |
533 | printk ("Instruction fault at %#010lx\n", | 514 | pr_debug("Instruction fault at %#010lx\n", |
534 | ssw & FC ? | 515 | ssw & FC ? |
535 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 | 516 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 |
536 | : | 517 | : |
537 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); | 518 | fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); |
538 | if (ssw & DF) | 519 | if (ssw & DF) |
539 | printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", | 520 | pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", |
540 | ssw & RW ? "read" : "write", | 521 | ssw & RW ? "read" : "write", |
541 | fp->un.fmtb.daddr, | 522 | fp->un.fmtb.daddr, |
542 | space_names[ssw & DFC], fp->ptregs.pc); | 523 | space_names[ssw & DFC], fp->ptregs.pc); |
543 | #endif | ||
544 | 524 | ||
545 | /* ++andreas: If a data fault and an instruction fault happen | 525 | /* ++andreas: If a data fault and an instruction fault happen |
546 | at the same time map in both pages. */ | 526 | at the same time map in both pages. */ |
@@ -554,27 +534,23 @@ static inline void bus_error030 (struct frame *fp) | |||
554 | "pmove %%psr,%1" | 534 | "pmove %%psr,%1" |
555 | : "=a&" (desc), "=m" (temp) | 535 | : "=a&" (desc), "=m" (temp) |
556 | : "a" (addr), "d" (ssw)); | 536 | : "a" (addr), "d" (ssw)); |
537 | pr_debug("mmusr is %#x for addr %#lx in task %p\n", | ||
538 | temp, addr, current); | ||
539 | pr_debug("descriptor address is 0x%p, contents %#lx\n", | ||
540 | __va(desc), *(unsigned long *)__va(desc)); | ||
557 | #else | 541 | #else |
558 | asm volatile ("ptestr %2,%1@,#7\n\t" | 542 | asm volatile ("ptestr %2,%1@,#7\n\t" |
559 | "pmove %%psr,%0" | 543 | "pmove %%psr,%0" |
560 | : "=m" (temp) : "a" (addr), "d" (ssw)); | 544 | : "=m" (temp) : "a" (addr), "d" (ssw)); |
561 | #endif | 545 | #endif |
562 | mmusr = temp; | 546 | mmusr = temp; |
563 | |||
564 | #ifdef DEBUG | ||
565 | printk("mmusr is %#x for addr %#lx in task %p\n", | ||
566 | mmusr, addr, current); | ||
567 | printk("descriptor address is %#lx, contents %#lx\n", | ||
568 | __va(desc), *(unsigned long *)__va(desc)); | ||
569 | #endif | ||
570 | |||
571 | errorcode = (mmusr & MMU_I) ? 0 : 1; | 547 | errorcode = (mmusr & MMU_I) ? 0 : 1; |
572 | if (!(ssw & RW) || (ssw & RM)) | 548 | if (!(ssw & RW) || (ssw & RM)) |
573 | errorcode |= 2; | 549 | errorcode |= 2; |
574 | 550 | ||
575 | if (mmusr & (MMU_I | MMU_WP)) { | 551 | if (mmusr & (MMU_I | MMU_WP)) { |
576 | if (ssw & 4) { | 552 | if (ssw & 4) { |
577 | printk("Data %s fault at %#010lx in %s (pc=%#lx)\n", | 553 | pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", |
578 | ssw & RW ? "read" : "write", | 554 | ssw & RW ? "read" : "write", |
579 | fp->un.fmtb.daddr, | 555 | fp->un.fmtb.daddr, |
580 | space_names[ssw & DFC], fp->ptregs.pc); | 556 | space_names[ssw & DFC], fp->ptregs.pc); |
@@ -587,9 +563,10 @@ static inline void bus_error030 (struct frame *fp) | |||
587 | } else if (!(mmusr & MMU_I)) { | 563 | } else if (!(mmusr & MMU_I)) { |
588 | /* probably a 020 cas fault */ | 564 | /* probably a 020 cas fault */ |
589 | if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) | 565 | if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) |
590 | printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr); | 566 | pr_err("unexpected bus error (%#x,%#x)\n", ssw, |
567 | mmusr); | ||
591 | } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { | 568 | } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { |
592 | printk("invalid %s access at %#lx from pc %#lx\n", | 569 | pr_err("invalid %s access at %#lx from pc %#lx\n", |
593 | !(ssw & RW) ? "write" : "read", addr, | 570 | !(ssw & RW) ? "write" : "read", addr, |
594 | fp->ptregs.pc); | 571 | fp->ptregs.pc); |
595 | die_if_kernel("Oops",&fp->ptregs,mmusr); | 572 | die_if_kernel("Oops",&fp->ptregs,mmusr); |
@@ -600,7 +577,7 @@ static inline void bus_error030 (struct frame *fp) | |||
600 | static volatile long tlong; | 577 | static volatile long tlong; |
601 | #endif | 578 | #endif |
602 | 579 | ||
603 | printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", | 580 | pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", |
604 | !(ssw & RW) ? "write" : "read", addr, | 581 | !(ssw & RW) ? "write" : "read", addr, |
605 | fp->ptregs.pc, ssw); | 582 | fp->ptregs.pc, ssw); |
606 | asm volatile ("ptestr #1,%1@,#0\n\t" | 583 | asm volatile ("ptestr #1,%1@,#0\n\t" |
@@ -609,18 +586,16 @@ static inline void bus_error030 (struct frame *fp) | |||
609 | : "a" (addr)); | 586 | : "a" (addr)); |
610 | mmusr = temp; | 587 | mmusr = temp; |
611 | 588 | ||
612 | printk ("level 0 mmusr is %#x\n", mmusr); | 589 | pr_err("level 0 mmusr is %#x\n", mmusr); |
613 | #if 0 | 590 | #if 0 |
614 | asm volatile ("pmove %%tt0,%0" | 591 | asm volatile ("pmove %%tt0,%0" |
615 | : "=m" (tlong)); | 592 | : "=m" (tlong)); |
616 | printk("tt0 is %#lx, ", tlong); | 593 | pr_debug("tt0 is %#lx, ", tlong); |
617 | asm volatile ("pmove %%tt1,%0" | 594 | asm volatile ("pmove %%tt1,%0" |
618 | : "=m" (tlong)); | 595 | : "=m" (tlong)); |
619 | printk("tt1 is %#lx\n", tlong); | 596 | pr_debug("tt1 is %#lx\n", tlong); |
620 | #endif | ||
621 | #ifdef DEBUG | ||
622 | printk("Unknown SIGSEGV - 1\n"); | ||
623 | #endif | 597 | #endif |
598 | pr_debug("Unknown SIGSEGV - 1\n"); | ||
624 | die_if_kernel("Oops",&fp->ptregs,mmusr); | 599 | die_if_kernel("Oops",&fp->ptregs,mmusr); |
625 | force_sig(SIGSEGV, current); | 600 | force_sig(SIGSEGV, current); |
626 | return; | 601 | return; |
@@ -641,10 +616,9 @@ static inline void bus_error030 (struct frame *fp) | |||
641 | return; | 616 | return; |
642 | 617 | ||
643 | if (fp->ptregs.sr & PS_S) { | 618 | if (fp->ptregs.sr & PS_S) { |
644 | printk("Instruction fault at %#010lx\n", | 619 | pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); |
645 | fp->ptregs.pc); | ||
646 | buserr: | 620 | buserr: |
647 | printk ("BAD KERNEL BUSERR\n"); | 621 | pr_err("BAD KERNEL BUSERR\n"); |
648 | die_if_kernel("Oops",&fp->ptregs,0); | 622 | die_if_kernel("Oops",&fp->ptregs,0); |
649 | force_sig(SIGKILL, current); | 623 | force_sig(SIGKILL, current); |
650 | return; | 624 | return; |
@@ -668,28 +642,22 @@ static inline void bus_error030 (struct frame *fp) | |||
668 | "pmove %%psr,%1" | 642 | "pmove %%psr,%1" |
669 | : "=a&" (desc), "=m" (temp) | 643 | : "=a&" (desc), "=m" (temp) |
670 | : "a" (addr)); | 644 | : "a" (addr)); |
645 | pr_debug("mmusr is %#x for addr %#lx in task %p\n", | ||
646 | temp, addr, current); | ||
647 | pr_debug("descriptor address is 0x%p, contents %#lx\n", | ||
648 | __va(desc), *(unsigned long *)__va(desc)); | ||
671 | #else | 649 | #else |
672 | asm volatile ("ptestr #1,%1@,#7\n\t" | 650 | asm volatile ("ptestr #1,%1@,#7\n\t" |
673 | "pmove %%psr,%0" | 651 | "pmove %%psr,%0" |
674 | : "=m" (temp) : "a" (addr)); | 652 | : "=m" (temp) : "a" (addr)); |
675 | #endif | 653 | #endif |
676 | mmusr = temp; | 654 | mmusr = temp; |
677 | |||
678 | #ifdef DEBUG | ||
679 | printk ("mmusr is %#x for addr %#lx in task %p\n", | ||
680 | mmusr, addr, current); | ||
681 | printk ("descriptor address is %#lx, contents %#lx\n", | ||
682 | __va(desc), *(unsigned long *)__va(desc)); | ||
683 | #endif | ||
684 | |||
685 | if (mmusr & MMU_I) | 655 | if (mmusr & MMU_I) |
686 | do_page_fault (&fp->ptregs, addr, 0); | 656 | do_page_fault (&fp->ptregs, addr, 0); |
687 | else if (mmusr & (MMU_B|MMU_L|MMU_S)) { | 657 | else if (mmusr & (MMU_B|MMU_L|MMU_S)) { |
688 | printk ("invalid insn access at %#lx from pc %#lx\n", | 658 | pr_err("invalid insn access at %#lx from pc %#lx\n", |
689 | addr, fp->ptregs.pc); | 659 | addr, fp->ptregs.pc); |
690 | #ifdef DEBUG | 660 | pr_debug("Unknown SIGSEGV - 2\n"); |
691 | printk("Unknown SIGSEGV - 2\n"); | ||
692 | #endif | ||
693 | die_if_kernel("Oops",&fp->ptregs,mmusr); | 661 | die_if_kernel("Oops",&fp->ptregs,mmusr); |
694 | force_sig(SIGSEGV, current); | 662 | force_sig(SIGSEGV, current); |
695 | return; | 663 | return; |
@@ -791,9 +759,7 @@ asmlinkage void buserr_c(struct frame *fp) | |||
791 | if (user_mode(&fp->ptregs)) | 759 | if (user_mode(&fp->ptregs)) |
792 | current->thread.esp0 = (unsigned long) fp; | 760 | current->thread.esp0 = (unsigned long) fp; |
793 | 761 | ||
794 | #ifdef DEBUG | 762 | pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format); |
795 | printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format); | ||
796 | #endif | ||
797 | 763 | ||
798 | #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) | 764 | #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) |
799 | if (CPU_IS_COLDFIRE) { | 765 | if (CPU_IS_COLDFIRE) { |
@@ -836,9 +802,7 @@ asmlinkage void buserr_c(struct frame *fp) | |||
836 | #endif | 802 | #endif |
837 | default: | 803 | default: |
838 | die_if_kernel("bad frame format",&fp->ptregs,0); | 804 | die_if_kernel("bad frame format",&fp->ptregs,0); |
839 | #ifdef DEBUG | 805 | pr_debug("Unknown SIGSEGV - 4\n"); |
840 | printk("Unknown SIGSEGV - 4\n"); | ||
841 | #endif | ||
842 | force_sig(SIGSEGV, current); | 806 | force_sig(SIGSEGV, current); |
843 | } | 807 | } |
844 | } | 808 | } |
@@ -852,7 +816,7 @@ void show_trace(unsigned long *stack) | |||
852 | unsigned long addr; | 816 | unsigned long addr; |
853 | int i; | 817 | int i; |
854 | 818 | ||
855 | printk("Call Trace:"); | 819 | pr_info("Call Trace:"); |
856 | addr = (unsigned long)stack + THREAD_SIZE - 1; | 820 | addr = (unsigned long)stack + THREAD_SIZE - 1; |
857 | endstack = (unsigned long *)(addr & -THREAD_SIZE); | 821 | endstack = (unsigned long *)(addr & -THREAD_SIZE); |
858 | i = 0; | 822 | i = 0; |
@@ -869,13 +833,13 @@ void show_trace(unsigned long *stack) | |||
869 | if (__kernel_text_address(addr)) { | 833 | if (__kernel_text_address(addr)) { |
870 | #ifndef CONFIG_KALLSYMS | 834 | #ifndef CONFIG_KALLSYMS |
871 | if (i % 5 == 0) | 835 | if (i % 5 == 0) |
872 | printk("\n "); | 836 | pr_cont("\n "); |
873 | #endif | 837 | #endif |
874 | printk(" [<%08lx>] %pS\n", addr, (void *)addr); | 838 | pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); |
875 | i++; | 839 | i++; |
876 | } | 840 | } |
877 | } | 841 | } |
878 | printk("\n"); | 842 | pr_cont("\n"); |
879 | } | 843 | } |
880 | 844 | ||
881 | void show_registers(struct pt_regs *regs) | 845 | void show_registers(struct pt_regs *regs) |
@@ -887,81 +851,87 @@ void show_registers(struct pt_regs *regs) | |||
887 | int i; | 851 | int i; |
888 | 852 | ||
889 | print_modules(); | 853 | print_modules(); |
890 | printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); | 854 | pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); |
891 | printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); | 855 | pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); |
892 | printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", | 856 | pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", |
893 | regs->d0, regs->d1, regs->d2, regs->d3); | 857 | regs->d0, regs->d1, regs->d2, regs->d3); |
894 | printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", | 858 | pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", |
895 | regs->d4, regs->d5, regs->a0, regs->a1); | 859 | regs->d4, regs->d5, regs->a0, regs->a1); |
896 | 860 | ||
897 | printk("Process %s (pid: %d, task=%p)\n", | 861 | pr_info("Process %s (pid: %d, task=%p)\n", |
898 | current->comm, task_pid_nr(current), current); | 862 | current->comm, task_pid_nr(current), current); |
899 | addr = (unsigned long)&fp->un; | 863 | addr = (unsigned long)&fp->un; |
900 | printk("Frame format=%X ", regs->format); | 864 | pr_info("Frame format=%X ", regs->format); |
901 | switch (regs->format) { | 865 | switch (regs->format) { |
902 | case 0x2: | 866 | case 0x2: |
903 | printk("instr addr=%08lx\n", fp->un.fmt2.iaddr); | 867 | pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr); |
904 | addr += sizeof(fp->un.fmt2); | 868 | addr += sizeof(fp->un.fmt2); |
905 | break; | 869 | break; |
906 | case 0x3: | 870 | case 0x3: |
907 | printk("eff addr=%08lx\n", fp->un.fmt3.effaddr); | 871 | pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr); |
908 | addr += sizeof(fp->un.fmt3); | 872 | addr += sizeof(fp->un.fmt3); |
909 | break; | 873 | break; |
910 | case 0x4: | 874 | case 0x4: |
911 | printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n" | 875 | if (CPU_IS_060) |
912 | : "eff addr=%08lx pc=%08lx\n"), | 876 | pr_cont("fault addr=%08lx fslw=%08lx\n", |
913 | fp->un.fmt4.effaddr, fp->un.fmt4.pc); | 877 | fp->un.fmt4.effaddr, fp->un.fmt4.pc); |
878 | else | ||
879 | pr_cont("eff addr=%08lx pc=%08lx\n", | ||
880 | fp->un.fmt4.effaddr, fp->un.fmt4.pc); | ||
914 | addr += sizeof(fp->un.fmt4); | 881 | addr += sizeof(fp->un.fmt4); |
915 | break; | 882 | break; |
916 | case 0x7: | 883 | case 0x7: |
917 | printk("eff addr=%08lx ssw=%04x faddr=%08lx\n", | 884 | pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n", |
918 | fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); | 885 | fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); |
919 | printk("wb 1 stat/addr/data: %04x %08lx %08lx\n", | 886 | pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n", |
920 | fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); | 887 | fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); |
921 | printk("wb 2 stat/addr/data: %04x %08lx %08lx\n", | 888 | pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n", |
922 | fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); | 889 | fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); |
923 | printk("wb 3 stat/addr/data: %04x %08lx %08lx\n", | 890 | pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n", |
924 | fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); | 891 | fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); |
925 | printk("push data: %08lx %08lx %08lx %08lx\n", | 892 | pr_info("push data: %08lx %08lx %08lx %08lx\n", |
926 | fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, | 893 | fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, |
927 | fp->un.fmt7.pd3); | 894 | fp->un.fmt7.pd3); |
928 | addr += sizeof(fp->un.fmt7); | 895 | addr += sizeof(fp->un.fmt7); |
929 | break; | 896 | break; |
930 | case 0x9: | 897 | case 0x9: |
931 | printk("instr addr=%08lx\n", fp->un.fmt9.iaddr); | 898 | pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr); |
932 | addr += sizeof(fp->un.fmt9); | 899 | addr += sizeof(fp->un.fmt9); |
933 | break; | 900 | break; |
934 | case 0xa: | 901 | case 0xa: |
935 | printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", | 902 | pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", |
936 | fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, | 903 | fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, |
937 | fp->un.fmta.daddr, fp->un.fmta.dobuf); | 904 | fp->un.fmta.daddr, fp->un.fmta.dobuf); |
938 | addr += sizeof(fp->un.fmta); | 905 | addr += sizeof(fp->un.fmta); |
939 | break; | 906 | break; |
940 | case 0xb: | 907 | case 0xb: |
941 | printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", | 908 | pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", |
942 | fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, | 909 | fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, |
943 | fp->un.fmtb.daddr, fp->un.fmtb.dobuf); | 910 | fp->un.fmtb.daddr, fp->un.fmtb.dobuf); |
944 | printk("baddr=%08lx dibuf=%08lx ver=%x\n", | 911 | pr_info("baddr=%08lx dibuf=%08lx ver=%x\n", |
945 | fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); | 912 | fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); |
946 | addr += sizeof(fp->un.fmtb); | 913 | addr += sizeof(fp->un.fmtb); |
947 | break; | 914 | break; |
948 | default: | 915 | default: |
949 | printk("\n"); | 916 | pr_cont("\n"); |
950 | } | 917 | } |
951 | show_stack(NULL, (unsigned long *)addr); | 918 | show_stack(NULL, (unsigned long *)addr); |
952 | 919 | ||
953 | printk("Code:"); | 920 | pr_info("Code:"); |
954 | set_fs(KERNEL_DS); | 921 | set_fs(KERNEL_DS); |
955 | cp = (u16 *)regs->pc; | 922 | cp = (u16 *)regs->pc; |
956 | for (i = -8; i < 16; i++) { | 923 | for (i = -8; i < 16; i++) { |
957 | if (get_user(c, cp + i) && i >= 0) { | 924 | if (get_user(c, cp + i) && i >= 0) { |
958 | printk(" Bad PC value."); | 925 | pr_cont(" Bad PC value."); |
959 | break; | 926 | break; |
960 | } | 927 | } |
961 | printk(i ? " %04x" : " <%04x>", c); | 928 | if (i) |
929 | pr_cont(" %04x", c); | ||
930 | else | ||
931 | pr_cont(" <%04x>", c); | ||
962 | } | 932 | } |
963 | set_fs(old_fs); | 933 | set_fs(old_fs); |
964 | printk ("\n"); | 934 | pr_cont("\n"); |
965 | } | 935 | } |
966 | 936 | ||
967 | void show_stack(struct task_struct *task, unsigned long *stack) | 937 | void show_stack(struct task_struct *task, unsigned long *stack) |
@@ -978,16 +948,16 @@ void show_stack(struct task_struct *task, unsigned long *stack) | |||
978 | } | 948 | } |
979 | endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); | 949 | endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); |
980 | 950 | ||
981 | printk("Stack from %08lx:", (unsigned long)stack); | 951 | pr_info("Stack from %08lx:", (unsigned long)stack); |
982 | p = stack; | 952 | p = stack; |
983 | for (i = 0; i < kstack_depth_to_print; i++) { | 953 | for (i = 0; i < kstack_depth_to_print; i++) { |
984 | if (p + 1 > endstack) | 954 | if (p + 1 > endstack) |
985 | break; | 955 | break; |
986 | if (i % 8 == 0) | 956 | if (i % 8 == 0) |
987 | printk("\n "); | 957 | pr_cont("\n "); |
988 | printk(" %08lx", *p++); | 958 | pr_cont(" %08lx", *p++); |
989 | } | 959 | } |
990 | printk("\n"); | 960 | pr_cont("\n"); |
991 | show_trace(stack); | 961 | show_trace(stack); |
992 | } | 962 | } |
993 | 963 | ||
@@ -1005,32 +975,32 @@ void bad_super_trap (struct frame *fp) | |||
1005 | 975 | ||
1006 | console_verbose(); | 976 | console_verbose(); |
1007 | if (vector < ARRAY_SIZE(vec_names)) | 977 | if (vector < ARRAY_SIZE(vec_names)) |
1008 | printk ("*** %s *** FORMAT=%X\n", | 978 | pr_err("*** %s *** FORMAT=%X\n", |
1009 | vec_names[vector], | 979 | vec_names[vector], |
1010 | fp->ptregs.format); | 980 | fp->ptregs.format); |
1011 | else | 981 | else |
1012 | printk ("*** Exception %d *** FORMAT=%X\n", | 982 | pr_err("*** Exception %d *** FORMAT=%X\n", |
1013 | vector, fp->ptregs.format); | 983 | vector, fp->ptregs.format); |
1014 | if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { | 984 | if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { |
1015 | unsigned short ssw = fp->un.fmtb.ssw; | 985 | unsigned short ssw = fp->un.fmtb.ssw; |
1016 | 986 | ||
1017 | printk ("SSW=%#06x ", ssw); | 987 | pr_err("SSW=%#06x ", ssw); |
1018 | 988 | ||
1019 | if (ssw & RC) | 989 | if (ssw & RC) |
1020 | printk ("Pipe stage C instruction fault at %#010lx\n", | 990 | pr_err("Pipe stage C instruction fault at %#010lx\n", |
1021 | (fp->ptregs.format) == 0xA ? | 991 | (fp->ptregs.format) == 0xA ? |
1022 | fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); | 992 | fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); |
1023 | if (ssw & RB) | 993 | if (ssw & RB) |
1024 | printk ("Pipe stage B instruction fault at %#010lx\n", | 994 | pr_err("Pipe stage B instruction fault at %#010lx\n", |
1025 | (fp->ptregs.format) == 0xA ? | 995 | (fp->ptregs.format) == 0xA ? |
1026 | fp->ptregs.pc + 4 : fp->un.fmtb.baddr); | 996 | fp->ptregs.pc + 4 : fp->un.fmtb.baddr); |
1027 | if (ssw & DF) | 997 | if (ssw & DF) |
1028 | printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n", | 998 | pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", |
1029 | ssw & RW ? "read" : "write", | 999 | ssw & RW ? "read" : "write", |
1030 | fp->un.fmtb.daddr, space_names[ssw & DFC], | 1000 | fp->un.fmtb.daddr, space_names[ssw & DFC], |
1031 | fp->ptregs.pc); | 1001 | fp->ptregs.pc); |
1032 | } | 1002 | } |
1033 | printk ("Current process id is %d\n", task_pid_nr(current)); | 1003 | pr_err("Current process id is %d\n", task_pid_nr(current)); |
1034 | die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); | 1004 | die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); |
1035 | } | 1005 | } |
1036 | 1006 | ||
@@ -1162,7 +1132,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr) | |||
1162 | return; | 1132 | return; |
1163 | 1133 | ||
1164 | console_verbose(); | 1134 | console_verbose(); |
1165 | printk("%s: %08x\n",str,nr); | 1135 | pr_crit("%s: %08x\n", str, nr); |
1166 | show_registers(fp); | 1136 | show_registers(fp); |
1167 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); | 1137 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); |
1168 | do_exit(SIGSEGV); | 1138 | do_exit(SIGSEGV); |
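The traps.c hunks above replace bare printk() calls and #ifdef DEBUG blocks with the pr_err()/pr_info()/pr_cont()/pr_debug() helpers. As a simplified sketch (the real definitions in include/linux/printk.h also apply pr_fmt() and hook into dynamic debug), the helpers boil down to:

    /* Simplified sketch of the printk helpers used above. */
    #define pr_err(fmt, ...)    printk(KERN_ERR fmt, ##__VA_ARGS__)
    #define pr_info(fmt, ...)   printk(KERN_INFO fmt, ##__VA_ARGS__)
    #define pr_cont(fmt, ...)   printk(KERN_CONT fmt, ##__VA_ARGS__)

    #ifdef DEBUG
    #define pr_debug(fmt, ...)  printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #else
    #define pr_debug(fmt, ...)  no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
    #endif

pr_cont() marks a continuation of the line started by a preceding printk, which is why the register, stack and code dumps use it for the pieces that follow a pr_info() header, while pr_debug() keeps the old "only in DEBUG builds" behaviour without scattering #ifdefs through the code. Splitting the conditional format string in the fmt4 case into an explicit if/else also lets the compiler check each format string against its arguments.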
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index afb95d5fb26b..982c3fe73c4a 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c | |||
@@ -26,9 +26,10 @@ | |||
26 | #include <linux/adb.h> | 26 | #include <linux/adb.h> |
27 | #include <linux/cuda.h> | 27 | #include <linux/cuda.h> |
28 | 28 | ||
29 | #define BOOTINFO_COMPAT_1_0 | ||
30 | #include <asm/setup.h> | 29 | #include <asm/setup.h> |
31 | #include <asm/bootinfo.h> | 30 | #include <asm/bootinfo.h> |
31 | #include <asm/bootinfo-mac.h> | ||
32 | #include <asm/byteorder.h> | ||
32 | 33 | ||
33 | #include <asm/io.h> | 34 | #include <asm/io.h> |
34 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
@@ -107,45 +108,46 @@ static void __init mac_sched_init(irq_handler_t vector) | |||
107 | int __init mac_parse_bootinfo(const struct bi_record *record) | 108 | int __init mac_parse_bootinfo(const struct bi_record *record) |
108 | { | 109 | { |
109 | int unknown = 0; | 110 | int unknown = 0; |
110 | const u_long *data = record->data; | 111 | const void *data = record->data; |
111 | 112 | ||
112 | switch (record->tag) { | 113 | switch (be16_to_cpu(record->tag)) { |
113 | case BI_MAC_MODEL: | 114 | case BI_MAC_MODEL: |
114 | mac_bi_data.id = *data; | 115 | mac_bi_data.id = be32_to_cpup(data); |
115 | break; | 116 | break; |
116 | case BI_MAC_VADDR: | 117 | case BI_MAC_VADDR: |
117 | mac_bi_data.videoaddr = *data; | 118 | mac_bi_data.videoaddr = be32_to_cpup(data); |
118 | break; | 119 | break; |
119 | case BI_MAC_VDEPTH: | 120 | case BI_MAC_VDEPTH: |
120 | mac_bi_data.videodepth = *data; | 121 | mac_bi_data.videodepth = be32_to_cpup(data); |
121 | break; | 122 | break; |
122 | case BI_MAC_VROW: | 123 | case BI_MAC_VROW: |
123 | mac_bi_data.videorow = *data; | 124 | mac_bi_data.videorow = be32_to_cpup(data); |
124 | break; | 125 | break; |
125 | case BI_MAC_VDIM: | 126 | case BI_MAC_VDIM: |
126 | mac_bi_data.dimensions = *data; | 127 | mac_bi_data.dimensions = be32_to_cpup(data); |
127 | break; | 128 | break; |
128 | case BI_MAC_VLOGICAL: | 129 | case BI_MAC_VLOGICAL: |
129 | mac_bi_data.videological = VIDEOMEMBASE + (*data & ~VIDEOMEMMASK); | 130 | mac_orig_videoaddr = be32_to_cpup(data); |
130 | mac_orig_videoaddr = *data; | 131 | mac_bi_data.videological = |
132 | VIDEOMEMBASE + (mac_orig_videoaddr & ~VIDEOMEMMASK); | ||
131 | break; | 133 | break; |
132 | case BI_MAC_SCCBASE: | 134 | case BI_MAC_SCCBASE: |
133 | mac_bi_data.sccbase = *data; | 135 | mac_bi_data.sccbase = be32_to_cpup(data); |
134 | break; | 136 | break; |
135 | case BI_MAC_BTIME: | 137 | case BI_MAC_BTIME: |
136 | mac_bi_data.boottime = *data; | 138 | mac_bi_data.boottime = be32_to_cpup(data); |
137 | break; | 139 | break; |
138 | case BI_MAC_GMTBIAS: | 140 | case BI_MAC_GMTBIAS: |
139 | mac_bi_data.gmtbias = *data; | 141 | mac_bi_data.gmtbias = be32_to_cpup(data); |
140 | break; | 142 | break; |
141 | case BI_MAC_MEMSIZE: | 143 | case BI_MAC_MEMSIZE: |
142 | mac_bi_data.memsize = *data; | 144 | mac_bi_data.memsize = be32_to_cpup(data); |
143 | break; | 145 | break; |
144 | case BI_MAC_CPUID: | 146 | case BI_MAC_CPUID: |
145 | mac_bi_data.cpuid = *data; | 147 | mac_bi_data.cpuid = be32_to_cpup(data); |
146 | break; | 148 | break; |
147 | case BI_MAC_ROMBASE: | 149 | case BI_MAC_ROMBASE: |
148 | mac_bi_data.rombase = *data; | 150 | mac_bi_data.rombase = be32_to_cpup(data); |
149 | break; | 151 | break; |
150 | default: | 152 | default: |
151 | unknown = 1; | 153 | unknown = 1; |
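In mac_parse_bootinfo() the bootinfo record fields are now read through be16_to_cpu()/be32_to_cpup(), i.e. the records are treated as explicitly big-endian data rather than bare unsigned longs. A hand-rolled illustration of what such a 32-bit conversion does (the kernel helpers additionally carry __be16/__be32 types that sparse can check; read_be32() below is just an example name):

    #include <stdint.h>

    /* Decode a 32-bit big-endian field from a record payload. On a
     * big-endian CPU such as m68k this is effectively a plain load;
     * on a little-endian CPU the bytes are swapped. */
    static uint32_t read_be32(const void *p)
    {
            const uint8_t *b = p;

            return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
                   ((uint32_t)b[2] <<  8) |  (uint32_t)b[3];
    }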
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 7d8d46127ad9..4d2adfb32a2a 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c | |||
@@ -111,16 +111,15 @@ | |||
111 | #include <linux/init.h> | 111 | #include <linux/init.h> |
112 | #include <linux/interrupt.h> | 112 | #include <linux/interrupt.h> |
113 | 113 | ||
114 | #include <asm/bootinfo.h> | ||
115 | #include <asm/macintosh.h> | 114 | #include <asm/macintosh.h> |
116 | #include <asm/macints.h> | 115 | #include <asm/macints.h> |
117 | #include <asm/mac_iop.h> | 116 | #include <asm/mac_iop.h> |
118 | 117 | ||
119 | /*#define DEBUG_IOP*/ | 118 | /*#define DEBUG_IOP*/ |
120 | 119 | ||
121 | /* Set to non-zero if the IOPs are present. Set by iop_init() */ | 120 | /* Non-zero if the IOPs are present */ |
122 | 121 | ||
123 | int iop_scc_present,iop_ism_present; | 122 | int iop_scc_present, iop_ism_present; |
124 | 123 | ||
125 | /* structure for tracking channel listeners */ | 124 | /* structure for tracking channel listeners */ |
126 | 125 | ||
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 5e085554ac7f..707b61aea203 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c | |||
@@ -25,8 +25,6 @@ | |||
25 | #include <asm/mac_via.h> | 25 | #include <asm/mac_via.h> |
26 | #include <asm/mac_oss.h> | 26 | #include <asm/mac_oss.h> |
27 | 27 | ||
28 | #define BOOTINFO_COMPAT_1_0 | ||
29 | #include <asm/bootinfo.h> | ||
30 | #include <asm/machdep.h> | 28 | #include <asm/machdep.h> |
31 | 29 | ||
32 | /* Offset between Unix time (1970-based) and Mac time (1904-based) */ | 30 | /* Offset between Unix time (1970-based) and Mac time (1904-based) */ |
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c index 6c4c882c126e..54037125ebf8 100644 --- a/arch/m68k/mac/oss.c +++ b/arch/m68k/mac/oss.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
23 | 23 | ||
24 | #include <asm/bootinfo.h> | ||
25 | #include <asm/macintosh.h> | 24 | #include <asm/macintosh.h> |
26 | #include <asm/macints.h> | 25 | #include <asm/macints.h> |
27 | #include <asm/mac_via.h> | 26 | #include <asm/mac_via.h> |
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 6f026fc302fa..835fa04511c8 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
22 | 22 | ||
23 | #include <asm/traps.h> | 23 | #include <asm/traps.h> |
24 | #include <asm/bootinfo.h> | ||
25 | #include <asm/macintosh.h> | 24 | #include <asm/macintosh.h> |
26 | #include <asm/macints.h> | 25 | #include <asm/macints.h> |
27 | #include <asm/mac_psc.h> | 26 | #include <asm/mac_psc.h> |
@@ -54,7 +53,7 @@ static void psc_debug_dump(void) | |||
54 | * expanded to cover what I think are the other 7 channels. | 53 | * expanded to cover what I think are the other 7 channels. |
55 | */ | 54 | */ |
56 | 55 | ||
57 | static void psc_dma_die_die_die(void) | 56 | static __init void psc_dma_die_die_die(void) |
58 | { | 57 | { |
59 | int i; | 58 | int i; |
60 | 59 | ||
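psc_dma_die_die_die() gains __init here, and the same annotation shows up on several other boot-only helpers later in this patch (q40_disable_irqs(), the *_parse_bootinfo() functions, sun3_dvma_init(), mmu_emu_init(), dvma_init()). __init places a function in the .init.text section, which the kernel frees once boot has finished, so such a function must only be called from other init code or modpost will warn about a section mismatch. A minimal sketch (the function name is made up):

    #include <linux/init.h>

    /* Lives in .init.text and is discarded after boot; call it only
     * from other __init code. */
    static void __init example_board_quirks(void)
    {
            /* one-time probing / table setup done here */
    }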
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 5d1458bb871b..e198dec868e4 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/irq.h> | 31 | #include <linux/irq.h> |
32 | 32 | ||
33 | #include <asm/bootinfo.h> | ||
34 | #include <asm/macintosh.h> | 33 | #include <asm/macintosh.h> |
35 | #include <asm/macints.h> | 34 | #include <asm/macints.h> |
36 | #include <asm/mac_via.h> | 35 | #include <asm/mac_via.h> |
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index eb1d61f68725..2bd7487440c4 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c | |||
@@ -25,9 +25,8 @@ int send_fault_sig(struct pt_regs *regs) | |||
25 | siginfo.si_signo = current->thread.signo; | 25 | siginfo.si_signo = current->thread.signo; |
26 | siginfo.si_code = current->thread.code; | 26 | siginfo.si_code = current->thread.code; |
27 | siginfo.si_addr = (void *)current->thread.faddr; | 27 | siginfo.si_addr = (void *)current->thread.faddr; |
28 | #ifdef DEBUG | 28 | pr_debug("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, |
29 | printk("send_fault_sig: %p,%d,%d\n", siginfo.si_addr, siginfo.si_signo, siginfo.si_code); | 29 | siginfo.si_signo, siginfo.si_code); |
30 | #endif | ||
31 | 30 | ||
32 | if (user_mode(regs)) { | 31 | if (user_mode(regs)) { |
33 | force_sig_info(siginfo.si_signo, | 32 | force_sig_info(siginfo.si_signo, |
@@ -45,10 +44,10 @@ int send_fault_sig(struct pt_regs *regs) | |||
45 | * terminate things with extreme prejudice. | 44 | * terminate things with extreme prejudice. |
46 | */ | 45 | */ |
47 | if ((unsigned long)siginfo.si_addr < PAGE_SIZE) | 46 | if ((unsigned long)siginfo.si_addr < PAGE_SIZE) |
48 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); | 47 | pr_alert("Unable to handle kernel NULL pointer dereference"); |
49 | else | 48 | else |
50 | printk(KERN_ALERT "Unable to handle kernel access"); | 49 | pr_alert("Unable to handle kernel access"); |
51 | printk(" at virtual address %p\n", siginfo.si_addr); | 50 | pr_cont(" at virtual address %p\n", siginfo.si_addr); |
52 | die_if_kernel("Oops", regs, 0 /*error_code*/); | 51 | die_if_kernel("Oops", regs, 0 /*error_code*/); |
53 | do_exit(SIGKILL); | 52 | do_exit(SIGKILL); |
54 | } | 53 | } |
@@ -75,11 +74,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, | |||
75 | int fault; | 74 | int fault; |
76 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 75 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
77 | 76 | ||
78 | #ifdef DEBUG | 77 | pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", |
79 | printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", | 78 | regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL); |
80 | regs->sr, regs->pc, address, error_code, | ||
81 | current->mm->pgd); | ||
82 | #endif | ||
83 | 79 | ||
84 | /* | 80 | /* |
85 | * If we're in an interrupt or have no user | 81 | * If we're in an interrupt or have no user |
@@ -118,9 +114,7 @@ retry: | |||
118 | * we can handle it.. | 114 | * we can handle it.. |
119 | */ | 115 | */ |
120 | good_area: | 116 | good_area: |
121 | #ifdef DEBUG | 117 | pr_debug("do_page_fault: good_area\n"); |
122 | printk("do_page_fault: good_area\n"); | ||
123 | #endif | ||
124 | switch (error_code & 3) { | 118 | switch (error_code & 3) { |
125 | default: /* 3: write, present */ | 119 | default: /* 3: write, present */ |
126 | /* fall through */ | 120 | /* fall through */ |
@@ -143,9 +137,7 @@ good_area: | |||
143 | */ | 137 | */ |
144 | 138 | ||
145 | fault = handle_mm_fault(mm, vma, address, flags); | 139 | fault = handle_mm_fault(mm, vma, address, flags); |
146 | #ifdef DEBUG | 140 | pr_debug("handle_mm_fault returns %d\n", fault); |
147 | printk("handle_mm_fault returns %d\n",fault); | ||
148 | #endif | ||
149 | 141 | ||
150 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | 142 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) |
151 | return 0; | 143 | return 0; |
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 6b4baa6e4d31..acaff6a49e35 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c | |||
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(pg_data_table); | |||
59 | void __init m68k_setup_node(int node) | 59 | void __init m68k_setup_node(int node) |
60 | { | 60 | { |
61 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK | 61 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK |
62 | struct mem_info *info = m68k_memory + node; | 62 | struct m68k_mem_info *info = m68k_memory + node; |
63 | int i, end; | 63 | int i, end; |
64 | 64 | ||
65 | i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); | 65 | i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); |
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 568cfad3ceb8..6e4955bc542b 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c | |||
@@ -27,9 +27,9 @@ | |||
27 | 27 | ||
28 | /* | 28 | /* |
29 | * For 040/060 we can use the virtual memory area like other architectures, | 29 | * For 040/060 we can use the virtual memory area like other architectures, |
30 | * but for 020/030 we want to use early termination page descriptor and we | 30 | * but for 020/030 we want to use early termination page descriptors and we |
31 | * can't mix this with normal page descriptors, so we have to copy that code | 31 | * can't mix this with normal page descriptors, so we have to copy that code |
32 | * (mm/vmalloc.c) and return appriorate aligned addresses. | 32 | * (mm/vmalloc.c) and return appropriately aligned addresses. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #ifdef CPU_M68040_OR_M68060_ONLY | 35 | #ifdef CPU_M68040_OR_M68060_ONLY |
@@ -224,7 +224,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla | |||
224 | EXPORT_SYMBOL(__ioremap); | 224 | EXPORT_SYMBOL(__ioremap); |
225 | 225 | ||
226 | /* | 226 | /* |
227 | * Unmap a ioremap()ed region again | 227 | * Unmap an ioremap()ed region again |
228 | */ | 228 | */ |
229 | void iounmap(void __iomem *addr) | 229 | void iounmap(void __iomem *addr) |
230 | { | 230 | { |
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(iounmap); | |||
241 | 241 | ||
242 | /* | 242 | /* |
243 | * __iounmap unmaps nearly everything, so be careful | 243 | * __iounmap unmaps nearly everything, so be careful |
244 | * it doesn't free currently pointer/page tables anymore but it | 244 | * Currently it doesn't free pointer/page tables anymore but this |
245 | * wans't used anyway and might be added later. | 245 | * wasn't used anyway and might be added later. |
246 | */ | 246 | */ |
247 | void __iounmap(void *addr, unsigned long size) | 247 | void __iounmap(void *addr, unsigned long size) |
248 | { | 248 | { |
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 251c5437787b..7d4024432163 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c | |||
@@ -233,7 +233,7 @@ void __init paging_init(void) | |||
233 | printk("Fix your bootloader or use a memfile to make use of this area!\n"); | 233 | printk("Fix your bootloader or use a memfile to make use of this area!\n"); |
234 | m68k_num_memory--; | 234 | m68k_num_memory--; |
235 | memmove(m68k_memory + i, m68k_memory + i + 1, | 235 | memmove(m68k_memory + i, m68k_memory + i + 1, |
236 | (m68k_num_memory - i) * sizeof(struct mem_info)); | 236 | (m68k_num_memory - i) * sizeof(struct m68k_mem_info)); |
237 | continue; | 237 | continue; |
238 | } | 238 | } |
239 | addr = m68k_memory[i].addr + m68k_memory[i].size; | 239 | addr = m68k_memory[i].addr + m68k_memory[i].size; |
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 1c6262803b94..1bb3ce6634d3 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | 27 | ||
28 | #include <asm/bootinfo.h> | 28 | #include <asm/bootinfo.h> |
29 | #include <asm/bootinfo-vme.h> | ||
30 | #include <asm/byteorder.h> | ||
29 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
30 | #include <asm/setup.h> | 32 | #include <asm/setup.h> |
31 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
@@ -51,9 +53,10 @@ static int bcd2int (unsigned char b); | |||
51 | irq_handler_t tick_handler; | 53 | irq_handler_t tick_handler; |
52 | 54 | ||
53 | 55 | ||
54 | int mvme147_parse_bootinfo(const struct bi_record *bi) | 56 | int __init mvme147_parse_bootinfo(const struct bi_record *bi) |
55 | { | 57 | { |
56 | if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) | 58 | uint16_t tag = be16_to_cpu(bi->tag); |
59 | if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) | ||
57 | return 0; | 60 | return 0; |
58 | else | 61 | else |
59 | return 1; | 62 | return 1; |
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index 080a342458a1..eab7d342757e 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | 30 | ||
31 | #include <asm/bootinfo.h> | 31 | #include <asm/bootinfo.h> |
32 | #include <asm/bootinfo-vme.h> | ||
33 | #include <asm/byteorder.h> | ||
32 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
33 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
34 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
@@ -60,9 +62,10 @@ unsigned short mvme16x_config; | |||
60 | EXPORT_SYMBOL(mvme16x_config); | 62 | EXPORT_SYMBOL(mvme16x_config); |
61 | 63 | ||
62 | 64 | ||
63 | int mvme16x_parse_bootinfo(const struct bi_record *bi) | 65 | int __init mvme16x_parse_bootinfo(const struct bi_record *bi) |
64 | { | 66 | { |
65 | if (bi->tag == BI_VME_TYPE || bi->tag == BI_VME_BRDINFO) | 67 | uint16_t tag = be16_to_cpu(bi->tag); |
68 | if (tag == BI_VME_TYPE || tag == BI_VME_BRDINFO) | ||
66 | return 0; | 69 | return 0; |
67 | else | 70 | else |
68 | return 1; | 71 | return 1; |
@@ -87,15 +90,15 @@ static void mvme16x_get_model(char *model) | |||
87 | suf[3] = '\0'; | 90 | suf[3] = '\0'; |
88 | suf[0] = suf[1] ? '-' : '\0'; | 91 | suf[0] = suf[1] ? '-' : '\0'; |
89 | 92 | ||
90 | sprintf(model, "Motorola MVME%x%s", p->brdno, suf); | 93 | sprintf(model, "Motorola MVME%x%s", be16_to_cpu(p->brdno), suf); |
91 | } | 94 | } |
92 | 95 | ||
93 | 96 | ||
94 | static void mvme16x_get_hardware_list(struct seq_file *m) | 97 | static void mvme16x_get_hardware_list(struct seq_file *m) |
95 | { | 98 | { |
96 | p_bdid p = &mvme_bdid; | 99 | uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); |
97 | 100 | ||
98 | if (p->brdno == 0x0162 || p->brdno == 0x0172) | 101 | if (brdno == 0x0162 || brdno == 0x0172) |
99 | { | 102 | { |
100 | unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; | 103 | unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; |
101 | 104 | ||
@@ -285,6 +288,7 @@ void __init config_mvme16x(void) | |||
285 | { | 288 | { |
286 | p_bdid p = &mvme_bdid; | 289 | p_bdid p = &mvme_bdid; |
287 | char id[40]; | 290 | char id[40]; |
291 | uint16_t brdno = be16_to_cpu(p->brdno); | ||
288 | 292 | ||
289 | mach_max_dma_address = 0xffffffff; | 293 | mach_max_dma_address = 0xffffffff; |
290 | mach_sched_init = mvme16x_sched_init; | 294 | mach_sched_init = mvme16x_sched_init; |
@@ -306,18 +310,18 @@ void __init config_mvme16x(void) | |||
306 | } | 310 | } |
307 | /* Board type is only set by newer versions of vmelilo/tftplilo */ | 311 | /* Board type is only set by newer versions of vmelilo/tftplilo */ |
308 | if (vme_brdtype == 0) | 312 | if (vme_brdtype == 0) |
309 | vme_brdtype = p->brdno; | 313 | vme_brdtype = brdno; |
310 | 314 | ||
311 | mvme16x_get_model(id); | 315 | mvme16x_get_model(id); |
312 | printk ("\nBRD_ID: %s BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4, | 316 | printk ("\nBRD_ID: %s BUG %x.%x %02x/%02x/%02x\n", id, p->rev>>4, |
313 | p->rev&0xf, p->yr, p->mth, p->day); | 317 | p->rev&0xf, p->yr, p->mth, p->day); |
314 | if (p->brdno == 0x0162 || p->brdno == 0x172) | 318 | if (brdno == 0x0162 || brdno == 0x172) |
315 | { | 319 | { |
316 | unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; | 320 | unsigned char rev = *(unsigned char *)MVME162_VERSION_REG; |
317 | 321 | ||
318 | mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA; | 322 | mvme16x_config = rev | MVME16x_CONFIG_GOT_SCCA; |
319 | 323 | ||
320 | printk ("MVME%x Hardware status:\n", p->brdno); | 324 | printk ("MVME%x Hardware status:\n", brdno); |
321 | printk (" CPU Type 68%s040\n", | 325 | printk (" CPU Type 68%s040\n", |
322 | rev & MVME16x_CONFIG_GOT_FPU ? "" : "LC"); | 326 | rev & MVME16x_CONFIG_GOT_FPU ? "" : "LC"); |
323 | printk (" CPU clock %dMHz\n", | 327 | printk (" CPU clock %dMHz\n", |
@@ -347,12 +351,12 @@ void __init config_mvme16x(void) | |||
347 | 351 | ||
348 | static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) | 352 | static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) |
349 | { | 353 | { |
350 | p_bdid p = &mvme_bdid; | ||
351 | unsigned long *new = (unsigned long *)vectors; | 354 | unsigned long *new = (unsigned long *)vectors; |
352 | unsigned long *old = (unsigned long *)0xffe00000; | 355 | unsigned long *old = (unsigned long *)0xffe00000; |
353 | volatile unsigned char uc, *ucp; | 356 | volatile unsigned char uc, *ucp; |
357 | uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); | ||
354 | 358 | ||
355 | if (p->brdno == 0x0162 || p->brdno == 0x172) | 359 | if (brdno == 0x0162 || brdno == 0x172) |
356 | { | 360 | { |
357 | ucp = (volatile unsigned char *)0xfff42043; | 361 | ucp = (volatile unsigned char *)0xfff42043; |
358 | uc = *ucp | 8; | 362 | uc = *ucp | 8; |
@@ -366,7 +370,7 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id) | |||
366 | *(new+9) = *(old+9); /* Trace */ | 370 | *(new+9) = *(old+9); /* Trace */ |
367 | *(new+47) = *(old+47); /* Trap #15 */ | 371 | *(new+47) = *(old+47); /* Trap #15 */ |
368 | 372 | ||
369 | if (p->brdno == 0x0162 || p->brdno == 0x172) | 373 | if (brdno == 0x0162 || brdno == 0x172) |
370 | *(new+0x5e) = *(old+0x5e); /* ABORT switch */ | 374 | *(new+0x5e) = *(old+0x5e); /* ABORT switch */ |
371 | else | 375 | else |
372 | *(new+0x6e) = *(old+0x6e); /* ABORT switch */ | 376 | *(new+0x6e) = *(old+0x6e); /* ABORT switch */ |
@@ -381,7 +385,7 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id) | |||
381 | 385 | ||
382 | void mvme16x_sched_init (irq_handler_t timer_routine) | 386 | void mvme16x_sched_init (irq_handler_t timer_routine) |
383 | { | 387 | { |
384 | p_bdid p = &mvme_bdid; | 388 | uint16_t brdno = be16_to_cpu(mvme_bdid.brdno); |
385 | int irq; | 389 | int irq; |
386 | 390 | ||
387 | tick_handler = timer_routine; | 391 | tick_handler = timer_routine; |
@@ -394,7 +398,7 @@ void mvme16x_sched_init (irq_handler_t timer_routine) | |||
394 | "timer", mvme16x_timer_int)) | 398 | "timer", mvme16x_timer_int)) |
395 | panic ("Couldn't register timer int"); | 399 | panic ("Couldn't register timer int"); |
396 | 400 | ||
397 | if (p->brdno == 0x0162 || p->brdno == 0x172) | 401 | if (brdno == 0x0162 || brdno == 0x172) |
398 | irq = MVME162_IRQ_ABORT; | 402 | irq = MVME162_IRQ_ABORT; |
399 | else | 403 | else |
400 | irq = MVME167_IRQ_ABORT; | 404 | irq = MVME167_IRQ_ABORT; |
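Throughout the mvme16x hunks the board ID block is treated as big-endian: p->brdno is converted once with be16_to_cpu() into a local brdno, and all comparisons and printks then use the CPU-order copy instead of re-reading the raw field. A sketch of the pattern, assuming <asm/byteorder.h> as included by the hunk above (the two setup helpers are hypothetical):

    /* Convert the big-endian board number once, then use the
     * CPU-order copy everywhere. */
    uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);

    if (brdno == 0x0162 || brdno == 0x0172)
            setup_mvme162();        /* hypothetical helper */
    else
            setup_mvme167();        /* hypothetical helper */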
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 078bb744b5fe..e90fe903613e 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c | |||
@@ -154,7 +154,7 @@ static unsigned int serports[] = | |||
154 | 0x3f8,0x2f8,0x3e8,0x2e8,0 | 154 | 0x3f8,0x2f8,0x3e8,0x2e8,0 |
155 | }; | 155 | }; |
156 | 156 | ||
157 | static void q40_disable_irqs(void) | 157 | static void __init q40_disable_irqs(void) |
158 | { | 158 | { |
159 | unsigned i, j; | 159 | unsigned i, j; |
160 | 160 | ||
@@ -198,7 +198,7 @@ void __init config_q40(void) | |||
198 | } | 198 | } |
199 | 199 | ||
200 | 200 | ||
201 | int q40_parse_bootinfo(const struct bi_record *rec) | 201 | int __init q40_parse_bootinfo(const struct bi_record *rec) |
202 | { | 202 | { |
203 | return 1; | 203 | return 1; |
204 | } | 204 | } |
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c index d522eaab4551..d95506e06c2a 100644 --- a/arch/m68k/sun3/dvma.c +++ b/arch/m68k/sun3/dvma.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
11 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
12 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
@@ -62,10 +63,7 @@ int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, | |||
62 | 63 | ||
63 | } | 64 | } |
64 | 65 | ||
65 | void sun3_dvma_init(void) | 66 | void __init sun3_dvma_init(void) |
66 | { | 67 | { |
67 | |||
68 | memset(ptelist, 0, sizeof(ptelist)); | 68 | memset(ptelist, 0, sizeof(ptelist)); |
69 | |||
70 | |||
71 | } | 69 | } |
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 8edc510a21be..3f258e230ba5 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c | |||
@@ -6,6 +6,7 @@ | |||
6 | ** Started 1/16/98 @ 2:22 am | 6 | ** Started 1/16/98 @ 2:22 am |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/init.h> | ||
9 | #include <linux/mman.h> | 10 | #include <linux/mman.h> |
10 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
@@ -122,7 +123,7 @@ void print_pte_vaddr (unsigned long vaddr) | |||
122 | /* | 123 | /* |
123 | * Initialise the MMU emulator. | 124 | * Initialise the MMU emulator. |
124 | */ | 125 | */ |
125 | void mmu_emu_init(unsigned long bootmem_end) | 126 | void __init mmu_emu_init(unsigned long bootmem_end) |
126 | { | 127 | { |
127 | unsigned long seg, num; | 128 | unsigned long seg, num; |
128 | int i,j; | 129 | int i,j; |
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index cab54482ca34..b37521a5259d 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c | |||
@@ -6,6 +6,8 @@ | |||
6 | * Contains common routines for sun3/sun3x DVMA management. | 6 | * Contains common routines for sun3/sun3x DVMA management. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bootmem.h> | ||
10 | #include <linux/init.h> | ||
9 | #include <linux/module.h> | 11 | #include <linux/module.h> |
10 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
11 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
@@ -30,7 +32,7 @@ static inline void dvma_unmap_iommu(unsigned long a, int b) | |||
30 | extern void sun3_dvma_init(void); | 32 | extern void sun3_dvma_init(void); |
31 | #endif | 33 | #endif |
32 | 34 | ||
33 | static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES]; | 35 | static unsigned long *iommu_use; |
34 | 36 | ||
35 | #define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT) | 37 | #define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT) |
36 | 38 | ||
@@ -245,7 +247,7 @@ static inline int free_baddr(unsigned long baddr) | |||
245 | 247 | ||
246 | } | 248 | } |
247 | 249 | ||
248 | void dvma_init(void) | 250 | void __init dvma_init(void) |
249 | { | 251 | { |
250 | 252 | ||
251 | struct hole *hole; | 253 | struct hole *hole; |
@@ -265,7 +267,7 @@ void dvma_init(void) | |||
265 | 267 | ||
266 | list_add(&(hole->list), &hole_list); | 268 | list_add(&(hole->list), &hole_list); |
267 | 269 | ||
268 | memset(iommu_use, 0, sizeof(iommu_use)); | 270 | iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); |
269 | 271 | ||
270 | dvma_unmap_iommu(DVMA_START, DVMA_SIZE); | 272 | dvma_unmap_iommu(DVMA_START, DVMA_SIZE); |
271 | 273 | ||
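In sun3dvma.c the iommu_use table stops being a fixed static array and is instead sized and allocated during boot. alloc_bootmem() draws from the early bootmem pool (before the page allocator is up), panics rather than returning NULL, and hands back zeroed memory, which is why the explicit memset() of iommu_use could be dropped. A sketch of the pattern (the wrapper name is made up):

    #include <linux/bootmem.h>
    #include <linux/init.h>

    static unsigned long *iommu_use;

    static void __init iommu_table_alloc(void)
    {
            /* Returns zeroed memory; panics if the request cannot be
             * satisfied, so no NULL check is needed here. */
            iommu_use = alloc_bootmem(IOMMU_TOTAL_ENTRIES *
                                      sizeof(unsigned long));
    }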
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c index a7b7e818d627..0898c3f81508 100644 --- a/arch/m68k/sun3x/prom.c +++ b/arch/m68k/sun3x/prom.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/pgtable.h> | 12 | #include <asm/pgtable.h> |
13 | #include <asm/bootinfo.h> | ||
14 | #include <asm/setup.h> | 13 | #include <asm/setup.h> |
15 | #include <asm/traps.h> | 14 | #include <asm/traps.h> |
16 | #include <asm/sun3xprom.h> | 15 | #include <asm/sun3xprom.h> |
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index c90bfc6bf648..5d6b4b407dda 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h | |||
@@ -82,4 +82,19 @@ static inline void fence(void) | |||
82 | #define smp_read_barrier_depends() do { } while (0) | 82 | #define smp_read_barrier_depends() do { } while (0) |
83 | #define set_mb(var, value) do { var = value; smp_mb(); } while (0) | 83 | #define set_mb(var, value) do { var = value; smp_mb(); } while (0) |
84 | 84 | ||
85 | #define smp_store_release(p, v) \ | ||
86 | do { \ | ||
87 | compiletime_assert_atomic_type(*p); \ | ||
88 | smp_mb(); \ | ||
89 | ACCESS_ONCE(*p) = (v); \ | ||
90 | } while (0) | ||
91 | |||
92 | #define smp_load_acquire(p) \ | ||
93 | ({ \ | ||
94 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
95 | compiletime_assert_atomic_type(*p); \ | ||
96 | smp_mb(); \ | ||
97 | ___p1; \ | ||
98 | }) | ||
99 | |||
85 | #endif /* _ASM_METAG_BARRIER_H */ | 100 | #endif /* _ASM_METAG_BARRIER_H */ |
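This metag hunk (and the matching mips/include/asm/barrier.h hunk further down) adds the then-new smp_store_release()/smp_load_acquire() pair, here built from a full smp_mb() plus ACCESS_ONCE(). A usage sketch of the intended pairing (variable and function names are made up):

    /* The release store publishes 'payload' before 'ready'; once the
     * acquire load on another CPU observes ready != 0, it is also
     * guaranteed to observe the payload written before the release. */
    static int payload;
    static int ready;

    static void producer(void)
    {
            payload = 42;
            smp_store_release(&ready, 1);
    }

    static int consumer(void)
    {
            if (smp_load_acquire(&ready))
                    return payload;         /* sees 42 */
            return -1;                      /* not published yet */
    }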
diff --git a/arch/metag/include/asm/smp.h b/arch/metag/include/asm/smp.h index e0373f81a117..1d7e770f7a54 100644 --- a/arch/metag/include/asm/smp.h +++ b/arch/metag/include/asm/smp.h | |||
@@ -7,13 +7,11 @@ | |||
7 | 7 | ||
8 | enum ipi_msg_type { | 8 | enum ipi_msg_type { |
9 | IPI_CALL_FUNC, | 9 | IPI_CALL_FUNC, |
10 | IPI_CALL_FUNC_SINGLE, | ||
11 | IPI_RESCHEDULE, | 10 | IPI_RESCHEDULE, |
12 | }; | 11 | }; |
13 | 12 | ||
14 | extern void arch_send_call_function_single_ipi(int cpu); | 13 | extern void arch_send_call_function_single_ipi(int cpu); |
15 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 14 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
16 | #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask | ||
17 | 15 | ||
18 | asmlinkage void secondary_start_kernel(void); | 16 | asmlinkage void secondary_start_kernel(void); |
19 | 17 | ||
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index db589ad5dbc4..c700d625067a 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c | |||
@@ -399,11 +399,6 @@ static int __init dma_alloc_init(void) | |||
399 | pgd = pgd_offset(&init_mm, CONSISTENT_START); | 399 | pgd = pgd_offset(&init_mm, CONSISTENT_START); |
400 | pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); | 400 | pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); |
401 | pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); | 401 | pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); |
402 | if (!pmd) { | ||
403 | pr_err("%s: no pmd tables\n", __func__); | ||
404 | ret = -ENOMEM; | ||
405 | break; | ||
406 | } | ||
407 | WARN_ON(!pmd_none(*pmd)); | 402 | WARN_ON(!pmd_none(*pmd)); |
408 | 403 | ||
409 | pte = pte_alloc_kernel(pmd, CONSISTENT_START); | 404 | pte = pte_alloc_kernel(pmd, CONSISTENT_START); |
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c index 7c0113142981..f006d2276f40 100644 --- a/arch/metag/kernel/smp.c +++ b/arch/metag/kernel/smp.c | |||
@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running); | |||
68 | /* | 68 | /* |
69 | * "thread" is assumed to be a valid Meta hardware thread ID. | 69 | * "thread" is assumed to be a valid Meta hardware thread ID. |
70 | */ | 70 | */ |
71 | int boot_secondary(unsigned int thread, struct task_struct *idle) | 71 | static int boot_secondary(unsigned int thread, struct task_struct *idle) |
72 | { | 72 | { |
73 | u32 val; | 73 | u32 val; |
74 | 74 | ||
@@ -491,7 +491,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
491 | 491 | ||
492 | void arch_send_call_function_single_ipi(int cpu) | 492 | void arch_send_call_function_single_ipi(int cpu) |
493 | { | 493 | { |
494 | send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); | 494 | send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); |
495 | } | 495 | } |
496 | 496 | ||
497 | void show_ipi_list(struct seq_file *p) | 497 | void show_ipi_list(struct seq_file *p) |
@@ -517,11 +517,10 @@ static DEFINE_SPINLOCK(stop_lock); | |||
517 | * | 517 | * |
518 | * Bit 0 - Inter-processor function call | 518 | * Bit 0 - Inter-processor function call |
519 | */ | 519 | */ |
520 | static int do_IPI(struct pt_regs *regs) | 520 | static int do_IPI(void) |
521 | { | 521 | { |
522 | unsigned int cpu = smp_processor_id(); | 522 | unsigned int cpu = smp_processor_id(); |
523 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | 523 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); |
524 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
525 | unsigned long msgs, nextmsg; | 524 | unsigned long msgs, nextmsg; |
526 | int handled = 0; | 525 | int handled = 0; |
527 | 526 | ||
@@ -546,10 +545,6 @@ static int do_IPI(struct pt_regs *regs) | |||
546 | generic_smp_call_function_interrupt(); | 545 | generic_smp_call_function_interrupt(); |
547 | break; | 546 | break; |
548 | 547 | ||
549 | case IPI_CALL_FUNC_SINGLE: | ||
550 | generic_smp_call_function_single_interrupt(); | ||
551 | break; | ||
552 | |||
553 | default: | 548 | default: |
554 | pr_crit("CPU%u: Unknown IPI message 0x%lx\n", | 549 | pr_crit("CPU%u: Unknown IPI message 0x%lx\n", |
555 | cpu, nextmsg); | 550 | cpu, nextmsg); |
@@ -557,8 +552,6 @@ static int do_IPI(struct pt_regs *regs) | |||
557 | } | 552 | } |
558 | } | 553 | } |
559 | 554 | ||
560 | set_irq_regs(old_regs); | ||
561 | |||
562 | return handled; | 555 | return handled; |
563 | } | 556 | } |
564 | 557 | ||
@@ -624,7 +617,7 @@ static void kick_raise_softirq(cpumask_t callmap, unsigned int irq) | |||
624 | static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, | 617 | static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers, |
625 | int Inst, PTBI pTBI, int *handled) | 618 | int Inst, PTBI pTBI, int *handled) |
626 | { | 619 | { |
627 | *handled = do_IPI((struct pt_regs *)State.Sig.pCtx); | 620 | *handled = do_IPI(); |
628 | 621 | ||
629 | return State; | 622 | return State; |
630 | } | 623 | } |
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c index bec3dec4922e..4ba595701f7d 100644 --- a/arch/metag/kernel/topology.c +++ b/arch/metag/kernel/topology.c | |||
@@ -19,6 +19,7 @@ | |||
19 | DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); | 19 | DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data); |
20 | 20 | ||
21 | cpumask_t cpu_core_map[NR_CPUS]; | 21 | cpumask_t cpu_core_map[NR_CPUS]; |
22 | EXPORT_SYMBOL(cpu_core_map); | ||
22 | 23 | ||
23 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | 24 | static cpumask_t cpu_coregroup_map(unsigned int cpu) |
24 | { | 25 | { |
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index ce0bbf8f5640..a82426589fff 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | 1 | ||
2 | generic-y += barrier.h | ||
2 | generic-y += clkdev.h | 3 | generic-y += clkdev.h |
3 | generic-y += exec.h | 4 | generic-y += exec.h |
4 | generic-y += trace_clock.h | 5 | generic-y += trace_clock.h |
diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h deleted file mode 100644 index df5be3e87044..000000000000 --- a/arch/microblaze/include/asm/barrier.h +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_MICROBLAZE_BARRIER_H | ||
10 | #define _ASM_MICROBLAZE_BARRIER_H | ||
11 | |||
12 | #define nop() asm volatile ("nop") | ||
13 | |||
14 | #define smp_read_barrier_depends() do {} while (0) | ||
15 | #define read_barrier_depends() do {} while (0) | ||
16 | |||
17 | #define mb() barrier() | ||
18 | #define rmb() mb() | ||
19 | #define wmb() mb() | ||
20 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
21 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | ||
22 | |||
23 | #define smp_mb() mb() | ||
24 | #define smp_rmb() rmb() | ||
25 | #define smp_wmb() wmb() | ||
26 | |||
27 | #endif /* _ASM_MICROBLAZE_BARRIER_H */ | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 650de3976e7a..c93d92beb3d6 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -47,6 +47,7 @@ config MIPS | |||
47 | select MODULES_USE_ELF_RELA if MODULES && 64BIT | 47 | select MODULES_USE_ELF_RELA if MODULES && 64BIT |
48 | select CLONE_BACKWARDS | 48 | select CLONE_BACKWARDS |
49 | select HAVE_DEBUG_STACKOVERFLOW | 49 | select HAVE_DEBUG_STACKOVERFLOW |
50 | select HAVE_CC_STACKPROTECTOR | ||
50 | 51 | ||
51 | menu "Machine selection" | 52 | menu "Machine selection" |
52 | 53 | ||
@@ -2322,19 +2323,6 @@ config SECCOMP | |||
2322 | 2323 | ||
2323 | If unsure, say Y. Only embedded should say N here. | 2324 | If unsure, say Y. Only embedded should say N here. |
2324 | 2325 | ||
2325 | config CC_STACKPROTECTOR | ||
2326 | bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" | ||
2327 | help | ||
2328 | This option turns on the -fstack-protector GCC feature. This | ||
2329 | feature puts, at the beginning of functions, a canary value on | ||
2330 | the stack just before the return address, and validates | ||
2331 | the value just before actually returning. Stack based buffer | ||
2332 | overflows (that need to overwrite this return address) now also | ||
2333 | overwrite the canary, which gets detected and the attack is then | ||
2334 | neutralized via a kernel panic. | ||
2335 | |||
2336 | This feature requires gcc version 4.2 or above. | ||
2337 | |||
2338 | config USE_OF | 2326 | config USE_OF |
2339 | bool | 2327 | bool |
2340 | select OF | 2328 | select OF |
diff --git a/arch/mips/Makefile b/arch/mips/Makefile index de300b993607..efe50787cd89 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile | |||
@@ -232,10 +232,6 @@ bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ | |||
232 | 232 | ||
233 | LDFLAGS += -m $(ld-emul) | 233 | LDFLAGS += -m $(ld-emul) |
234 | 234 | ||
235 | ifdef CONFIG_CC_STACKPROTECTOR | ||
236 | KBUILD_CFLAGS += -fstack-protector | ||
237 | endif | ||
238 | |||
239 | ifdef CONFIG_MIPS | 235 | ifdef CONFIG_MIPS |
240 | CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ | 236 | CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ |
241 | egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ | 237 | egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ |
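With the MIPS Kconfig and Makefile hunks above, the architecture drops its private CC_STACKPROTECTOR prompt and -fstack-protector flag handling and instead selects HAVE_CC_STACKPROTECTOR, leaving flag selection to the common build machinery. Conceptually, what -fstack-protector asks the compiler to emit around a protected function is roughly the following (a simplified sketch; the real canary handling may be per-task and the check is inserted by the compiler, not written by hand):

    /* The kernel supplies the guard value and the failure hook. */
    extern unsigned long __stack_chk_guard;
    extern void __stack_chk_fail(void);

    void protected_function(void)
    {
            unsigned long canary = __stack_chk_guard;
            char buf[64];

            /* ... body that might overflow buf ... */

            if (canary != __stack_chk_guard)
                    __stack_chk_fail();     /* canary was clobbered */
    }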
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c index 9a357fffcfbe..820b7a313d9b 100644 --- a/arch/mips/ar7/setup.c +++ b/arch/mips/ar7/setup.c | |||
@@ -92,7 +92,6 @@ void __init plat_mem_setup(void) | |||
92 | _machine_restart = ar7_machine_restart; | 92 | _machine_restart = ar7_machine_restart; |
93 | _machine_halt = ar7_machine_halt; | 93 | _machine_halt = ar7_machine_halt; |
94 | pm_power_off = ar7_machine_power_off; | 94 | pm_power_off = ar7_machine_power_off; |
95 | panic_timeout = 3; | ||
96 | 95 | ||
97 | io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000); | 96 | io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000); |
98 | if (!io_base) | 97 | if (!io_base) |
diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c index d71005835c00..9100122e5cef 100644 --- a/arch/mips/emma/markeins/setup.c +++ b/arch/mips/emma/markeins/setup.c | |||
@@ -111,9 +111,6 @@ void __init plat_mem_setup(void) | |||
111 | iomem_resource.start = EMMA2RH_IO_BASE; | 111 | iomem_resource.start = EMMA2RH_IO_BASE; |
112 | iomem_resource.end = EMMA2RH_ROM_BASE - 1; | 112 | iomem_resource.end = EMMA2RH_ROM_BASE - 1; |
113 | 113 | ||
114 | /* Reboot on panic */ | ||
115 | panic_timeout = 180; | ||
116 | |||
117 | markeins_sio_setup(); | 114 | markeins_sio_setup(); |
118 | } | 115 | } |
119 | 116 | ||
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index f26d8e1bf3c3..e1aa4e4c2984 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h | |||
@@ -180,4 +180,19 @@ | |||
180 | #define nudge_writes() mb() | 180 | #define nudge_writes() mb() |
181 | #endif | 181 | #endif |
182 | 182 | ||
183 | #define smp_store_release(p, v) \ | ||
184 | do { \ | ||
185 | compiletime_assert_atomic_type(*p); \ | ||
186 | smp_mb(); \ | ||
187 | ACCESS_ONCE(*p) = (v); \ | ||
188 | } while (0) | ||
189 | |||
190 | #define smp_load_acquire(p) \ | ||
191 | ({ \ | ||
192 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
193 | compiletime_assert_atomic_type(*p); \ | ||
194 | smp_mb(); \ | ||
195 | ___p1; \ | ||
196 | }) | ||
197 | |||
183 | #endif /* __ASM_BARRIER_H */ | 198 | #endif /* __ASM_BARRIER_H */ |
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h index c75025f27c20..06b9bc7ea14b 100644 --- a/arch/mips/include/asm/cacheops.h +++ b/arch/mips/include/asm/cacheops.h | |||
@@ -83,6 +83,6 @@ | |||
83 | /* | 83 | /* |
84 | * Loongson2-specific cacheops | 84 | * Loongson2-specific cacheops |
85 | */ | 85 | */ |
86 | #define Hit_Invalidate_I_Loongson23 0x00 | 86 | #define Hit_Invalidate_I_Loongson2 0x00 |
87 | 87 | ||
88 | #endif /* __ASM_CACHEOPS_H */ | 88 | #endif /* __ASM_CACHEOPS_H */ |
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 34d1a1917125..c84caddb8bde 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h | |||
@@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr) | |||
165 | __iflush_prologue | 165 | __iflush_prologue |
166 | switch (boot_cpu_type()) { | 166 | switch (boot_cpu_type()) { |
167 | case CPU_LOONGSON2: | 167 | case CPU_LOONGSON2: |
168 | cache_op(Hit_Invalidate_I_Loongson23, addr); | 168 | cache_op(Hit_Invalidate_I_Loongson2, addr); |
169 | break; | 169 | break; |
170 | 170 | ||
171 | default: | 171 | default: |
@@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr) | |||
219 | { | 219 | { |
220 | switch (boot_cpu_type()) { | 220 | switch (boot_cpu_type()) { |
221 | case CPU_LOONGSON2: | 221 | case CPU_LOONGSON2: |
222 | protected_cache_op(Hit_Invalidate_I_Loongson23, addr); | 222 | protected_cache_op(Hit_Invalidate_I_Loongson2, addr); |
223 | break; | 223 | break; |
224 | 224 | ||
225 | default: | 225 | default: |
@@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr) | |||
357 | "i" (op)); | 357 | "i" (op)); |
358 | 358 | ||
359 | /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ | 359 | /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ |
360 | #define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \ | 360 | #define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ |
361 | static inline void blast_##pfx##cache##lsize(void) \ | 361 | static inline void extra##blast_##pfx##cache##lsize(void) \ |
362 | { \ | 362 | { \ |
363 | unsigned long start = INDEX_BASE; \ | 363 | unsigned long start = INDEX_BASE; \ |
364 | unsigned long end = start + current_cpu_data.desc.waysize; \ | 364 | unsigned long end = start + current_cpu_data.desc.waysize; \ |
@@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \ | |||
376 | __##pfx##flush_epilogue \ | 376 | __##pfx##flush_epilogue \ |
377 | } \ | 377 | } \ |
378 | \ | 378 | \ |
379 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | 379 | static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ |
380 | { \ | 380 | { \ |
381 | unsigned long start = page; \ | 381 | unsigned long start = page; \ |
382 | unsigned long end = page + PAGE_SIZE; \ | 382 | unsigned long end = page + PAGE_SIZE; \ |
@@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | |||
391 | __##pfx##flush_epilogue \ | 391 | __##pfx##flush_epilogue \ |
392 | } \ | 392 | } \ |
393 | \ | 393 | \ |
394 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ | 394 | static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ |
395 | { \ | 395 | { \ |
396 | unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ | 396 | unsigned long indexmask = current_cpu_data.desc.waysize - 1; \ |
397 | unsigned long start = INDEX_BASE + (page & indexmask); \ | 397 | unsigned long start = INDEX_BASE + (page & indexmask); \ |
@@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) | |||
410 | __##pfx##flush_epilogue \ | 410 | __##pfx##flush_epilogue \ |
411 | } | 411 | } |
412 | 412 | ||
413 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) | 413 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) |
414 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16) | 414 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, ) |
415 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) | 415 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, ) |
416 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) | 416 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, ) |
417 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) | 417 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, ) |
418 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) | 418 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_) |
419 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) | 419 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, ) |
420 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) | 420 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, ) |
421 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) | 421 | __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, ) |
422 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) | 422 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, ) |
423 | 423 | __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, ) | |
424 | __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16) | 424 | |
425 | __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32) | 425 | __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, ) |
426 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16) | 426 | __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, ) |
427 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32) | 427 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, ) |
428 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64) | 428 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, ) |
429 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) | 429 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) |
430 | __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) | ||
430 | 431 | ||
431 | /* build blast_xxx_range, protected_blast_xxx_range */ | 432 | /* build blast_xxx_range, protected_blast_xxx_range */ |
432 | #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ | 433 | #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ |
@@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, | |||
452 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) | 453 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) |
453 | __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) | 454 | __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) |
454 | __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) | 455 | __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) |
455 | __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \ | 456 | __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ |
456 | protected_, loongson23_) | 457 | protected_, loongson2_) |
457 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) | 458 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) |
458 | __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) | 459 | __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) |
459 | /* blast_inv_dcache_range */ | 460 | /* blast_inv_dcache_range */ |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 62ffd20ea869..49e572d879e1 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void) | |||
237 | r4k_blast_icache_page = (void *)cache_noop; | 237 | r4k_blast_icache_page = (void *)cache_noop; |
238 | else if (ic_lsize == 16) | 238 | else if (ic_lsize == 16) |
239 | r4k_blast_icache_page = blast_icache16_page; | 239 | r4k_blast_icache_page = blast_icache16_page; |
240 | else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2) | ||
241 | r4k_blast_icache_page = loongson2_blast_icache32_page; | ||
240 | else if (ic_lsize == 32) | 242 | else if (ic_lsize == 32) |
241 | r4k_blast_icache_page = blast_icache32_page; | 243 | r4k_blast_icache_page = blast_icache32_page; |
242 | else if (ic_lsize == 64) | 244 | else if (ic_lsize == 64) |
@@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void) | |||
261 | else if (TX49XX_ICACHE_INDEX_INV_WAR) | 263 | else if (TX49XX_ICACHE_INDEX_INV_WAR) |
262 | r4k_blast_icache_page_indexed = | 264 | r4k_blast_icache_page_indexed = |
263 | tx49_blast_icache32_page_indexed; | 265 | tx49_blast_icache32_page_indexed; |
266 | else if (current_cpu_type() == CPU_LOONGSON2) | ||
267 | r4k_blast_icache_page_indexed = | ||
268 | loongson2_blast_icache32_page_indexed; | ||
264 | else | 269 | else |
265 | r4k_blast_icache_page_indexed = | 270 | r4k_blast_icache_page_indexed = |
266 | blast_icache32_page_indexed; | 271 | blast_icache32_page_indexed; |
@@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void) | |||
284 | r4k_blast_icache = blast_r4600_v1_icache32; | 289 | r4k_blast_icache = blast_r4600_v1_icache32; |
285 | else if (TX49XX_ICACHE_INDEX_INV_WAR) | 290 | else if (TX49XX_ICACHE_INDEX_INV_WAR) |
286 | r4k_blast_icache = tx49_blast_icache32; | 291 | r4k_blast_icache = tx49_blast_icache32; |
292 | else if (current_cpu_type() == CPU_LOONGSON2) | ||
293 | r4k_blast_icache = loongson2_blast_icache32; | ||
287 | else | 294 | else |
288 | r4k_blast_icache = blast_icache32; | 295 | r4k_blast_icache = blast_icache32; |
289 | } else if (ic_lsize == 64) | 296 | } else if (ic_lsize == 64) |
@@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo | |||
580 | else { | 587 | else { |
581 | switch (boot_cpu_type()) { | 588 | switch (boot_cpu_type()) { |
582 | case CPU_LOONGSON2: | 589 | case CPU_LOONGSON2: |
583 | protected_blast_icache_range(start, end); | 590 | protected_loongson2_blast_icache_range(start, end); |
584 | break; | 591 | break; |
585 | 592 | ||
586 | default: | 593 | default: |
587 | protected_loongson23_blast_icache_range(start, end); | 594 | protected_blast_icache_range(start, end); |
588 | break; | 595 | break; |
589 | } | 596 | } |
590 | } | 597 | } |
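Context for the r4k-cache hunks above: __BUILD_BLAST_CACHE gained a trailing name-prefix parameter, so passing loongson2_ generates Loongson-2-specific variants (loongson2_blast_icache32 and friends) that c-r4k.c can now select at runtime via current_cpu_type(). A minimal token-pasting sketch, not the kernel's full macro body (the real version emits the page/indexed variants and the cache-op loops as well):

	#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
	static inline void extra##blast_##pfx##cache##lsize(void)		\
	{									\
		/* real version walks the cache issuing "cache" instructions */ \
	}

	__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
	__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
	/* expands to blast_icache32() and loongson2_blast_icache32() */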
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 6d981bb337ec..54e75c77184b 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c | |||
@@ -92,7 +92,6 @@ static void __init xlp_init_mem_from_bars(void) | |||
92 | 92 | ||
93 | void __init plat_mem_setup(void) | 93 | void __init plat_mem_setup(void) |
94 | { | 94 | { |
95 | panic_timeout = 5; | ||
96 | _machine_restart = (void (*)(char *))nlm_linux_exit; | 95 | _machine_restart = (void (*)(char *))nlm_linux_exit; |
97 | _machine_halt = nlm_linux_exit; | 96 | _machine_halt = nlm_linux_exit; |
98 | pm_power_off = nlm_linux_exit; | 97 | pm_power_off = nlm_linux_exit; |
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 214d123b79fa..921be5f77797 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c | |||
@@ -92,7 +92,6 @@ static void nlm_linux_exit(void) | |||
92 | 92 | ||
93 | void __init plat_mem_setup(void) | 93 | void __init plat_mem_setup(void) |
94 | { | 94 | { |
95 | panic_timeout = 5; | ||
96 | _machine_restart = (void (*)(char *))nlm_linux_exit; | 95 | _machine_restart = (void (*)(char *))nlm_linux_exit; |
97 | _machine_halt = nlm_linux_exit; | 96 | _machine_halt = nlm_linux_exit; |
98 | pm_power_off = nlm_linux_exit; | 97 | pm_power_off = nlm_linux_exit; |
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 41707a245dea..3462c831d0ea 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c | |||
@@ -134,8 +134,6 @@ void __init plat_mem_setup(void) | |||
134 | #error invalid SiByte board configuration | 134 | #error invalid SiByte board configuration |
135 | #endif | 135 | #endif |
136 | 136 | ||
137 | panic_timeout = 5; /* For debug. */ | ||
138 | |||
139 | board_be_handler = swarm_be_handler; | 137 | board_be_handler = swarm_be_handler; |
140 | 138 | ||
141 | if (xicor_probe()) | 139 | if (xicor_probe()) |
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 74742dc6a3da..032143ec2324 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | 1 | ||
2 | generic-y += barrier.h | ||
2 | generic-y += clkdev.h | 3 | generic-y += clkdev.h |
3 | generic-y += exec.h | 4 | generic-y += exec.h |
4 | generic-y += trace_clock.h | 5 | generic-y += trace_clock.h |
diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h deleted file mode 100644 index 2bd97a5c8af7..000000000000 --- a/arch/mn10300/include/asm/barrier.h +++ /dev/null | |||
@@ -1,37 +0,0 @@ | |||
1 | /* MN10300 memory barrier definitions | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #ifndef _ASM_BARRIER_H | ||
12 | #define _ASM_BARRIER_H | ||
13 | |||
14 | #define nop() asm volatile ("nop") | ||
15 | |||
16 | #define mb() asm volatile ("": : :"memory") | ||
17 | #define rmb() mb() | ||
18 | #define wmb() asm volatile ("": : :"memory") | ||
19 | |||
20 | #ifdef CONFIG_SMP | ||
21 | #define smp_mb() mb() | ||
22 | #define smp_rmb() rmb() | ||
23 | #define smp_wmb() wmb() | ||
24 | #define set_mb(var, value) do { xchg(&var, value); } while (0) | ||
25 | #else /* CONFIG_SMP */ | ||
26 | #define smp_mb() barrier() | ||
27 | #define smp_rmb() barrier() | ||
28 | #define smp_wmb() barrier() | ||
29 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
30 | #endif /* CONFIG_SMP */ | ||
31 | |||
32 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | ||
33 | |||
34 | #define read_barrier_depends() do {} while (0) | ||
35 | #define smp_read_barrier_depends() do {} while (0) | ||
36 | |||
37 | #endif /* _ASM_BARRIER_H */ | ||
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index a603b9ebe54c..34b0be4ca52d 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | 1 | ||
2 | generic-y += barrier.h | ||
2 | generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ | 3 | generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \ |
3 | segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \ | 4 | segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \ |
4 | div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ | 5 | div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \ |
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h deleted file mode 100644 index e77d834aa803..000000000000 --- a/arch/parisc/include/asm/barrier.h +++ /dev/null | |||
@@ -1,35 +0,0 @@ | |||
1 | #ifndef __PARISC_BARRIER_H | ||
2 | #define __PARISC_BARRIER_H | ||
3 | |||
4 | /* | ||
5 | ** This is simply the barrier() macro from linux/kernel.h but when serial.c | ||
6 | ** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h | ||
7 | ** hasn't yet been included yet so it fails, thus repeating the macro here. | ||
8 | ** | ||
9 | ** PA-RISC architecture allows for weakly ordered memory accesses although | ||
10 | ** none of the processors use it. There is a strong ordered bit that is | ||
11 | ** set in the O-bit of the page directory entry. Operating systems that | ||
12 | ** can not tolerate out of order accesses should set this bit when mapping | ||
13 | ** pages. The O-bit of the PSW should also be set to 1 (I don't believe any | ||
14 | ** of the processor implemented the PSW O-bit). The PCX-W ERS states that | ||
15 | ** the TLB O-bit is not implemented so the page directory does not need to | ||
16 | ** have the O-bit set when mapping pages (section 3.1). This section also | ||
17 | ** states that the PSW Y, Z, G, and O bits are not implemented. | ||
18 | ** So it looks like nothing needs to be done for parisc-linux (yet). | ||
19 | ** (thanks to chada for the above comment -ggg) | ||
20 | ** | ||
21 | ** The __asm__ op below simple prevents gcc/ld from reordering | ||
22 | ** instructions across the mb() "call". | ||
23 | */ | ||
24 | #define mb() __asm__ __volatile__("":::"memory") /* barrier() */ | ||
25 | #define rmb() mb() | ||
26 | #define wmb() mb() | ||
27 | #define smp_mb() mb() | ||
28 | #define smp_rmb() mb() | ||
29 | #define smp_wmb() mb() | ||
30 | #define smp_read_barrier_depends() do { } while(0) | ||
31 | #define read_barrier_depends() do { } while(0) | ||
32 | |||
33 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
34 | |||
35 | #endif /* __PARISC_BARRIER_H */ | ||
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index f33113a6141e..70b3674dac4e 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h | |||
@@ -75,6 +75,6 @@ | |||
75 | 75 | ||
76 | #define SO_BUSY_POLL 0x4027 | 76 | #define SO_BUSY_POLL 0x4027 |
77 | 77 | ||
78 | #define SO_MAX_PACING_RATE 0x4048 | 78 | #define SO_MAX_PACING_RATE 0x4028 |
79 | 79 | ||
80 | #endif /* _UAPI_ASM_SOCKET_H */ | 80 | #endif /* _UAPI_ASM_SOCKET_H */ |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b44b52c0a8f0..b2be8e8cb5c7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -147,6 +147,10 @@ config EARLY_PRINTK | |||
147 | bool | 147 | bool |
148 | default y | 148 | default y |
149 | 149 | ||
150 | config PANIC_TIMEOUT | ||
151 | int | ||
152 | default 180 | ||
153 | |||
150 | config COMPAT | 154 | config COMPAT |
151 | bool | 155 | bool |
152 | default y if PPC64 | 156 | default y if PPC64 |
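The PANIC_TIMEOUT symbol added above, together with the panic_timeout = 180 removals in setup_32.c and setup_64.c further down, moves the reboot-on-panic default from arch C code into configuration. A sketch of the generic side this relies on (not part of this diff), which seeds the variable at build time and still allows the panic= boot parameter or sysctl to override it:

	/* kernel/panic.c (simplified) */
	int panic_timeout = CONFIG_PANIC_TIMEOUT;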
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ae782254e731..f89da808ce31 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h | |||
@@ -45,11 +45,15 @@ | |||
45 | # define SMPWMB eieio | 45 | # define SMPWMB eieio |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") | ||
49 | |||
48 | #define smp_mb() mb() | 50 | #define smp_mb() mb() |
49 | #define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") | 51 | #define smp_rmb() __lwsync() |
50 | #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") | 52 | #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") |
51 | #define smp_read_barrier_depends() read_barrier_depends() | 53 | #define smp_read_barrier_depends() read_barrier_depends() |
52 | #else | 54 | #else |
55 | #define __lwsync() barrier() | ||
56 | |||
53 | #define smp_mb() barrier() | 57 | #define smp_mb() barrier() |
54 | #define smp_rmb() barrier() | 58 | #define smp_rmb() barrier() |
55 | #define smp_wmb() barrier() | 59 | #define smp_wmb() barrier() |
@@ -65,4 +69,19 @@ | |||
65 | #define data_barrier(x) \ | 69 | #define data_barrier(x) \ |
66 | asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); | 70 | asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); |
67 | 71 | ||
72 | #define smp_store_release(p, v) \ | ||
73 | do { \ | ||
74 | compiletime_assert_atomic_type(*p); \ | ||
75 | __lwsync(); \ | ||
76 | ACCESS_ONCE(*p) = (v); \ | ||
77 | } while (0) | ||
78 | |||
79 | #define smp_load_acquire(p) \ | ||
80 | ({ \ | ||
81 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
82 | compiletime_assert_atomic_type(*p); \ | ||
83 | __lwsync(); \ | ||
84 | ___p1; \ | ||
85 | }) | ||
86 | |||
68 | #endif /* _ASM_POWERPC_BARRIER_H */ | 87 | #endif /* _ASM_POWERPC_BARRIER_H */ |
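The new smp_store_release()/smp_load_acquire() pair above gives publish/consume ordering without a full sync: lwsync before the store on the writer side, lwsync after the load on the reader side. A minimal sketch of the intended pairing (the message/flag names are illustrative, not from this commit):

	struct msg {
		int payload;
	};

	static struct msg msg_buf;
	static int msg_ready;

	/* producer: make the payload visible before the flag */
	static void publish(int value)
	{
		msg_buf.payload = value;
		smp_store_release(&msg_ready, 1);	/* lwsync, then store */
	}

	/* consumer: the acquire load orders the payload read after the flag read */
	static int consume(int *value)
	{
		if (!smp_load_acquire(&msg_ready))	/* load, then lwsync */
			return 0;
		*value = msg_buf.payload;
		return 1;
	}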
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 703a8412dac2..11ba86e17631 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h | |||
@@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long); | |||
26 | void check_for_initrd(void); | 26 | void check_for_initrd(void); |
27 | void do_init_bootmem(void); | 27 | void do_init_bootmem(void); |
28 | void setup_panic(void); | 28 | void setup_panic(void); |
29 | #define ARCH_PANIC_TIMEOUT 180 | ||
29 | 30 | ||
30 | #endif /* !__ASSEMBLY__ */ | 31 | #endif /* !__ASSEMBLY__ */ |
31 | 32 | ||
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 5f54a744dcc5..f6e78d63fb6a 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <asm/synch.h> | 28 | #include <asm/synch.h> |
29 | #include <asm/ppc-opcode.h> | 29 | #include <asm/ppc-opcode.h> |
30 | 30 | ||
31 | #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ | ||
32 | |||
31 | #define arch_spin_is_locked(x) ((x)->slock != 0) | 33 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | 34 | ||
33 | #ifdef CONFIG_PPC64 | 35 | #ifdef CONFIG_PPC64 |
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 75c6ecdb8f37..7422a999a39a 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h | |||
@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t; | |||
36 | 36 | ||
37 | struct arch_uprobe { | 37 | struct arch_uprobe { |
38 | union { | 38 | union { |
39 | u8 insn[MAX_UINSN_BYTES]; | 39 | u32 insn; |
40 | u8 ixol[MAX_UINSN_BYTES]; | 40 | u32 ixol; |
41 | u32 ainsn; | ||
42 | }; | 41 | }; |
43 | }; | 42 | }; |
44 | 43 | ||
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index b903dc5cf944..2b0da27eaee4 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -296,9 +296,6 @@ void __init setup_arch(char **cmdline_p) | |||
296 | if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) | 296 | if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE)) |
297 | ucache_bsize = icache_bsize = dcache_bsize; | 297 | ucache_bsize = icache_bsize = dcache_bsize; |
298 | 298 | ||
299 | /* reboot on panic */ | ||
300 | panic_timeout = 180; | ||
301 | |||
302 | if (ppc_md.panic) | 299 | if (ppc_md.panic) |
303 | setup_panic(); | 300 | setup_panic(); |
304 | 301 | ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4085aaa9478f..856dd4e99bfe 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -588,9 +588,6 @@ void __init setup_arch(char **cmdline_p) | |||
588 | dcache_bsize = ppc64_caches.dline_size; | 588 | dcache_bsize = ppc64_caches.dline_size; |
589 | icache_bsize = ppc64_caches.iline_size; | 589 | icache_bsize = ppc64_caches.iline_size; |
590 | 590 | ||
591 | /* reboot on panic */ | ||
592 | panic_timeout = 180; | ||
593 | |||
594 | if (ppc_md.panic) | 591 | if (ppc_md.panic) |
595 | setup_panic(); | 592 | setup_panic(); |
596 | 593 | ||
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c index 59f419b935f2..003b20964ea0 100644 --- a/arch/powerpc/kernel/uprobes.c +++ b/arch/powerpc/kernel/uprobes.c | |||
@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | |||
186 | * emulate_step() returns 1 if the insn was successfully emulated. | 186 | * emulate_step() returns 1 if the insn was successfully emulated. |
187 | * For all other cases, we need to single-step in hardware. | 187 | * For all other cases, we need to single-step in hardware. |
188 | */ | 188 | */ |
189 | ret = emulate_step(regs, auprobe->ainsn); | 189 | ret = emulate_step(regs, auprobe->insn); |
190 | if (ret > 0) | 190 | if (ret > 0) |
191 | return true; | 191 | return true; |
192 | 192 | ||
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index ac3c2a10dafd..555034f8505e 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c | |||
@@ -223,10 +223,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, | |||
223 | } | 223 | } |
224 | PPC_DIVWU(r_A, r_A, r_X); | 224 | PPC_DIVWU(r_A, r_A, r_X); |
225 | break; | 225 | break; |
226 | case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ | 226 | case BPF_S_ALU_DIV_K: /* A /= K */ |
227 | if (K == 1) | ||
228 | break; | ||
227 | PPC_LI32(r_scratch1, K); | 229 | PPC_LI32(r_scratch1, K); |
228 | /* Top 32 bits of 64bit result -> A */ | 230 | PPC_DIVWU(r_A, r_A, r_scratch1); |
229 | PPC_MULHWU(r_A, r_A, r_scratch1); | ||
230 | break; | 231 | break; |
231 | case BPF_S_ALU_AND_X: | 232 | case BPF_S_ALU_AND_X: |
232 | ctx->seen |= SEEN_XREG; | 233 | ctx->seen |= SEEN_XREG; |
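On the BPF_S_ALU_DIV_K hunk above: the old JIT multiplied by a pre-scaled constant and kept the top 32 bits (the reciprocal_divide() trick), while the new code loads K and emits a real divwu, skipping the operation entirely when K == 1. In plain C the two computations are roughly:

	#include <stdint.h>

	/* old behaviour: A = reciprocal_divide(A, R), where R was a pre-computed
	 * 32-bit reciprocal of the divisor (top half of a 32x32->64 multiply) */
	static uint32_t div_k_old(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	/* new behaviour: a straight unsigned divide by the literal K */
	static uint32_t div_k_new(uint32_t a, uint32_t k)
	{
		return k == 1 ? a : a / k;
	}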
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c1f190858701..6f76ae417f47 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -470,7 +470,7 @@ static long pseries_little_endian_exceptions(void) | |||
470 | 470 | ||
471 | static void __init pSeries_setup_arch(void) | 471 | static void __init pSeries_setup_arch(void) |
472 | { | 472 | { |
473 | panic_timeout = 10; | 473 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); |
474 | 474 | ||
475 | /* Discover PIC type and setup ppc_md accordingly */ | 475 | /* Discover PIC type and setup ppc_md accordingly */ |
476 | pseries_discover_pic(); | 476 | pseries_discover_pic(); |
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 16760eeb79b0..578680f6207a 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h | |||
@@ -32,4 +32,19 @@ | |||
32 | 32 | ||
33 | #define set_mb(var, value) do { var = value; mb(); } while (0) | 33 | #define set_mb(var, value) do { var = value; mb(); } while (0) |
34 | 34 | ||
35 | #define smp_store_release(p, v) \ | ||
36 | do { \ | ||
37 | compiletime_assert_atomic_type(*p); \ | ||
38 | barrier(); \ | ||
39 | ACCESS_ONCE(*p) = (v); \ | ||
40 | } while (0) | ||
41 | |||
42 | #define smp_load_acquire(p) \ | ||
43 | ({ \ | ||
44 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
45 | compiletime_assert_atomic_type(*p); \ | ||
46 | barrier(); \ | ||
47 | ___p1; \ | ||
48 | }) | ||
49 | |||
35 | #endif /* __ASM_BARRIER_H */ | 50 | #endif /* __ASM_BARRIER_H */ |
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 4bf9da03591e..5d7e8cf83bd6 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h | |||
@@ -38,7 +38,8 @@ | |||
38 | 38 | ||
39 | #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ | 39 | #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ |
40 | PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ | 40 | PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ |
41 | PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME) | 41 | PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ |
42 | PSW32_ASC_PRIMARY) | ||
42 | 43 | ||
43 | #define COMPAT_USER_HZ 100 | 44 | #define COMPAT_USER_HZ 100 |
44 | #define COMPAT_UTS_MACHINE "s390\0\0\0\0" | 45 | #define COMPAT_UTS_MACHINE "s390\0\0\0\0" |
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index c879fad404c8..cb700d54bd83 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h | |||
@@ -56,6 +56,96 @@ struct cpumf_ctr_info { | |||
56 | u32 reserved2[12]; | 56 | u32 reserved2[12]; |
57 | } __packed; | 57 | } __packed; |
58 | 58 | ||
59 | /* QUERY SAMPLING INFORMATION block */ | ||
60 | struct hws_qsi_info_block { /* Bit(s) */ | ||
61 | unsigned int b0_13:14; /* 0-13: zeros */ | ||
62 | unsigned int as:1; /* 14: basic-sampling authorization */ | ||
63 | unsigned int ad:1; /* 15: diag-sampling authorization */ | ||
64 | unsigned int b16_21:6; /* 16-21: zeros */ | ||
65 | unsigned int es:1; /* 22: basic-sampling enable control */ | ||
66 | unsigned int ed:1; /* 23: diag-sampling enable control */ | ||
67 | unsigned int b24_29:6; /* 24-29: zeros */ | ||
68 | unsigned int cs:1; /* 30: basic-sampling activation control */ | ||
69 | unsigned int cd:1; /* 31: diag-sampling activation control */ | ||
70 | unsigned int bsdes:16; /* 4-5: size of basic sampling entry */ | ||
71 | unsigned int dsdes:16; /* 6-7: size of diagnostic sampling entry */ | ||
72 | unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ | ||
73 | unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ | ||
74 | unsigned long tear; /* 24-31: TEAR contents */ | ||
75 | unsigned long dear; /* 32-39: DEAR contents */ | ||
76 | unsigned int rsvrd0; /* 40-43: reserved */ | ||
77 | unsigned int cpu_speed; /* 44-47: CPU speed */ | ||
78 | unsigned long long rsvrd1; /* 48-55: reserved */ | ||
79 | unsigned long long rsvrd2; /* 56-63: reserved */ | ||
80 | } __packed; | ||
81 | |||
82 | /* SET SAMPLING CONTROLS request block */ | ||
83 | struct hws_lsctl_request_block { | ||
84 | unsigned int s:1; /* 0: maximum buffer indicator */ | ||
85 | unsigned int h:1; /* 1: part. level reserved for VM use*/ | ||
86 | unsigned long long b2_53:52;/* 2-53: zeros */ | ||
87 | unsigned int es:1; /* 54: basic-sampling enable control */ | ||
88 | unsigned int ed:1; /* 55: diag-sampling enable control */ | ||
89 | unsigned int b56_61:6; /* 56-61: - zeros */ | ||
90 | unsigned int cs:1; /* 62: basic-sampling activation control */ | ||
91 | unsigned int cd:1; /* 63: diag-sampling activation control */ | ||
92 | unsigned long interval; /* 8-15: sampling interval */ | ||
93 | unsigned long tear; /* 16-23: TEAR contents */ | ||
94 | unsigned long dear; /* 24-31: DEAR contents */ | ||
95 | /* 32-63: */ | ||
96 | unsigned long rsvrd1; /* reserved */ | ||
97 | unsigned long rsvrd2; /* reserved */ | ||
98 | unsigned long rsvrd3; /* reserved */ | ||
99 | unsigned long rsvrd4; /* reserved */ | ||
100 | } __packed; | ||
101 | |||
102 | struct hws_basic_entry { | ||
103 | unsigned int def:16; /* 0-15 Data Entry Format */ | ||
104 | unsigned int R:4; /* 16-19 reserved */ | ||
105 | unsigned int U:4; /* 20-23 Number of unique instruct. */ | ||
106 | unsigned int z:2; /* zeros */ | ||
107 | unsigned int T:1; /* 26 PSW DAT mode */ | ||
108 | unsigned int W:1; /* 27 PSW wait state */ | ||
109 | unsigned int P:1; /* 28 PSW Problem state */ | ||
110 | unsigned int AS:2; /* 29-30 PSW address-space control */ | ||
111 | unsigned int I:1; /* 31 entry valid or invalid */ | ||
112 | unsigned int:16; | ||
113 | unsigned int prim_asn:16; /* primary ASN */ | ||
114 | unsigned long long ia; /* Instruction Address */ | ||
115 | unsigned long long gpp; /* Guest Program Parameter */ | ||
116 | unsigned long long hpp; /* Host Program Parameter */ | ||
117 | } __packed; | ||
118 | |||
119 | struct hws_diag_entry { | ||
120 | unsigned int def:16; /* 0-15 Data Entry Format */ | ||
121 | unsigned int R:14; /* 16-19 and 20-30 reserved */ | ||
122 | unsigned int I:1; /* 31 entry valid or invalid */ | ||
123 | u8 data[]; /* Machine-dependent sample data */ | ||
124 | } __packed; | ||
125 | |||
126 | struct hws_combined_entry { | ||
127 | struct hws_basic_entry basic; /* Basic-sampling data entry */ | ||
128 | struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ | ||
129 | } __packed; | ||
130 | |||
131 | struct hws_trailer_entry { | ||
132 | union { | ||
133 | struct { | ||
134 | unsigned int f:1; /* 0 - Block Full Indicator */ | ||
135 | unsigned int a:1; /* 1 - Alert request control */ | ||
136 | unsigned int t:1; /* 2 - Timestamp format */ | ||
137 | unsigned long long:61; /* 3 - 63: Reserved */ | ||
138 | }; | ||
139 | unsigned long long flags; /* 0 - 63: All indicators */ | ||
140 | }; | ||
141 | unsigned long long overflow; /* 64 - sample Overflow count */ | ||
142 | unsigned char timestamp[16]; /* 16 - 31 timestamp */ | ||
143 | unsigned long long reserved1; /* 32 -Reserved */ | ||
144 | unsigned long long reserved2; /* */ | ||
145 | unsigned long long progusage1; /* 48 - reserved for programming use */ | ||
146 | unsigned long long progusage2; /* */ | ||
147 | } __packed; | ||
148 | |||
59 | /* Query counter information */ | 149 | /* Query counter information */ |
60 | static inline int qctri(struct cpumf_ctr_info *info) | 150 | static inline int qctri(struct cpumf_ctr_info *info) |
61 | { | 151 | { |
@@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val) | |||
99 | return cc; | 189 | return cc; |
100 | } | 190 | } |
101 | 191 | ||
192 | /* Query sampling information */ | ||
193 | static inline int qsi(struct hws_qsi_info_block *info) | ||
194 | { | ||
195 | int cc; | ||
196 | cc = 1; | ||
197 | |||
198 | asm volatile( | ||
199 | "0: .insn s,0xb2860000,0(%1)\n" | ||
200 | "1: lhi %0,0\n" | ||
201 | "2:\n" | ||
202 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | ||
203 | : "=d" (cc), "+a" (info) | ||
204 | : "m" (*info) | ||
205 | : "cc", "memory"); | ||
206 | |||
207 | return cc ? -EINVAL : 0; | ||
208 | } | ||
209 | |||
210 | /* Load sampling controls */ | ||
211 | static inline int lsctl(struct hws_lsctl_request_block *req) | ||
212 | { | ||
213 | int cc; | ||
214 | |||
215 | cc = 1; | ||
216 | asm volatile( | ||
217 | "0: .insn s,0xb2870000,0(%1)\n" | ||
218 | "1: ipm %0\n" | ||
219 | " srl %0,28\n" | ||
220 | "2:\n" | ||
221 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | ||
222 | : "+d" (cc), "+a" (req) | ||
223 | : "m" (*req) | ||
224 | : "cc", "memory"); | ||
225 | |||
226 | return cc ? -EINVAL : 0; | ||
227 | } | ||
228 | |||
229 | /* Sampling control helper functions */ | ||
230 | |||
231 | #include <linux/time.h> | ||
232 | |||
233 | static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi, | ||
234 | unsigned long freq) | ||
235 | { | ||
236 | return (USEC_PER_SEC / freq) * qsi->cpu_speed; | ||
237 | } | ||
238 | |||
239 | static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi, | ||
240 | unsigned long rate) | ||
241 | { | ||
242 | return USEC_PER_SEC * qsi->cpu_speed / rate; | ||
243 | } | ||
244 | |||
245 | #define SDB_TE_ALERT_REQ_MASK 0x4000000000000000UL | ||
246 | #define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL | ||
247 | |||
248 | /* Return TOD timestamp contained in a trailer entry */ | ||
249 | static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te) | ||
250 | { | ||
251 | /* TOD in STCKE format */ | ||
252 | if (te->t) | ||
253 | return *((unsigned long long *) &te->timestamp[1]); | ||
254 | |||
255 | /* TOD in STCK format */ | ||
256 | return *((unsigned long long *) &te->timestamp[0]); | ||
257 | } | ||
258 | |||
259 | /* Return pointer to trailer entry of a sample data block */ | ||
260 | static inline unsigned long *trailer_entry_ptr(unsigned long v) | ||
261 | { | ||
262 | void *ret; | ||
263 | |||
264 | ret = (void *) v; | ||
265 | ret += PAGE_SIZE; | ||
266 | ret -= sizeof(struct hws_trailer_entry); | ||
267 | |||
268 | return (unsigned long *) ret; | ||
269 | } | ||
270 | |||
271 | /* Return non-zero if the entry in the sample data block table (sdbt) | ||
272 | * is a link to the next sdbt */ | ||
273 | static inline int is_link_entry(unsigned long *s) | ||
274 | { | ||
275 | return *s & 0x1ul ? 1 : 0; | ||
276 | } | ||
277 | |||
278 | /* Return pointer to the linked sdbt */ | ||
279 | static inline unsigned long *get_next_sdbt(unsigned long *s) | ||
280 | { | ||
281 | return (unsigned long *) (*s & ~0x1ul); | ||
282 | } | ||
102 | #endif /* _ASM_S390_CPU_MF_H */ | 283 | #endif /* _ASM_S390_CPU_MF_H */ |
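A short sketch of how the new cpu_mf.h helpers combine when scanning a chain of sample-data-block tables; the table layout and function name here are illustrative, the real consumer is the sampling PMU driver added elsewhere in this series:

	/* Walk one sample-data-block table (sdbt): ordinary entries point at
	 * SDBs, the final entry (bit 0 set) links to the next table. */
	static unsigned long *walk_sdbt(unsigned long *sdbt)
	{
		while (!is_link_entry(sdbt)) {
			struct hws_trailer_entry *te;

			te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
			if (te->f) {
				/* block full: harvest basic/diagnostic entries here */
			}
			sdbt++;
		}
		return get_next_sdbt(sdbt);	/* follow the link to the next table */
	}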
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index 7e1c917bbba2..09d1dd46bd57 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h | |||
@@ -29,6 +29,8 @@ struct css_general_char { | |||
29 | u32 fcx : 1; /* bit 88 */ | 29 | u32 fcx : 1; /* bit 88 */ |
30 | u32 : 19; | 30 | u32 : 19; |
31 | u32 alt_ssi : 1; /* bit 108 */ | 31 | u32 alt_ssi : 1; /* bit 108 */ |
32 | u32:1; | ||
33 | u32 narf:1; /* bit 110 */ | ||
32 | } __packed; | 34 | } __packed; |
33 | 35 | ||
34 | extern struct css_general_char css_general_characteristics; | 36 | extern struct css_general_char css_general_characteristics; |
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index c129ab2ac731..2583466f576b 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h | |||
@@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *); | |||
144 | void zpci_event_error(void *); | 144 | void zpci_event_error(void *); |
145 | void zpci_event_availability(void *); | 145 | void zpci_event_availability(void *); |
146 | void zpci_rescan(void); | 146 | void zpci_rescan(void); |
147 | bool zpci_is_enabled(void); | ||
147 | #else /* CONFIG_PCI */ | 148 | #else /* CONFIG_PCI */ |
148 | static inline void zpci_event_error(void *e) {} | 149 | static inline void zpci_event_error(void *e) {} |
149 | static inline void zpci_event_availability(void *e) {} | 150 | static inline void zpci_event_availability(void *e) {} |
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 1141fb3e7b21..159a8ec6da9a 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h | |||
@@ -1,21 +1,40 @@ | |||
1 | /* | 1 | /* |
2 | * Performance event support - s390 specific definitions. | 2 | * Performance event support - s390 specific definitions. |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2009, 2012 | 4 | * Copyright IBM Corp. 2009, 2013 |
5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | 5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
6 | * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | 6 | * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/cpu_mf.h> | 9 | #ifndef _ASM_S390_PERF_EVENT_H |
10 | #define _ASM_S390_PERF_EVENT_H | ||
10 | 11 | ||
11 | /* CPU-measurement counter facility */ | 12 | #ifdef CONFIG_64BIT |
12 | #define PERF_CPUM_CF_MAX_CTR 256 | 13 | |
14 | #include <linux/perf_event.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <asm/cpu_mf.h> | ||
13 | 17 | ||
14 | /* Per-CPU flags for PMU states */ | 18 | /* Per-CPU flags for PMU states */ |
15 | #define PMU_F_RESERVED 0x1000 | 19 | #define PMU_F_RESERVED 0x1000 |
16 | #define PMU_F_ENABLED 0x2000 | 20 | #define PMU_F_ENABLED 0x2000 |
21 | #define PMU_F_IN_USE 0x4000 | ||
22 | #define PMU_F_ERR_IBE 0x0100 | ||
23 | #define PMU_F_ERR_LSDA 0x0200 | ||
24 | #define PMU_F_ERR_MASK (PMU_F_ERR_IBE|PMU_F_ERR_LSDA) | ||
25 | |||
26 | /* Perf definitions for PMU event attributes in sysfs */ | ||
27 | extern __init const struct attribute_group **cpumf_cf_event_group(void); | ||
28 | extern ssize_t cpumf_events_sysfs_show(struct device *dev, | ||
29 | struct device_attribute *attr, | ||
30 | char *page); | ||
31 | #define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name | ||
32 | #define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr) | ||
33 | |||
34 | #define CPUMF_EVENT_ATTR(cat, name, id) \ | ||
35 | PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show) | ||
36 | #define CPUMF_EVENT_PTR(cat, name) EVENT_PTR(cat, name) | ||
17 | 37 | ||
18 | #ifdef CONFIG_64BIT | ||
19 | 38 | ||
20 | /* Perf callbacks */ | 39 | /* Perf callbacks */ |
21 | struct pt_regs; | 40 | struct pt_regs; |
@@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | |||
23 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | 42 | extern unsigned long perf_misc_flags(struct pt_regs *regs); |
24 | #define perf_misc_flags(regs) perf_misc_flags(regs) | 43 | #define perf_misc_flags(regs) perf_misc_flags(regs) |
25 | 44 | ||
45 | /* Perf pt_regs extension for sample-data-entry indicators */ | ||
46 | struct perf_sf_sde_regs { | ||
47 | unsigned char in_guest:1; /* guest sample */ | ||
48 | unsigned long reserved:63; /* reserved */ | ||
49 | }; | ||
50 | |||
51 | /* Perf PMU definitions for the counter facility */ | ||
52 | #define PERF_CPUM_CF_MAX_CTR 256 | ||
53 | |||
54 | /* Perf PMU definitions for the sampling facility */ | ||
55 | #define PERF_CPUM_SF_MAX_CTR 2 | ||
56 | #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ | ||
57 | #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ | ||
58 | #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ | ||
59 | #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ | ||
60 | #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ | ||
61 | PERF_CPUM_SF_DIAG_MODE) | ||
62 | #define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */ | ||
63 | |||
64 | #define REG_NONE 0 | ||
65 | #define REG_OVERFLOW 1 | ||
66 | #define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config) | ||
67 | #define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc) | ||
68 | #define RAWSAMPLE_REG(hwc) ((hwc)->config) | ||
69 | #define TEAR_REG(hwc) ((hwc)->last_tag) | ||
70 | #define SAMPL_RATE(hwc) ((hwc)->event_base) | ||
71 | #define SAMPL_FLAGS(hwc) ((hwc)->config_base) | ||
72 | #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) | ||
73 | #define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) | ||
74 | |||
75 | /* Structure for sampling data entries to be passed as perf raw sample data | ||
76 | * to user space. Note that raw sample data must be aligned and, thus, might | ||
77 | * be padded with zeros. | ||
78 | */ | ||
79 | struct sf_raw_sample { | ||
80 | #define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE | ||
81 | #define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE | ||
82 | u64 format; | ||
83 | u32 size; /* Size of sf_raw_sample */ | ||
84 | u16 bsdes; /* Basic-sampling data entry size */ | ||
85 | u16 dsdes; /* Diagnostic-sampling data entry size */ | ||
86 | struct hws_basic_entry basic; /* Basic-sampling data entry */ | ||
87 | struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ | ||
88 | u8 padding[]; /* Padding to next multiple of 8 */ | ||
89 | } __packed; | ||
90 | |||
91 | /* Perf hardware reserve and release functions */ | ||
92 | int perf_reserve_sampling(void); | ||
93 | void perf_release_sampling(void); | ||
94 | |||
26 | #endif /* CONFIG_64BIT */ | 95 | #endif /* CONFIG_64BIT */ |
96 | #endif /* _ASM_S390_PERF_EVENT_H */ | ||
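The CPUMF_EVENT_ATTR()/CPUMF_EVENT_PTR() macros above wrap the generic PMU_EVENT_ATTR() helper: each invocation defines a named event attribute, and the *_PTR form references its embedded struct attribute for use in an attribute group. Roughly, assuming the generic helper's usual shape:

	/* CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000) expands to something like: */
	PMU_EVENT_ATTR(CPU_CYCLES, event_attr_cf_CPU_CYCLES,
		       0x0000, cpumf_events_sysfs_show);

	/* CPUMF_EVENT_PTR(cf, CPU_CYCLES) then yields
	 * &event_attr_cf_CPU_CYCLES.attr.attr for an attribute array, which is
	 * what exposes the event under
	 * /sys/bus/event_source/devices/cpum_cf/events/CPU_CYCLES, usable as
	 * e.g. "perf stat -e cpum_cf/CPU_CYCLES/". */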
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 57d0d7e794b1..d786c634e052 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h | |||
@@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, | |||
336 | #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 | 336 | #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 |
337 | 337 | ||
338 | /** | 338 | /** |
339 | * struct qdio_initialize - qdio initalization data | 339 | * struct qdio_initialize - qdio initialization data |
340 | * @cdev: associated ccw device | 340 | * @cdev: associated ccw device |
341 | * @q_format: queue format | 341 | * @q_format: queue format |
342 | * @adapter_name: name for the adapter | 342 | * @adapter_name: name for the adapter |
@@ -378,6 +378,34 @@ struct qdio_initialize { | |||
378 | struct qdio_outbuf_state *output_sbal_state_array; | 378 | struct qdio_outbuf_state *output_sbal_state_array; |
379 | }; | 379 | }; |
380 | 380 | ||
381 | /** | ||
382 | * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc() | ||
383 | * @l3_ipv6_addr: entry contains IPv6 address | ||
384 | * @l3_ipv4_addr: entry contains IPv4 address | ||
385 | * @l2_addr_lnid: entry contains MAC address and VLAN ID | ||
386 | */ | ||
387 | enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid}; | ||
388 | |||
389 | /** | ||
390 | * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc() | ||
391 | * @nit: Network interface token | ||
392 | * @addr: Address of one of the three types | ||
393 | * | ||
394 | * The struct is passed to the callback function by qdio_brinfo_desc() | ||
395 | */ | ||
396 | struct qdio_brinfo_entry_l3_ipv6 { | ||
397 | u64 nit; | ||
398 | struct { unsigned char _s6_addr[16]; } addr; | ||
399 | } __packed; | ||
400 | struct qdio_brinfo_entry_l3_ipv4 { | ||
401 | u64 nit; | ||
402 | struct { uint32_t _s_addr; } addr; | ||
403 | } __packed; | ||
404 | struct qdio_brinfo_entry_l2 { | ||
405 | u64 nit; | ||
406 | struct { u8 mac[6]; u16 lnid; } addr_lnid; | ||
407 | } __packed; | ||
408 | |||
381 | #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ | 409 | #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ |
382 | #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */ | 410 | #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */ |
383 | #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */ | 411 | #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */ |
@@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *); | |||
399 | extern int qdio_shutdown(struct ccw_device *, int); | 427 | extern int qdio_shutdown(struct ccw_device *, int); |
400 | extern int qdio_free(struct ccw_device *); | 428 | extern int qdio_free(struct ccw_device *); |
401 | extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); | 429 | extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); |
430 | extern int qdio_pnso_brinfo(struct subchannel_id schid, | ||
431 | int cnc, u16 *response, | ||
432 | void (*cb)(void *priv, enum qdio_brinfo_entry_type type, | ||
433 | void *entry), | ||
434 | void *priv); | ||
402 | 435 | ||
403 | #endif /* __QDIO_H__ */ | 436 | #endif /* __QDIO_H__ */ |
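A sketch of a caller for the new qdio_pnso_brinfo() export declared above, based only on the prototype and entry types in this hunk; the callback body and names are illustrative:

	static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
			      void *entry)
	{
		switch (type) {
		case l2_addr_lnid: {
			struct qdio_brinfo_entry_l2 *e = entry;
			/* e->addr_lnid.mac / e->addr_lnid.lnid available here */
			break;
		}
		case l3_ipv4_addr:
		case l3_ipv6_addr:
			/* struct qdio_brinfo_entry_l3_ipv4 / _l3_ipv6 */
			break;
		}
	}

	/* typical call site (schid, response and priv supplied by the driver):
	 *	rc = qdio_pnso_brinfo(schid, 1, &response, brinfo_cb, priv);
	 */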
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 2f390956c7c1..220e171413f8 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h | |||
@@ -52,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid); | |||
52 | int sclp_chp_deconfigure(struct chp_id chpid); | 52 | int sclp_chp_deconfigure(struct chp_id chpid); |
53 | int sclp_chp_read_info(struct sclp_chp_info *info); | 53 | int sclp_chp_read_info(struct sclp_chp_info *info); |
54 | void sclp_get_ipl_info(struct sclp_ipl_info *info); | 54 | void sclp_get_ipl_info(struct sclp_ipl_info *info); |
55 | bool sclp_has_linemode(void); | 55 | bool __init sclp_has_linemode(void); |
56 | bool sclp_has_vt220(void); | 56 | bool __init sclp_has_vt220(void); |
57 | int sclp_pci_configure(u32 fid); | 57 | int sclp_pci_configure(u32 fid); |
58 | int sclp_pci_deconfigure(u32 fid); | 58 | int sclp_pci_deconfigure(u32 fid); |
59 | int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); | 59 | int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); |
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index e83fc116f5bf..f2b18eacaca8 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h | |||
@@ -154,6 +154,67 @@ struct ica_xcRB { | |||
154 | unsigned short priority_window; | 154 | unsigned short priority_window; |
155 | unsigned int status; | 155 | unsigned int status; |
156 | } __attribute__((packed)); | 156 | } __attribute__((packed)); |
157 | |||
158 | /** | ||
159 | * struct ep11_cprb - EP11 connectivity programming request block | ||
160 | * @cprb_len: CPRB header length [0x0020] | ||
161 | * @cprb_ver_id: CPRB version id. [0x04] | ||
162 | * @pad_000: Alignment pad bytes | ||
163 | * @flags: Admin cmd [0x80] or functional cmd [0x00] | ||
164 | * @func_id: Function id / subtype [0x5434] | ||
165 | * @source_id: Source id [originator id] | ||
166 | * @target_id: Target id [usage/ctrl domain id] | ||
167 | * @ret_code: Return code | ||
168 | * @reserved1: Reserved | ||
169 | * @reserved2: Reserved | ||
170 | * @payload_len: Payload length | ||
171 | */ | ||
172 | struct ep11_cprb { | ||
173 | uint16_t cprb_len; | ||
174 | unsigned char cprb_ver_id; | ||
175 | unsigned char pad_000[2]; | ||
176 | unsigned char flags; | ||
177 | unsigned char func_id[2]; | ||
178 | uint32_t source_id; | ||
179 | uint32_t target_id; | ||
180 | uint32_t ret_code; | ||
181 | uint32_t reserved1; | ||
182 | uint32_t reserved2; | ||
183 | uint32_t payload_len; | ||
184 | } __attribute__((packed)); | ||
185 | |||
186 | /** | ||
187 | * struct ep11_target_dev - EP11 target device list | ||
188 | * @ap_id: AP device id | ||
189 | * @dom_id: Usage domain id | ||
190 | */ | ||
191 | struct ep11_target_dev { | ||
192 | uint16_t ap_id; | ||
193 | uint16_t dom_id; | ||
194 | }; | ||
195 | |||
196 | /** | ||
197 | * struct ep11_urb - EP11 user request block | ||
198 | * @targets_num: Number of target adapters | ||
199 | * @targets: Addr to target adapter list | ||
200 | * @weight: Level of request priority | ||
201 | * @req_no: Request id/number | ||
202 | * @req_len: Request length | ||
203 | * @req: Addr to request block | ||
204 | * @resp_len: Response length | ||
205 | * @resp: Addr to response block | ||
206 | */ | ||
207 | struct ep11_urb { | ||
208 | uint16_t targets_num; | ||
209 | uint64_t targets; | ||
210 | uint64_t weight; | ||
211 | uint64_t req_no; | ||
212 | uint64_t req_len; | ||
213 | uint64_t req; | ||
214 | uint64_t resp_len; | ||
215 | uint64_t resp; | ||
216 | } __attribute__((packed)); | ||
217 | |||
157 | #define AUTOSELECT ((unsigned int)0xFFFFFFFF) | 218 | #define AUTOSELECT ((unsigned int)0xFFFFFFFF) |
158 | 219 | ||
159 | #define ZCRYPT_IOCTL_MAGIC 'z' | 220 | #define ZCRYPT_IOCTL_MAGIC 'z' |
@@ -183,6 +244,9 @@ struct ica_xcRB { | |||
183 | * ZSECSENDCPRB | 244 | * ZSECSENDCPRB |
184 | * Send an arbitrary CPRB to a crypto card. | 245 | * Send an arbitrary CPRB to a crypto card. |
185 | * | 246 | * |
247 | * ZSENDEP11CPRB | ||
248 | * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. | ||
249 | * | ||
186 | * Z90STAT_STATUS_MASK | 250 | * Z90STAT_STATUS_MASK |
187 | * Return an 64 element array of unsigned chars for the status of | 251 | * Return an 64 element array of unsigned chars for the status of |
188 | * all devices. | 252 | * all devices. |
@@ -256,6 +320,7 @@ struct ica_xcRB { | |||
256 | #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) | 320 | #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) |
257 | #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) | 321 | #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) |
258 | #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) | 322 | #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) |
323 | #define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) | ||
259 | 324 | ||
260 | /* New status calls */ | 325 | /* New status calls */ |
261 | #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) | 326 | #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) |
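For the new EP11 structures and the ZSENDEP11CPRB ioctl above, a user-space sketch of how a request might be submitted; only the struct fields and ioctl number come from this header, while the device node, adapter/domain ids and buffer handling are assumptions:

	#include <stdint.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/zcrypt.h>

	int send_ep11_cprb(const void *req, size_t req_len, void *resp, size_t resp_len)
	{
		struct ep11_target_dev target = { .ap_id = 0, .dom_id = 0 };
		struct ep11_urb urb;
		int fd, rc;

		memset(&urb, 0, sizeof(urb));
		urb.targets_num = 1;
		urb.targets  = (uint64_t)(unsigned long)&target;
		urb.req_len  = req_len;
		urb.req      = (uint64_t)(unsigned long)req;
		urb.resp_len = resp_len;
		urb.resp     = (uint64_t)(unsigned long)resp;

		fd = open("/dev/z90crypt", O_RDWR);	/* assumed zcrypt device node */
		if (fd < 0)
			return -1;
		rc = ioctl(fd, ZSENDEP11CPRB, &urb);	/* ioctl added in this diff */
		close(fd);
		return rc;
	}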
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 2403303cfed7..1b3ac09c11b6 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | |||
60 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 60 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
61 | 61 | ||
62 | ifdef CONFIG_64BIT | 62 | ifdef CONFIG_64BIT |
63 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o | 63 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ |
64 | perf_cpum_cf_events.o | ||
64 | obj-y += runtime_instr.o cache.o | 65 | obj-y += runtime_instr.o cache.o |
65 | endif | 66 | endif |
66 | 67 | ||
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 95e7ba0fbb7e..8b84bc373e94 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
412 | regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; | 412 | regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE; |
413 | } else { | 413 | } else { |
414 | regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; | 414 | regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE; |
415 | err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, | 415 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, |
416 | (u16 __force __user *)(frame->retcode)); | 416 | (u16 __force __user *)(frame->retcode))) |
417 | goto give_sigsegv; | ||
417 | } | 418 | } |
418 | 419 | ||
419 | /* Set up backchain. */ | 420 | /* Set up backchain. */ |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index e5b43c97a834..384e609b4711 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | |||
74 | .endm | 74 | .endm |
75 | 75 | ||
76 | .macro LPP newpp | 76 | .macro LPP newpp |
77 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 77 | #if IS_ENABLED(CONFIG_KVM) |
78 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP | 78 | tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP |
79 | jz .+8 | 79 | jz .+8 |
80 | .insn s,0xb2800000,\newpp | 80 | .insn s,0xb2800000,\newpp |
@@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | |||
82 | .endm | 82 | .endm |
83 | 83 | ||
84 | .macro HANDLE_SIE_INTERCEPT scratch,reason | 84 | .macro HANDLE_SIE_INTERCEPT scratch,reason |
85 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 85 | #if IS_ENABLED(CONFIG_KVM) |
86 | tmhh %r8,0x0001 # interrupting from user ? | 86 | tmhh %r8,0x0001 # interrupting from user ? |
87 | jnz .+62 | 87 | jnz .+62 |
88 | lgr \scratch,%r9 | 88 | lgr \scratch,%r9 |
@@ -946,7 +946,7 @@ cleanup_idle_insn: | |||
946 | .quad __critical_end - __critical_start | 946 | .quad __critical_end - __critical_start |
947 | 947 | ||
948 | 948 | ||
949 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 949 | #if IS_ENABLED(CONFIG_KVM) |
950 | /* | 950 | /* |
951 | * sie64a calling convention: | 951 | * sie64a calling convention: |
952 | * %r2 pointer to sie control block | 952 | * %r2 pointer to sie control block |
@@ -975,7 +975,7 @@ sie_done: | |||
975 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 975 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
976 | # some program checks are suppressing. C code (e.g. do_protection_exception) | 976 | # some program checks are suppressing. C code (e.g. do_protection_exception) |
977 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other | 977 | # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other |
978 | # instructions beween sie64a and sie_done should not cause program | 978 | # instructions between sie64a and sie_done should not cause program |
979 | # interrupts. So lets use a nop (47 00 00 00) as a landing pad. | 979 | # interrupts. So lets use a nop (47 00 00 00) as a landing pad. |
980 | # See also HANDLE_SIE_INTERCEPT | 980 | # See also HANDLE_SIE_INTERCEPT |
981 | rewind_pad: | 981 | rewind_pad: |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 1105502bf6e9..f51214c04858 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void) | |||
680 | goto out; | 680 | goto out; |
681 | } | 681 | } |
682 | 682 | ||
683 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); | ||
683 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); | 684 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); |
684 | if (rc) { | 685 | if (rc) { |
685 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); | 686 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); |
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c new file mode 100644 index 000000000000..4554a4bae39e --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_events.c | |||
@@ -0,0 +1,322 @@ | |||
1 | /* | ||
2 | * Perf PMU sysfs events attributes for available CPU-measurement counters | ||
3 | * | ||
4 | */ | ||
5 | |||
6 | #include <linux/slab.h> | ||
7 | #include <linux/perf_event.h> | ||
8 | |||
9 | |||
10 | /* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ | ||
11 | |||
12 | CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000); | ||
13 | CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001); | ||
14 | CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002); | ||
15 | CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003); | ||
16 | CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020); | ||
17 | CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021); | ||
18 | CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022); | ||
19 | CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023); | ||
20 | CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024); | ||
21 | CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025); | ||
22 | CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004); | ||
23 | CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005); | ||
24 | CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040); | ||
25 | CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041); | ||
26 | CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042); | ||
27 | CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043); | ||
28 | CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044); | ||
29 | CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045); | ||
30 | CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046); | ||
31 | CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047); | ||
32 | CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048); | ||
33 | CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049); | ||
34 | CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a); | ||
35 | CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b); | ||
36 | CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c); | ||
37 | CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d); | ||
38 | CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e); | ||
39 | CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f); | ||
40 | CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080); | ||
41 | CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081); | ||
42 | CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082); | ||
43 | CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083); | ||
44 | CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084); | ||
45 | CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085); | ||
46 | CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086); | ||
47 | CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087); | ||
48 | CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088); | ||
49 | CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089); | ||
50 | CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a); | ||
51 | CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b); | ||
52 | CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c); | ||
53 | CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d); | ||
54 | CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e); | ||
55 | CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091); | ||
56 | CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092); | ||
57 | CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093); | ||
58 | CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080); | ||
59 | CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081); | ||
60 | CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082); | ||
61 | CPUMF_EVENT_ATTR(cf_z196, ITLB1_MISSES, 0x0083); | ||
62 | CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085); | ||
63 | CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086); | ||
64 | CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087); | ||
65 | CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088); | ||
66 | CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089); | ||
67 | CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a); | ||
68 | CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b); | ||
69 | CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c); | ||
70 | CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d); | ||
71 | CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e); | ||
72 | CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f); | ||
73 | CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090); | ||
74 | CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091); | ||
75 | CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092); | ||
76 | CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093); | ||
77 | CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094); | ||
78 | CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096); | ||
79 | CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098); | ||
80 | CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); | ||
81 | CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b); | ||
82 | CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080); | ||
83 | CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081); | ||
84 | CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082); | ||
85 | CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083); | ||
86 | CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084); | ||
87 | CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085); | ||
88 | CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087); | ||
89 | CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089); | ||
90 | CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a); | ||
91 | CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b); | ||
92 | CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c); | ||
93 | CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d); | ||
94 | CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e); | ||
95 | CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f); | ||
96 | CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090); | ||
97 | CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091); | ||
98 | CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092); | ||
99 | CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093); | ||
100 | CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094); | ||
101 | CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095); | ||
102 | CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096); | ||
103 | CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097); | ||
104 | CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098); | ||
105 | CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099); | ||
106 | CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a); | ||
107 | CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b); | ||
108 | CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c); | ||
109 | CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d); | ||
110 | CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e); | ||
111 | CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f); | ||
112 | CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0); | ||
113 | CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); | ||
114 | CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); | ||
115 | CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); | ||
116 | CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); | ||
117 | |||
118 | static struct attribute *cpumcf_pmu_event_attr[] = { | ||
119 | CPUMF_EVENT_PTR(cf, CPU_CYCLES), | ||
120 | CPUMF_EVENT_PTR(cf, INSTRUCTIONS), | ||
121 | CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES), | ||
122 | CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES), | ||
123 | CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES), | ||
124 | CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS), | ||
125 | CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES), | ||
126 | CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES), | ||
127 | CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES), | ||
128 | CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES), | ||
129 | CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES), | ||
130 | CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES), | ||
131 | CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS), | ||
132 | CPUMF_EVENT_PTR(cf, PRNG_CYCLES), | ||
133 | CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS), | ||
134 | CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES), | ||
135 | CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS), | ||
136 | CPUMF_EVENT_PTR(cf, SHA_CYCLES), | ||
137 | CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS), | ||
138 | CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES), | ||
139 | CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS), | ||
140 | CPUMF_EVENT_PTR(cf, DEA_CYCLES), | ||
141 | CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS), | ||
142 | CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES), | ||
143 | CPUMF_EVENT_PTR(cf, AES_FUNCTIONS), | ||
144 | CPUMF_EVENT_PTR(cf, AES_CYCLES), | ||
145 | CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS), | ||
146 | CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES), | ||
147 | NULL, | ||
148 | }; | ||
149 | |||
150 | static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = { | ||
151 | CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES), | ||
152 | CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES), | ||
153 | CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES), | ||
154 | CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES), | ||
155 | CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES), | ||
156 | CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES), | ||
157 | CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES), | ||
158 | CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES), | ||
159 | CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES), | ||
160 | CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES), | ||
161 | CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES), | ||
162 | CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES), | ||
163 | CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES), | ||
164 | CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES), | ||
165 | CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES), | ||
166 | CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES), | ||
167 | CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES), | ||
168 | CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT), | ||
169 | NULL, | ||
170 | }; | ||
171 | |||
172 | static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = { | ||
173 | CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES), | ||
174 | CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES), | ||
175 | CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES), | ||
176 | CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES), | ||
177 | CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT), | ||
178 | CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES), | ||
179 | CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES), | ||
180 | CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES), | ||
181 | CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES), | ||
182 | CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES), | ||
183 | CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES), | ||
184 | CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES), | ||
185 | CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES), | ||
186 | CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES), | ||
187 | CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES), | ||
188 | CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES), | ||
189 | CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES), | ||
190 | CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES), | ||
191 | CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES), | ||
192 | CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES), | ||
193 | CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES), | ||
194 | CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES), | ||
195 | CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES), | ||
196 | CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES), | ||
197 | NULL, | ||
198 | }; | ||
199 | |||
200 | static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { | ||
201 | CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES), | ||
202 | CPUMF_EVENT_PTR(cf_zec12, ITLB1_MISSES), | ||
203 | CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES), | ||
204 | CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES), | ||
205 | CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES), | ||
206 | CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES), | ||
207 | CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES), | ||
208 | CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES), | ||
209 | CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES), | ||
210 | CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES), | ||
211 | CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES), | ||
212 | CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES), | ||
213 | CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES), | ||
214 | CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES), | ||
215 | CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES), | ||
216 | CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES), | ||
217 | CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES), | ||
218 | CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES), | ||
219 | CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES), | ||
220 | CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND), | ||
221 | CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV), | ||
222 | CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV), | ||
223 | CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV), | ||
224 | CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES), | ||
225 | CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES), | ||
226 | CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES), | ||
227 | CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES), | ||
228 | CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES), | ||
229 | CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND), | ||
230 | CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV), | ||
231 | CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV), | ||
232 | CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV), | ||
233 | CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT), | ||
234 | CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL), | ||
235 | CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL), | ||
236 | NULL, | ||
237 | }; | ||
238 | |||
239 | /* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ | ||
240 | |||
241 | static struct attribute_group cpumsf_pmu_events_group = { | ||
242 | .name = "events", | ||
243 | .attrs = cpumcf_pmu_event_attr, | ||
244 | }; | ||
245 | |||
246 | PMU_FORMAT_ATTR(event, "config:0-63"); | ||
247 | |||
248 | static struct attribute *cpumsf_pmu_format_attr[] = { | ||
249 | &format_attr_event.attr, | ||
250 | NULL, | ||
251 | }; | ||
252 | |||
253 | static struct attribute_group cpumsf_pmu_format_group = { | ||
254 | .name = "format", | ||
255 | .attrs = cpumsf_pmu_format_attr, | ||
256 | }; | ||
257 | |||
258 | static const struct attribute_group *cpumsf_pmu_attr_groups[] = { | ||
259 | &cpumsf_pmu_events_group, | ||
260 | &cpumsf_pmu_format_group, | ||
261 | NULL, | ||
262 | }; | ||
263 | |||
264 | |||
265 | static __init struct attribute **merge_attr(struct attribute **a, | ||
266 | struct attribute **b) | ||
267 | { | ||
268 | struct attribute **new; | ||
269 | int j, i; | ||
270 | |||
271 | for (j = 0; a[j]; j++) | ||
272 | ; | ||
273 | for (i = 0; b[i]; i++) | ||
274 | j++; | ||
275 | j++; | ||
276 | |||
277 | new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); | ||
278 | if (!new) | ||
279 | return NULL; | ||
280 | j = 0; | ||
281 | for (i = 0; a[i]; i++) | ||
282 | new[j++] = a[i]; | ||
283 | for (i = 0; b[i]; i++) | ||
284 | new[j++] = b[i]; | ||
285 | new[j] = NULL; | ||
286 | |||
287 | return new; | ||
288 | } | ||
289 | |||
290 | __init const struct attribute_group **cpumf_cf_event_group(void) | ||
291 | { | ||
292 | struct attribute **combined, **model; | ||
293 | struct cpuid cpu_id; | ||
294 | |||
295 | get_cpu_id(&cpu_id); | ||
296 | switch (cpu_id.machine) { | ||
297 | case 0x2097: | ||
298 | case 0x2098: | ||
299 | model = cpumcf_z10_pmu_event_attr; | ||
300 | break; | ||
301 | case 0x2817: | ||
302 | case 0x2818: | ||
303 | model = cpumcf_z196_pmu_event_attr; | ||
304 | break; | ||
305 | case 0x2827: | ||
306 | case 0x2828: | ||
307 | model = cpumcf_zec12_pmu_event_attr; | ||
308 | break; | ||
309 | default: | ||
310 | model = NULL; | ||
311 | break; | ||
312 | } | ||
313 | |||
314 | if (!model) | ||
315 | goto out; | ||
316 | |||
317 | combined = merge_attr(cpumcf_pmu_event_attr, model); | ||
318 | if (combined) | ||
319 | cpumsf_pmu_events_group.attrs = combined; | ||
320 | out: | ||
321 | return cpumsf_pmu_attr_groups; | ||
322 | } | ||
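/* Note on the CPU-type codes in the switch above (the mapping follows the
 * cf_z10/cf_z196/cf_zec12 attribute prefixes): 0x2097/0x2098 are the System
 * z10 EC/BC models, 0x2817/0x2818 the zEnterprise 196/114, and 0x2827/0x2828
 * the zEC12/zBC12 machines.
 */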
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c new file mode 100644 index 000000000000..6c0d29827cb6 --- /dev/null +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -0,0 +1,1641 @@ | |||
1 | /* | ||
2 | * Performance event support for the System z CPU-measurement Sampling Facility | ||
3 | * | ||
4 | * Copyright IBM Corp. 2013 | ||
5 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License (version 2 only) | ||
9 | * as published by the Free Software Foundation. | ||
10 | */ | ||
11 | #define KMSG_COMPONENT "cpum_sf" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/kernel_stat.h> | ||
16 | #include <linux/perf_event.h> | ||
17 | #include <linux/percpu.h> | ||
18 | #include <linux/notifier.h> | ||
19 | #include <linux/export.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/moduleparam.h> | ||
23 | #include <asm/cpu_mf.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include <asm/debug.h> | ||
26 | #include <asm/timex.h> | ||
27 | |||
28 | /* Minimum number of sample-data-block-tables: | ||
29 | * At least one table is required for the sampling buffer structure. | ||
30 | * A single table contains up to 511 pointers to sample-data-blocks. | ||
31 | */ | ||
32 | #define CPUM_SF_MIN_SDBT 1 | ||
33 | |||
34 | /* Number of sample-data-blocks per sample-data-block-table (SDBT): | ||
35 | * A table contains SDB pointers (8 bytes) and one table-link entry | ||
36 | * that points to the origin of the next SDBT. | ||
37 | */ | ||
38 | #define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) | ||
39 | |||
40 | /* Maximum page offset for an SDBT table-link entry: | ||
41 | * If this page offset is reached, a table-link entry to the next SDBT | ||
42 | * must be added. | ||
43 | */ | ||
44 | #define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) | ||
45 | static inline int require_table_link(const void *sdbt) | ||
46 | { | ||
47 | return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; | ||
48 | } | ||
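/* Worked example, assuming the usual 4KB page size: an SDBT holds
 * (4096 - 8) / 8 = 511 sample-data-block pointers followed by one 8-byte
 * table-link entry. The table-link entry therefore sits at page offset
 * 511 * 8 = 4088, which is exactly the CPUM_SF_SDBT_TL_OFFSET value that
 * require_table_link() tests for.
 */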
49 | |||
50 | /* Minimum and maximum sampling buffer sizes: | ||
51 | * | ||
52 | * These numbers represent the minimum and maximum size of the sampling | ||
53 | * buffer, taking the number of sample-data-block-tables into account. | ||
54 | * Note that these numbers apply to the basic-sampling function only. | ||
55 | * The maximum number of SDBs is scaled by CPUM_SF_SDB_DIAG_FACTOR if | ||
56 | * the diagnostic-sampling function is active. | ||
57 | * | ||
58 | * Sampling buffer size Buffer characteristics | ||
59 | * --------------------------------------------------- | ||
60 | * 64KB == 16 pages (4KB per page) | ||
61 | * 1 page for SDB-tables | ||
62 | * 15 pages for SDBs | ||
63 | * | ||
64 | * 32MB == 8192 pages (4KB per page) | ||
65 | * 16 pages for SDB-tables | ||
66 | * 8176 pages for SDBs | ||
67 | */ | ||
68 | static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; | ||
69 | static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; | ||
70 | static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; | ||
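/* Sanity check of the example sizes above, assuming 4KB pages: the 64KB
 * minimum is 1 SDBT page + 15 SDB pages = 16 pages; the 32MB maximum is
 * 8176 SDB pages plus DIV_ROUND_UP(8176, 511) = 16 SDBT pages, i.e.
 * 8192 pages in total.
 */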
71 | |||
72 | struct sf_buffer { | ||
73 | unsigned long *sdbt; /* Sample-data-block-table origin */ | ||
74 | /* buffer characteristics (required for buffer increments) */ | ||
75 | unsigned long num_sdb; /* Number of sample-data-blocks */ | ||
76 | unsigned long num_sdbt; /* Number of sample-data-block-tables */ | ||
77 | unsigned long *tail; /* last sample-data-block-table */ | ||
78 | }; | ||
79 | |||
80 | struct cpu_hw_sf { | ||
81 | /* CPU-measurement sampling information block */ | ||
82 | struct hws_qsi_info_block qsi; | ||
83 | /* CPU-measurement sampling control block */ | ||
84 | struct hws_lsctl_request_block lsctl; | ||
85 | struct sf_buffer sfb; /* Sampling buffer */ | ||
86 | unsigned int flags; /* Status flags */ | ||
87 | struct perf_event *event; /* Scheduled perf event */ | ||
88 | }; | ||
89 | static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); | ||
90 | |||
91 | /* Debug feature */ | ||
92 | static debug_info_t *sfdbg; | ||
93 | |||
94 | /* | ||
95 | * sf_disable() - Switch off sampling facility | ||
96 | */ | ||
97 | static int sf_disable(void) | ||
98 | { | ||
99 | struct hws_lsctl_request_block sreq; | ||
100 | |||
101 | memset(&sreq, 0, sizeof(sreq)); | ||
102 | return lsctl(&sreq); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * sf_buffer_available() - Check for an allocated sampling buffer | ||
107 | */ | ||
108 | static int sf_buffer_available(struct cpu_hw_sf *cpuhw) | ||
109 | { | ||
110 | return !!cpuhw->sfb.sdbt; | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * deallocate sampling facility buffer | ||
115 | */ | ||
116 | static void free_sampling_buffer(struct sf_buffer *sfb) | ||
117 | { | ||
118 | unsigned long *sdbt, *curr; | ||
119 | |||
120 | if (!sfb->sdbt) | ||
121 | return; | ||
122 | |||
123 | sdbt = sfb->sdbt; | ||
124 | curr = sdbt; | ||
125 | |||
126 | /* Free the SDBT after all SDBs are processed... */ | ||
127 | while (1) { | ||
128 | if (!*curr || !sdbt) | ||
129 | break; | ||
130 | |||
131 | /* Process table-link entries */ | ||
132 | if (is_link_entry(curr)) { | ||
133 | curr = get_next_sdbt(curr); | ||
134 | if (sdbt) | ||
135 | free_page((unsigned long) sdbt); | ||
136 | |||
137 | /* If the origin is reached, the sampling buffer is freed */ | ||
138 | if (curr == sfb->sdbt) | ||
139 | break; | ||
140 | else | ||
141 | sdbt = curr; | ||
142 | } else { | ||
143 | /* Process SDB pointer */ | ||
144 | if (*curr) { | ||
145 | free_page(*curr); | ||
146 | curr++; | ||
147 | } | ||
148 | } | ||
149 | } | ||
150 | |||
151 | debug_sprintf_event(sfdbg, 5, | ||
152 | "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt); | ||
153 | memset(sfb, 0, sizeof(*sfb)); | ||
154 | } | ||
155 | |||
156 | static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) | ||
157 | { | ||
158 | unsigned long sdb, *trailer; | ||
159 | |||
160 | /* Allocate and initialize sample-data-block */ | ||
161 | sdb = get_zeroed_page(gfp_flags); | ||
162 | if (!sdb) | ||
163 | return -ENOMEM; | ||
164 | trailer = trailer_entry_ptr(sdb); | ||
165 | *trailer = SDB_TE_ALERT_REQ_MASK; | ||
166 | |||
167 | /* Link SDB into the sample-data-block-table */ | ||
168 | *sdbt = sdb; | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * realloc_sampling_buffer() - extend sampler memory | ||
175 | * | ||
176 | * Allocates new sample-data-blocks and adds them to the specified sampling | ||
177 | * buffer memory. | ||
178 | * | ||
179 | * Important: This modifies the sampling buffer and must be called when the | ||
180 | * sampling facility is disabled. | ||
181 | * | ||
182 | * Returns zero on success, non-zero otherwise. | ||
183 | */ | ||
184 | static int realloc_sampling_buffer(struct sf_buffer *sfb, | ||
185 | unsigned long num_sdb, gfp_t gfp_flags) | ||
186 | { | ||
187 | int i, rc; | ||
188 | unsigned long *new, *tail; | ||
189 | |||
190 | if (!sfb->sdbt || !sfb->tail) | ||
191 | return -EINVAL; | ||
192 | |||
193 | if (!is_link_entry(sfb->tail)) | ||
194 | return -EINVAL; | ||
195 | |||
196 | /* Append to the existing sampling buffer, overwriting the table-link | ||
197 | * register. | ||
198 | * The tail variable always points to the "tail" (last and table-link) | ||
199 | * entry in an SDB-table. | ||
200 | */ | ||
201 | tail = sfb->tail; | ||
202 | |||
203 | /* Do a sanity check whether the table-link entry points to | ||
204 | * the sampling buffer origin. | ||
205 | */ | ||
206 | if (sfb->sdbt != get_next_sdbt(tail)) { | ||
207 | debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: " | ||
208 | "sampling buffer is not linked: origin=%p" | ||
209 | "tail=%p\n", | ||
210 | (void *) sfb->sdbt, (void *) tail); | ||
211 | return -EINVAL; | ||
212 | } | ||
213 | |||
214 | /* Allocate remaining SDBs */ | ||
215 | rc = 0; | ||
216 | for (i = 0; i < num_sdb; i++) { | ||
217 | /* Allocate a new SDB-table if it is full. */ | ||
218 | if (require_table_link(tail)) { | ||
219 | new = (unsigned long *) get_zeroed_page(gfp_flags); | ||
220 | if (!new) { | ||
221 | rc = -ENOMEM; | ||
222 | break; | ||
223 | } | ||
224 | sfb->num_sdbt++; | ||
225 | /* Link current page to tail of chain */ | ||
226 | *tail = (unsigned long)(void *) new + 1; | ||
227 | tail = new; | ||
228 | } | ||
229 | |||
230 | /* Allocate a new sample-data-block. | ||
231 | * If there is not enough memory, stop the realloc process | ||
232 | * and simply use what was allocated. If this is a temporary | ||
233 | * issue, a new realloc call (if required) might succeed. | ||
234 | */ | ||
235 | rc = alloc_sample_data_block(tail, gfp_flags); | ||
236 | if (rc) | ||
237 | break; | ||
238 | sfb->num_sdb++; | ||
239 | tail++; | ||
240 | } | ||
241 | |||
242 | /* Link sampling buffer to its origin */ | ||
243 | *tail = (unsigned long) sfb->sdbt + 1; | ||
244 | sfb->tail = tail; | ||
245 | |||
246 | debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer" | ||
247 | " settings: sdbt=%lu sdb=%lu\n", | ||
248 | sfb->num_sdbt, sfb->num_sdb); | ||
249 | return rc; | ||
250 | } | ||
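/* Implementation note: the "+ 1" when storing a table-link entry above tags
 * the pointer by setting its lowest bit; is_link_entry() and get_next_sdbt()
 * (defined in asm/cpu_mf.h, not shown in this patch) presumably test and mask
 * that bit to distinguish table-link entries from plain SDB pointers.
 */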
251 | |||
252 | /* | ||
253 | * allocate_sampling_buffer() - allocate sampler memory | ||
254 | * | ||
255 | * Allocates and initializes a sampling buffer structure using the | ||
256 | * specified number of sample-data-blocks (SDB). For each allocation, | ||
257 | * a 4K page is used. The number of sample-data-block-tables (SDBT) | ||
258 | * is calculated from the number of SDBs. | ||
259 | * Also set the ALERT_REQ mask in each SDB's trailer. | ||
260 | * | ||
261 | * Returns zero on success, non-zero otherwise. | ||
262 | */ | ||
263 | static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) | ||
264 | { | ||
265 | int rc; | ||
266 | |||
267 | if (sfb->sdbt) | ||
268 | return -EINVAL; | ||
269 | |||
270 | /* Allocate the sample-data-block-table origin */ | ||
271 | sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); | ||
272 | if (!sfb->sdbt) | ||
273 | return -ENOMEM; | ||
274 | sfb->num_sdb = 0; | ||
275 | sfb->num_sdbt = 1; | ||
276 | |||
277 | /* Link the table origin to point to itself to prepare for | ||
278 | * realloc_sampling_buffer() invocation. | ||
279 | */ | ||
280 | sfb->tail = sfb->sdbt; | ||
281 | *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; | ||
282 | |||
283 | /* Allocate requested number of sample-data-blocks */ | ||
284 | rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); | ||
285 | if (rc) { | ||
286 | free_sampling_buffer(sfb); | ||
287 | debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: " | ||
288 | "realloc_sampling_buffer failed with rc=%i\n", rc); | ||
289 | } else | ||
290 | debug_sprintf_event(sfdbg, 4, | ||
291 | "alloc_sampling_buffer: tear=%p dear=%p\n", | ||
292 | sfb->sdbt, (void *) *sfb->sdbt); | ||
293 | return rc; | ||
294 | } | ||
295 | |||
296 | static void sfb_set_limits(unsigned long min, unsigned long max) | ||
297 | { | ||
298 | struct hws_qsi_info_block si; | ||
299 | |||
300 | CPUM_SF_MIN_SDB = min; | ||
301 | CPUM_SF_MAX_SDB = max; | ||
302 | |||
303 | memset(&si, 0, sizeof(si)); | ||
304 | if (!qsi(&si)) | ||
305 | CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); | ||
306 | } | ||
307 | |||
308 | static unsigned long sfb_max_limit(struct hw_perf_event *hwc) | ||
309 | { | ||
310 | return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR | ||
311 | : CPUM_SF_MAX_SDB; | ||
312 | } | ||
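/* Worked example with hypothetical sample sizes: if the basic entry size
 * (bsdes) were 32 bytes and the diagnostic entry size (dsdes) 96 bytes,
 * sfb_set_limits() would compute CPUM_SF_SDB_DIAG_FACTOR =
 * DIV_ROUND_UP(96, 32) = 3, and sfb_max_limit() would then allow up to
 * 3 * CPUM_SF_MAX_SDB sample-data-blocks in diagnostic-sampling mode.
 */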
313 | |||
314 | static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, | ||
315 | struct hw_perf_event *hwc) | ||
316 | { | ||
317 | if (!sfb->sdbt) | ||
318 | return SFB_ALLOC_REG(hwc); | ||
319 | if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) | ||
320 | return SFB_ALLOC_REG(hwc) - sfb->num_sdb; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int sfb_has_pending_allocs(struct sf_buffer *sfb, | ||
325 | struct hw_perf_event *hwc) | ||
326 | { | ||
327 | return sfb_pending_allocs(sfb, hwc) > 0; | ||
328 | } | ||
329 | |||
330 | static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) | ||
331 | { | ||
332 | /* Limit the number of SDBs to not exceed the maximum */ | ||
333 | num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); | ||
334 | if (num) | ||
335 | SFB_ALLOC_REG(hwc) += num; | ||
336 | } | ||
337 | |||
338 | static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) | ||
339 | { | ||
340 | SFB_ALLOC_REG(hwc) = 0; | ||
341 | sfb_account_allocs(num, hwc); | ||
342 | } | ||
343 | |||
344 | static size_t event_sample_size(struct hw_perf_event *hwc) | ||
345 | { | ||
346 | struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); | ||
347 | size_t sample_size; | ||
348 | |||
349 | /* The sample size depends on the sampling function: The basic-sampling | ||
350 | * function must always be enabled, the diagnostic-sampling function is | ||
351 | * optional. | ||
352 | */ | ||
353 | sample_size = sfr->bsdes; | ||
354 | if (SAMPL_DIAG_MODE(hwc)) | ||
355 | sample_size += sfr->dsdes; | ||
356 | |||
357 | return sample_size; | ||
358 | } | ||
359 | |||
360 | static void deallocate_buffers(struct cpu_hw_sf *cpuhw) | ||
361 | { | ||
362 | if (cpuhw->sfb.sdbt) | ||
363 | free_sampling_buffer(&cpuhw->sfb); | ||
364 | } | ||
365 | |||
366 | static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) | ||
367 | { | ||
368 | unsigned long n_sdb, freq, factor; | ||
369 | size_t sfr_size, sample_size; | ||
370 | struct sf_raw_sample *sfr; | ||
371 | |||
372 | /* Allocate raw sample buffer | ||
373 | * | ||
374 | * The raw sample buffer is used to temporarily store sampling data | ||
375 | * entries for perf raw sample processing. The buffer size mainly | ||
376 | * depends on the size of diagnostic-sampling data entries which is | ||
377 | * machine-specific. The exact size calculation includes: | ||
378 | * 1. The first 4 bytes of diagnostic-sampling data entries are | ||
379 | * already reflected in the sf_raw_sample structure. Subtract | ||
380 | * these bytes. | ||
381 | * 2. The perf raw sample data must be 8-byte aligned (u64) and | ||
382 | * perf's internal data size must be considered too. So add | ||
383 | * an additional u32 for correct alignment and subtract before | ||
384 | * allocating the buffer. | ||
385 | * 3. Store the raw sample buffer pointer in the perf event | ||
386 | * hardware structure. | ||
387 | */ | ||
388 | sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) + | ||
389 | sizeof(u32), sizeof(u64)); | ||
390 | sfr_size -= sizeof(u32); | ||
391 | sfr = kzalloc(sfr_size, GFP_KERNEL); | ||
392 | if (!sfr) | ||
393 | return -ENOMEM; | ||
394 | sfr->size = sfr_size; | ||
395 | sfr->bsdes = cpuhw->qsi.bsdes; | ||
396 | sfr->dsdes = cpuhw->qsi.dsdes; | ||
397 | RAWSAMPLE_REG(hwc) = (unsigned long) sfr; | ||
398 | |||
399 | /* Calculate sampling buffers using 4K pages | ||
400 | * | ||
401 | * 1. Determine the sample data size which depends on the used | ||
402 | * sampling functions, for example, basic-sampling or | ||
403 | * basic-sampling with diagnostic-sampling. | ||
404 | * | ||
405 | * 2. Use the sampling frequency as input. The sampling buffer is | ||
406 | * designed for almost one second. This can be adjusted through | ||
407 | * the "factor" variable. | ||
408 | * In any case, alloc_sampling_buffer() sets the Alert Request | ||
409 | * Control indicator to trigger a measurement-alert to harvest | ||
410 | * sample-data-blocks (sdb). | ||
411 | * | ||
412 | * 3. Compute the number of sample-data-blocks and ensure a minimum | ||
413 | * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not | ||
414 | * exceed a "calculated" maximum. The symbolic maximum is | ||
415 | * designed for basic-sampling only and needs to be increased if | ||
416 | * diagnostic-sampling is active. | ||
417 | * See also the remarks for these symbolic constants. | ||
418 | * | ||
419 | * 4. Compute the number of sample-data-block-tables (SDBT) and | ||
420 | * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up | ||
421 | * to 511 SDBs). | ||
422 | */ | ||
423 | sample_size = event_sample_size(hwc); | ||
424 | freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); | ||
425 | factor = 1; | ||
426 | n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); | ||
427 | if (n_sdb < CPUM_SF_MIN_SDB) | ||
428 | n_sdb = CPUM_SF_MIN_SDB; | ||
429 | |||
430 | /* If there is already a sampling buffer allocated, it is very likely | ||
431 | * that the sampling facility is enabled too. If the event to be | ||
432 | * initialized requires a greater sampling buffer, the allocation must | ||
433 | * be postponed. Changing the sampling buffer requires the sampling | ||
434 | * facility to be in the disabled state. So, account the number of | ||
435 | * required SDBs and let cpumsf_pmu_enable() resize the buffer just | ||
436 | * before the event is started. | ||
437 | */ | ||
438 | sfb_init_allocs(n_sdb, hwc); | ||
439 | if (sf_buffer_available(cpuhw)) | ||
440 | return 0; | ||
441 | |||
442 | debug_sprintf_event(sfdbg, 3, | ||
443 | "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu" | ||
444 | " sample_size=%lu cpuhw=%p\n", | ||
445 | SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), | ||
446 | sample_size, cpuhw); | ||
447 | |||
448 | return alloc_sampling_buffer(&cpuhw->sfb, | ||
449 | sfb_pending_allocs(&cpuhw->sfb, hwc)); | ||
450 | } | ||
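/* Worked example for the SDB calculation above, with hypothetical numbers:
 * for a sample_size of 32 bytes, one 4KB SDB holds (4096 - 64) / 32 = 126
 * samples; at a sampling frequency of 4000 samples per second this gives
 * n_sdb = DIV_ROUND_UP(4000, 126) = 32 SDBs, i.e. roughly one second worth
 * of samples as intended by the comment in allocate_buffers().
 */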
451 | |||
452 | static unsigned long min_percent(unsigned int percent, unsigned long base, | ||
453 | unsigned long min) | ||
454 | { | ||
455 | return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); | ||
456 | } | ||
457 | |||
458 | static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) | ||
459 | { | ||
460 | /* Use a percentage-based approach to extend the sampling facility | ||
461 | * buffer. Accept up to 5% sample data loss. | ||
462 | * Vary the extents between 1% to 5% of the current number of | ||
463 | * sample-data-blocks. | ||
464 | */ | ||
465 | if (ratio <= 5) | ||
466 | return 0; | ||
467 | if (ratio <= 25) | ||
468 | return min_percent(1, base, 1); | ||
469 | if (ratio <= 50) | ||
470 | return min_percent(1, base, 1); | ||
471 | if (ratio <= 75) | ||
472 | return min_percent(2, base, 2); | ||
473 | if (ratio <= 100) | ||
474 | return min_percent(3, base, 3); | ||
475 | if (ratio <= 250) | ||
476 | return min_percent(4, base, 4); | ||
477 | |||
478 | return min_percent(5, base, 8); | ||
479 | } | ||
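/* Worked example: for a loss ratio of 60% and a current buffer of 200 SDBs,
 * compute_sfb_extent() returns min_percent(2, 200, 2) =
 * min(2, DIV_ROUND_UP(2 * 200, 100)) = 2, so the buffer grows by two SDBs;
 * the third argument of min_percent() caps the extent for large buffers.
 */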
480 | |||
481 | static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, | ||
482 | struct hw_perf_event *hwc) | ||
483 | { | ||
484 | unsigned long ratio, num; | ||
485 | |||
486 | if (!OVERFLOW_REG(hwc)) | ||
487 | return; | ||
488 | |||
489 | /* The sample_overflow contains the average number of sample data | ||
490 | * that has been lost because sample-data-blocks were full. | ||
491 | * | ||
492 | * Calculate the total number of sample data entries that have been | ||
493 | * discarded. Then calculate the ratio of lost samples to total samples | ||
494 | * per second in percent. | ||
495 | */ | ||
496 | ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, | ||
497 | sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); | ||
498 | |||
499 | /* Compute number of sample-data-blocks */ | ||
500 | num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); | ||
501 | if (num) | ||
502 | sfb_account_allocs(num, hwc); | ||
503 | |||
504 | debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu" | ||
505 | " num=%lu\n", OVERFLOW_REG(hwc), ratio, num); | ||
506 | OVERFLOW_REG(hwc) = 0; | ||
507 | } | ||
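/* Worked example with hypothetical numbers: an average overflow count of 10
 * lost samples, a buffer of 32 SDBs and a sampling frequency of 4000 samples
 * per second yield ratio = DIV_ROUND_UP(100 * 10 * 32, 4000) = 8 percent;
 * compute_sfb_extent(8, 32) then requests one additional SDB.
 */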
508 | |||
509 | /* extend_sampling_buffer() - Extend sampling buffer | ||
510 | * @sfb: Sampling buffer structure (for local CPU) | ||
511 | * @hwc: Perf event hardware structure | ||
512 | * | ||
513 | * Use this function to extend the sampling buffer based on the overflow counter | ||
514 | * and postponed allocation extents stored in the specified Perf event hardware. | ||
515 | * | ||
516 | * Important: This function disables the sampling facility in order to safely | ||
517 | * change the sampling buffer structure. Do not call this function | ||
518 | * when the PMU is active. | ||
519 | */ | ||
520 | static void extend_sampling_buffer(struct sf_buffer *sfb, | ||
521 | struct hw_perf_event *hwc) | ||
522 | { | ||
523 | unsigned long num, num_old; | ||
524 | int rc; | ||
525 | |||
526 | num = sfb_pending_allocs(sfb, hwc); | ||
527 | if (!num) | ||
528 | return; | ||
529 | num_old = sfb->num_sdb; | ||
530 | |||
531 | /* Disable the sampling facility to reset any states and also | ||
532 | * clear pending measurement alerts. | ||
533 | */ | ||
534 | sf_disable(); | ||
535 | |||
536 | /* Extend the sampling buffer. | ||
537 | * This memory allocation typically happens in an atomic context when | ||
538 | * called by perf. Because this is a reallocation, it is fine if the | ||
539 | * new SDB-request cannot be satisfied immediately. | ||
540 | */ | ||
541 | rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); | ||
542 | if (rc) | ||
543 | debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc " | ||
544 | "failed with rc=%i\n", rc); | ||
545 | |||
546 | if (sfb_has_pending_allocs(sfb, hwc)) | ||
547 | debug_sprintf_event(sfdbg, 5, "sfb: extend: " | ||
548 | "req=%lu alloc=%lu remaining=%lu\n", | ||
549 | num, sfb->num_sdb - num_old, | ||
550 | sfb_pending_allocs(sfb, hwc)); | ||
551 | } | ||
552 | |||
553 | |||
554 | /* Number of perf events counting hardware events */ | ||
555 | static atomic_t num_events; | ||
556 | /* Used to avoid races in calling reserve/release_cpumf_hardware */ | ||
557 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
558 | |||
559 | #define PMC_INIT 0 | ||
560 | #define PMC_RELEASE 1 | ||
561 | #define PMC_FAILURE 2 | ||
562 | static void setup_pmc_cpu(void *flags) | ||
563 | { | ||
564 | int err; | ||
565 | struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf); | ||
566 | |||
567 | err = 0; | ||
568 | switch (*((int *) flags)) { | ||
569 | case PMC_INIT: | ||
570 | memset(cpusf, 0, sizeof(*cpusf)); | ||
571 | err = qsi(&cpusf->qsi); | ||
572 | if (err) | ||
573 | break; | ||
574 | cpusf->flags |= PMU_F_RESERVED; | ||
575 | err = sf_disable(); | ||
576 | if (err) | ||
577 | pr_err("Switching off the sampling facility failed " | ||
578 | "with rc=%i\n", err); | ||
579 | debug_sprintf_event(sfdbg, 5, | ||
580 | "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf); | ||
581 | break; | ||
582 | case PMC_RELEASE: | ||
583 | cpusf->flags &= ~PMU_F_RESERVED; | ||
584 | err = sf_disable(); | ||
585 | if (err) { | ||
586 | pr_err("Switching off the sampling facility failed " | ||
587 | "with rc=%i\n", err); | ||
588 | } else | ||
589 | deallocate_buffers(cpusf); | ||
590 | debug_sprintf_event(sfdbg, 5, | ||
591 | "setup_pmc_cpu: released: cpuhw=%p\n", cpusf); | ||
592 | break; | ||
593 | } | ||
594 | if (err) | ||
595 | *((int *) flags) |= PMC_FAILURE; | ||
596 | } | ||
597 | |||
598 | static void release_pmc_hardware(void) | ||
599 | { | ||
600 | int flags = PMC_RELEASE; | ||
601 | |||
602 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); | ||
603 | on_each_cpu(setup_pmc_cpu, &flags, 1); | ||
604 | perf_release_sampling(); | ||
605 | } | ||
606 | |||
607 | static int reserve_pmc_hardware(void) | ||
608 | { | ||
609 | int flags = PMC_INIT; | ||
610 | int err; | ||
611 | |||
612 | err = perf_reserve_sampling(); | ||
613 | if (err) | ||
614 | return err; | ||
615 | on_each_cpu(setup_pmc_cpu, &flags, 1); | ||
616 | if (flags & PMC_FAILURE) { | ||
617 | release_pmc_hardware(); | ||
618 | return -ENODEV; | ||
619 | } | ||
620 | irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | static void hw_perf_event_destroy(struct perf_event *event) | ||
626 | { | ||
627 | /* Free raw sample buffer */ | ||
628 | if (RAWSAMPLE_REG(&event->hw)) | ||
629 | kfree((void *) RAWSAMPLE_REG(&event->hw)); | ||
630 | |||
631 | /* Release PMC if this is the last perf event */ | ||
632 | if (!atomic_add_unless(&num_events, -1, 1)) { | ||
633 | mutex_lock(&pmc_reserve_mutex); | ||
634 | if (atomic_dec_return(&num_events) == 0) | ||
635 | release_pmc_hardware(); | ||
636 | mutex_unlock(&pmc_reserve_mutex); | ||
637 | } | ||
638 | } | ||
639 | |||
640 | static void hw_init_period(struct hw_perf_event *hwc, u64 period) | ||
641 | { | ||
642 | hwc->sample_period = period; | ||
643 | hwc->last_period = hwc->sample_period; | ||
644 | local64_set(&hwc->period_left, hwc->sample_period); | ||
645 | } | ||
646 | |||
647 | static void hw_reset_registers(struct hw_perf_event *hwc, | ||
648 | unsigned long *sdbt_origin) | ||
649 | { | ||
650 | struct sf_raw_sample *sfr; | ||
651 | |||
652 | /* (Re)set to first sample-data-block-table */ | ||
653 | TEAR_REG(hwc) = (unsigned long) sdbt_origin; | ||
654 | |||
655 | /* (Re)set raw sampling buffer register */ | ||
656 | sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc); | ||
657 | memset(&sfr->basic, 0, sizeof(sfr->basic)); | ||
658 | memset(&sfr->diag, 0, sfr->dsdes); | ||
659 | } | ||
660 | |||
661 | static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, | ||
662 | unsigned long rate) | ||
663 | { | ||
664 | return clamp_t(unsigned long, rate, | ||
665 | si->min_sampl_rate, si->max_sampl_rate); | ||
666 | } | ||
667 | |||
668 | static int __hw_perf_event_init(struct perf_event *event) | ||
669 | { | ||
670 | struct cpu_hw_sf *cpuhw; | ||
671 | struct hws_qsi_info_block si; | ||
672 | struct perf_event_attr *attr = &event->attr; | ||
673 | struct hw_perf_event *hwc = &event->hw; | ||
674 | unsigned long rate; | ||
675 | int cpu, err; | ||
676 | |||
677 | /* Reserve CPU-measurement sampling facility */ | ||
678 | err = 0; | ||
679 | if (!atomic_inc_not_zero(&num_events)) { | ||
680 | mutex_lock(&pmc_reserve_mutex); | ||
681 | if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) | ||
682 | err = -EBUSY; | ||
683 | else | ||
684 | atomic_inc(&num_events); | ||
685 | mutex_unlock(&pmc_reserve_mutex); | ||
686 | } | ||
687 | event->destroy = hw_perf_event_destroy; | ||
688 | |||
689 | if (err) | ||
690 | goto out; | ||
691 | |||
692 | /* Access per-CPU sampling information (query sampling info) */ | ||
693 | /* | ||
694 | * The event->cpu value can be -1 to count on every CPU, for example, | ||
695 | * when attaching to a task. If this is specified, use the query | ||
696 | * sampling info from the current CPU, otherwise use event->cpu to | ||
697 | * retrieve the per-CPU information. | ||
698 | * Later, cpuhw indicates whether to allocate sampling buffers for a | ||
699 | * particular CPU (cpuhw!=NULL) or each online CPU (cpuhw==NULL). | ||
700 | */ | ||
701 | memset(&si, 0, sizeof(si)); | ||
702 | cpuhw = NULL; | ||
703 | if (event->cpu == -1) | ||
704 | qsi(&si); | ||
705 | else { | ||
706 | /* Event is pinned to a particular CPU, retrieve the per-CPU | ||
707 | * sampling structure for accessing the CPU-specific QSI. | ||
708 | */ | ||
709 | cpuhw = &per_cpu(cpu_hw_sf, event->cpu); | ||
710 | si = cpuhw->qsi; | ||
711 | } | ||
712 | |||
713 | /* Check sampling facility authorization and, if not authorized, | ||
714 | * fall back to other PMUs. It is safe to check any CPU because | ||
715 | * the authorization is identical for all configured CPUs. | ||
716 | */ | ||
717 | if (!si.as) { | ||
718 | err = -ENOENT; | ||
719 | goto out; | ||
720 | } | ||
721 | |||
722 | /* Always enable basic sampling */ | ||
723 | SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; | ||
724 | |||
725 | /* Check if diagnostic sampling is requested. Deny if the required | ||
726 | * sampling authorization is missing. | ||
727 | */ | ||
728 | if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { | ||
729 | if (!si.ad) { | ||
730 | err = -EPERM; | ||
731 | goto out; | ||
732 | } | ||
733 | SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; | ||
734 | } | ||
735 | |||
736 | /* Check and set other sampling flags */ | ||
737 | if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) | ||
738 | SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; | ||
739 | |||
740 | /* The sampling information (si) provides the min/max sampling | ||
741 | * intervals and the CPU speed, so the correct sampling interval | ||
742 | * can be calculated directly, avoiding the whole period-adjust | ||
743 | * feedback loop. | ||
744 | */ | ||
745 | rate = 0; | ||
746 | if (attr->freq) { | ||
747 | rate = freq_to_sample_rate(&si, attr->sample_freq); | ||
748 | rate = hw_limit_rate(&si, rate); | ||
749 | attr->freq = 0; | ||
750 | attr->sample_period = rate; | ||
751 | } else { | ||
752 | /* The min/max sampling rates specify the valid range | ||
753 | * of sample periods. If the specified sample period is | ||
754 | * out of range, limit the period to the range boundary. | ||
755 | */ | ||
756 | rate = hw_limit_rate(&si, hwc->sample_period); | ||
757 | |||
758 | /* The perf core maintains a maximum sample rate that is | ||
759 | * configurable through the sysctl interface. Ensure the | ||
760 | * sampling rate does not exceed this value. This also helps | ||
761 | * to avoid throttling when pushing samples with | ||
762 | * perf_event_overflow(). | ||
763 | */ | ||
764 | if (sample_rate_to_freq(&si, rate) > | ||
765 | sysctl_perf_event_sample_rate) { | ||
766 | err = -EINVAL; | ||
767 | debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n"); | ||
768 | goto out; | ||
769 | } | ||
770 | } | ||
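	/* In both branches above, "rate" ends up as a sampling interval
	 * bounded by si.min_sampl_rate/si.max_sampl_rate rather than a
	 * frequency; it is stored below both as SAMPL_RATE() and as the
	 * perf sample period via hw_init_period().
	 */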
771 | SAMPL_RATE(hwc) = rate; | ||
772 | hw_init_period(hwc, SAMPL_RATE(hwc)); | ||
773 | |||
774 | /* Initialize sample data overflow accounting */ | ||
775 | hwc->extra_reg.reg = REG_OVERFLOW; | ||
776 | OVERFLOW_REG(hwc) = 0; | ||
777 | |||
778 | /* Allocate the per-CPU sampling buffer using the CPU information | ||
779 | * from the event. If the event is not pinned to a particular | ||
780 | * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling | ||
781 | * buffers for each online CPU. | ||
782 | */ | ||
783 | if (cpuhw) | ||
784 | /* Event is pinned to a particular CPU */ | ||
785 | err = allocate_buffers(cpuhw, hwc); | ||
786 | else { | ||
787 | /* Event is not pinned, allocate sampling buffer on | ||
788 | * each online CPU | ||
789 | */ | ||
790 | for_each_online_cpu(cpu) { | ||
791 | cpuhw = &per_cpu(cpu_hw_sf, cpu); | ||
792 | err = allocate_buffers(cpuhw, hwc); | ||
793 | if (err) | ||
794 | break; | ||
795 | } | ||
796 | } | ||
797 | out: | ||
798 | return err; | ||
799 | } | ||
800 | |||
801 | static int cpumsf_pmu_event_init(struct perf_event *event) | ||
802 | { | ||
803 | int err; | ||
804 | |||
805 | /* No support for taken branch sampling */ | ||
806 | if (has_branch_stack(event)) | ||
807 | return -EOPNOTSUPP; | ||
808 | |||
809 | switch (event->attr.type) { | ||
810 | case PERF_TYPE_RAW: | ||
811 | if ((event->attr.config != PERF_EVENT_CPUM_SF) && | ||
812 | (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) | ||
813 | return -ENOENT; | ||
814 | break; | ||
815 | case PERF_TYPE_HARDWARE: | ||
816 | /* Support sampling of CPU cycles in addition to the | ||
817 | * counter facility. However, the counter facility | ||
818 | * is more precise and, hence, this PMU is restricted to | ||
819 | * sampling events only. | ||
820 | */ | ||
821 | if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) | ||
822 | return -ENOENT; | ||
823 | if (!is_sampling_event(event)) | ||
824 | return -ENOENT; | ||
825 | break; | ||
826 | default: | ||
827 | return -ENOENT; | ||
828 | } | ||
829 | |||
830 | /* Check online status of the CPU to which the event is pinned */ | ||
831 | if (event->cpu >= nr_cpumask_bits || | ||
832 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
833 | return -ENODEV; | ||
834 | |||
835 | /* Force reset of idle/hv excludes regardless of what the | ||
836 | * user requested. | ||
837 | */ | ||
838 | if (event->attr.exclude_hv) | ||
839 | event->attr.exclude_hv = 0; | ||
840 | if (event->attr.exclude_idle) | ||
841 | event->attr.exclude_idle = 0; | ||
842 | |||
843 | err = __hw_perf_event_init(event); | ||
844 | if (unlikely(err)) | ||
845 | if (event->destroy) | ||
846 | event->destroy(event); | ||
847 | return err; | ||
848 | } | ||
849 | |||
850 | static void cpumsf_pmu_enable(struct pmu *pmu) | ||
851 | { | ||
852 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
853 | struct hw_perf_event *hwc; | ||
854 | int err; | ||
855 | |||
856 | if (cpuhw->flags & PMU_F_ENABLED) | ||
857 | return; | ||
858 | |||
859 | if (cpuhw->flags & PMU_F_ERR_MASK) | ||
860 | return; | ||
861 | |||
862 | /* Check whether to extend the sampling buffer. | ||
863 | * | ||
864 | * Two conditions trigger an increase of the sampling buffer for a | ||
865 | * perf event: | ||
866 | * 1. Postponed buffer allocations from the event initialization. | ||
867 | * 2. Sampling overflows that contribute to pending allocations. | ||
868 | * | ||
869 | * Note that the extend_sampling_buffer() function disables the sampling | ||
870 | * facility, but it can be fully re-enabled using sampling controls that | ||
871 | * have been saved in cpumsf_pmu_disable(). | ||
872 | */ | ||
873 | if (cpuhw->event) { | ||
874 | hwc = &cpuhw->event->hw; | ||
875 | /* Account number of overflow-designated buffer extents */ | ||
876 | sfb_account_overflows(cpuhw, hwc); | ||
877 | if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) | ||
878 | extend_sampling_buffer(&cpuhw->sfb, hwc); | ||
879 | } | ||
880 | |||
881 | /* (Re)enable the PMU and sampling facility */ | ||
882 | cpuhw->flags |= PMU_F_ENABLED; | ||
883 | barrier(); | ||
884 | |||
885 | err = lsctl(&cpuhw->lsctl); | ||
886 | if (err) { | ||
887 | cpuhw->flags &= ~PMU_F_ENABLED; | ||
888 | pr_err("Loading sampling controls failed: op=%i err=%i\n", | ||
889 | 1, err); | ||
890 | return; | ||
891 | } | ||
892 | |||
893 | debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i " | ||
894 | "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs, | ||
895 | cpuhw->lsctl.ed, cpuhw->lsctl.cd, | ||
896 | (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear); | ||
897 | } | ||
898 | |||
899 | static void cpumsf_pmu_disable(struct pmu *pmu) | ||
900 | { | ||
901 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
902 | struct hws_lsctl_request_block inactive; | ||
903 | struct hws_qsi_info_block si; | ||
904 | int err; | ||
905 | |||
906 | if (!(cpuhw->flags & PMU_F_ENABLED)) | ||
907 | return; | ||
908 | |||
909 | if (cpuhw->flags & PMU_F_ERR_MASK) | ||
910 | return; | ||
911 | |||
912 | /* Switch off sampling activation control */ | ||
913 | inactive = cpuhw->lsctl; | ||
914 | inactive.cs = 0; | ||
915 | inactive.cd = 0; | ||
916 | |||
917 | err = lsctl(&inactive); | ||
918 | if (err) { | ||
919 | pr_err("Loading sampling controls failed: op=%i err=%i\n", | ||
920 | 2, err); | ||
921 | return; | ||
922 | } | ||
923 | |||
924 | /* Save state of TEAR and DEAR register contents */ | ||
925 | if (!qsi(&si)) { | ||
926 | /* TEAR/DEAR values are valid only if the sampling facility is | ||
927 | * enabled. Note that cpumsf_pmu_disable() might be called even | ||
928 | * for a disabled sampling facility because cpumsf_pmu_enable() | ||
929 | * controls the enable/disable state. | ||
930 | */ | ||
931 | if (si.es) { | ||
932 | cpuhw->lsctl.tear = si.tear; | ||
933 | cpuhw->lsctl.dear = si.dear; | ||
934 | } | ||
935 | } else | ||
936 | debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: " | ||
937 | "qsi() failed with err=%i\n", err); | ||
938 | |||
939 | cpuhw->flags &= ~PMU_F_ENABLED; | ||
940 | } | ||
941 | |||
942 | /* perf_exclude_event() - Filter event | ||
943 | * @event: The perf event | ||
944 | * @regs: pt_regs structure | ||
945 | * @sde_regs: Sample-data-entry (sde) regs structure | ||
946 | * | ||
947 | * Filter perf events according to their exclude specification. | ||
948 | * | ||
949 | * Return non-zero if the event shall be excluded. | ||
950 | */ | ||
951 | static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, | ||
952 | struct perf_sf_sde_regs *sde_regs) | ||
953 | { | ||
954 | if (event->attr.exclude_user && user_mode(regs)) | ||
955 | return 1; | ||
956 | if (event->attr.exclude_kernel && !user_mode(regs)) | ||
957 | return 1; | ||
958 | if (event->attr.exclude_guest && sde_regs->in_guest) | ||
959 | return 1; | ||
960 | if (event->attr.exclude_host && !sde_regs->in_guest) | ||
961 | return 1; | ||
962 | return 0; | ||
963 | } | ||
964 | |||
965 | /* perf_push_sample() - Push samples to perf | ||
966 | * @event: The perf event | ||
967 | * @sfr: Raw hardware sample data | ||
968 | * | ||
969 | * Use the hardware sample data to create a perf event sample. The sample | ||
970 | * is then pushed to the event subsystem and the function checks for | ||
971 | * possible event overflows. If an event overflow occurs, the PMU is | ||
972 | * stopped. | ||
973 | * | ||
974 | * Return non-zero if an event overflow occurred. | ||
975 | */ | ||
976 | static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) | ||
977 | { | ||
978 | int overflow; | ||
979 | struct pt_regs regs; | ||
980 | struct perf_sf_sde_regs *sde_regs; | ||
981 | struct perf_sample_data data; | ||
982 | struct perf_raw_record raw; | ||
983 | |||
984 | /* Setup perf sample */ | ||
985 | perf_sample_data_init(&data, 0, event->hw.last_period); | ||
986 | raw.size = sfr->size; | ||
987 | raw.data = sfr; | ||
988 | data.raw = &raw; | ||
989 | |||
990 | /* Set up pt_regs to look like a CPU-measurement external interrupt | ||
991 | * using the Program Request Alert code. The regs.int_parm_long | ||
992 | * field, which is otherwise unused, carries additional sample-data-entry | ||
993 | * related indicators. | ||
994 | */ | ||
995 | memset(®s, 0, sizeof(regs)); | ||
996 | regs.int_code = 0x1407; | ||
997 | regs.int_parm = CPU_MF_INT_SF_PRA; | ||
998 | sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; | ||
999 | |||
1000 | regs.psw.addr = sfr->basic.ia; | ||
1001 | if (sfr->basic.T) | ||
1002 | regs.psw.mask |= PSW_MASK_DAT; | ||
1003 | if (sfr->basic.W) | ||
1004 | regs.psw.mask |= PSW_MASK_WAIT; | ||
1005 | if (sfr->basic.P) | ||
1006 | regs.psw.mask |= PSW_MASK_PSTATE; | ||
1007 | switch (sfr->basic.AS) { | ||
1008 | case 0x0: | ||
1009 | regs.psw.mask |= PSW_ASC_PRIMARY; | ||
1010 | break; | ||
1011 | case 0x1: | ||
1012 | regs.psw.mask |= PSW_ASC_ACCREG; | ||
1013 | break; | ||
1014 | case 0x2: | ||
1015 | regs.psw.mask |= PSW_ASC_SECONDARY; | ||
1016 | break; | ||
1017 | case 0x3: | ||
1018 | regs.psw.mask |= PSW_ASC_HOME; | ||
1019 | break; | ||
1020 | } | ||
1021 | |||
1022 | /* The host-program-parameter (hpp) contains the sie control | ||
1023 | * block that is set by sie64a() in entry64.S. Check if hpp | ||
1024 | * refers to a valid control block and set sde_regs flags | ||
1025 | * accordingly. This would allow hpp values to be used for other | ||
1026 | * purposes too. | ||
1027 | * For now, simply use a non-zero value as guest indicator. | ||
1028 | */ | ||
1029 | if (sfr->basic.hpp) | ||
1030 | sde_regs->in_guest = 1; | ||
1031 | |||
1032 | overflow = 0; | ||
1033 | if (perf_exclude_event(event, ®s, sde_regs)) | ||
1034 | goto out; | ||
1035 | if (perf_event_overflow(event, &data, ®s)) { | ||
1036 | overflow = 1; | ||
1037 | event->pmu->stop(event, 0); | ||
1038 | } | ||
1039 | perf_event_update_userpage(event); | ||
1040 | out: | ||
1041 | return overflow; | ||
1042 | } | ||
1043 | |||
1044 | static void perf_event_count_update(struct perf_event *event, u64 count) | ||
1045 | { | ||
1046 | local64_add(count, &event->count); | ||
1047 | } | ||
1048 | |||
1049 | static int sample_format_is_valid(struct hws_combined_entry *sample, | ||
1050 | unsigned int flags) | ||
1051 | { | ||
1052 | if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) | ||
1053 | /* Only basic-sampling data entries with data-entry-format | ||
1054 | * version of 0x0001 can be processed. | ||
1055 | */ | ||
1056 | if (sample->basic.def != 0x0001) | ||
1057 | return 0; | ||
1058 | if (flags & PERF_CPUM_SF_DIAG_MODE) | ||
1059 | /* The data-entry-format number of diagnostic-sampling data | ||
1060 | * entries can vary. Because diagnostic data is just passed | ||
1061 | * through, do only a sanity check on the DEF. | ||
1062 | */ | ||
1063 | if (sample->diag.def < 0x8001) | ||
1064 | return 0; | ||
1065 | return 1; | ||
1066 | } | ||
1067 | |||
1068 | static int sample_is_consistent(struct hws_combined_entry *sample, | ||
1069 | unsigned long flags) | ||
1070 | { | ||
1071 | /* This check applies only to the basic-sampling part of a potentially | ||
1072 | * combined-sampling data entry. Invalid entries cannot be processed | ||
1073 | * by the PMU and, thus, do not deliver an associated | ||
1074 | * diagnostic-sampling data entry. | ||
1075 | */ | ||
1076 | if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE))) | ||
1077 | return 0; | ||
1078 | /* | ||
1079 | * Samples are skipped if they are invalid or if the instruction | ||
1080 | * address is not predictable, i.e., if the wait-state bit is | ||
1081 | * set. | ||
1082 | */ | ||
1083 | if (sample->basic.I || sample->basic.W) | ||
1084 | return 0; | ||
1085 | return 1; | ||
1086 | } | ||
1087 | |||
1088 | static void reset_sample_slot(struct hws_combined_entry *sample, | ||
1089 | unsigned long flags) | ||
1090 | { | ||
1091 | if (likely(flags & PERF_CPUM_SF_BASIC_MODE)) | ||
1092 | sample->basic.def = 0; | ||
1093 | if (flags & PERF_CPUM_SF_DIAG_MODE) | ||
1094 | sample->diag.def = 0; | ||
1095 | } | ||
1096 | |||
1097 | static void sfr_store_sample(struct sf_raw_sample *sfr, | ||
1098 | struct hws_combined_entry *sample) | ||
1099 | { | ||
1100 | if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE)) | ||
1101 | sfr->basic = sample->basic; | ||
1102 | if (sfr->format & PERF_CPUM_SF_DIAG_MODE) | ||
1103 | memcpy(&sfr->diag, &sample->diag, sfr->dsdes); | ||
1104 | } | ||
1105 | |||
1106 | static void debug_sample_entry(struct hws_combined_entry *sample, | ||
1107 | struct hws_trailer_entry *te, | ||
1108 | unsigned long flags) | ||
1109 | { | ||
1110 | debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown " | ||
1111 | "sampling data entry: te->f=%i basic.def=%04x (%p)" | ||
1112 | " diag.def=%04x (%p)\n", te->f, | ||
1113 | sample->basic.def, &sample->basic, | ||
1114 | (flags & PERF_CPUM_SF_DIAG_MODE) | ||
1115 | ? sample->diag.def : 0xFFFF, | ||
1116 | (flags & PERF_CPUM_SF_DIAG_MODE) | ||
1117 | ? &sample->diag : NULL); | ||
1118 | } | ||
1119 | |||
1120 | /* hw_collect_samples() - Walk through a sample-data-block and collect samples | ||
1121 | * @event: The perf event | ||
1122 | * @sdbt: Sample-data-block table | ||
1123 | * @overflow: Event overflow counter | ||
1124 | * | ||
1125 | * Walks through a sample-data-block and collects sampling data entries that are | ||
1126 | * then pushed to the perf event subsystem. Depending on the sampling function, | ||
1127 | * there can be either basic-sampling or combined-sampling data entries. A | ||
1128 | * combined-sampling data entry consists of a basic- and a diagnostic-sampling | ||
1129 | * data entry. The sampling function is determined by the flags in the perf | ||
1130 | * event hardware structure. The function always works with a combined-sampling | ||
1131 | * data entry but ignores the diagnostic portion if it is not available. | ||
1132 | * | ||
1133 | * Note that the implementation focuses on basic-sampling data entries and, if | ||
1134 | * such an entry is not valid, the entire combined-sampling data entry is | ||
1135 | * ignored. | ||
1136 | * | ||
1137 | * The overflow variable counts the number of samples that have been discarded | ||
1138 | * due to a perf event overflow. | ||
1139 | */ | ||
1140 | static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, | ||
1141 | unsigned long long *overflow) | ||
1142 | { | ||
1143 | unsigned long flags = SAMPL_FLAGS(&event->hw); | ||
1144 | struct hws_combined_entry *sample; | ||
1145 | struct hws_trailer_entry *te; | ||
1146 | struct sf_raw_sample *sfr; | ||
1147 | size_t sample_size; | ||
1148 | |||
1149 | /* Prepare and initialize raw sample data */ | ||
1150 | sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw); | ||
1151 | sfr->format = flags & PERF_CPUM_SF_MODE_MASK; | ||
1152 | |||
1153 | sample_size = event_sample_size(&event->hw); | ||
1154 | te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); | ||
1155 | sample = (struct hws_combined_entry *) *sdbt; | ||
1156 | while ((unsigned long *) sample < (unsigned long *) te) { | ||
1157 | /* Check for an empty sample */ | ||
1158 | if (!sample->basic.def) | ||
1159 | break; | ||
1160 | |||
1161 | /* Update perf event period */ | ||
1162 | perf_event_count_update(event, SAMPL_RATE(&event->hw)); | ||
1163 | |||
1164 | /* Check sampling data entry */ | ||
1165 | if (sample_format_is_valid(sample, flags)) { | ||
1166 | /* If an event overflow occurred, the PMU is stopped to | ||
1167 | * throttle event delivery. Remaining sample data is | ||
1168 | * discarded. | ||
1169 | */ | ||
1170 | if (!*overflow) { | ||
1171 | if (sample_is_consistent(sample, flags)) { | ||
1172 | /* Deliver sample data to perf */ | ||
1173 | sfr_store_sample(sfr, sample); | ||
1174 | *overflow = perf_push_sample(event, sfr); | ||
1175 | } | ||
1176 | } else | ||
1177 | /* Count discarded samples */ | ||
1178 | *overflow += 1; | ||
1179 | } else { | ||
1180 | debug_sample_entry(sample, te, flags); | ||
1181 | /* The sample slot is not yet written or holds another record type. | ||
1182 | * | ||
1183 | * This condition can occur if the buffer was reused | ||
1184 | * from a combined basic- and diagnostic-sampling. | ||
1185 | * If only basic-sampling is then active, entries are | ||
1186 | * written into the larger diagnostic entries. | ||
1187 | * This is typically the case for sample-data-blocks | ||
1188 | * that are not full. Stop processing if the first | ||
1189 | * invalid format was detected. | ||
1190 | */ | ||
1191 | if (!te->f) | ||
1192 | break; | ||
1193 | } | ||
1194 | |||
1195 | /* Reset sample slot and advance to next sample */ | ||
1196 | reset_sample_slot(sample, flags); | ||
1197 | sample += sample_size; | ||
1198 | } | ||
1199 | } | ||
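/* Layout being walked above (illustrative): sample entries are packed from
 * the start of each 4KB sample-data-block towards its trailer entry, which
 * trailer_entry_ptr() locates in the last bytes of the block (consistent
 * with the PAGE_SIZE - 64 payload estimate used in allocate_buffers()).
 * The walk stops at the trailer or at the first empty slot.
 */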
1200 | |||
1201 | /* hw_perf_event_update() - Process sampling buffer | ||
1202 | * @event: The perf event | ||
1203 | * @flush_all: Flag to also flush partially filled sample-data-blocks | ||
1204 | * | ||
1205 | * Processes the sampling buffer and creates perf event samples. | ||
1206 | * The sampling buffer position is retrieved and saved in the TEAR_REG | ||
1207 | * register of the specified perf event. | ||
1208 | * | ||
1209 | * Only full sample-data-blocks are processed. Specify the flush_all flag | ||
1210 | * to also walk through partially filled sample-data-blocks. It is ignored | ||
1211 | * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag | ||
1212 | * enforces the processing of full sample-data-blocks only (trailer entries | ||
1213 | * with the block-full-indicator bit set). | ||
1214 | */ | ||
1215 | static void hw_perf_event_update(struct perf_event *event, int flush_all) | ||
1216 | { | ||
1217 | struct hw_perf_event *hwc = &event->hw; | ||
1218 | struct hws_trailer_entry *te; | ||
1219 | unsigned long *sdbt; | ||
1220 | unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; | ||
1221 | int done; | ||
1222 | |||
1223 | if (flush_all && SDB_FULL_BLOCKS(hwc)) | ||
1224 | flush_all = 0; | ||
1225 | |||
1226 | sdbt = (unsigned long *) TEAR_REG(hwc); | ||
1227 | done = event_overflow = sampl_overflow = num_sdb = 0; | ||
1228 | while (!done) { | ||
1229 | /* Get the trailer entry of the sample-data-block */ | ||
1230 | te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); | ||
1231 | |||
1232 | /* Leave loop if no more work to do (block full indicator) */ | ||
1233 | if (!te->f) { | ||
1234 | done = 1; | ||
1235 | if (!flush_all) | ||
1236 | break; | ||
1237 | } | ||
1238 | |||
1239 | /* Check the sample overflow count */ | ||
1240 | if (te->overflow) | ||
1241 | /* Account sample overflows and, if a particular limit | ||
1242 | * is reached, extend the sampling buffer. | ||
1243 | * For details, see sfb_account_overflows(). | ||
1244 | */ | ||
1245 | sampl_overflow += te->overflow; | ||
1246 | |||
1247 | /* Timestamps are valid for full sample-data-blocks only */ | ||
1248 | debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p " | ||
1249 | "overflow=%llu timestamp=0x%llx\n", | ||
1250 | sdbt, te->overflow, | ||
1251 | (te->f) ? trailer_timestamp(te) : 0ULL); | ||
1252 | |||
1253 | /* Collect all samples from a single sample-data-block and | ||
1254 | * flag if a (perf) event overflow happened. If so, the PMU | ||
1255 | * is stopped and remaining samples will be discarded. | ||
1256 | */ | ||
1257 | hw_collect_samples(event, sdbt, &event_overflow); | ||
1258 | num_sdb++; | ||
1259 | |||
1260 | /* Reset trailer (using compare-double-and-swap) */ | ||
1261 | do { | ||
1262 | te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; | ||
1263 | te_flags |= SDB_TE_ALERT_REQ_MASK; | ||
1264 | } while (!cmpxchg_double(&te->flags, &te->overflow, | ||
1265 | te->flags, te->overflow, | ||
1266 | te_flags, 0ULL)); | ||
1267 | |||
1268 | /* Advance to next sample-data-block */ | ||
1269 | sdbt++; | ||
1270 | if (is_link_entry(sdbt)) | ||
1271 | sdbt = get_next_sdbt(sdbt); | ||
1272 | |||
1273 | /* Update event hardware registers */ | ||
1274 | TEAR_REG(hwc) = (unsigned long) sdbt; | ||
1275 | |||
1276 | /* Stop processing sample-data if all samples of the current | ||
1277 | * sample-data-block were flushed even if it was not full. | ||
1278 | */ | ||
1279 | if (flush_all && done) | ||
1280 | break; | ||
1281 | |||
1282 | /* If an event overflow happened, discard samples by | ||
1283 | * processing any remaining sample-data-blocks. | ||
1284 | */ | ||
1285 | if (event_overflow) | ||
1286 | flush_all = 1; | ||
1287 | } | ||
1288 | |||
1289 | /* Account sample overflows in the event hardware structure */ | ||
1290 | if (sampl_overflow) | ||
1291 | OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + | ||
1292 | sampl_overflow, 1 + num_sdb); | ||
1293 | if (sampl_overflow || event_overflow) | ||
1294 | debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: " | ||
1295 | "overflow stats: sample=%llu event=%llu\n", | ||
1296 | sampl_overflow, event_overflow); | ||
1297 | } | ||
1298 | |||
1299 | static void cpumsf_pmu_read(struct perf_event *event) | ||
1300 | { | ||
1301 | /* Nothing to do ... updates are interrupt-driven */ | ||
1302 | } | ||
1303 | |||
1304 | /* Activate sampling control. | ||
1305 | * Next call of pmu_enable() starts sampling. | ||
1306 | */ | ||
1307 | static void cpumsf_pmu_start(struct perf_event *event, int flags) | ||
1308 | { | ||
1309 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
1310 | |||
1311 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
1312 | return; | ||
1313 | |||
1314 | if (flags & PERF_EF_RELOAD) | ||
1315 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
1316 | |||
1317 | perf_pmu_disable(event->pmu); | ||
1318 | event->hw.state = 0; | ||
1319 | cpuhw->lsctl.cs = 1; | ||
1320 | if (SAMPL_DIAG_MODE(&event->hw)) | ||
1321 | cpuhw->lsctl.cd = 1; | ||
1322 | perf_pmu_enable(event->pmu); | ||
1323 | } | ||
1324 | |||
1325 | /* Deactivate sampling control. | ||
1326 | * Next call of pmu_enable() stops sampling. | ||
1327 | */ | ||
1328 | static void cpumsf_pmu_stop(struct perf_event *event, int flags) | ||
1329 | { | ||
1330 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
1331 | |||
1332 | if (event->hw.state & PERF_HES_STOPPED) | ||
1333 | return; | ||
1334 | |||
1335 | perf_pmu_disable(event->pmu); | ||
1336 | cpuhw->lsctl.cs = 0; | ||
1337 | cpuhw->lsctl.cd = 0; | ||
1338 | event->hw.state |= PERF_HES_STOPPED; | ||
1339 | |||
1340 | if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { | ||
1341 | hw_perf_event_update(event, 1); | ||
1342 | event->hw.state |= PERF_HES_UPTODATE; | ||
1343 | } | ||
1344 | perf_pmu_enable(event->pmu); | ||
1345 | } | ||
1346 | |||
1347 | static int cpumsf_pmu_add(struct perf_event *event, int flags) | ||
1348 | { | ||
1349 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
1350 | int err; | ||
1351 | |||
1352 | if (cpuhw->flags & PMU_F_IN_USE) | ||
1353 | return -EAGAIN; | ||
1354 | |||
1355 | if (!cpuhw->sfb.sdbt) | ||
1356 | return -EINVAL; | ||
1357 | |||
1358 | err = 0; | ||
1359 | perf_pmu_disable(event->pmu); | ||
1360 | |||
1361 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
1362 | |||
1363 | /* Set up sampling controls. Always program the sampling register | ||
1364 | * using the SDB-table start. Reset the TEAR_REG event hardware register, | ||
1365 | * which is used by hw_perf_event_update() to store the sampling buffer | ||
1366 | * position after samples have been flushed. | ||
1367 | */ | ||
1368 | cpuhw->lsctl.s = 0; | ||
1369 | cpuhw->lsctl.h = 1; | ||
1370 | cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; | ||
1371 | cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; | ||
1372 | cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); | ||
1373 | hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); | ||
1374 | |||
1375 | /* Ensure sampling functions are in the disabled state. If disabled, | ||
1376 | * switch on sampling enable control. */ | ||
1377 | if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { | ||
1378 | err = -EAGAIN; | ||
1379 | goto out; | ||
1380 | } | ||
1381 | cpuhw->lsctl.es = 1; | ||
1382 | if (SAMPL_DIAG_MODE(&event->hw)) | ||
1383 | cpuhw->lsctl.ed = 1; | ||
1384 | |||
1385 | /* Set in_use flag and store event */ | ||
1386 | event->hw.idx = 0; /* only one sampling event per CPU supported */ | ||
1387 | cpuhw->event = event; | ||
1388 | cpuhw->flags |= PMU_F_IN_USE; | ||
1389 | |||
1390 | if (flags & PERF_EF_START) | ||
1391 | cpumsf_pmu_start(event, PERF_EF_RELOAD); | ||
1392 | out: | ||
1393 | perf_event_update_userpage(event); | ||
1394 | perf_pmu_enable(event->pmu); | ||
1395 | return err; | ||
1396 | } | ||
1397 | |||
1398 | static void cpumsf_pmu_del(struct perf_event *event, int flags) | ||
1399 | { | ||
1400 | struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
1401 | |||
1402 | perf_pmu_disable(event->pmu); | ||
1403 | cpumsf_pmu_stop(event, PERF_EF_UPDATE); | ||
1404 | |||
1405 | cpuhw->lsctl.es = 0; | ||
1406 | cpuhw->lsctl.ed = 0; | ||
1407 | cpuhw->flags &= ~PMU_F_IN_USE; | ||
1408 | cpuhw->event = NULL; | ||
1409 | |||
1410 | perf_event_update_userpage(event); | ||
1411 | perf_pmu_enable(event->pmu); | ||
1412 | } | ||
1413 | |||
1414 | static int cpumsf_pmu_event_idx(struct perf_event *event) | ||
1415 | { | ||
1416 | return event->hw.idx; | ||
1417 | } | ||
1418 | |||
1419 | CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); | ||
1420 | CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); | ||
1421 | |||
1422 | static struct attribute *cpumsf_pmu_events_attr[] = { | ||
1423 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), | ||
1424 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), | ||
1425 | NULL, | ||
1426 | }; | ||
1427 | |||
1428 | PMU_FORMAT_ATTR(event, "config:0-63"); | ||
1429 | |||
1430 | static struct attribute *cpumsf_pmu_format_attr[] = { | ||
1431 | &format_attr_event.attr, | ||
1432 | NULL, | ||
1433 | }; | ||
1434 | |||
1435 | static struct attribute_group cpumsf_pmu_events_group = { | ||
1436 | .name = "events", | ||
1437 | .attrs = cpumsf_pmu_events_attr, | ||
1438 | }; | ||
1439 | static struct attribute_group cpumsf_pmu_format_group = { | ||
1440 | .name = "format", | ||
1441 | .attrs = cpumsf_pmu_format_attr, | ||
1442 | }; | ||
1443 | static const struct attribute_group *cpumsf_pmu_attr_groups[] = { | ||
1444 | &cpumsf_pmu_events_group, | ||
1445 | &cpumsf_pmu_format_group, | ||
1446 | NULL, | ||
1447 | }; | ||
1448 | |||
1449 | static struct pmu cpumf_sampling = { | ||
1450 | .pmu_enable = cpumsf_pmu_enable, | ||
1451 | .pmu_disable = cpumsf_pmu_disable, | ||
1452 | |||
1453 | .event_init = cpumsf_pmu_event_init, | ||
1454 | .add = cpumsf_pmu_add, | ||
1455 | .del = cpumsf_pmu_del, | ||
1456 | |||
1457 | .start = cpumsf_pmu_start, | ||
1458 | .stop = cpumsf_pmu_stop, | ||
1459 | .read = cpumsf_pmu_read, | ||
1460 | |||
1461 | .event_idx = cpumsf_pmu_event_idx, | ||
1462 | .attr_groups = cpumsf_pmu_attr_groups, | ||
1463 | }; | ||
1464 | |||
1465 | static void cpumf_measurement_alert(struct ext_code ext_code, | ||
1466 | unsigned int alert, unsigned long unused) | ||
1467 | { | ||
1468 | struct cpu_hw_sf *cpuhw; | ||
1469 | |||
1470 | if (!(alert & CPU_MF_INT_SF_MASK)) | ||
1471 | return; | ||
1472 | inc_irq_stat(IRQEXT_CMS); | ||
1473 | cpuhw = &__get_cpu_var(cpu_hw_sf); | ||
1474 | |||
1475 | /* Measurement alerts are shared and might happen when the PMU | ||
1476 | * is not reserved. Ignore these alerts in this case. */ | ||
1477 | if (!(cpuhw->flags & PMU_F_RESERVED)) | ||
1478 | return; | ||
1479 | |||
1480 | /* The processing below must take care of multiple alert events that | ||
1481 | * might be indicated concurrently. */ | ||
1482 | |||
1483 | /* Program alert request */ | ||
1484 | if (alert & CPU_MF_INT_SF_PRA) { | ||
1485 | if (cpuhw->flags & PMU_F_IN_USE) | ||
1486 | hw_perf_event_update(cpuhw->event, 0); | ||
1487 | else | ||
1488 | WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); | ||
1489 | } | ||
1490 | |||
1491 | /* Report measurement alerts only for non-PRA codes */ | ||
1492 | if (alert != CPU_MF_INT_SF_PRA) | ||
1493 | debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert); | ||
1494 | |||
1495 | /* Sampling authorization change request */ | ||
1496 | if (alert & CPU_MF_INT_SF_SACA) | ||
1497 | qsi(&cpuhw->qsi); | ||
1498 | |||
1499 | /* Loss of sample data due to high-priority machine activities */ | ||
1500 | if (alert & CPU_MF_INT_SF_LSDA) { | ||
1501 | pr_err("Sample data was lost\n"); | ||
1502 | cpuhw->flags |= PMU_F_ERR_LSDA; | ||
1503 | sf_disable(); | ||
1504 | } | ||
1505 | |||
1506 | /* Invalid sampling buffer entry */ | ||
1507 | if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { | ||
1508 | pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", | ||
1509 | alert); | ||
1510 | cpuhw->flags |= PMU_F_ERR_IBE; | ||
1511 | sf_disable(); | ||
1512 | } | ||
1513 | } | ||
1514 | |||
1515 | static int cpumf_pmu_notifier(struct notifier_block *self, | ||
1516 | unsigned long action, void *hcpu) | ||
1517 | { | ||
1518 | unsigned int cpu = (long) hcpu; | ||
1519 | int flags; | ||
1520 | |||
1521 | /* Ignore the notification if no events are scheduled on the PMU. | ||
1522 | * This might be racy... | ||
1523 | */ | ||
1524 | if (!atomic_read(&num_events)) | ||
1525 | return NOTIFY_OK; | ||
1526 | |||
1527 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1528 | case CPU_ONLINE: | ||
1529 | case CPU_ONLINE_FROZEN: | ||
1530 | flags = PMC_INIT; | ||
1531 | smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); | ||
1532 | break; | ||
1533 | case CPU_DOWN_PREPARE: | ||
1534 | flags = PMC_RELEASE; | ||
1535 | smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1); | ||
1536 | break; | ||
1537 | default: | ||
1538 | break; | ||
1539 | } | ||
1540 | |||
1541 | return NOTIFY_OK; | ||
1542 | } | ||
1543 | |||
1544 | static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) | ||
1545 | { | ||
1546 | if (!cpum_sf_avail()) | ||
1547 | return -ENODEV; | ||
1548 | return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); | ||
1549 | } | ||
1550 | |||
1551 | static int param_set_sfb_size(const char *val, const struct kernel_param *kp) | ||
1552 | { | ||
1553 | int rc; | ||
1554 | unsigned long min, max; | ||
1555 | |||
1556 | if (!cpum_sf_avail()) | ||
1557 | return -ENODEV; | ||
1558 | if (!val || !strlen(val)) | ||
1559 | return -EINVAL; | ||
1560 | |||
1561 | /* Valid parameter values: "min,max" or "max" */ | ||
1562 | min = CPUM_SF_MIN_SDB; | ||
1563 | max = CPUM_SF_MAX_SDB; | ||
1564 | if (strchr(val, ',')) | ||
1565 | rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL; | ||
1566 | else | ||
1567 | rc = kstrtoul(val, 10, &max); | ||
1568 | |||
1569 | if (min < 2 || min >= max || max > get_num_physpages()) | ||
1570 | rc = -EINVAL; | ||
1571 | if (rc) | ||
1572 | return rc; | ||
1573 | |||
1574 | sfb_set_limits(min, max); | ||
1575 | pr_info("The sampling buffer limits have changed to: " | ||
1576 | "min=%lu max=%lu (diag=x%lu)\n", | ||
1577 | CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); | ||
1578 | return 0; | ||
1579 | } | ||
1580 | |||
1581 | #define param_check_sfb_size(name, p) __param_check(name, p, void) | ||
1582 | static struct kernel_param_ops param_ops_sfb_size = { | ||
1583 | .set = param_set_sfb_size, | ||
1584 | .get = param_get_sfb_size, | ||
1585 | }; | ||
1586 | |||
1587 | #define RS_INIT_FAILURE_QSI 0x0001 | ||
1588 | #define RS_INIT_FAILURE_BSDES 0x0002 | ||
1589 | #define RS_INIT_FAILURE_ALRT 0x0003 | ||
1590 | #define RS_INIT_FAILURE_PERF 0x0004 | ||
1591 | static void __init pr_cpumsf_err(unsigned int reason) | ||
1592 | { | ||
1593 | pr_err("Sampling facility support for perf is not available: " | ||
1594 | "reason=%04x\n", reason); | ||
1595 | } | ||
1596 | |||
1597 | static int __init init_cpum_sampling_pmu(void) | ||
1598 | { | ||
1599 | struct hws_qsi_info_block si; | ||
1600 | int err; | ||
1601 | |||
1602 | if (!cpum_sf_avail()) | ||
1603 | return -ENODEV; | ||
1604 | |||
1605 | memset(&si, 0, sizeof(si)); | ||
1606 | if (qsi(&si)) { | ||
1607 | pr_cpumsf_err(RS_INIT_FAILURE_QSI); | ||
1608 | return -ENODEV; | ||
1609 | } | ||
1610 | |||
1611 | if (si.bsdes != sizeof(struct hws_basic_entry)) { | ||
1612 | pr_cpumsf_err(RS_INIT_FAILURE_BSDES); | ||
1613 | return -EINVAL; | ||
1614 | } | ||
1615 | |||
1616 | if (si.ad) | ||
1617 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); | ||
1618 | |||
1619 | sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); | ||
1620 | if (!sfdbg) | ||
1621 | pr_err("Registering for s390dbf failed\n"); | ||
1622 | debug_register_view(sfdbg, &debug_sprintf_view); | ||
1623 | |||
1624 | err = register_external_interrupt(0x1407, cpumf_measurement_alert); | ||
1625 | if (err) { | ||
1626 | pr_cpumsf_err(RS_INIT_FAILURE_ALRT); | ||
1627 | goto out; | ||
1628 | } | ||
1629 | |||
1630 | err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); | ||
1631 | if (err) { | ||
1632 | pr_cpumsf_err(RS_INIT_FAILURE_PERF); | ||
1633 | unregister_external_interrupt(0x1407, cpumf_measurement_alert); | ||
1634 | goto out; | ||
1635 | } | ||
1636 | perf_cpu_notifier(cpumf_pmu_notifier); | ||
1637 | out: | ||
1638 | return err; | ||
1639 | } | ||
1640 | arch_initcall(init_cpum_sampling_pmu); | ||
1641 | core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); | ||
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 2343c218b8f9..5d2dfa31c4ef 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Performance event support for s390x | 2 | * Performance event support for s390x |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2012 | 4 | * Copyright IBM Corp. 2012, 2013 |
5 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | 5 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -16,15 +16,19 @@ | |||
16 | #include <linux/kvm_host.h> | 16 | #include <linux/kvm_host.h> |
17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
18 | #include <linux/export.h> | 18 | #include <linux/export.h> |
19 | #include <linux/seq_file.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/sysfs.h> | ||
19 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
20 | #include <asm/cpu_mf.h> | 23 | #include <asm/cpu_mf.h> |
21 | #include <asm/lowcore.h> | 24 | #include <asm/lowcore.h> |
22 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
26 | #include <asm/sysinfo.h> | ||
23 | 27 | ||
24 | const char *perf_pmu_name(void) | 28 | const char *perf_pmu_name(void) |
25 | { | 29 | { |
26 | if (cpum_cf_avail() || cpum_sf_avail()) | 30 | if (cpum_cf_avail() || cpum_sf_avail()) |
27 | return "CPU-measurement facilities (CPUMF)"; | 31 | return "CPU-Measurement Facilities (CPU-MF)"; |
28 | return "pmu"; | 32 | return "pmu"; |
29 | } | 33 | } |
30 | EXPORT_SYMBOL(perf_pmu_name); | 34 | EXPORT_SYMBOL(perf_pmu_name); |
@@ -35,6 +39,8 @@ int perf_num_counters(void) | |||
35 | 39 | ||
36 | if (cpum_cf_avail()) | 40 | if (cpum_cf_avail()) |
37 | num += PERF_CPUM_CF_MAX_CTR; | 41 | num += PERF_CPUM_CF_MAX_CTR; |
42 | if (cpum_sf_avail()) | ||
43 | num += PERF_CPUM_SF_MAX_CTR; | ||
38 | 44 | ||
39 | return num; | 45 | return num; |
40 | } | 46 | } |
@@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs) | |||
54 | { | 60 | { |
55 | if (user_mode(regs)) | 61 | if (user_mode(regs)) |
56 | return false; | 62 | return false; |
57 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 63 | #if IS_ENABLED(CONFIG_KVM) |
58 | return instruction_pointer(regs) == (unsigned long) &sie_exit; | 64 | return instruction_pointer(regs) == (unsigned long) &sie_exit; |
59 | #else | 65 | #else |
60 | return false; | 66 | return false; |
@@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs) | |||
83 | : PERF_RECORD_MISC_GUEST_KERNEL; | 89 | : PERF_RECORD_MISC_GUEST_KERNEL; |
84 | } | 90 | } |
85 | 91 | ||
92 | static unsigned long perf_misc_flags_sf(struct pt_regs *regs) | ||
93 | { | ||
94 | struct perf_sf_sde_regs *sde_regs; | ||
95 | unsigned long flags; | ||
96 | |||
97 | sde_regs = (struct perf_sf_sde_regs *) ®s->int_parm_long; | ||
98 | if (sde_regs->in_guest) | ||
99 | flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER | ||
100 | : PERF_RECORD_MISC_GUEST_KERNEL; | ||
101 | else | ||
102 | flags = user_mode(regs) ? PERF_RECORD_MISC_USER | ||
103 | : PERF_RECORD_MISC_KERNEL; | ||
104 | return flags; | ||
105 | } | ||
106 | |||
86 | unsigned long perf_misc_flags(struct pt_regs *regs) | 107 | unsigned long perf_misc_flags(struct pt_regs *regs) |
87 | { | 108 | { |
109 | /* Check if the cpum_sf PMU has created the pt_regs structure. | ||
110 | * In this case, perf misc flags can be easily extracted. Otherwise, | ||
111 | * do regular checks on the pt_regs content. | ||
112 | */ | ||
113 | if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA) | ||
114 | if (!regs->gprs[15]) | ||
115 | return perf_misc_flags_sf(regs); | ||
116 | |||
88 | if (is_in_guest(regs)) | 117 | if (is_in_guest(regs)) |
89 | return perf_misc_guest_flags(regs); | 118 | return perf_misc_guest_flags(regs); |
90 | 119 | ||
@@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs) | |||
92 | : PERF_RECORD_MISC_KERNEL; | 121 | : PERF_RECORD_MISC_KERNEL; |
93 | } | 122 | } |
94 | 123 | ||
95 | void perf_event_print_debug(void) | 124 | void print_debug_cf(void) |
96 | { | 125 | { |
97 | struct cpumf_ctr_info cf_info; | 126 | struct cpumf_ctr_info cf_info; |
98 | unsigned long flags; | 127 | int cpu = smp_processor_id(); |
99 | int cpu; | ||
100 | |||
101 | if (!cpum_cf_avail()) | ||
102 | return; | ||
103 | |||
104 | local_irq_save(flags); | ||
105 | 128 | ||
106 | cpu = smp_processor_id(); | ||
107 | memset(&cf_info, 0, sizeof(cf_info)); | 129 | memset(&cf_info, 0, sizeof(cf_info)); |
108 | if (!qctri(&cf_info)) | 130 | if (!qctri(&cf_info)) |
109 | pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", | 131 | pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", |
110 | cpu, cf_info.cfvn, cf_info.csvn, | 132 | cpu, cf_info.cfvn, cf_info.csvn, |
111 | cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); | 133 | cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); |
134 | } | ||
135 | |||
136 | static void print_debug_sf(void) | ||
137 | { | ||
138 | struct hws_qsi_info_block si; | ||
139 | int cpu = smp_processor_id(); | ||
112 | 140 | ||
141 | memset(&si, 0, sizeof(si)); | ||
142 | if (qsi(&si)) | ||
143 | return; | ||
144 | |||
145 | pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n", | ||
146 | cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate, | ||
147 | si.cpu_speed); | ||
148 | |||
149 | if (si.as) | ||
150 | pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i" | ||
151 | " bsdes=%i tear=%016lx dear=%016lx\n", cpu, | ||
152 | si.as, si.es, si.cs, si.bsdes, si.tear, si.dear); | ||
153 | if (si.ad) | ||
154 | pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i" | ||
155 | " dsdes=%i tear=%016lx dear=%016lx\n", cpu, | ||
156 | si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear); | ||
157 | } | ||
158 | |||
159 | void perf_event_print_debug(void) | ||
160 | { | ||
161 | unsigned long flags; | ||
162 | |||
163 | local_irq_save(flags); | ||
164 | if (cpum_cf_avail()) | ||
165 | print_debug_cf(); | ||
166 | if (cpum_sf_avail()) | ||
167 | print_debug_sf(); | ||
113 | local_irq_restore(flags); | 168 | local_irq_restore(flags); |
114 | } | 169 | } |
115 | 170 | ||
171 | /* Service level infrastructure */ | ||
172 | static void sl_print_counter(struct seq_file *m) | ||
173 | { | ||
174 | struct cpumf_ctr_info ci; | ||
175 | |||
176 | memset(&ci, 0, sizeof(ci)); | ||
177 | if (qctri(&ci)) | ||
178 | return; | ||
179 | |||
180 | seq_printf(m, "CPU-MF: Counter facility: version=%u.%u " | ||
181 | "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl); | ||
182 | } | ||
183 | |||
184 | static void sl_print_sampling(struct seq_file *m) | ||
185 | { | ||
186 | struct hws_qsi_info_block si; | ||
187 | |||
188 | memset(&si, 0, sizeof(si)); | ||
189 | if (qsi(&si)) | ||
190 | return; | ||
191 | |||
192 | if (!si.as && !si.ad) | ||
193 | return; | ||
194 | |||
195 | seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu" | ||
196 | " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate, | ||
197 | si.cpu_speed); | ||
198 | if (si.as) | ||
199 | seq_printf(m, "CPU-MF: Sampling facility: mode=basic" | ||
200 | " sample_size=%u\n", si.bsdes); | ||
201 | if (si.ad) | ||
202 | seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic" | ||
203 | " sample_size=%u\n", si.dsdes); | ||
204 | } | ||
205 | |||
206 | static void service_level_perf_print(struct seq_file *m, | ||
207 | struct service_level *sl) | ||
208 | { | ||
209 | if (cpum_cf_avail()) | ||
210 | sl_print_counter(m); | ||
211 | if (cpum_sf_avail()) | ||
212 | sl_print_sampling(m); | ||
213 | } | ||
214 | |||
215 | static struct service_level service_level_perf = { | ||
216 | .seq_print = service_level_perf_print, | ||
217 | }; | ||
218 | |||
219 | static int __init service_level_perf_register(void) | ||
220 | { | ||
221 | return register_service_level(&service_level_perf); | ||
222 | } | ||
223 | arch_initcall(service_level_perf_register); | ||
224 | |||
116 | /* See also arch/s390/kernel/traps.c */ | 225 | /* See also arch/s390/kernel/traps.c */ |
117 | static unsigned long __store_trace(struct perf_callchain_entry *entry, | 226 | static unsigned long __store_trace(struct perf_callchain_entry *entry, |
118 | unsigned long sp, | 227 | unsigned long sp, |
@@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, | |||
172 | __store_trace(entry, head, S390_lowcore.thread_info, | 281 | __store_trace(entry, head, S390_lowcore.thread_info, |
173 | S390_lowcore.thread_info + THREAD_SIZE); | 282 | S390_lowcore.thread_info + THREAD_SIZE); |
174 | } | 283 | } |
284 | |||
285 | /* Perf definitions for PMU event attributes in sysfs */ | ||
286 | ssize_t cpumf_events_sysfs_show(struct device *dev, | ||
287 | struct device_attribute *attr, char *page) | ||
288 | { | ||
289 | struct perf_pmu_events_attr *pmu_attr; | ||
290 | |||
291 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); | ||
292 | return sprintf(page, "event=0x%04llx,name=%s\n", | ||
293 | pmu_attr->id, attr->attr.name); | ||
294 | } | ||
295 | |||
296 | /* Reserve/release functions for sharing perf hardware */ | ||
297 | static DEFINE_SPINLOCK(perf_hw_owner_lock); | ||
298 | static void *perf_sampling_owner; | ||
299 | |||
300 | int perf_reserve_sampling(void) | ||
301 | { | ||
302 | int err; | ||
303 | |||
304 | err = 0; | ||
305 | spin_lock(&perf_hw_owner_lock); | ||
306 | if (perf_sampling_owner) { | ||
307 | pr_warn("The sampling facility is already reserved by %p\n", | ||
308 | perf_sampling_owner); | ||
309 | err = -EBUSY; | ||
310 | } else | ||
311 | perf_sampling_owner = __builtin_return_address(0); | ||
312 | spin_unlock(&perf_hw_owner_lock); | ||
313 | return err; | ||
314 | } | ||
315 | EXPORT_SYMBOL(perf_reserve_sampling); | ||
316 | |||
317 | void perf_release_sampling(void) | ||
318 | { | ||
319 | spin_lock(&perf_hw_owner_lock); | ||
320 | WARN_ON(!perf_sampling_owner); | ||
321 | perf_sampling_owner = NULL; | ||
322 | spin_unlock(&perf_hw_owner_lock); | ||
323 | } | ||
324 | EXPORT_SYMBOL(perf_release_sampling); | ||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 7ed0d4e2a435..dd145321d215 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void) | |||
261 | 261 | ||
262 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 262 | unsigned long arch_randomize_brk(struct mm_struct *mm) |
263 | { | 263 | { |
264 | unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); | 264 | unsigned long ret; |
265 | 265 | ||
266 | if (ret < mm->brk) | 266 | ret = PAGE_ALIGN(mm->brk + brk_rnd()); |
267 | return mm->brk; | 267 | return (ret > mm->brk) ? ret : mm->brk; |
268 | return ret; | ||
269 | } | 268 | } |
270 | 269 | ||
271 | unsigned long randomize_et_dyn(unsigned long base) | 270 | unsigned long randomize_et_dyn(unsigned long base) |
272 | { | 271 | { |
273 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); | 272 | unsigned long ret; |
274 | 273 | ||
275 | if (!(current->flags & PF_RANDOMIZE)) | 274 | if (!(current->flags & PF_RANDOMIZE)) |
276 | return base; | 275 | return base; |
277 | if (ret < base) | 276 | ret = PAGE_ALIGN(base + brk_rnd()); |
278 | return base; | 277 | return (ret > base) ? ret : base; |
279 | return ret; | ||
280 | } | 278 | } |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index e65c91c591e8..f6be6087a0e9 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task) | |||
56 | #ifdef CONFIG_64BIT | 56 | #ifdef CONFIG_64BIT |
57 | /* Take care of the enable/disable of transactional execution. */ | 57 | /* Take care of the enable/disable of transactional execution. */ |
58 | if (MACHINE_HAS_TE) { | 58 | if (MACHINE_HAS_TE) { |
59 | unsigned long cr[3], cr_new[3]; | 59 | unsigned long cr, cr_new; |
60 | 60 | ||
61 | __ctl_store(cr, 0, 2); | 61 | __ctl_store(cr, 0, 0); |
62 | cr_new[1] = cr[1]; | ||
63 | /* Set or clear transaction execution TXC bit 8. */ | 62 | /* Set or clear transaction execution TXC bit 8. */ |
63 | cr_new = cr | (1UL << 55); | ||
64 | if (task->thread.per_flags & PER_FLAG_NO_TE) | 64 | if (task->thread.per_flags & PER_FLAG_NO_TE) |
65 | cr_new[0] = cr[0] & ~(1UL << 55); | 65 | cr_new &= ~(1UL << 55); |
66 | else | 66 | if (cr_new != cr) |
67 | cr_new[0] = cr[0] | (1UL << 55); | 67 | __ctl_load(cr, 0, 0); |
68 | /* Set or clear transaction execution TDC bits 62 and 63. */ | 68 | /* Set or clear transaction execution TDC bits 62 and 63. */ |
69 | cr_new[2] = cr[2] & ~3UL; | 69 | __ctl_store(cr, 2, 2); |
70 | cr_new = cr & ~3UL; | ||
70 | if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { | 71 | if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { |
71 | if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) | 72 | if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) |
72 | cr_new[2] |= 1UL; | 73 | cr_new |= 1UL; |
73 | else | 74 | else |
74 | cr_new[2] |= 2UL; | 75 | cr_new |= 2UL; |
75 | } | 76 | } |
76 | if (memcmp(&cr_new, &cr, sizeof(cr))) | 77 | if (cr_new != cr) |
77 | __ctl_load(cr_new, 0, 2); | 78 | __ctl_load(cr_new, 2, 2); |
78 | } | 79 | } |
79 | #endif | 80 | #endif |
80 | /* Copy user specified PER registers */ | 81 | /* Copy user specified PER registers */ |
@@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task) | |||
107 | void user_enable_single_step(struct task_struct *task) | 108 | void user_enable_single_step(struct task_struct *task) |
108 | { | 109 | { |
109 | set_tsk_thread_flag(task, TIF_SINGLE_STEP); | 110 | set_tsk_thread_flag(task, TIF_SINGLE_STEP); |
110 | if (task == current) | ||
111 | update_cr_regs(task); | ||
112 | } | 111 | } |
113 | 112 | ||
114 | void user_disable_single_step(struct task_struct *task) | 113 | void user_disable_single_step(struct task_struct *task) |
115 | { | 114 | { |
116 | clear_tsk_thread_flag(task, TIF_SINGLE_STEP); | 115 | clear_tsk_thread_flag(task, TIF_SINGLE_STEP); |
117 | if (task == current) | ||
118 | update_cr_regs(task); | ||
119 | } | 116 | } |
120 | 117 | ||
121 | /* | 118 | /* |
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 3bac589844a7..9f60467938d1 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #ifdef CONFIG_FUNCTION_TRACER | 5 | #ifdef CONFIG_FUNCTION_TRACER |
6 | EXPORT_SYMBOL(_mcount); | 6 | EXPORT_SYMBOL(_mcount); |
7 | #endif | 7 | #endif |
8 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | 8 | #if IS_ENABLED(CONFIG_KVM) |
9 | EXPORT_SYMBOL(sie64a); | 9 | EXPORT_SYMBOL(sie64a); |
10 | EXPORT_SYMBOL(sie_exit); | 10 | EXPORT_SYMBOL(sie_exit); |
11 | #endif | 11 | #endif |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0f3d44ecbfc6..09e2f468f48b 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -373,7 +373,7 @@ static void __init setup_lowcore(void) | |||
373 | 373 | ||
374 | /* | 374 | /* |
375 | * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant | 375 | * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant |
376 | * restart data to the absolute zero lowcore. This is necesary if | 376 | * restart data to the absolute zero lowcore. This is necessary if |
377 | * PSW restart is done on an offline CPU that has lowcore zero. | 377 | * PSW restart is done on an offline CPU that has lowcore zero. |
378 | */ | 378 | */ |
379 | lc->restart_stack = (unsigned long) restart_stack; | 379 | lc->restart_stack = (unsigned long) restart_stack; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 958704798f4a..a7125b62a9a6 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -59,7 +59,7 @@ enum { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct pcpu { | 61 | struct pcpu { |
62 | struct cpu cpu; | 62 | struct cpu *cpu; |
63 | struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ | 63 | struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ |
64 | unsigned long async_stack; /* async stack for the cpu */ | 64 | unsigned long async_stack; /* async stack for the cpu */ |
65 | unsigned long panic_stack; /* panic stack for the cpu */ | 65 | unsigned long panic_stack; /* panic stack for the cpu */ |
@@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) | |||
159 | { | 159 | { |
160 | int order; | 160 | int order; |
161 | 161 | ||
162 | set_bit(ec_bit, &pcpu->ec_mask); | 162 | if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) |
163 | order = pcpu_running(pcpu) ? | 163 | return; |
164 | SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; | 164 | order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; |
165 | pcpu_sigp_retry(pcpu, order, 0); | 165 | pcpu_sigp_retry(pcpu, order, 0); |
166 | } | 166 | } |
167 | 167 | ||
@@ -965,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, | |||
965 | void *hcpu) | 965 | void *hcpu) |
966 | { | 966 | { |
967 | unsigned int cpu = (unsigned int)(long)hcpu; | 967 | unsigned int cpu = (unsigned int)(long)hcpu; |
968 | struct cpu *c = &pcpu_devices[cpu].cpu; | 968 | struct cpu *c = pcpu_devices[cpu].cpu; |
969 | struct device *s = &c->dev; | 969 | struct device *s = &c->dev; |
970 | int err = 0; | 970 | int err = 0; |
971 | 971 | ||
@@ -982,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, | |||
982 | 982 | ||
983 | static int smp_add_present_cpu(int cpu) | 983 | static int smp_add_present_cpu(int cpu) |
984 | { | 984 | { |
985 | struct cpu *c = &pcpu_devices[cpu].cpu; | 985 | struct device *s; |
986 | struct device *s = &c->dev; | 986 | struct cpu *c; |
987 | int rc; | 987 | int rc; |
988 | 988 | ||
989 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
990 | if (!c) | ||
991 | return -ENOMEM; | ||
992 | pcpu_devices[cpu].cpu = c; | ||
993 | s = &c->dev; | ||
989 | c->hotpluggable = 1; | 994 | c->hotpluggable = 1; |
990 | rc = register_cpu(c, cpu); | 995 | rc = register_cpu(c, cpu); |
991 | if (rc) | 996 | if (rc) |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2440602e6df1..d101dae62771 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) | |||
275 | return -EOPNOTSUPP; | 275 | return -EOPNOTSUPP; |
276 | } else { | 276 | } else { |
277 | /* | 277 | /* |
278 | * Set condition code 3 to stop the guest from issueing channel | 278 | * Set condition code 3 to stop the guest from issuing channel |
279 | * I/O instructions. | 279 | * I/O instructions. |
280 | */ | 280 | */ |
281 | kvm_s390_set_psw_cc(vcpu, 3); | 281 | kvm_s390_set_psw_cc(vcpu, 3); |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dbdab3e7a1a6..0632dc50da78 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -74,8 +74,8 @@ static size_t copy_in_kernel(size_t count, void __user *to, | |||
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Returns kernel address for user virtual address. If the returned address is | 76 | * Returns kernel address for user virtual address. If the returned address is |
77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address | 77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the |
78 | * contains the (negative) exception code. | 78 | * address contains the (negative) exception code. |
79 | */ | 79 | */ |
80 | #ifdef CONFIG_64BIT | 80 | #ifdef CONFIG_64BIT |
81 | 81 | ||
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index e794c88f699a..3584ed9b20a1 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap, | |||
293 | * @addr: address in the guest address space | 293 | * @addr: address in the guest address space |
294 | * @len: length of the memory area to unmap | 294 | * @len: length of the memory area to unmap |
295 | * | 295 | * |
296 | * Returns 0 if the unmap succeded, -EINVAL if not. | 296 | * Returns 0 if the unmap succeeded, -EINVAL if not. |
297 | */ | 297 | */ |
298 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | 298 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) |
299 | { | 299 | { |
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment); | |||
344 | * @from: source address in the parent address space | 344 | * @from: source address in the parent address space |
345 | * @to: target address in the guest address space | 345 | * @to: target address in the guest address space |
346 | * | 346 | * |
347 | * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. | 347 | * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not. |
348 | */ | 348 | */ |
349 | int gmap_map_segment(struct gmap *gmap, unsigned long from, | 349 | int gmap_map_segment(struct gmap *gmap, unsigned long from, |
350 | unsigned long to, unsigned long len) | 350 | unsigned long to, unsigned long len) |
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 16871da37371..708d60e40066 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -368,14 +368,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, | |||
368 | EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); | 368 | EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); |
369 | /* lhi %r4,0 */ | 369 | /* lhi %r4,0 */ |
370 | EMIT4(0xa7480000); | 370 | EMIT4(0xa7480000); |
371 | /* dr %r4,%r12 */ | 371 | /* dlr %r4,%r12 */ |
372 | EMIT2(0x1d4c); | 372 | EMIT4(0xb997004c); |
373 | break; | 373 | break; |
374 | case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */ | 374 | case BPF_S_ALU_DIV_K: /* A /= K */ |
375 | /* m %r4,<d(K)>(%r13) */ | 375 | if (K == 1) |
376 | EMIT4_DISP(0x5c40d000, EMIT_CONST(K)); | 376 | break; |
377 | /* lr %r5,%r4 */ | 377 | /* lhi %r4,0 */ |
378 | EMIT2(0x1854); | 378 | EMIT4(0xa7480000); |
379 | /* dl %r4,<d(K)>(%r13) */ | ||
380 | EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); | ||
379 | break; | 381 | break; |
380 | case BPF_S_ALU_MOD_X: /* A %= X */ | 382 | case BPF_S_ALU_MOD_X: /* A %= X */ |
381 | jit->seen |= SEEN_XREG | SEEN_RET0; | 383 | jit->seen |= SEEN_XREG | SEEN_RET0; |
@@ -385,16 +387,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, | |||
385 | EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); | 387 | EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg)); |
386 | /* lhi %r4,0 */ | 388 | /* lhi %r4,0 */ |
387 | EMIT4(0xa7480000); | 389 | EMIT4(0xa7480000); |
388 | /* dr %r4,%r12 */ | 390 | /* dlr %r4,%r12 */ |
389 | EMIT2(0x1d4c); | 391 | EMIT4(0xb997004c); |
390 | /* lr %r5,%r4 */ | 392 | /* lr %r5,%r4 */ |
391 | EMIT2(0x1854); | 393 | EMIT2(0x1854); |
392 | break; | 394 | break; |
393 | case BPF_S_ALU_MOD_K: /* A %= K */ | 395 | case BPF_S_ALU_MOD_K: /* A %= K */ |
396 | if (K == 1) { | ||
397 | /* lhi %r5,0 */ | ||
398 | EMIT4(0xa7580000); | ||
399 | break; | ||
400 | } | ||
394 | /* lhi %r4,0 */ | 401 | /* lhi %r4,0 */ |
395 | EMIT4(0xa7480000); | 402 | EMIT4(0xa7480000); |
396 | /* d %r4,<d(K)>(%r13) */ | 403 | /* dl %r4,<d(K)>(%r13) */ |
397 | EMIT4_DISP(0x5d40d000, EMIT_CONST(K)); | 404 | EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); |
398 | /* lr %r5,%r4 */ | 405 | /* lr %r5,%r4 */ |
399 | EMIT2(0x1854); | 406 | EMIT2(0x1854); |
400 | break; | 407 | break; |
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 231cecafc2f1..a32c96761eab 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -26,9 +26,6 @@ | |||
26 | #define MAX_NUM_SDB 511 | 26 | #define MAX_NUM_SDB 511 |
27 | #define MIN_NUM_SDB 1 | 27 | #define MIN_NUM_SDB 1 |
28 | 28 | ||
29 | #define ALERT_REQ_MASK 0x4000000000000000ul | ||
30 | #define BUFFER_FULL_MASK 0x8000000000000000ul | ||
31 | |||
32 | DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); | 29 | DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); |
33 | 30 | ||
34 | struct hws_execute_parms { | 31 | struct hws_execute_parms { |
@@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom); | |||
44 | 41 | ||
45 | static unsigned char hws_flush_all; | 42 | static unsigned char hws_flush_all; |
46 | static unsigned int hws_oom; | 43 | static unsigned int hws_oom; |
44 | static unsigned int hws_alert; | ||
47 | static struct workqueue_struct *hws_wq; | 45 | static struct workqueue_struct *hws_wq; |
48 | 46 | ||
49 | static unsigned int hws_state; | 47 | static unsigned int hws_state; |
@@ -65,43 +63,6 @@ static unsigned long interval; | |||
65 | static unsigned long min_sampler_rate; | 63 | static unsigned long min_sampler_rate; |
66 | static unsigned long max_sampler_rate; | 64 | static unsigned long max_sampler_rate; |
67 | 65 | ||
68 | static int ssctl(void *buffer) | ||
69 | { | ||
70 | int cc; | ||
71 | |||
72 | /* set in order to detect a program check */ | ||
73 | cc = 1; | ||
74 | |||
75 | asm volatile( | ||
76 | "0: .insn s,0xB2870000,0(%1)\n" | ||
77 | "1: ipm %0\n" | ||
78 | " srl %0,28\n" | ||
79 | "2:\n" | ||
80 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | ||
81 | : "+d" (cc), "+a" (buffer) | ||
82 | : "m" (*((struct hws_ssctl_request_block *)buffer)) | ||
83 | : "cc", "memory"); | ||
84 | |||
85 | return cc ? -EINVAL : 0 ; | ||
86 | } | ||
87 | |||
88 | static int qsi(void *buffer) | ||
89 | { | ||
90 | int cc; | ||
91 | cc = 1; | ||
92 | |||
93 | asm volatile( | ||
94 | "0: .insn s,0xB2860000,0(%1)\n" | ||
95 | "1: lhi %0,0\n" | ||
96 | "2:\n" | ||
97 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | ||
98 | : "=d" (cc), "+a" (buffer) | ||
99 | : "m" (*((struct hws_qsi_info_block *)buffer)) | ||
100 | : "cc", "memory"); | ||
101 | |||
102 | return cc ? -EINVAL : 0; | ||
103 | } | ||
104 | |||
105 | static void execute_qsi(void *parms) | 66 | static void execute_qsi(void *parms) |
106 | { | 67 | { |
107 | struct hws_execute_parms *ep = parms; | 68 | struct hws_execute_parms *ep = parms; |
@@ -113,7 +74,7 @@ static void execute_ssctl(void *parms) | |||
113 | { | 74 | { |
114 | struct hws_execute_parms *ep = parms; | 75 | struct hws_execute_parms *ep = parms; |
115 | 76 | ||
116 | ep->rc = ssctl(ep->buffer); | 77 | ep->rc = lsctl(ep->buffer); |
117 | } | 78 | } |
118 | 79 | ||
119 | static int smp_ctl_ssctl_stop(int cpu) | 80 | static int smp_ctl_ssctl_stop(int cpu) |
@@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu) | |||
214 | return ep.rc; | 175 | return ep.rc; |
215 | } | 176 | } |
216 | 177 | ||
217 | static inline unsigned long *trailer_entry_ptr(unsigned long v) | ||
218 | { | ||
219 | void *ret; | ||
220 | |||
221 | ret = (void *)v; | ||
222 | ret += PAGE_SIZE; | ||
223 | ret -= sizeof(struct hws_trailer_entry); | ||
224 | |||
225 | return (unsigned long *) ret; | ||
226 | } | ||
227 | |||
228 | static void hws_ext_handler(struct ext_code ext_code, | 178 | static void hws_ext_handler(struct ext_code ext_code, |
229 | unsigned int param32, unsigned long param64) | 179 | unsigned int param32, unsigned long param64) |
230 | { | 180 | { |
@@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code, | |||
233 | if (!(param32 & CPU_MF_INT_SF_MASK)) | 183 | if (!(param32 & CPU_MF_INT_SF_MASK)) |
234 | return; | 184 | return; |
235 | 185 | ||
186 | if (!hws_alert) | ||
187 | return; | ||
188 | |||
236 | inc_irq_stat(IRQEXT_CMS); | 189 | inc_irq_stat(IRQEXT_CMS); |
237 | atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); | 190 | atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); |
238 | 191 | ||
@@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void) | |||
256 | } | 209 | } |
257 | } | 210 | } |
258 | 211 | ||
259 | static int is_link_entry(unsigned long *s) | ||
260 | { | ||
261 | return *s & 0x1ul ? 1 : 0; | ||
262 | } | ||
263 | |||
264 | static unsigned long *get_next_sdbt(unsigned long *s) | ||
265 | { | ||
266 | return (unsigned long *) (*s & ~0x1ul); | ||
267 | } | ||
268 | |||
269 | static int prepare_cpu_buffers(void) | 212 | static int prepare_cpu_buffers(void) |
270 | { | 213 | { |
271 | int cpu; | 214 | int cpu; |
@@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu) | |||
353 | } | 296 | } |
354 | *sdbt = sdb; | 297 | *sdbt = sdb; |
355 | trailer = trailer_entry_ptr(*sdbt); | 298 | trailer = trailer_entry_ptr(*sdbt); |
356 | *trailer = ALERT_REQ_MASK; | 299 | *trailer = SDB_TE_ALERT_REQ_MASK; |
357 | sdbt++; | 300 | sdbt++; |
358 | mutex_unlock(&hws_sem_oom); | 301 | mutex_unlock(&hws_sem_oom); |
359 | } | 302 | } |
@@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu) | |||
829 | 772 | ||
830 | trailer = trailer_entry_ptr(*sdbt); | 773 | trailer = trailer_entry_ptr(*sdbt); |
831 | /* leave loop if no more work to do */ | 774 | /* leave loop if no more work to do */ |
832 | if (!(*trailer & BUFFER_FULL_MASK)) { | 775 | if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) { |
833 | done = 1; | 776 | done = 1; |
834 | if (!hws_flush_all) | 777 | if (!hws_flush_all) |
835 | continue; | 778 | continue; |
@@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu) | |||
856 | static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | 799 | static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, |
857 | unsigned long *dear) | 800 | unsigned long *dear) |
858 | { | 801 | { |
859 | struct hws_data_entry *sample_data_ptr; | 802 | struct hws_basic_entry *sample_data_ptr; |
860 | unsigned long *trailer; | 803 | unsigned long *trailer; |
861 | 804 | ||
862 | trailer = trailer_entry_ptr(*sdbt); | 805 | trailer = trailer_entry_ptr(*sdbt); |
@@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | |||
866 | trailer = dear; | 809 | trailer = dear; |
867 | } | 810 | } |
868 | 811 | ||
869 | sample_data_ptr = (struct hws_data_entry *)(*sdbt); | 812 | sample_data_ptr = (struct hws_basic_entry *)(*sdbt); |
870 | 813 | ||
871 | while ((unsigned long *)sample_data_ptr < trailer) { | 814 | while ((unsigned long *)sample_data_ptr < trailer) { |
872 | struct pt_regs *regs = NULL; | 815 | struct pt_regs *regs = NULL; |
@@ -1002,6 +945,7 @@ int hwsampler_deallocate(void) | |||
1002 | goto deallocate_exit; | 945 | goto deallocate_exit; |
1003 | 946 | ||
1004 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); | 947 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); |
948 | hws_alert = 0; | ||
1005 | deallocate_sdbt(); | 949 | deallocate_sdbt(); |
1006 | 950 | ||
1007 | hws_state = HWS_DEALLOCATED; | 951 | hws_state = HWS_DEALLOCATED; |
@@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void) | |||
1116 | 1060 | ||
1117 | if (hws_state == HWS_STOPPED) { | 1061 | if (hws_state == HWS_STOPPED) { |
1118 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); | 1062 | irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); |
1063 | hws_alert = 0; | ||
1119 | deallocate_sdbt(); | 1064 | deallocate_sdbt(); |
1120 | } | 1065 | } |
1121 | if (hws_wq) { | 1066 | if (hws_wq) { |
@@ -1190,6 +1135,7 @@ start_all_exit: | |||
1190 | hws_oom = 1; | 1135 | hws_oom = 1; |
1191 | hws_flush_all = 0; | 1136 | hws_flush_all = 0; |
1192 | /* now let them in, 1407 CPUMF external interrupts */ | 1137 | /* now let them in, 1407 CPUMF external interrupts */ |
1138 | hws_alert = 1; | ||
1193 | irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); | 1139 | irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); |
1194 | 1140 | ||
1195 | return 0; | 1141 | return 0; |
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h index 0022e1ebfbde..a483d06f2fa7 100644 --- a/arch/s390/oprofile/hwsampler.h +++ b/arch/s390/oprofile/hwsampler.h | |||
@@ -9,27 +9,7 @@ | |||
9 | #define HWSAMPLER_H_ | 9 | #define HWSAMPLER_H_ |
10 | 10 | ||
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | 12 | #include <asm/cpu_mf.h> | |
13 | struct hws_qsi_info_block /* QUERY SAMPLING information block */ | ||
14 | { /* Bit(s) */ | ||
15 | unsigned int b0_13:14; /* 0-13: zeros */ | ||
16 | unsigned int as:1; /* 14: sampling authorisation control*/ | ||
17 | unsigned int b15_21:7; /* 15-21: zeros */ | ||
18 | unsigned int es:1; /* 22: sampling enable control */ | ||
19 | unsigned int b23_29:7; /* 23-29: zeros */ | ||
20 | unsigned int cs:1; /* 30: sampling activation control */ | ||
21 | unsigned int:1; /* 31: reserved */ | ||
22 | unsigned int bsdes:16; /* 4-5: size of sampling entry */ | ||
23 | unsigned int:16; /* 6-7: reserved */ | ||
24 | unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ | ||
25 | unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ | ||
26 | unsigned long tear; /* 24-31: TEAR contents */ | ||
27 | unsigned long dear; /* 32-39: DEAR contents */ | ||
28 | unsigned int rsvrd0; /* 40-43: reserved */ | ||
29 | unsigned int cpu_speed; /* 44-47: CPU speed */ | ||
30 | unsigned long long rsvrd1; /* 48-55: reserved */ | ||
31 | unsigned long long rsvrd2; /* 56-63: reserved */ | ||
32 | }; | ||
33 | 13 | ||
34 | struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */ | 14 | struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */ |
35 | { /* bytes 0 - 7 Bit(s) */ | 15 | { /* bytes 0 - 7 Bit(s) */ |
@@ -68,36 +48,6 @@ struct hws_cpu_buffer { | |||
68 | unsigned int stop_mode:1; | 48 | unsigned int stop_mode:1; |
69 | }; | 49 | }; |
70 | 50 | ||
71 | struct hws_data_entry { | ||
72 | unsigned int def:16; /* 0-15 Data Entry Format */ | ||
73 | unsigned int R:4; /* 16-19 reserved */ | ||
74 | unsigned int U:4; /* 20-23 Number of unique instruct. */ | ||
75 | unsigned int z:2; /* zeros */ | ||
76 | unsigned int T:1; /* 26 PSW DAT mode */ | ||
77 | unsigned int W:1; /* 27 PSW wait state */ | ||
78 | unsigned int P:1; /* 28 PSW Problem state */ | ||
79 | unsigned int AS:2; /* 29-30 PSW address-space control */ | ||
80 | unsigned int I:1; /* 31 entry valid or invalid */ | ||
81 | unsigned int:16; | ||
82 | unsigned int prim_asn:16; /* primary ASN */ | ||
83 | unsigned long long ia; /* Instruction Address */ | ||
84 | unsigned long long gpp; /* Guest Program Parameter */ | ||
85 | unsigned long long hpp; /* Host Program Parameter */ | ||
86 | }; | ||
87 | |||
88 | struct hws_trailer_entry { | ||
89 | unsigned int f:1; /* 0 - Block Full Indicator */ | ||
90 | unsigned int a:1; /* 1 - Alert request control */ | ||
91 | unsigned long:62; /* 2 - 63: Reserved */ | ||
92 | unsigned long overflow; /* 64 - sample Overflow count */ | ||
93 | unsigned long timestamp; /* 16 - time-stamp */ | ||
94 | unsigned long timestamp1; /* */ | ||
95 | unsigned long reserved1; /* 32 -Reserved */ | ||
96 | unsigned long reserved2; /* */ | ||
97 | unsigned long progusage1; /* 48 - reserved for programming use */ | ||
98 | unsigned long progusage2; /* */ | ||
99 | }; | ||
100 | |||
101 | int hwsampler_setup(void); | 51 | int hwsampler_setup(void); |
102 | int hwsampler_shutdown(void); | 52 | int hwsampler_shutdown(void); |
103 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); | 53 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 04e1b6a85362..9ffe645d5989 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/oprofile.h> | 12 | #include <linux/oprofile.h> |
13 | #include <linux/perf_event.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
15 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
@@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); | |||
67 | MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling" | 68 | MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling" |
68 | "(report cpu_type \"timer\""); | 69 | "(report cpu_type \"timer\""); |
69 | 70 | ||
71 | static int __oprofile_hwsampler_start(void) | ||
72 | { | ||
73 | int retval; | ||
74 | |||
75 | retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); | ||
76 | if (retval) | ||
77 | return retval; | ||
78 | |||
79 | retval = hwsampler_start_all(oprofile_hw_interval); | ||
80 | if (retval) | ||
81 | hwsampler_deallocate(); | ||
82 | |||
83 | return retval; | ||
84 | } | ||
85 | |||
70 | static int oprofile_hwsampler_start(void) | 86 | static int oprofile_hwsampler_start(void) |
71 | { | 87 | { |
72 | int retval; | 88 | int retval; |
@@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void) | |||
76 | if (!hwsampler_running) | 92 | if (!hwsampler_running) |
77 | return timer_ops.start(); | 93 | return timer_ops.start(); |
78 | 94 | ||
79 | retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); | 95 | retval = perf_reserve_sampling(); |
80 | if (retval) | 96 | if (retval) |
81 | return retval; | 97 | return retval; |
82 | 98 | ||
83 | retval = hwsampler_start_all(oprofile_hw_interval); | 99 | retval = __oprofile_hwsampler_start(); |
84 | if (retval) | 100 | if (retval) |
85 | hwsampler_deallocate(); | 101 | perf_release_sampling(); |
86 | 102 | ||
87 | return retval; | 103 | return retval; |
88 | } | 104 | } |
@@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void) | |||
96 | 112 | ||
97 | hwsampler_stop_all(); | 113 | hwsampler_stop_all(); |
98 | hwsampler_deallocate(); | 114 | hwsampler_deallocate(); |
115 | perf_release_sampling(); | ||
99 | return; | 116 | return; |
100 | } | 117 | } |
101 | 118 | ||
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index bf7c73d71eef..0820362c7b0f 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -919,17 +919,23 @@ static void zpci_mem_exit(void) | |||
919 | kmem_cache_destroy(zdev_fmb_cache); | 919 | kmem_cache_destroy(zdev_fmb_cache); |
920 | } | 920 | } |
921 | 921 | ||
922 | static unsigned int s390_pci_probe; | 922 | static unsigned int s390_pci_probe = 1; |
923 | static unsigned int s390_pci_initialized; | ||
923 | 924 | ||
924 | char * __init pcibios_setup(char *str) | 925 | char * __init pcibios_setup(char *str) |
925 | { | 926 | { |
926 | if (!strcmp(str, "on")) { | 927 | if (!strcmp(str, "off")) { |
927 | s390_pci_probe = 1; | 928 | s390_pci_probe = 0; |
928 | return NULL; | 929 | return NULL; |
929 | } | 930 | } |
930 | return str; | 931 | return str; |
931 | } | 932 | } |
932 | 933 | ||
934 | bool zpci_is_enabled(void) | ||
935 | { | ||
936 | return s390_pci_initialized; | ||
937 | } | ||
938 | |||
933 | static int __init pci_base_init(void) | 939 | static int __init pci_base_init(void) |
934 | { | 940 | { |
935 | int rc; | 941 | int rc; |
@@ -961,6 +967,7 @@ static int __init pci_base_init(void) | |||
961 | if (rc) | 967 | if (rc) |
962 | goto out_find; | 968 | goto out_find; |
963 | 969 | ||
970 | s390_pci_initialized = 1; | ||
964 | return 0; | 971 | return 0; |
965 | 972 | ||
966 | out_find: | 973 | out_find: |
@@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init); | |||
978 | 985 | ||
979 | void zpci_rescan(void) | 986 | void zpci_rescan(void) |
980 | { | 987 | { |
981 | clp_rescan_pci_devices_simple(); | 988 | if (zpci_is_enabled()) |
989 | clp_rescan_pci_devices_simple(); | ||
982 | } | 990 | } |
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 9b83d080902d..60c11a629d96 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, | |||
285 | flags |= ZPCI_TABLE_PROTECTED; | 285 | flags |= ZPCI_TABLE_PROTECTED; |
286 | 286 | ||
287 | if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { | 287 | if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { |
288 | atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); | 288 | atomic64_add(nr_pages, &zdev->fmb->mapped_pages); |
289 | return dma_addr + (offset & ~PAGE_MASK); | 289 | return dma_addr + (offset & ~PAGE_MASK); |
290 | } | 290 | } |
291 | 291 | ||
@@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | |||
313 | zpci_err_hex(&dma_addr, sizeof(dma_addr)); | 313 | zpci_err_hex(&dma_addr, sizeof(dma_addr)); |
314 | } | 314 | } |
315 | 315 | ||
316 | atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages); | 316 | atomic64_add(npages, &zdev->fmb->unmapped_pages); |
317 | iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; | 317 | iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; |
318 | dma_free_iommu(zdev, iommu_page_index, npages); | 318 | dma_free_iommu(zdev, iommu_page_index, npages); |
319 | } | 319 | } |
@@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size, | |||
332 | if (!page) | 332 | if (!page) |
333 | return NULL; | 333 | return NULL; |
334 | 334 | ||
335 | atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages); | ||
336 | pa = page_to_phys(page); | 335 | pa = page_to_phys(page); |
337 | memset((void *) pa, 0, size); | 336 | memset((void *) pa, 0, size); |
338 | 337 | ||
@@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, | |||
343 | return NULL; | 342 | return NULL; |
344 | } | 343 | } |
345 | 344 | ||
345 | atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages); | ||
346 | if (dma_handle) | 346 | if (dma_handle) |
347 | *dma_handle = map; | 347 | *dma_handle = map; |
348 | return (void *) pa; | 348 | return (void *) pa; |
@@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size, | |||
352 | void *pa, dma_addr_t dma_handle, | 352 | void *pa, dma_addr_t dma_handle, |
353 | struct dma_attrs *attrs) | 353 | struct dma_attrs *attrs) |
354 | { | 354 | { |
355 | s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size), | 355 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); |
356 | DMA_BIDIRECTIONAL, NULL); | 356 | |
357 | size = PAGE_ALIGN(size); | ||
358 | atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages); | ||
359 | s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); | ||
357 | free_pages((unsigned long) pa, get_order(size)); | 360 | free_pages((unsigned long) pa, get_order(size)); |
358 | } | 361 | } |
359 | 362 | ||
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 069607209a30..01e251b1da0c 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
@@ -43,9 +43,8 @@ struct zpci_ccdf_avail { | |||
43 | u16 pec; /* PCI event code */ | 43 | u16 pec; /* PCI event code */ |
44 | } __packed; | 44 | } __packed; |
45 | 45 | ||
46 | void zpci_event_error(void *data) | 46 | static void __zpci_event_error(struct zpci_ccdf_err *ccdf) |
47 | { | 47 | { |
48 | struct zpci_ccdf_err *ccdf = data; | ||
49 | struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); | 48 | struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); |
50 | 49 | ||
51 | zpci_err("error CCDF:\n"); | 50 | zpci_err("error CCDF:\n"); |
@@ -58,9 +57,14 @@ void zpci_event_error(void *data) | |||
58 | pci_name(zdev->pdev), ccdf->pec, ccdf->fid); | 57 | pci_name(zdev->pdev), ccdf->pec, ccdf->fid); |
59 | } | 58 | } |
60 | 59 | ||
61 | void zpci_event_availability(void *data) | 60 | void zpci_event_error(void *data) |
61 | { | ||
62 | if (zpci_is_enabled()) | ||
63 | __zpci_event_error(data); | ||
64 | } | ||
65 | |||
66 | static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) | ||
62 | { | 67 | { |
63 | struct zpci_ccdf_avail *ccdf = data; | ||
64 | struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); | 68 | struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); |
65 | struct pci_dev *pdev = zdev ? zdev->pdev : NULL; | 69 | struct pci_dev *pdev = zdev ? zdev->pdev : NULL; |
66 | int ret; | 70 | int ret; |
@@ -99,8 +103,12 @@ void zpci_event_availability(void *data) | |||
99 | 103 | ||
100 | break; | 104 | break; |
101 | case 0x0304: /* Configured -> Standby */ | 105 | case 0x0304: /* Configured -> Standby */ |
102 | if (pdev) | 106 | if (pdev) { |
107 | /* Give the driver a hint that the function is | ||
108 | * already unusable. */ | ||
109 | pdev->error_state = pci_channel_io_perm_failure; | ||
103 | pci_stop_and_remove_bus_device(pdev); | 110 | pci_stop_and_remove_bus_device(pdev); |
111 | } | ||
104 | 112 | ||
105 | zdev->fh = ccdf->fh; | 113 | zdev->fh = ccdf->fh; |
106 | zpci_disable_device(zdev); | 114 | zpci_disable_device(zdev); |
@@ -110,6 +118,8 @@ void zpci_event_availability(void *data) | |||
110 | clp_rescan_pci_devices(); | 118 | clp_rescan_pci_devices(); |
111 | break; | 119 | break; |
112 | case 0x0308: /* Standby -> Reserved */ | 120 | case 0x0308: /* Standby -> Reserved */ |
121 | if (!zdev) | ||
122 | break; | ||
113 | pci_stop_root_bus(zdev->bus); | 123 | pci_stop_root_bus(zdev->bus); |
114 | pci_remove_root_bus(zdev->bus); | 124 | pci_remove_root_bus(zdev->bus); |
115 | break; | 125 | break; |
@@ -117,3 +127,9 @@ void zpci_event_availability(void *data) | |||
117 | break; | 127 | break; |
118 | } | 128 | } |
119 | } | 129 | } |
130 | |||
131 | void zpci_event_availability(void *data) | ||
132 | { | ||
133 | if (zpci_is_enabled()) | ||
134 | __zpci_event_availability(data); | ||
135 | } | ||
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index f3414ade77a3..fe7471eb0167 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild | |||
@@ -1,6 +1,7 @@ | |||
1 | 1 | ||
2 | header-y += | 2 | header-y += |
3 | 3 | ||
4 | generic-y += barrier.h | ||
4 | generic-y += clkdev.h | 5 | generic-y += clkdev.h |
5 | generic-y += trace_clock.h | 6 | generic-y += trace_clock.h |
6 | generic-y += xor.h | 7 | generic-y += xor.h |
diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h deleted file mode 100644 index 0eacb6471e6d..000000000000 --- a/arch/score/include/asm/barrier.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef _ASM_SCORE_BARRIER_H | ||
2 | #define _ASM_SCORE_BARRIER_H | ||
3 | |||
4 | #define mb() barrier() | ||
5 | #define rmb() barrier() | ||
6 | #define wmb() barrier() | ||
7 | #define smp_mb() barrier() | ||
8 | #define smp_rmb() barrier() | ||
9 | #define smp_wmb() barrier() | ||
10 | |||
11 | #define read_barrier_depends() do {} while (0) | ||
12 | #define smp_read_barrier_depends() do {} while (0) | ||
13 | |||
14 | #define set_mb(var, value) do {var = value; wmb(); } while (0) | ||
15 | |||
16 | #endif /* _ASM_SCORE_BARRIER_H */ | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9b0979f4df7a..ce298317a73e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -66,6 +66,7 @@ config SUPERH32 | |||
66 | select PERF_EVENTS | 66 | select PERF_EVENTS |
67 | select ARCH_HIBERNATION_POSSIBLE if MMU | 67 | select ARCH_HIBERNATION_POSSIBLE if MMU |
68 | select SPARSE_IRQ | 68 | select SPARSE_IRQ |
69 | select HAVE_CC_STACKPROTECTOR | ||
69 | 70 | ||
70 | config SUPERH64 | 71 | config SUPERH64 |
71 | def_bool ARCH = "sh64" | 72 | def_bool ARCH = "sh64" |
@@ -695,20 +696,6 @@ config SECCOMP | |||
695 | 696 | ||
696 | If unsure, say N. | 697 | If unsure, say N. |
697 | 698 | ||
698 | config CC_STACKPROTECTOR | ||
699 | bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" | ||
700 | depends on SUPERH32 | ||
701 | help | ||
702 | This option turns on the -fstack-protector GCC feature. This | ||
703 | feature puts, at the beginning of functions, a canary value on | ||
704 | the stack just before the return address, and validates | ||
705 | the value just before actually returning. Stack based buffer | ||
706 | overflows (that need to overwrite this return address) now also | ||
707 | overwrite the canary, which gets detected and the attack is then | ||
708 | neutralized via a kernel panic. | ||
709 | |||
710 | This feature requires gcc version 4.2 or above. | ||
711 | |||
712 | config SMP | 699 | config SMP |
713 | bool "Symmetric multi-processing support" | 700 | bool "Symmetric multi-processing support" |
714 | depends on SYS_SUPPORTS_SMP | 701 | depends on SYS_SUPPORTS_SMP |
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index aed701c7b11b..d4d16e4be07c 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
@@ -199,10 +199,6 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y) | |||
199 | KBUILD_CFLAGS += -fasynchronous-unwind-tables | 199 | KBUILD_CFLAGS += -fasynchronous-unwind-tables |
200 | endif | 200 | endif |
201 | 201 | ||
202 | ifeq ($(CONFIG_CC_STACKPROTECTOR),y) | ||
203 | KBUILD_CFLAGS += -fstack-protector | ||
204 | endif | ||
205 | |||
206 | libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) | 202 | libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) |
207 | libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) | 203 | libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) |
208 | 204 | ||
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index 72c103dae300..43715308b068 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h | |||
@@ -26,29 +26,14 @@ | |||
26 | #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) | 26 | #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) |
27 | #define mb() __asm__ __volatile__ ("synco": : :"memory") | 27 | #define mb() __asm__ __volatile__ ("synco": : :"memory") |
28 | #define rmb() mb() | 28 | #define rmb() mb() |
29 | #define wmb() __asm__ __volatile__ ("synco": : :"memory") | 29 | #define wmb() mb() |
30 | #define ctrl_barrier() __icbi(PAGE_OFFSET) | 30 | #define ctrl_barrier() __icbi(PAGE_OFFSET) |
31 | #define read_barrier_depends() do { } while(0) | ||
32 | #else | 31 | #else |
33 | #define mb() __asm__ __volatile__ ("": : :"memory") | ||
34 | #define rmb() mb() | ||
35 | #define wmb() __asm__ __volatile__ ("": : :"memory") | ||
36 | #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") | 32 | #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") |
37 | #define read_barrier_depends() do { } while(0) | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_SMP | ||
41 | #define smp_mb() mb() | ||
42 | #define smp_rmb() rmb() | ||
43 | #define smp_wmb() wmb() | ||
44 | #define smp_read_barrier_depends() read_barrier_depends() | ||
45 | #else | ||
46 | #define smp_mb() barrier() | ||
47 | #define smp_rmb() barrier() | ||
48 | #define smp_wmb() barrier() | ||
49 | #define smp_read_barrier_depends() do { } while(0) | ||
50 | #endif | 33 | #endif |
51 | 34 | ||
52 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 35 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
53 | 36 | ||
37 | #include <asm-generic/barrier.h> | ||
38 | |||
54 | #endif /* __ASM_SH_BARRIER_H */ | 39 | #endif /* __ASM_SH_BARRIER_H */ |
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h index c1b76654ee76..ae69eda288f4 100644 --- a/arch/sparc/include/asm/barrier_32.h +++ b/arch/sparc/include/asm/barrier_32.h | |||
@@ -1,15 +1,7 @@ | |||
1 | #ifndef __SPARC_BARRIER_H | 1 | #ifndef __SPARC_BARRIER_H |
2 | #define __SPARC_BARRIER_H | 2 | #define __SPARC_BARRIER_H |
3 | 3 | ||
4 | /* XXX Change this if we ever use a PSO mode kernel. */ | 4 | #include <asm/processor.h> /* for nop() */ |
5 | #define mb() __asm__ __volatile__ ("" : : : "memory") | 5 | #include <asm-generic/barrier.h> |
6 | #define rmb() mb() | ||
7 | #define wmb() mb() | ||
8 | #define read_barrier_depends() do { } while(0) | ||
9 | #define set_mb(__var, __value) do { __var = __value; mb(); } while(0) | ||
10 | #define smp_mb() __asm__ __volatile__("":::"memory") | ||
11 | #define smp_rmb() __asm__ __volatile__("":::"memory") | ||
12 | #define smp_wmb() __asm__ __volatile__("":::"memory") | ||
13 | #define smp_read_barrier_depends() do { } while(0) | ||
14 | 6 | ||
15 | #endif /* !(__SPARC_BARRIER_H) */ | 7 | #endif /* !(__SPARC_BARRIER_H) */ |
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 95d45986f908..b5aad964558e 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h | |||
@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ | |||
53 | 53 | ||
54 | #define smp_read_barrier_depends() do { } while(0) | 54 | #define smp_read_barrier_depends() do { } while(0) |
55 | 55 | ||
56 | #define smp_store_release(p, v) \ | ||
57 | do { \ | ||
58 | compiletime_assert_atomic_type(*p); \ | ||
59 | barrier(); \ | ||
60 | ACCESS_ONCE(*p) = (v); \ | ||
61 | } while (0) | ||
62 | |||
63 | #define smp_load_acquire(p) \ | ||
64 | ({ \ | ||
65 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
66 | compiletime_assert_atomic_type(*p); \ | ||
67 | barrier(); \ | ||
68 | ___p1; \ | ||
69 | }) | ||
70 | |||
56 | #endif /* !(__SPARC64_BARRIER_H) */ | 71 | #endif /* !(__SPARC64_BARRIER_H) */ |
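The sparc64 hunk above adds smp_store_release()/smp_load_acquire(). As a hedged illustration of the pattern these helpers exist for -- not code from this patch -- the following user-space C11 sketch uses release/acquire atomics as a stand-in for the kernel primitives: a producer publishes a payload behind a flag, and a consumer that observes the flag is guaranteed to also see the payload.

#include <stdatomic.h>
#include <stdio.h>

struct msg {
        int payload;
        atomic_int ready;
};

static struct msg m;

/* Publisher: the payload store cannot be reordered past the flag store. */
static void producer(int v)
{
        m.payload = v;
        atomic_store_explicit(&m.ready, 1, memory_order_release);
}

/* Consumer: once the flag is seen, the payload written before it is visible. */
static int consumer(void)
{
        if (!atomic_load_explicit(&m.ready, memory_order_acquire))
                return -1;
        return m.payload;
}

int main(void)
{
        producer(42);
        printf("%d\n", consumer());     /* prints 42 */
        return 0;
}

The sparc64 macros added here rely only on barrier() around ACCESS_ONCE(), which the patch evidently considers sufficient ordering on this architecture.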
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 218b6b23c378..01fe9946d388 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c | |||
@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
497 | case BPF_S_ALU_MUL_K: /* A *= K */ | 497 | case BPF_S_ALU_MUL_K: /* A *= K */ |
498 | emit_alu_K(MUL, K); | 498 | emit_alu_K(MUL, K); |
499 | break; | 499 | break; |
500 | case BPF_S_ALU_DIV_K: /* A /= K */ | 500 | case BPF_S_ALU_DIV_K: /* A /= K with K != 0 */ |
501 | emit_alu_K(MUL, K); | 501 | if (K == 1) |
502 | emit_read_y(r_A); | 502 | break; |
503 | emit_write_y(G0); | ||
504 | #ifdef CONFIG_SPARC32 | ||
505 | /* The Sparc v8 architecture requires | ||
506 | * three instructions between a %y | ||
507 | * register write and the first use. | ||
508 | */ | ||
509 | emit_nop(); | ||
510 | emit_nop(); | ||
511 | emit_nop(); | ||
512 | #endif | ||
513 | emit_alu_K(DIV, K); | ||
503 | break; | 514 | break; |
504 | case BPF_S_ALU_DIV_X: /* A /= X; */ | 515 | case BPF_S_ALU_DIV_X: /* A /= X; */ |
505 | emit_cmpi(r_X, 0); | 516 | emit_cmpi(r_X, 0); |
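For context on the new comment in the BPF_S_ALU_DIV_K case: the SPARC v8 divide instructions take the %y register as the upper 32 bits of a 64-bit dividend, so the JIT must clear %y (via emit_write_y(G0)) before dividing, and the three nops only satisfy the documented write-to-use delay. A rough host-side C model of what the emitted sequence computes -- illustrative only, not JIT output:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the sequence now emitted for BPF_S_ALU_DIV_K: %y supplies the
 * high 32 bits of the dividend, so it is written with zero (from %g0)
 * before the divide; the three nops only satisfy the v8 write-to-use
 * delay and do not affect the result.
 */
static uint32_t div_k_model(uint32_t a, uint32_t k)
{
        uint32_t y = 0;                                 /* emit_write_y(G0)   */
        uint64_t dividend = ((uint64_t)y << 32) | a;    /* %y:A as 64 bits    */
        return (uint32_t)(dividend / k);                /* emit_alu_K(DIV, K) */
}

int main(void)
{
        printf("%u\n", div_k_model(100, 7));    /* prints 14 */
        return 0;
}

The K == 1 early break added above simply skips the divide entirely, since A is unchanged in that case.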
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index a9a73da5865d..b5a05d050a8f 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h | |||
@@ -22,59 +22,6 @@ | |||
22 | #include <arch/spr_def.h> | 22 | #include <arch/spr_def.h> |
23 | #include <asm/timex.h> | 23 | #include <asm/timex.h> |
24 | 24 | ||
25 | /* | ||
26 | * read_barrier_depends - Flush all pending reads that subsequents reads | ||
27 | * depend on. | ||
28 | * | ||
29 | * No data-dependent reads from memory-like regions are ever reordered | ||
30 | * over this barrier. All reads preceding this primitive are guaranteed | ||
31 | * to access memory (but not necessarily other CPUs' caches) before any | ||
32 | * reads following this primitive that depend on the data return by | ||
33 | * any of the preceding reads. This primitive is much lighter weight than | ||
34 | * rmb() on most CPUs, and is never heavier weight than is | ||
35 | * rmb(). | ||
36 | * | ||
37 | * These ordering constraints are respected by both the local CPU | ||
38 | * and the compiler. | ||
39 | * | ||
40 | * Ordering is not guaranteed by anything other than these primitives, | ||
41 | * not even by data dependencies. See the documentation for | ||
42 | * memory_barrier() for examples and URLs to more information. | ||
43 | * | ||
44 | * For example, the following code would force ordering (the initial | ||
45 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
46 | * | ||
47 | * <programlisting> | ||
48 | * CPU 0 CPU 1 | ||
49 | * | ||
50 | * b = 2; | ||
51 | * memory_barrier(); | ||
52 | * p = &b; q = p; | ||
53 | * read_barrier_depends(); | ||
54 | * d = *q; | ||
55 | * </programlisting> | ||
56 | * | ||
57 | * because the read of "*q" depends on the read of "p" and these | ||
58 | * two reads are separated by a read_barrier_depends(). However, | ||
59 | * the following code, with the same initial values for "a" and "b": | ||
60 | * | ||
61 | * <programlisting> | ||
62 | * CPU 0 CPU 1 | ||
63 | * | ||
64 | * a = 2; | ||
65 | * memory_barrier(); | ||
66 | * b = 3; y = b; | ||
67 | * read_barrier_depends(); | ||
68 | * x = a; | ||
69 | * </programlisting> | ||
70 | * | ||
71 | * does not enforce ordering, since there is no data dependency between | ||
72 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
73 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
74 | * in cases like this where there are no data dependencies. | ||
75 | */ | ||
76 | #define read_barrier_depends() do { } while (0) | ||
77 | |||
78 | #define __sync() __insn_mf() | 25 | #define __sync() __insn_mf() |
79 | 26 | ||
80 | #include <hv/syscall_public.h> | 27 | #include <hv/syscall_public.h> |
@@ -125,20 +72,7 @@ mb_incoherent(void) | |||
125 | #define mb() fast_mb() | 72 | #define mb() fast_mb() |
126 | #define iob() fast_iob() | 73 | #define iob() fast_iob() |
127 | 74 | ||
128 | #ifdef CONFIG_SMP | 75 | #include <asm-generic/barrier.h> |
129 | #define smp_mb() mb() | ||
130 | #define smp_rmb() rmb() | ||
131 | #define smp_wmb() wmb() | ||
132 | #define smp_read_barrier_depends() read_barrier_depends() | ||
133 | #else | ||
134 | #define smp_mb() barrier() | ||
135 | #define smp_rmb() barrier() | ||
136 | #define smp_wmb() barrier() | ||
137 | #define smp_read_barrier_depends() do { } while (0) | ||
138 | #endif | ||
139 | |||
140 | #define set_mb(var, value) \ | ||
141 | do { var = value; mb(); } while (0) | ||
142 | 76 | ||
143 | #endif /* !__ASSEMBLY__ */ | 77 | #endif /* !__ASSEMBLY__ */ |
144 | #endif /* _ASM_TILE_BARRIER_H */ | 78 | #endif /* _ASM_TILE_BARRIER_H */ |
diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h index a6620e5336b6..83d6a520f4bd 100644 --- a/arch/unicore32/include/asm/barrier.h +++ b/arch/unicore32/include/asm/barrier.h | |||
@@ -14,15 +14,6 @@ | |||
14 | #define dsb() __asm__ __volatile__ ("" : : : "memory") | 14 | #define dsb() __asm__ __volatile__ ("" : : : "memory") |
15 | #define dmb() __asm__ __volatile__ ("" : : : "memory") | 15 | #define dmb() __asm__ __volatile__ ("" : : : "memory") |
16 | 16 | ||
17 | #define mb() barrier() | 17 | #include <asm-generic/barrier.h> |
18 | #define rmb() barrier() | ||
19 | #define wmb() barrier() | ||
20 | #define smp_mb() barrier() | ||
21 | #define smp_rmb() barrier() | ||
22 | #define smp_wmb() barrier() | ||
23 | #define read_barrier_depends() do { } while (0) | ||
24 | #define smp_read_barrier_depends() do { } while (0) | ||
25 | |||
26 | #define set_mb(var, value) do { var = value; smp_mb(); } while (0) | ||
27 | 18 | ||
28 | #endif /* __UNICORE_BARRIER_H__ */ | 19 | #endif /* __UNICORE_BARRIER_H__ */ |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 64199bc08d66..cb9af474dfca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -125,6 +125,7 @@ config X86 | |||
125 | select RTC_LIB | 125 | select RTC_LIB |
126 | select HAVE_DEBUG_STACKOVERFLOW | 126 | select HAVE_DEBUG_STACKOVERFLOW |
127 | select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 | 127 | select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 |
128 | select HAVE_CC_STACKPROTECTOR | ||
128 | 129 | ||
129 | config INSTRUCTION_DECODER | 130 | config INSTRUCTION_DECODER |
130 | def_bool y | 131 | def_bool y |
@@ -438,42 +439,26 @@ config X86_INTEL_CE | |||
438 | This option compiles in support for the CE4100 SOC for settop | 439 | This option compiles in support for the CE4100 SOC for settop |
439 | boxes and media devices. | 440 | boxes and media devices. |
440 | 441 | ||
441 | config X86_WANT_INTEL_MID | 442 | config X86_INTEL_MID |
442 | bool "Intel MID platform support" | 443 | bool "Intel MID platform support" |
443 | depends on X86_32 | 444 | depends on X86_32 |
444 | depends on X86_EXTENDED_PLATFORM | 445 | depends on X86_EXTENDED_PLATFORM |
445 | ---help--- | ||
446 | Select to build a kernel capable of supporting Intel MID platform | ||
447 | systems which do not have the PCI legacy interfaces (Moorestown, | ||
448 | Medfield). If you are building for a PC class system say N here. | ||
449 | |||
450 | if X86_WANT_INTEL_MID | ||
451 | |||
452 | config X86_INTEL_MID | ||
453 | bool | ||
454 | |||
455 | config X86_MDFLD | ||
456 | bool "Medfield MID platform" | ||
457 | depends on PCI | 446 | depends on PCI |
458 | depends on PCI_GOANY | 447 | depends on PCI_GOANY |
459 | depends on X86_IO_APIC | 448 | depends on X86_IO_APIC |
460 | select X86_INTEL_MID | ||
461 | select SFI | 449 | select SFI |
450 | select I2C | ||
462 | select DW_APB_TIMER | 451 | select DW_APB_TIMER |
463 | select APB_TIMER | 452 | select APB_TIMER |
464 | select I2C | ||
465 | select SPI | ||
466 | select INTEL_SCU_IPC | 453 | select INTEL_SCU_IPC |
467 | select X86_PLATFORM_DEVICES | ||
468 | select MFD_INTEL_MSIC | 454 | select MFD_INTEL_MSIC |
469 | ---help--- | 455 | ---help--- |
470 | Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin | 456 | Select to build a kernel capable of supporting Intel MID (Mobile |
471 | Internet Device(MID) platform. | 457 | Internet Device) platform systems which do not have the PCI legacy |
472 | Unlike standard x86 PCs, Medfield does not have many legacy devices | 458 | interfaces. If you are building for a PC class system say N here. |
473 | nor standard legacy replacement devices/features. e.g. Medfield does | ||
474 | not contain i8259, i8254, HPET, legacy BIOS, most of the io ports. | ||
475 | 459 | ||
476 | endif | 460 | Intel MID platforms are based on an Intel processor and chipset which |
461 | consume less power than most of the x86 derivatives. | ||
477 | 462 | ||
478 | config X86_INTEL_LPSS | 463 | config X86_INTEL_LPSS |
479 | bool "Intel Low Power Subsystem Support" | 464 | bool "Intel Low Power Subsystem Support" |
@@ -1080,10 +1065,6 @@ config MICROCODE_OLD_INTERFACE | |||
1080 | def_bool y | 1065 | def_bool y |
1081 | depends on MICROCODE | 1066 | depends on MICROCODE |
1082 | 1067 | ||
1083 | config MICROCODE_INTEL_LIB | ||
1084 | def_bool y | ||
1085 | depends on MICROCODE_INTEL | ||
1086 | |||
1087 | config MICROCODE_INTEL_EARLY | 1068 | config MICROCODE_INTEL_EARLY |
1088 | def_bool n | 1069 | def_bool n |
1089 | 1070 | ||
@@ -1617,22 +1598,6 @@ config SECCOMP | |||
1617 | 1598 | ||
1618 | If unsure, say Y. Only embedded should say N here. | 1599 | If unsure, say Y. Only embedded should say N here. |
1619 | 1600 | ||
1620 | config CC_STACKPROTECTOR | ||
1621 | bool "Enable -fstack-protector buffer overflow detection" | ||
1622 | ---help--- | ||
1623 | This option turns on the -fstack-protector GCC feature. This | ||
1624 | feature puts, at the beginning of functions, a canary value on | ||
1625 | the stack just before the return address, and validates | ||
1626 | the value just before actually returning. Stack based buffer | ||
1627 | overflows (that need to overwrite this return address) now also | ||
1628 | overwrite the canary, which gets detected and the attack is then | ||
1629 | neutralized via a kernel panic. | ||
1630 | |||
1631 | This feature requires gcc version 4.2 or above, or a distribution | ||
1632 | gcc with the feature backported. Older versions are automatically | ||
1633 | detected and for those versions, this configuration option is | ||
1634 | ignored. (and a warning is printed during bootup) | ||
1635 | |||
1636 | source kernel/Kconfig.hz | 1601 | source kernel/Kconfig.hz |
1637 | 1602 | ||
1638 | config KEXEC | 1603 | config KEXEC |
@@ -1728,16 +1693,67 @@ config RELOCATABLE | |||
1728 | 1693 | ||
1729 | Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address | 1694 | Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address |
1730 | it has been loaded at and the compile time physical address | 1695 | it has been loaded at and the compile time physical address |
1731 | (CONFIG_PHYSICAL_START) is ignored. | 1696 | (CONFIG_PHYSICAL_START) is used as the minimum location. |
1732 | 1697 | ||
1733 | # Relocation on x86-32 needs some additional build support | 1698 | config RANDOMIZE_BASE |
1699 | bool "Randomize the address of the kernel image" | ||
1700 | depends on RELOCATABLE | ||
1701 | depends on !HIBERNATION | ||
1702 | default n | ||
1703 | ---help--- | ||
1704 | Randomizes the physical and virtual address at which the | ||
1705 | kernel image is decompressed, as a security feature that | ||
1706 | deters exploit attempts relying on knowledge of the location | ||
1707 | of kernel internals. | ||
1708 | |||
1709 | Entropy is generated using the RDRAND instruction if it is | ||
1710 | supported. If RDTSC is supported, it is used as well. If | ||
1711 | neither RDRAND nor RDTSC are supported, then randomness is | ||
1712 | read from the i8254 timer. | ||
1713 | |||
1714 | The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET, | ||
1715 | and aligned according to PHYSICAL_ALIGN. Since the kernel is | ||
1716 | built using 2GiB addressing, and PHYSICAL_ALIGN must be at a | ||
1717 | minimum of 2MiB, only 10 bits of entropy are theoretically | ||
1718 | possible. At best, due to page table layouts, 64-bit can use | ||
1719 | 9 bits of entropy and 32-bit uses 8 bits. | ||
1720 | |||
1721 | If unsure, say N. | ||
1722 | |||
1723 | config RANDOMIZE_BASE_MAX_OFFSET | ||
1724 | hex "Maximum kASLR offset allowed" if EXPERT | ||
1725 | depends on RANDOMIZE_BASE | ||
1726 | range 0x0 0x20000000 if X86_32 | ||
1727 | default "0x20000000" if X86_32 | ||
1728 | range 0x0 0x40000000 if X86_64 | ||
1729 | default "0x40000000" if X86_64 | ||
1730 | ---help--- | ||
1731 | The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical | ||
1732 | memory is used to determine the maximal offset in bytes that will | ||
1733 | be applied to the kernel when kernel Address Space Layout | ||
1734 | Randomization (kASLR) is active. This must be a multiple of | ||
1735 | PHYSICAL_ALIGN. | ||
1736 | |||
1737 | On 32-bit this is limited to 512MiB by page table layouts. The | ||
1738 | default is 512MiB. | ||
1739 | |||
1740 | On 64-bit this is limited by how the kernel fixmap page table is | ||
1741 | positioned, so this cannot be larger than 1GiB currently. Without | ||
1742 | RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel | ||
1743 | and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the | ||
1744 | modules area will shrink to compensate, up to the current maximum | ||
1745 | 1GiB to 1GiB split. The default is 1GiB. | ||
1746 | |||
1747 | If unsure, leave at the default value. | ||
1748 | |||
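To make the entropy figures quoted in the two help texts above concrete, here is a small stand-alone check of the slot arithmetic, assuming the defaults introduced by this patch (PHYSICAL_ALIGN = 0x200000 and the per-architecture maximum offsets):

#include <stdio.h>

int main(void)
{
        unsigned long align = 0x200000UL;       /* new CONFIG_PHYSICAL_ALIGN default */
        unsigned long max64 = 0x40000000UL;     /* RANDOMIZE_BASE_MAX_OFFSET, 64-bit */
        unsigned long max32 = 0x20000000UL;     /* RANDOMIZE_BASE_MAX_OFFSET, 32-bit */

        printf("64-bit: %lu slots\n", max64 / align);   /* 512 -> 9 bits */
        printf("32-bit: %lu slots\n", max32 / align);   /* 256 -> 8 bits */
        return 0;
}

Each aligned slot inside the allowed window is one candidate load address, so 512 slots correspond to the 9 bits of entropy (and 256 slots to the 8 bits) cited in the help text.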
1749 | # Relocation on x86 needs some additional build support | ||
1734 | config X86_NEED_RELOCS | 1750 | config X86_NEED_RELOCS |
1735 | def_bool y | 1751 | def_bool y |
1736 | depends on X86_32 && RELOCATABLE | 1752 | depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE) |
1737 | 1753 | ||
1738 | config PHYSICAL_ALIGN | 1754 | config PHYSICAL_ALIGN |
1739 | hex "Alignment value to which kernel should be aligned" | 1755 | hex "Alignment value to which kernel should be aligned" |
1740 | default "0x1000000" | 1756 | default "0x200000" |
1741 | range 0x2000 0x1000000 if X86_32 | 1757 | range 0x2000 0x1000000 if X86_32 |
1742 | range 0x200000 0x1000000 if X86_64 | 1758 | range 0x200000 0x1000000 if X86_64 |
1743 | ---help--- | 1759 | ---help--- |
@@ -2393,6 +2409,14 @@ config X86_DMA_REMAP | |||
2393 | bool | 2409 | bool |
2394 | depends on STA2X11 | 2410 | depends on STA2X11 |
2395 | 2411 | ||
2412 | config IOSF_MBI | ||
2413 | bool | ||
2414 | depends on PCI | ||
2415 | ---help--- | ||
2416 | To be selected by modules requiring access to the Intel OnChip System | ||
2417 | Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms | ||
2418 | enumerable by PCI. | ||
2419 | |||
2396 | source "net/Kconfig" | 2420 | source "net/Kconfig" |
2397 | 2421 | ||
2398 | source "drivers/Kconfig" | 2422 | source "drivers/Kconfig" |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 57d021507120..13b22e0f681d 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -89,13 +89,11 @@ else | |||
89 | KBUILD_CFLAGS += -maccumulate-outgoing-args | 89 | KBUILD_CFLAGS += -maccumulate-outgoing-args |
90 | endif | 90 | endif |
91 | 91 | ||
92 | # Make sure compiler does not have buggy stack-protector support. | ||
92 | ifdef CONFIG_CC_STACKPROTECTOR | 93 | ifdef CONFIG_CC_STACKPROTECTOR |
93 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh | 94 | cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh |
94 | ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) | 95 | ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) |
95 | stackp-y := -fstack-protector | 96 | $(warning stack-protector enabled but compiler support broken) |
96 | KBUILD_CFLAGS += $(stackp-y) | ||
97 | else | ||
98 | $(warning stack protector enabled but no compiler support) | ||
99 | endif | 97 | endif |
100 | endif | 98 | endif |
101 | 99 | ||
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index d9c11956fce0..de7066918005 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
@@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage | |||
20 | targets += fdimage fdimage144 fdimage288 image.iso mtools.conf | 20 | targets += fdimage fdimage144 fdimage288 image.iso mtools.conf |
21 | subdir- := compressed | 21 | subdir- := compressed |
22 | 22 | ||
23 | setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o | 23 | setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o |
24 | setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o | 24 | setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o |
25 | setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o | 25 | setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o |
26 | setup-y += video-mode.o version.o | 26 | setup-y += video-mode.o version.o |
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S index 1dfbf64e52a2..d401b4a262b0 100644 --- a/arch/x86/boot/bioscall.S +++ b/arch/x86/boot/bioscall.S | |||
@@ -1,6 +1,6 @@ | |||
1 | /* ----------------------------------------------------------------------- | 1 | /* ----------------------------------------------------------------------- |
2 | * | 2 | * |
3 | * Copyright 2009 Intel Corporation; author H. Peter Anvin | 3 | * Copyright 2009-2014 Intel Corporation; author H. Peter Anvin |
4 | * | 4 | * |
5 | * This file is part of the Linux kernel, and is made available under | 5 | * This file is part of the Linux kernel, and is made available under |
6 | * the terms of the GNU General Public License version 2 or (at your | 6 | * the terms of the GNU General Public License version 2 or (at your |
@@ -13,8 +13,8 @@ | |||
13 | * touching registers they shouldn't be. | 13 | * touching registers they shouldn't be. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | .code16gcc | 16 | .code16 |
17 | .text | 17 | .section ".inittext","ax" |
18 | .globl intcall | 18 | .globl intcall |
19 | .type intcall, @function | 19 | .type intcall, @function |
20 | intcall: | 20 | intcall: |
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index ef72baeff484..50f8c5e0f37e 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h | |||
@@ -26,9 +26,8 @@ | |||
26 | #include <asm/boot.h> | 26 | #include <asm/boot.h> |
27 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
28 | #include "bitops.h" | 28 | #include "bitops.h" |
29 | #include <asm/cpufeature.h> | ||
30 | #include <asm/processor-flags.h> | ||
31 | #include "ctype.h" | 29 | #include "ctype.h" |
30 | #include "cpuflags.h" | ||
32 | 31 | ||
33 | /* Useful macros */ | 32 | /* Useful macros */ |
34 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) | 33 | #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
@@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option) | |||
307 | return __cmdline_find_option_bool(cmd_line_ptr, option); | 306 | return __cmdline_find_option_bool(cmd_line_ptr, option); |
308 | } | 307 | } |
309 | 308 | ||
310 | |||
311 | /* cpu.c, cpucheck.c */ | 309 | /* cpu.c, cpucheck.c */ |
312 | struct cpu_features { | ||
313 | int level; /* Family, or 64 for x86-64 */ | ||
314 | int model; | ||
315 | u32 flags[NCAPINTS]; | ||
316 | }; | ||
317 | extern struct cpu_features cpu; | ||
318 | int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); | 310 | int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr); |
319 | int validate_cpu(void); | 311 | int validate_cpu(void); |
320 | 312 | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index c8a6792e7842..0fcd9133790c 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -28,7 +28,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include | |||
28 | 28 | ||
29 | VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | 29 | VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ |
30 | $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ | 30 | $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ |
31 | $(obj)/piggy.o | 31 | $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o |
32 | 32 | ||
33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
34 | 34 | ||
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c new file mode 100644 index 000000000000..90a21f430117 --- /dev/null +++ b/arch/x86/boot/compressed/aslr.c | |||
@@ -0,0 +1,316 @@ | |||
1 | #include "misc.h" | ||
2 | |||
3 | #ifdef CONFIG_RANDOMIZE_BASE | ||
4 | #include <asm/msr.h> | ||
5 | #include <asm/archrandom.h> | ||
6 | #include <asm/e820.h> | ||
7 | |||
8 | #include <generated/compile.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/uts.h> | ||
11 | #include <linux/utsname.h> | ||
12 | #include <generated/utsrelease.h> | ||
13 | |||
14 | /* Simplified build-specific string for starting entropy. */ | ||
15 | static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" | ||
16 | LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; | ||
17 | |||
18 | #define I8254_PORT_CONTROL 0x43 | ||
19 | #define I8254_PORT_COUNTER0 0x40 | ||
20 | #define I8254_CMD_READBACK 0xC0 | ||
21 | #define I8254_SELECT_COUNTER0 0x02 | ||
22 | #define I8254_STATUS_NOTREADY 0x40 | ||
23 | static inline u16 i8254(void) | ||
24 | { | ||
25 | u16 status, timer; | ||
26 | |||
27 | do { | ||
28 | outb(I8254_PORT_CONTROL, | ||
29 | I8254_CMD_READBACK | I8254_SELECT_COUNTER0); | ||
30 | status = inb(I8254_PORT_COUNTER0); | ||
31 | timer = inb(I8254_PORT_COUNTER0); | ||
32 | timer |= inb(I8254_PORT_COUNTER0) << 8; | ||
33 | } while (status & I8254_STATUS_NOTREADY); | ||
34 | |||
35 | return timer; | ||
36 | } | ||
37 | |||
38 | static unsigned long rotate_xor(unsigned long hash, const void *area, | ||
39 | size_t size) | ||
40 | { | ||
41 | size_t i; | ||
42 | unsigned long *ptr = (unsigned long *)area; | ||
43 | |||
44 | for (i = 0; i < size / sizeof(hash); i++) { | ||
45 | /* Rotate by odd number of bits and XOR. */ | ||
46 | hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); | ||
47 | hash ^= ptr[i]; | ||
48 | } | ||
49 | |||
50 | return hash; | ||
51 | } | ||
52 | |||
53 | /* Attempt to create a simple but unpredictable starting entropy. */ | ||
54 | static unsigned long get_random_boot(void) | ||
55 | { | ||
56 | unsigned long hash = 0; | ||
57 | |||
58 | hash = rotate_xor(hash, build_str, sizeof(build_str)); | ||
59 | hash = rotate_xor(hash, real_mode, sizeof(*real_mode)); | ||
60 | |||
61 | return hash; | ||
62 | } | ||
63 | |||
64 | static unsigned long get_random_long(void) | ||
65 | { | ||
66 | #ifdef CONFIG_X86_64 | ||
67 | const unsigned long mix_const = 0x5d6008cbf3848dd3UL; | ||
68 | #else | ||
69 | const unsigned long mix_const = 0x3f39e593UL; | ||
70 | #endif | ||
71 | unsigned long raw, random = get_random_boot(); | ||
72 | bool use_i8254 = true; | ||
73 | |||
74 | debug_putstr("KASLR using"); | ||
75 | |||
76 | if (has_cpuflag(X86_FEATURE_RDRAND)) { | ||
77 | debug_putstr(" RDRAND"); | ||
78 | if (rdrand_long(&raw)) { | ||
79 | random ^= raw; | ||
80 | use_i8254 = false; | ||
81 | } | ||
82 | } | ||
83 | |||
84 | if (has_cpuflag(X86_FEATURE_TSC)) { | ||
85 | debug_putstr(" RDTSC"); | ||
86 | rdtscll(raw); | ||
87 | |||
88 | random ^= raw; | ||
89 | use_i8254 = false; | ||
90 | } | ||
91 | |||
92 | if (use_i8254) { | ||
93 | debug_putstr(" i8254"); | ||
94 | random ^= i8254(); | ||
95 | } | ||
96 | |||
97 | /* Circular multiply for better bit diffusion */ | ||
98 | asm("mul %3" | ||
99 | : "=a" (random), "=d" (raw) | ||
100 | : "a" (random), "rm" (mix_const)); | ||
101 | random += raw; | ||
102 | |||
103 | debug_putstr("...\n"); | ||
104 | |||
105 | return random; | ||
106 | } | ||
107 | |||
108 | struct mem_vector { | ||
109 | unsigned long start; | ||
110 | unsigned long size; | ||
111 | }; | ||
112 | |||
113 | #define MEM_AVOID_MAX 5 | ||
114 | struct mem_vector mem_avoid[MEM_AVOID_MAX]; | ||
115 | |||
116 | static bool mem_contains(struct mem_vector *region, struct mem_vector *item) | ||
117 | { | ||
118 | /* Item at least partially before region. */ | ||
119 | if (item->start < region->start) | ||
120 | return false; | ||
121 | /* Item at least partially after region. */ | ||
122 | if (item->start + item->size > region->start + region->size) | ||
123 | return false; | ||
124 | return true; | ||
125 | } | ||
126 | |||
127 | static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two) | ||
128 | { | ||
129 | /* Item one is entirely before item two. */ | ||
130 | if (one->start + one->size <= two->start) | ||
131 | return false; | ||
132 | /* Item one is entirely after item two. */ | ||
133 | if (one->start >= two->start + two->size) | ||
134 | return false; | ||
135 | return true; | ||
136 | } | ||
137 | |||
138 | static void mem_avoid_init(unsigned long input, unsigned long input_size, | ||
139 | unsigned long output, unsigned long output_size) | ||
140 | { | ||
141 | u64 initrd_start, initrd_size; | ||
142 | u64 cmd_line, cmd_line_size; | ||
143 | unsigned long unsafe, unsafe_len; | ||
144 | char *ptr; | ||
145 | |||
146 | /* | ||
147 | * Avoid the region that is unsafe to overlap during | ||
148 | * decompression (see calculations at top of misc.c). | ||
149 | */ | ||
150 | unsafe_len = (output_size >> 12) + 32768 + 18; | ||
151 | unsafe = (unsigned long)input + input_size - unsafe_len; | ||
152 | mem_avoid[0].start = unsafe; | ||
153 | mem_avoid[0].size = unsafe_len; | ||
154 | |||
155 | /* Avoid initrd. */ | ||
156 | initrd_start = (u64)real_mode->ext_ramdisk_image << 32; | ||
157 | initrd_start |= real_mode->hdr.ramdisk_image; | ||
158 | initrd_size = (u64)real_mode->ext_ramdisk_size << 32; | ||
159 | initrd_size |= real_mode->hdr.ramdisk_size; | ||
160 | mem_avoid[1].start = initrd_start; | ||
161 | mem_avoid[1].size = initrd_size; | ||
162 | |||
163 | /* Avoid kernel command line. */ | ||
164 | cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32; | ||
165 | cmd_line |= real_mode->hdr.cmd_line_ptr; | ||
166 | /* Calculate size of cmd_line. */ | ||
167 | ptr = (char *)(unsigned long)cmd_line; | ||
168 | for (cmd_line_size = 0; ptr[cmd_line_size++]; ) | ||
169 | ; | ||
170 | mem_avoid[2].start = cmd_line; | ||
171 | mem_avoid[2].size = cmd_line_size; | ||
172 | |||
173 | /* Avoid heap memory. */ | ||
174 | mem_avoid[3].start = (unsigned long)free_mem_ptr; | ||
175 | mem_avoid[3].size = BOOT_HEAP_SIZE; | ||
176 | |||
177 | /* Avoid stack memory. */ | ||
178 | mem_avoid[4].start = (unsigned long)free_mem_end_ptr; | ||
179 | mem_avoid[4].size = BOOT_STACK_SIZE; | ||
180 | } | ||
181 | |||
182 | /* Does this memory vector overlap a known avoided area? */ | ||
183 | bool mem_avoid_overlap(struct mem_vector *img) | ||
184 | { | ||
185 | int i; | ||
186 | |||
187 | for (i = 0; i < MEM_AVOID_MAX; i++) { | ||
188 | if (mem_overlaps(img, &mem_avoid[i])) | ||
189 | return true; | ||
190 | } | ||
191 | |||
192 | return false; | ||
193 | } | ||
194 | |||
195 | unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; | ||
196 | unsigned long slot_max = 0; | ||
197 | |||
198 | static void slots_append(unsigned long addr) | ||
199 | { | ||
200 | /* Overflowing the slots list should be impossible. */ | ||
201 | if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET / | ||
202 | CONFIG_PHYSICAL_ALIGN) | ||
203 | return; | ||
204 | |||
205 | slots[slot_max++] = addr; | ||
206 | } | ||
207 | |||
208 | static unsigned long slots_fetch_random(void) | ||
209 | { | ||
210 | /* Handle case of no slots stored. */ | ||
211 | if (slot_max == 0) | ||
212 | return 0; | ||
213 | |||
214 | return slots[get_random_long() % slot_max]; | ||
215 | } | ||
216 | |||
217 | static void process_e820_entry(struct e820entry *entry, | ||
218 | unsigned long minimum, | ||
219 | unsigned long image_size) | ||
220 | { | ||
221 | struct mem_vector region, img; | ||
222 | |||
223 | /* Skip non-RAM entries. */ | ||
224 | if (entry->type != E820_RAM) | ||
225 | return; | ||
226 | |||
227 | /* Ignore entries entirely above our maximum. */ | ||
228 | if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET) | ||
229 | return; | ||
230 | |||
231 | /* Ignore entries entirely below our minimum. */ | ||
232 | if (entry->addr + entry->size < minimum) | ||
233 | return; | ||
234 | |||
235 | region.start = entry->addr; | ||
236 | region.size = entry->size; | ||
237 | |||
238 | /* Potentially raise address to minimum location. */ | ||
239 | if (region.start < minimum) | ||
240 | region.start = minimum; | ||
241 | |||
242 | /* Potentially raise address to meet alignment requirements. */ | ||
243 | region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); | ||
244 | |||
245 | /* Did we raise the address above the bounds of this e820 region? */ | ||
246 | if (region.start > entry->addr + entry->size) | ||
247 | return; | ||
248 | |||
249 | /* Reduce size by any delta from the original address. */ | ||
250 | region.size -= region.start - entry->addr; | ||
251 | |||
252 | /* Reduce maximum size to fit end of image within maximum limit. */ | ||
253 | if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET) | ||
254 | region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start; | ||
255 | |||
256 | /* Walk each aligned slot and check for avoided areas. */ | ||
257 | for (img.start = region.start, img.size = image_size ; | ||
258 | mem_contains(®ion, &img) ; | ||
259 | img.start += CONFIG_PHYSICAL_ALIGN) { | ||
260 | if (mem_avoid_overlap(&img)) | ||
261 | continue; | ||
262 | slots_append(img.start); | ||
263 | } | ||
264 | } | ||
265 | |||
266 | static unsigned long find_random_addr(unsigned long minimum, | ||
267 | unsigned long size) | ||
268 | { | ||
269 | int i; | ||
270 | unsigned long addr; | ||
271 | |||
272 | /* Make sure minimum is aligned. */ | ||
273 | minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN); | ||
274 | |||
275 | /* Verify potential e820 positions, appending to slots list. */ | ||
276 | for (i = 0; i < real_mode->e820_entries; i++) { | ||
277 | process_e820_entry(&real_mode->e820_map[i], minimum, size); | ||
278 | } | ||
279 | |||
280 | return slots_fetch_random(); | ||
281 | } | ||
282 | |||
283 | unsigned char *choose_kernel_location(unsigned char *input, | ||
284 | unsigned long input_size, | ||
285 | unsigned char *output, | ||
286 | unsigned long output_size) | ||
287 | { | ||
288 | unsigned long choice = (unsigned long)output; | ||
289 | unsigned long random; | ||
290 | |||
291 | if (cmdline_find_option_bool("nokaslr")) { | ||
292 | debug_putstr("KASLR disabled...\n"); | ||
293 | goto out; | ||
294 | } | ||
295 | |||
296 | /* Record the various known unsafe memory ranges. */ | ||
297 | mem_avoid_init((unsigned long)input, input_size, | ||
298 | (unsigned long)output, output_size); | ||
299 | |||
300 | /* Walk e820 and find a random address. */ | ||
301 | random = find_random_addr(choice, output_size); | ||
302 | if (!random) { | ||
303 | debug_putstr("KASLR could not find suitable E820 region...\n"); | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | /* Always enforce the minimum. */ | ||
308 | if (random < choice) | ||
309 | goto out; | ||
310 | |||
311 | choice = random; | ||
312 | out: | ||
313 | return (unsigned char *)choice; | ||
314 | } | ||
315 | |||
316 | #endif /* CONFIG_RANDOMIZE_BASE */ | ||
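One line in mem_avoid_init() worth spelling out is the unsafe_len margin, which reserves the tail of the input buffer that in-place decompression may clobber (the derivation the comment points to lives at the top of misc.c). Evaluating the expression for a hypothetical 16 MiB decompressed image, purely as an illustration:

#include <stdio.h>

int main(void)
{
        unsigned long output_size = 16UL << 20;         /* hypothetical 16 MiB image */
        unsigned long unsafe_len = (output_size >> 12) + 32768 + 18;

        /* (16 MiB >> 12) = 4096, so 4096 + 32768 + 18 = 36882 bytes */
        printf("unsafe_len = %lu bytes\n", unsafe_len);
        return 0;
}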
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c index bffd73b45b1f..b68e3033e6b9 100644 --- a/arch/x86/boot/compressed/cmdline.c +++ b/arch/x86/boot/compressed/cmdline.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include "misc.h" | 1 | #include "misc.h" |
2 | 2 | ||
3 | #ifdef CONFIG_EARLY_PRINTK | 3 | #if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE |
4 | 4 | ||
5 | static unsigned long fs; | 5 | static unsigned long fs; |
6 | static inline void set_fs(unsigned long seg) | 6 | static inline void set_fs(unsigned long seg) |
diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c new file mode 100644 index 000000000000..aa313466118b --- /dev/null +++ b/arch/x86/boot/compressed/cpuflags.c | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifdef CONFIG_RANDOMIZE_BASE | ||
2 | |||
3 | #include "../cpuflags.c" | ||
4 | |||
5 | bool has_cpuflag(int flag) | ||
6 | { | ||
7 | get_cpuflags(); | ||
8 | |||
9 | return test_bit(flag, cpu.flags); | ||
10 | } | ||
11 | |||
12 | #endif | ||
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 5d6f6891b188..9116aac232c7 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
@@ -117,9 +117,11 @@ preferred_addr: | |||
117 | addl %eax, %ebx | 117 | addl %eax, %ebx |
118 | notl %eax | 118 | notl %eax |
119 | andl %eax, %ebx | 119 | andl %eax, %ebx |
120 | #else | 120 | cmpl $LOAD_PHYSICAL_ADDR, %ebx |
121 | movl $LOAD_PHYSICAL_ADDR, %ebx | 121 | jge 1f |
122 | #endif | 122 | #endif |
123 | movl $LOAD_PHYSICAL_ADDR, %ebx | ||
124 | 1: | ||
123 | 125 | ||
124 | /* Target address to relocate to for decompression */ | 126 | /* Target address to relocate to for decompression */ |
125 | addl $z_extract_offset, %ebx | 127 | addl $z_extract_offset, %ebx |
@@ -191,14 +193,14 @@ relocated: | |||
191 | leal boot_heap(%ebx), %eax | 193 | leal boot_heap(%ebx), %eax |
192 | pushl %eax /* heap area */ | 194 | pushl %eax /* heap area */ |
193 | pushl %esi /* real mode pointer */ | 195 | pushl %esi /* real mode pointer */ |
194 | call decompress_kernel | 196 | call decompress_kernel /* returns kernel location in %eax */ |
195 | addl $24, %esp | 197 | addl $24, %esp |
196 | 198 | ||
197 | /* | 199 | /* |
198 | * Jump to the decompressed kernel. | 200 | * Jump to the decompressed kernel. |
199 | */ | 201 | */ |
200 | xorl %ebx, %ebx | 202 | xorl %ebx, %ebx |
201 | jmp *%ebp | 203 | jmp *%eax |
202 | 204 | ||
203 | /* | 205 | /* |
204 | * Stack and heap for uncompression | 206 | * Stack and heap for uncompression |
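The head_32.S change above (and the matching head_64.S hunk below) makes the align-up of the runtime load address fall back to LOAD_PHYSICAL_ADDR whenever it would land below it, which is what the reworded RELOCATABLE help means by CONFIG_PHYSICAL_START being "used as the minimum location". A C sketch of that computation, with example constants rather than the real configuration values:

#include <stdio.h>

static unsigned long preferred_addr(unsigned long runtime,
                                    unsigned long align,
                                    unsigned long min_addr)
{
        unsigned long ebx = (runtime + align - 1) & ~(align - 1);  /* align up */

        if (ebx < min_addr)     /* cmpl $LOAD_PHYSICAL_ADDR, %ebx ; jge 1f */
                ebx = min_addr; /* movl $LOAD_PHYSICAL_ADDR, %ebx */
        return ebx;
}

int main(void)
{
        /* e.g. loaded at 1 MiB, 2 MiB alignment, 16 MiB minimum: the minimum wins */
        printf("%#lx\n", preferred_addr(0x100000UL, 0x200000UL, 0x1000000UL));
        return 0;
}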
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index c337422b575d..c5c1ae0997e7 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -94,9 +94,11 @@ ENTRY(startup_32) | |||
94 | addl %eax, %ebx | 94 | addl %eax, %ebx |
95 | notl %eax | 95 | notl %eax |
96 | andl %eax, %ebx | 96 | andl %eax, %ebx |
97 | #else | 97 | cmpl $LOAD_PHYSICAL_ADDR, %ebx |
98 | movl $LOAD_PHYSICAL_ADDR, %ebx | 98 | jge 1f |
99 | #endif | 99 | #endif |
100 | movl $LOAD_PHYSICAL_ADDR, %ebx | ||
101 | 1: | ||
100 | 102 | ||
101 | /* Target address to relocate to for decompression */ | 103 | /* Target address to relocate to for decompression */ |
102 | addl $z_extract_offset, %ebx | 104 | addl $z_extract_offset, %ebx |
@@ -269,9 +271,11 @@ preferred_addr: | |||
269 | addq %rax, %rbp | 271 | addq %rax, %rbp |
270 | notq %rax | 272 | notq %rax |
271 | andq %rax, %rbp | 273 | andq %rax, %rbp |
272 | #else | 274 | cmpq $LOAD_PHYSICAL_ADDR, %rbp |
273 | movq $LOAD_PHYSICAL_ADDR, %rbp | 275 | jge 1f |
274 | #endif | 276 | #endif |
277 | movq $LOAD_PHYSICAL_ADDR, %rbp | ||
278 | 1: | ||
275 | 279 | ||
276 | /* Target address to relocate to for decompression */ | 280 | /* Target address to relocate to for decompression */ |
277 | leaq z_extract_offset(%rbp), %rbx | 281 | leaq z_extract_offset(%rbp), %rbx |
@@ -339,13 +343,13 @@ relocated: | |||
339 | movl $z_input_len, %ecx /* input_len */ | 343 | movl $z_input_len, %ecx /* input_len */ |
340 | movq %rbp, %r8 /* output target address */ | 344 | movq %rbp, %r8 /* output target address */ |
341 | movq $z_output_len, %r9 /* decompressed length */ | 345 | movq $z_output_len, %r9 /* decompressed length */ |
342 | call decompress_kernel | 346 | call decompress_kernel /* returns kernel location in %rax */ |
343 | popq %rsi | 347 | popq %rsi |
344 | 348 | ||
345 | /* | 349 | /* |
346 | * Jump to the decompressed kernel. | 350 | * Jump to the decompressed kernel. |
347 | */ | 351 | */ |
348 | jmp *%rbp | 352 | jmp *%rax |
349 | 353 | ||
350 | .code32 | 354 | .code32 |
351 | no_longmode: | 355 | no_longmode: |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 434f077d2c4d..196eaf373a06 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */ | |||
112 | void *memset(void *s, int c, size_t n); | 112 | void *memset(void *s, int c, size_t n); |
113 | void *memcpy(void *dest, const void *src, size_t n); | 113 | void *memcpy(void *dest, const void *src, size_t n); |
114 | 114 | ||
115 | #ifdef CONFIG_X86_64 | 115 | memptr free_mem_ptr; |
116 | #define memptr long | 116 | memptr free_mem_end_ptr; |
117 | #else | ||
118 | #define memptr unsigned | ||
119 | #endif | ||
120 | |||
121 | static memptr free_mem_ptr; | ||
122 | static memptr free_mem_end_ptr; | ||
123 | 117 | ||
124 | static char *vidmem; | 118 | static char *vidmem; |
125 | static int vidport; | 119 | static int vidport; |
@@ -395,7 +389,7 @@ static void parse_elf(void *output) | |||
395 | free(phdrs); | 389 | free(phdrs); |
396 | } | 390 | } |
397 | 391 | ||
398 | asmlinkage void decompress_kernel(void *rmode, memptr heap, | 392 | asmlinkage void *decompress_kernel(void *rmode, memptr heap, |
399 | unsigned char *input_data, | 393 | unsigned char *input_data, |
400 | unsigned long input_len, | 394 | unsigned long input_len, |
401 | unsigned char *output, | 395 | unsigned char *output, |
@@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
422 | free_mem_ptr = heap; /* Heap */ | 416 | free_mem_ptr = heap; /* Heap */ |
423 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; | 417 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; |
424 | 418 | ||
419 | output = choose_kernel_location(input_data, input_len, | ||
420 | output, output_len); | ||
421 | |||
422 | /* Validate memory location choices. */ | ||
425 | if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) | 423 | if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) |
426 | error("Destination address inappropriately aligned"); | 424 | error("Destination address inappropriately aligned"); |
427 | #ifdef CONFIG_X86_64 | 425 | #ifdef CONFIG_X86_64 |
@@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
441 | parse_elf(output); | 439 | parse_elf(output); |
442 | handle_relocations(output, output_len); | 440 | handle_relocations(output, output_len); |
443 | debug_putstr("done.\nBooting the kernel.\n"); | 441 | debug_putstr("done.\nBooting the kernel.\n"); |
444 | return; | 442 | return output; |
445 | } | 443 | } |
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 674019d8e235..24e3e569a13c 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
@@ -23,7 +23,15 @@ | |||
23 | #define BOOT_BOOT_H | 23 | #define BOOT_BOOT_H |
24 | #include "../ctype.h" | 24 | #include "../ctype.h" |
25 | 25 | ||
26 | #ifdef CONFIG_X86_64 | ||
27 | #define memptr long | ||
28 | #else | ||
29 | #define memptr unsigned | ||
30 | #endif | ||
31 | |||
26 | /* misc.c */ | 32 | /* misc.c */ |
33 | extern memptr free_mem_ptr; | ||
34 | extern memptr free_mem_end_ptr; | ||
27 | extern struct boot_params *real_mode; /* Pointer to real-mode data */ | 35 | extern struct boot_params *real_mode; /* Pointer to real-mode data */ |
28 | void __putstr(const char *s); | 36 | void __putstr(const char *s); |
29 | #define error_putstr(__x) __putstr(__x) | 37 | #define error_putstr(__x) __putstr(__x) |
@@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s) | |||
39 | 47 | ||
40 | #endif | 48 | #endif |
41 | 49 | ||
42 | #ifdef CONFIG_EARLY_PRINTK | 50 | #if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE |
43 | |||
44 | /* cmdline.c */ | 51 | /* cmdline.c */ |
45 | int cmdline_find_option(const char *option, char *buffer, int bufsize); | 52 | int cmdline_find_option(const char *option, char *buffer, int bufsize); |
46 | int cmdline_find_option_bool(const char *option); | 53 | int cmdline_find_option_bool(const char *option); |
54 | #endif | ||
47 | 55 | ||
48 | /* early_serial_console.c */ | ||
49 | extern int early_serial_base; | ||
50 | void console_init(void); | ||
51 | 56 | ||
57 | #if CONFIG_RANDOMIZE_BASE | ||
58 | /* aslr.c */ | ||
59 | unsigned char *choose_kernel_location(unsigned char *input, | ||
60 | unsigned long input_size, | ||
61 | unsigned char *output, | ||
62 | unsigned long output_size); | ||
63 | /* cpuflags.c */ | ||
64 | bool has_cpuflag(int flag); | ||
52 | #else | 65 | #else |
66 | static inline | ||
67 | unsigned char *choose_kernel_location(unsigned char *input, | ||
68 | unsigned long input_size, | ||
69 | unsigned char *output, | ||
70 | unsigned long output_size) | ||
71 | { | ||
72 | return output; | ||
73 | } | ||
74 | #endif | ||
53 | 75 | ||
76 | #ifdef CONFIG_EARLY_PRINTK | ||
54 | /* early_serial_console.c */ | 77 | /* early_serial_console.c */ |
78 | extern int early_serial_base; | ||
79 | void console_init(void); | ||
80 | #else | ||
55 | static const int early_serial_base; | 81 | static const int early_serial_base; |
56 | static inline void console_init(void) | 82 | static inline void console_init(void) |
57 | { } | 83 | { } |
58 | |||
59 | #endif | 84 | #endif |
60 | 85 | ||
61 | #endif | 86 | #endif |
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S index 11f272c6f5e9..1eb7d298b47d 100644 --- a/arch/x86/boot/copy.S +++ b/arch/x86/boot/copy.S | |||
@@ -14,7 +14,7 @@ | |||
14 | * Memory copy routines | 14 | * Memory copy routines |
15 | */ | 15 | */ |
16 | 16 | ||
17 | .code16gcc | 17 | .code16 |
18 | .text | 18 | .text |
19 | 19 | ||
20 | GLOBAL(memcpy) | 20 | GLOBAL(memcpy) |
@@ -30,7 +30,7 @@ GLOBAL(memcpy) | |||
30 | rep; movsb | 30 | rep; movsb |
31 | popw %di | 31 | popw %di |
32 | popw %si | 32 | popw %si |
33 | ret | 33 | retl |
34 | ENDPROC(memcpy) | 34 | ENDPROC(memcpy) |
35 | 35 | ||
36 | GLOBAL(memset) | 36 | GLOBAL(memset) |
@@ -45,25 +45,25 @@ GLOBAL(memset) | |||
45 | andw $3, %cx | 45 | andw $3, %cx |
46 | rep; stosb | 46 | rep; stosb |
47 | popw %di | 47 | popw %di |
48 | ret | 48 | retl |
49 | ENDPROC(memset) | 49 | ENDPROC(memset) |
50 | 50 | ||
51 | GLOBAL(copy_from_fs) | 51 | GLOBAL(copy_from_fs) |
52 | pushw %ds | 52 | pushw %ds |
53 | pushw %fs | 53 | pushw %fs |
54 | popw %ds | 54 | popw %ds |
55 | call memcpy | 55 | calll memcpy |
56 | popw %ds | 56 | popw %ds |
57 | ret | 57 | retl |
58 | ENDPROC(copy_from_fs) | 58 | ENDPROC(copy_from_fs) |
59 | 59 | ||
60 | GLOBAL(copy_to_fs) | 60 | GLOBAL(copy_to_fs) |
61 | pushw %es | 61 | pushw %es |
62 | pushw %fs | 62 | pushw %fs |
63 | popw %es | 63 | popw %es |
64 | call memcpy | 64 | calll memcpy |
65 | popw %es | 65 | popw %es |
66 | ret | 66 | retl |
67 | ENDPROC(copy_to_fs) | 67 | ENDPROC(copy_to_fs) |
68 | 68 | ||
69 | #if 0 /* Not currently used, but can be enabled as needed */ | 69 | #if 0 /* Not currently used, but can be enabled as needed */ |
@@ -71,17 +71,17 @@ GLOBAL(copy_from_gs) | |||
71 | pushw %ds | 71 | pushw %ds |
72 | pushw %gs | 72 | pushw %gs |
73 | popw %ds | 73 | popw %ds |
74 | call memcpy | 74 | calll memcpy |
75 | popw %ds | 75 | popw %ds |
76 | ret | 76 | retl |
77 | ENDPROC(copy_from_gs) | 77 | ENDPROC(copy_from_gs) |
78 | 78 | ||
79 | GLOBAL(copy_to_gs) | 79 | GLOBAL(copy_to_gs) |
80 | pushw %es | 80 | pushw %es |
81 | pushw %gs | 81 | pushw %gs |
82 | popw %es | 82 | popw %es |
83 | call memcpy | 83 | calll memcpy |
84 | popw %es | 84 | popw %es |
85 | ret | 85 | retl |
86 | ENDPROC(copy_to_gs) | 86 | ENDPROC(copy_to_gs) |
87 | #endif | 87 | #endif |
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c index 4d3ff037201f..100a9a10076a 100644 --- a/arch/x86/boot/cpucheck.c +++ b/arch/x86/boot/cpucheck.c | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <asm/required-features.h> | 28 | #include <asm/required-features.h> |
29 | #include <asm/msr-index.h> | 29 | #include <asm/msr-index.h> |
30 | 30 | ||
31 | struct cpu_features cpu; | ||
32 | static u32 cpu_vendor[3]; | ||
33 | static u32 err_flags[NCAPINTS]; | 31 | static u32 err_flags[NCAPINTS]; |
34 | 32 | ||
35 | static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; | 33 | static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY; |
@@ -69,92 +67,8 @@ static int is_transmeta(void) | |||
69 | cpu_vendor[2] == A32('M', 'x', '8', '6'); | 67 | cpu_vendor[2] == A32('M', 'x', '8', '6'); |
70 | } | 68 | } |
71 | 69 | ||
72 | static int has_fpu(void) | ||
73 | { | ||
74 | u16 fcw = -1, fsw = -1; | ||
75 | u32 cr0; | ||
76 | |||
77 | asm("movl %%cr0,%0" : "=r" (cr0)); | ||
78 | if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { | ||
79 | cr0 &= ~(X86_CR0_EM|X86_CR0_TS); | ||
80 | asm volatile("movl %0,%%cr0" : : "r" (cr0)); | ||
81 | } | ||
82 | |||
83 | asm volatile("fninit ; fnstsw %0 ; fnstcw %1" | ||
84 | : "+m" (fsw), "+m" (fcw)); | ||
85 | |||
86 | return fsw == 0 && (fcw & 0x103f) == 0x003f; | ||
87 | } | ||
88 | |||
89 | static int has_eflag(u32 mask) | ||
90 | { | ||
91 | u32 f0, f1; | ||
92 | |||
93 | asm("pushfl ; " | ||
94 | "pushfl ; " | ||
95 | "popl %0 ; " | ||
96 | "movl %0,%1 ; " | ||
97 | "xorl %2,%1 ; " | ||
98 | "pushl %1 ; " | ||
99 | "popfl ; " | ||
100 | "pushfl ; " | ||
101 | "popl %1 ; " | ||
102 | "popfl" | ||
103 | : "=&r" (f0), "=&r" (f1) | ||
104 | : "ri" (mask)); | ||
105 | |||
106 | return !!((f0^f1) & mask); | ||
107 | } | ||
108 | |||
109 | static void get_flags(void) | ||
110 | { | ||
111 | u32 max_intel_level, max_amd_level; | ||
112 | u32 tfms; | ||
113 | |||
114 | if (has_fpu()) | ||
115 | set_bit(X86_FEATURE_FPU, cpu.flags); | ||
116 | |||
117 | if (has_eflag(X86_EFLAGS_ID)) { | ||
118 | asm("cpuid" | ||
119 | : "=a" (max_intel_level), | ||
120 | "=b" (cpu_vendor[0]), | ||
121 | "=d" (cpu_vendor[1]), | ||
122 | "=c" (cpu_vendor[2]) | ||
123 | : "a" (0)); | ||
124 | |||
125 | if (max_intel_level >= 0x00000001 && | ||
126 | max_intel_level <= 0x0000ffff) { | ||
127 | asm("cpuid" | ||
128 | : "=a" (tfms), | ||
129 | "=c" (cpu.flags[4]), | ||
130 | "=d" (cpu.flags[0]) | ||
131 | : "a" (0x00000001) | ||
132 | : "ebx"); | ||
133 | cpu.level = (tfms >> 8) & 15; | ||
134 | cpu.model = (tfms >> 4) & 15; | ||
135 | if (cpu.level >= 6) | ||
136 | cpu.model += ((tfms >> 16) & 0xf) << 4; | ||
137 | } | ||
138 | |||
139 | asm("cpuid" | ||
140 | : "=a" (max_amd_level) | ||
141 | : "a" (0x80000000) | ||
142 | : "ebx", "ecx", "edx"); | ||
143 | |||
144 | if (max_amd_level >= 0x80000001 && | ||
145 | max_amd_level <= 0x8000ffff) { | ||
146 | u32 eax = 0x80000001; | ||
147 | asm("cpuid" | ||
148 | : "+a" (eax), | ||
149 | "=c" (cpu.flags[6]), | ||
150 | "=d" (cpu.flags[1]) | ||
151 | : : "ebx"); | ||
152 | } | ||
153 | } | ||
154 | } | ||
155 | |||
156 | /* Returns a bitmask of which words we have error bits in */ | 70 | /* Returns a bitmask of which words we have error bits in */ |
157 | static int check_flags(void) | 71 | static int check_cpuflags(void) |
158 | { | 72 | { |
159 | u32 err; | 73 | u32 err; |
160 | int i; | 74 | int i; |
@@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
187 | if (has_eflag(X86_EFLAGS_AC)) | 101 | if (has_eflag(X86_EFLAGS_AC)) |
188 | cpu.level = 4; | 102 | cpu.level = 4; |
189 | 103 | ||
190 | get_flags(); | 104 | get_cpuflags(); |
191 | err = check_flags(); | 105 | err = check_cpuflags(); |
192 | 106 | ||
193 | if (test_bit(X86_FEATURE_LM, cpu.flags)) | 107 | if (test_bit(X86_FEATURE_LM, cpu.flags)) |
194 | cpu.level = 64; | 108 | cpu.level = 64; |
@@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
207 | eax &= ~(1 << 15); | 121 | eax &= ~(1 << 15); |
208 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); | 122 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); |
209 | 123 | ||
210 | get_flags(); /* Make sure it really did something */ | 124 | get_cpuflags(); /* Make sure it really did something */ |
211 | err = check_flags(); | 125 | err = check_cpuflags(); |
212 | } else if (err == 0x01 && | 126 | } else if (err == 0x01 && |
213 | !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && | 127 | !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) && |
214 | is_centaur() && cpu.model >= 6) { | 128 | is_centaur() && cpu.model >= 6) { |
@@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
223 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); | 137 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); |
224 | 138 | ||
225 | set_bit(X86_FEATURE_CX8, cpu.flags); | 139 | set_bit(X86_FEATURE_CX8, cpu.flags); |
226 | err = check_flags(); | 140 | err = check_cpuflags(); |
227 | } else if (err == 0x01 && is_transmeta()) { | 141 | } else if (err == 0x01 && is_transmeta()) { |
228 | /* Transmeta might have masked feature bits in word 0 */ | 142 | /* Transmeta might have masked feature bits in word 0 */ |
229 | 143 | ||
@@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) | |||
238 | : : "ecx", "ebx"); | 152 | : : "ecx", "ebx"); |
239 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); | 153 | asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); |
240 | 154 | ||
241 | err = check_flags(); | 155 | err = check_cpuflags(); |
242 | } | 156 | } |
243 | 157 | ||
244 | if (err_flags_ptr) | 158 | if (err_flags_ptr) |
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c new file mode 100644 index 000000000000..a9fcb7cfb241 --- /dev/null +++ b/arch/x86/boot/cpuflags.c | |||
@@ -0,0 +1,104 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include "bitops.h" | ||
3 | |||
4 | #include <asm/processor-flags.h> | ||
5 | #include <asm/required-features.h> | ||
6 | #include <asm/msr-index.h> | ||
7 | #include "cpuflags.h" | ||
8 | |||
9 | struct cpu_features cpu; | ||
10 | u32 cpu_vendor[3]; | ||
11 | |||
12 | static bool loaded_flags; | ||
13 | |||
14 | static int has_fpu(void) | ||
15 | { | ||
16 | u16 fcw = -1, fsw = -1; | ||
17 | unsigned long cr0; | ||
18 | |||
19 | asm volatile("mov %%cr0,%0" : "=r" (cr0)); | ||
20 | if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { | ||
21 | cr0 &= ~(X86_CR0_EM|X86_CR0_TS); | ||
22 | asm volatile("mov %0,%%cr0" : : "r" (cr0)); | ||
23 | } | ||
24 | |||
25 | asm volatile("fninit ; fnstsw %0 ; fnstcw %1" | ||
26 | : "+m" (fsw), "+m" (fcw)); | ||
27 | |||
28 | return fsw == 0 && (fcw & 0x103f) == 0x003f; | ||
29 | } | ||
30 | |||
31 | int has_eflag(unsigned long mask) | ||
32 | { | ||
33 | unsigned long f0, f1; | ||
34 | |||
35 | asm volatile("pushf \n\t" | ||
36 | "pushf \n\t" | ||
37 | "pop %0 \n\t" | ||
38 | "mov %0,%1 \n\t" | ||
39 | "xor %2,%1 \n\t" | ||
40 | "push %1 \n\t" | ||
41 | "popf \n\t" | ||
42 | "pushf \n\t" | ||
43 | "pop %1 \n\t" | ||
44 | "popf" | ||
45 | : "=&r" (f0), "=&r" (f1) | ||
46 | : "ri" (mask)); | ||
47 | |||
48 | return !!((f0^f1) & mask); | ||
49 | } | ||
50 | |||
51 | /* Handle x86_32 PIC using ebx. */ | ||
52 | #if defined(__i386__) && defined(__PIC__) | ||
53 | # define EBX_REG "=r" | ||
54 | #else | ||
55 | # define EBX_REG "=b" | ||
56 | #endif | ||
57 | |||
58 | static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d) | ||
59 | { | ||
60 | asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t" | ||
61 | "cpuid \n\t" | ||
62 | ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t" | ||
63 | : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b) | ||
64 | : "a" (id) | ||
65 | ); | ||
66 | } | ||
67 | |||
68 | void get_cpuflags(void) | ||
69 | { | ||
70 | u32 max_intel_level, max_amd_level; | ||
71 | u32 tfms; | ||
72 | u32 ignored; | ||
73 | |||
74 | if (loaded_flags) | ||
75 | return; | ||
76 | loaded_flags = true; | ||
77 | |||
78 | if (has_fpu()) | ||
79 | set_bit(X86_FEATURE_FPU, cpu.flags); | ||
80 | |||
81 | if (has_eflag(X86_EFLAGS_ID)) { | ||
82 | cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2], | ||
83 | &cpu_vendor[1]); | ||
84 | |||
85 | if (max_intel_level >= 0x00000001 && | ||
86 | max_intel_level <= 0x0000ffff) { | ||
87 | cpuid(0x1, &tfms, &ignored, &cpu.flags[4], | ||
88 | &cpu.flags[0]); | ||
89 | cpu.level = (tfms >> 8) & 15; | ||
90 | cpu.model = (tfms >> 4) & 15; | ||
91 | if (cpu.level >= 6) | ||
92 | cpu.model += ((tfms >> 16) & 0xf) << 4; | ||
93 | } | ||
94 | |||
95 | cpuid(0x80000000, &max_amd_level, &ignored, &ignored, | ||
96 | &ignored); | ||
97 | |||
98 | if (max_amd_level >= 0x80000001 && | ||
99 | max_amd_level <= 0x8000ffff) { | ||
100 | cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6], | ||
101 | &cpu.flags[1]); | ||
102 | } | ||
103 | } | ||
104 | } | ||
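
A note on the new cpuflags.c above: cpuid() hides the PIC dance around %ebx (the GOT register on 32-bit PIC builds) and get_cpuflags() stores the vendor registers in EBX/EDX/ECX order into cpu_vendor[]. Below is a small user-space sketch (not the boot code itself, and relying on GCC's <cpuid.h> rather than the kernel's inline asm) that shows how those three registers form the familiar 12-byte vendor string:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	memcpy(vendor + 0, &ebx, 4);	/* cpu_vendor[0] */
	memcpy(vendor + 4, &edx, 4);	/* cpu_vendor[1] */
	memcpy(vendor + 8, &ecx, 4);	/* cpu_vendor[2] */
	vendor[12] = '\0';

	printf("vendor: %s, max basic leaf: 0x%x\n", vendor, eax);
	return 0;
}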
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h new file mode 100644 index 000000000000..ea97697e51e4 --- /dev/null +++ b/arch/x86/boot/cpuflags.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef BOOT_CPUFLAGS_H | ||
2 | #define BOOT_CPUFLAGS_H | ||
3 | |||
4 | #include <asm/cpufeature.h> | ||
5 | #include <asm/processor-flags.h> | ||
6 | |||
7 | struct cpu_features { | ||
8 | int level; /* Family, or 64 for x86-64 */ | ||
9 | int model; | ||
10 | u32 flags[NCAPINTS]; | ||
11 | }; | ||
12 | |||
13 | extern struct cpu_features cpu; | ||
14 | extern u32 cpu_vendor[3]; | ||
15 | |||
16 | int has_eflag(unsigned long mask); | ||
17 | void get_cpuflags(void); | ||
18 | |||
19 | #endif | ||
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 9ec06a1f6d61..ec3b8ba68096 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -391,7 +391,14 @@ xloadflags: | |||
391 | #else | 391 | #else |
392 | # define XLF23 0 | 392 | # define XLF23 0 |
393 | #endif | 393 | #endif |
394 | .word XLF0 | XLF1 | XLF23 | 394 | |
395 | #if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC) | ||
396 | # define XLF4 XLF_EFI_KEXEC | ||
397 | #else | ||
398 | # define XLF4 0 | ||
399 | #endif | ||
400 | |||
401 | .word XLF0 | XLF1 | XLF23 | XLF4 | ||
395 | 402 | ||
396 | cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, | 403 | cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, |
397 | #added with boot protocol | 404 | #added with boot protocol |
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 0d9ec770f2f8..e6a92455740e 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h | |||
@@ -39,6 +39,20 @@ | |||
39 | 39 | ||
40 | #ifdef CONFIG_ARCH_RANDOM | 40 | #ifdef CONFIG_ARCH_RANDOM |
41 | 41 | ||
42 | /* Instead of arch_get_random_long() when alternatives haven't run. */ | ||
43 | static inline int rdrand_long(unsigned long *v) | ||
44 | { | ||
45 | int ok; | ||
46 | asm volatile("1: " RDRAND_LONG "\n\t" | ||
47 | "jc 2f\n\t" | ||
48 | "decl %0\n\t" | ||
49 | "jnz 1b\n\t" | ||
50 | "2:" | ||
51 | : "=r" (ok), "=a" (*v) | ||
52 | : "0" (RDRAND_RETRY_LOOPS)); | ||
53 | return ok; | ||
54 | } | ||
55 | |||
42 | #define GET_RANDOM(name, type, rdrand, nop) \ | 56 | #define GET_RANDOM(name, type, rdrand, nop) \ |
43 | static inline int name(type *v) \ | 57 | static inline int name(type *v) \ |
44 | { \ | 58 | { \ |
@@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); | |||
68 | 82 | ||
69 | #endif /* CONFIG_X86_64 */ | 83 | #endif /* CONFIG_X86_64 */ |
70 | 84 | ||
85 | #else | ||
86 | |||
87 | static inline int rdrand_long(unsigned long *v) | ||
88 | { | ||
89 | return 0; | ||
90 | } | ||
91 | |||
71 | #endif /* CONFIG_ARCH_RANDOM */ | 92 | #endif /* CONFIG_ARCH_RANDOM */ |
72 | 93 | ||
73 | extern void x86_init_rdrand(struct cpuinfo_x86 *c); | 94 | extern void x86_init_rdrand(struct cpuinfo_x86 *c); |
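
The rdrand_long() helper added above exists because RDRAND can transiently fail (CF clear), so callers retry a bounded number of times; the kernel open-codes the loop in asm so it works before alternatives patching. A hedged user-space sketch of the same retry idea, assuming a CPU with RDRAND and the -mrdrnd compiler flag for the _rdrand64_step() intrinsic:

#include <stdio.h>
#include <immintrin.h>

#define RDRAND_RETRY_LOOPS 10	/* mirrors the kernel constant */

static int rdrand_u64(unsigned long long *v)
{
	int i;

	for (i = 0; i < RDRAND_RETRY_LOOPS; i++)
		if (_rdrand64_step(v))
			return 1;	/* success */
	return 0;			/* retries exhausted */
}

int main(void)
{
	unsigned long long r;

	if (rdrand_u64(&r))
		printf("rdrand: %llx\n", r);
	return 0;
}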
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index c6cd358a1eec..04a48903b2eb 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
@@ -92,12 +92,53 @@ | |||
92 | #endif | 92 | #endif |
93 | #define smp_read_barrier_depends() read_barrier_depends() | 93 | #define smp_read_barrier_depends() read_barrier_depends() |
94 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 94 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
95 | #else | 95 | #else /* !SMP */ |
96 | #define smp_mb() barrier() | 96 | #define smp_mb() barrier() |
97 | #define smp_rmb() barrier() | 97 | #define smp_rmb() barrier() |
98 | #define smp_wmb() barrier() | 98 | #define smp_wmb() barrier() |
99 | #define smp_read_barrier_depends() do { } while (0) | 99 | #define smp_read_barrier_depends() do { } while (0) |
100 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | 100 | #define set_mb(var, value) do { var = value; barrier(); } while (0) |
101 | #endif /* SMP */ | ||
102 | |||
103 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | ||
104 | |||
105 | /* | ||
106 | * For either of these options x86 doesn't have a strong TSO memory | ||
107 | * model and we should fall back to full barriers. | ||
108 | */ | ||
109 | |||
110 | #define smp_store_release(p, v) \ | ||
111 | do { \ | ||
112 | compiletime_assert_atomic_type(*p); \ | ||
113 | smp_mb(); \ | ||
114 | ACCESS_ONCE(*p) = (v); \ | ||
115 | } while (0) | ||
116 | |||
117 | #define smp_load_acquire(p) \ | ||
118 | ({ \ | ||
119 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
120 | compiletime_assert_atomic_type(*p); \ | ||
121 | smp_mb(); \ | ||
122 | ___p1; \ | ||
123 | }) | ||
124 | |||
125 | #else /* regular x86 TSO memory ordering */ | ||
126 | |||
127 | #define smp_store_release(p, v) \ | ||
128 | do { \ | ||
129 | compiletime_assert_atomic_type(*p); \ | ||
130 | barrier(); \ | ||
131 | ACCESS_ONCE(*p) = (v); \ | ||
132 | } while (0) | ||
133 | |||
134 | #define smp_load_acquire(p) \ | ||
135 | ({ \ | ||
136 | typeof(*p) ___p1 = ACCESS_ONCE(*p); \ | ||
137 | compiletime_assert_atomic_type(*p); \ | ||
138 | barrier(); \ | ||
139 | ___p1; \ | ||
140 | }) | ||
141 | |||
101 | #endif | 142 | #endif |
102 | 143 | ||
103 | /* | 144 | /* |
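
The smp_store_release()/smp_load_acquire() pair introduced above is meant for publish/consume patterns; on regular TSO x86 a compiler barrier() is all that is needed, which is why the non-OOSTORE variant is so cheap. A minimal sketch of the intended pairing, using C11 atomics as stand-ins for the kernel macros (single-threaded demo only):

#include <stdatomic.h>

static int payload;
static atomic_int ready;

static void producer(void)
{
	payload = 42;					/* plain store */
	atomic_store_explicit(&ready, 1,
			      memory_order_release);	/* ~ smp_store_release() */
}

static int consumer(void)
{
	if (atomic_load_explicit(&ready,
				 memory_order_acquire))	/* ~ smp_load_acquire() */
		return payload;				/* guaranteed to see 42 */
	return -1;
}

int main(void)
{
	producer();
	return consumer() == 42 ? 0 : 1;
}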
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 89270b4318db..e099f9502ace 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -216,6 +216,7 @@ | |||
216 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ | 216 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
217 | #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ | 217 | #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ |
218 | #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ | 218 | #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ |
219 | #define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ | ||
219 | #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ | 220 | #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ |
220 | #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ | 221 | #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ |
221 | #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ | 222 | #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 65c6e6e3a552..3b978c472d08 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -1,6 +1,24 @@ | |||
1 | #ifndef _ASM_X86_EFI_H | 1 | #ifndef _ASM_X86_EFI_H |
2 | #define _ASM_X86_EFI_H | 2 | #define _ASM_X86_EFI_H |
3 | 3 | ||
4 | /* | ||
5 | * We map the EFI regions needed for runtime services non-contiguously, | ||
6 | * with preserved alignment on virtual addresses starting from -4G down | ||
7 | * for a total max space of 64G. This way, we provide for stable runtime | ||
8 | * services addresses across kernels so that a kexec'd kernel can still | ||
9 | * use them. | ||
10 | * | ||
11 | * This is the main reason why we're doing stable VA mappings for RT | ||
12 | * services. | ||
13 | * | ||
14 | * This flag is used in conjunction with a chicken bit called | ||
15 | * "efi=old_map" which can be used as a fallback to the old runtime | ||
16 | * services mapping method in case there's some b0rkage with a | ||
17 | * particular EFI implementation (haha, it is hard to hold up the | ||
18 | * sarcasm here...). | ||
19 | */ | ||
20 | #define EFI_OLD_MEMMAP EFI_ARCH_1 | ||
21 | |||
4 | #ifdef CONFIG_X86_32 | 22 | #ifdef CONFIG_X86_32 |
5 | 23 | ||
6 | #define EFI_LOADER_SIGNATURE "EL32" | 24 | #define EFI_LOADER_SIGNATURE "EL32" |
@@ -69,24 +87,31 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, | |||
69 | efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ | 87 | efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ |
70 | (u64)(a4), (u64)(a5), (u64)(a6)) | 88 | (u64)(a4), (u64)(a5), (u64)(a6)) |
71 | 89 | ||
90 | #define _efi_call_virtX(x, f, ...) \ | ||
91 | ({ \ | ||
92 | efi_status_t __s; \ | ||
93 | \ | ||
94 | efi_sync_low_kernel_mappings(); \ | ||
95 | preempt_disable(); \ | ||
96 | __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \ | ||
97 | preempt_enable(); \ | ||
98 | __s; \ | ||
99 | }) | ||
100 | |||
72 | #define efi_call_virt0(f) \ | 101 | #define efi_call_virt0(f) \ |
73 | efi_call0((efi.systab->runtime->f)) | 102 | _efi_call_virtX(0, f) |
74 | #define efi_call_virt1(f, a1) \ | 103 | #define efi_call_virt1(f, a1) \ |
75 | efi_call1((efi.systab->runtime->f), (u64)(a1)) | 104 | _efi_call_virtX(1, f, (u64)(a1)) |
76 | #define efi_call_virt2(f, a1, a2) \ | 105 | #define efi_call_virt2(f, a1, a2) \ |
77 | efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2)) | 106 | _efi_call_virtX(2, f, (u64)(a1), (u64)(a2)) |
78 | #define efi_call_virt3(f, a1, a2, a3) \ | 107 | #define efi_call_virt3(f, a1, a2, a3) \ |
79 | efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 108 | _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3)) |
80 | (u64)(a3)) | 109 | #define efi_call_virt4(f, a1, a2, a3, a4) \ |
81 | #define efi_call_virt4(f, a1, a2, a3, a4) \ | 110 | _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4)) |
82 | efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 111 | #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ |
83 | (u64)(a3), (u64)(a4)) | 112 | _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5)) |
84 | #define efi_call_virt5(f, a1, a2, a3, a4, a5) \ | 113 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ |
85 | efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | 114 | _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) |
86 | (u64)(a3), (u64)(a4), (u64)(a5)) | ||
87 | #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ | ||
88 | efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ | ||
89 | (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) | ||
90 | 115 | ||
91 | extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | 116 | extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, |
92 | u32 type, u64 attribute); | 117 | u32 type, u64 attribute); |
@@ -95,12 +120,28 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, | |||
95 | 120 | ||
96 | extern int add_efi_memmap; | 121 | extern int add_efi_memmap; |
97 | extern unsigned long x86_efi_facility; | 122 | extern unsigned long x86_efi_facility; |
123 | extern struct efi_scratch efi_scratch; | ||
98 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); | 124 | extern void efi_set_executable(efi_memory_desc_t *md, bool executable); |
99 | extern int efi_memblock_x86_reserve_range(void); | 125 | extern int efi_memblock_x86_reserve_range(void); |
100 | extern void efi_call_phys_prelog(void); | 126 | extern void efi_call_phys_prelog(void); |
101 | extern void efi_call_phys_epilog(void); | 127 | extern void efi_call_phys_epilog(void); |
102 | extern void efi_unmap_memmap(void); | 128 | extern void efi_unmap_memmap(void); |
103 | extern void efi_memory_uc(u64 addr, unsigned long size); | 129 | extern void efi_memory_uc(u64 addr, unsigned long size); |
130 | extern void __init efi_map_region(efi_memory_desc_t *md); | ||
131 | extern void __init efi_map_region_fixed(efi_memory_desc_t *md); | ||
132 | extern void efi_sync_low_kernel_mappings(void); | ||
133 | extern void efi_setup_page_tables(void); | ||
134 | extern void __init old_map_region(efi_memory_desc_t *md); | ||
135 | |||
136 | struct efi_setup_data { | ||
137 | u64 fw_vendor; | ||
138 | u64 runtime; | ||
139 | u64 tables; | ||
140 | u64 smbios; | ||
141 | u64 reserved[8]; | ||
142 | }; | ||
143 | |||
144 | extern u64 efi_setup; | ||
104 | 145 | ||
105 | #ifdef CONFIG_EFI | 146 | #ifdef CONFIG_EFI |
106 | 147 | ||
@@ -110,7 +151,7 @@ static inline bool efi_is_native(void) | |||
110 | } | 151 | } |
111 | 152 | ||
112 | extern struct console early_efi_console; | 153 | extern struct console early_efi_console; |
113 | 154 | extern void parse_efi_setup(u64 phys_addr, u32 data_len); | |
114 | #else | 155 | #else |
115 | /* | 156 | /* |
116 | * IF EFI is not configured, have the EFI calls return -ENOSYS. | 157 | * IF EFI is not configured, have the EFI calls return -ENOSYS. |
@@ -122,6 +163,7 @@ extern struct console early_efi_console; | |||
122 | #define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) | 163 | #define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS) |
123 | #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) | 164 | #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) |
124 | #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) | 165 | #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) |
166 | static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} | ||
125 | #endif /* CONFIG_EFI */ | 167 | #endif /* CONFIG_EFI */ |
126 | 168 | ||
127 | #endif /* _ASM_X86_EFI_H */ | 169 | #endif /* _ASM_X86_EFI_H */ |
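
The _efi_call_virtX() rework above brackets every runtime-service call with efi_sync_low_kernel_mappings() and a preempt_disable()/preempt_enable() pair while still yielding the call's status. A hedged GNU C sketch of that statement-expression wrapper pattern, with made-up prepare()/finish() hooks and a fake service standing in for the kernel helpers:

#include <stdio.h>

static void prepare(void) { /* stand-in for sync mappings + preempt_disable() */ }
static void finish(void)  { /* stand-in for preempt_enable() */ }

/* Run setup, the real call, then teardown, and evaluate to the return value. */
#define call_wrapped(fn, ...)			\
({						\
	long __ret;				\
	prepare();				\
	__ret = (long)fn(__VA_ARGS__);		\
	finish();				\
	__ret;					\
})

static long fake_get_time(int arg)		/* illustrative runtime service */
{
	return 1000 + arg;
}

int main(void)
{
	printf("%ld\n", call_wrapped(fake_get_time, 23));	/* prints 1023 */
	return 0;
}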
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index be27ba1e947a..b4c1f5453436 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h | |||
@@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |||
110 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | 110 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
111 | u32 oldval, u32 newval) | 111 | u32 oldval, u32 newval) |
112 | { | 112 | { |
113 | int ret = 0; | 113 | return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval); |
114 | |||
115 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | ||
116 | return -EFAULT; | ||
117 | |||
118 | asm volatile("\t" ASM_STAC "\n" | ||
119 | "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" | ||
120 | "2:\t" ASM_CLAC "\n" | ||
121 | "\t.section .fixup, \"ax\"\n" | ||
122 | "3:\tmov %3, %0\n" | ||
123 | "\tjmp 2b\n" | ||
124 | "\t.previous\n" | ||
125 | _ASM_EXTABLE(1b, 3b) | ||
126 | : "+r" (ret), "=a" (oldval), "+m" (*uaddr) | ||
127 | : "i" (-EFAULT), "r" (newval), "1" (oldval) | ||
128 | : "memory" | ||
129 | ); | ||
130 | |||
131 | *uval = oldval; | ||
132 | return ret; | ||
133 | } | 114 | } |
134 | 115 | ||
135 | #endif | 116 | #endif |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index cba45d99ac1a..67d69b8e2d20 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); | |||
191 | #define trace_interrupt interrupt | 191 | #define trace_interrupt interrupt |
192 | #endif | 192 | #endif |
193 | 193 | ||
194 | #define VECTOR_UNDEFINED -1 | ||
195 | #define VECTOR_RETRIGGERED -2 | ||
196 | |||
194 | typedef int vector_irq_t[NR_VECTORS]; | 197 | typedef int vector_irq_t[NR_VECTORS]; |
195 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 198 | DECLARE_PER_CPU(vector_irq_t, vector_irq); |
196 | extern void setup_vector_irq(int cpu); | 199 | extern void setup_vector_irq(int cpu); |
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 459769d39263..e34e097b6f9d 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h | |||
@@ -51,10 +51,41 @@ struct devs_id { | |||
51 | enum intel_mid_cpu_type { | 51 | enum intel_mid_cpu_type { |
52 | /* 1 was Moorestown */ | 52 | /* 1 was Moorestown */ |
53 | INTEL_MID_CPU_CHIP_PENWELL = 2, | 53 | INTEL_MID_CPU_CHIP_PENWELL = 2, |
54 | INTEL_MID_CPU_CHIP_CLOVERVIEW, | ||
55 | INTEL_MID_CPU_CHIP_TANGIER, | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | extern enum intel_mid_cpu_type __intel_mid_cpu_chip; | 58 | extern enum intel_mid_cpu_type __intel_mid_cpu_chip; |
57 | 59 | ||
60 | /** | ||
61 | * struct intel_mid_ops - Interface between intel-mid & sub archs | ||
62 | * @arch_setup: arch_setup function to re-initialize platform | ||
63 | * structures (x86_init, x86_platform_init) | ||
64 | * | ||
65 | * This structure can be extended if any new interface is required | ||
66 | * between intel-mid & its sub arch files. | ||
67 | */ | ||
68 | struct intel_mid_ops { | ||
69 | void (*arch_setup)(void); | ||
70 | }; | ||
71 | |||
72 | /* Helper API's for INTEL_MID_OPS_INIT */ | ||
73 | #define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \ | ||
74 | [cpuid] = get_##cpuname##_ops | ||
75 | |||
76 | /* Maximum number of CPU ops */ | ||
77 | #define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *)) | ||
78 | |||
79 | /* | ||
80 | * For every new cpu addition, a weak get_<cpuname>_ops() function needs be | ||
81 | * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h. | ||
82 | */ | ||
83 | #define INTEL_MID_OPS_INIT {\ | ||
84 | DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \ | ||
85 | DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \ | ||
86 | DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \ | ||
87 | }; | ||
88 | |||
58 | #ifdef CONFIG_X86_INTEL_MID | 89 | #ifdef CONFIG_X86_INTEL_MID |
59 | 90 | ||
60 | static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) | 91 | static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) |
@@ -86,8 +117,21 @@ extern enum intel_mid_timer_options intel_mid_timer_options; | |||
86 | * Penwell uses spread spectrum clock, so the freq number is not exactly | 117 | * Penwell uses spread spectrum clock, so the freq number is not exactly |
87 | * the same as reported by MSR based on SDM. | 118 | * the same as reported by MSR based on SDM. |
88 | */ | 119 | */ |
89 | #define PENWELL_FSB_FREQ_83SKU 83200 | 120 | #define FSB_FREQ_83SKU 83200 |
90 | #define PENWELL_FSB_FREQ_100SKU 99840 | 121 | #define FSB_FREQ_100SKU 99840 |
122 | #define FSB_FREQ_133SKU 133000 | ||
123 | |||
124 | #define FSB_FREQ_167SKU 167000 | ||
125 | #define FSB_FREQ_200SKU 200000 | ||
126 | #define FSB_FREQ_267SKU 267000 | ||
127 | #define FSB_FREQ_333SKU 333000 | ||
128 | #define FSB_FREQ_400SKU 400000 | ||
129 | |||
130 | /* Bus Select SoC Fuse value */ | ||
131 | #define BSEL_SOC_FUSE_MASK 0x7 | ||
132 | #define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */ | ||
133 | #define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */ | ||
134 | #define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */ | ||
91 | 135 | ||
92 | #define SFI_MTMR_MAX_NUM 8 | 136 | #define SFI_MTMR_MAX_NUM 8 |
93 | #define SFI_MRTC_MAX 8 | 137 | #define SFI_MRTC_MAX 8 |
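
INTEL_MID_OPS_INIT above pairs each enum intel_mid_cpu_type value with a get_<cpuname>_ops() constructor through designated initializers, so adding a SoC means one enum value plus one table entry. A user-space sketch of the same dispatch pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

enum soc_id { SOC_PENWELL = 2, SOC_CLOVERVIEW, SOC_TANGIER, SOC_NR };

struct soc_ops { void (*arch_setup)(void); };

static void penwell_setup(void) { puts("penwell arch_setup"); }
static void tangier_setup(void) { puts("tangier arch_setup"); }

static const struct soc_ops penwell_ops = { .arch_setup = penwell_setup };
static const struct soc_ops tangier_ops = { .arch_setup = tangier_setup };

static const struct soc_ops *get_penwell_ops(void) { return &penwell_ops; }
static const struct soc_ops *get_tangier_ops(void) { return &tangier_ops; }

/* mirrors INTEL_MID_OPS_INIT: constructors keyed by CPU id */
static const struct soc_ops *(*const ops_init[SOC_NR])(void) = {
	[SOC_PENWELL] = get_penwell_ops,
	[SOC_TANGIER] = get_tangier_ops,
};

int main(void)
{
	enum soc_id id = SOC_TANGIER;

	if (ops_init[id])
		ops_init[id]()->arch_setup();
	return 0;
}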
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h new file mode 100644 index 000000000000..8e71c7941767 --- /dev/null +++ b/arch/x86/include/asm/iosf_mbi.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * iosf_mbi.h: Intel OnChip System Fabric MailBox access support | ||
3 | */ | ||
4 | |||
5 | #ifndef IOSF_MBI_SYMS_H | ||
6 | #define IOSF_MBI_SYMS_H | ||
7 | |||
8 | #define MBI_MCR_OFFSET 0xD0 | ||
9 | #define MBI_MDR_OFFSET 0xD4 | ||
10 | #define MBI_MCRX_OFFSET 0xD8 | ||
11 | |||
12 | #define MBI_RD_MASK 0xFEFFFFFF | ||
13 | #define MBI_WR_MASK 0X01000000 | ||
14 | |||
15 | #define MBI_MASK_HI 0xFFFFFF00 | ||
16 | #define MBI_MASK_LO 0x000000FF | ||
17 | #define MBI_ENABLE 0xF0 | ||
18 | |||
19 | /* Baytrail available units */ | ||
20 | #define BT_MBI_UNIT_AUNIT 0x00 | ||
21 | #define BT_MBI_UNIT_SMC 0x01 | ||
22 | #define BT_MBI_UNIT_CPU 0x02 | ||
23 | #define BT_MBI_UNIT_BUNIT 0x03 | ||
24 | #define BT_MBI_UNIT_PMC 0x04 | ||
25 | #define BT_MBI_UNIT_GFX 0x06 | ||
26 | #define BT_MBI_UNIT_SMI 0x0C | ||
27 | #define BT_MBI_UNIT_USB 0x43 | ||
28 | #define BT_MBI_UNIT_SATA 0xA3 | ||
29 | #define BT_MBI_UNIT_PCIE 0xA6 | ||
30 | |||
31 | /* Baytrail read/write opcodes */ | ||
32 | #define BT_MBI_AUNIT_READ 0x10 | ||
33 | #define BT_MBI_AUNIT_WRITE 0x11 | ||
34 | #define BT_MBI_SMC_READ 0x10 | ||
35 | #define BT_MBI_SMC_WRITE 0x11 | ||
36 | #define BT_MBI_CPU_READ 0x10 | ||
37 | #define BT_MBI_CPU_WRITE 0x11 | ||
38 | #define BT_MBI_BUNIT_READ 0x10 | ||
39 | #define BT_MBI_BUNIT_WRITE 0x11 | ||
40 | #define BT_MBI_PMC_READ 0x06 | ||
41 | #define BT_MBI_PMC_WRITE 0x07 | ||
42 | #define BT_MBI_GFX_READ 0x00 | ||
43 | #define BT_MBI_GFX_WRITE 0x01 | ||
44 | #define BT_MBI_SMIO_READ 0x06 | ||
45 | #define BT_MBI_SMIO_WRITE 0x07 | ||
46 | #define BT_MBI_USB_READ 0x06 | ||
47 | #define BT_MBI_USB_WRITE 0x07 | ||
48 | #define BT_MBI_SATA_READ 0x00 | ||
49 | #define BT_MBI_SATA_WRITE 0x01 | ||
50 | #define BT_MBI_PCIE_READ 0x00 | ||
51 | #define BT_MBI_PCIE_WRITE 0x01 | ||
52 | |||
53 | /** | ||
54 | * iosf_mbi_read() - MailBox Interface read command | ||
55 | * @port: port indicating subunit being accessed | ||
56 | * @opcode: port specific read or write opcode | ||
57 | * @offset: register address offset | ||
58 | * @mdr: register data to be read | ||
59 | * | ||
60 | * Locking is handled by spinlock - cannot sleep. | ||
61 | * Return: Nonzero on error | ||
62 | */ | ||
63 | int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr); | ||
64 | |||
65 | /** | ||
66 | * iosf_mbi_write() - MailBox unmasked write command | ||
67 | * @port: port indicating subunit being accessed | ||
68 | * @opcode: port specific read or write opcode | ||
69 | * @offset: register address offset | ||
70 | * @mdr: register data to be written | ||
71 | * | ||
72 | * Locking is handled by spinlock - cannot sleep. | ||
73 | * Return: Nonzero on error | ||
74 | */ | ||
75 | int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr); | ||
76 | |||
77 | /** | ||
78 | * iosf_mbi_modify() - MailBox masked write command | ||
79 | * @port: port indicating subunit being accessed | ||
80 | * @opcode: port specific read or write opcode | ||
81 | * @offset: register address offset | ||
82 | * @mdr: register data being modified | ||
83 | * @mask: mask indicating bits in mdr to be modified | ||
84 | * | ||
85 | * Locking is handled by spinlock - cannot sleep. | ||
86 | * Return: Nonzero on error | ||
87 | */ | ||
88 | int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask); | ||
89 | |||
90 | #endif /* IOSF_MBI_SYMS_H */ | ||
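
The iosf_mbi_modify() kernel-doc above describes a masked write: only the bits selected by @mask are taken from @mdr, the rest keep their old register value. A trivial user-space sketch of that semantics (no mailbox hardware involved; purely illustrative):

#include <stdio.h>
#include <stdint.h>

static uint32_t masked_write(uint32_t old, uint32_t mdr, uint32_t mask)
{
	return (old & ~mask) | (mdr & mask);
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	reg = masked_write(reg, 0x00000001, 0x0000000f);	/* low nibble -> 1 */
	printf("reg = %#x\n", reg);				/* 0xdeadbee1 */
	return 0;
}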
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c696a8687567..6e4ce2df87cf 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -118,7 +118,6 @@ extern void mce_register_decode_chain(struct notifier_block *nb); | |||
118 | extern void mce_unregister_decode_chain(struct notifier_block *nb); | 118 | extern void mce_unregister_decode_chain(struct notifier_block *nb); |
119 | 119 | ||
120 | #include <linux/percpu.h> | 120 | #include <linux/percpu.h> |
121 | #include <linux/init.h> | ||
122 | #include <linux/atomic.h> | 121 | #include <linux/atomic.h> |
123 | 122 | ||
124 | extern int mce_p5_enabled; | 123 | extern int mce_p5_enabled; |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index f98bd6625318..b59827e76529 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -1,6 +1,21 @@ | |||
1 | #ifndef _ASM_X86_MICROCODE_H | 1 | #ifndef _ASM_X86_MICROCODE_H |
2 | #define _ASM_X86_MICROCODE_H | 2 | #define _ASM_X86_MICROCODE_H |
3 | 3 | ||
4 | #define native_rdmsr(msr, val1, val2) \ | ||
5 | do { \ | ||
6 | u64 __val = native_read_msr((msr)); \ | ||
7 | (void)((val1) = (u32)__val); \ | ||
8 | (void)((val2) = (u32)(__val >> 32)); \ | ||
9 | } while (0) | ||
10 | |||
11 | #define native_wrmsr(msr, low, high) \ | ||
12 | native_write_msr(msr, low, high) | ||
13 | |||
14 | #define native_wrmsrl(msr, val) \ | ||
15 | native_write_msr((msr), \ | ||
16 | (u32)((u64)(val)), \ | ||
17 | (u32)((u64)(val) >> 32)) | ||
18 | |||
4 | struct cpu_signature { | 19 | struct cpu_signature { |
5 | unsigned int sig; | 20 | unsigned int sig; |
6 | unsigned int pf; | 21 | unsigned int pf; |
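
The native_wrmsrl()/native_rdmsr() helpers added above split a 64-bit MSR value into the low/high 32-bit halves that WRMSR expects in EAX/EDX and reassemble them on the read side. A trivial user-space check of that split (no MSR access, just the arithmetic):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t val  = 0x123456789abcdef0ULL;
	uint32_t low  = (uint32_t)val;		/* -> EAX */
	uint32_t high = (uint32_t)(val >> 32);	/* -> EDX */
	uint64_t back = ((uint64_t)high << 32) | low;

	printf("low=%#x high=%#x roundtrip=%#" PRIx64 "\n", low, high, back);
	return back == val ? 0 : 1;
}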
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 4c019179a57d..b7b10b82d3e5 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h | |||
@@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd); | |||
61 | extern int apply_microcode_amd(int cpu); | 61 | extern int apply_microcode_amd(int cpu); |
62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); | 62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); |
63 | 63 | ||
64 | #define PATCH_MAX_SIZE PAGE_SIZE | ||
65 | extern u8 amd_ucode_patch[PATCH_MAX_SIZE]; | ||
66 | |||
64 | #ifdef CONFIG_MICROCODE_AMD_EARLY | 67 | #ifdef CONFIG_MICROCODE_AMD_EARLY |
65 | #ifdef CONFIG_X86_32 | ||
66 | #define MPB_MAX_SIZE PAGE_SIZE | ||
67 | extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; | ||
68 | #endif | ||
69 | extern void __init load_ucode_amd_bsp(void); | 68 | extern void __init load_ucode_amd_bsp(void); |
70 | extern void load_ucode_amd_ap(void); | 69 | extern void load_ucode_amd_ap(void); |
71 | extern int __init save_microcode_in_initrd_amd(void); | 70 | extern int __init save_microcode_in_initrd_amd(void); |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 3142a94c7b4b..3e6b4920ef5d 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _ASM_X86_MPSPEC_H | 1 | #ifndef _ASM_X86_MPSPEC_H |
2 | #define _ASM_X86_MPSPEC_H | 2 | #define _ASM_X86_MPSPEC_H |
3 | 3 | ||
4 | #include <linux/init.h> | ||
5 | 4 | ||
6 | #include <asm/mpspec_def.h> | 5 | #include <asm/mpspec_def.h> |
7 | #include <asm/x86_init.h> | 6 | #include <asm/x86_init.h> |
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 2f366d0ac6b4..1da25a5f96f9 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_MWAIT_H | 1 | #ifndef _ASM_X86_MWAIT_H |
2 | #define _ASM_X86_MWAIT_H | 2 | #define _ASM_X86_MWAIT_H |
3 | 3 | ||
4 | #include <linux/sched.h> | ||
5 | |||
4 | #define MWAIT_SUBSTATE_MASK 0xf | 6 | #define MWAIT_SUBSTATE_MASK 0xf |
5 | #define MWAIT_CSTATE_MASK 0xf | 7 | #define MWAIT_CSTATE_MASK 0xf |
6 | #define MWAIT_SUBSTATE_SIZE 4 | 8 | #define MWAIT_SUBSTATE_SIZE 4 |
@@ -13,4 +15,45 @@ | |||
13 | 15 | ||
14 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 | 16 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 |
15 | 17 | ||
18 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
19 | unsigned long edx) | ||
20 | { | ||
21 | /* "monitor %eax, %ecx, %edx;" */ | ||
22 | asm volatile(".byte 0x0f, 0x01, 0xc8;" | ||
23 | :: "a" (eax), "c" (ecx), "d"(edx)); | ||
24 | } | ||
25 | |||
26 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
27 | { | ||
28 | /* "mwait %eax, %ecx;" */ | ||
29 | asm volatile(".byte 0x0f, 0x01, 0xc9;" | ||
30 | :: "a" (eax), "c" (ecx)); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
35 | * which can obviate IPI to trigger checking of need_resched. | ||
36 | * We execute MONITOR against need_resched and enter optimized wait state | ||
37 | * through MWAIT. Whenever someone changes need_resched, we would be woken | ||
38 | * up from MWAIT (without an IPI). | ||
39 | * | ||
40 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
41 | * capability. | ||
42 | */ | ||
43 | static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) | ||
44 | { | ||
45 | if (!current_set_polling_and_test()) { | ||
46 | if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { | ||
47 | mb(); | ||
48 | clflush((void *)¤t_thread_info()->flags); | ||
49 | mb(); | ||
50 | } | ||
51 | |||
52 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
53 | if (!need_resched()) | ||
54 | __mwait(eax, ecx); | ||
55 | } | ||
56 | current_clr_polling(); | ||
57 | } | ||
58 | |||
16 | #endif /* _ASM_X86_MWAIT_H */ | 59 | #endif /* _ASM_X86_MWAIT_H */ |
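
mwait_idle_with_hints(), moved into this header above, arms MONITOR on the thread_info flags word, re-checks need_resched(), and only then executes MWAIT, so a remote writer to that word wakes the CPU without an IPI. MONITOR/MWAIT are not usable from user space, so the sketch below only mimics the protocol with an atomic flag; every name in it is illustrative, not a kernel API:

#include <stdatomic.h>
#include <sched.h>

static atomic_int ti_flags;	/* stand-in for current_thread_info()->flags */

static int need_resched_stub(void)
{
	return atomic_load_explicit(&ti_flags, memory_order_acquire) != 0;
}

static void idle_wait(void)
{
	/* "arm the monitor", then re-check before committing to the wait */
	if (!need_resched_stub()) {
		/* stand-in for __mwait(): park until the flag changes */
		while (!need_resched_stub())
			sched_yield();
	}
}

static void kick(void)	/* what a remote set_tsk_need_resched() amounts to */
{
	atomic_store_explicit(&ti_flags, 1, memory_order_release);
}

int main(void)
{
	kick();		/* single-threaded demo: set the flag, then wait returns */
	idle_wait();
	return 0;
}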
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index c87892442e53..775873d3be55 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h | |||
@@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr); | |||
71 | #include <asm-generic/getorder.h> | 71 | #include <asm-generic/getorder.h> |
72 | 72 | ||
73 | #define __HAVE_ARCH_GATE_AREA 1 | 73 | #define __HAVE_ARCH_GATE_AREA 1 |
74 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
74 | 75 | ||
75 | #endif /* __KERNEL__ */ | 76 | #endif /* __KERNEL__ */ |
76 | #endif /* _ASM_X86_PAGE_H */ | 77 | #endif /* _ASM_X86_PAGE_H */ |
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h index 4d550d04b609..904f528cc8e8 100644 --- a/arch/x86/include/asm/page_32.h +++ b/arch/x86/include/asm/page_32.h | |||
@@ -5,10 +5,6 @@ | |||
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
7 | 7 | ||
8 | #ifdef CONFIG_HUGETLB_PAGE | ||
9 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
10 | #endif | ||
11 | |||
12 | #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) | 8 | #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) |
13 | #ifdef CONFIG_DEBUG_VIRTUAL | 9 | #ifdef CONFIG_DEBUG_VIRTUAL |
14 | extern unsigned long __phys_addr(unsigned long); | 10 | extern unsigned long __phys_addr(unsigned long); |
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 43dcd804ebd5..8de6d9cf3b95 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h | |||
@@ -39,9 +39,18 @@ | |||
39 | #define __VIRTUAL_MASK_SHIFT 47 | 39 | #define __VIRTUAL_MASK_SHIFT 47 |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Kernel image size is limited to 512 MB (see level2_kernel_pgt in | 42 | * Kernel image size is limited to 1GiB due to the fixmap living in the |
43 | * arch/x86/kernel/head_64.S), and it is mapped here: | 43 | * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use |
44 | * 512MiB by default, leaving 1.5GiB for modules once the page tables | ||
45 | * are fully set up. If kernel ASLR is configured, it can extend the | ||
46 | * kernel page table mapping, reducing the size of the modules area. | ||
44 | */ | 47 | */ |
45 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) | 48 | #define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024) |
49 | #if defined(CONFIG_RANDOMIZE_BASE) && \ | ||
50 | CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT | ||
51 | #define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET | ||
52 | #else | ||
53 | #define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT | ||
54 | #endif | ||
46 | 55 | ||
47 | #endif /* _ASM_X86_PAGE_64_DEFS_H */ | 56 | #endif /* _ASM_X86_PAGE_64_DEFS_H */ |
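
A back-of-envelope sketch of the trade-off described in the comment above: the kernel text mapping and the modules area share the window between __START_KERNEL_map and MODULES_END, so growing KERNEL_IMAGE_SIZE (e.g. for kernel ASLR) shrinks MODULES_LEN by the same amount. Addresses below are the layout of this kernel series; the program just does the subtraction:

#include <stdio.h>

int main(void)
{
	unsigned long long start_kernel_map = 0xffffffff80000000ULL;
	unsigned long long modules_end      = 0xffffffffff000000ULL;
	unsigned long long image_sizes[]    = { 512ULL << 20, 1ULL << 30 };

	for (int i = 0; i < 2; i++) {
		unsigned long long modules_vaddr =
			start_kernel_map + image_sizes[i];
		printf("image %4lluMiB -> modules %4lluMiB\n",
		       image_sizes[i] >> 20,
		       (modules_end - modules_vaddr) >> 20);
	}
	return 0;
}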
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 3bf2dd0cf61f..0d193e234647 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | /* Bit manipulation helper on pte/pgoff entry */ | ||
59 | static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift, | ||
60 | unsigned long mask, unsigned int leftshift) | ||
61 | { | ||
62 | return ((value >> rightshift) & mask) << leftshift; | ||
63 | } | ||
64 | |||
58 | #ifdef CONFIG_MEM_SOFT_DIRTY | 65 | #ifdef CONFIG_MEM_SOFT_DIRTY |
59 | 66 | ||
60 | /* | 67 | /* |
@@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
71 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | 78 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) |
72 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) | 79 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) |
73 | 80 | ||
74 | #define pte_to_pgoff(pte) \ | 81 | #define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) |
75 | ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ | 82 | #define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) |
76 | & ((1U << PTE_FILE_BITS1) - 1))) \ | 83 | #define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1) |
77 | + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ | 84 | |
78 | & ((1U << PTE_FILE_BITS2) - 1)) \ | 85 | #define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) |
79 | << (PTE_FILE_BITS1)) \ | 86 | #define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) |
80 | + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ | 87 | #define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3) |
81 | & ((1U << PTE_FILE_BITS3) - 1)) \ | 88 | |
82 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | 89 | static __always_inline pgoff_t pte_to_pgoff(pte_t pte) |
83 | + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ | 90 | { |
84 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) | 91 | return (pgoff_t) |
85 | 92 | (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + | |
86 | #define pgoff_to_pte(off) \ | 93 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + |
87 | ((pte_t) { .pte_low = \ | 94 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) + |
88 | ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | 95 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4)); |
89 | + ((((off) >> PTE_FILE_BITS1) \ | 96 | } |
90 | & ((1U << PTE_FILE_BITS2) - 1)) \ | 97 | |
91 | << PTE_FILE_SHIFT2) \ | 98 | static __always_inline pte_t pgoff_to_pte(pgoff_t off) |
92 | + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | 99 | { |
93 | & ((1U << PTE_FILE_BITS3) - 1)) \ | 100 | return (pte_t){ |
94 | << PTE_FILE_SHIFT3) \ | 101 | .pte_low = |
95 | + ((((off) >> \ | 102 | pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + |
96 | (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ | 103 | pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + |
97 | << PTE_FILE_SHIFT4) \ | 104 | pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) + |
98 | + _PAGE_FILE }) | 105 | pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) + |
106 | _PAGE_FILE, | ||
107 | }; | ||
108 | } | ||
99 | 109 | ||
100 | #else /* CONFIG_MEM_SOFT_DIRTY */ | 110 | #else /* CONFIG_MEM_SOFT_DIRTY */ |
101 | 111 | ||
@@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
115 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) | 125 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) |
116 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | 126 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) |
117 | 127 | ||
118 | #define pte_to_pgoff(pte) \ | 128 | #define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) |
119 | ((((pte).pte_low >> PTE_FILE_SHIFT1) \ | 129 | #define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) |
120 | & ((1U << PTE_FILE_BITS1) - 1)) \ | 130 | |
121 | + ((((pte).pte_low >> PTE_FILE_SHIFT2) \ | 131 | #define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) |
122 | & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \ | 132 | #define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) |
123 | + (((pte).pte_low >> PTE_FILE_SHIFT3) \ | 133 | |
124 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2))) | 134 | static __always_inline pgoff_t pte_to_pgoff(pte_t pte) |
125 | 135 | { | |
126 | #define pgoff_to_pte(off) \ | 136 | return (pgoff_t) |
127 | ((pte_t) { .pte_low = \ | 137 | (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + |
128 | (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | 138 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + |
129 | + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \ | 139 | pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3)); |
130 | << PTE_FILE_SHIFT2) \ | 140 | } |
131 | + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | 141 | |
132 | << PTE_FILE_SHIFT3) \ | 142 | static __always_inline pte_t pgoff_to_pte(pgoff_t off) |
133 | + _PAGE_FILE }) | 143 | { |
144 | return (pte_t){ | ||
145 | .pte_low = | ||
146 | pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + | ||
147 | pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + | ||
148 | pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) + | ||
149 | _PAGE_FILE, | ||
150 | }; | ||
151 | } | ||
134 | 152 | ||
135 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | 153 | #endif /* CONFIG_MEM_SOFT_DIRTY */ |
136 | 154 | ||
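
The pte_bitop() helper introduced above is all the pgoff<->pte conversions do, field by field: extract a bit-field from one position and re-deposit it at another. A standalone sketch with made-up shifts and masks (not the real PTE_FILE_* layout) showing the round trip:

#include <stdio.h>

static unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
			       unsigned long mask, unsigned int leftshift)
{
	return ((value >> rightshift) & mask) << leftshift;
}

int main(void)
{
	unsigned long off = 0x1234;

	/* pack: low 5 bits go to bit 1, the rest to bit 8 (illustrative) */
	unsigned long pte = pte_bitop(off, 0, 0x1f, 1) |
			    pte_bitop(off, 5, -1UL, 8);

	/* unpack: reverse each move */
	unsigned long back = pte_bitop(pte, 1, 0x1f, 0) |
			     pte_bitop(pte, 8, -1UL, 5);

	printf("off=%#lx pte=%#lx back=%#lx\n", off, pte, back);
	return back == off ? 0 : 1;
}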
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 2d883440cb9a..c883bf726398 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h | |||
@@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t; | |||
58 | #define VMALLOC_START _AC(0xffffc90000000000, UL) | 58 | #define VMALLOC_START _AC(0xffffc90000000000, UL) |
59 | #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) | 59 | #define VMALLOC_END _AC(0xffffe8ffffffffff, UL) |
60 | #define VMEMMAP_START _AC(0xffffea0000000000, UL) | 60 | #define VMEMMAP_START _AC(0xffffea0000000000, UL) |
61 | #define MODULES_VADDR _AC(0xffffffffa0000000, UL) | 61 | #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) |
62 | #define MODULES_END _AC(0xffffffffff000000, UL) | 62 | #define MODULES_END _AC(0xffffffffff000000, UL) |
63 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 63 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
64 | 64 | ||
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 0ecac257fb26..a83aa44bb1fb 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -382,7 +382,8 @@ static inline void update_page_count(int level, unsigned long pages) { } | |||
382 | */ | 382 | */ |
383 | extern pte_t *lookup_address(unsigned long address, unsigned int *level); | 383 | extern pte_t *lookup_address(unsigned long address, unsigned int *level); |
384 | extern phys_addr_t slow_virt_to_phys(void *__address); | 384 | extern phys_addr_t slow_virt_to_phys(void *__address); |
385 | 385 | extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, | |
386 | unsigned numpages, unsigned long page_flags); | ||
386 | #endif /* !__ASSEMBLY__ */ | 387 | #endif /* !__ASSEMBLY__ */ |
387 | 388 | ||
388 | #endif /* _ASM_X86_PGTABLE_DEFS_H */ | 389 | #endif /* _ASM_X86_PGTABLE_DEFS_H */ |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7b034a4057f9..fdedd38fd0fc 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -27,7 +27,6 @@ struct mm_struct; | |||
27 | #include <linux/cache.h> | 27 | #include <linux/cache.h> |
28 | #include <linux/threads.h> | 28 | #include <linux/threads.h> |
29 | #include <linux/math64.h> | 29 | #include <linux/math64.h> |
30 | #include <linux/init.h> | ||
31 | #include <linux/err.h> | 30 | #include <linux/err.h> |
32 | #include <linux/irqflags.h> | 31 | #include <linux/irqflags.h> |
33 | 32 | ||
@@ -72,6 +71,7 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO]; | |||
72 | extern u16 __read_mostly tlb_lld_4k[NR_INFO]; | 71 | extern u16 __read_mostly tlb_lld_4k[NR_INFO]; |
73 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; | 72 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; |
74 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; | 73 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; |
74 | extern u16 __read_mostly tlb_lld_1g[NR_INFO]; | ||
75 | extern s8 __read_mostly tlb_flushall_shift; | 75 | extern s8 __read_mostly tlb_flushall_shift; |
76 | 76 | ||
77 | /* | 77 | /* |
@@ -370,6 +370,20 @@ struct ymmh_struct { | |||
370 | u32 ymmh_space[64]; | 370 | u32 ymmh_space[64]; |
371 | }; | 371 | }; |
372 | 372 | ||
373 | /* We don't support LWP yet: */ | ||
374 | struct lwp_struct { | ||
375 | u8 reserved[128]; | ||
376 | }; | ||
377 | |||
378 | struct bndregs_struct { | ||
379 | u64 bndregs[8]; | ||
380 | } __packed; | ||
381 | |||
382 | struct bndcsr_struct { | ||
383 | u64 cfg_reg_u; | ||
384 | u64 status_reg; | ||
385 | } __packed; | ||
386 | |||
373 | struct xsave_hdr_struct { | 387 | struct xsave_hdr_struct { |
374 | u64 xstate_bv; | 388 | u64 xstate_bv; |
375 | u64 reserved1[2]; | 389 | u64 reserved1[2]; |
@@ -380,6 +394,9 @@ struct xsave_struct { | |||
380 | struct i387_fxsave_struct i387; | 394 | struct i387_fxsave_struct i387; |
381 | struct xsave_hdr_struct xsave_hdr; | 395 | struct xsave_hdr_struct xsave_hdr; |
382 | struct ymmh_struct ymmh; | 396 | struct ymmh_struct ymmh; |
397 | struct lwp_struct lwp; | ||
398 | struct bndregs_struct bndregs; | ||
399 | struct bndcsr_struct bndcsr; | ||
383 | /* new processor state extensions will go here */ | 400 | /* new processor state extensions will go here */ |
384 | } __attribute__ ((packed, aligned (64))); | 401 | } __attribute__ ((packed, aligned (64))); |
385 | 402 | ||
@@ -700,29 +717,6 @@ static inline void sync_core(void) | |||
700 | #endif | 717 | #endif |
701 | } | 718 | } |
702 | 719 | ||
703 | static inline void __monitor(const void *eax, unsigned long ecx, | ||
704 | unsigned long edx) | ||
705 | { | ||
706 | /* "monitor %eax, %ecx, %edx;" */ | ||
707 | asm volatile(".byte 0x0f, 0x01, 0xc8;" | ||
708 | :: "a" (eax), "c" (ecx), "d"(edx)); | ||
709 | } | ||
710 | |||
711 | static inline void __mwait(unsigned long eax, unsigned long ecx) | ||
712 | { | ||
713 | /* "mwait %eax, %ecx;" */ | ||
714 | asm volatile(".byte 0x0f, 0x01, 0xc9;" | ||
715 | :: "a" (eax), "c" (ecx)); | ||
716 | } | ||
717 | |||
718 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
719 | { | ||
720 | trace_hardirqs_on(); | ||
721 | /* "mwait %eax, %ecx;" */ | ||
722 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" | ||
723 | :: "a" (eax), "c" (ecx)); | ||
724 | } | ||
725 | |||
726 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 720 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
727 | extern void init_amd_e400_c1e_mask(void); | 721 | extern void init_amd_e400_c1e_mask(void); |
728 | 722 | ||
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 942a08623a1a..14fd6fd75a19 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -60,7 +60,6 @@ struct pt_regs { | |||
60 | 60 | ||
61 | #endif /* !__i386__ */ | 61 | #endif /* !__i386__ */ |
62 | 62 | ||
63 | #include <linux/init.h> | ||
64 | #ifdef CONFIG_PARAVIRT | 63 | #ifdef CONFIG_PARAVIRT |
65 | #include <asm/paravirt_types.h> | 64 | #include <asm/paravirt_types.h> |
66 | #endif | 65 | #endif |
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 59bcf4e22418..d62c9f809bc5 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #include <uapi/asm/setup.h> | 4 | #include <uapi/asm/setup.h> |
5 | 5 | ||
6 | |||
7 | #define COMMAND_LINE_SIZE 2048 | 6 | #define COMMAND_LINE_SIZE 2048 |
8 | 7 | ||
9 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
@@ -29,6 +28,8 @@ | |||
29 | #include <asm/bootparam.h> | 28 | #include <asm/bootparam.h> |
30 | #include <asm/x86_init.h> | 29 | #include <asm/x86_init.h> |
31 | 30 | ||
31 | extern u64 relocated_ramdisk; | ||
32 | |||
32 | /* Interrupt control for vSMPowered x86_64 systems */ | 33 | /* Interrupt control for vSMPowered x86_64 systems */ |
33 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
34 | void vsmp_init(void); | 35 | void vsmp_init(void); |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4137890e88e3..8cd27e08e23c 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define _ASM_X86_SMP_H | 2 | #define _ASM_X86_SMP_H |
3 | #ifndef __ASSEMBLY__ | 3 | #ifndef __ASSEMBLY__ |
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | #include <linux/init.h> | ||
6 | #include <asm/percpu.h> | 5 | #include <asm/percpu.h> |
7 | 6 | ||
8 | /* | 7 | /* |
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 34baa0eb5d0c..a04eabd43d06 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h | |||
@@ -1,9 +1,9 @@ | |||
1 | #ifndef _ASM_X86_TIMER_H | 1 | #ifndef _ASM_X86_TIMER_H |
2 | #define _ASM_X86_TIMER_H | 2 | #define _ASM_X86_TIMER_H |
3 | #include <linux/init.h> | ||
4 | #include <linux/pm.h> | 3 | #include <linux/pm.h> |
5 | #include <linux/percpu.h> | 4 | #include <linux/percpu.h> |
6 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/math64.h> | ||
7 | 7 | ||
8 | #define TICK_SIZE (tick_nsec / 1000) | 8 | #define TICK_SIZE (tick_nsec / 1000) |
9 | 9 | ||
@@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void); | |||
12 | 12 | ||
13 | extern int no_timer_check; | 13 | extern int no_timer_check; |
14 | 14 | ||
15 | /* Accelerators for sched_clock() | 15 | /* |
16 | * convert from cycles(64bits) => nanoseconds (64bits) | 16 | * We use the full linear equation: f(x) = a + b*x, in order to allow |
17 | * basic equation: | 17 | * a continuous function in the face of dynamic freq changes. |
18 | * ns = cycles / (freq / ns_per_sec) | ||
19 | * ns = cycles * (ns_per_sec / freq) | ||
20 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
21 | * ns = cycles * (10^6 / cpu_khz) | ||
22 | * | 18 | * |
23 | * Then we use scaling math (suggested by george@mvista.com) to get: | 19 | * Continuity means that when our frequency changes our slope (b); we want to |
24 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | 20 | * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t. |
25 | * ns = cycles * cyc2ns_scale / SC | ||
26 | * | 21 | * |
27 | * And since SC is a constant power of two, we can convert the div | 22 | * Without an offset (a) the above would not be possible. |
28 | * into a shift. | ||
29 | * | 23 | * |
30 | * We can use khz divisor instead of mhz to keep a better precision, since | 24 | * See the comment near cycles_2_ns() for details on how we compute (b). |
31 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
32 | * (mathieu.desnoyers@polymtl.ca) | ||
33 | * | ||
34 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
35 | * | ||
36 | * In: | ||
37 | * | ||
38 | * ns = cycles * cyc2ns_scale / SC | ||
39 | * | ||
40 | * Although we may still have enough bits to store the value of ns, | ||
41 | * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, | ||
42 | * leading to an incorrect result. | ||
43 | * | ||
44 | * To avoid this, we can decompose 'cycles' into quotient and remainder | ||
45 | * of division by SC. Then, | ||
46 | * | ||
47 | * ns = (quot * SC + rem) * cyc2ns_scale / SC | ||
48 | * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC | ||
49 | * | ||
50 | * - sqazi@google.com | ||
51 | */ | 25 | */ |
52 | 26 | struct cyc2ns_data { | |
53 | DECLARE_PER_CPU(unsigned long, cyc2ns); | 27 | u32 cyc2ns_mul; |
54 | DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); | 28 | u32 cyc2ns_shift; |
55 | 29 | u64 cyc2ns_offset; | |
56 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 30 | u32 __count; |
57 | 31 | /* u32 hole */ | |
58 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) | 32 | }; /* 24 bytes -- do not grow */ |
59 | { | 33 | |
60 | int cpu = smp_processor_id(); | 34 | extern struct cyc2ns_data *cyc2ns_read_begin(void); |
61 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); | 35 | extern void cyc2ns_read_end(struct cyc2ns_data *); |
62 | ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), | ||
63 | (1UL << CYC2NS_SCALE_FACTOR)); | ||
64 | return ns; | ||
65 | } | ||
66 | |||
67 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | ||
68 | { | ||
69 | unsigned long long ns; | ||
70 | unsigned long flags; | ||
71 | |||
72 | local_irq_save(flags); | ||
73 | ns = __cycles_2_ns(cyc); | ||
74 | local_irq_restore(flags); | ||
75 | |||
76 | return ns; | ||
77 | } | ||
78 | 36 | ||
79 | #endif /* _ASM_X86_TIMER_H */ | 37 | #endif /* _ASM_X86_TIMER_H */ |
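
The new comment above states the continuity requirement for the cycles->ns conversion: when the slope b (the multiplier) changes at cycle count t, the new offset must satisfy a + b*t == a' + b'*t, i.e. a' = a + (b - b')*t. A small numeric sketch of that rule with arbitrary values:

#include <stdio.h>

int main(void)
{
	double a  = 1000.0;	/* old offset (ns) */
	double b  = 0.5;	/* old ns per cycle */
	double b2 = 0.25;	/* new slope after a frequency change */
	double t  = 8000.0;	/* cycle count at the switch */

	double a2 = a + (b - b2) * t;	/* keep f continuous at t */

	printf("f(t)  = %.1f ns\n", a + b * t);		/* 5000.0 */
	printf("f'(t) = %.1f ns\n", a2 + b2 * t);	/* 5000.0 */
	return 0;
}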
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 235be70d5bb4..57ae63cd6ee2 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h | |||
@@ -65,4 +65,7 @@ extern int notsc_setup(char *); | |||
65 | extern void tsc_save_sched_clock_state(void); | 65 | extern void tsc_save_sched_clock_state(void); |
66 | extern void tsc_restore_sched_clock_state(void); | 66 | extern void tsc_restore_sched_clock_state(void); |
67 | 67 | ||
68 | /* MSR based TSC calibration for Intel Atom SoC platforms */ | ||
69 | int try_msr_calibrate_tsc(unsigned long *fast_calibrate); | ||
70 | |||
68 | #endif /* _ASM_X86_TSC_H */ | 71 | #endif /* _ASM_X86_TSC_H */ |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8ec57c07b125..0d592e0a5b84 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -40,22 +40,30 @@ | |||
40 | /* | 40 | /* |
41 | * Test whether a block of memory is a valid user space address. | 41 | * Test whether a block of memory is a valid user space address. |
42 | * Returns 0 if the range is valid, nonzero otherwise. | 42 | * Returns 0 if the range is valid, nonzero otherwise. |
43 | * | ||
44 | * This is equivalent to the following test: | ||
45 | * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) | ||
46 | * | ||
47 | * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... | ||
48 | */ | 43 | */ |
44 | static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) | ||
45 | { | ||
46 | /* | ||
47 | * If we have used "sizeof()" for the size, | ||
48 | * we know it won't overflow the limit (but | ||
49 | * it might overflow the 'addr', so it's | ||
50 | * important to subtract the size from the | ||
51 | * limit, not add it to the address). | ||
52 | */ | ||
53 | if (__builtin_constant_p(size)) | ||
54 | return addr > limit - size; | ||
55 | |||
56 | /* Arbitrary sizes? Be careful about overflow */ | ||
57 | addr += size; | ||
58 | if (addr < size) | ||
59 | return true; | ||
60 | return addr > limit; | ||
61 | } | ||
49 | 62 | ||
50 | #define __range_not_ok(addr, size, limit) \ | 63 | #define __range_not_ok(addr, size, limit) \ |
51 | ({ \ | 64 | ({ \ |
52 | unsigned long flag, roksum; \ | ||
53 | __chk_user_ptr(addr); \ | 65 | __chk_user_ptr(addr); \ |
54 | asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ | 66 | __chk_range_not_ok((unsigned long __force)(addr), size, limit); \ |
55 | : "=&r" (flag), "=r" (roksum) \ | ||
56 | : "1" (addr), "g" ((long)(size)), \ | ||
57 | "rm" (limit)); \ | ||
58 | flag; \ | ||
59 | }) | 67 | }) |
60 | 68 | ||
61 | /** | 69 | /** |
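The new __chk_range_not_ok() above replaces the old 33/65-bit asm trick with plain C; the key point is that addr + size may wrap, and a wrap must count as a failure. A standalone sketch of the variable-size path, with an illustrative limit rather than the kernel's real TASK_SIZE values:

#include <stdbool.h>
#include <stdio.h>

/* Return true when [addr, addr + size) is NOT entirely below 'limit'.
 * The addition may wrap around the top of the address space, and the
 * wrap itself must be treated as "not ok". */
static bool range_not_ok(unsigned long addr, unsigned long size,
			 unsigned long limit)
{
	addr += size;
	if (addr < size)	/* wrapped */
		return true;
	return addr > limit;
}

int main(void)
{
	unsigned long limit = 0xc0000000UL;	/* illustrative user-space limit */

	printf("%d\n", range_not_ok(0xbfffff00UL, 0x100, limit));	/* 0: fits exactly */
	printf("%d\n", range_not_ok(0xbfffff00UL, 0x200, limit));	/* 1: runs past the limit */
	printf("%d\n", range_not_ok(~0UL - 0x10,  0x100, limit));	/* 1: addr + size wraps */
	return 0;
}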
@@ -78,7 +86,7 @@ | |||
78 | * this function, memory access functions may still return -EFAULT. | 86 | * this function, memory access functions may still return -EFAULT. |
79 | */ | 87 | */ |
80 | #define access_ok(type, addr, size) \ | 88 | #define access_ok(type, addr, size) \ |
81 | (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) | 89 | likely(!__range_not_ok(addr, size, user_addr_max())) |
82 | 90 | ||
83 | /* | 91 | /* |
84 | * The exception table consists of pairs of addresses relative to the | 92 | * The exception table consists of pairs of addresses relative to the |
@@ -525,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n); | |||
525 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); | 533 | unsigned long __must_check clear_user(void __user *mem, unsigned long len); |
526 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); | 534 | unsigned long __must_check __clear_user(void __user *mem, unsigned long len); |
527 | 535 | ||
536 | extern void __cmpxchg_wrong_size(void) | ||
537 | __compiletime_error("Bad argument size for cmpxchg"); | ||
538 | |||
539 | #define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \ | ||
540 | ({ \ | ||
541 | int __ret = 0; \ | ||
542 | __typeof__(ptr) __uval = (uval); \ | ||
543 | __typeof__(*(ptr)) __old = (old); \ | ||
544 | __typeof__(*(ptr)) __new = (new); \ | ||
545 | switch (size) { \ | ||
546 | case 1: \ | ||
547 | { \ | ||
548 | asm volatile("\t" ASM_STAC "\n" \ | ||
549 | "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \ | ||
550 | "2:\t" ASM_CLAC "\n" \ | ||
551 | "\t.section .fixup, \"ax\"\n" \ | ||
552 | "3:\tmov %3, %0\n" \ | ||
553 | "\tjmp 2b\n" \ | ||
554 | "\t.previous\n" \ | ||
555 | _ASM_EXTABLE(1b, 3b) \ | ||
556 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
557 | : "i" (-EFAULT), "q" (__new), "1" (__old) \ | ||
558 | : "memory" \ | ||
559 | ); \ | ||
560 | break; \ | ||
561 | } \ | ||
562 | case 2: \ | ||
563 | { \ | ||
564 | asm volatile("\t" ASM_STAC "\n" \ | ||
565 | "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \ | ||
566 | "2:\t" ASM_CLAC "\n" \ | ||
567 | "\t.section .fixup, \"ax\"\n" \ | ||
568 | "3:\tmov %3, %0\n" \ | ||
569 | "\tjmp 2b\n" \ | ||
570 | "\t.previous\n" \ | ||
571 | _ASM_EXTABLE(1b, 3b) \ | ||
572 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
573 | : "i" (-EFAULT), "r" (__new), "1" (__old) \ | ||
574 | : "memory" \ | ||
575 | ); \ | ||
576 | break; \ | ||
577 | } \ | ||
578 | case 4: \ | ||
579 | { \ | ||
580 | asm volatile("\t" ASM_STAC "\n" \ | ||
581 | "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \ | ||
582 | "2:\t" ASM_CLAC "\n" \ | ||
583 | "\t.section .fixup, \"ax\"\n" \ | ||
584 | "3:\tmov %3, %0\n" \ | ||
585 | "\tjmp 2b\n" \ | ||
586 | "\t.previous\n" \ | ||
587 | _ASM_EXTABLE(1b, 3b) \ | ||
588 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
589 | : "i" (-EFAULT), "r" (__new), "1" (__old) \ | ||
590 | : "memory" \ | ||
591 | ); \ | ||
592 | break; \ | ||
593 | } \ | ||
594 | case 8: \ | ||
595 | { \ | ||
596 | if (!IS_ENABLED(CONFIG_X86_64)) \ | ||
597 | __cmpxchg_wrong_size(); \ | ||
598 | \ | ||
599 | asm volatile("\t" ASM_STAC "\n" \ | ||
600 | "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \ | ||
601 | "2:\t" ASM_CLAC "\n" \ | ||
602 | "\t.section .fixup, \"ax\"\n" \ | ||
603 | "3:\tmov %3, %0\n" \ | ||
604 | "\tjmp 2b\n" \ | ||
605 | "\t.previous\n" \ | ||
606 | _ASM_EXTABLE(1b, 3b) \ | ||
607 | : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \ | ||
608 | : "i" (-EFAULT), "r" (__new), "1" (__old) \ | ||
609 | : "memory" \ | ||
610 | ); \ | ||
611 | break; \ | ||
612 | } \ | ||
613 | default: \ | ||
614 | __cmpxchg_wrong_size(); \ | ||
615 | } \ | ||
616 | *__uval = __old; \ | ||
617 | __ret; \ | ||
618 | }) | ||
619 | |||
620 | #define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \ | ||
621 | ({ \ | ||
622 | access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ | ||
623 | __user_atomic_cmpxchg_inatomic((uval), (ptr), \ | ||
624 | (old), (new), sizeof(*(ptr))) : \ | ||
625 | -EFAULT; \ | ||
626 | }) | ||
627 | |||
528 | /* | 628 | /* |
529 | * movsl can be slow when source and dest are not both 8-byte aligned | 629 | * movsl can be slow when source and dest are not both 8-byte aligned |
530 | */ | 630 | */ |
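The new user_atomic_cmpxchg_inatomic() above gives cmpxchg semantics on a user pointer: the caller gets back the value actually found at *ptr, and the macro's return value is 0 or -EFAULT for the access itself. A plain-C sketch of just those semantics (no fault handling, no SMAP toggling, no user/kernel split), using the GCC __atomic builtin:

#include <stdint.h>
#include <stdio.h>

/* *uval receives the value observed at *ptr; the store of 'new' happens
 * only if that value equalled 'old'.  The return value mirrors the kernel
 * macro: 0 for a completed access (-EFAULT cannot happen here). */
static int cmpxchg_semantics(uint32_t *uval, uint32_t *ptr,
			     uint32_t old, uint32_t new)
{
	uint32_t expected = old;

	__atomic_compare_exchange_n(ptr, &expected, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	*uval = expected;
	return 0;
}

int main(void)
{
	uint32_t word = 7, seen;

	cmpxchg_semantics(&seen, &word, 7, 8);
	if (seen == 7)
		printf("exchange done, word is now %u\n", word);	/* prints 8 */
	return 0;
}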
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 190413d0de57..12a26b979bf1 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -204,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
204 | static __must_check __always_inline int | 204 | static __must_check __always_inline int |
205 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) | 205 | __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) |
206 | { | 206 | { |
207 | return __copy_from_user_nocheck(dst, (__force const void *)src, size); | 207 | return __copy_from_user_nocheck(dst, src, size); |
208 | } | 208 | } |
209 | 209 | ||
210 | static __must_check __always_inline int | 210 | static __must_check __always_inline int |
211 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 211 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
212 | { | 212 | { |
213 | return __copy_to_user_nocheck((__force void *)dst, src, size); | 213 | return __copy_to_user_nocheck(dst, src, size); |
214 | } | 214 | } |
215 | 215 | ||
216 | extern long __copy_user_nocache(void *dst, const void __user *src, | 216 | extern long __copy_user_nocache(void *dst, const void __user *src, |
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 0415cdabb5a6..554738963b28 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #define XSTATE_FP 0x1 | 9 | #define XSTATE_FP 0x1 |
10 | #define XSTATE_SSE 0x2 | 10 | #define XSTATE_SSE 0x2 |
11 | #define XSTATE_YMM 0x4 | 11 | #define XSTATE_YMM 0x4 |
12 | #define XSTATE_BNDREGS 0x8 | ||
13 | #define XSTATE_BNDCSR 0x10 | ||
12 | 14 | ||
13 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) | 15 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) |
14 | 16 | ||
@@ -20,10 +22,14 @@ | |||
20 | #define XSAVE_YMM_SIZE 256 | 22 | #define XSAVE_YMM_SIZE 256 |
21 | #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) | 23 | #define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET) |
22 | 24 | ||
23 | /* | 25 | /* Supported features which support lazy state saving */ |
24 | * These are the features that the OS can handle currently. | 26 | #define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) |
25 | */ | 27 | |
26 | #define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) | 28 | /* Supported features which require eager state saving */ |
29 | #define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR) | ||
30 | |||
31 | /* All currently supported features */ | ||
32 | #define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER) | ||
27 | 33 | ||
28 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
29 | #define REX_PREFIX "0x48, " | 35 | #define REX_PREFIX "0x48, " |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 9c3733c5f8f7..225b0988043a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #define SETUP_E820_EXT 1 | 6 | #define SETUP_E820_EXT 1 |
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI 4 | ||
9 | 10 | ||
10 | /* ram_size flags */ | 11 | /* ram_size flags */ |
11 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 12 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
@@ -23,6 +24,7 @@ | |||
23 | #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) | 24 | #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) |
24 | #define XLF_EFI_HANDOVER_32 (1<<2) | 25 | #define XLF_EFI_HANDOVER_32 (1<<2) |
25 | #define XLF_EFI_HANDOVER_64 (1<<3) | 26 | #define XLF_EFI_HANDOVER_64 (1<<3) |
27 | #define XLF_EFI_KEXEC (1<<4) | ||
26 | 28 | ||
27 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
28 | 30 | ||
diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h index 7b3ddc348585..bc03eb5d6360 100644 --- a/arch/x86/include/uapi/asm/stat.h +++ b/arch/x86/include/uapi/asm/stat.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_STAT_H | 1 | #ifndef _ASM_X86_STAT_H |
2 | #define _ASM_X86_STAT_H | 2 | #define _ASM_X86_STAT_H |
3 | 3 | ||
4 | #include <asm/posix_types.h> | ||
5 | |||
4 | #define STAT_HAVE_NSEC 1 | 6 | #define STAT_HAVE_NSEC 1 |
5 | 7 | ||
6 | #ifdef __i386__ | 8 | #ifdef __i386__ |
@@ -78,26 +80,26 @@ struct stat64 { | |||
78 | #else /* __i386__ */ | 80 | #else /* __i386__ */ |
79 | 81 | ||
80 | struct stat { | 82 | struct stat { |
81 | unsigned long st_dev; | 83 | __kernel_ulong_t st_dev; |
82 | unsigned long st_ino; | 84 | __kernel_ulong_t st_ino; |
83 | unsigned long st_nlink; | 85 | __kernel_ulong_t st_nlink; |
84 | 86 | ||
85 | unsigned int st_mode; | 87 | unsigned int st_mode; |
86 | unsigned int st_uid; | 88 | unsigned int st_uid; |
87 | unsigned int st_gid; | 89 | unsigned int st_gid; |
88 | unsigned int __pad0; | 90 | unsigned int __pad0; |
89 | unsigned long st_rdev; | 91 | __kernel_ulong_t st_rdev; |
90 | long st_size; | 92 | __kernel_long_t st_size; |
91 | long st_blksize; | 93 | __kernel_long_t st_blksize; |
92 | long st_blocks; /* Number 512-byte blocks allocated. */ | 94 | __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */ |
93 | 95 | ||
94 | unsigned long st_atime; | 96 | __kernel_ulong_t st_atime; |
95 | unsigned long st_atime_nsec; | 97 | __kernel_ulong_t st_atime_nsec; |
96 | unsigned long st_mtime; | 98 | __kernel_ulong_t st_mtime; |
97 | unsigned long st_mtime_nsec; | 99 | __kernel_ulong_t st_mtime_nsec; |
98 | unsigned long st_ctime; | 100 | __kernel_ulong_t st_ctime; |
99 | unsigned long st_ctime_nsec; | 101 | __kernel_ulong_t st_ctime_nsec; |
100 | long __unused[3]; | 102 | __kernel_long_t __unused[3]; |
101 | }; | 103 | }; |
102 | 104 | ||
103 | /* We don't need to memset the whole thing just to initialize the padding */ | 105 | /* We don't need to memset the whole thing just to initialize the padding */ |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9b0a34e2cd79..cb648c84b327 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -29,10 +29,11 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | |||
29 | obj-y += syscall_$(BITS).o | 29 | obj-y += syscall_$(BITS).o |
30 | obj-$(CONFIG_X86_64) += vsyscall_64.o | 30 | obj-$(CONFIG_X86_64) += vsyscall_64.o |
31 | obj-$(CONFIG_X86_64) += vsyscall_emu_64.o | 31 | obj-$(CONFIG_X86_64) += vsyscall_emu_64.o |
32 | obj-$(CONFIG_SYSFS) += ksysfs.o | ||
32 | obj-y += bootflag.o e820.o | 33 | obj-y += bootflag.o e820.o |
33 | obj-y += pci-dma.o quirks.o topology.o kdebugfs.o | 34 | obj-y += pci-dma.o quirks.o topology.o kdebugfs.o |
34 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o | 35 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o |
35 | obj-y += tsc.o io_delay.o rtc.o | 36 | obj-y += tsc.o tsc_msr.o io_delay.o rtc.o |
36 | obj-y += pci-iommu_table.o | 37 | obj-y += pci-iommu_table.o |
37 | obj-y += resource.o | 38 | obj-y += resource.o |
38 | 39 | ||
@@ -91,15 +92,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o | |||
91 | 92 | ||
92 | obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o | 93 | obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o |
93 | 94 | ||
94 | obj-$(CONFIG_MICROCODE_EARLY) += microcode_core_early.o | ||
95 | obj-$(CONFIG_MICROCODE_INTEL_EARLY) += microcode_intel_early.o | ||
96 | obj-$(CONFIG_MICROCODE_INTEL_LIB) += microcode_intel_lib.o | ||
97 | microcode-y := microcode_core.o | ||
98 | microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o | ||
99 | microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o | ||
100 | obj-$(CONFIG_MICROCODE_AMD_EARLY) += microcode_amd_early.o | ||
101 | obj-$(CONFIG_MICROCODE) += microcode.o | ||
102 | |||
103 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | 95 | obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o |
104 | 96 | ||
105 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 97 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
@@ -111,6 +103,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o | |||
111 | 103 | ||
112 | obj-$(CONFIG_PERF_EVENTS) += perf_regs.o | 104 | obj-$(CONFIG_PERF_EVENTS) += perf_regs.o |
113 | obj-$(CONFIG_TRACING) += tracepoint.o | 105 | obj-$(CONFIG_TRACING) += tracepoint.o |
106 | obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o | ||
114 | 107 | ||
115 | ### | 108 | ### |
116 | # 64 bit specific files | 109 | # 64 bit specific files |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index d2b7f27781bc..e69182fd01cf 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, | |||
150 | } | 150 | } |
151 | EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); | 151 | EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); |
152 | 152 | ||
153 | /* | ||
154 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
155 | * which can obviate IPI to trigger checking of need_resched. | ||
156 | * We execute MONITOR against need_resched and enter optimized wait state | ||
157 | * through MWAIT. Whenever someone changes need_resched, we would be woken | ||
158 | * up from MWAIT (without an IPI). | ||
159 | * | ||
160 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
161 | * capability. | ||
162 | */ | ||
163 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | ||
164 | { | ||
165 | if (!need_resched()) { | ||
166 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) | ||
167 | clflush((void *)¤t_thread_info()->flags); | ||
168 | |||
169 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
170 | smp_mb(); | ||
171 | if (!need_resched()) | ||
172 | __mwait(ax, cx); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) | 153 | void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) |
177 | { | 154 | { |
178 | unsigned int cpu = smp_processor_id(); | 155 | unsigned int cpu = smp_processor_id(); |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index d278736bf774..7f26c9a70a9e 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -75,6 +75,13 @@ unsigned int max_physical_apicid; | |||
75 | physid_mask_t phys_cpu_present_map; | 75 | physid_mask_t phys_cpu_present_map; |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * Processor to be disabled specified by kernel parameter | ||
79 | * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to | ||
80 | * avoid undefined behaviour caused by sending INIT from AP to BSP. | ||
81 | */ | ||
82 | static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID; | ||
83 | |||
84 | /* | ||
78 | * Map cpu index to physical APIC ID | 85 | * Map cpu index to physical APIC ID |
79 | */ | 86 | */ |
80 | DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID); | 87 | DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID); |
@@ -1968,7 +1975,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs) | |||
1968 | */ | 1975 | */ |
1969 | static inline void __smp_error_interrupt(struct pt_regs *regs) | 1976 | static inline void __smp_error_interrupt(struct pt_regs *regs) |
1970 | { | 1977 | { |
1971 | u32 v0, v1; | 1978 | u32 v; |
1972 | u32 i = 0; | 1979 | u32 i = 0; |
1973 | static const char * const error_interrupt_reason[] = { | 1980 | static const char * const error_interrupt_reason[] = { |
1974 | "Send CS error", /* APIC Error Bit 0 */ | 1981 | "Send CS error", /* APIC Error Bit 0 */ |
@@ -1982,21 +1989,20 @@ static inline void __smp_error_interrupt(struct pt_regs *regs) | |||
1982 | }; | 1989 | }; |
1983 | 1990 | ||
1984 | /* First tickle the hardware, only then report what went on. -- REW */ | 1991 | /* First tickle the hardware, only then report what went on. -- REW */ |
1985 | v0 = apic_read(APIC_ESR); | ||
1986 | apic_write(APIC_ESR, 0); | 1992 | apic_write(APIC_ESR, 0); |
1987 | v1 = apic_read(APIC_ESR); | 1993 | v = apic_read(APIC_ESR); |
1988 | ack_APIC_irq(); | 1994 | ack_APIC_irq(); |
1989 | atomic_inc(&irq_err_count); | 1995 | atomic_inc(&irq_err_count); |
1990 | 1996 | ||
1991 | apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)", | 1997 | apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x", |
1992 | smp_processor_id(), v0 , v1); | 1998 | smp_processor_id(), v); |
1993 | 1999 | ||
1994 | v1 = v1 & 0xff; | 2000 | v &= 0xff; |
1995 | while (v1) { | 2001 | while (v) { |
1996 | if (v1 & 0x1) | 2002 | if (v & 0x1) |
1997 | apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); | 2003 | apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); |
1998 | i++; | 2004 | i++; |
1999 | v1 >>= 1; | 2005 | v >>= 1; |
2000 | } | 2006 | } |
2001 | 2007 | ||
2002 | apic_printk(APIC_DEBUG, KERN_CONT "\n"); | 2008 | apic_printk(APIC_DEBUG, KERN_CONT "\n"); |
@@ -2115,6 +2121,39 @@ int generic_processor_info(int apicid, int version) | |||
2115 | phys_cpu_present_map); | 2121 | phys_cpu_present_map); |
2116 | 2122 | ||
2117 | /* | 2123 | /* |
2124 | * boot_cpu_physical_apicid is designed to have the apicid | ||
2125 | * returned by read_apic_id(), i.e, the apicid of the | ||
2126 | * currently booting-up processor. However, on some platforms, | ||
2127 | * it is temporarily modified by the apicid reported as BSP | ||
2128 | * through MP table. Concretely: | ||
2129 | * | ||
2130 | * - arch/x86/kernel/mpparse.c: MP_processor_info() | ||
2131 | * - arch/x86/mm/amdtopology.c: amd_numa_init() | ||
2132 | * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info() | ||
2133 | * | ||
2134 | * This function is executed with the modified | ||
2135 | * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel | ||
2136 | * parameter doesn't work to disable APs on kdump 2nd kernel. | ||
2137 | * | ||
2138 | * Since fixing handling of boot_cpu_physical_apicid requires | ||
2139 | * another discussion and tests on each platform, we leave it | ||
2140 | * for now and here we use read_apic_id() directly in this | ||
2141 | * function, generic_processor_info(). | ||
2142 | */ | ||
2143 | if (disabled_cpu_apicid != BAD_APICID && | ||
2144 | disabled_cpu_apicid != read_apic_id() && | ||
2145 | disabled_cpu_apicid == apicid) { | ||
2146 | int thiscpu = num_processors + disabled_cpus; | ||
2147 | |||
2148 | pr_warning("APIC: Disabling requested cpu." | ||
2149 | " Processor %d/0x%x ignored.\n", | ||
2150 | thiscpu, apicid); | ||
2151 | |||
2152 | disabled_cpus++; | ||
2153 | return -ENODEV; | ||
2154 | } | ||
2155 | |||
2156 | /* | ||
2118 | * If boot cpu has not been detected yet, then only allow upto | 2157 | * If boot cpu has not been detected yet, then only allow upto |
2119 | * nr_cpu_ids - 1 processors and keep one slot free for boot cpu | 2158 | * nr_cpu_ids - 1 processors and keep one slot free for boot cpu |
2120 | */ | 2159 | */ |
@@ -2592,3 +2631,12 @@ static int __init lapic_insert_resource(void) | |||
2592 | * that is using request_resource | 2631 | * that is using request_resource |
2593 | */ | 2632 | */ |
2594 | late_initcall(lapic_insert_resource); | 2633 | late_initcall(lapic_insert_resource); |
2634 | |||
2635 | static int __init apic_set_disabled_cpu_apicid(char *arg) | ||
2636 | { | ||
2637 | if (!arg || !get_option(&arg, &disabled_cpu_apicid)) | ||
2638 | return -EINVAL; | ||
2639 | |||
2640 | return 0; | ||
2641 | } | ||
2642 | early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid); | ||
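The comment and the early_param() handler above add a disable_cpu_apicid=<int> boot parameter, mainly so a kdump second kernel can refuse to register the original BSP. A userspace sketch of the two pieces, the parsing and the skip decision; the BAD_APICID value here is illustrative, the kernel's definition lives in its APIC headers:

#include <stdio.h>
#include <stdlib.h>

#define BAD_APICID 0xFFFFu	/* illustrative stand-in for the kernel's constant */

static unsigned int disabled_cpu_apicid = BAD_APICID;

/* Stand-in for the early_param() handler: parse the argument of
 * "disable_cpu_apicid=<n>" taken from the kernel command line. */
static int set_disabled_cpu_apicid(const char *arg)
{
	char *end;

	if (!arg)
		return -1;
	disabled_cpu_apicid = (unsigned int)strtoul(arg, &end, 0);
	return (end == arg) ? -1 : 0;
}

/* The registration-time check, reduced to its decision: ignore a CPU whose
 * APIC ID matches the parameter, unless it is the CPU we are running on. */
static int should_ignore(unsigned int apicid, unsigned int this_apicid)
{
	return disabled_cpu_apicid != BAD_APICID &&
	       disabled_cpu_apicid != this_apicid &&
	       disabled_cpu_apicid == apicid;
}

int main(void)
{
	set_disabled_cpu_apicid("0");	/* e.g. a kdump kernel booted with disable_cpu_apicid=0 */
	printf("ignore apicid 0 while running on apicid 4? %d\n", should_ignore(0, 4));
	return 0;
}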
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 00c77cf78e9e..5d5b9eb2b7a4 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/init.h> | ||
18 | #include <linux/hardirq.h> | 17 | #include <linux/hardirq.h> |
19 | #include <linux/module.h> | 18 | #include <linux/module.h> |
20 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index e145f28b4099..191ce75c0e54 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/ctype.h> | 17 | #include <linux/ctype.h> |
18 | #include <linux/init.h> | ||
19 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
20 | #include <asm/fixmap.h> | 19 | #include <asm/fixmap.h> |
21 | #include <asm/mpspec.h> | 20 | #include <asm/mpspec.h> |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e63a5bd2a78f..a43f068ebec1 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1142,9 +1142,10 @@ next: | |||
1142 | if (test_bit(vector, used_vectors)) | 1142 | if (test_bit(vector, used_vectors)) |
1143 | goto next; | 1143 | goto next; |
1144 | 1144 | ||
1145 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) | 1145 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { |
1146 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 1146 | if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED) |
1147 | goto next; | 1147 | goto next; |
1148 | } | ||
1148 | /* Found one! */ | 1149 | /* Found one! */ |
1149 | current_vector = vector; | 1150 | current_vector = vector; |
1150 | current_offset = offset; | 1151 | current_offset = offset; |
@@ -1183,7 +1184,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
1183 | 1184 | ||
1184 | vector = cfg->vector; | 1185 | vector = cfg->vector; |
1185 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) | 1186 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) |
1186 | per_cpu(vector_irq, cpu)[vector] = -1; | 1187 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; |
1187 | 1188 | ||
1188 | cfg->vector = 0; | 1189 | cfg->vector = 0; |
1189 | cpumask_clear(cfg->domain); | 1190 | cpumask_clear(cfg->domain); |
@@ -1191,11 +1192,10 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | |||
1191 | if (likely(!cfg->move_in_progress)) | 1192 | if (likely(!cfg->move_in_progress)) |
1192 | return; | 1193 | return; |
1193 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { | 1194 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { |
1194 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | 1195 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
1195 | vector++) { | ||
1196 | if (per_cpu(vector_irq, cpu)[vector] != irq) | 1196 | if (per_cpu(vector_irq, cpu)[vector] != irq) |
1197 | continue; | 1197 | continue; |
1198 | per_cpu(vector_irq, cpu)[vector] = -1; | 1198 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; |
1199 | break; | 1199 | break; |
1200 | } | 1200 | } |
1201 | } | 1201 | } |
@@ -1228,12 +1228,12 @@ void __setup_vector_irq(int cpu) | |||
1228 | /* Mark the free vectors */ | 1228 | /* Mark the free vectors */ |
1229 | for (vector = 0; vector < NR_VECTORS; ++vector) { | 1229 | for (vector = 0; vector < NR_VECTORS; ++vector) { |
1230 | irq = per_cpu(vector_irq, cpu)[vector]; | 1230 | irq = per_cpu(vector_irq, cpu)[vector]; |
1231 | if (irq < 0) | 1231 | if (irq <= VECTOR_UNDEFINED) |
1232 | continue; | 1232 | continue; |
1233 | 1233 | ||
1234 | cfg = irq_cfg(irq); | 1234 | cfg = irq_cfg(irq); |
1235 | if (!cpumask_test_cpu(cpu, cfg->domain)) | 1235 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1236 | per_cpu(vector_irq, cpu)[vector] = -1; | 1236 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; |
1237 | } | 1237 | } |
1238 | raw_spin_unlock(&vector_lock); | 1238 | raw_spin_unlock(&vector_lock); |
1239 | } | 1239 | } |
@@ -2202,13 +2202,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2202 | 2202 | ||
2203 | me = smp_processor_id(); | 2203 | me = smp_processor_id(); |
2204 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 2204 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
2205 | unsigned int irq; | 2205 | int irq; |
2206 | unsigned int irr; | 2206 | unsigned int irr; |
2207 | struct irq_desc *desc; | 2207 | struct irq_desc *desc; |
2208 | struct irq_cfg *cfg; | 2208 | struct irq_cfg *cfg; |
2209 | irq = __this_cpu_read(vector_irq[vector]); | 2209 | irq = __this_cpu_read(vector_irq[vector]); |
2210 | 2210 | ||
2211 | if (irq == -1) | 2211 | if (irq <= VECTOR_UNDEFINED) |
2212 | continue; | 2212 | continue; |
2213 | 2213 | ||
2214 | desc = irq_to_desc(irq); | 2214 | desc = irq_to_desc(irq); |
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 7434d8556d09..62071569bd50 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <linux/cpumask.h> | 1 | #include <linux/cpumask.h> |
2 | #include <linux/interrupt.h> | 2 | #include <linux/interrupt.h> |
3 | #include <linux/init.h> | ||
4 | 3 | ||
5 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
6 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 77c95c0e1bf7..00146f9b0254 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #define pr_fmt(fmt) "summit: %s: " fmt, __func__ | 29 | #define pr_fmt(fmt) "summit: %s: " fmt, __func__ |
30 | 30 | ||
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/init.h> | ||
33 | #include <asm/io.h> | 32 | #include <asm/io.h> |
34 | #include <asm/bios_ebda.h> | 33 | #include <asm/bios_ebda.h> |
35 | 34 | ||
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 140e29db478d..cac85ee6913f 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -3,7 +3,6 @@ | |||
3 | #include <linux/string.h> | 3 | #include <linux/string.h> |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/ctype.h> | 5 | #include <linux/ctype.h> |
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | 6 | #include <linux/dmar.h> |
8 | #include <linux/cpu.h> | 7 | #include <linux/cpu.h> |
9 | 8 | ||
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 562a76d433c8..de231e328cae 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -3,7 +3,6 @@ | |||
3 | #include <linux/string.h> | 3 | #include <linux/string.h> |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/ctype.h> | 5 | #include <linux/ctype.h> |
6 | #include <linux/init.h> | ||
7 | #include <linux/dmar.h> | 6 | #include <linux/dmar.h> |
8 | 7 | ||
9 | #include <asm/smp.h> | 8 | #include <asm/smp.h> |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 47b56a7e99cb..7fd54f09b011 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -36,12 +36,13 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o | |||
36 | endif | 36 | endif |
37 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o | 37 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o |
38 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o | 38 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o |
39 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o | 39 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o |
40 | endif | 40 | endif |
41 | 41 | ||
42 | 42 | ||
43 | obj-$(CONFIG_X86_MCE) += mcheck/ | 43 | obj-$(CONFIG_X86_MCE) += mcheck/ |
44 | obj-$(CONFIG_MTRR) += mtrr/ | 44 | obj-$(CONFIG_MTRR) += mtrr/ |
45 | obj-$(CONFIG_MICROCODE) += microcode/ | ||
45 | 46 | ||
46 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o | 47 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o |
47 | 48 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 96abccaada33..c67ffa686064 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
2 | #include <linux/init.h> | ||
3 | #include <linux/bitops.h> | 2 | #include <linux/bitops.h> |
4 | #include <linux/elf.h> | 3 | #include <linux/elf.h> |
5 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
@@ -487,7 +486,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) | |||
487 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 486 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
488 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | 487 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
489 | if (!check_tsc_unstable()) | 488 | if (!check_tsc_unstable()) |
490 | sched_clock_stable = 1; | 489 | set_sched_clock_stable(); |
491 | } | 490 | } |
492 | 491 | ||
493 | #ifdef CONFIG_X86_64 | 492 | #ifdef CONFIG_X86_64 |
@@ -797,14 +796,10 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) | |||
797 | } | 796 | } |
798 | 797 | ||
799 | /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ | 798 | /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */ |
800 | if (!((eax >> 16) & mask)) { | 799 | if (!((eax >> 16) & mask)) |
801 | u32 a, b, c, d; | 800 | tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff; |
802 | 801 | else | |
803 | cpuid(0x80000005, &a, &b, &c, &d); | ||
804 | tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff; | ||
805 | } else { | ||
806 | tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; | 802 | tlb_lld_2m[ENTRIES] = (eax >> 16) & mask; |
807 | } | ||
808 | 803 | ||
809 | /* a 4M entry uses two 2M entries */ | 804 | /* a 4M entry uses two 2M entries */ |
810 | tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; | 805 | tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1; |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 8d5652dc99dd..8779edab684e 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <linux/bitops.h> | 1 | #include <linux/bitops.h> |
2 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/init.h> | ||
4 | 3 | ||
5 | #include <asm/processor.h> | 4 | #include <asm/processor.h> |
6 | #include <asm/e820.h> | 5 | #include <asm/e820.h> |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6abc172b8258..24b6fd10625a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -472,6 +472,7 @@ u16 __read_mostly tlb_lli_4m[NR_INFO]; | |||
472 | u16 __read_mostly tlb_lld_4k[NR_INFO]; | 472 | u16 __read_mostly tlb_lld_4k[NR_INFO]; |
473 | u16 __read_mostly tlb_lld_2m[NR_INFO]; | 473 | u16 __read_mostly tlb_lld_2m[NR_INFO]; |
474 | u16 __read_mostly tlb_lld_4m[NR_INFO]; | 474 | u16 __read_mostly tlb_lld_4m[NR_INFO]; |
475 | u16 __read_mostly tlb_lld_1g[NR_INFO]; | ||
475 | 476 | ||
476 | /* | 477 | /* |
477 | * tlb_flushall_shift shows the balance point in replacing cr3 write | 478 | * tlb_flushall_shift shows the balance point in replacing cr3 write |
@@ -486,13 +487,13 @@ void cpu_detect_tlb(struct cpuinfo_x86 *c) | |||
486 | if (this_cpu->c_detect_tlb) | 487 | if (this_cpu->c_detect_tlb) |
487 | this_cpu->c_detect_tlb(c); | 488 | this_cpu->c_detect_tlb(c); |
488 | 489 | ||
489 | printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \ | 490 | printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" |
490 | "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \ | 491 | "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n" |
491 | "tlb_flushall_shift: %d\n", | 492 | "tlb_flushall_shift: %d\n", |
492 | tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], | 493 | tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], |
493 | tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], | 494 | tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], |
494 | tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], | 495 | tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], |
495 | tlb_flushall_shift); | 496 | tlb_lld_1g[ENTRIES], tlb_flushall_shift); |
496 | } | 497 | } |
497 | 498 | ||
498 | void detect_ht(struct cpuinfo_x86 *c) | 499 | void detect_ht(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index d0969c75ab54..aaf152e79637 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -1,4 +1,3 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/bitops.h> | 1 | #include <linux/bitops.h> |
3 | #include <linux/delay.h> | 2 | #include <linux/delay.h> |
4 | #include <linux/pci.h> | 3 | #include <linux/pci.h> |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index d358a3928b8f..5cd9bfabd645 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -1,4 +1,3 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
3 | 2 | ||
4 | #include <linux/string.h> | 3 | #include <linux/string.h> |
@@ -93,7 +92,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) | |||
93 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 92 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
94 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | 93 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
95 | if (!check_tsc_unstable()) | 94 | if (!check_tsc_unstable()) |
96 | sched_clock_stable = 1; | 95 | set_sched_clock_stable(); |
97 | } | 96 | } |
98 | 97 | ||
99 | /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ | 98 | /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ |
@@ -506,6 +505,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) | |||
506 | #define TLB_DATA0_2M_4M 0x23 | 505 | #define TLB_DATA0_2M_4M 0x23 |
507 | 506 | ||
508 | #define STLB_4K 0x41 | 507 | #define STLB_4K 0x41 |
508 | #define STLB_4K_2M 0x42 | ||
509 | 509 | ||
510 | static const struct _tlb_table intel_tlb_table[] = { | 510 | static const struct _tlb_table intel_tlb_table[] = { |
511 | { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, | 511 | { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, |
@@ -526,13 +526,20 @@ static const struct _tlb_table intel_tlb_table[] = { | |||
526 | { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, | 526 | { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, |
527 | { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, | 527 | { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, |
528 | { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, | 528 | { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, |
529 | { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, | ||
530 | { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, | ||
531 | { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, | ||
529 | { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, | 532 | { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, |
530 | { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, | 533 | { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, |
531 | { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, | 534 | { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, |
532 | { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, | 535 | { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, |
533 | { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, | 536 | { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, |
537 | { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, | ||
538 | { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, | ||
534 | { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, | 539 | { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, |
535 | { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, | 540 | { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, |
541 | { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, | ||
542 | { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" }, | ||
536 | { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, | 543 | { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, |
537 | { 0x00, 0, 0 } | 544 | { 0x00, 0, 0 } |
538 | }; | 545 | }; |
@@ -558,6 +565,20 @@ static void intel_tlb_lookup(const unsigned char desc) | |||
558 | if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) | 565 | if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) |
559 | tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; | 566 | tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; |
560 | break; | 567 | break; |
568 | case STLB_4K_2M: | ||
569 | if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) | ||
570 | tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; | ||
571 | if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) | ||
572 | tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; | ||
573 | if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) | ||
574 | tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; | ||
575 | if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) | ||
576 | tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; | ||
577 | if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) | ||
578 | tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; | ||
579 | if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) | ||
580 | tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; | ||
581 | break; | ||
561 | case TLB_INST_ALL: | 582 | case TLB_INST_ALL: |
562 | if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) | 583 | if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) |
563 | tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; | 584 | tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; |
@@ -603,6 +624,10 @@ static void intel_tlb_lookup(const unsigned char desc) | |||
603 | if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) | 624 | if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) |
604 | tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; | 625 | tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; |
605 | break; | 626 | break; |
627 | case TLB_DATA_1G: | ||
628 | if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) | ||
629 | tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; | ||
630 | break; | ||
606 | } | 631 | } |
607 | } | 632 | } |
608 | 633 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c index de8b60a53f69..a1aef9533154 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-apei.c +++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c | |||
@@ -33,22 +33,28 @@ | |||
33 | #include <linux/acpi.h> | 33 | #include <linux/acpi.h> |
34 | #include <linux/cper.h> | 34 | #include <linux/cper.h> |
35 | #include <acpi/apei.h> | 35 | #include <acpi/apei.h> |
36 | #include <acpi/ghes.h> | ||
36 | #include <asm/mce.h> | 37 | #include <asm/mce.h> |
37 | 38 | ||
38 | #include "mce-internal.h" | 39 | #include "mce-internal.h" |
39 | 40 | ||
40 | void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err) | 41 | void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) |
41 | { | 42 | { |
42 | struct mce m; | 43 | struct mce m; |
43 | 44 | ||
44 | /* Only corrected MC is reported */ | 45 | if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) |
45 | if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA)) | ||
46 | return; | 46 | return; |
47 | 47 | ||
48 | mce_setup(&m); | 48 | mce_setup(&m); |
49 | m.bank = 1; | 49 | m.bank = 1; |
50 | /* Fake a memory read corrected error with unknown channel */ | 50 | /* Fake a memory read error with unknown channel */ |
51 | m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f; | 51 | m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f; |
52 | |||
53 | if (severity >= GHES_SEV_RECOVERABLE) | ||
54 | m.status |= MCI_STATUS_UC; | ||
55 | if (severity >= GHES_SEV_PANIC) | ||
56 | m.status |= MCI_STATUS_PCC; | ||
57 | |||
52 | m.addr = mem_err->physical_addr; | 58 | m.addr = mem_err->physical_addr; |
53 | mce_log(&m); | 59 | mce_log(&m); |
54 | mce_notify_irq(); | 60 | mce_notify_irq(); |
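The change above makes apei_mce_report_mem_error() take a GHES severity and escalate the faked MCE status bits accordingly, instead of reporting corrected errors only. A compile-alone sketch of just that mapping; the MCI_STATUS_* bit positions and GHES_SEV_* ordering follow the kernel headers but are reproduced here only so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

/* Reproduced so the sketch compiles on its own (see <asm/mce.h>, <acpi/ghes.h>). */
#define MCI_STATUS_VAL		(1ULL << 63)
#define MCI_STATUS_UC		(1ULL << 61)
#define MCI_STATUS_EN		(1ULL << 60)
#define MCI_STATUS_ADDRV	(1ULL << 58)
#define MCI_STATUS_PCC		(1ULL << 57)

enum { GHES_SEV_NO, GHES_SEV_CORRECTED, GHES_SEV_RECOVERABLE, GHES_SEV_PANIC };

/* Start from the "memory read, unknown channel" template and escalate:
 * recoverable errors become uncorrected (UC), panic-level ones also set
 * processor-context-corrupt (PCC). */
static uint64_t mce_status_for(int severity)
{
	uint64_t status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;

	if (severity >= GHES_SEV_RECOVERABLE)
		status |= MCI_STATUS_UC;
	if (severity >= GHES_SEV_PANIC)
		status |= MCI_STATUS_PCC;
	return status;
}

int main(void)
{
	printf("corrected:   %#llx\n", (unsigned long long)mce_status_for(GHES_SEV_CORRECTED));
	printf("recoverable: %#llx\n", (unsigned long long)mce_status_for(GHES_SEV_RECOVERABLE));
	return 0;
}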
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b3218cdee95f..4d5419b249da 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1638,15 +1638,15 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) | |||
1638 | 1638 | ||
1639 | static void mce_start_timer(unsigned int cpu, struct timer_list *t) | 1639 | static void mce_start_timer(unsigned int cpu, struct timer_list *t) |
1640 | { | 1640 | { |
1641 | unsigned long iv = mce_adjust_timer(check_interval * HZ); | 1641 | unsigned long iv = check_interval * HZ; |
1642 | |||
1643 | __this_cpu_write(mce_next_interval, iv); | ||
1644 | 1642 | ||
1645 | if (mca_cfg.ignore_ce || !iv) | 1643 | if (mca_cfg.ignore_ce || !iv) |
1646 | return; | 1644 | return; |
1647 | 1645 | ||
1646 | per_cpu(mce_next_interval, cpu) = iv; | ||
1647 | |||
1648 | t->expires = round_jiffies(jiffies + iv); | 1648 | t->expires = round_jiffies(jiffies + iv); |
1649 | add_timer_on(t, smp_processor_id()); | 1649 | add_timer_on(t, cpu); |
1650 | } | 1650 | } |
1651 | 1651 | ||
1652 | static void __mcheck_cpu_init_timer(void) | 1652 | static void __mcheck_cpu_init_timer(void) |
@@ -2272,8 +2272,10 @@ static int mce_device_create(unsigned int cpu) | |||
2272 | dev->release = &mce_device_release; | 2272 | dev->release = &mce_device_release; |
2273 | 2273 | ||
2274 | err = device_register(dev); | 2274 | err = device_register(dev); |
2275 | if (err) | 2275 | if (err) { |
2276 | put_device(dev); | ||
2276 | return err; | 2277 | return err; |
2278 | } | ||
2277 | 2279 | ||
2278 | for (i = 0; mce_device_attrs[i]; i++) { | 2280 | for (i = 0; mce_device_attrs[i]; i++) { |
2279 | err = device_create_file(dev, mce_device_attrs[i]); | 2281 | err = device_create_file(dev, mce_device_attrs[i]); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 4cfe0458ca66..fb6156fee6f7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -6,7 +6,6 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/gfp.h> | 8 | #include <linux/gfp.h> |
9 | #include <linux/init.h> | ||
10 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
11 | #include <linux/percpu.h> | 10 | #include <linux/percpu.h> |
12 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 1c044b1ccc59..a3042989398c 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
8 | #include <linux/init.h> | ||
9 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
10 | 9 | ||
11 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index e9a701aecaa1..7dc5564d0cdf 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/interrupt.h> | 5 | #include <linux/interrupt.h> |
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
8 | #include <linux/init.h> | ||
9 | 8 | ||
10 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
11 | #include <asm/mce.h> | 10 | #include <asm/mce.h> |
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile new file mode 100644 index 000000000000..285c85427c32 --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | microcode-y := core.o | ||
2 | obj-$(CONFIG_MICROCODE) += microcode.o | ||
3 | microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o | ||
4 | microcode-$(CONFIG_MICROCODE_AMD) += amd.o | ||
5 | obj-$(CONFIG_MICROCODE_EARLY) += core_early.o | ||
6 | obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o | ||
7 | obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o | ||
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c3d4cc972eca..4a6ff747aaad 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -182,10 +182,10 @@ int __apply_microcode_amd(struct microcode_amd *mc_amd) | |||
182 | { | 182 | { |
183 | u32 rev, dummy; | 183 | u32 rev, dummy; |
184 | 184 | ||
185 | wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); | 185 | native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); |
186 | 186 | ||
187 | /* verify patch application was successful */ | 187 | /* verify patch application was successful */ |
188 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); | 188 | native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); |
189 | if (rev != mc_amd->hdr.patch_id) | 189 | if (rev != mc_amd->hdr.patch_id) |
190 | return -1; | 190 | return -1; |
191 | 191 | ||
@@ -332,6 +332,9 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) | |||
332 | patch->patch_id = mc_hdr->patch_id; | 332 | patch->patch_id = mc_hdr->patch_id; |
333 | patch->equiv_cpu = proc_id; | 333 | patch->equiv_cpu = proc_id; |
334 | 334 | ||
335 | pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", | ||
336 | __func__, patch->patch_id, proc_id); | ||
337 | |||
335 | /* ... and add to cache. */ | 338 | /* ... and add to cache. */ |
336 | update_cache(patch); | 339 | update_cache(patch); |
337 | 340 | ||
@@ -390,9 +393,9 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) | |||
390 | if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { | 393 | if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { |
391 | struct ucode_patch *p = find_patch(smp_processor_id()); | 394 | struct ucode_patch *p = find_patch(smp_processor_id()); |
392 | if (p) { | 395 | if (p) { |
393 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); | 396 | memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); |
394 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), | 397 | memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), |
395 | MPB_MAX_SIZE)); | 398 | PATCH_MAX_SIZE)); |
396 | } | 399 | } |
397 | } | 400 | } |
398 | #endif | 401 | #endif |
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c index 6073104ccaa3..8384c0fa206f 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/cpu/microcode/amd_early.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2013 Advanced Micro Devices, Inc. |
3 | * | 3 | * |
4 | * Author: Jacob Shin <jacob.shin@amd.com> | 4 | * Author: Jacob Shin <jacob.shin@amd.com> |
5 | * Fixes: Borislav Petkov <bp@suse.de> | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -15,10 +16,18 @@ | |||
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
16 | #include <asm/microcode_amd.h> | 17 | #include <asm/microcode_amd.h> |
17 | 18 | ||
18 | static bool ucode_loaded; | 19 | /* |
20 | * This points to the current valid container of microcode patches which we will | ||
21 | * save from the initrd before jettisoning its contents. | ||
22 | */ | ||
23 | static u8 *container; | ||
24 | static size_t container_size; | ||
25 | |||
19 | static u32 ucode_new_rev; | 26 | static u32 ucode_new_rev; |
20 | static unsigned long ucode_offset; | 27 | u8 amd_ucode_patch[PATCH_MAX_SIZE]; |
21 | static size_t ucode_size; | 28 | static u16 this_equiv_id; |
29 | |||
30 | struct cpio_data ucode_cpio; | ||
22 | 31 | ||
23 | /* | 32 | /* |
24 | * Microcode patch container file is prepended to the initrd in cpio format. | 33 | * Microcode patch container file is prepended to the initrd in cpio format. |
@@ -32,9 +41,6 @@ static struct cpio_data __init find_ucode_in_initrd(void) | |||
32 | char *path; | 41 | char *path; |
33 | void *start; | 42 | void *start; |
34 | size_t size; | 43 | size_t size; |
35 | unsigned long *uoffset; | ||
36 | size_t *usize; | ||
37 | struct cpio_data cd; | ||
38 | 44 | ||
39 | #ifdef CONFIG_X86_32 | 45 | #ifdef CONFIG_X86_32 |
40 | struct boot_params *p; | 46 | struct boot_params *p; |
@@ -47,30 +53,50 @@ static struct cpio_data __init find_ucode_in_initrd(void) | |||
47 | path = (char *)__pa_nodebug(ucode_path); | 53 | path = (char *)__pa_nodebug(ucode_path); |
48 | start = (void *)p->hdr.ramdisk_image; | 54 | start = (void *)p->hdr.ramdisk_image; |
49 | size = p->hdr.ramdisk_size; | 55 | size = p->hdr.ramdisk_size; |
50 | uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); | ||
51 | usize = (size_t *)__pa_nodebug(&ucode_size); | ||
52 | #else | 56 | #else |
53 | path = ucode_path; | 57 | path = ucode_path; |
54 | start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET); | 58 | start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET); |
55 | size = boot_params.hdr.ramdisk_size; | 59 | size = boot_params.hdr.ramdisk_size; |
56 | uoffset = &ucode_offset; | ||
57 | usize = &ucode_size; | ||
58 | #endif | 60 | #endif |
59 | 61 | ||
60 | cd = find_cpio_data(path, start, size, &offset); | 62 | return find_cpio_data(path, start, size, &offset); |
61 | if (!cd.data) | 63 | } |
62 | return cd; | ||
63 | 64 | ||
64 | if (*(u32 *)cd.data != UCODE_MAGIC) { | 65 | static size_t compute_container_size(u8 *data, u32 total_size) |
65 | cd.data = NULL; | 66 | { |
66 | cd.size = 0; | 67 | size_t size = 0; |
67 | return cd; | 68 | u32 *header = (u32 *)data; |
68 | } | ||
69 | 69 | ||
70 | *uoffset = (u8 *)cd.data - (u8 *)start; | 70 | if (header[0] != UCODE_MAGIC || |
71 | *usize = cd.size; | 71 | header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ |
72 | header[2] == 0) /* size */ | ||
73 | return size; | ||
72 | 74 | ||
73 | return cd; | 75 | size = header[2] + CONTAINER_HDR_SZ; |
76 | total_size -= size; | ||
77 | data += size; | ||
78 | |||
79 | while (total_size) { | ||
80 | u16 patch_size; | ||
81 | |||
82 | header = (u32 *)data; | ||
83 | |||
84 | if (header[0] != UCODE_UCODE_TYPE) | ||
85 | break; | ||
86 | |||
87 | /* | ||
88 | * Sanity-check patch size. | ||
89 | */ | ||
90 | patch_size = header[1]; | ||
91 | if (patch_size > PATCH_MAX_SIZE) | ||
92 | break; | ||
93 | |||
94 | size += patch_size + SECTION_HDR_SIZE; | ||
95 | data += patch_size + SECTION_HDR_SIZE; | ||
96 | total_size -= patch_size + SECTION_HDR_SIZE; | ||
97 | } | ||
98 | |||
99 | return size; | ||
74 | } | 100 | } |
75 | 101 | ||
76 | /* | 102 | /* |
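The new compute_container_size() above walks one microcode container: a 12-byte header plus equivalence table, followed by UCODE_UCODE_TYPE sections, each preceded by an 8-byte section header. A standalone restatement with a toy in-memory container; the constants mirror <asm/microcode_amd.h> but are repeated here only to keep the sketch self-contained, and the stated sizes are trusted just as in the original:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UCODE_MAGIC			0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001
#define CONTAINER_HDR_SZ		12
#define SECTION_HDR_SIZE		8
#define PATCH_MAX_SIZE			4096

/* Size of one container: its equivalence-table block plus every following
 * microcode section, stopping at anything that is not a UCODE_UCODE_TYPE
 * section (i.e. the next container, or garbage). */
static size_t container_size(const uint8_t *data, uint32_t total)
{
	const uint32_t *hdr = (const uint32_t *)data;
	size_t size;

	if (hdr[0] != UCODE_MAGIC || hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !hdr[2])
		return 0;

	size   = hdr[2] + CONTAINER_HDR_SZ;
	data  += size;
	total -= size;

	while (total) {
		uint32_t patch_size;

		hdr = (const uint32_t *)data;
		if (hdr[0] != UCODE_UCODE_TYPE)
			break;
		patch_size = hdr[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;
		size  += patch_size + SECTION_HDR_SIZE;
		data  += patch_size + SECTION_HDR_SIZE;
		total -= patch_size + SECTION_HDR_SIZE;
	}
	return size;
}

int main(void)
{
	/* Toy container: 16-byte equivalence table, one 32-byte patch section. */
	uint32_t buf[(CONTAINER_HDR_SZ + 16 + SECTION_HDR_SIZE + 32) / 4] = { 0 };
	uint32_t hdr[3] = { UCODE_MAGIC, UCODE_EQUIV_CPU_TABLE_TYPE, 16 };
	uint32_t sec[2] = { UCODE_UCODE_TYPE, 32 };

	memcpy(buf, hdr, sizeof(hdr));
	memcpy((uint8_t *)buf + CONTAINER_HDR_SZ + 16, sec, sizeof(sec));

	printf("container size: %zu bytes (buffer is %zu)\n",
	       container_size((const uint8_t *)buf, sizeof(buf)), sizeof(buf));
	return 0;
}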
@@ -85,23 +111,22 @@ static struct cpio_data __init find_ucode_in_initrd(void) | |||
85 | static void apply_ucode_in_initrd(void *ucode, size_t size) | 111 | static void apply_ucode_in_initrd(void *ucode, size_t size) |
86 | { | 112 | { |
87 | struct equiv_cpu_entry *eq; | 113 | struct equiv_cpu_entry *eq; |
114 | size_t *cont_sz; | ||
88 | u32 *header; | 115 | u32 *header; |
89 | u8 *data; | 116 | u8 *data, **cont; |
90 | u16 eq_id = 0; | 117 | u16 eq_id = 0; |
91 | int offset, left; | 118 | int offset, left; |
92 | u32 rev, eax; | 119 | u32 rev, eax, ebx, ecx, edx; |
93 | u32 *new_rev; | 120 | u32 *new_rev; |
94 | unsigned long *uoffset; | ||
95 | size_t *usize; | ||
96 | 121 | ||
97 | #ifdef CONFIG_X86_32 | 122 | #ifdef CONFIG_X86_32 |
98 | new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); | 123 | new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); |
99 | uoffset = (unsigned long *)__pa_nodebug(&ucode_offset); | 124 | cont_sz = (size_t *)__pa_nodebug(&container_size); |
100 | usize = (size_t *)__pa_nodebug(&ucode_size); | 125 | cont = (u8 **)__pa_nodebug(&container); |
101 | #else | 126 | #else |
102 | new_rev = &ucode_new_rev; | 127 | new_rev = &ucode_new_rev; |
103 | uoffset = &ucode_offset; | 128 | cont_sz = &container_size; |
104 | usize = &ucode_size; | 129 | cont = &container; |
105 | #endif | 130 | #endif |
106 | 131 | ||
107 | data = ucode; | 132 | data = ucode; |
@@ -109,23 +134,37 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) | |||
109 | header = (u32 *)data; | 134 | header = (u32 *)data; |
110 | 135 | ||
111 | /* find equiv cpu table */ | 136 | /* find equiv cpu table */ |
112 | 137 | if (header[0] != UCODE_MAGIC || | |
113 | if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ | 138 | header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ |
114 | header[2] == 0) /* size */ | 139 | header[2] == 0) /* size */ |
115 | return; | 140 | return; |
116 | 141 | ||
117 | eax = cpuid_eax(0x00000001); | 142 | eax = 0x00000001; |
143 | ecx = 0; | ||
144 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
118 | 145 | ||
119 | while (left > 0) { | 146 | while (left > 0) { |
120 | eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ); | 147 | eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ); |
121 | 148 | ||
149 | *cont = data; | ||
150 | |||
151 | /* Advance past the container header */ | ||
122 | offset = header[2] + CONTAINER_HDR_SZ; | 152 | offset = header[2] + CONTAINER_HDR_SZ; |
123 | data += offset; | 153 | data += offset; |
124 | left -= offset; | 154 | left -= offset; |
125 | 155 | ||
126 | eq_id = find_equiv_id(eq, eax); | 156 | eq_id = find_equiv_id(eq, eax); |
127 | if (eq_id) | 157 | if (eq_id) { |
158 | this_equiv_id = eq_id; | ||
159 | *cont_sz = compute_container_size(*cont, left + offset); | ||
160 | |||
161 | /* | ||
162 | * truncate how much we need to iterate over in the | ||
163 | * ucode update loop below | ||
164 | */ | ||
165 | left = *cont_sz - offset; | ||
128 | break; | 166 | break; |
167 | } | ||
129 | 168 | ||
130 | /* | 169 | /* |
131 | * support multiple container files appended together. if this | 170 | * support multiple container files appended together. if this |
@@ -145,19 +184,18 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) | |||
145 | 184 | ||
146 | /* mark where the next microcode container file starts */ | 185 | /* mark where the next microcode container file starts */ |
147 | offset = data - (u8 *)ucode; | 186 | offset = data - (u8 *)ucode; |
148 | *uoffset += offset; | ||
149 | *usize -= offset; | ||
150 | ucode = data; | 187 | ucode = data; |
151 | } | 188 | } |
152 | 189 | ||
153 | if (!eq_id) { | 190 | if (!eq_id) { |
154 | *usize = 0; | 191 | *cont = NULL; |
192 | *cont_sz = 0; | ||
155 | return; | 193 | return; |
156 | } | 194 | } |
157 | 195 | ||
158 | /* find ucode and update if needed */ | 196 | /* find ucode and update if needed */ |
159 | 197 | ||
160 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | 198 | native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); |
161 | 199 | ||
162 | while (left > 0) { | 200 | while (left > 0) { |
163 | struct microcode_amd *mc; | 201 | struct microcode_amd *mc; |
@@ -168,73 +206,83 @@ static void apply_ucode_in_initrd(void *ucode, size_t size) | |||
168 | break; | 206 | break; |
169 | 207 | ||
170 | mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE); | 208 | mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE); |
171 | if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) | 209 | |
172 | if (__apply_microcode_amd(mc) == 0) { | 210 | if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) { |
211 | |||
212 | if (!__apply_microcode_amd(mc)) { | ||
173 | rev = mc->hdr.patch_id; | 213 | rev = mc->hdr.patch_id; |
174 | *new_rev = rev; | 214 | *new_rev = rev; |
215 | |||
216 | /* save ucode patch */ | ||
217 | memcpy(amd_ucode_patch, mc, | ||
218 | min_t(u32, header[1], PATCH_MAX_SIZE)); | ||
175 | } | 219 | } |
220 | } | ||
176 | 221 | ||
177 | offset = header[1] + SECTION_HDR_SIZE; | 222 | offset = header[1] + SECTION_HDR_SIZE; |
178 | data += offset; | 223 | data += offset; |
179 | left -= offset; | 224 | left -= offset; |
180 | } | 225 | } |
181 | |||
182 | /* mark where this microcode container file ends */ | ||
183 | offset = *usize - (data - (u8 *)ucode); | ||
184 | *usize -= offset; | ||
185 | |||
186 | if (!(*new_rev)) | ||
187 | *usize = 0; | ||
188 | } | 226 | } |
189 | 227 | ||
190 | void __init load_ucode_amd_bsp(void) | 228 | void __init load_ucode_amd_bsp(void) |
191 | { | 229 | { |
192 | struct cpio_data cd = find_ucode_in_initrd(); | 230 | struct cpio_data cp; |
193 | if (!cd.data) | 231 | void **data; |
232 | size_t *size; | ||
233 | |||
234 | #ifdef CONFIG_X86_32 | ||
235 | data = (void **)__pa_nodebug(&ucode_cpio.data); | ||
236 | size = (size_t *)__pa_nodebug(&ucode_cpio.size); | ||
237 | #else | ||
238 | data = &ucode_cpio.data; | ||
239 | size = &ucode_cpio.size; | ||
240 | #endif | ||
241 | |||
242 | cp = find_ucode_in_initrd(); | ||
243 | if (!cp.data) | ||
194 | return; | 244 | return; |
195 | 245 | ||
196 | apply_ucode_in_initrd(cd.data, cd.size); | 246 | *data = cp.data; |
247 | *size = cp.size; | ||
248 | |||
249 | apply_ucode_in_initrd(cp.data, cp.size); | ||
197 | } | 250 | } |
198 | 251 | ||
199 | #ifdef CONFIG_X86_32 | 252 | #ifdef CONFIG_X86_32 |
200 | u8 amd_bsp_mpb[MPB_MAX_SIZE]; | ||
201 | |||
202 | /* | 253 | /* |
203 | * On 32-bit, since AP's early load occurs before paging is turned on, we | 254 | * On 32-bit, since AP's early load occurs before paging is turned on, we |
204 | * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during | 255 | * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during |
205 | * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During | 256 | * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During |
206 | * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which | 257 | * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch, |
207 | * is used upon resume from suspend. | 258 | * which is used upon resume from suspend. |
208 | */ | 259 | */ |
209 | void load_ucode_amd_ap(void) | 260 | void load_ucode_amd_ap(void) |
210 | { | 261 | { |
211 | struct microcode_amd *mc; | 262 | struct microcode_amd *mc; |
212 | unsigned long *initrd; | ||
213 | unsigned long *uoffset; | ||
214 | size_t *usize; | 263 | size_t *usize; |
215 | void *ucode; | 264 | void **ucode; |
216 | 265 | ||
217 | mc = (struct microcode_amd *)__pa(amd_bsp_mpb); | 266 | mc = (struct microcode_amd *)__pa(amd_ucode_patch); |
218 | if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { | 267 | if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { |
219 | __apply_microcode_amd(mc); | 268 | __apply_microcode_amd(mc); |
220 | return; | 269 | return; |
221 | } | 270 | } |
222 | 271 | ||
223 | initrd = (unsigned long *)__pa(&initrd_start); | 272 | ucode = (void *)__pa_nodebug(&container); |
224 | uoffset = (unsigned long *)__pa(&ucode_offset); | 273 | usize = (size_t *)__pa_nodebug(&container_size); |
225 | usize = (size_t *)__pa(&ucode_size); | ||
226 | 274 | ||
227 | if (!*usize || !*initrd) | 275 | if (!*ucode || !*usize) |
228 | return; | 276 | return; |
229 | 277 | ||
230 | ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset); | 278 | apply_ucode_in_initrd(*ucode, *usize); |
231 | apply_ucode_in_initrd(ucode, *usize); | ||
232 | } | 279 | } |
233 | 280 | ||
234 | static void __init collect_cpu_sig_on_bsp(void *arg) | 281 | static void __init collect_cpu_sig_on_bsp(void *arg) |
235 | { | 282 | { |
236 | unsigned int cpu = smp_processor_id(); | 283 | unsigned int cpu = smp_processor_id(); |
237 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 284 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
285 | |||
238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | 286 | uci->cpu_sig.sig = cpuid_eax(0x00000001); |
239 | } | 287 | } |
240 | #else | 288 | #else |
@@ -242,36 +290,54 @@ void load_ucode_amd_ap(void) | |||
242 | { | 290 | { |
243 | unsigned int cpu = smp_processor_id(); | 291 | unsigned int cpu = smp_processor_id(); |
244 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 292 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
293 | struct equiv_cpu_entry *eq; | ||
294 | struct microcode_amd *mc; | ||
245 | u32 rev, eax; | 295 | u32 rev, eax; |
296 | u16 eq_id; | ||
297 | |||
298 | /* Exit if called on the BSP. */ | ||
299 | if (!cpu) | ||
300 | return; | ||
301 | |||
302 | if (!container) | ||
303 | return; | ||
246 | 304 | ||
247 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | 305 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); |
248 | eax = cpuid_eax(0x00000001); | ||
249 | 306 | ||
250 | uci->cpu_sig.rev = rev; | 307 | uci->cpu_sig.rev = rev; |
251 | uci->cpu_sig.sig = eax; | 308 | uci->cpu_sig.sig = eax; |
252 | 309 | ||
253 | if (cpu && !ucode_loaded) { | 310 | eax = cpuid_eax(0x00000001); |
254 | void *ucode; | 311 | eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ); |
255 | 312 | ||
256 | if (!ucode_size || !initrd_start) | 313 | eq_id = find_equiv_id(eq, eax); |
257 | return; | 314 | if (!eq_id) |
315 | return; | ||
316 | |||
317 | if (eq_id == this_equiv_id) { | ||
318 | mc = (struct microcode_amd *)amd_ucode_patch; | ||
258 | 319 | ||
259 | ucode = (void *)(initrd_start + ucode_offset); | 320 | if (mc && rev < mc->hdr.patch_id) { |
260 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | 321 | if (!__apply_microcode_amd(mc)) |
261 | if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) | 322 | ucode_new_rev = mc->hdr.patch_id; |
323 | } | ||
324 | |||
325 | } else { | ||
326 | if (!ucode_cpio.data) | ||
262 | return; | 327 | return; |
263 | 328 | ||
264 | ucode_loaded = true; | 329 | /* |
330 | * AP has a different equivalence ID than BSP, looks like | ||
331 | * mixed-steppings silicon so go through the ucode blob anew. | ||
332 | */ | ||
333 | apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); | ||
265 | } | 334 | } |
266 | |||
267 | apply_microcode_amd(cpu); | ||
268 | } | 335 | } |
269 | #endif | 336 | #endif |
270 | 337 | ||
271 | int __init save_microcode_in_initrd_amd(void) | 338 | int __init save_microcode_in_initrd_amd(void) |
272 | { | 339 | { |
273 | enum ucode_state ret; | 340 | enum ucode_state ret; |
274 | void *ucode; | ||
275 | u32 eax; | 341 | u32 eax; |
276 | 342 | ||
277 | #ifdef CONFIG_X86_32 | 343 | #ifdef CONFIG_X86_32 |
@@ -280,22 +346,35 @@ int __init save_microcode_in_initrd_amd(void) | |||
280 | 346 | ||
281 | if (!uci->cpu_sig.sig) | 347 | if (!uci->cpu_sig.sig) |
282 | smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); | 348 | smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); |
349 | |||
350 | /* | ||
351 | * Take into account the fact that the ramdisk might get relocated | ||
352 | * and therefore we need to recompute the container's position in | ||
353 | * virtual memory space. | ||
354 | */ | ||
355 | container = (u8 *)(__va((u32)relocated_ramdisk) + | ||
356 | ((u32)container - boot_params.hdr.ramdisk_image)); | ||
283 | #endif | 357 | #endif |
284 | if (ucode_new_rev) | 358 | if (ucode_new_rev) |
285 | pr_info("microcode: updated early to new patch_level=0x%08x\n", | 359 | pr_info("microcode: updated early to new patch_level=0x%08x\n", |
286 | ucode_new_rev); | 360 | ucode_new_rev); |
287 | 361 | ||
288 | if (ucode_loaded || !ucode_size || !initrd_start) | 362 | if (!container) |
289 | return 0; | 363 | return -EINVAL; |
290 | 364 | ||
291 | ucode = (void *)(initrd_start + ucode_offset); | ||
292 | eax = cpuid_eax(0x00000001); | 365 | eax = cpuid_eax(0x00000001); |
293 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | 366 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); |
294 | 367 | ||
295 | ret = load_microcode_amd(eax, ucode, ucode_size); | 368 | ret = load_microcode_amd(eax, container, container_size); |
296 | if (ret != UCODE_OK) | 369 | if (ret != UCODE_OK) |
297 | return -EINVAL; | 370 | return -EINVAL; |
298 | 371 | ||
299 | ucode_loaded = true; | 372 | /* |
373 | * This will be freed any msec now, stash patches for the current | ||
374 | * family and switch to patch cache for cpu hotplug, etc later. | ||
375 | */ | ||
376 | container = NULL; | ||
377 | container_size = 0; | ||
378 | |||
300 | return 0; | 379 | return 0; |
301 | } | 380 | } |
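The 32-bit branch of save_microcode_in_initrd_amd() above has to cope with the ramdisk being relocated between early boot and this point: only the container's offset inside the image stays constant, so the pointer is rebuilt as the relocated base plus that offset and then converted back to a virtual address with __va(). A standalone sketch of that arithmetic, using made-up addresses rather than real boot data:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustration of the pointer fix-up in save_microcode_in_initrd_amd():
     * the container's offset within the ramdisk image never changes, so
     * after the image moves the new location is new_base + (old_ptr - old_base).
     * All addresses below are invented example values.
     */
    int main(void)
    {
            uint32_t old_image_base = 0x37c00000;  /* boot_params.hdr.ramdisk_image */
            uint32_t container_pa   = 0x37c01a40;  /* where the container was found */
            uint32_t new_image_base = 0x7f300000;  /* relocated_ramdisk              */

            uint32_t offset = container_pa - old_image_base;

            printf("offset into image : 0x%x\n", (unsigned)offset);
            printf("relocated address : 0x%x\n", (unsigned)(new_image_base + offset));
            /* the kernel additionally maps this result through __va() */
            return 0;
    }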
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/cpu/microcode/core.c index 15c987698b0f..15c987698b0f 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c index be7f8514f577..be7f8514f577 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/cpu/microcode/core_early.c | |||
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 5fb2cebf556b..5fb2cebf556b 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index 1575deb2e636..18f739129e72 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c | |||
@@ -365,16 +365,6 @@ out: | |||
365 | return state; | 365 | return state; |
366 | } | 366 | } |
367 | 367 | ||
368 | #define native_rdmsr(msr, val1, val2) \ | ||
369 | do { \ | ||
370 | u64 __val = native_read_msr((msr)); \ | ||
371 | (void)((val1) = (u32)__val); \ | ||
372 | (void)((val2) = (u32)(__val >> 32)); \ | ||
373 | } while (0) | ||
374 | |||
375 | #define native_wrmsr(msr, low, high) \ | ||
376 | native_write_msr(msr, low, high); | ||
377 | |||
378 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) | 368 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) |
379 | { | 369 | { |
380 | unsigned int val[2]; | 370 | unsigned int val[2]; |
diff --git a/arch/x86/kernel/microcode_intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c index ce69320d0179..ce69320d0179 100644 --- a/arch/x86/kernel/microcode_intel_lib.c +++ b/arch/x86/kernel/cpu/microcode/intel_lib.c | |||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 8e132931614d..b88645191fe5 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1883,21 +1883,27 @@ static struct pmu pmu = { | |||
1883 | 1883 | ||
1884 | void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) | 1884 | void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) |
1885 | { | 1885 | { |
1886 | struct cyc2ns_data *data; | ||
1887 | |||
1886 | userpg->cap_user_time = 0; | 1888 | userpg->cap_user_time = 0; |
1887 | userpg->cap_user_time_zero = 0; | 1889 | userpg->cap_user_time_zero = 0; |
1888 | userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; | 1890 | userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; |
1889 | userpg->pmc_width = x86_pmu.cntval_bits; | 1891 | userpg->pmc_width = x86_pmu.cntval_bits; |
1890 | 1892 | ||
1891 | if (!sched_clock_stable) | 1893 | if (!sched_clock_stable()) |
1892 | return; | 1894 | return; |
1893 | 1895 | ||
1896 | data = cyc2ns_read_begin(); | ||
1897 | |||
1894 | userpg->cap_user_time = 1; | 1898 | userpg->cap_user_time = 1; |
1895 | userpg->time_mult = this_cpu_read(cyc2ns); | 1899 | userpg->time_mult = data->cyc2ns_mul; |
1896 | userpg->time_shift = CYC2NS_SCALE_FACTOR; | 1900 | userpg->time_shift = data->cyc2ns_shift; |
1897 | userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; | 1901 | userpg->time_offset = data->cyc2ns_offset - now; |
1898 | 1902 | ||
1899 | userpg->cap_user_time_zero = 1; | 1903 | userpg->cap_user_time_zero = 1; |
1900 | userpg->time_zero = this_cpu_read(cyc2ns_offset); | 1904 | userpg->time_zero = data->cyc2ns_offset; |
1905 | |||
1906 | cyc2ns_read_end(data); | ||
1901 | } | 1907 | } |
1902 | 1908 | ||
1903 | /* | 1909 | /* |
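The arch_perf_update_userpage() hunk above switches the exported time fields from the old per-cpu cyc2ns variables to the cyc2ns_data snapshot, but the user-space side of the contract is unchanged: a tool that mmaps the event can convert raw TSC values to nanoseconds with the mult/shift/offset it finds in the page. A hedged, standalone sketch of that conversion; the field names mirror struct perf_event_mmap_page and the sample numbers are invented:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Convert a TSC value using the cap_user_time fields from the mmap'ed
     * perf page. Splitting into quotient and remainder keeps the 64-bit
     * multiply from overflowing for large cycle counts.
     */
    static uint64_t tsc_to_ns(uint64_t cyc, uint32_t time_mult,
                              uint16_t time_shift, uint64_t time_zero)
    {
            uint64_t quot = cyc >> time_shift;
            uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

            return time_zero + quot * time_mult +
                   ((rem * time_mult) >> time_shift);
    }

    int main(void)
    {
            /* example mult/shift describing a ~2.4 GHz TSC */
            printf("%llu ns\n",
                   (unsigned long long)tsc_to_ns(1000000000ULL, 418, 10, 0));
            return 0;
    }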
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index e09f0bfb7b8f..4b8e4d3cd6ea 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/ptrace.h> | 12 | #include <linux/ptrace.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <asm/apic.h> | 15 | #include <asm/apic.h> |
15 | 16 | ||
@@ -816,6 +817,18 @@ out: | |||
816 | return ret; | 817 | return ret; |
817 | } | 818 | } |
818 | 819 | ||
820 | static void ibs_eilvt_setup(void) | ||
821 | { | ||
822 | /* | ||
823 | * Force LVT offset assignment for family 10h: The offsets are | ||
824 | * not assigned by the BIOS for this family, so the OS is | ||
825 | * responsible for doing it. If the OS assignment fails, fall | ||
826 | * back to the BIOS settings and try to set this up from there. | ||
827 | */ | ||
828 | if (boot_cpu_data.x86 == 0x10) | ||
829 | force_ibs_eilvt_setup(); | ||
830 | } | ||
831 | |||
819 | static inline int get_ibs_lvt_offset(void) | 832 | static inline int get_ibs_lvt_offset(void) |
820 | { | 833 | { |
821 | u64 val; | 834 | u64 val; |
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) | |||
851 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | 864 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
852 | } | 865 | } |
853 | 866 | ||
867 | #ifdef CONFIG_PM | ||
868 | |||
869 | static int perf_ibs_suspend(void) | ||
870 | { | ||
871 | clear_APIC_ibs(NULL); | ||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | static void perf_ibs_resume(void) | ||
876 | { | ||
877 | ibs_eilvt_setup(); | ||
878 | setup_APIC_ibs(NULL); | ||
879 | } | ||
880 | |||
881 | static struct syscore_ops perf_ibs_syscore_ops = { | ||
882 | .resume = perf_ibs_resume, | ||
883 | .suspend = perf_ibs_suspend, | ||
884 | }; | ||
885 | |||
886 | static void perf_ibs_pm_init(void) | ||
887 | { | ||
888 | register_syscore_ops(&perf_ibs_syscore_ops); | ||
889 | } | ||
890 | |||
891 | #else | ||
892 | |||
893 | static inline void perf_ibs_pm_init(void) { } | ||
894 | |||
895 | #endif | ||
896 | |||
854 | static int | 897 | static int |
855 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 898 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
856 | { | 899 | { |
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) | |||
877 | if (!caps) | 920 | if (!caps) |
878 | return -ENODEV; /* ibs not supported by the cpu */ | 921 | return -ENODEV; /* ibs not supported by the cpu */ |
879 | 922 | ||
880 | /* | 923 | ibs_eilvt_setup(); |
881 | * Force LVT offset assignment for family 10h: The offsets are | ||
882 | * not assigned by the BIOS for this family, so the OS is | ||
883 | * responsible for doing it. If the OS assignment fails, fall | ||
884 | * back to BIOS settings and try to setup this. | ||
885 | */ | ||
886 | if (boot_cpu_data.x86 == 0x10) | ||
887 | force_ibs_eilvt_setup(); | ||
888 | 924 | ||
889 | if (!ibs_eilvt_valid()) | 925 | if (!ibs_eilvt_valid()) |
890 | goto out; | 926 | goto out; |
891 | 927 | ||
928 | perf_ibs_pm_init(); | ||
892 | get_online_cpus(); | 929 | get_online_cpus(); |
893 | ibs_caps = caps; | 930 | ibs_caps = caps; |
894 | /* make ibs_caps visible to other cpus: */ | 931 | /* make ibs_caps visible to other cpus: */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c new file mode 100644 index 000000000000..5ad35ad94d0f --- /dev/null +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c | |||
@@ -0,0 +1,679 @@ | |||
1 | /* | ||
2 | * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters | ||
3 | * Copyright (C) 2013 Google, Inc., Stephane Eranian | ||
4 | * | ||
5 | * Intel RAPL interface is specified in the IA-32 Manual Vol3b | ||
6 | * section 14.7.1 (September 2013) | ||
7 | * | ||
8 | * RAPL provides more controls than just reporting energy consumption | ||
9 | * however here we only expose the 3 energy consumption free running | ||
10 | * counters (pp0, pkg, dram). | ||
11 | * | ||
12 | * Each of those counters increments in a power unit defined by the | ||
13 | * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules | ||
14 | * but it can vary. | ||
15 | * | ||
16 | * Counter to rapl events mappings: | ||
17 | * | ||
18 | * pp0 counter: consumption of all physical cores (power plane 0) | ||
19 | * event: rapl_energy_cores | ||
20 | * perf code: 0x1 | ||
21 | * | ||
22 | * pkg counter: consumption of the whole processor package | ||
23 | * event: rapl_energy_pkg | ||
24 | * perf code: 0x2 | ||
25 | * | ||
26 | * dram counter: consumption of the dram domain (servers only) | ||
27 | * event: rapl_energy_dram | ||
28 | * perf code: 0x3 | ||
29 | * | ||
30 | * gpu counter: consumption of the builtin-gpu domain (client only) | ||
31 | * event: rapl_energy_gpu | ||
32 | * perf code: 0x4 | ||
33 | * | ||
34 | * We manage those counters as free running (read-only). They may be | ||
35 | * used simultaneously by other tools, such as turbostat. | ||
36 | * | ||
37 | * The events only support system-wide mode counting. There is no | ||
38 | * sampling support because it does not make sense and is not | ||
39 | * supported by the RAPL hardware. | ||
40 | * | ||
41 | * Because we want to avoid floating-point operations in the kernel, | ||
42 | * the events are all reported in fixed point arithmetic (32.32). | ||
43 | * Tools must adjust the counts to convert them to Watts using | ||
44 | * the duration of the measurement. Tools may use a function such as | ||
45 | * ldexp(raw_count, -32); | ||
46 | */ | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/slab.h> | ||
49 | #include <linux/perf_event.h> | ||
50 | #include <asm/cpu_device_id.h> | ||
51 | #include "perf_event.h" | ||
52 | |||
53 | /* | ||
54 | * RAPL energy status counters | ||
55 | */ | ||
56 | #define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */ | ||
57 | #define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */ | ||
58 | #define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */ | ||
59 | #define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */ | ||
60 | #define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */ | ||
61 | #define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */ | ||
62 | #define RAPL_IDX_PP1_NRG_STAT 3 /* gpu */ | ||
63 | #define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */ | ||
64 | |||
65 | /* Clients have PP0, PKG */ | ||
66 | #define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\ | ||
67 | 1<<RAPL_IDX_PKG_NRG_STAT|\ | ||
68 | 1<<RAPL_IDX_PP1_NRG_STAT) | ||
69 | |||
70 | /* Servers have PP0, PKG, RAM */ | ||
71 | #define RAPL_IDX_SRV (1<<RAPL_IDX_PP0_NRG_STAT|\ | ||
72 | 1<<RAPL_IDX_PKG_NRG_STAT|\ | ||
73 | 1<<RAPL_IDX_RAM_NRG_STAT) | ||
74 | |||
75 | /* | ||
76 | * event code: LSB 8 bits, passed in attr->config | ||
77 | * any other bit is reserved | ||
78 | */ | ||
79 | #define RAPL_EVENT_MASK 0xFFULL | ||
80 | |||
81 | #define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ | ||
82 | static ssize_t __rapl_##_var##_show(struct kobject *kobj, \ | ||
83 | struct kobj_attribute *attr, \ | ||
84 | char *page) \ | ||
85 | { \ | ||
86 | BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ | ||
87 | return sprintf(page, _format "\n"); \ | ||
88 | } \ | ||
89 | static struct kobj_attribute format_attr_##_var = \ | ||
90 | __ATTR(_name, 0444, __rapl_##_var##_show, NULL) | ||
91 | |||
92 | #define RAPL_EVENT_DESC(_name, _config) \ | ||
93 | { \ | ||
94 | .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \ | ||
95 | .config = _config, \ | ||
96 | } | ||
97 | |||
98 | #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ | ||
99 | |||
100 | struct rapl_pmu { | ||
101 | spinlock_t lock; | ||
102 | int hw_unit; /* 1/2^hw_unit Joule */ | ||
103 | int n_active; /* number of active events */ | ||
104 | struct list_head active_list; | ||
105 | struct pmu *pmu; /* pointer to rapl_pmu_class */ | ||
106 | ktime_t timer_interval; /* in ktime_t unit */ | ||
107 | struct hrtimer hrtimer; | ||
108 | }; | ||
109 | |||
110 | static struct pmu rapl_pmu_class; | ||
111 | static cpumask_t rapl_cpu_mask; | ||
112 | static int rapl_cntr_mask; | ||
113 | |||
114 | static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu); | ||
115 | static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free); | ||
116 | |||
117 | static inline u64 rapl_read_counter(struct perf_event *event) | ||
118 | { | ||
119 | u64 raw; | ||
120 | rdmsrl(event->hw.event_base, raw); | ||
121 | return raw; | ||
122 | } | ||
123 | |||
124 | static inline u64 rapl_scale(u64 v) | ||
125 | { | ||
126 | /* | ||
127 | * scale delta to smallest unit (1/2^32) | ||
128 | * users must then scale back: count * 1/2^32 to get Joules | ||
129 | * or use ldexp(count, -32). | ||
130 | * Watts = Joules/Time delta | ||
131 | */ | ||
132 | return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit); | ||
133 | } | ||
134 | |||
135 | static u64 rapl_event_update(struct perf_event *event) | ||
136 | { | ||
137 | struct hw_perf_event *hwc = &event->hw; | ||
138 | u64 prev_raw_count, new_raw_count; | ||
139 | s64 delta, sdelta; | ||
140 | int shift = RAPL_CNTR_WIDTH; | ||
141 | |||
142 | again: | ||
143 | prev_raw_count = local64_read(&hwc->prev_count); | ||
144 | rdmsrl(event->hw.event_base, new_raw_count); | ||
145 | |||
146 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
147 | new_raw_count) != prev_raw_count) { | ||
148 | cpu_relax(); | ||
149 | goto again; | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * Now we have the new raw value and have updated the prev | ||
154 | * timestamp already. We can now calculate the elapsed delta | ||
155 | * (event-)time and add that to the generic event. | ||
156 | * | ||
157 | * Careful, not all hw sign-extends above the physical width | ||
158 | * of the count. | ||
159 | */ | ||
160 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
161 | delta >>= shift; | ||
162 | |||
163 | sdelta = rapl_scale(delta); | ||
164 | |||
165 | local64_add(sdelta, &event->count); | ||
166 | |||
167 | return new_raw_count; | ||
168 | } | ||
169 | |||
170 | static void rapl_start_hrtimer(struct rapl_pmu *pmu) | ||
171 | { | ||
172 | __hrtimer_start_range_ns(&pmu->hrtimer, | ||
173 | pmu->timer_interval, 0, | ||
174 | HRTIMER_MODE_REL_PINNED, 0); | ||
175 | } | ||
176 | |||
177 | static void rapl_stop_hrtimer(struct rapl_pmu *pmu) | ||
178 | { | ||
179 | hrtimer_cancel(&pmu->hrtimer); | ||
180 | } | ||
181 | |||
182 | static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) | ||
183 | { | ||
184 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | ||
185 | struct perf_event *event; | ||
186 | unsigned long flags; | ||
187 | |||
188 | if (!pmu->n_active) | ||
189 | return HRTIMER_NORESTART; | ||
190 | |||
191 | spin_lock_irqsave(&pmu->lock, flags); | ||
192 | |||
193 | list_for_each_entry(event, &pmu->active_list, active_entry) { | ||
194 | rapl_event_update(event); | ||
195 | } | ||
196 | |||
197 | spin_unlock_irqrestore(&pmu->lock, flags); | ||
198 | |||
199 | hrtimer_forward_now(hrtimer, pmu->timer_interval); | ||
200 | |||
201 | return HRTIMER_RESTART; | ||
202 | } | ||
203 | |||
204 | static void rapl_hrtimer_init(struct rapl_pmu *pmu) | ||
205 | { | ||
206 | struct hrtimer *hr = &pmu->hrtimer; | ||
207 | |||
208 | hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
209 | hr->function = rapl_hrtimer_handle; | ||
210 | } | ||
211 | |||
212 | static void __rapl_pmu_event_start(struct rapl_pmu *pmu, | ||
213 | struct perf_event *event) | ||
214 | { | ||
215 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) | ||
216 | return; | ||
217 | |||
218 | event->hw.state = 0; | ||
219 | |||
220 | list_add_tail(&event->active_entry, &pmu->active_list); | ||
221 | |||
222 | local64_set(&event->hw.prev_count, rapl_read_counter(event)); | ||
223 | |||
224 | pmu->n_active++; | ||
225 | if (pmu->n_active == 1) | ||
226 | rapl_start_hrtimer(pmu); | ||
227 | } | ||
228 | |||
229 | static void rapl_pmu_event_start(struct perf_event *event, int mode) | ||
230 | { | ||
231 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | ||
232 | unsigned long flags; | ||
233 | |||
234 | spin_lock_irqsave(&pmu->lock, flags); | ||
235 | __rapl_pmu_event_start(pmu, event); | ||
236 | spin_unlock_irqrestore(&pmu->lock, flags); | ||
237 | } | ||
238 | |||
239 | static void rapl_pmu_event_stop(struct perf_event *event, int mode) | ||
240 | { | ||
241 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | ||
242 | struct hw_perf_event *hwc = &event->hw; | ||
243 | unsigned long flags; | ||
244 | |||
245 | spin_lock_irqsave(&pmu->lock, flags); | ||
246 | |||
247 | /* mark event as deactivated and stopped */ | ||
248 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
249 | WARN_ON_ONCE(pmu->n_active <= 0); | ||
250 | pmu->n_active--; | ||
251 | if (pmu->n_active == 0) | ||
252 | rapl_stop_hrtimer(pmu); | ||
253 | |||
254 | list_del(&event->active_entry); | ||
255 | |||
256 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); | ||
257 | hwc->state |= PERF_HES_STOPPED; | ||
258 | } | ||
259 | |||
260 | /* check if update of sw counter is necessary */ | ||
261 | if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { | ||
262 | /* | ||
263 | * Drain the remaining delta count out of a event | ||
264 | * that we are disabling: | ||
265 | */ | ||
266 | rapl_event_update(event); | ||
267 | hwc->state |= PERF_HES_UPTODATE; | ||
268 | } | ||
269 | |||
270 | spin_unlock_irqrestore(&pmu->lock, flags); | ||
271 | } | ||
272 | |||
273 | static int rapl_pmu_event_add(struct perf_event *event, int mode) | ||
274 | { | ||
275 | struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu); | ||
276 | struct hw_perf_event *hwc = &event->hw; | ||
277 | unsigned long flags; | ||
278 | |||
279 | spin_lock_irqsave(&pmu->lock, flags); | ||
280 | |||
281 | hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
282 | |||
283 | if (mode & PERF_EF_START) | ||
284 | __rapl_pmu_event_start(pmu, event); | ||
285 | |||
286 | spin_unlock_irqrestore(&pmu->lock, flags); | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static void rapl_pmu_event_del(struct perf_event *event, int flags) | ||
292 | { | ||
293 | rapl_pmu_event_stop(event, PERF_EF_UPDATE); | ||
294 | } | ||
295 | |||
296 | static int rapl_pmu_event_init(struct perf_event *event) | ||
297 | { | ||
298 | u64 cfg = event->attr.config & RAPL_EVENT_MASK; | ||
299 | int bit, msr, ret = 0; | ||
300 | |||
301 | /* only look at RAPL events */ | ||
302 | if (event->attr.type != rapl_pmu_class.type) | ||
303 | return -ENOENT; | ||
304 | |||
305 | /* check only supported bits are set */ | ||
306 | if (event->attr.config & ~RAPL_EVENT_MASK) | ||
307 | return -EINVAL; | ||
308 | |||
309 | /* | ||
310 | * check event is known (determines counter) | ||
311 | */ | ||
312 | switch (cfg) { | ||
313 | case INTEL_RAPL_PP0: | ||
314 | bit = RAPL_IDX_PP0_NRG_STAT; | ||
315 | msr = MSR_PP0_ENERGY_STATUS; | ||
316 | break; | ||
317 | case INTEL_RAPL_PKG: | ||
318 | bit = RAPL_IDX_PKG_NRG_STAT; | ||
319 | msr = MSR_PKG_ENERGY_STATUS; | ||
320 | break; | ||
321 | case INTEL_RAPL_RAM: | ||
322 | bit = RAPL_IDX_RAM_NRG_STAT; | ||
323 | msr = MSR_DRAM_ENERGY_STATUS; | ||
324 | break; | ||
325 | case INTEL_RAPL_PP1: | ||
326 | bit = RAPL_IDX_PP1_NRG_STAT; | ||
327 | msr = MSR_PP1_ENERGY_STATUS; | ||
328 | break; | ||
329 | default: | ||
330 | return -EINVAL; | ||
331 | } | ||
332 | /* check event supported */ | ||
333 | if (!(rapl_cntr_mask & (1 << bit))) | ||
334 | return -EINVAL; | ||
335 | |||
336 | /* unsupported modes and filters */ | ||
337 | if (event->attr.exclude_user || | ||
338 | event->attr.exclude_kernel || | ||
339 | event->attr.exclude_hv || | ||
340 | event->attr.exclude_idle || | ||
341 | event->attr.exclude_host || | ||
342 | event->attr.exclude_guest || | ||
343 | event->attr.sample_period) /* no sampling */ | ||
344 | return -EINVAL; | ||
345 | |||
346 | /* must be done before validate_group */ | ||
347 | event->hw.event_base = msr; | ||
348 | event->hw.config = cfg; | ||
349 | event->hw.idx = bit; | ||
350 | |||
351 | return ret; | ||
352 | } | ||
353 | |||
354 | static void rapl_pmu_event_read(struct perf_event *event) | ||
355 | { | ||
356 | rapl_event_update(event); | ||
357 | } | ||
358 | |||
359 | static ssize_t rapl_get_attr_cpumask(struct device *dev, | ||
360 | struct device_attribute *attr, char *buf) | ||
361 | { | ||
362 | int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask); | ||
363 | |||
364 | buf[n++] = '\n'; | ||
365 | buf[n] = '\0'; | ||
366 | return n; | ||
367 | } | ||
368 | |||
369 | static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); | ||
370 | |||
371 | static struct attribute *rapl_pmu_attrs[] = { | ||
372 | &dev_attr_cpumask.attr, | ||
373 | NULL, | ||
374 | }; | ||
375 | |||
376 | static struct attribute_group rapl_pmu_attr_group = { | ||
377 | .attrs = rapl_pmu_attrs, | ||
378 | }; | ||
379 | |||
380 | EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); | ||
381 | EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); | ||
382 | EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); | ||
383 | EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); | ||
384 | |||
385 | EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); | ||
386 | EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); | ||
387 | EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); | ||
388 | EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); | ||
389 | |||
390 | /* | ||
391 | * we compute in 0.23 nJ increments regardless of MSR | ||
392 | */ | ||
393 | EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); | ||
394 | EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); | ||
395 | EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); | ||
396 | EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); | ||
397 | |||
398 | static struct attribute *rapl_events_srv_attr[] = { | ||
399 | EVENT_PTR(rapl_cores), | ||
400 | EVENT_PTR(rapl_pkg), | ||
401 | EVENT_PTR(rapl_ram), | ||
402 | |||
403 | EVENT_PTR(rapl_cores_unit), | ||
404 | EVENT_PTR(rapl_pkg_unit), | ||
405 | EVENT_PTR(rapl_ram_unit), | ||
406 | |||
407 | EVENT_PTR(rapl_cores_scale), | ||
408 | EVENT_PTR(rapl_pkg_scale), | ||
409 | EVENT_PTR(rapl_ram_scale), | ||
410 | NULL, | ||
411 | }; | ||
412 | |||
413 | static struct attribute *rapl_events_cln_attr[] = { | ||
414 | EVENT_PTR(rapl_cores), | ||
415 | EVENT_PTR(rapl_pkg), | ||
416 | EVENT_PTR(rapl_gpu), | ||
417 | |||
418 | EVENT_PTR(rapl_cores_unit), | ||
419 | EVENT_PTR(rapl_pkg_unit), | ||
420 | EVENT_PTR(rapl_gpu_unit), | ||
421 | |||
422 | EVENT_PTR(rapl_cores_scale), | ||
423 | EVENT_PTR(rapl_pkg_scale), | ||
424 | EVENT_PTR(rapl_gpu_scale), | ||
425 | NULL, | ||
426 | }; | ||
427 | |||
428 | static struct attribute_group rapl_pmu_events_group = { | ||
429 | .name = "events", | ||
430 | .attrs = NULL, /* patched at runtime */ | ||
431 | }; | ||
432 | |||
433 | DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); | ||
434 | static struct attribute *rapl_formats_attr[] = { | ||
435 | &format_attr_event.attr, | ||
436 | NULL, | ||
437 | }; | ||
438 | |||
439 | static struct attribute_group rapl_pmu_format_group = { | ||
440 | .name = "format", | ||
441 | .attrs = rapl_formats_attr, | ||
442 | }; | ||
443 | |||
444 | const struct attribute_group *rapl_attr_groups[] = { | ||
445 | &rapl_pmu_attr_group, | ||
446 | &rapl_pmu_format_group, | ||
447 | &rapl_pmu_events_group, | ||
448 | NULL, | ||
449 | }; | ||
450 | |||
451 | static struct pmu rapl_pmu_class = { | ||
452 | .attr_groups = rapl_attr_groups, | ||
453 | .task_ctx_nr = perf_invalid_context, /* system-wide only */ | ||
454 | .event_init = rapl_pmu_event_init, | ||
455 | .add = rapl_pmu_event_add, /* must have */ | ||
456 | .del = rapl_pmu_event_del, /* must have */ | ||
457 | .start = rapl_pmu_event_start, | ||
458 | .stop = rapl_pmu_event_stop, | ||
459 | .read = rapl_pmu_event_read, | ||
460 | }; | ||
461 | |||
462 | static void rapl_cpu_exit(int cpu) | ||
463 | { | ||
464 | struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); | ||
465 | int i, phys_id = topology_physical_package_id(cpu); | ||
466 | int target = -1; | ||
467 | |||
468 | /* find a new cpu on same package */ | ||
469 | for_each_online_cpu(i) { | ||
470 | if (i == cpu) | ||
471 | continue; | ||
472 | if (phys_id == topology_physical_package_id(i)) { | ||
473 | target = i; | ||
474 | break; | ||
475 | } | ||
476 | } | ||
477 | /* | ||
478 | * clear this cpu from the cpumask: | ||
479 | * if it was set in the cpumask and some other cpu on the package remains, | ||
480 | * then move to that cpu | ||
481 | */ | ||
482 | if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0) | ||
483 | cpumask_set_cpu(target, &rapl_cpu_mask); | ||
484 | |||
485 | WARN_ON(cpumask_empty(&rapl_cpu_mask)); | ||
486 | /* | ||
487 | * migrate events and context to new cpu | ||
488 | */ | ||
489 | if (target >= 0) | ||
490 | perf_pmu_migrate_context(pmu->pmu, cpu, target); | ||
491 | |||
492 | /* cancel overflow polling timer for CPU */ | ||
493 | rapl_stop_hrtimer(pmu); | ||
494 | } | ||
495 | |||
496 | static void rapl_cpu_init(int cpu) | ||
497 | { | ||
498 | int i, phys_id = topology_physical_package_id(cpu); | ||
499 | |||
500 | /* check if phys_id is already covered */ | ||
501 | for_each_cpu(i, &rapl_cpu_mask) { | ||
502 | if (phys_id == topology_physical_package_id(i)) | ||
503 | return; | ||
504 | } | ||
505 | /* was not found, so add it */ | ||
506 | cpumask_set_cpu(cpu, &rapl_cpu_mask); | ||
507 | } | ||
508 | |||
509 | static int rapl_cpu_prepare(int cpu) | ||
510 | { | ||
511 | struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); | ||
512 | int phys_id = topology_physical_package_id(cpu); | ||
513 | u64 ms; | ||
514 | |||
515 | if (pmu) | ||
516 | return 0; | ||
517 | |||
518 | if (phys_id < 0) | ||
519 | return -1; | ||
520 | |||
521 | pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); | ||
522 | if (!pmu) | ||
523 | return -1; | ||
524 | |||
525 | spin_lock_init(&pmu->lock); | ||
526 | |||
527 | INIT_LIST_HEAD(&pmu->active_list); | ||
528 | |||
529 | /* | ||
530 | * grab power unit as: 1/2^unit Joules | ||
531 | * | ||
532 | * we cache in local PMU instance | ||
533 | */ | ||
534 | rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit); | ||
535 | pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL; | ||
536 | pmu->pmu = &rapl_pmu_class; | ||
537 | |||
538 | /* | ||
539 | * use a reference of 200W for scaling the timeout | ||
540 | * so that counter overflows are not missed. | ||
541 | * 200W = 200 Joules/sec | ||
542 | * divide the interval by 2 to avoid lockstep (hence 2 * 100) | ||
543 | * if the hw unit is 32, then we use 2 ms (~1/200/2 sec) | ||
544 | */ | ||
545 | if (pmu->hw_unit < 32) | ||
546 | ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1)); | ||
547 | else | ||
548 | ms = 2; | ||
549 | |||
550 | pmu->timer_interval = ms_to_ktime(ms); | ||
551 | |||
552 | rapl_hrtimer_init(pmu); | ||
553 | |||
554 | /* set RAPL pmu for this cpu for now */ | ||
555 | per_cpu(rapl_pmu, cpu) = pmu; | ||
556 | per_cpu(rapl_pmu_to_free, cpu) = NULL; | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static void rapl_cpu_kfree(int cpu) | ||
562 | { | ||
563 | struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); | ||
564 | |||
565 | kfree(pmu); | ||
566 | |||
567 | per_cpu(rapl_pmu_to_free, cpu) = NULL; | ||
568 | } | ||
569 | |||
570 | static int rapl_cpu_dying(int cpu) | ||
571 | { | ||
572 | struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); | ||
573 | |||
574 | if (!pmu) | ||
575 | return 0; | ||
576 | |||
577 | per_cpu(rapl_pmu, cpu) = NULL; | ||
578 | |||
579 | per_cpu(rapl_pmu_to_free, cpu) = pmu; | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | |||
584 | static int rapl_cpu_notifier(struct notifier_block *self, | ||
585 | unsigned long action, void *hcpu) | ||
586 | { | ||
587 | unsigned int cpu = (long)hcpu; | ||
588 | |||
589 | switch (action & ~CPU_TASKS_FROZEN) { | ||
590 | case CPU_UP_PREPARE: | ||
591 | rapl_cpu_prepare(cpu); | ||
592 | break; | ||
593 | case CPU_STARTING: | ||
594 | rapl_cpu_init(cpu); | ||
595 | break; | ||
596 | case CPU_UP_CANCELED: | ||
597 | case CPU_DYING: | ||
598 | rapl_cpu_dying(cpu); | ||
599 | break; | ||
600 | case CPU_ONLINE: | ||
601 | case CPU_DEAD: | ||
602 | rapl_cpu_kfree(cpu); | ||
603 | break; | ||
604 | case CPU_DOWN_PREPARE: | ||
605 | rapl_cpu_exit(cpu); | ||
606 | break; | ||
607 | default: | ||
608 | break; | ||
609 | } | ||
610 | |||
611 | return NOTIFY_OK; | ||
612 | } | ||
613 | |||
614 | static const struct x86_cpu_id rapl_cpu_match[] = { | ||
615 | [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 }, | ||
616 | [1] = {}, | ||
617 | }; | ||
618 | |||
619 | static int __init rapl_pmu_init(void) | ||
620 | { | ||
621 | struct rapl_pmu *pmu; | ||
622 | int cpu, ret; | ||
623 | |||
624 | /* | ||
625 | * check for Intel processor family 6 | ||
626 | */ | ||
627 | if (!x86_match_cpu(rapl_cpu_match)) | ||
628 | return 0; | ||
629 | |||
630 | /* check supported CPU */ | ||
631 | switch (boot_cpu_data.x86_model) { | ||
632 | case 42: /* Sandy Bridge */ | ||
633 | case 58: /* Ivy Bridge */ | ||
634 | case 60: /* Haswell */ | ||
635 | case 69: /* Haswell-Celeron */ | ||
636 | rapl_cntr_mask = RAPL_IDX_CLN; | ||
637 | rapl_pmu_events_group.attrs = rapl_events_cln_attr; | ||
638 | break; | ||
639 | case 45: /* Sandy Bridge-EP */ | ||
640 | case 62: /* IvyTown */ | ||
641 | rapl_cntr_mask = RAPL_IDX_SRV; | ||
642 | rapl_pmu_events_group.attrs = rapl_events_srv_attr; | ||
643 | break; | ||
644 | |||
645 | default: | ||
646 | /* unsupported */ | ||
647 | return 0; | ||
648 | } | ||
649 | get_online_cpus(); | ||
650 | |||
651 | for_each_online_cpu(cpu) { | ||
652 | rapl_cpu_prepare(cpu); | ||
653 | rapl_cpu_init(cpu); | ||
654 | } | ||
655 | |||
656 | perf_cpu_notifier(rapl_cpu_notifier); | ||
657 | |||
658 | ret = perf_pmu_register(&rapl_pmu_class, "power", -1); | ||
659 | if (WARN_ON(ret)) { | ||
660 | pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret); | ||
661 | put_online_cpus(); | ||
662 | return -1; | ||
663 | } | ||
664 | |||
665 | pmu = __get_cpu_var(rapl_pmu); | ||
666 | |||
667 | pr_info("RAPL PMU detected, hw unit 2^-%d Joules," | ||
668 | " API unit is 2^-32 Joules," | ||
669 | " %d fixed counters" | ||
670 | " %llu ms ovfl timer\n", | ||
671 | pmu->hw_unit, | ||
672 | hweight32(rapl_cntr_mask), | ||
673 | ktime_to_ms(pmu->timer_interval)); | ||
674 | |||
675 | put_online_cpus(); | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | device_initcall(rapl_pmu_init); | ||
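Two calculations in the new RAPL driver are easy to check by hand: rapl_scale() shifts every raw counter delta into the fixed 2^-32 J unit that the sysfs "scale" attributes advertise (tools then apply ldexp(count, -32)), and rapl_cpu_prepare() derives the overflow-polling interval from a 200 W worst case. A small standalone example of both; the hw_unit and delta values are made up:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int      hw_unit = 16;    /* one raw count = 1/2^16 J (typical)    */
            uint64_t delta   = 50000; /* raw counter delta, invented           */

            /* same shift as rapl_scale(): re-base the delta to 2^-32 J units  */
            uint64_t scaled = delta << (32 - hw_unit);

            /* what user space does with the reported count                    */
            double joules = ldexp((double)scaled, -32);

            /* overflow timer from rapl_cpu_prepare(): 5 * 2^(31 - hw_unit) ms */
            uint64_t ms = (1000 / (2 * 100)) * (1ULL << (32 - hw_unit - 1));

            printf("%.6f J, poll every %llu ms\n",
                   joules, (unsigned long long)ms);
            return 0;
    }

With these inputs the delta corresponds to roughly 0.763 J and the hrtimer fires about every 164 seconds, comfortably inside the ~327 s a 32-bit counter would need to wrap at a sustained 200 W.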
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index 88db010845cb..384df5105fbc 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c | |||
@@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s) | |||
31 | } | 31 | } |
32 | __setup("nordrand", x86_rdrand_setup); | 32 | __setup("nordrand", x86_rdrand_setup); |
33 | 33 | ||
34 | /* We can't use arch_get_random_long() here since alternatives haven't run */ | ||
35 | static inline int rdrand_long(unsigned long *v) | ||
36 | { | ||
37 | int ok; | ||
38 | asm volatile("1: " RDRAND_LONG "\n\t" | ||
39 | "jc 2f\n\t" | ||
40 | "decl %0\n\t" | ||
41 | "jnz 1b\n\t" | ||
42 | "2:" | ||
43 | : "=r" (ok), "=a" (*v) | ||
44 | : "0" (RDRAND_RETRY_LOOPS)); | ||
45 | return ok; | ||
46 | } | ||
47 | |||
48 | /* | 34 | /* |
49 | * Force a reseed cycle; we are architecturally guaranteed a reseed | 35 | * Force a reseed cycle; we are architecturally guaranteed a reseed |
50 | * after no more than 512 128-bit chunks of random data. This also | 36 | * after no more than 512 128-bit chunks of random data. This also |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index aa0430d69b90..3fa0e5ad86b4 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/mm.h> | 2 | #include <linux/mm.h> |
3 | #include <linux/init.h> | ||
4 | #include <asm/processor.h> | 3 | #include <asm/processor.h> |
5 | #include <asm/msr.h> | 4 | #include <asm/msr.h> |
6 | #include "cpu.h" | 5 | #include "cpu.h" |
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index 75c5ad5d35cc..ef9c2a0078bd 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/init.h> | ||
3 | #include <asm/processor.h> | 2 | #include <asm/processor.h> |
4 | #include "cpu.h" | 3 | #include "cpu.h" |
5 | 4 | ||
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 18677a90d6a3..a57902efe2d5 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -7,7 +7,6 @@ | |||
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/types.h> | 10 | #include <linux/types.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 5d3fe8d36e4a..f6dfd9334b67 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <linux/mm.h> | 1 | #include <linux/mm.h> |
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/init.h> | ||
4 | #include <linux/init_task.h> | 3 | #include <linux/init_task.h> |
5 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
6 | 5 | ||
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index f66ff162dce8..a67b47c31314 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/sched.h> | 40 | #include <linux/sched.h> |
41 | #include <linux/init.h> | ||
42 | #include <linux/smp.h> | 41 | #include <linux/smp.h> |
43 | 42 | ||
44 | #include <asm/hw_breakpoint.h> | 43 | #include <asm/hw_breakpoint.h> |
diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c new file mode 100644 index 000000000000..c3aae6672843 --- /dev/null +++ b/arch/x86/kernel/iosf_mbi.c | |||
@@ -0,0 +1,226 @@ | |||
1 | /* | ||
2 | * IOSF-SB MailBox Interface Driver | ||
3 | * Copyright (c) 2013, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * | ||
15 | * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a | ||
16 | * mailbox interface (MBI) to communicate with multiple devices. This | ||
17 | * driver implements access to this interface for those platforms that can | ||
18 | * enumerate the device using PCI. | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/pci.h> | ||
25 | |||
26 | #include <asm/iosf_mbi.h> | ||
27 | |||
28 | static DEFINE_SPINLOCK(iosf_mbi_lock); | ||
29 | |||
30 | static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset) | ||
31 | { | ||
32 | return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE; | ||
33 | } | ||
34 | |||
35 | static struct pci_dev *mbi_pdev; /* one mbi device */ | ||
36 | |||
37 | static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) | ||
38 | { | ||
39 | int result; | ||
40 | |||
41 | if (!mbi_pdev) | ||
42 | return -ENODEV; | ||
43 | |||
44 | if (mcrx) { | ||
45 | result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, | ||
46 | mcrx); | ||
47 | if (result < 0) | ||
48 | goto fail_read; | ||
49 | } | ||
50 | |||
51 | result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); | ||
52 | if (result < 0) | ||
53 | goto fail_read; | ||
54 | |||
55 | result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); | ||
56 | if (result < 0) | ||
57 | goto fail_read; | ||
58 | |||
59 | return 0; | ||
60 | |||
61 | fail_read: | ||
62 | dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); | ||
63 | return result; | ||
64 | } | ||
65 | |||
66 | static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) | ||
67 | { | ||
68 | int result; | ||
69 | |||
70 | if (!mbi_pdev) | ||
71 | return -ENODEV; | ||
72 | |||
73 | result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); | ||
74 | if (result < 0) | ||
75 | goto fail_write; | ||
76 | |||
77 | if (mcrx) { | ||
78 | result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, | ||
79 | mcrx); | ||
80 | if (result < 0) | ||
81 | goto fail_write; | ||
82 | } | ||
83 | |||
84 | result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); | ||
85 | if (result < 0) | ||
86 | goto fail_write; | ||
87 | |||
88 | return 0; | ||
89 | |||
90 | fail_write: | ||
91 | dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); | ||
92 | return result; | ||
93 | } | ||
94 | |||
95 | int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) | ||
96 | { | ||
97 | u32 mcr, mcrx; | ||
98 | unsigned long flags; | ||
99 | int ret; | ||
100 | |||
101 | /* Access to the GFX unit is handled by GPU code */ | ||
102 | if (port == BT_MBI_UNIT_GFX) { | ||
103 | WARN_ON(1); | ||
104 | return -EPERM; | ||
105 | } | ||
106 | |||
107 | mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); | ||
108 | mcrx = offset & MBI_MASK_HI; | ||
109 | |||
110 | spin_lock_irqsave(&iosf_mbi_lock, flags); | ||
111 | ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr); | ||
112 | spin_unlock_irqrestore(&iosf_mbi_lock, flags); | ||
113 | |||
114 | return ret; | ||
115 | } | ||
116 | EXPORT_SYMBOL(iosf_mbi_read); | ||
117 | |||
118 | int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr) | ||
119 | { | ||
120 | u32 mcr, mcrx; | ||
121 | unsigned long flags; | ||
122 | int ret; | ||
123 | |||
124 | /* Access to the GFX unit is handled by GPU code */ | ||
125 | if (port == BT_MBI_UNIT_GFX) { | ||
126 | WARN_ON(1); | ||
127 | return -EPERM; | ||
128 | } | ||
129 | |||
130 | mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); | ||
131 | mcrx = offset & MBI_MASK_HI; | ||
132 | |||
133 | spin_lock_irqsave(&iosf_mbi_lock, flags); | ||
134 | ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr); | ||
135 | spin_unlock_irqrestore(&iosf_mbi_lock, flags); | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | EXPORT_SYMBOL(iosf_mbi_write); | ||
140 | |||
141 | int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask) | ||
142 | { | ||
143 | u32 mcr, mcrx; | ||
144 | u32 value; | ||
145 | unsigned long flags; | ||
146 | int ret; | ||
147 | |||
148 | /* Access to the GFX unit is handled by GPU code */ | ||
149 | if (port == BT_MBI_UNIT_GFX) { | ||
150 | WARN_ON(1); | ||
151 | return -EPERM; | ||
152 | } | ||
153 | |||
154 | mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); | ||
155 | mcrx = offset & MBI_MASK_HI; | ||
156 | |||
157 | spin_lock_irqsave(&iosf_mbi_lock, flags); | ||
158 | |||
159 | /* Read current mdr value */ | ||
160 | ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value); | ||
161 | if (ret < 0) { | ||
162 | spin_unlock_irqrestore(&iosf_mbi_lock, flags); | ||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | /* Apply mask */ | ||
167 | value &= ~mask; | ||
168 | mdr &= mask; | ||
169 | value |= mdr; | ||
170 | |||
171 | /* Write back */ | ||
172 | ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value); | ||
173 | |||
174 | spin_unlock_irqrestore(&iosf_mbi_lock, flags); | ||
175 | |||
176 | return ret; | ||
177 | } | ||
178 | EXPORT_SYMBOL(iosf_mbi_modify); | ||
179 | |||
180 | static int iosf_mbi_probe(struct pci_dev *pdev, | ||
181 | const struct pci_device_id *unused) | ||
182 | { | ||
183 | int ret; | ||
184 | |||
185 | ret = pci_enable_device(pdev); | ||
186 | if (ret < 0) { | ||
187 | dev_err(&pdev->dev, "error: could not enable device\n"); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | mbi_pdev = pci_dev_get(pdev); | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = { | ||
196 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) }, | ||
197 | { 0, }, | ||
198 | }; | ||
199 | MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids); | ||
200 | |||
201 | static struct pci_driver iosf_mbi_pci_driver = { | ||
202 | .name = "iosf_mbi_pci", | ||
203 | .probe = iosf_mbi_probe, | ||
204 | .id_table = iosf_mbi_pci_ids, | ||
205 | }; | ||
206 | |||
207 | static int __init iosf_mbi_init(void) | ||
208 | { | ||
209 | return pci_register_driver(&iosf_mbi_pci_driver); | ||
210 | } | ||
211 | |||
212 | static void __exit iosf_mbi_exit(void) | ||
213 | { | ||
214 | pci_unregister_driver(&iosf_mbi_pci_driver); | ||
215 | if (mbi_pdev) { | ||
216 | pci_dev_put(mbi_pdev); | ||
217 | mbi_pdev = NULL; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | module_init(iosf_mbi_init); | ||
222 | module_exit(iosf_mbi_exit); | ||
223 | |||
224 | MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); | ||
225 | MODULE_DESCRIPTION("IOSF Mailbox Interface accessor"); | ||
226 | MODULE_LICENSE("GPL v2"); | ||
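All three exported accessors above funnel their arguments through iosf_mbi_form_mcr(), which packs the opcode, target port and the low byte of the register offset into a single 32-bit control word (the high offset bits go into the separate MCRX register write). A standalone sketch of that packing; the enable-bit value and the opcode/port/offset numbers are placeholders, the real definitions live in asm/iosf_mbi.h:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_MBI_ENABLE 0xf0   /* placeholder for MBI_ENABLE */

    /* mirrors iosf_mbi_form_mcr(): op[31:24] | port[23:16] | offset[15:8] | enable */
    static uint32_t form_mcr(uint8_t op, uint8_t port, uint8_t offset)
    {
            return ((uint32_t)op << 24) | ((uint32_t)port << 16) |
                   ((uint32_t)offset << 8) | EXAMPLE_MBI_ENABLE;
    }

    int main(void)
    {
            /* invented opcode 0x10, port 0x04, register offset 0x3c */
            printf("MCR = 0x%08x\n", (unsigned)form_mcr(0x10, 0x04, 0x3c));
            /* offset bits above [7:0] would be written to MCRX instead */
            return 0;
    }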
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 4207e8d1a094..dbb60878b744 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -193,9 +193,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
193 | if (!handle_irq(irq, regs)) { | 193 | if (!handle_irq(irq, regs)) { |
194 | ack_APIC_irq(); | 194 | ack_APIC_irq(); |
195 | 195 | ||
196 | if (printk_ratelimit()) | 196 | if (irq != VECTOR_RETRIGGERED) { |
197 | pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n", | 197 | pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n", |
198 | __func__, smp_processor_id(), vector, irq); | 198 | __func__, smp_processor_id(), |
199 | vector, irq); | ||
200 | } else { | ||
201 | __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); | ||
202 | } | ||
199 | } | 203 | } |
200 | 204 | ||
201 | irq_exit(); | 205 | irq_exit(); |
@@ -414,7 +418,7 @@ void fixup_irqs(void) | |||
414 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 418 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
415 | unsigned int irr; | 419 | unsigned int irr; |
416 | 420 | ||
417 | if (__this_cpu_read(vector_irq[vector]) < 0) | 421 | if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED) |
418 | continue; | 422 | continue; |
419 | 423 | ||
420 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | 424 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); |
@@ -425,11 +429,14 @@ void fixup_irqs(void) | |||
425 | data = irq_desc_get_irq_data(desc); | 429 | data = irq_desc_get_irq_data(desc); |
426 | chip = irq_data_get_irq_chip(data); | 430 | chip = irq_data_get_irq_chip(data); |
427 | raw_spin_lock(&desc->lock); | 431 | raw_spin_lock(&desc->lock); |
428 | if (chip->irq_retrigger) | 432 | if (chip->irq_retrigger) { |
429 | chip->irq_retrigger(data); | 433 | chip->irq_retrigger(data); |
434 | __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); | ||
435 | } | ||
430 | raw_spin_unlock(&desc->lock); | 436 | raw_spin_unlock(&desc->lock); |
431 | } | 437 | } |
432 | __this_cpu_write(vector_irq[vector], -1); | 438 | if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) |
439 | __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED); | ||
433 | } | 440 | } |
434 | } | 441 | } |
435 | #endif | 442 | #endif |
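The two irq.c hunks above cooperate: fixup_irqs() marks any vector it retriggers while evacuating a CPU, and do_IRQ() later clears such a slot quietly instead of warning about a missing handler. The sentinel values themselves are defined elsewhere in this series (presumably in asm/hw_irq.h); the ones below are assumptions used only to make the state transitions concrete:

    #include <stdio.h>

    #define VECTOR_UNDEFINED   (-1)  /* assumed value: slot is free             */
    #define VECTOR_RETRIGGERED (-2)  /* assumed value: replayed by fixup_irqs() */

    int main(void)
    {
            int vector_irq_slot = 123;            /* normally holds the irq number */

            vector_irq_slot = VECTOR_RETRIGGERED; /* fixup_irqs() after retrigger  */

            /* do_IRQ(): no handler found for this vector */
            if (vector_irq_slot == VECTOR_RETRIGGERED)
                    vector_irq_slot = VECTOR_UNDEFINED;  /* clear silently        */
            else
                    fprintf(stderr, "No irq handler for vector\n");

            printf("slot = %d\n", vector_irq_slot);
            return 0;
    }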
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index a2a1fbc594ff..7f50156542fb 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -52,7 +52,7 @@ static struct irqaction irq2 = { | |||
52 | }; | 52 | }; |
53 | 53 | ||
54 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | 54 | DEFINE_PER_CPU(vector_irq_t, vector_irq) = { |
55 | [0 ... NR_VECTORS - 1] = -1, | 55 | [0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED, |
56 | }; | 56 | }; |
57 | 57 | ||
58 | int vector_used_by_percpu_irq(unsigned int vector) | 58 | int vector_used_by_percpu_irq(unsigned int vector) |
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
60 | int cpu; | 60 | int cpu; |
61 | 61 | ||
62 | for_each_online_cpu(cpu) { | 62 | for_each_online_cpu(cpu) { |
63 | if (per_cpu(vector_irq, cpu)[vector] != -1) | 63 | if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED) |
64 | return 1; | 64 | return 1; |
65 | } | 65 | } |
66 | 66 | ||
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 836f8322960e..7ec1d5f8d283 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
41 | #include <linux/kgdb.h> | 41 | #include <linux/kgdb.h> |
42 | #include <linux/init.h> | ||
43 | #include <linux/smp.h> | 42 | #include <linux/smp.h> |
44 | #include <linux/nmi.h> | 43 | #include <linux/nmi.h> |
45 | #include <linux/hw_breakpoint.h> | 44 | #include <linux/hw_breakpoint.h> |
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c new file mode 100644 index 000000000000..c2bedaea11f7 --- /dev/null +++ b/arch/x86/kernel/ksysfs.c | |||
@@ -0,0 +1,340 @@ | |||
1 | /* | ||
2 | * Architecture specific sysfs attributes in /sys/kernel | ||
3 | * | ||
4 | * Copyright (C) 2007, Intel Corp. | ||
5 | * Huang Ying <ying.huang@intel.com> | ||
6 | * Copyright (C) 2013 Red Hat, Inc. | ||
7 | * Dave Young <dyoung@redhat.com> | ||
8 | * | ||
9 | * This file is released under the GPLv2 | ||
10 | */ | ||
11 | |||
12 | #include <linux/kobject.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/sysfs.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/stat.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/mm.h> | ||
19 | |||
20 | #include <asm/io.h> | ||
21 | #include <asm/setup.h> | ||
22 | |||
23 | static ssize_t version_show(struct kobject *kobj, | ||
24 | struct kobj_attribute *attr, char *buf) | ||
25 | { | ||
26 | return sprintf(buf, "0x%04x\n", boot_params.hdr.version); | ||
27 | } | ||
28 | |||
29 | static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version); | ||
30 | |||
31 | static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj, | ||
32 | struct bin_attribute *bin_attr, | ||
33 | char *buf, loff_t off, size_t count) | ||
34 | { | ||
35 | memcpy(buf, (void *)&boot_params + off, count); | ||
36 | return count; | ||
37 | } | ||
38 | |||
39 | static struct bin_attribute boot_params_data_attr = { | ||
40 | .attr = { | ||
41 | .name = "data", | ||
42 | .mode = S_IRUGO, | ||
43 | }, | ||
44 | .read = boot_params_data_read, | ||
45 | .size = sizeof(boot_params), | ||
46 | }; | ||
47 | |||
48 | static struct attribute *boot_params_version_attrs[] = { | ||
49 | &boot_params_version_attr.attr, | ||
50 | NULL, | ||
51 | }; | ||
52 | |||
53 | static struct bin_attribute *boot_params_data_attrs[] = { | ||
54 | &boot_params_data_attr, | ||
55 | NULL, | ||
56 | }; | ||
57 | |||
58 | static struct attribute_group boot_params_attr_group = { | ||
59 | .attrs = boot_params_version_attrs, | ||
60 | .bin_attrs = boot_params_data_attrs, | ||
61 | }; | ||
62 | |||
63 | static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr) | ||
64 | { | ||
65 | const char *name; | ||
66 | |||
67 | name = kobject_name(kobj); | ||
68 | return kstrtoint(name, 10, nr); | ||
69 | } | ||
70 | |||
71 | static int get_setup_data_paddr(int nr, u64 *paddr) | ||
72 | { | ||
73 | int i = 0; | ||
74 | struct setup_data *data; | ||
75 | u64 pa_data = boot_params.hdr.setup_data; | ||
76 | |||
77 | while (pa_data) { | ||
78 | if (nr == i) { | ||
79 | *paddr = pa_data; | ||
80 | return 0; | ||
81 | } | ||
82 | data = ioremap_cache(pa_data, sizeof(*data)); | ||
83 | if (!data) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | pa_data = data->next; | ||
87 | iounmap(data); | ||
88 | i++; | ||
89 | } | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
93 | static int __init get_setup_data_size(int nr, size_t *size) | ||
94 | { | ||
95 | int i = 0; | ||
96 | struct setup_data *data; | ||
97 | u64 pa_data = boot_params.hdr.setup_data; | ||
98 | |||
99 | while (pa_data) { | ||
100 | data = ioremap_cache(pa_data, sizeof(*data)); | ||
101 | if (!data) | ||
102 | return -ENOMEM; | ||
103 | if (nr == i) { | ||
104 | *size = data->len; | ||
105 | iounmap(data); | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | pa_data = data->next; | ||
110 | iounmap(data); | ||
111 | i++; | ||
112 | } | ||
113 | return -EINVAL; | ||
114 | } | ||
115 | |||
116 | static ssize_t type_show(struct kobject *kobj, | ||
117 | struct kobj_attribute *attr, char *buf) | ||
118 | { | ||
119 | int nr, ret; | ||
120 | u64 paddr; | ||
121 | struct setup_data *data; | ||
122 | |||
123 | ret = kobj_to_setup_data_nr(kobj, &nr); | ||
124 | if (ret) | ||
125 | return ret; | ||
126 | |||
127 | ret = get_setup_data_paddr(nr, &paddr); | ||
128 | if (ret) | ||
129 | return ret; | ||
130 | data = ioremap_cache(paddr, sizeof(*data)); | ||
131 | if (!data) | ||
132 | return -ENOMEM; | ||
133 | |||
134 | ret = sprintf(buf, "0x%x\n", data->type); | ||
135 | iounmap(data); | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static ssize_t setup_data_data_read(struct file *fp, | ||
140 | struct kobject *kobj, | ||
141 | struct bin_attribute *bin_attr, | ||
142 | char *buf, | ||
143 | loff_t off, size_t count) | ||
144 | { | ||
145 | int nr, ret = 0; | ||
146 | u64 paddr; | ||
147 | struct setup_data *data; | ||
148 | void *p; | ||
149 | |||
150 | ret = kobj_to_setup_data_nr(kobj, &nr); | ||
151 | if (ret) | ||
152 | return ret; | ||
153 | |||
154 | ret = get_setup_data_paddr(nr, &paddr); | ||
155 | if (ret) | ||
156 | return ret; | ||
157 | data = ioremap_cache(paddr, sizeof(*data)); | ||
158 | if (!data) | ||
159 | return -ENOMEM; | ||
160 | |||
161 | if (off > data->len) { | ||
162 | ret = -EINVAL; | ||
163 | goto out; | ||
164 | } | ||
165 | |||
166 | if (count > data->len - off) | ||
167 | count = data->len - off; | ||
168 | |||
169 | if (!count) | ||
170 | goto out; | ||
171 | |||
172 | ret = count; | ||
173 | p = ioremap_cache(paddr + sizeof(*data), data->len); | ||
174 | if (!p) { | ||
175 | ret = -ENOMEM; | ||
176 | goto out; | ||
177 | } | ||
178 | memcpy(buf, p + off, count); | ||
179 | iounmap(p); | ||
180 | out: | ||
181 | iounmap(data); | ||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | static struct kobj_attribute type_attr = __ATTR_RO(type); | ||
186 | |||
187 | static struct bin_attribute data_attr = { | ||
188 | .attr = { | ||
189 | .name = "data", | ||
190 | .mode = S_IRUGO, | ||
191 | }, | ||
192 | .read = setup_data_data_read, | ||
193 | }; | ||
194 | |||
195 | static struct attribute *setup_data_type_attrs[] = { | ||
196 | &type_attr.attr, | ||
197 | NULL, | ||
198 | }; | ||
199 | |||
200 | static struct bin_attribute *setup_data_data_attrs[] = { | ||
201 | &data_attr, | ||
202 | NULL, | ||
203 | }; | ||
204 | |||
205 | static struct attribute_group setup_data_attr_group = { | ||
206 | .attrs = setup_data_type_attrs, | ||
207 | .bin_attrs = setup_data_data_attrs, | ||
208 | }; | ||
209 | |||
210 | static int __init create_setup_data_node(struct kobject *parent, | ||
211 | struct kobject **kobjp, int nr) | ||
212 | { | ||
213 | int ret = 0; | ||
214 | size_t size; | ||
215 | struct kobject *kobj; | ||
216 | char name[16]; /* should be enough for setup_data node numbers */ | ||
217 | snprintf(name, 16, "%d", nr); | ||
218 | |||
219 | kobj = kobject_create_and_add(name, parent); | ||
220 | if (!kobj) | ||
221 | return -ENOMEM; | ||
222 | |||
223 | ret = get_setup_data_size(nr, &size); | ||
224 | if (ret) | ||
225 | goto out_kobj; | ||
226 | |||
227 | data_attr.size = size; | ||
228 | ret = sysfs_create_group(kobj, &setup_data_attr_group); | ||
229 | if (ret) | ||
230 | goto out_kobj; | ||
231 | *kobjp = kobj; | ||
232 | |||
233 | return 0; | ||
234 | out_kobj: | ||
235 | kobject_put(kobj); | ||
236 | return ret; | ||
237 | } | ||
238 | |||
239 | static void __init cleanup_setup_data_node(struct kobject *kobj) | ||
240 | { | ||
241 | sysfs_remove_group(kobj, &setup_data_attr_group); | ||
242 | kobject_put(kobj); | ||
243 | } | ||
244 | |||
245 | static int __init get_setup_data_total_num(u64 pa_data, int *nr) | ||
246 | { | ||
247 | int ret = 0; | ||
248 | struct setup_data *data; | ||
249 | |||
250 | *nr = 0; | ||
251 | while (pa_data) { | ||
252 | *nr += 1; | ||
253 | data = ioremap_cache(pa_data, sizeof(*data)); | ||
254 | if (!data) { | ||
255 | ret = -ENOMEM; | ||
256 | goto out; | ||
257 | } | ||
258 | pa_data = data->next; | ||
259 | iounmap(data); | ||
260 | } | ||
261 | |||
262 | out: | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | static int __init create_setup_data_nodes(struct kobject *parent) | ||
267 | { | ||
268 | struct kobject *setup_data_kobj, **kobjp; | ||
269 | u64 pa_data; | ||
270 | int i, j, nr, ret = 0; | ||
271 | |||
272 | pa_data = boot_params.hdr.setup_data; | ||
273 | if (!pa_data) | ||
274 | return 0; | ||
275 | |||
276 | setup_data_kobj = kobject_create_and_add("setup_data", parent); | ||
277 | if (!setup_data_kobj) { | ||
278 | ret = -ENOMEM; | ||
279 | goto out; | ||
280 | } | ||
281 | |||
282 | ret = get_setup_data_total_num(pa_data, &nr); | ||
283 | if (ret) | ||
284 | goto out_setup_data_kobj; | ||
285 | |||
286 | kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL); | ||
287 | if (!kobjp) { | ||
288 | ret = -ENOMEM; | ||
289 | goto out_setup_data_kobj; | ||
290 | } | ||
291 | |||
292 | for (i = 0; i < nr; i++) { | ||
293 | ret = create_setup_data_node(setup_data_kobj, kobjp + i, i); | ||
294 | if (ret) | ||
295 | goto out_clean_nodes; | ||
296 | } | ||
297 | |||
298 | kfree(kobjp); | ||
299 | return 0; | ||
300 | |||
301 | out_clean_nodes: | ||
302 | for (j = i - 1; j > 0; j--) | ||
303 | cleanup_setup_data_node(*(kobjp + j)); | ||
304 | kfree(kobjp); | ||
305 | out_setup_data_kobj: | ||
306 | kobject_put(setup_data_kobj); | ||
307 | out: | ||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | static int __init boot_params_ksysfs_init(void) | ||
312 | { | ||
313 | int ret; | ||
314 | struct kobject *boot_params_kobj; | ||
315 | |||
316 | boot_params_kobj = kobject_create_and_add("boot_params", | ||
317 | kernel_kobj); | ||
318 | if (!boot_params_kobj) { | ||
319 | ret = -ENOMEM; | ||
320 | goto out; | ||
321 | } | ||
322 | |||
323 | ret = sysfs_create_group(boot_params_kobj, &boot_params_attr_group); | ||
324 | if (ret) | ||
325 | goto out_boot_params_kobj; | ||
326 | |||
327 | ret = create_setup_data_nodes(boot_params_kobj); | ||
328 | if (ret) | ||
329 | goto out_create_group; | ||
330 | |||
331 | return 0; | ||
332 | out_create_group: | ||
333 | sysfs_remove_group(boot_params_kobj, &boot_params_attr_group); | ||
334 | out_boot_params_kobj: | ||
335 | kobject_put(boot_params_kobj); | ||
336 | out: | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | arch_initcall(boot_params_ksysfs_init); | ||
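The new ksysfs.c above exposes the boot_params structure and each setup_data entry under /sys/kernel/boot_params: a "version" text attribute, a raw "data" blob, and numbered setup_data/<N>/{type,data} nodes. As a rough sketch of how those nodes could be inspected from userspace — the paths follow directly from the kobject and attribute names in the patch, while the loop bound and buffer sizes are arbitrary illustration, not anything the patch defines:

/*
 * Rough userspace sketch (not part of the patch) that walks the sysfs nodes
 * created by ksysfs.c. The paths mirror the kobject/attribute names in the
 * code above; the loop bound of 16 is arbitrary.
 */
#include <stdio.h>

static void dump_file(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;			/* node does not exist, skip it */
	if (fgets(buf, sizeof(buf), f))
		printf("%-48s %s", path, buf);
	fclose(f);
}

int main(void)
{
	char path[128];
	int i;

	/* "version" attribute from boot_params_ksysfs_init() */
	dump_file("/sys/kernel/boot_params/version");

	/* per-entry "type" attributes from create_setup_data_nodes() */
	for (i = 0; i < 16; i++) {
		snprintf(path, sizeof(path),
			 "/sys/kernel/boot_params/setup_data/%d/type", i);
		dump_file(path);
	}
	return 0;
}

Run as root on a kernel carrying this patch, this would print the boot protocol version and the type of each setup_data entry that actually exists; missing nodes are simply skipped.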
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 5b19e4d78b00..1667b1de8d5d 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/kexec.h> | 10 | #include <linux/kexec.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/init.h> | ||
13 | #include <linux/numa.h> | 12 | #include <linux/numa.h> |
14 | #include <linux/ftrace.h> | 13 | #include <linux/ftrace.h> |
15 | #include <linux/suspend.h> | 14 | #include <linux/suspend.h> |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 871be4a84c7d..da15918d1c81 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -3,7 +3,6 @@ | |||
3 | #include <linux/dma-mapping.h> | 3 | #include <linux/dma-mapping.h> |
4 | #include <linux/scatterlist.h> | 4 | #include <linux/scatterlist.h> |
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <linux/init.h> | ||
7 | #include <linux/gfp.h> | 6 | #include <linux/gfp.h> |
8 | #include <linux/pci.h> | 7 | #include <linux/pci.h> |
9 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 6f1236c29c4b..0de43e98ce08 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/reboot.h> | 26 | #include <linux/reboot.h> |
27 | #include <linux/init.h> | ||
28 | #include <linux/mc146818rtc.h> | 27 | #include <linux/mc146818rtc.h> |
29 | #include <linux/module.h> | 28 | #include <linux/module.h> |
30 | #include <linux/kallsyms.h> | 29 | #include <linux/kallsyms.h> |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index cb233bc9dee3..06853e670354 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -295,6 +295,8 @@ static void __init reserve_brk(void) | |||
295 | _brk_start = 0; | 295 | _brk_start = 0; |
296 | } | 296 | } |
297 | 297 | ||
298 | u64 relocated_ramdisk; | ||
299 | |||
298 | #ifdef CONFIG_BLK_DEV_INITRD | 300 | #ifdef CONFIG_BLK_DEV_INITRD |
299 | 301 | ||
300 | static u64 __init get_ramdisk_image(void) | 302 | static u64 __init get_ramdisk_image(void) |
@@ -321,25 +323,24 @@ static void __init relocate_initrd(void) | |||
321 | u64 ramdisk_image = get_ramdisk_image(); | 323 | u64 ramdisk_image = get_ramdisk_image(); |
322 | u64 ramdisk_size = get_ramdisk_size(); | 324 | u64 ramdisk_size = get_ramdisk_size(); |
323 | u64 area_size = PAGE_ALIGN(ramdisk_size); | 325 | u64 area_size = PAGE_ALIGN(ramdisk_size); |
324 | u64 ramdisk_here; | ||
325 | unsigned long slop, clen, mapaddr; | 326 | unsigned long slop, clen, mapaddr; |
326 | char *p, *q; | 327 | char *p, *q; |
327 | 328 | ||
328 | /* We need to move the initrd down into directly mapped mem */ | 329 | /* We need to move the initrd down into directly mapped mem */ |
329 | ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), | 330 | relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), |
330 | area_size, PAGE_SIZE); | 331 | area_size, PAGE_SIZE); |
331 | 332 | ||
332 | if (!ramdisk_here) | 333 | if (!relocated_ramdisk) |
333 | panic("Cannot find place for new RAMDISK of size %lld\n", | 334 | panic("Cannot find place for new RAMDISK of size %lld\n", |
334 | ramdisk_size); | 335 | ramdisk_size); |
335 | 336 | ||
336 | /* Note: this includes all the mem currently occupied by | 337 | /* Note: this includes all the mem currently occupied by |
337 | the initrd, we rely on that fact to keep the data intact. */ | 338 | the initrd, we rely on that fact to keep the data intact. */ |
338 | memblock_reserve(ramdisk_here, area_size); | 339 | memblock_reserve(relocated_ramdisk, area_size); |
339 | initrd_start = ramdisk_here + PAGE_OFFSET; | 340 | initrd_start = relocated_ramdisk + PAGE_OFFSET; |
340 | initrd_end = initrd_start + ramdisk_size; | 341 | initrd_end = initrd_start + ramdisk_size; |
341 | printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", | 342 | printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", |
342 | ramdisk_here, ramdisk_here + ramdisk_size - 1); | 343 | relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); |
343 | 344 | ||
344 | q = (char *)initrd_start; | 345 | q = (char *)initrd_start; |
345 | 346 | ||
@@ -363,7 +364,7 @@ static void __init relocate_initrd(void) | |||
363 | printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" | 364 | printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" |
364 | " [mem %#010llx-%#010llx]\n", | 365 | " [mem %#010llx-%#010llx]\n", |
365 | ramdisk_image, ramdisk_image + ramdisk_size - 1, | 366 | ramdisk_image, ramdisk_image + ramdisk_size - 1, |
366 | ramdisk_here, ramdisk_here + ramdisk_size - 1); | 367 | relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); |
367 | } | 368 | } |
368 | 369 | ||
369 | static void __init early_reserve_initrd(void) | 370 | static void __init early_reserve_initrd(void) |
@@ -447,6 +448,9 @@ static void __init parse_setup_data(void) | |||
447 | case SETUP_DTB: | 448 | case SETUP_DTB: |
448 | add_dtb(pa_data); | 449 | add_dtb(pa_data); |
449 | break; | 450 | break; |
451 | case SETUP_EFI: | ||
452 | parse_efi_setup(pa_data, data_len); | ||
453 | break; | ||
450 | default: | 454 | default: |
451 | break; | 455 | break; |
452 | } | 456 | } |
@@ -824,6 +828,20 @@ static void __init trim_low_memory_range(void) | |||
824 | } | 828 | } |
825 | 829 | ||
826 | /* | 830 | /* |
831 | * Dump out kernel offset information on panic. | ||
832 | */ | ||
833 | static int | ||
834 | dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) | ||
835 | { | ||
836 | pr_emerg("Kernel Offset: 0x%lx from 0x%lx " | ||
837 | "(relocation range: 0x%lx-0x%lx)\n", | ||
838 | (unsigned long)&_text - __START_KERNEL, __START_KERNEL, | ||
839 | __START_KERNEL_map, MODULES_VADDR-1); | ||
840 | |||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | /* | ||
827 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 845 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
828 | * passed the efi memmap, systab, etc., so we should use these data structures | 846 | * passed the efi memmap, systab, etc., so we should use these data structures |
829 | * for initialization. Note, the efi init code path is determined by the | 847 | * for initialization. Note, the efi init code path is determined by the |
@@ -924,8 +942,6 @@ void __init setup_arch(char **cmdline_p) | |||
924 | iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; | 942 | iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; |
925 | setup_memory_map(); | 943 | setup_memory_map(); |
926 | parse_setup_data(); | 944 | parse_setup_data(); |
927 | /* update the e820_saved too */ | ||
928 | e820_reserve_setup_data(); | ||
929 | 945 | ||
930 | copy_edd(); | 946 | copy_edd(); |
931 | 947 | ||
@@ -987,6 +1003,8 @@ void __init setup_arch(char **cmdline_p) | |||
987 | early_dump_pci_devices(); | 1003 | early_dump_pci_devices(); |
988 | #endif | 1004 | #endif |
989 | 1005 | ||
1006 | /* update the e820_saved too */ | ||
1007 | e820_reserve_setup_data(); | ||
990 | finish_e820_parsing(); | 1008 | finish_e820_parsing(); |
991 | 1009 | ||
992 | if (efi_enabled(EFI_BOOT)) | 1010 | if (efi_enabled(EFI_BOOT)) |
@@ -1248,3 +1266,15 @@ void __init i386_reserve_resources(void) | |||
1248 | } | 1266 | } |
1249 | 1267 | ||
1250 | #endif /* CONFIG_X86_32 */ | 1268 | #endif /* CONFIG_X86_32 */ |
1269 | |||
1270 | static struct notifier_block kernel_offset_notifier = { | ||
1271 | .notifier_call = dump_kernel_offset | ||
1272 | }; | ||
1273 | |||
1274 | static int __init register_kernel_offset_dumper(void) | ||
1275 | { | ||
1276 | atomic_notifier_chain_register(&panic_notifier_list, | ||
1277 | &kernel_offset_notifier); | ||
1278 | return 0; | ||
1279 | } | ||
1280 | __initcall(register_kernel_offset_dumper); | ||
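The tail of this setup.c change registers dump_kernel_offset() on the panic notifier chain so the (randomized) kernel text offset is reported when the machine panics. For reference, a minimal out-of-tree sketch of the same notifier pattern follows; only the notifier_block and atomic_notifier_chain_register() usage mirrors the hunk above, and the module name and printed message are invented for illustration.

/*
 * Minimal out-of-tree sketch of the panic-notifier pattern used by
 * register_kernel_offset_dumper() above. Only the notifier_block and
 * atomic_notifier_chain_register() usage mirrors the patch; the module
 * name and the printed message are invented for illustration.
 */
#include <linux/kernel.h>	/* panic_notifier_list is declared here */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int example_panic_note(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* panic() passes the panic message as @data */
	pr_emerg("example: panic notifier fired: %s\n", (const char *)data);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_note,
};

static int __init example_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
	return 0;
}

static void __exit example_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &example_panic_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");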
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 391ea529dc26..a32da804252e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1423,7 +1423,9 @@ static inline void mwait_play_dead(void) | |||
1423 | * The WBINVD is insufficient due to the spurious-wakeup | 1423 | * The WBINVD is insufficient due to the spurious-wakeup |
1424 | * case where we return around the loop. | 1424 | * case where we return around the loop. |
1425 | */ | 1425 | */ |
1426 | mb(); | ||
1426 | clflush(mwait_ptr); | 1427 | clflush(mwait_ptr); |
1428 | mb(); | ||
1427 | __monitor(mwait_ptr, 0, 0); | 1429 | __monitor(mwait_ptr, 0, 0); |
1428 | mb(); | 1430 | mb(); |
1429 | __mwait(eax, 0); | 1431 | __mwait(eax, 0); |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b857ed890b4c..57409f6b8c62 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -211,21 +211,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ | |||
211 | exception_exit(prev_state); \ | 211 | exception_exit(prev_state); \ |
212 | } | 212 | } |
213 | 213 | ||
214 | DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, | 214 | DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) |
215 | regs->ip) | 215 | DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow ) |
216 | DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) | 216 | DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds ) |
217 | DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) | 217 | DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip ) |
218 | DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, | 218 | DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun ) |
219 | regs->ip) | 219 | DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS ) |
220 | DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", | 220 | DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present ) |
221 | coprocessor_segment_overrun) | ||
222 | DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) | ||
223 | DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) | ||
224 | #ifdef CONFIG_X86_32 | 221 | #ifdef CONFIG_X86_32 |
225 | DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) | 222 | DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment ) |
226 | #endif | 223 | #endif |
227 | DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, | 224 | DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 ) |
228 | BUS_ADRALN, 0) | ||
229 | 225 | ||
230 | #ifdef CONFIG_X86_64 | 226 | #ifdef CONFIG_X86_64 |
231 | /* Runs on IST stack */ | 227 | /* Runs on IST stack */ |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 930e5d48f560..a3acbac2ee72 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/clocksource.h> | 11 | #include <linux/clocksource.h> |
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/timex.h> | 13 | #include <linux/timex.h> |
14 | #include <linux/static_key.h> | ||
14 | 15 | ||
15 | #include <asm/hpet.h> | 16 | #include <asm/hpet.h> |
16 | #include <asm/timer.h> | 17 | #include <asm/timer.h> |
@@ -37,13 +38,244 @@ static int __read_mostly tsc_unstable; | |||
37 | erroneous rdtsc usage on !cpu_has_tsc processors */ | 38 | erroneous rdtsc usage on !cpu_has_tsc processors */ |
38 | static int __read_mostly tsc_disabled = -1; | 39 | static int __read_mostly tsc_disabled = -1; |
39 | 40 | ||
41 | static struct static_key __use_tsc = STATIC_KEY_INIT; | ||
42 | |||
40 | int tsc_clocksource_reliable; | 43 | int tsc_clocksource_reliable; |
44 | |||
45 | /* | ||
46 | * Use a ring-buffer like data structure, where a writer advances the head by | ||
47 | * writing a new data entry and a reader advances the tail when it observes a | ||
48 | * new entry. | ||
49 | * | ||
50 | * Writers are made to wait on readers until there's space to write a new | ||
51 | * entry. | ||
52 | * | ||
53 | * This means that we can always use an {offset, mul} pair to compute a ns | ||
54 | * value that is 'roughly' in the right direction, even if we're writing a new | ||
55 | * {offset, mul} pair during the clock read. | ||
56 | * | ||
57 | * The downside is that we can no longer guarantee strict monotonicity | ||
58 | * (assuming the TSC was monotonic to begin with), because while we compute the | ||
59 | * intersection point of the two clock slopes and make sure the time is | ||
60 | * continuous at the point of switching, we can no longer guarantee a reader is | ||
61 | * strictly before or after the switch point. | ||
62 | * | ||
63 | * It does mean a reader no longer needs to disable IRQs in order to avoid | ||
64 | * CPU-Freq updates messing with its readings, and similarly an NMI reader will | ||
65 | * no longer run the risk of hitting half-written state. | ||
66 | */ | ||
67 | |||
68 | struct cyc2ns { | ||
69 | struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */ | ||
70 | struct cyc2ns_data *head; /* 48 + 8 = 56 */ | ||
71 | struct cyc2ns_data *tail; /* 56 + 8 = 64 */ | ||
72 | }; /* exactly fits one cacheline */ | ||
73 | |||
74 | static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns); | ||
75 | |||
76 | struct cyc2ns_data *cyc2ns_read_begin(void) | ||
77 | { | ||
78 | struct cyc2ns_data *head; | ||
79 | |||
80 | preempt_disable(); | ||
81 | |||
82 | head = this_cpu_read(cyc2ns.head); | ||
83 | /* | ||
84 | * Ensure we observe the entry when we observe the pointer to it. | ||
85 | * matches the wmb from cyc2ns_write_end(). | ||
86 | */ | ||
87 | smp_read_barrier_depends(); | ||
88 | head->__count++; | ||
89 | barrier(); | ||
90 | |||
91 | return head; | ||
92 | } | ||
93 | |||
94 | void cyc2ns_read_end(struct cyc2ns_data *head) | ||
95 | { | ||
96 | barrier(); | ||
97 | /* | ||
98 | * If we're the outermost nested read, update the tail pointer | ||
99 | * when we're done. This notifies possible pending writers | ||
100 | * that we've observed the head pointer and that the other | ||
101 | * entry is now free. | ||
102 | */ | ||
103 | if (!--head->__count) { | ||
104 | /* | ||
105 | * x86-TSO does not reorder writes with older reads; | ||
106 | * therefore once this write becomes visible to another | ||
107 | * cpu, we must be finished reading the cyc2ns_data. | ||
108 | * | ||
109 | * matches with cyc2ns_write_begin(). | ||
110 | */ | ||
111 | this_cpu_write(cyc2ns.tail, head); | ||
112 | } | ||
113 | preempt_enable(); | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Begin writing a new @data entry for @cpu. | ||
118 | * | ||
119 | * Assumes some sort of write side lock; currently 'provided' by the assumption | ||
120 | * that cpufreq will call its notifiers sequentially. | ||
121 | */ | ||
122 | static struct cyc2ns_data *cyc2ns_write_begin(int cpu) | ||
123 | { | ||
124 | struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); | ||
125 | struct cyc2ns_data *data = c2n->data; | ||
126 | |||
127 | if (data == c2n->head) | ||
128 | data++; | ||
129 | |||
130 | /* XXX send an IPI to @cpu in order to guarantee a read? */ | ||
131 | |||
132 | /* | ||
133 | * When we observe the tail write from cyc2ns_read_end(), | ||
134 | * the cpu must be done with that entry and it's safe | ||
135 | * to start writing to it. | ||
136 | */ | ||
137 | while (c2n->tail == data) | ||
138 | cpu_relax(); | ||
139 | |||
140 | return data; | ||
141 | } | ||
142 | |||
143 | static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data) | ||
144 | { | ||
145 | struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); | ||
146 | |||
147 | /* | ||
148 | * Ensure the @data writes are visible before we publish the | ||
149 | * entry. Matches the data-dependency in cyc2ns_read_begin(). | ||
150 | */ | ||
151 | smp_wmb(); | ||
152 | |||
153 | ACCESS_ONCE(c2n->head) = data; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Accelerators for sched_clock() | ||
158 | * convert from cycles(64bits) => nanoseconds (64bits) | ||
159 | * basic equation: | ||
160 | * ns = cycles / (freq / ns_per_sec) | ||
161 | * ns = cycles * (ns_per_sec / freq) | ||
162 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
163 | * ns = cycles * (10^6 / cpu_khz) | ||
164 | * | ||
165 | * Then we use scaling math (suggested by george@mvista.com) to get: | ||
166 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | ||
167 | * ns = cycles * cyc2ns_scale / SC | ||
168 | * | ||
169 | * And since SC is a constant power of two, we can convert the div | ||
170 | * into a shift. | ||
171 | * | ||
172 | * We can use khz divisor instead of mhz to keep a better precision, since | ||
173 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
174 | * (mathieu.desnoyers@polymtl.ca) | ||
175 | * | ||
176 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
177 | */ | ||
178 | |||
179 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | ||
180 | |||
181 | static void cyc2ns_data_init(struct cyc2ns_data *data) | ||
182 | { | ||
183 | data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR; | ||
184 | data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; | ||
185 | data->cyc2ns_offset = 0; | ||
186 | data->__count = 0; | ||
187 | } | ||
188 | |||
189 | static void cyc2ns_init(int cpu) | ||
190 | { | ||
191 | struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); | ||
192 | |||
193 | cyc2ns_data_init(&c2n->data[0]); | ||
194 | cyc2ns_data_init(&c2n->data[1]); | ||
195 | |||
196 | c2n->head = c2n->data; | ||
197 | c2n->tail = c2n->data; | ||
198 | } | ||
199 | |||
200 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | ||
201 | { | ||
202 | struct cyc2ns_data *data, *tail; | ||
203 | unsigned long long ns; | ||
204 | |||
205 | /* | ||
206 | * See cyc2ns_read_*() for details; replicated in order to avoid | ||
207 | * an extra few instructions that came with the abstraction. | ||
208 | * Notable, it allows us to only do the __count and tail update | ||
209 | * dance when its actually needed. | ||
210 | */ | ||
211 | |||
212 | preempt_disable(); | ||
213 | data = this_cpu_read(cyc2ns.head); | ||
214 | tail = this_cpu_read(cyc2ns.tail); | ||
215 | |||
216 | if (likely(data == tail)) { | ||
217 | ns = data->cyc2ns_offset; | ||
218 | ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); | ||
219 | } else { | ||
220 | data->__count++; | ||
221 | |||
222 | barrier(); | ||
223 | |||
224 | ns = data->cyc2ns_offset; | ||
225 | ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); | ||
226 | |||
227 | barrier(); | ||
228 | |||
229 | if (!--data->__count) | ||
230 | this_cpu_write(cyc2ns.tail, data); | ||
231 | } | ||
232 | preempt_enable(); | ||
233 | |||
234 | return ns; | ||
235 | } | ||
236 | |||
237 | /* XXX surely we already have this someplace in the kernel?! */ | ||
238 | #define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d)) | ||
239 | |||
240 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | ||
241 | { | ||
242 | unsigned long long tsc_now, ns_now; | ||
243 | struct cyc2ns_data *data; | ||
244 | unsigned long flags; | ||
245 | |||
246 | local_irq_save(flags); | ||
247 | sched_clock_idle_sleep_event(); | ||
248 | |||
249 | if (!cpu_khz) | ||
250 | goto done; | ||
251 | |||
252 | data = cyc2ns_write_begin(cpu); | ||
253 | |||
254 | rdtscll(tsc_now); | ||
255 | ns_now = cycles_2_ns(tsc_now); | ||
256 | |||
257 | /* | ||
258 | * Compute a new multiplier as per the above comment and ensure our | ||
259 | * time function is continuous; see the comment near struct | ||
260 | * cyc2ns_data. | ||
261 | */ | ||
262 | data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz); | ||
263 | data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; | ||
264 | data->cyc2ns_offset = ns_now - | ||
265 | mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); | ||
266 | |||
267 | cyc2ns_write_end(cpu, data); | ||
268 | |||
269 | done: | ||
270 | sched_clock_idle_wakeup_event(0); | ||
271 | local_irq_restore(flags); | ||
272 | } | ||
41 | /* | 273 | /* |
42 | * Scheduler clock - returns current time in nanosec units. | 274 | * Scheduler clock - returns current time in nanosec units. |
43 | */ | 275 | */ |
44 | u64 native_sched_clock(void) | 276 | u64 native_sched_clock(void) |
45 | { | 277 | { |
46 | u64 this_offset; | 278 | u64 tsc_now; |
47 | 279 | ||
48 | /* | 280 | /* |
49 | * Fall back to jiffies if there's no TSC available: | 281 | * Fall back to jiffies if there's no TSC available: |
@@ -53,16 +285,16 @@ u64 native_sched_clock(void) | |||
53 | * very important for it to be as fast as the platform | 285 | * very important for it to be as fast as the platform |
54 | * can achieve it. ) | 286 | * can achieve it. ) |
55 | */ | 287 | */ |
56 | if (unlikely(tsc_disabled)) { | 288 | if (!static_key_false(&__use_tsc)) { |
57 | /* No locking but a rare wrong value is not a big deal: */ | 289 | /* No locking but a rare wrong value is not a big deal: */ |
58 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); | 290 | return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ); |
59 | } | 291 | } |
60 | 292 | ||
61 | /* read the Time Stamp Counter: */ | 293 | /* read the Time Stamp Counter: */ |
62 | rdtscll(this_offset); | 294 | rdtscll(tsc_now); |
63 | 295 | ||
64 | /* return the value in ns */ | 296 | /* return the value in ns */ |
65 | return __cycles_2_ns(this_offset); | 297 | return cycles_2_ns(tsc_now); |
66 | } | 298 | } |
67 | 299 | ||
68 | /* We need to define a real function for sched_clock, to override the | 300 | /* We need to define a real function for sched_clock, to override the |
@@ -419,6 +651,16 @@ unsigned long native_calibrate_tsc(void) | |||
419 | unsigned long flags, latch, ms, fast_calibrate; | 651 | unsigned long flags, latch, ms, fast_calibrate; |
420 | int hpet = is_hpet_enabled(), i, loopmin; | 652 | int hpet = is_hpet_enabled(), i, loopmin; |
421 | 653 | ||
654 | /* Calibrate TSC using MSR for Intel Atom SoCs */ | ||
655 | local_irq_save(flags); | ||
656 | i = try_msr_calibrate_tsc(&fast_calibrate); | ||
657 | local_irq_restore(flags); | ||
658 | if (i >= 0) { | ||
659 | if (i == 0) | ||
660 | pr_warn("Fast TSC calibration using MSR failed\n"); | ||
661 | return fast_calibrate; | ||
662 | } | ||
663 | |||
422 | local_irq_save(flags); | 664 | local_irq_save(flags); |
423 | fast_calibrate = quick_pit_calibrate(); | 665 | fast_calibrate = quick_pit_calibrate(); |
424 | local_irq_restore(flags); | 666 | local_irq_restore(flags); |
@@ -589,61 +831,11 @@ int recalibrate_cpu_khz(void) | |||
589 | EXPORT_SYMBOL(recalibrate_cpu_khz); | 831 | EXPORT_SYMBOL(recalibrate_cpu_khz); |
590 | 832 | ||
591 | 833 | ||
592 | /* Accelerators for sched_clock() | ||
593 | * convert from cycles(64bits) => nanoseconds (64bits) | ||
594 | * basic equation: | ||
595 | * ns = cycles / (freq / ns_per_sec) | ||
596 | * ns = cycles * (ns_per_sec / freq) | ||
597 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
598 | * ns = cycles * (10^6 / cpu_khz) | ||
599 | * | ||
600 | * Then we use scaling math (suggested by george@mvista.com) to get: | ||
601 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | ||
602 | * ns = cycles * cyc2ns_scale / SC | ||
603 | * | ||
604 | * And since SC is a constant power of two, we can convert the div | ||
605 | * into a shift. | ||
606 | * | ||
607 | * We can use khz divisor instead of mhz to keep a better precision, since | ||
608 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
609 | * (mathieu.desnoyers@polymtl.ca) | ||
610 | * | ||
611 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
612 | */ | ||
613 | |||
614 | DEFINE_PER_CPU(unsigned long, cyc2ns); | ||
615 | DEFINE_PER_CPU(unsigned long long, cyc2ns_offset); | ||
616 | |||
617 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | ||
618 | { | ||
619 | unsigned long long tsc_now, ns_now, *offset; | ||
620 | unsigned long flags, *scale; | ||
621 | |||
622 | local_irq_save(flags); | ||
623 | sched_clock_idle_sleep_event(); | ||
624 | |||
625 | scale = &per_cpu(cyc2ns, cpu); | ||
626 | offset = &per_cpu(cyc2ns_offset, cpu); | ||
627 | |||
628 | rdtscll(tsc_now); | ||
629 | ns_now = __cycles_2_ns(tsc_now); | ||
630 | |||
631 | if (cpu_khz) { | ||
632 | *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) + | ||
633 | cpu_khz / 2) / cpu_khz; | ||
634 | *offset = ns_now - mult_frac(tsc_now, *scale, | ||
635 | (1UL << CYC2NS_SCALE_FACTOR)); | ||
636 | } | ||
637 | |||
638 | sched_clock_idle_wakeup_event(0); | ||
639 | local_irq_restore(flags); | ||
640 | } | ||
641 | |||
642 | static unsigned long long cyc2ns_suspend; | 834 | static unsigned long long cyc2ns_suspend; |
643 | 835 | ||
644 | void tsc_save_sched_clock_state(void) | 836 | void tsc_save_sched_clock_state(void) |
645 | { | 837 | { |
646 | if (!sched_clock_stable) | 838 | if (!sched_clock_stable()) |
647 | return; | 839 | return; |
648 | 840 | ||
649 | cyc2ns_suspend = sched_clock(); | 841 | cyc2ns_suspend = sched_clock(); |
@@ -663,16 +855,26 @@ void tsc_restore_sched_clock_state(void) | |||
663 | unsigned long flags; | 855 | unsigned long flags; |
664 | int cpu; | 856 | int cpu; |
665 | 857 | ||
666 | if (!sched_clock_stable) | 858 | if (!sched_clock_stable()) |
667 | return; | 859 | return; |
668 | 860 | ||
669 | local_irq_save(flags); | 861 | local_irq_save(flags); |
670 | 862 | ||
671 | __this_cpu_write(cyc2ns_offset, 0); | 863 | /* |
864 | * We're coming out of suspend, there's no concurrency yet; don't | ||
865 | * bother being nice about the RCU stuff, just write to both | ||
866 | * data fields. | ||
867 | */ | ||
868 | |||
869 | this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0); | ||
870 | this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0); | ||
871 | |||
672 | offset = cyc2ns_suspend - sched_clock(); | 872 | offset = cyc2ns_suspend - sched_clock(); |
673 | 873 | ||
674 | for_each_possible_cpu(cpu) | 874 | for_each_possible_cpu(cpu) { |
675 | per_cpu(cyc2ns_offset, cpu) = offset; | 875 | per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; |
876 | per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; | ||
877 | } | ||
676 | 878 | ||
677 | local_irq_restore(flags); | 879 | local_irq_restore(flags); |
678 | } | 880 | } |
@@ -795,7 +997,7 @@ void mark_tsc_unstable(char *reason) | |||
795 | { | 997 | { |
796 | if (!tsc_unstable) { | 998 | if (!tsc_unstable) { |
797 | tsc_unstable = 1; | 999 | tsc_unstable = 1; |
798 | sched_clock_stable = 0; | 1000 | clear_sched_clock_stable(); |
799 | disable_sched_clock_irqtime(); | 1001 | disable_sched_clock_irqtime(); |
800 | pr_info("Marking TSC unstable due to %s\n", reason); | 1002 | pr_info("Marking TSC unstable due to %s\n", reason); |
801 | /* Change only the rating, when not registered */ | 1003 | /* Change only the rating, when not registered */ |
@@ -995,14 +1197,18 @@ void __init tsc_init(void) | |||
995 | * speed as the bootup CPU. (cpufreq notifiers will fix this | 1197 | * speed as the bootup CPU. (cpufreq notifiers will fix this |
996 | * up if their speed diverges) | 1198 | * up if their speed diverges) |
997 | */ | 1199 | */ |
998 | for_each_possible_cpu(cpu) | 1200 | for_each_possible_cpu(cpu) { |
1201 | cyc2ns_init(cpu); | ||
999 | set_cyc2ns_scale(cpu_khz, cpu); | 1202 | set_cyc2ns_scale(cpu_khz, cpu); |
1203 | } | ||
1000 | 1204 | ||
1001 | if (tsc_disabled > 0) | 1205 | if (tsc_disabled > 0) |
1002 | return; | 1206 | return; |
1003 | 1207 | ||
1004 | /* now allow native_sched_clock() to use rdtsc */ | 1208 | /* now allow native_sched_clock() to use rdtsc */ |
1209 | |||
1005 | tsc_disabled = 0; | 1210 | tsc_disabled = 0; |
1211 | static_key_slow_inc(&__use_tsc); | ||
1006 | 1212 | ||
1007 | if (!no_sched_irq_time) | 1213 | if (!no_sched_irq_time) |
1008 | enable_sched_clock_irqtime(); | 1214 | enable_sched_clock_irqtime(); |
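The comment block this tsc.c hunk introduces derives the conversion ns = cycles * cyc2ns_mul >> CYC2NS_SCALE_FACTOR, with cyc2ns_mul = (10^6 << 10) / cpu_khz, which is exactly what set_cyc2ns_scale() and cycles_2_ns() implement. A small userspace sketch of that arithmetic follows; the cpu_khz and cycle count are made up, and the helper is only a stand-in for the kernel's mul_u64_u32_shr() (it assumes a GCC/Clang toolchain with __int128).

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10			/* 2^10, same value as in tsc.c */
#define NSEC_PER_MSEC 1000000ULL
#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))

/* Stand-in for the kernel's mul_u64_u32_shr(); assumes __int128 support. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	uint64_t cpu_khz = 2400000;		/* hypothetical 2.4 GHz TSC */
	uint32_t mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
	uint64_t cycles = 2400000000ULL;	/* one second's worth of cycles */

	printf("cyc2ns_mul = %u\n", mul);
	printf("ns         = %llu\n",
	       (unsigned long long)mul_u64_u32_shr(cycles, mul, CYC2NS_SCALE_FACTOR));
	return 0;
}

For the assumed 2.4 GHz clock this yields cyc2ns_mul = 427 and about 1.0008e9 ns for one second's worth of cycles, i.e. within roughly 0.1% of the exact value, which is the precision the scaling-math comment is describing.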
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c new file mode 100644 index 000000000000..8b5434f4389f --- /dev/null +++ b/arch/x86/kernel/tsc_msr.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms. | ||
3 | * | ||
4 | * TSC in Intel Atom SoC runs at a constant rate which can be figured | ||
5 | * by this formula: | ||
6 | * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency> | ||
7 | * See the Intel 64 and IA-32 System Programming Guide, sections 16.12 and 30.11.5 | ||
8 | * for details. | ||
9 | * In particular, some Intel Atom SoCs don't have a PIT (i8254) or HPET, so MSR | ||
10 | * based calibration is the only option. | ||
11 | * | ||
12 | * | ||
13 | * Copyright (C) 2013 Intel Corporation | ||
14 | * Author: Bin Gao <bin.gao@intel.com> | ||
15 | * | ||
16 | * This file is released under the GPLv2. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <asm/processor.h> | ||
21 | #include <asm/setup.h> | ||
22 | #include <asm/apic.h> | ||
23 | #include <asm/param.h> | ||
24 | |||
25 | /* CPU reference clock frequency: in KHz */ | ||
26 | #define FREQ_83 83200 | ||
27 | #define FREQ_100 99840 | ||
28 | #define FREQ_133 133200 | ||
29 | #define FREQ_166 166400 | ||
30 | |||
31 | #define MAX_NUM_FREQS 8 | ||
32 | |||
33 | /* | ||
34 | * According to Intel 64 and IA-32 System Programming Guide, | ||
35 | * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be | ||
36 | * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40]. | ||
37 | * Unfortunately, some Intel Atom SoCs aren't quite compliant with this, | ||
38 | * so we need to manually differentiate SoC families. This is what the | ||
39 | * field msr_plat does. | ||
40 | */ | ||
41 | struct freq_desc { | ||
42 | u8 x86_family; /* CPU family */ | ||
43 | u8 x86_model; /* model */ | ||
44 | u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */ | ||
45 | u32 freqs[MAX_NUM_FREQS]; | ||
46 | }; | ||
47 | |||
48 | static struct freq_desc freq_desc_tables[] = { | ||
49 | /* PNW */ | ||
50 | { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } }, | ||
51 | /* CLV+ */ | ||
52 | { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } }, | ||
53 | /* TNG */ | ||
54 | { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, | ||
55 | /* VLV2 */ | ||
56 | { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, | ||
57 | /* ANN */ | ||
58 | { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, | ||
59 | }; | ||
60 | |||
61 | static int match_cpu(u8 family, u8 model) | ||
62 | { | ||
63 | int i; | ||
64 | |||
65 | for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) { | ||
66 | if ((family == freq_desc_tables[i].x86_family) && | ||
67 | (model == freq_desc_tables[i].x86_model)) | ||
68 | return i; | ||
69 | } | ||
70 | |||
71 | return -1; | ||
72 | } | ||
73 | |||
74 | /* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */ | ||
75 | #define id_to_freq(cpu_index, freq_id) \ | ||
76 | (freq_desc_tables[cpu_index].freqs[freq_id]) | ||
77 | |||
78 | /* | ||
79 | * Do MSR calibration only for known/supported CPUs. | ||
80 | * Return values: | ||
81 | * -1: CPU is unknown/unsupported for MSR based calibration | ||
82 | * 0: CPU is known/supported, but calibration failed | ||
83 | * 1: CPU is known/supported, and calibration succeeded | ||
84 | */ | ||
85 | int try_msr_calibrate_tsc(unsigned long *fast_calibrate) | ||
86 | { | ||
87 | int cpu_index; | ||
88 | u32 lo, hi, ratio, freq_id, freq; | ||
89 | |||
90 | cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
91 | if (cpu_index < 0) | ||
92 | return -1; | ||
93 | |||
94 | *fast_calibrate = 0; | ||
95 | |||
96 | if (freq_desc_tables[cpu_index].msr_plat) { | ||
97 | rdmsr(MSR_PLATFORM_INFO, lo, hi); | ||
98 | ratio = (lo >> 8) & 0x1f; | ||
99 | } else { | ||
100 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
101 | ratio = (hi >> 8) & 0x1f; | ||
102 | } | ||
103 | pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); | ||
104 | |||
105 | if (!ratio) | ||
106 | return 0; | ||
107 | |||
108 | /* Get FSB FREQ ID */ | ||
109 | rdmsr(MSR_FSB_FREQ, lo, hi); | ||
110 | freq_id = lo & 0x7; | ||
111 | freq = id_to_freq(cpu_index, freq_id); | ||
112 | pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", | ||
113 | freq_id, freq); | ||
114 | if (!freq) | ||
115 | return 0; | ||
116 | |||
117 | /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ | ||
118 | *fast_calibrate = freq * ratio; | ||
119 | pr_info("TSC runs at %lu KHz\n", *fast_calibrate); | ||
120 | |||
121 | #ifdef CONFIG_X86_LOCAL_APIC | ||
122 | lapic_timer_frequency = (freq * 1000) / HZ; | ||
123 | pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); | ||
124 | #endif | ||
125 | |||
126 | return 1; | ||
127 | } | ||
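As a worked (and partly hypothetical) reading of the formula implemented by try_msr_calibrate_tsc(): take a VLV2 part (family 6, model 0x37) whose MSR_FSB_FREQ reports freq ID 1. The frequency table values come from the file above; the bus ratio and HZ below are assumptions chosen only to make the arithmetic concrete.

    freq  = id_to_freq(VLV2 row, freq_id 1)  = FREQ_100 = 99840 kHz
    ratio = MSR_PLATFORM_INFO[12:8]          = 16                    (assumed)
    *fast_calibrate = freq * ratio           = 99840 * 16 = 1597440 kHz  (~1.6 GHz TSC)
    lapic_timer_frequency = freq * 1000 / HZ = 99840 * 1000 / 1000 = 99840   (assuming HZ = 1000)

If either the ratio or the resolved frequency reads back as zero, the function returns 0 and the caller falls back to the PIT/HPET calibration paths in native_calibrate_tsc().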
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index adfdf56a3714..26488487bc61 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -16,7 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/init.h> | ||
20 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
21 | #include <linux/nmi.h> | 20 | #include <linux/nmi.h> |
22 | #include <asm/tsc.h> | 21 | #include <asm/tsc.h> |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 422fd8223470..a4b451c6addf 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void) | |||
562 | if (cpu_has_xsaveopt && eagerfpu != DISABLE) | 562 | if (cpu_has_xsaveopt && eagerfpu != DISABLE) |
563 | eagerfpu = ENABLE; | 563 | eagerfpu = ENABLE; |
564 | 564 | ||
565 | if (pcntxt_mask & XSTATE_EAGER) { | ||
566 | if (eagerfpu == DISABLE) { | ||
567 | pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n", | ||
568 | pcntxt_mask & XSTATE_EAGER); | ||
569 | pcntxt_mask &= ~XSTATE_EAGER; | ||
570 | } else { | ||
571 | eagerfpu = ENABLE; | ||
572 | } | ||
573 | } | ||
574 | |||
565 | pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", | 575 | pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", |
566 | pcntxt_mask, xstate_size); | 576 | pcntxt_mask, xstate_size); |
567 | } | 577 | } |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 1673940cf9c3..775702f649ca 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1355,7 +1355,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
1355 | vcpu->arch.apic_base = value; | 1355 | vcpu->arch.apic_base = value; |
1356 | 1356 | ||
1357 | /* update jump label if enable bit changes */ | 1357 | /* update jump label if enable bit changes */ |
1358 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { | 1358 | if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { |
1359 | if (value & MSR_IA32_APICBASE_ENABLE) | 1359 | if (value & MSR_IA32_APICBASE_ENABLE) |
1360 | static_key_slow_dec_deferred(&apic_hw_disabled); | 1360 | static_key_slow_dec_deferred(&apic_hw_disabled); |
1361 | else | 1361 | else |
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index a30ca15be21c..dee945d55594 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S | |||
@@ -186,7 +186,7 @@ ENTRY(copy_user_generic_unrolled) | |||
186 | 30: shll $6,%ecx | 186 | 30: shll $6,%ecx |
187 | addl %ecx,%edx | 187 | addl %ecx,%edx |
188 | jmp 60f | 188 | jmp 60f |
189 | 40: lea (%rdx,%rcx,8),%rdx | 189 | 40: leal (%rdx,%rcx,8),%edx |
190 | jmp 60f | 190 | jmp 60f |
191 | 50: movl %ecx,%edx | 191 | 50: movl %ecx,%edx |
192 | 60: jmp copy_user_handle_tail /* ecx is zerorest also */ | 192 | 60: jmp copy_user_handle_tail /* ecx is zerorest also */ |
@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled) | |||
236 | ENTRY(copy_user_generic_string) | 236 | ENTRY(copy_user_generic_string) |
237 | CFI_STARTPROC | 237 | CFI_STARTPROC |
238 | ASM_STAC | 238 | ASM_STAC |
239 | andl %edx,%edx | ||
240 | jz 4f | ||
241 | cmpl $8,%edx | 239 | cmpl $8,%edx |
242 | jb 2f /* less than 8 bytes, go to byte copy loop */ | 240 | jb 2f /* less than 8 bytes, go to byte copy loop */ |
243 | ALIGN_DESTINATION | 241 | ALIGN_DESTINATION |
@@ -249,12 +247,12 @@ ENTRY(copy_user_generic_string) | |||
249 | 2: movl %edx,%ecx | 247 | 2: movl %edx,%ecx |
250 | 3: rep | 248 | 3: rep |
251 | movsb | 249 | movsb |
252 | 4: xorl %eax,%eax | 250 | xorl %eax,%eax |
253 | ASM_CLAC | 251 | ASM_CLAC |
254 | ret | 252 | ret |
255 | 253 | ||
256 | .section .fixup,"ax" | 254 | .section .fixup,"ax" |
257 | 11: lea (%rdx,%rcx,8),%rcx | 255 | 11: leal (%rdx,%rcx,8),%ecx |
258 | 12: movl %ecx,%edx /* ecx is zerorest also */ | 256 | 12: movl %ecx,%edx /* ecx is zerorest also */ |
259 | jmp copy_user_handle_tail | 257 | jmp copy_user_handle_tail |
260 | .previous | 258 | .previous |
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string) | |||
279 | ENTRY(copy_user_enhanced_fast_string) | 277 | ENTRY(copy_user_enhanced_fast_string) |
280 | CFI_STARTPROC | 278 | CFI_STARTPROC |
281 | ASM_STAC | 279 | ASM_STAC |
282 | andl %edx,%edx | ||
283 | jz 2f | ||
284 | movl %edx,%ecx | 280 | movl %edx,%ecx |
285 | 1: rep | 281 | 1: rep |
286 | movsb | 282 | movsb |
287 | 2: xorl %eax,%eax | 283 | xorl %eax,%eax |
288 | ASM_CLAC | 284 | ASM_CLAC |
289 | ret | 285 | ret |
290 | 286 | ||
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 7c3bee636e2f..39d6a3db0b96 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/timex.h> | 16 | #include <linux/timex.h> |
17 | #include <linux/preempt.h> | 17 | #include <linux/preempt.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/init.h> | ||
20 | 19 | ||
21 | #include <asm/processor.h> | 20 | #include <asm/processor.h> |
22 | #include <asm/delay.h> | 21 | #include <asm/delay.h> |
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 533a85e3a07e..1a2be7c6895d 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
@@ -346,8 +346,8 @@ AVXcode: 1 | |||
346 | 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) | 346 | 17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1) |
347 | 18: Grp16 (1A) | 347 | 18: Grp16 (1A) |
348 | 19: | 348 | 19: |
349 | 1a: | 349 | 1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv |
350 | 1b: | 350 | 1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv |
351 | 1c: | 351 | 1c: |
352 | 1d: | 352 | 1d: |
353 | 1e: | 353 | 1e: |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9ff85bb8dd69..9d591c895803 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
641 | 641 | ||
642 | /* Are we prepared to handle this kernel fault? */ | 642 | /* Are we prepared to handle this kernel fault? */ |
643 | if (fixup_exception(regs)) { | 643 | if (fixup_exception(regs)) { |
644 | /* | ||
645 | * Any interrupt that takes a fault gets the fixup. This makes | ||
646 | * the below recursive fault logic only apply to faults from | ||
647 | * task context. | ||
648 | */ | ||
649 | if (in_interrupt()) | ||
650 | return; | ||
651 | |||
652 | /* | ||
653 | * Per the above we're !in_interrupt(), aka. task context. | ||
654 | * | ||
655 | * In this case we need to make sure we're not recursively | ||
656 | * faulting through the emulate_vsyscall() logic. | ||
657 | */ | ||
644 | if (current_thread_info()->sig_on_uaccess_error && signal) { | 658 | if (current_thread_info()->sig_on_uaccess_error && signal) { |
645 | tsk->thread.trap_nr = X86_TRAP_PF; | 659 | tsk->thread.trap_nr = X86_TRAP_PF; |
646 | tsk->thread.error_code = error_code | PF_USER; | 660 | tsk->thread.error_code = error_code | PF_USER; |
@@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
649 | /* XXX: hwpoison faults will set the wrong code. */ | 663 | /* XXX: hwpoison faults will set the wrong code. */ |
650 | force_sig_info_fault(signal, si_code, address, tsk, 0); | 664 | force_sig_info_fault(signal, si_code, address, tsk, 0); |
651 | } | 665 | } |
666 | |||
667 | /* | ||
668 | * Barring that, we can do the fixup and be happy. | ||
669 | */ | ||
652 | return; | 670 | return; |
653 | } | 671 | } |
654 | 672 | ||
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 9d980d88b747..8c9f647ff9e1 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c | |||
@@ -87,9 +87,7 @@ int pmd_huge_support(void) | |||
87 | } | 87 | } |
88 | #endif | 88 | #endif |
89 | 89 | ||
90 | /* x86_64 also uses this file */ | 90 | #ifdef CONFIG_HUGETLB_PAGE |
91 | |||
92 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
93 | static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, | 91 | static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, |
94 | unsigned long addr, unsigned long len, | 92 | unsigned long addr, unsigned long len, |
95 | unsigned long pgoff, unsigned long flags) | 93 | unsigned long pgoff, unsigned long flags) |
@@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, | |||
99 | 97 | ||
100 | info.flags = 0; | 98 | info.flags = 0; |
101 | info.length = len; | 99 | info.length = len; |
102 | info.low_limit = TASK_UNMAPPED_BASE; | 100 | info.low_limit = current->mm->mmap_legacy_base; |
103 | info.high_limit = TASK_SIZE; | 101 | info.high_limit = TASK_SIZE; |
104 | info.align_mask = PAGE_MASK & ~huge_page_mask(h); | 102 | info.align_mask = PAGE_MASK & ~huge_page_mask(h); |
105 | info.align_offset = 0; | 103 | info.align_offset = 0; |
@@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
172 | return hugetlb_get_unmapped_area_topdown(file, addr, len, | 170 | return hugetlb_get_unmapped_area_topdown(file, addr, len, |
173 | pgoff, flags); | 171 | pgoff, flags); |
174 | } | 172 | } |
175 | 173 | #endif /* CONFIG_HUGETLB_PAGE */ | |
176 | #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ | ||
177 | 174 | ||
178 | #ifdef CONFIG_X86_64 | 175 | #ifdef CONFIG_X86_64 |
179 | static __init int setup_hugepagesz(char *opt) | 176 | static __init int setup_hugepagesz(char *opt) |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 4287f1ffba7e..5bdc5430597c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -806,6 +806,9 @@ void __init mem_init(void) | |||
806 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); | 806 | BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); |
807 | #undef high_memory | 807 | #undef high_memory |
808 | #undef __FIXADDR_TOP | 808 | #undef __FIXADDR_TOP |
809 | #ifdef CONFIG_RANDOMIZE_BASE | ||
810 | BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE); | ||
811 | #endif | ||
809 | 812 | ||
810 | #ifdef CONFIG_HIGHMEM | 813 | #ifdef CONFIG_HIGHMEM |
811 | BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); | 814 | BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); |
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index e5d5e2ce9f77..637ab34ed632 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/rculist.h> | 11 | #include <linux/rculist.h> |
12 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
13 | #include <linux/hash.h> | 13 | #include <linux/hash.h> |
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
17 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 24aec58d6afd..c85da7bb6b60 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -211,9 +211,13 @@ static void __init setup_node_data(int nid, u64 start, u64 end) | |||
211 | */ | 211 | */ |
212 | nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); | 212 | nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); |
213 | if (!nd_pa) { | 213 | if (!nd_pa) { |
214 | pr_err("Cannot find %zu bytes in node %d\n", | 214 | nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES, |
215 | nd_size, nid); | 215 | MEMBLOCK_ALLOC_ACCESSIBLE); |
216 | return; | 216 | if (!nd_pa) { |
217 | pr_err("Cannot find %zu bytes in node %d\n", | ||
218 | nd_size, nid); | ||
219 | return; | ||
220 | } | ||
217 | } | 221 | } |
218 | nd = __va(nd_pa); | 222 | nd = __va(nd_pa); |
219 | 223 | ||
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index d0b1773d9d2e..461bc8289024 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/kthread.h> | 8 | #include <linux/kthread.h> |
9 | #include <linux/random.h> | 9 | #include <linux/random.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | ||
12 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
13 | 12 | ||
14 | #include <asm/cacheflush.h> | 13 | #include <asm/cacheflush.h> |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index bb32480c2d71..b3b19f46c016 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -30,6 +30,7 @@ | |||
30 | */ | 30 | */ |
31 | struct cpa_data { | 31 | struct cpa_data { |
32 | unsigned long *vaddr; | 32 | unsigned long *vaddr; |
33 | pgd_t *pgd; | ||
33 | pgprot_t mask_set; | 34 | pgprot_t mask_set; |
34 | pgprot_t mask_clr; | 35 | pgprot_t mask_clr; |
35 | int numpages; | 36 | int numpages; |
@@ -322,17 +323,9 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, | |||
322 | return prot; | 323 | return prot; |
323 | } | 324 | } |
324 | 325 | ||
325 | /* | 326 | static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address, |
326 | * Lookup the page table entry for a virtual address. Return a pointer | 327 | unsigned int *level) |
327 | * to the entry and the level of the mapping. | ||
328 | * | ||
329 | * Note: We return pud and pmd either when the entry is marked large | ||
330 | * or when the present bit is not set. Otherwise we would return a | ||
331 | * pointer to a nonexisting mapping. | ||
332 | */ | ||
333 | pte_t *lookup_address(unsigned long address, unsigned int *level) | ||
334 | { | 328 | { |
335 | pgd_t *pgd = pgd_offset_k(address); | ||
336 | pud_t *pud; | 329 | pud_t *pud; |
337 | pmd_t *pmd; | 330 | pmd_t *pmd; |
338 | 331 | ||
@@ -361,8 +354,31 @@ pte_t *lookup_address(unsigned long address, unsigned int *level) | |||
361 | 354 | ||
362 | return pte_offset_kernel(pmd, address); | 355 | return pte_offset_kernel(pmd, address); |
363 | } | 356 | } |
357 | |||
358 | /* | ||
359 | * Lookup the page table entry for a virtual address. Return a pointer | ||
360 | * to the entry and the level of the mapping. | ||
361 | * | ||
362 | * Note: We return pud and pmd either when the entry is marked large | ||
363 | * or when the present bit is not set. Otherwise we would return a | ||
364 | * pointer to a nonexisting mapping. | ||
365 | */ | ||
366 | pte_t *lookup_address(unsigned long address, unsigned int *level) | ||
367 | { | ||
368 | return __lookup_address_in_pgd(pgd_offset_k(address), address, level); | ||
369 | } | ||
364 | EXPORT_SYMBOL_GPL(lookup_address); | 370 | EXPORT_SYMBOL_GPL(lookup_address); |
365 | 371 | ||
372 | static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, | ||
373 | unsigned int *level) | ||
374 | { | ||
375 | if (cpa->pgd) | ||
376 | return __lookup_address_in_pgd(cpa->pgd + pgd_index(address), | ||
377 | address, level); | ||
378 | |||
379 | return lookup_address(address, level); | ||
380 | } | ||
381 | |||
366 | /* | 382 | /* |
367 | * This is necessary because __pa() does not work on some | 383 | * This is necessary because __pa() does not work on some |
368 | * kinds of memory, like vmalloc() or the alloc_remap() | 384 | * kinds of memory, like vmalloc() or the alloc_remap() |
@@ -437,7 +453,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
437 | * Check for races, another CPU might have split this page | 453 | * Check for races, another CPU might have split this page |
438 | * up already: | 454 | * up already: |
439 | */ | 455 | */ |
440 | tmp = lookup_address(address, &level); | 456 | tmp = _lookup_address_cpa(cpa, address, &level); |
441 | if (tmp != kpte) | 457 | if (tmp != kpte) |
442 | goto out_unlock; | 458 | goto out_unlock; |
443 | 459 | ||
@@ -543,7 +559,8 @@ out_unlock: | |||
543 | } | 559 | } |
544 | 560 | ||
545 | static int | 561 | static int |
546 | __split_large_page(pte_t *kpte, unsigned long address, struct page *base) | 562 | __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, |
563 | struct page *base) | ||
547 | { | 564 | { |
548 | pte_t *pbase = (pte_t *)page_address(base); | 565 | pte_t *pbase = (pte_t *)page_address(base); |
549 | unsigned long pfn, pfninc = 1; | 566 | unsigned long pfn, pfninc = 1; |
@@ -556,7 +573,7 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base) | |||
556 | * Check for races, another CPU might have split this page | 573 | * Check for races, another CPU might have split this page |
557 | * up for us already: | 574 | * up for us already: |
558 | */ | 575 | */ |
559 | tmp = lookup_address(address, &level); | 576 | tmp = _lookup_address_cpa(cpa, address, &level); |
560 | if (tmp != kpte) { | 577 | if (tmp != kpte) { |
561 | spin_unlock(&pgd_lock); | 578 | spin_unlock(&pgd_lock); |
562 | return 1; | 579 | return 1; |
@@ -632,7 +649,8 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base) | |||
632 | return 0; | 649 | return 0; |
633 | } | 650 | } |
634 | 651 | ||
635 | static int split_large_page(pte_t *kpte, unsigned long address) | 652 | static int split_large_page(struct cpa_data *cpa, pte_t *kpte, |
653 | unsigned long address) | ||
636 | { | 654 | { |
637 | struct page *base; | 655 | struct page *base; |
638 | 656 | ||
@@ -644,15 +662,390 @@ static int split_large_page(pte_t *kpte, unsigned long address) | |||
644 | if (!base) | 662 | if (!base) |
645 | return -ENOMEM; | 663 | return -ENOMEM; |
646 | 664 | ||
647 | if (__split_large_page(kpte, address, base)) | 665 | if (__split_large_page(cpa, kpte, address, base)) |
648 | __free_page(base); | 666 | __free_page(base); |
649 | 667 | ||
650 | return 0; | 668 | return 0; |
651 | } | 669 | } |
652 | 670 | ||
671 | static bool try_to_free_pte_page(pte_t *pte) | ||
672 | { | ||
673 | int i; | ||
674 | |||
675 | for (i = 0; i < PTRS_PER_PTE; i++) | ||
676 | if (!pte_none(pte[i])) | ||
677 | return false; | ||
678 | |||
679 | free_page((unsigned long)pte); | ||
680 | return true; | ||
681 | } | ||
682 | |||
683 | static bool try_to_free_pmd_page(pmd_t *pmd) | ||
684 | { | ||
685 | int i; | ||
686 | |||
687 | for (i = 0; i < PTRS_PER_PMD; i++) | ||
688 | if (!pmd_none(pmd[i])) | ||
689 | return false; | ||
690 | |||
691 | free_page((unsigned long)pmd); | ||
692 | return true; | ||
693 | } | ||
694 | |||
695 | static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) | ||
696 | { | ||
697 | pte_t *pte = pte_offset_kernel(pmd, start); | ||
698 | |||
699 | while (start < end) { | ||
700 | set_pte(pte, __pte(0)); | ||
701 | |||
702 | start += PAGE_SIZE; | ||
703 | pte++; | ||
704 | } | ||
705 | |||
706 | if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { | ||
707 | pmd_clear(pmd); | ||
708 | return true; | ||
709 | } | ||
710 | return false; | ||
711 | } | ||
712 | |||
713 | static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, | ||
714 | unsigned long start, unsigned long end) | ||
715 | { | ||
716 | if (unmap_pte_range(pmd, start, end)) | ||
717 | if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) | ||
718 | pud_clear(pud); | ||
719 | } | ||
720 | |||
721 | static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) | ||
722 | { | ||
723 | pmd_t *pmd = pmd_offset(pud, start); | ||
724 | |||
725 | /* | ||
726 | * Not on a 2MB page boundary? | ||
727 | */ | ||
728 | if (start & (PMD_SIZE - 1)) { | ||
729 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; | ||
730 | unsigned long pre_end = min_t(unsigned long, end, next_page); | ||
731 | |||
732 | __unmap_pmd_range(pud, pmd, start, pre_end); | ||
733 | |||
734 | start = pre_end; | ||
735 | pmd++; | ||
736 | } | ||
737 | |||
738 | /* | ||
739 | * Try to unmap in 2M chunks. | ||
740 | */ | ||
741 | while (end - start >= PMD_SIZE) { | ||
742 | if (pmd_large(*pmd)) | ||
743 | pmd_clear(pmd); | ||
744 | else | ||
745 | __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); | ||
746 | |||
747 | start += PMD_SIZE; | ||
748 | pmd++; | ||
749 | } | ||
750 | |||
751 | /* | ||
752 | * 4K leftovers? | ||
753 | */ | ||
754 | if (start < end) | ||
755 | return __unmap_pmd_range(pud, pmd, start, end); | ||
756 | |||
757 | /* | ||
758 | * Try again to free the PMD page if we haven't succeeded above. | ||
759 | */ | ||
760 | if (!pud_none(*pud)) | ||
761 | if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) | ||
762 | pud_clear(pud); | ||
763 | } | ||
764 | |||
765 | static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) | ||
766 | { | ||
767 | pud_t *pud = pud_offset(pgd, start); | ||
768 | |||
769 | /* | ||
770 | * Not on a GB page boundary? | ||
771 | */ | ||
772 | if (start & (PUD_SIZE - 1)) { | ||
773 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; | ||
774 | unsigned long pre_end = min_t(unsigned long, end, next_page); | ||
775 | |||
776 | unmap_pmd_range(pud, start, pre_end); | ||
777 | |||
778 | start = pre_end; | ||
779 | pud++; | ||
780 | } | ||
781 | |||
782 | /* | ||
783 | * Try to unmap in 1G chunks. | ||
784 | */ | ||
785 | while (end - start >= PUD_SIZE) { | ||
786 | |||
787 | if (pud_large(*pud)) | ||
788 | pud_clear(pud); | ||
789 | else | ||
790 | unmap_pmd_range(pud, start, start + PUD_SIZE); | ||
791 | |||
792 | start += PUD_SIZE; | ||
793 | pud++; | ||
794 | } | ||
795 | |||
796 | /* | ||
797 | * 2M leftovers? | ||
798 | */ | ||
799 | if (start < end) | ||
800 | unmap_pmd_range(pud, start, end); | ||
801 | |||
802 | /* | ||
803 | * No need to try to free the PUD page because we'll free it in | ||
804 | * populate_pgd's error path | ||
805 | */ | ||
806 | } | ||
807 | |||
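The unmap helpers above all follow the same head/middle/tail pattern: peel off the unaligned head up to the next 2M or 1G boundary, clear whole aligned chunks, then handle the unaligned tail. A standalone sketch of just that arithmetic (plain userspace C; names and addresses are hypothetical):

#include <stdio.h>

static void split_range(unsigned long start, unsigned long end,
			unsigned long align)
{
	unsigned long mask = align - 1;

	if (start & mask) {				/* unaligned head */
		unsigned long next = (start + align) & ~mask;
		unsigned long pre_end = next < end ? next : end;

		printf("head:   %#lx-%#lx\n", start, pre_end);
		start = pre_end;
	}

	while (end - start >= align) {			/* aligned chunks */
		printf("middle: %#lx-%#lx\n", start, start + align);
		start += align;
	}

	if (start < end)				/* unaligned tail */
		printf("tail:   %#lx-%#lx\n", start, end);
}

int main(void)
{
	split_range(0x1ff000, 0x601000, 0x200000);	/* 2 MiB chunks */
	return 0;
}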
808 | static int alloc_pte_page(pmd_t *pmd) | ||
809 | { | ||
810 | pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); | ||
811 | if (!pte) | ||
812 | return -1; | ||
813 | |||
814 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); | ||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | static int alloc_pmd_page(pud_t *pud) | ||
819 | { | ||
820 | pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); | ||
821 | if (!pmd) | ||
822 | return -1; | ||
823 | |||
824 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); | ||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | static void populate_pte(struct cpa_data *cpa, | ||
829 | unsigned long start, unsigned long end, | ||
830 | unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) | ||
831 | { | ||
832 | pte_t *pte; | ||
833 | |||
834 | pte = pte_offset_kernel(pmd, start); | ||
835 | |||
836 | while (num_pages-- && start < end) { | ||
837 | |||
838 | /* deal with the NX bit */ | ||
839 | if (!(pgprot_val(pgprot) & _PAGE_NX)) | ||
840 | cpa->pfn &= ~_PAGE_NX; | ||
841 | |||
842 | set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); | ||
843 | |||
844 | start += PAGE_SIZE; | ||
845 | cpa->pfn += PAGE_SIZE; | ||
846 | pte++; | ||
847 | } | ||
848 | } | ||
849 | |||
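Note that despite its name, cpa->pfn is handled here as a physical address: it is shifted down by PAGE_SHIFT when the PTE is formed and advanced by PAGE_SIZE per page (and OR'd directly into the PMD in populate_pmd() below). An equivalent sketch with an explicitly named parameter (illustrative only; the function name is hypothetical and the _PAGE_NX juggling is omitted):

#include <linux/mm.h>
#include <asm/pgtable.h>

static void populate_pte_sketch(unsigned long phys, unsigned long start,
				unsigned long end, unsigned int num_pages,
				pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (num_pages-- && start < end) {
		set_pte(pte++, pfn_pte(phys >> PAGE_SHIFT, pgprot));
		start += PAGE_SIZE;
		phys  += PAGE_SIZE;
	}
}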
850 | static int populate_pmd(struct cpa_data *cpa, | ||
851 | unsigned long start, unsigned long end, | ||
852 | unsigned num_pages, pud_t *pud, pgprot_t pgprot) | ||
853 | { | ||
854 | unsigned int cur_pages = 0; | ||
855 | pmd_t *pmd; | ||
856 | |||
857 | /* | ||
858 | * Not on a 2M boundary? | ||
859 | */ | ||
860 | if (start & (PMD_SIZE - 1)) { | ||
861 | unsigned long pre_end = start + (num_pages << PAGE_SHIFT); | ||
862 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; | ||
863 | |||
864 | pre_end = min_t(unsigned long, pre_end, next_page); | ||
865 | cur_pages = (pre_end - start) >> PAGE_SHIFT; | ||
866 | cur_pages = min_t(unsigned int, num_pages, cur_pages); | ||
867 | |||
868 | /* | ||
869 | * Need a PTE page? | ||
870 | */ | ||
871 | pmd = pmd_offset(pud, start); | ||
872 | if (pmd_none(*pmd)) | ||
873 | if (alloc_pte_page(pmd)) | ||
874 | return -1; | ||
875 | |||
876 | populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); | ||
877 | |||
878 | start = pre_end; | ||
879 | } | ||
880 | |||
881 | /* | ||
882 | * We mapped them all? | ||
883 | */ | ||
884 | if (num_pages == cur_pages) | ||
885 | return cur_pages; | ||
886 | |||
887 | while (end - start >= PMD_SIZE) { | ||
888 | |||
889 | /* | ||
890 | * We cannot use a 1G page so allocate a PMD page if needed. | ||
891 | */ | ||
892 | if (pud_none(*pud)) | ||
893 | if (alloc_pmd_page(pud)) | ||
894 | return -1; | ||
895 | |||
896 | pmd = pmd_offset(pud, start); | ||
897 | |||
898 | set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); | ||
899 | |||
900 | start += PMD_SIZE; | ||
901 | cpa->pfn += PMD_SIZE; | ||
902 | cur_pages += PMD_SIZE >> PAGE_SHIFT; | ||
903 | } | ||
904 | |||
905 | /* | ||
906 | * Map trailing 4K pages. | ||
907 | */ | ||
908 | if (start < end) { | ||
909 | pmd = pmd_offset(pud, start); | ||
910 | if (pmd_none(*pmd)) | ||
911 | if (alloc_pte_page(pmd)) | ||
912 | return -1; | ||
913 | |||
914 | populate_pte(cpa, start, end, num_pages - cur_pages, | ||
915 | pmd, pgprot); | ||
916 | } | ||
917 | return num_pages; | ||
918 | } | ||
919 | |||
920 | static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, | ||
921 | pgprot_t pgprot) | ||
922 | { | ||
923 | pud_t *pud; | ||
924 | unsigned long end; | ||
925 | int cur_pages = 0; | ||
926 | |||
927 | end = start + (cpa->numpages << PAGE_SHIFT); | ||
928 | |||
929 | /* | ||
930 | * Not on a Gb page boundary? => map everything up to it with | ||
931 | * smaller pages. | ||
932 | */ | ||
933 | if (start & (PUD_SIZE - 1)) { | ||
934 | unsigned long pre_end; | ||
935 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; | ||
936 | |||
937 | pre_end = min_t(unsigned long, end, next_page); | ||
938 | cur_pages = (pre_end - start) >> PAGE_SHIFT; | ||
939 | cur_pages = min_t(int, (int)cpa->numpages, cur_pages); | ||
940 | |||
941 | pud = pud_offset(pgd, start); | ||
942 | |||
943 | /* | ||
944 | * Need a PMD page? | ||
945 | */ | ||
946 | if (pud_none(*pud)) | ||
947 | if (alloc_pmd_page(pud)) | ||
948 | return -1; | ||
949 | |||
950 | cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, | ||
951 | pud, pgprot); | ||
952 | if (cur_pages < 0) | ||
953 | return cur_pages; | ||
954 | |||
955 | start = pre_end; | ||
956 | } | ||
957 | |||
958 | /* We mapped them all? */ | ||
959 | if (cpa->numpages == cur_pages) | ||
960 | return cur_pages; | ||
961 | |||
962 | pud = pud_offset(pgd, start); | ||
963 | |||
964 | /* | ||
965 | * Map everything starting from the Gb boundary, possibly with 1G pages | ||
966 | */ | ||
967 | while (end - start >= PUD_SIZE) { | ||
968 | set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); | ||
969 | |||
970 | start += PUD_SIZE; | ||
971 | cpa->pfn += PUD_SIZE; | ||
972 | cur_pages += PUD_SIZE >> PAGE_SHIFT; | ||
973 | pud++; | ||
974 | } | ||
975 | |||
976 | /* Map trailing leftover */ | ||
977 | if (start < end) { | ||
978 | int tmp; | ||
979 | |||
980 | pud = pud_offset(pgd, start); | ||
981 | if (pud_none(*pud)) | ||
982 | if (alloc_pmd_page(pud)) | ||
983 | return -1; | ||
984 | |||
985 | tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, | ||
986 | pud, pgprot); | ||
987 | if (tmp < 0) | ||
988 | return cur_pages; | ||
989 | |||
990 | cur_pages += tmp; | ||
991 | } | ||
992 | return cur_pages; | ||
993 | } | ||
994 | |||
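As a worked example of the page accounting done by populate_pud()/populate_pmd() above (illustrative, plain userspace C, ignoring the 1G case): a request of 1100 pages starting one page below a 2M boundary is satisfied by one head PTE, two 2M PMD mappings and 75 tail PTEs.

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096, pmd_pages = 512;	/* 2 MiB / 4 KiB */
	unsigned long start = 0x200000 - page;		/* 4 KiB below 2 MiB */
	unsigned long numpages = 1100, head, large, tail;

	head  = (0x200000 - start) / page;		/* PTEs up to boundary */
	large = (numpages - head) / pmd_pages;		/* whole 2 MiB PMDs    */
	tail  = numpages - head - large * pmd_pages;	/* trailing PTEs       */

	printf("head=%lu 2M-chunks=%lu tail=%lu\n", head, large, tail);
	/* prints: head=1 2M-chunks=2 tail=75 */
	return 0;
}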
995 | /* | ||
996 | * Restrictions for kernel page table do not necessarily apply when mapping in | ||
997 | * an alternate PGD. | ||
998 | */ | ||
999 | static int populate_pgd(struct cpa_data *cpa, unsigned long addr) | ||
1000 | { | ||
1001 | pgprot_t pgprot = __pgprot(_KERNPG_TABLE); | ||
1002 | bool allocd_pgd = false; | ||
1003 | pgd_t *pgd_entry; | ||
1004 | pud_t *pud = NULL; /* shut up gcc */ | ||
1005 | int ret; | ||
1006 | |||
1007 | pgd_entry = cpa->pgd + pgd_index(addr); | ||
1008 | |||
1009 | /* | ||
1010 | * Allocate a PUD page and hand it down for mapping. | ||
1011 | */ | ||
1012 | if (pgd_none(*pgd_entry)) { | ||
1013 | pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); | ||
1014 | if (!pud) | ||
1015 | return -1; | ||
1016 | |||
1017 | set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE)); | ||
1018 | allocd_pgd = true; | ||
1019 | } | ||
1020 | |||
1021 | pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); | ||
1022 | pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); | ||
1023 | |||
1024 | ret = populate_pud(cpa, addr, pgd_entry, pgprot); | ||
1025 | if (ret < 0) { | ||
1026 | unmap_pud_range(pgd_entry, addr, | ||
1027 | addr + (cpa->numpages << PAGE_SHIFT)); | ||
1028 | |||
1029 | if (allocd_pgd) { | ||
1030 | /* | ||
1031 | * If I allocated this PUD page, I can just as well | ||
1032 | * free it in this error path. | ||
1033 | */ | ||
1034 | pgd_clear(pgd_entry); | ||
1035 | free_page((unsigned long)pud); | ||
1036 | } | ||
1037 | return ret; | ||
1038 | } | ||
1039 | cpa->numpages = ret; | ||
1040 | return 0; | ||
1041 | } | ||
1042 | |||
653 | static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, | 1043 | static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, |
654 | int primary) | 1044 | int primary) |
655 | { | 1045 | { |
1046 | if (cpa->pgd) | ||
1047 | return populate_pgd(cpa, vaddr); | ||
1048 | |||
656 | /* | 1049 | /* |
657 | * Ignore all non primary paths. | 1050 | * Ignore all non primary paths. |
658 | */ | 1051 | */ |
@@ -697,7 +1090,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) | |||
697 | else | 1090 | else |
698 | address = *cpa->vaddr; | 1091 | address = *cpa->vaddr; |
699 | repeat: | 1092 | repeat: |
700 | kpte = lookup_address(address, &level); | 1093 | kpte = _lookup_address_cpa(cpa, address, &level); |
701 | if (!kpte) | 1094 | if (!kpte) |
702 | return __cpa_process_fault(cpa, address, primary); | 1095 | return __cpa_process_fault(cpa, address, primary); |
703 | 1096 | ||
@@ -761,7 +1154,7 @@ repeat: | |||
761 | /* | 1154 | /* |
762 | * We have to split the large page: | 1155 | * We have to split the large page: |
763 | */ | 1156 | */ |
764 | err = split_large_page(kpte, address); | 1157 | err = split_large_page(cpa, kpte, address); |
765 | if (!err) { | 1158 | if (!err) { |
766 | /* | 1159 | /* |
767 | * Do a global flush tlb after splitting the large page | 1160 | * Do a global flush tlb after splitting the large page |
@@ -910,6 +1303,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
910 | int ret, cache, checkalias; | 1303 | int ret, cache, checkalias; |
911 | unsigned long baddr = 0; | 1304 | unsigned long baddr = 0; |
912 | 1305 | ||
1306 | memset(&cpa, 0, sizeof(cpa)); | ||
1307 | |||
913 | /* | 1308 | /* |
914 | * Check, if we are requested to change a not supported | 1309 | * Check, if we are requested to change a not supported |
915 | * feature: | 1310 | * feature: |
@@ -1356,6 +1751,7 @@ static int __set_pages_p(struct page *page, int numpages) | |||
1356 | { | 1751 | { |
1357 | unsigned long tempaddr = (unsigned long) page_address(page); | 1752 | unsigned long tempaddr = (unsigned long) page_address(page); |
1358 | struct cpa_data cpa = { .vaddr = &tempaddr, | 1753 | struct cpa_data cpa = { .vaddr = &tempaddr, |
1754 | .pgd = NULL, | ||
1359 | .numpages = numpages, | 1755 | .numpages = numpages, |
1360 | .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), | 1756 | .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
1361 | .mask_clr = __pgprot(0), | 1757 | .mask_clr = __pgprot(0), |
@@ -1374,6 +1770,7 @@ static int __set_pages_np(struct page *page, int numpages) | |||
1374 | { | 1770 | { |
1375 | unsigned long tempaddr = (unsigned long) page_address(page); | 1771 | unsigned long tempaddr = (unsigned long) page_address(page); |
1376 | struct cpa_data cpa = { .vaddr = &tempaddr, | 1772 | struct cpa_data cpa = { .vaddr = &tempaddr, |
1773 | .pgd = NULL, | ||
1377 | .numpages = numpages, | 1774 | .numpages = numpages, |
1378 | .mask_set = __pgprot(0), | 1775 | .mask_set = __pgprot(0), |
1379 | .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), | 1776 | .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
@@ -1434,6 +1831,36 @@ bool kernel_page_present(struct page *page) | |||
1434 | 1831 | ||
1435 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 1832 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
1436 | 1833 | ||
1834 | int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, | ||
1835 | unsigned numpages, unsigned long page_flags) | ||
1836 | { | ||
1837 | int retval = -EINVAL; | ||
1838 | |||
1839 | struct cpa_data cpa = { | ||
1840 | .vaddr = &address, | ||
1841 | .pfn = pfn, | ||
1842 | .pgd = pgd, | ||
1843 | .numpages = numpages, | ||
1844 | .mask_set = __pgprot(0), | ||
1845 | .mask_clr = __pgprot(0), | ||
1846 | .flags = 0, | ||
1847 | }; | ||
1848 | |||
1849 | if (!(__supported_pte_mask & _PAGE_NX)) | ||
1850 | goto out; | ||
1851 | |||
1852 | if (!(page_flags & _PAGE_NX)) | ||
1853 | cpa.mask_clr = __pgprot(_PAGE_NX); | ||
1854 | |||
1855 | cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); | ||
1856 | |||
1857 | retval = __change_page_attr_set_clr(&cpa, 0); | ||
1858 | __flush_tlb_all(); | ||
1859 | |||
1860 | out: | ||
1861 | return retval; | ||
1862 | } | ||
1863 | |||
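A usage sketch for the new interface, modelled on the __map_region() helper added to efi_64.c further down in this diff (illustrative only; the function name is hypothetical and error handling is reduced to a warning):

#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

/* Map one EFI memory descriptor 1:1 into the trampoline page table. */
static void __init map_region_identity_sketch(efi_memory_desc_t *md)
{
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
	unsigned long pf = 0;

	if (!(md->attribute & EFI_MEMORY_WB))
		pf |= _PAGE_PCD;		/* uncached mapping */

	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, md->phys_addr,
				    md->num_pages, pf))
		pr_warn("Error mapping PA 0x%llx 1:1\n", md->phys_addr);
}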
1437 | /* | 1864 | /* |
1438 | * The testcases use internal knowledge of the implementation that shouldn't | 1865 | * The testcases use internal knowledge of the implementation that shouldn't |
1439 | * be exposed to the rest of the kernel. Include these directly here. | 1866 | * be exposed to the rest of the kernel. Include these directly here. |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 26328e800869..4ed75dd81d05 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp) | |||
359 | EMIT2(0x89, 0xd0); /* mov %edx,%eax */ | 359 | EMIT2(0x89, 0xd0); /* mov %edx,%eax */ |
360 | break; | 360 | break; |
361 | case BPF_S_ALU_MOD_K: /* A %= K; */ | 361 | case BPF_S_ALU_MOD_K: /* A %= K; */ |
362 | if (K == 1) { | ||
363 | CLEAR_A(); | ||
364 | break; | ||
365 | } | ||
362 | EMIT2(0x31, 0xd2); /* xor %edx,%edx */ | 366 | EMIT2(0x31, 0xd2); /* xor %edx,%edx */ |
363 | EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ | 367 | EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ |
364 | EMIT2(0xf7, 0xf1); /* div %ecx */ | 368 | EMIT2(0xf7, 0xf1); /* div %ecx */ |
365 | EMIT2(0x89, 0xd0); /* mov %edx,%eax */ | 369 | EMIT2(0x89, 0xd0); /* mov %edx,%eax */ |
366 | break; | 370 | break; |
367 | case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ | 371 | case BPF_S_ALU_DIV_K: /* A /= K */ |
368 | EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ | 372 | if (K == 1) |
369 | EMIT(K, 4); | 373 | break; |
370 | EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ | 374 | EMIT2(0x31, 0xd2); /* xor %edx,%edx */ |
375 | EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ | ||
376 | EMIT2(0xf7, 0xf1); /* div %ecx */ | ||
371 | break; | 377 | break; |
372 | case BPF_S_ALU_AND_X: | 378 | case BPF_S_ALU_AND_X: |
373 | seen |= SEEN_XREG; | 379 | seen |= SEEN_XREG; |
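The net effect of the JIT change above, expressed in C (illustrative only): modulo and divide by a constant K now special-case K == 1 instead of emitting a div, and the old reciprocal_divide() approximation for A /= K is replaced by a real unsigned divide.

#include <stdint.h>

static uint32_t alu_mod_k(uint32_t A, uint32_t K)
{
	if (K == 1)
		return 0;	/* CLEAR_A(): A % 1 is always 0 */
	return A % K;		/* xor %edx,%edx; div %ecx; mov %edx,%eax */
}

static uint32_t alu_div_k(uint32_t A, uint32_t K)
{
	if (K == 1)
		return A;	/* no code emitted: A / 1 is a no-op */
	return A / K;		/* xor %edx,%edx; div %ecx; quotient stays in %eax */
}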
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b046e070e088..bca9e85daaa5 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
6 | #include <linux/dmi.h> | 6 | #include <linux/dmi.h> |
7 | #include <linux/pci.h> | 7 | #include <linux/pci.h> |
8 | #include <linux/init.h> | ||
9 | #include <linux/vgaarb.h> | 8 | #include <linux/vgaarb.h> |
10 | #include <asm/pci_x86.h> | 9 | #include <asm/pci_x86.h> |
11 | 10 | ||
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 51384ca727ad..84b9d672843d 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/pci_x86.h> | 31 | #include <asm/pci_x86.h> |
32 | #include <asm/hw_irq.h> | 32 | #include <asm/hw_irq.h> |
33 | #include <asm/io_apic.h> | 33 | #include <asm/io_apic.h> |
34 | #include <asm/intel-mid.h> | ||
34 | 35 | ||
35 | #define PCIE_CAP_OFFSET 0x100 | 36 | #define PCIE_CAP_OFFSET 0x100 |
36 | 37 | ||
@@ -219,7 +220,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) | |||
219 | irq_attr.ioapic = mp_find_ioapic(dev->irq); | 220 | irq_attr.ioapic = mp_find_ioapic(dev->irq); |
220 | irq_attr.ioapic_pin = dev->irq; | 221 | irq_attr.ioapic_pin = dev->irq; |
221 | irq_attr.trigger = 1; /* level */ | 222 | irq_attr.trigger = 1; /* level */ |
222 | irq_attr.polarity = 1; /* active low */ | 223 | if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) |
224 | irq_attr.polarity = 0; /* active high */ | ||
225 | else | ||
226 | irq_attr.polarity = 1; /* active low */ | ||
223 | io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr); | 227 | io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr); |
224 | 228 | ||
225 | return 0; | 229 | return 0; |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index cceb813044ef..d62ec87a2b26 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -12,6 +12,8 @@ | |||
12 | * Bibo Mao <bibo.mao@intel.com> | 12 | * Bibo Mao <bibo.mao@intel.com> |
13 | * Chandramouli Narayanan <mouli@linux.intel.com> | 13 | * Chandramouli Narayanan <mouli@linux.intel.com> |
14 | * Huang Ying <ying.huang@intel.com> | 14 | * Huang Ying <ying.huang@intel.com> |
15 | * Copyright (C) 2013 SuSE Labs | ||
16 | * Borislav Petkov <bp@suse.de> - runtime services VA mapping | ||
15 | * | 17 | * |
16 | * Copied from efi_32.c to eliminate the duplicated code between EFI | 18 | * Copied from efi_32.c to eliminate the duplicated code between EFI |
17 | * 32/64 support code. --ying 2007-10-26 | 19 | * 32/64 support code. --ying 2007-10-26 |
@@ -51,7 +53,7 @@ | |||
51 | #include <asm/x86_init.h> | 53 | #include <asm/x86_init.h> |
52 | #include <asm/rtc.h> | 54 | #include <asm/rtc.h> |
53 | 55 | ||
54 | #define EFI_DEBUG 1 | 56 | #define EFI_DEBUG |
55 | 57 | ||
56 | #define EFI_MIN_RESERVE 5120 | 58 | #define EFI_MIN_RESERVE 5120 |
57 | 59 | ||
@@ -74,6 +76,8 @@ static __initdata efi_config_table_type_t arch_tables[] = { | |||
74 | {NULL_GUID, NULL, NULL}, | 76 | {NULL_GUID, NULL, NULL}, |
75 | }; | 77 | }; |
76 | 78 | ||
79 | u64 efi_setup; /* efi setup_data physical address */ | ||
80 | |||
77 | /* | 81 | /* |
78 | * Returns 1 if 'facility' is enabled, 0 otherwise. | 82 | * Returns 1 if 'facility' is enabled, 0 otherwise. |
79 | */ | 83 | */ |
@@ -110,7 +114,6 @@ static int __init setup_storage_paranoia(char *arg) | |||
110 | } | 114 | } |
111 | early_param("efi_no_storage_paranoia", setup_storage_paranoia); | 115 | early_param("efi_no_storage_paranoia", setup_storage_paranoia); |
112 | 116 | ||
113 | |||
114 | static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) | 117 | static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) |
115 | { | 118 | { |
116 | unsigned long flags; | 119 | unsigned long flags; |
@@ -398,9 +401,9 @@ int __init efi_memblock_x86_reserve_range(void) | |||
398 | return 0; | 401 | return 0; |
399 | } | 402 | } |
400 | 403 | ||
401 | #if EFI_DEBUG | ||
402 | static void __init print_efi_memmap(void) | 404 | static void __init print_efi_memmap(void) |
403 | { | 405 | { |
406 | #ifdef EFI_DEBUG | ||
404 | efi_memory_desc_t *md; | 407 | efi_memory_desc_t *md; |
405 | void *p; | 408 | void *p; |
406 | int i; | 409 | int i; |
@@ -415,8 +418,8 @@ static void __init print_efi_memmap(void) | |||
415 | md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), | 418 | md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), |
416 | (md->num_pages >> (20 - EFI_PAGE_SHIFT))); | 419 | (md->num_pages >> (20 - EFI_PAGE_SHIFT))); |
417 | } | 420 | } |
418 | } | ||
419 | #endif /* EFI_DEBUG */ | 421 | #endif /* EFI_DEBUG */ |
422 | } | ||
420 | 423 | ||
421 | void __init efi_reserve_boot_services(void) | 424 | void __init efi_reserve_boot_services(void) |
422 | { | 425 | { |
@@ -436,7 +439,7 @@ void __init efi_reserve_boot_services(void) | |||
436 | * - Not within any part of the kernel | 439 | * - Not within any part of the kernel |
437 | * - Not the bios reserved area | 440 | * - Not the bios reserved area |
438 | */ | 441 | */ |
439 | if ((start+size >= __pa_symbol(_text) | 442 | if ((start + size > __pa_symbol(_text) |
440 | && start <= __pa_symbol(_end)) || | 443 | && start <= __pa_symbol(_end)) || |
441 | !e820_all_mapped(start, start+size, E820_RAM) || | 444 | !e820_all_mapped(start, start+size, E820_RAM) || |
442 | memblock_is_region_reserved(start, size)) { | 445 | memblock_is_region_reserved(start, size)) { |
@@ -489,18 +492,27 @@ static int __init efi_systab_init(void *phys) | |||
489 | { | 492 | { |
490 | if (efi_enabled(EFI_64BIT)) { | 493 | if (efi_enabled(EFI_64BIT)) { |
491 | efi_system_table_64_t *systab64; | 494 | efi_system_table_64_t *systab64; |
495 | struct efi_setup_data *data = NULL; | ||
492 | u64 tmp = 0; | 496 | u64 tmp = 0; |
493 | 497 | ||
498 | if (efi_setup) { | ||
499 | data = early_memremap(efi_setup, sizeof(*data)); | ||
500 | if (!data) | ||
501 | return -ENOMEM; | ||
502 | } | ||
494 | systab64 = early_ioremap((unsigned long)phys, | 503 | systab64 = early_ioremap((unsigned long)phys, |
495 | sizeof(*systab64)); | 504 | sizeof(*systab64)); |
496 | if (systab64 == NULL) { | 505 | if (systab64 == NULL) { |
497 | pr_err("Couldn't map the system table!\n"); | 506 | pr_err("Couldn't map the system table!\n"); |
507 | if (data) | ||
508 | early_iounmap(data, sizeof(*data)); | ||
498 | return -ENOMEM; | 509 | return -ENOMEM; |
499 | } | 510 | } |
500 | 511 | ||
501 | efi_systab.hdr = systab64->hdr; | 512 | efi_systab.hdr = systab64->hdr; |
502 | efi_systab.fw_vendor = systab64->fw_vendor; | 513 | efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor : |
503 | tmp |= systab64->fw_vendor; | 514 | systab64->fw_vendor; |
515 | tmp |= data ? data->fw_vendor : systab64->fw_vendor; | ||
504 | efi_systab.fw_revision = systab64->fw_revision; | 516 | efi_systab.fw_revision = systab64->fw_revision; |
505 | efi_systab.con_in_handle = systab64->con_in_handle; | 517 | efi_systab.con_in_handle = systab64->con_in_handle; |
506 | tmp |= systab64->con_in_handle; | 518 | tmp |= systab64->con_in_handle; |
@@ -514,15 +526,20 @@ static int __init efi_systab_init(void *phys) | |||
514 | tmp |= systab64->stderr_handle; | 526 | tmp |= systab64->stderr_handle; |
515 | efi_systab.stderr = systab64->stderr; | 527 | efi_systab.stderr = systab64->stderr; |
516 | tmp |= systab64->stderr; | 528 | tmp |= systab64->stderr; |
517 | efi_systab.runtime = (void *)(unsigned long)systab64->runtime; | 529 | efi_systab.runtime = data ? |
518 | tmp |= systab64->runtime; | 530 | (void *)(unsigned long)data->runtime : |
531 | (void *)(unsigned long)systab64->runtime; | ||
532 | tmp |= data ? data->runtime : systab64->runtime; | ||
519 | efi_systab.boottime = (void *)(unsigned long)systab64->boottime; | 533 | efi_systab.boottime = (void *)(unsigned long)systab64->boottime; |
520 | tmp |= systab64->boottime; | 534 | tmp |= systab64->boottime; |
521 | efi_systab.nr_tables = systab64->nr_tables; | 535 | efi_systab.nr_tables = systab64->nr_tables; |
522 | efi_systab.tables = systab64->tables; | 536 | efi_systab.tables = data ? (unsigned long)data->tables : |
523 | tmp |= systab64->tables; | 537 | systab64->tables; |
538 | tmp |= data ? data->tables : systab64->tables; | ||
524 | 539 | ||
525 | early_iounmap(systab64, sizeof(*systab64)); | 540 | early_iounmap(systab64, sizeof(*systab64)); |
541 | if (data) | ||
542 | early_iounmap(data, sizeof(*data)); | ||
526 | #ifdef CONFIG_X86_32 | 543 | #ifdef CONFIG_X86_32 |
527 | if (tmp >> 32) { | 544 | if (tmp >> 32) { |
528 | pr_err("EFI data located above 4GB, disabling EFI.\n"); | 545 | pr_err("EFI data located above 4GB, disabling EFI.\n"); |
@@ -626,6 +643,62 @@ static int __init efi_memmap_init(void) | |||
626 | return 0; | 643 | return 0; |
627 | } | 644 | } |
628 | 645 | ||
646 | /* | ||
647 | * A number of config table entries get remapped to virtual addresses | ||
648 | * after entering EFI virtual mode. However, the kexec kernel requires | ||
649 | * their physical addresses; therefore we pass them via setup_data and | ||
650 | * correct those entries to their respective physical addresses here. | ||
651 | * | ||
652 | * Currently this only handles SMBIOS, which is necessary for some firmware | ||
653 | * implementations. | ||
654 | */ | ||
655 | static int __init efi_reuse_config(u64 tables, int nr_tables) | ||
656 | { | ||
657 | int i, sz, ret = 0; | ||
658 | void *p, *tablep; | ||
659 | struct efi_setup_data *data; | ||
660 | |||
661 | if (!efi_setup) | ||
662 | return 0; | ||
663 | |||
664 | if (!efi_enabled(EFI_64BIT)) | ||
665 | return 0; | ||
666 | |||
667 | data = early_memremap(efi_setup, sizeof(*data)); | ||
668 | if (!data) { | ||
669 | ret = -ENOMEM; | ||
670 | goto out; | ||
671 | } | ||
672 | |||
673 | if (!data->smbios) | ||
674 | goto out_memremap; | ||
675 | |||
676 | sz = sizeof(efi_config_table_64_t); | ||
677 | |||
678 | p = tablep = early_memremap(tables, nr_tables * sz); | ||
679 | if (!p) { | ||
680 | pr_err("Could not map Configuration table!\n"); | ||
681 | ret = -ENOMEM; | ||
682 | goto out_memremap; | ||
683 | } | ||
684 | |||
685 | for (i = 0; i < efi.systab->nr_tables; i++) { | ||
686 | efi_guid_t guid; | ||
687 | |||
688 | guid = ((efi_config_table_64_t *)p)->guid; | ||
689 | |||
690 | if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) | ||
691 | ((efi_config_table_64_t *)p)->table = data->smbios; | ||
692 | p += sz; | ||
693 | } | ||
694 | early_iounmap(tablep, nr_tables * sz); | ||
695 | |||
696 | out_memremap: | ||
697 | early_iounmap(data, sizeof(*data)); | ||
698 | out: | ||
699 | return ret; | ||
700 | } | ||
701 | |||
629 | void __init efi_init(void) | 702 | void __init efi_init(void) |
630 | { | 703 | { |
631 | efi_char16_t *c16; | 704 | efi_char16_t *c16; |
@@ -651,6 +724,10 @@ void __init efi_init(void) | |||
651 | 724 | ||
652 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); | 725 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); |
653 | 726 | ||
727 | efi.config_table = (unsigned long)efi.systab->tables; | ||
728 | efi.fw_vendor = (unsigned long)efi.systab->fw_vendor; | ||
729 | efi.runtime = (unsigned long)efi.systab->runtime; | ||
730 | |||
654 | /* | 731 | /* |
655 | * Show what we know for posterity | 732 | * Show what we know for posterity |
656 | */ | 733 | */ |
@@ -667,6 +744,9 @@ void __init efi_init(void) | |||
667 | efi.systab->hdr.revision >> 16, | 744 | efi.systab->hdr.revision >> 16, |
668 | efi.systab->hdr.revision & 0xffff, vendor); | 745 | efi.systab->hdr.revision & 0xffff, vendor); |
669 | 746 | ||
747 | if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables)) | ||
748 | return; | ||
749 | |||
670 | if (efi_config_init(arch_tables)) | 750 | if (efi_config_init(arch_tables)) |
671 | return; | 751 | return; |
672 | 752 | ||
@@ -684,15 +764,12 @@ void __init efi_init(void) | |||
684 | return; | 764 | return; |
685 | set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); | 765 | set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); |
686 | } | 766 | } |
687 | |||
688 | if (efi_memmap_init()) | 767 | if (efi_memmap_init()) |
689 | return; | 768 | return; |
690 | 769 | ||
691 | set_bit(EFI_MEMMAP, &x86_efi_facility); | 770 | set_bit(EFI_MEMMAP, &x86_efi_facility); |
692 | 771 | ||
693 | #if EFI_DEBUG | ||
694 | print_efi_memmap(); | 772 | print_efi_memmap(); |
695 | #endif | ||
696 | } | 773 | } |
697 | 774 | ||
698 | void __init efi_late_init(void) | 775 | void __init efi_late_init(void) |
@@ -741,36 +818,38 @@ void efi_memory_uc(u64 addr, unsigned long size) | |||
741 | set_memory_uc(addr, npages); | 818 | set_memory_uc(addr, npages); |
742 | } | 819 | } |
743 | 820 | ||
744 | /* | 821 | void __init old_map_region(efi_memory_desc_t *md) |
745 | * This function will switch the EFI runtime services to virtual mode. | ||
746 | * Essentially, look through the EFI memmap and map every region that | ||
747 | * has the runtime attribute bit set in its memory descriptor and update | ||
748 | * that memory descriptor with the virtual address obtained from ioremap(). | ||
749 | * This enables the runtime services to be called without having to | ||
750 | * thunk back into physical mode for every invocation. | ||
751 | */ | ||
752 | void __init efi_enter_virtual_mode(void) | ||
753 | { | 822 | { |
754 | efi_memory_desc_t *md, *prev_md = NULL; | 823 | u64 start_pfn, end_pfn, end; |
755 | efi_status_t status; | ||
756 | unsigned long size; | 824 | unsigned long size; |
757 | u64 end, systab, start_pfn, end_pfn; | 825 | void *va; |
758 | void *p, *va, *new_memmap = NULL; | ||
759 | int count = 0; | ||
760 | 826 | ||
761 | efi.systab = NULL; | 827 | start_pfn = PFN_DOWN(md->phys_addr); |
828 | size = md->num_pages << PAGE_SHIFT; | ||
829 | end = md->phys_addr + size; | ||
830 | end_pfn = PFN_UP(end); | ||
762 | 831 | ||
763 | /* | 832 | if (pfn_range_is_mapped(start_pfn, end_pfn)) { |
764 | * We don't do virtual mode, since we don't do runtime services, on | 833 | va = __va(md->phys_addr); |
765 | * non-native EFI | ||
766 | */ | ||
767 | 834 | ||
768 | if (!efi_is_native()) { | 835 | if (!(md->attribute & EFI_MEMORY_WB)) |
769 | efi_unmap_memmap(); | 836 | efi_memory_uc((u64)(unsigned long)va, size); |
770 | return; | 837 | } else |
771 | } | 838 | va = efi_ioremap(md->phys_addr, size, |
839 | md->type, md->attribute); | ||
840 | |||
841 | md->virt_addr = (u64) (unsigned long) va; | ||
842 | if (!va) | ||
843 | pr_err("ioremap of 0x%llX failed!\n", | ||
844 | (unsigned long long)md->phys_addr); | ||
845 | } | ||
846 | |||
847 | /* Merge contiguous regions of the same type and attribute */ | ||
848 | static void __init efi_merge_regions(void) | ||
849 | { | ||
850 | void *p; | ||
851 | efi_memory_desc_t *md, *prev_md = NULL; | ||
772 | 852 | ||
773 | /* Merge contiguous regions of the same type and attribute */ | ||
774 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 853 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
775 | u64 prev_size; | 854 | u64 prev_size; |
776 | md = p; | 855 | md = p; |
@@ -796,6 +875,77 @@ void __init efi_enter_virtual_mode(void) | |||
796 | } | 875 | } |
797 | prev_md = md; | 876 | prev_md = md; |
798 | } | 877 | } |
878 | } | ||
879 | |||
880 | static void __init get_systab_virt_addr(efi_memory_desc_t *md) | ||
881 | { | ||
882 | unsigned long size; | ||
883 | u64 end, systab; | ||
884 | |||
885 | size = md->num_pages << EFI_PAGE_SHIFT; | ||
886 | end = md->phys_addr + size; | ||
887 | systab = (u64)(unsigned long)efi_phys.systab; | ||
888 | if (md->phys_addr <= systab && systab < end) { | ||
889 | systab += md->virt_addr - md->phys_addr; | ||
890 | efi.systab = (efi_system_table_t *)(unsigned long)systab; | ||
891 | } | ||
892 | } | ||
893 | |||
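The fixup in get_systab_virt_addr() is plain offset arithmetic: if the physical system table lies inside a descriptor's range, its virtual address is the physical address shifted by that descriptor's phys-to-virt delta. A tiny standalone illustration (userspace C, made-up addresses):

#include <stdio.h>

int main(void)
{
	unsigned long long phys_addr = 0x7f000000ULL;		/* md->phys_addr */
	unsigned long long virt_addr = 0xffffffef00000000ULL;	/* md->virt_addr */
	unsigned long long systab_phys = 0x7f001230ULL;

	printf("systab virt = %#llx\n", systab_phys + (virt_addr - phys_addr));
	/* prints: systab virt = 0xffffffef00001230 */
	return 0;
}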
894 | static int __init save_runtime_map(void) | ||
895 | { | ||
896 | efi_memory_desc_t *md; | ||
897 | void *tmp, *p, *q = NULL; | ||
898 | int count = 0; | ||
899 | |||
900 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
901 | md = p; | ||
902 | |||
903 | if (!(md->attribute & EFI_MEMORY_RUNTIME) || | ||
904 | (md->type == EFI_BOOT_SERVICES_CODE) || | ||
905 | (md->type == EFI_BOOT_SERVICES_DATA)) | ||
906 | continue; | ||
907 | tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL); | ||
908 | if (!tmp) | ||
909 | goto out; | ||
910 | q = tmp; | ||
911 | |||
912 | memcpy(q + count * memmap.desc_size, md, memmap.desc_size); | ||
913 | count++; | ||
914 | } | ||
915 | |||
916 | efi_runtime_map_setup(q, count, memmap.desc_size); | ||
917 | |||
918 | return 0; | ||
919 | out: | ||
920 | kfree(q); | ||
921 | return -ENOMEM; | ||
922 | } | ||
923 | |||
924 | /* | ||
925 | * Map efi regions which were passed via setup_data. The virt_addr is a fixed | ||
926 | * addr which was used in the first kernel of a kexec boot. | ||
927 | */ | ||
928 | static void __init efi_map_regions_fixed(void) | ||
929 | { | ||
930 | void *p; | ||
931 | efi_memory_desc_t *md; | ||
932 | |||
933 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
934 | md = p; | ||
935 | efi_map_region_fixed(md); /* FIXME: add error handling */ | ||
936 | get_systab_virt_addr(md); | ||
937 | } | ||
938 | |||
939 | } | ||
940 | |||
941 | /* | ||
942 | * Map efi memory ranges for runtime services and update new_memmap with virtual | ||
943 | * addresses. | ||
944 | */ | ||
945 | static void * __init efi_map_regions(int *count) | ||
946 | { | ||
947 | efi_memory_desc_t *md; | ||
948 | void *p, *tmp, *new_memmap = NULL; | ||
799 | 949 | ||
800 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 950 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
801 | md = p; | 951 | md = p; |
@@ -807,53 +957,95 @@ void __init efi_enter_virtual_mode(void) | |||
807 | continue; | 957 | continue; |
808 | } | 958 | } |
809 | 959 | ||
810 | size = md->num_pages << EFI_PAGE_SHIFT; | 960 | efi_map_region(md); |
811 | end = md->phys_addr + size; | 961 | get_systab_virt_addr(md); |
812 | 962 | ||
813 | start_pfn = PFN_DOWN(md->phys_addr); | 963 | tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size, |
814 | end_pfn = PFN_UP(end); | 964 | GFP_KERNEL); |
815 | if (pfn_range_is_mapped(start_pfn, end_pfn)) { | 965 | if (!tmp) |
816 | va = __va(md->phys_addr); | 966 | goto out; |
967 | new_memmap = tmp; | ||
968 | memcpy(new_memmap + (*count * memmap.desc_size), md, | ||
969 | memmap.desc_size); | ||
970 | (*count)++; | ||
971 | } | ||
817 | 972 | ||
818 | if (!(md->attribute & EFI_MEMORY_WB)) | 973 | return new_memmap; |
819 | efi_memory_uc((u64)(unsigned long)va, size); | 974 | out: |
820 | } else | 975 | kfree(new_memmap); |
821 | va = efi_ioremap(md->phys_addr, size, | 976 | return NULL; |
822 | md->type, md->attribute); | 977 | } |
978 | |||
979 | /* | ||
980 | * This function will switch the EFI runtime services to virtual mode. | ||
981 | * Essentially, we look through the EFI memmap and map every region that | ||
982 | * has the runtime attribute bit set in its memory descriptor into the | ||
983 | * ->trampoline_pgd page table using a top-down VA allocation scheme. | ||
984 | * | ||
985 | * The old method which used to update that memory descriptor with the | ||
986 | * virtual address obtained from ioremap() is still supported when the | ||
987 | * kernel is booted with efi=old_map on its command line. Same old | ||
988 | * method enabled the runtime services to be called without having to | ||
989 | * thunk back into physical mode for every invocation. | ||
990 | * | ||
991 | * The new method does a pagetable switch in a preemption-safe manner | ||
992 | * so that we're in a different address space when calling a runtime | ||
993 | * function. For function arguments passing we do copy the PGDs of the | ||
994 | * kernel page table into ->trampoline_pgd prior to each call. | ||
995 | * | ||
996 | * Specially for kexec boot, efi runtime maps in previous kernel should | ||
997 | * be passed in via setup_data. In that case runtime ranges will be mapped | ||
998 | * to the same virtual addresses as the first kernel. | ||
999 | */ | ||
1000 | void __init efi_enter_virtual_mode(void) | ||
1001 | { | ||
1002 | efi_status_t status; | ||
1003 | void *new_memmap = NULL; | ||
1004 | int err, count = 0; | ||
823 | 1005 | ||
824 | md->virt_addr = (u64) (unsigned long) va; | 1006 | efi.systab = NULL; |
825 | 1007 | ||
826 | if (!va) { | 1008 | /* |
827 | pr_err("ioremap of 0x%llX failed!\n", | 1009 | * We don't do virtual mode, since we don't do runtime services, on |
828 | (unsigned long long)md->phys_addr); | 1010 | * non-native EFI |
829 | continue; | 1011 | */ |
830 | } | 1012 | if (!efi_is_native()) { |
1013 | efi_unmap_memmap(); | ||
1014 | return; | ||
1015 | } | ||
831 | 1016 | ||
832 | systab = (u64) (unsigned long) efi_phys.systab; | 1017 | if (efi_setup) { |
833 | if (md->phys_addr <= systab && systab < end) { | 1018 | efi_map_regions_fixed(); |
834 | systab += md->virt_addr - md->phys_addr; | 1019 | } else { |
835 | efi.systab = (efi_system_table_t *) (unsigned long) systab; | 1020 | efi_merge_regions(); |
1021 | new_memmap = efi_map_regions(&count); | ||
1022 | if (!new_memmap) { | ||
1023 | pr_err("Error reallocating memory, EFI runtime non-functional!\n"); | ||
1024 | return; | ||
836 | } | 1025 | } |
837 | new_memmap = krealloc(new_memmap, | ||
838 | (count + 1) * memmap.desc_size, | ||
839 | GFP_KERNEL); | ||
840 | memcpy(new_memmap + (count * memmap.desc_size), md, | ||
841 | memmap.desc_size); | ||
842 | count++; | ||
843 | } | 1026 | } |
844 | 1027 | ||
1028 | err = save_runtime_map(); | ||
1029 | if (err) | ||
1030 | pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n"); | ||
1031 | |||
845 | BUG_ON(!efi.systab); | 1032 | BUG_ON(!efi.systab); |
846 | 1033 | ||
847 | status = phys_efi_set_virtual_address_map( | 1034 | efi_setup_page_tables(); |
848 | memmap.desc_size * count, | 1035 | efi_sync_low_kernel_mappings(); |
849 | memmap.desc_size, | ||
850 | memmap.desc_version, | ||
851 | (efi_memory_desc_t *)__pa(new_memmap)); | ||
852 | 1036 | ||
853 | if (status != EFI_SUCCESS) { | 1037 | if (!efi_setup) { |
854 | pr_alert("Unable to switch EFI into virtual mode " | 1038 | status = phys_efi_set_virtual_address_map( |
855 | "(status=%lx)!\n", status); | 1039 | memmap.desc_size * count, |
856 | panic("EFI call to SetVirtualAddressMap() failed!"); | 1040 | memmap.desc_size, |
1041 | memmap.desc_version, | ||
1042 | (efi_memory_desc_t *)__pa(new_memmap)); | ||
1043 | |||
1044 | if (status != EFI_SUCCESS) { | ||
1045 | pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n", | ||
1046 | status); | ||
1047 | panic("EFI call to SetVirtualAddressMap() failed!"); | ||
1048 | } | ||
857 | } | 1049 | } |
858 | 1050 | ||
859 | /* | 1051 | /* |
@@ -876,7 +1068,8 @@ void __init efi_enter_virtual_mode(void) | |||
876 | efi.query_variable_info = virt_efi_query_variable_info; | 1068 | efi.query_variable_info = virt_efi_query_variable_info; |
877 | efi.update_capsule = virt_efi_update_capsule; | 1069 | efi.update_capsule = virt_efi_update_capsule; |
878 | efi.query_capsule_caps = virt_efi_query_capsule_caps; | 1070 | efi.query_capsule_caps = virt_efi_query_capsule_caps; |
879 | if (__supported_pte_mask & _PAGE_NX) | 1071 | |
1072 | if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) | ||
880 | runtime_code_page_mkexec(); | 1073 | runtime_code_page_mkexec(); |
881 | 1074 | ||
882 | kfree(new_memmap); | 1075 | kfree(new_memmap); |
@@ -1006,3 +1199,15 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | |||
1006 | return EFI_SUCCESS; | 1199 | return EFI_SUCCESS; |
1007 | } | 1200 | } |
1008 | EXPORT_SYMBOL_GPL(efi_query_variable_store); | 1201 | EXPORT_SYMBOL_GPL(efi_query_variable_store); |
1202 | |||
1203 | static int __init parse_efi_cmdline(char *str) | ||
1204 | { | ||
1205 | if (*str == '=') | ||
1206 | str++; | ||
1207 | |||
1208 | if (!strncmp(str, "old_map", 7)) | ||
1209 | set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); | ||
1210 | |||
1211 | return 0; | ||
1212 | } | ||
1213 | early_param("efi", parse_efi_cmdline); | ||
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 40e446941dd7..249b183cf417 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c | |||
@@ -37,9 +37,19 @@ | |||
37 | * claim EFI runtime service handler exclusively and to duplicate a memory in | 37 | * claim EFI runtime service handler exclusively and to duplicate a memory in |
38 | * low memory space say 0 - 3G. | 38 | * low memory space say 0 - 3G. |
39 | */ | 39 | */ |
40 | |||
41 | static unsigned long efi_rt_eflags; | 40 | static unsigned long efi_rt_eflags; |
42 | 41 | ||
42 | void efi_sync_low_kernel_mappings(void) {} | ||
43 | void efi_setup_page_tables(void) {} | ||
44 | |||
45 | void __init efi_map_region(efi_memory_desc_t *md) | ||
46 | { | ||
47 | old_map_region(md); | ||
48 | } | ||
49 | |||
50 | void __init efi_map_region_fixed(efi_memory_desc_t *md) {} | ||
51 | void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} | ||
52 | |||
43 | void efi_call_phys_prelog(void) | 53 | void efi_call_phys_prelog(void) |
44 | { | 54 | { |
45 | struct desc_ptr gdt_descr; | 55 | struct desc_ptr gdt_descr; |
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 39a0e7f1f0a3..6284f158a47d 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -38,10 +38,28 @@ | |||
38 | #include <asm/efi.h> | 38 | #include <asm/efi.h> |
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/fixmap.h> | 40 | #include <asm/fixmap.h> |
41 | #include <asm/realmode.h> | ||
41 | 42 | ||
42 | static pgd_t *save_pgd __initdata; | 43 | static pgd_t *save_pgd __initdata; |
43 | static unsigned long efi_flags __initdata; | 44 | static unsigned long efi_flags __initdata; |
44 | 45 | ||
46 | /* | ||
47 | * We allocate runtime services regions bottom-up, starting from -4G, i.e. | ||
48 | * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G. | ||
49 | */ | ||
50 | static u64 efi_va = -4 * (1UL << 30); | ||
51 | #define EFI_VA_END (-68 * (1UL << 30)) | ||
52 | |||
53 | /* | ||
54 | * Scratch space used for switching the pagetable in the EFI stub | ||
55 | */ | ||
56 | struct efi_scratch { | ||
57 | u64 r15; | ||
58 | u64 prev_cr3; | ||
59 | pgd_t *efi_pgt; | ||
60 | bool use_pgd; | ||
61 | }; | ||
62 | |||
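The field layout of efi_scratch matters: the SWITCH_PGT/RESTORE_PGT macros added to efi_stub_64.S further down in this diff address it as efi_scratch+0/+8/+16/+24. A hypothetical build-time check pinning those offsets down would look like this (illustrative only, not part of the patch; offsets assume the usual x86-64 ABI layout):

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void efi_scratch_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct efi_scratch, r15)      != 0);
	BUILD_BUG_ON(offsetof(struct efi_scratch, prev_cr3) != 8);
	BUILD_BUG_ON(offsetof(struct efi_scratch, efi_pgt)  != 16);
	BUILD_BUG_ON(offsetof(struct efi_scratch, use_pgd)  != 24);
}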
45 | static void __init early_code_mapping_set_exec(int executable) | 63 | static void __init early_code_mapping_set_exec(int executable) |
46 | { | 64 | { |
47 | efi_memory_desc_t *md; | 65 | efi_memory_desc_t *md; |
@@ -65,6 +83,9 @@ void __init efi_call_phys_prelog(void) | |||
65 | int pgd; | 83 | int pgd; |
66 | int n_pgds; | 84 | int n_pgds; |
67 | 85 | ||
86 | if (!efi_enabled(EFI_OLD_MEMMAP)) | ||
87 | return; | ||
88 | |||
68 | early_code_mapping_set_exec(1); | 89 | early_code_mapping_set_exec(1); |
69 | local_irq_save(efi_flags); | 90 | local_irq_save(efi_flags); |
70 | 91 | ||
@@ -86,6 +107,10 @@ void __init efi_call_phys_epilog(void) | |||
86 | */ | 107 | */ |
87 | int pgd; | 108 | int pgd; |
88 | int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); | 109 | int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); |
110 | |||
111 | if (!efi_enabled(EFI_OLD_MEMMAP)) | ||
112 | return; | ||
113 | |||
89 | for (pgd = 0; pgd < n_pgds; pgd++) | 114 | for (pgd = 0; pgd < n_pgds; pgd++) |
90 | set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); | 115 | set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); |
91 | kfree(save_pgd); | 116 | kfree(save_pgd); |
@@ -94,6 +119,96 @@ void __init efi_call_phys_epilog(void) | |||
94 | early_code_mapping_set_exec(0); | 119 | early_code_mapping_set_exec(0); |
95 | } | 120 | } |
96 | 121 | ||
122 | /* | ||
123 | * Add low kernel mappings for passing arguments to EFI functions. | ||
124 | */ | ||
125 | void efi_sync_low_kernel_mappings(void) | ||
126 | { | ||
127 | unsigned num_pgds; | ||
128 | pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); | ||
129 | |||
130 | if (efi_enabled(EFI_OLD_MEMMAP)) | ||
131 | return; | ||
132 | |||
133 | num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); | ||
134 | |||
135 | memcpy(pgd + pgd_index(PAGE_OFFSET), | ||
136 | init_mm.pgd + pgd_index(PAGE_OFFSET), | ||
137 | sizeof(pgd_t) * num_pgds); | ||
138 | } | ||
139 | |||
140 | void efi_setup_page_tables(void) | ||
141 | { | ||
142 | efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; | ||
143 | |||
144 | if (!efi_enabled(EFI_OLD_MEMMAP)) | ||
145 | efi_scratch.use_pgd = true; | ||
146 | } | ||
147 | |||
148 | static void __init __map_region(efi_memory_desc_t *md, u64 va) | ||
149 | { | ||
150 | pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); | ||
151 | unsigned long pf = 0; | ||
152 | |||
153 | if (!(md->attribute & EFI_MEMORY_WB)) | ||
154 | pf |= _PAGE_PCD; | ||
155 | |||
156 | if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) | ||
157 | pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", | ||
158 | md->phys_addr, va); | ||
159 | } | ||
160 | |||
161 | void __init efi_map_region(efi_memory_desc_t *md) | ||
162 | { | ||
163 | unsigned long size = md->num_pages << PAGE_SHIFT; | ||
164 | u64 pa = md->phys_addr; | ||
165 | |||
166 | if (efi_enabled(EFI_OLD_MEMMAP)) | ||
167 | return old_map_region(md); | ||
168 | |||
169 | /* | ||
170 | * Make sure the 1:1 mappings are present as a catch-all for b0rked | ||
171 | * firmware which doesn't update all internal pointers after switching | ||
172 | * to virtual mode and would otherwise crap on us. | ||
173 | */ | ||
174 | __map_region(md, md->phys_addr); | ||
175 | |||
176 | efi_va -= size; | ||
177 | |||
178 | /* Is PA 2M-aligned? */ | ||
179 | if (!(pa & (PMD_SIZE - 1))) { | ||
180 | efi_va &= PMD_MASK; | ||
181 | } else { | ||
182 | u64 pa_offset = pa & (PMD_SIZE - 1); | ||
183 | u64 prev_va = efi_va; | ||
184 | |||
185 | /* get us the same offset within this 2M page */ | ||
186 | efi_va = (efi_va & PMD_MASK) + pa_offset; | ||
187 | |||
188 | if (efi_va > prev_va) | ||
189 | efi_va -= PMD_SIZE; | ||
190 | } | ||
191 | |||
192 | if (efi_va < EFI_VA_END) { | ||
193 | pr_warn(FW_WARN "VA address range overflow!\n"); | ||
194 | return; | ||
195 | } | ||
196 | |||
197 | /* Do the VA map */ | ||
198 | __map_region(md, efi_va); | ||
199 | md->virt_addr = efi_va; | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges. | ||
204 | * md->virt_addr is the original virtual address which had been mapped in kexec | ||
205 | * 1st kernel. | ||
206 | */ | ||
207 | void __init efi_map_region_fixed(efi_memory_desc_t *md) | ||
208 | { | ||
209 | __map_region(md, md->virt_addr); | ||
210 | } | ||
211 | |||
97 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, | 212 | void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, |
98 | u32 type, u64 attribute) | 213 | u32 type, u64 attribute) |
99 | { | 214 | { |
@@ -113,3 +228,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, | |||
113 | 228 | ||
114 | return (void __iomem *)__va(phys_addr); | 229 | return (void __iomem *)__va(phys_addr); |
115 | } | 230 | } |
231 | |||
232 | void __init parse_efi_setup(u64 phys_addr, u32 data_len) | ||
233 | { | ||
234 | efi_setup = phys_addr + sizeof(struct setup_data); | ||
235 | } | ||
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 4c07ccab8146..88073b140298 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S | |||
@@ -34,10 +34,47 @@ | |||
34 | mov %rsi, %cr0; \ | 34 | mov %rsi, %cr0; \ |
35 | mov (%rsp), %rsp | 35 | mov (%rsp), %rsp |
36 | 36 | ||
37 | /* stolen from gcc */ | ||
38 | .macro FLUSH_TLB_ALL | ||
39 | movq %r15, efi_scratch(%rip) | ||
40 | movq %r14, efi_scratch+8(%rip) | ||
41 | movq %cr4, %r15 | ||
42 | movq %r15, %r14 | ||
43 | andb $0x7f, %r14b | ||
44 | movq %r14, %cr4 | ||
45 | movq %r15, %cr4 | ||
46 | movq efi_scratch+8(%rip), %r14 | ||
47 | movq efi_scratch(%rip), %r15 | ||
48 | .endm | ||
49 | |||
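What the FLUSH_TLB_ALL macro above does, expressed in C (illustrative only): it toggles CR4.PGE, bit 7, which is exactly what the `andb $0x7f, %r14b` clears, so that global TLB entries are flushed after the page-table switch is undone.

#include <asm/special_insns.h>
#include <asm/processor-flags.h>

static inline void flush_tlb_all_sketch(void)
{
	unsigned long cr4 = native_read_cr4();

	native_write_cr4(cr4 & ~(unsigned long)X86_CR4_PGE);	/* clear PGE: full flush */
	native_write_cr4(cr4);					/* restore it */
}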
50 | .macro SWITCH_PGT | ||
51 | cmpb $0, efi_scratch+24(%rip) | ||
52 | je 1f | ||
53 | movq %r15, efi_scratch(%rip) # r15 | ||
54 | # save previous CR3 | ||
55 | movq %cr3, %r15 | ||
56 | movq %r15, efi_scratch+8(%rip) # prev_cr3 | ||
57 | movq efi_scratch+16(%rip), %r15 # EFI pgt | ||
58 | movq %r15, %cr3 | ||
59 | 1: | ||
60 | .endm | ||
61 | |||
62 | .macro RESTORE_PGT | ||
63 | cmpb $0, efi_scratch+24(%rip) | ||
64 | je 2f | ||
65 | movq efi_scratch+8(%rip), %r15 | ||
66 | movq %r15, %cr3 | ||
67 | movq efi_scratch(%rip), %r15 | ||
68 | FLUSH_TLB_ALL | ||
69 | 2: | ||
70 | .endm | ||
71 | |||
37 | ENTRY(efi_call0) | 72 | ENTRY(efi_call0) |
38 | SAVE_XMM | 73 | SAVE_XMM |
39 | subq $32, %rsp | 74 | subq $32, %rsp |
75 | SWITCH_PGT | ||
40 | call *%rdi | 76 | call *%rdi |
77 | RESTORE_PGT | ||
41 | addq $32, %rsp | 78 | addq $32, %rsp |
42 | RESTORE_XMM | 79 | RESTORE_XMM |
43 | ret | 80 | ret |
@@ -47,7 +84,9 @@ ENTRY(efi_call1) | |||
47 | SAVE_XMM | 84 | SAVE_XMM |
48 | subq $32, %rsp | 85 | subq $32, %rsp |
49 | mov %rsi, %rcx | 86 | mov %rsi, %rcx |
87 | SWITCH_PGT | ||
50 | call *%rdi | 88 | call *%rdi |
89 | RESTORE_PGT | ||
51 | addq $32, %rsp | 90 | addq $32, %rsp |
52 | RESTORE_XMM | 91 | RESTORE_XMM |
53 | ret | 92 | ret |
@@ -57,7 +96,9 @@ ENTRY(efi_call2) | |||
57 | SAVE_XMM | 96 | SAVE_XMM |
58 | subq $32, %rsp | 97 | subq $32, %rsp |
59 | mov %rsi, %rcx | 98 | mov %rsi, %rcx |
99 | SWITCH_PGT | ||
60 | call *%rdi | 100 | call *%rdi |
101 | RESTORE_PGT | ||
61 | addq $32, %rsp | 102 | addq $32, %rsp |
62 | RESTORE_XMM | 103 | RESTORE_XMM |
63 | ret | 104 | ret |
@@ -68,7 +109,9 @@ ENTRY(efi_call3) | |||
68 | subq $32, %rsp | 109 | subq $32, %rsp |
69 | mov %rcx, %r8 | 110 | mov %rcx, %r8 |
70 | mov %rsi, %rcx | 111 | mov %rsi, %rcx |
112 | SWITCH_PGT | ||
71 | call *%rdi | 113 | call *%rdi |
114 | RESTORE_PGT | ||
72 | addq $32, %rsp | 115 | addq $32, %rsp |
73 | RESTORE_XMM | 116 | RESTORE_XMM |
74 | ret | 117 | ret |
@@ -80,7 +123,9 @@ ENTRY(efi_call4) | |||
80 | mov %r8, %r9 | 123 | mov %r8, %r9 |
81 | mov %rcx, %r8 | 124 | mov %rcx, %r8 |
82 | mov %rsi, %rcx | 125 | mov %rsi, %rcx |
126 | SWITCH_PGT | ||
83 | call *%rdi | 127 | call *%rdi |
128 | RESTORE_PGT | ||
84 | addq $32, %rsp | 129 | addq $32, %rsp |
85 | RESTORE_XMM | 130 | RESTORE_XMM |
86 | ret | 131 | ret |
@@ -93,7 +138,9 @@ ENTRY(efi_call5) | |||
93 | mov %r8, %r9 | 138 | mov %r8, %r9 |
94 | mov %rcx, %r8 | 139 | mov %rcx, %r8 |
95 | mov %rsi, %rcx | 140 | mov %rsi, %rcx |
141 | SWITCH_PGT | ||
96 | call *%rdi | 142 | call *%rdi |
143 | RESTORE_PGT | ||
97 | addq $48, %rsp | 144 | addq $48, %rsp |
98 | RESTORE_XMM | 145 | RESTORE_XMM |
99 | ret | 146 | ret |
@@ -109,8 +156,15 @@ ENTRY(efi_call6) | |||
109 | mov %r8, %r9 | 156 | mov %r8, %r9 |
110 | mov %rcx, %r8 | 157 | mov %rcx, %r8 |
111 | mov %rsi, %rcx | 158 | mov %rsi, %rcx |
159 | SWITCH_PGT | ||
112 | call *%rdi | 160 | call *%rdi |
161 | RESTORE_PGT | ||
113 | addq $48, %rsp | 162 | addq $48, %rsp |
114 | RESTORE_XMM | 163 | RESTORE_XMM |
115 | ret | 164 | ret |
116 | ENDPROC(efi_call6) | 165 | ENDPROC(efi_call6) |
166 | |||
167 | .data | ||
168 | ENTRY(efi_scratch) | ||
169 | .fill 3,8,0 | ||
170 | .byte 0 | ||
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile index 01cc29ea5ff7..0a8ee703b9fa 100644 --- a/arch/x86/platform/intel-mid/Makefile +++ b/arch/x86/platform/intel-mid/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o | 1 | obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o |
2 | obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o | ||
3 | obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o | 2 | obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o |
3 | |||
4 | # SFI specific code | 4 | # SFI specific code |
5 | ifdef CONFIG_X86_INTEL_MID | 5 | ifdef CONFIG_X86_INTEL_MID |
6 | obj-$(CONFIG_SFI) += sfi.o device_libs/ | 6 | obj-$(CONFIG_SFI) += sfi.o device_libs/ |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c index 0d942c1d26d5..69a783689d21 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c | |||
@@ -22,7 +22,9 @@ static void __init *emc1403_platform_data(void *info) | |||
22 | int intr = get_gpio_by_name("thermal_int"); | 22 | int intr = get_gpio_by_name("thermal_int"); |
23 | int intr2nd = get_gpio_by_name("thermal_alert"); | 23 | int intr2nd = get_gpio_by_name("thermal_alert"); |
24 | 24 | ||
25 | if (intr == -1 || intr2nd == -1) | 25 | if (intr < 0) |
26 | return NULL; | ||
27 | if (intr2nd < 0) | ||
26 | return NULL; | 28 | return NULL; |
27 | 29 | ||
28 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; | 30 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c index a013a4834bbe..dccae6b0413f 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c | |||
@@ -66,7 +66,7 @@ static int __init pb_keys_init(void) | |||
66 | gb[i].gpio = get_gpio_by_name(gb[i].desc); | 66 | gb[i].gpio = get_gpio_by_name(gb[i].desc); |
67 | pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, | 67 | pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, |
68 | gb[i].gpio); | 68 | gb[i].gpio); |
69 | if (gb[i].gpio == -1) | 69 | if (gb[i].gpio < 0) |
70 | continue; | 70 | continue; |
71 | 71 | ||
72 | if (i != good) | 72 | if (i != good) |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c index 15278c11f714..54226de7541a 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c | |||
@@ -21,7 +21,9 @@ static void __init *lis331dl_platform_data(void *info) | |||
21 | int intr = get_gpio_by_name("accel_int"); | 21 | int intr = get_gpio_by_name("accel_int"); |
22 | int intr2nd = get_gpio_by_name("accel_2"); | 22 | int intr2nd = get_gpio_by_name("accel_2"); |
23 | 23 | ||
24 | if (intr == -1 || intr2nd == -1) | 24 | if (intr < 0) |
25 | return NULL; | ||
26 | if (intr2nd < 0) | ||
25 | return NULL; | 27 | return NULL; |
26 | 28 | ||
27 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; | 29 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c index 94ade10024ae..2c8acbc1e9ad 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c | |||
@@ -48,7 +48,7 @@ static void __init *max7315_platform_data(void *info) | |||
48 | gpio_base = get_gpio_by_name(base_pin_name); | 48 | gpio_base = get_gpio_by_name(base_pin_name); |
49 | intr = get_gpio_by_name(intr_pin_name); | 49 | intr = get_gpio_by_name(intr_pin_name); |
50 | 50 | ||
51 | if (gpio_base == -1) | 51 | if (gpio_base < 0) |
52 | return NULL; | 52 | return NULL; |
53 | max7315->gpio_base = gpio_base; | 53 | max7315->gpio_base = gpio_base; |
54 | if (intr != -1) { | 54 | if (intr != -1) { |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c index dd28d63c84fb..cfe9a47a1e87 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c | |||
@@ -19,7 +19,7 @@ static void *mpu3050_platform_data(void *info) | |||
19 | struct i2c_board_info *i2c_info = info; | 19 | struct i2c_board_info *i2c_info = info; |
20 | int intr = get_gpio_by_name("mpu3050_int"); | 20 | int intr = get_gpio_by_name("mpu3050_int"); |
21 | 21 | ||
22 | if (intr == -1) | 22 | if (intr < 0) |
23 | return NULL; | 23 | return NULL; |
24 | 24 | ||
25 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; | 25 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c index d87182a09263..65c2a9a19db4 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c | |||
@@ -26,7 +26,7 @@ static void __init *pmic_gpio_platform_data(void *info) | |||
26 | static struct intel_pmic_gpio_platform_data pmic_gpio_pdata; | 26 | static struct intel_pmic_gpio_platform_data pmic_gpio_pdata; |
27 | int gpio_base = get_gpio_by_name("pmic_gpio_base"); | 27 | int gpio_base = get_gpio_by_name("pmic_gpio_base"); |
28 | 28 | ||
29 | if (gpio_base == -1) | 29 | if (gpio_base < 0) |
30 | gpio_base = 64; | 30 | gpio_base = 64; |
31 | pmic_gpio_pdata.gpio_base = gpio_base; | 31 | pmic_gpio_pdata.gpio_base = gpio_base; |
32 | pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; | 32 | pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c index 22881c9a6737..33be0b3be6e1 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c | |||
@@ -34,10 +34,10 @@ static void *tca6416_platform_data(void *info) | |||
34 | gpio_base = get_gpio_by_name(base_pin_name); | 34 | gpio_base = get_gpio_by_name(base_pin_name); |
35 | intr = get_gpio_by_name(intr_pin_name); | 35 | intr = get_gpio_by_name(intr_pin_name); |
36 | 36 | ||
37 | if (gpio_base == -1) | 37 | if (gpio_base < 0) |
38 | return NULL; | 38 | return NULL; |
39 | tca6416.gpio_base = gpio_base; | 39 | tca6416.gpio_base = gpio_base; |
40 | if (intr != -1) { | 40 | if (intr >= 0) { |
41 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; | 41 | i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; |
42 | tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; | 42 | tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET; |
43 | } else { | 43 | } else { |
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c index 4f702f554f6e..e0bd082a80e0 100644 --- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c +++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/console.h> | 22 | #include <linux/console.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/init.h> | ||
26 | #include <linux/io.h> | 25 | #include <linux/io.h> |
27 | 26 | ||
28 | #include <asm/fixmap.h> | 27 | #include <asm/fixmap.h> |
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index f90e290f689f..1bbedc4b0f88 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include <asm/apb_timer.h> | 35 | #include <asm/apb_timer.h> |
36 | #include <asm/reboot.h> | 36 | #include <asm/reboot.h> |
37 | 37 | ||
38 | #include "intel_mid_weak_decls.h" | ||
39 | |||
38 | /* | 40 | /* |
39 | * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, | 41 | * the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock, |
40 | * cmdline option x86_intel_mid_timer can be used to override the configuration | 42 | * cmdline option x86_intel_mid_timer can be used to override the configuration |
@@ -58,12 +60,16 @@ | |||
58 | 60 | ||
59 | enum intel_mid_timer_options intel_mid_timer_options; | 61 | enum intel_mid_timer_options intel_mid_timer_options; |
60 | 62 | ||
63 | /* intel_mid_ops to store sub arch ops */ | ||
64 | struct intel_mid_ops *intel_mid_ops; | ||
65 | /* getter function for sub arch ops*/ | ||
66 | static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT; | ||
61 | enum intel_mid_cpu_type __intel_mid_cpu_chip; | 67 | enum intel_mid_cpu_type __intel_mid_cpu_chip; |
62 | EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip); | 68 | EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip); |
63 | 69 | ||
64 | static void intel_mid_power_off(void) | 70 | static void intel_mid_power_off(void) |
65 | { | 71 | { |
66 | } | 72 | }; |
67 | 73 | ||
68 | static void intel_mid_reboot(void) | 74 | static void intel_mid_reboot(void) |
69 | { | 75 | { |
@@ -72,32 +78,6 @@ static void intel_mid_reboot(void) | |||
72 | 78 | ||
73 | static unsigned long __init intel_mid_calibrate_tsc(void) | 79 | static unsigned long __init intel_mid_calibrate_tsc(void) |
74 | { | 80 | { |
75 | unsigned long fast_calibrate; | ||
76 | u32 lo, hi, ratio, fsb; | ||
77 | |||
78 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
79 | pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi); | ||
80 | ratio = (hi >> 8) & 0x1f; | ||
81 | pr_debug("ratio is %d\n", ratio); | ||
82 | if (!ratio) { | ||
83 | pr_err("read a zero ratio, should be incorrect!\n"); | ||
84 | pr_err("force tsc ratio to 16 ...\n"); | ||
85 | ratio = 16; | ||
86 | } | ||
87 | rdmsr(MSR_FSB_FREQ, lo, hi); | ||
88 | if ((lo & 0x7) == 0x7) | ||
89 | fsb = PENWELL_FSB_FREQ_83SKU; | ||
90 | else | ||
91 | fsb = PENWELL_FSB_FREQ_100SKU; | ||
92 | fast_calibrate = ratio * fsb; | ||
93 | pr_debug("read penwell tsc %lu khz\n", fast_calibrate); | ||
94 | lapic_timer_frequency = fsb * 1000 / HZ; | ||
95 | /* mark tsc clocksource as reliable */ | ||
96 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); | ||
97 | |||
98 | if (fast_calibrate) | ||
99 | return fast_calibrate; | ||
100 | |||
101 | return 0; | 81 | return 0; |
102 | } | 82 | } |
103 | 83 | ||
@@ -125,13 +105,37 @@ static void __init intel_mid_time_init(void) | |||
125 | 105 | ||
126 | static void intel_mid_arch_setup(void) | 106 | static void intel_mid_arch_setup(void) |
127 | { | 107 | { |
128 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) | 108 | if (boot_cpu_data.x86 != 6) { |
129 | __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; | ||
130 | else { | ||
131 | pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n", | 109 | pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n", |
132 | boot_cpu_data.x86, boot_cpu_data.x86_model); | 110 | boot_cpu_data.x86, boot_cpu_data.x86_model); |
133 | __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; | 111 | __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; |
112 | goto out; | ||
134 | } | 113 | } |
114 | |||
115 | switch (boot_cpu_data.x86_model) { | ||
116 | case 0x35: | ||
117 | __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW; | ||
118 | break; | ||
119 | case 0x3C: | ||
120 | case 0x4A: | ||
121 | __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER; | ||
122 | break; | ||
123 | case 0x27: | ||
124 | default: | ||
125 | __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL; | ||
126 | break; | ||
127 | } | ||
128 | |||
129 | if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops)) | ||
130 | intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); | ||
131 | else { | ||
132 | intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); | ||
132 | pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); | ||
134 | } | ||
135 | |||
136 | out: | ||
137 | if (intel_mid_ops->arch_setup) | ||
138 | intel_mid_ops->arch_setup(); | ||
135 | } | 139 | } |
136 | 140 | ||
137 | /* MID systems don't have i8042 controller */ | 141 | /* MID systems don't have i8042 controller */ |
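
Editor's note: the intel-mid.c rework replaces the hard-coded, Penwell-only setup with a per-SoC ops table: the CPU model selects a chip id, the chip id indexes get_intel_mid_ops[], and the returned structure supplies arch_setup(). A simplified standalone model of that dispatch pattern follows; the real struct intel_mid_ops and the INTEL_MID_OPS_INIT initializer live in asm/intel-mid.h and are only assumed here.

#include <stdio.h>

struct intel_mid_ops {
        void (*arch_setup)(void);
};

static void penwell_arch_setup(void) { puts("penwell arch setup"); }
static void tangier_arch_setup(void) { puts("tangier arch setup"); }

static struct intel_mid_ops penwell_ops = { .arch_setup = penwell_arch_setup };
static struct intel_mid_ops tangier_ops = { .arch_setup = tangier_arch_setup };

static void *get_penwell_ops(void) { return &penwell_ops; }
static void *get_tangier_ops(void) { return &tangier_ops; }

enum { CHIP_PENWELL, CHIP_CLOVERVIEW, CHIP_TANGIER, CHIP_MAX };

/* Analogue of get_intel_mid_ops[] = INTEL_MID_OPS_INIT */
static void *(*get_ops[CHIP_MAX])(void) = {
        [CHIP_PENWELL]    = get_penwell_ops,
        [CHIP_CLOVERVIEW] = get_penwell_ops,    /* Cloverview reuses the Penwell ops */
        [CHIP_TANGIER]    = get_tangier_ops,
};

int main(void)
{
        int chip = CHIP_TANGIER;                /* derived from the CPU model in the kernel */
        struct intel_mid_ops *ops = get_ops[chip]();

        if (ops->arch_setup)
                ops->arch_setup();
        return 0;
}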
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h new file mode 100644 index 000000000000..a537ffc16299 --- /dev/null +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * intel_mid_weak_decls.h: Weak declarations of intel-mid.c | ||
3 | * | ||
4 | * (C) Copyright 2013 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | |||
12 | |||
13 | /* __attribute__((weak)) makes these declarations overridable */ | ||
14 | /* For every CPU addition a new get_<cpuname>_ops interface needs | ||
15 | * to be added. | ||
16 | */ | ||
17 | extern void * __cpuinit get_penwell_ops(void) __attribute__((weak)); | ||
18 | extern void * __cpuinit get_cloverview_ops(void) __attribute__((weak)); | ||
19 | extern void * __init get_tangier_ops(void) __attribute__((weak)); | ||
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c new file mode 100644 index 000000000000..4f7884eebc14 --- /dev/null +++ b/arch/x86/platform/intel-mid/mfld.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * mfld.c: Intel Medfield platform setup code | ||
3 | * | ||
4 | * (C) Copyright 2013 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | |||
14 | #include <asm/apic.h> | ||
15 | #include <asm/intel-mid.h> | ||
16 | #include <asm/intel_mid_vrtc.h> | ||
17 | |||
18 | #include "intel_mid_weak_decls.h" | ||
19 | |||
20 | static void penwell_arch_setup(void); | ||
21 | /* penwell arch ops */ | ||
22 | static struct intel_mid_ops penwell_ops = { | ||
23 | .arch_setup = penwell_arch_setup, | ||
24 | }; | ||
25 | |||
26 | static void mfld_power_off(void) | ||
27 | { | ||
28 | } | ||
29 | |||
30 | static unsigned long __init mfld_calibrate_tsc(void) | ||
31 | { | ||
32 | unsigned long fast_calibrate; | ||
33 | u32 lo, hi, ratio, fsb; | ||
34 | |||
35 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | ||
36 | pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi); | ||
37 | ratio = (hi >> 8) & 0x1f; | ||
38 | pr_debug("ratio is %d\n", ratio); | ||
39 | if (!ratio) { | ||
40 | pr_err("read a zero ratio, should be incorrect!\n"); | ||
41 | pr_err("force tsc ratio to 16 ...\n"); | ||
42 | ratio = 16; | ||
43 | } | ||
44 | rdmsr(MSR_FSB_FREQ, lo, hi); | ||
45 | if ((lo & 0x7) == 0x7) | ||
46 | fsb = FSB_FREQ_83SKU; | ||
47 | else | ||
48 | fsb = FSB_FREQ_100SKU; | ||
49 | fast_calibrate = ratio * fsb; | ||
50 | pr_debug("read penwell tsc %lu khz\n", fast_calibrate); | ||
51 | lapic_timer_frequency = fsb * 1000 / HZ; | ||
52 | /* mark tsc clocksource as reliable */ | ||
53 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); | ||
54 | |||
55 | if (fast_calibrate) | ||
56 | return fast_calibrate; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void __init penwell_arch_setup() | ||
62 | { | ||
63 | x86_platform.calibrate_tsc = mfld_calibrate_tsc; | ||
64 | pm_power_off = mfld_power_off; | ||
65 | } | ||
66 | |||
67 | void * __cpuinit get_penwell_ops() | ||
68 | { | ||
69 | return &penwell_ops; | ||
70 | } | ||
71 | |||
72 | void * __cpuinit get_cloverview_ops() | ||
73 | { | ||
74 | return &penwell_ops; | ||
75 | } | ||
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c new file mode 100644 index 000000000000..09d10159e7b7 --- /dev/null +++ b/arch/x86/platform/intel-mid/mrfl.c | |||
@@ -0,0 +1,103 @@ | |||
1 | /* | ||
2 | * mrfl.c: Intel Merrifield platform specific setup code | ||
3 | * | ||
4 | * (C) Copyright 2013 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; version 2 | ||
9 | * of the License. | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | |||
14 | #include <asm/apic.h> | ||
15 | #include <asm/intel-mid.h> | ||
16 | |||
17 | #include "intel_mid_weak_decls.h" | ||
18 | |||
19 | static unsigned long __init tangier_calibrate_tsc(void) | ||
20 | { | ||
21 | unsigned long fast_calibrate; | ||
22 | u32 lo, hi, ratio, fsb, bus_freq; | ||
23 | |||
24 | /* *********************** */ | ||
25 | /* Compute TSC:Ratio * FSB */ | ||
26 | /* *********************** */ | ||
27 | |||
28 | /* Compute Ratio */ | ||
29 | rdmsr(MSR_PLATFORM_INFO, lo, hi); | ||
30 | pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo); | ||
31 | |||
32 | ratio = (lo >> 8) & 0xFF; | ||
33 | pr_debug("ratio is %d\n", ratio); | ||
34 | if (!ratio) { | ||
35 | pr_err("Read a zero ratio, force tsc ratio to 4 ...\n"); | ||
36 | ratio = 4; | ||
37 | } | ||
38 | |||
39 | /* Compute FSB */ | ||
40 | rdmsr(MSR_FSB_FREQ, lo, hi); | ||
41 | pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n", | ||
42 | hi, lo); | ||
43 | |||
44 | bus_freq = lo & 0x7; | ||
45 | pr_debug("bus_freq = 0x%x\n", bus_freq); | ||
46 | |||
47 | if (bus_freq == 0) | ||
48 | fsb = FSB_FREQ_100SKU; | ||
49 | else if (bus_freq == 1) | ||
50 | fsb = FSB_FREQ_100SKU; | ||
51 | else if (bus_freq == 2) | ||
52 | fsb = FSB_FREQ_133SKU; | ||
53 | else if (bus_freq == 3) | ||
54 | fsb = FSB_FREQ_167SKU; | ||
55 | else if (bus_freq == 4) | ||
56 | fsb = FSB_FREQ_83SKU; | ||
57 | else if (bus_freq == 5) | ||
58 | fsb = FSB_FREQ_400SKU; | ||
59 | else if (bus_freq == 6) | ||
60 | fsb = FSB_FREQ_267SKU; | ||
61 | else if (bus_freq == 7) | ||
62 | fsb = FSB_FREQ_333SKU; | ||
63 | else { | ||
64 | BUG(); | ||
65 | pr_err("Invalid bus_freq! Setting to minimal value!\n"); | ||
66 | fsb = FSB_FREQ_100SKU; | ||
67 | } | ||
68 | |||
69 | /* TSC = FSB Freq * Resolved HFM Ratio */ | ||
70 | fast_calibrate = ratio * fsb; | ||
71 | pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate); | ||
72 | |||
73 | /* ************************************ */ | ||
74 | /* Calculate Local APIC Timer Frequency */ | ||
75 | /* ************************************ */ | ||
76 | lapic_timer_frequency = (fsb * 1000) / HZ; | ||
77 | |||
78 | pr_debug("Setting lapic_timer_frequency = %d\n", | ||
79 | lapic_timer_frequency); | ||
80 | |||
81 | /* mark tsc clocksource as reliable */ | ||
82 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE); | ||
83 | |||
84 | if (fast_calibrate) | ||
85 | return fast_calibrate; | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static void __init tangier_arch_setup(void) | ||
91 | { | ||
92 | x86_platform.calibrate_tsc = tangier_calibrate_tsc; | ||
93 | } | ||
94 | |||
95 | /* tangier arch ops */ | ||
96 | static struct intel_mid_ops tangier_ops = { | ||
97 | .arch_setup = tangier_arch_setup, | ||
98 | }; | ||
99 | |||
100 | void * __cpuinit get_tangier_ops() | ||
101 | { | ||
102 | return &tangier_ops; | ||
103 | } | ||
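
Editor's note: tangier_calibrate_tsc() boils down to TSC kHz = ratio * FSB kHz, with the bus_freq field selecting one of eight SKU frequencies. The if/else ladder can equivalently be written as a table lookup; the sketch below does that with assumed kHz values for the FSB_FREQ_* constants (they are defined in asm/intel-mid.h, not in this diff).

#include <stdio.h>
#include <stdint.h>

/* Assumed kHz values for the FSB_FREQ_* SKU constants. */
static const unsigned int tangier_fsb_khz[8] = {
        99840,          /* 0: FSB_FREQ_100SKU */
        99840,          /* 1: FSB_FREQ_100SKU */
        133000,         /* 2: FSB_FREQ_133SKU */
        167000,         /* 3: FSB_FREQ_167SKU */
        83200,          /* 4: FSB_FREQ_83SKU  */
        400000,         /* 5: FSB_FREQ_400SKU */
        267000,         /* 6: FSB_FREQ_267SKU */
        333000,         /* 7: FSB_FREQ_333SKU */
};

/* Same mapping as the if/else ladder in tangier_calibrate_tsc(). */
static unsigned long tangier_tsc_khz(uint32_t ratio, uint32_t bus_freq)
{
        if (!ratio)
                ratio = 4;                      /* kernel fallback for a zero ratio */
        return (unsigned long)ratio * tangier_fsb_khz[bus_freq & 0x7];
}

int main(void)
{
        /* Illustrative inputs; the kernel reads them from MSR_PLATFORM_INFO
         * and MSR_FSB_FREQ. */
        printf("%lu kHz\n", tangier_tsc_khz(24, 2));
        return 0;
}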
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c index c84c1ca396bf..994c40bd7cb7 100644 --- a/arch/x86/platform/intel-mid/sfi.c +++ b/arch/x86/platform/intel-mid/sfi.c | |||
@@ -224,7 +224,7 @@ int get_gpio_by_name(const char *name) | |||
224 | if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN)) | 224 | if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN)) |
225 | return pentry->pin_no; | 225 | return pentry->pin_no; |
226 | } | 226 | } |
227 | return -1; | 227 | return -EINVAL; |
228 | } | 228 | } |
229 | 229 | ||
230 | void __init intel_scu_device_register(struct platform_device *pdev) | 230 | void __init intel_scu_device_register(struct platform_device *pdev) |
@@ -250,7 +250,7 @@ static void __init intel_scu_spi_device_register(struct spi_board_info *sdev) | |||
250 | sdev->modalias); | 250 | sdev->modalias); |
251 | return; | 251 | return; |
252 | } | 252 | } |
253 | memcpy(new_dev, sdev, sizeof(*sdev)); | 253 | *new_dev = *sdev; |
254 | 254 | ||
255 | spi_devs[spi_next_dev++] = new_dev; | 255 | spi_devs[spi_next_dev++] = new_dev; |
256 | } | 256 | } |
@@ -271,7 +271,7 @@ static void __init intel_scu_i2c_device_register(int bus, | |||
271 | idev->type); | 271 | idev->type); |
272 | return; | 272 | return; |
273 | } | 273 | } |
274 | memcpy(new_dev, idev, sizeof(*idev)); | 274 | *new_dev = *idev; |
275 | 275 | ||
276 | i2c_bus[i2c_next_dev] = bus; | 276 | i2c_bus[i2c_next_dev] = bus; |
277 | i2c_devs[i2c_next_dev++] = new_dev; | 277 | i2c_devs[i2c_next_dev++] = new_dev; |
@@ -337,6 +337,8 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry, | |||
337 | pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", | 337 | pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n", |
338 | pentry->name, pentry->irq); | 338 | pentry->name, pentry->irq); |
339 | pdata = intel_mid_sfi_get_pdata(dev, pentry); | 339 | pdata = intel_mid_sfi_get_pdata(dev, pentry); |
340 | if (IS_ERR(pdata)) | ||
341 | return; | ||
340 | 342 | ||
341 | pdev = platform_device_alloc(pentry->name, 0); | 343 | pdev = platform_device_alloc(pentry->name, 0); |
342 | if (pdev == NULL) { | 344 | if (pdev == NULL) { |
@@ -370,6 +372,8 @@ static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry, | |||
370 | spi_info.chip_select); | 372 | spi_info.chip_select); |
371 | 373 | ||
372 | pdata = intel_mid_sfi_get_pdata(dev, &spi_info); | 374 | pdata = intel_mid_sfi_get_pdata(dev, &spi_info); |
375 | if (IS_ERR(pdata)) | ||
376 | return; | ||
373 | 377 | ||
374 | spi_info.platform_data = pdata; | 378 | spi_info.platform_data = pdata; |
375 | if (dev->delay) | 379 | if (dev->delay) |
@@ -395,6 +399,8 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry, | |||
395 | i2c_info.addr); | 399 | i2c_info.addr); |
396 | pdata = intel_mid_sfi_get_pdata(dev, &i2c_info); | 400 | pdata = intel_mid_sfi_get_pdata(dev, &i2c_info); |
397 | i2c_info.platform_data = pdata; | 401 | i2c_info.platform_data = pdata; |
402 | if (IS_ERR(pdata)) | ||
403 | return; | ||
398 | 404 | ||
399 | if (dev->delay) | 405 | if (dev->delay) |
400 | intel_scu_i2c_device_register(pentry->host_num, &i2c_info); | 406 | intel_scu_i2c_device_register(pentry->host_num, &i2c_info); |
@@ -443,13 +449,35 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) | |||
443 | * so we have to enable them one by one here | 449 | * so we have to enable them one by one here |
444 | */ | 450 | */ |
445 | ioapic = mp_find_ioapic(irq); | 451 | ioapic = mp_find_ioapic(irq); |
446 | irq_attr.ioapic = ioapic; | 452 | if (ioapic >= 0) { |
447 | irq_attr.ioapic_pin = irq; | 453 | irq_attr.ioapic = ioapic; |
448 | irq_attr.trigger = 1; | 454 | irq_attr.ioapic_pin = irq; |
449 | irq_attr.polarity = 1; | 455 | irq_attr.trigger = 1; |
450 | io_apic_set_pci_routing(NULL, irq, &irq_attr); | 456 | if (intel_mid_identify_cpu() == |
451 | } else | 457 | INTEL_MID_CPU_CHIP_TANGIER) { |
458 | if (!strncmp(pentry->name, | ||
459 | "r69001-ts-i2c", 13)) | ||
460 | /* active low */ | ||
461 | irq_attr.polarity = 1; | ||
462 | else if (!strncmp(pentry->name, | ||
463 | "synaptics_3202", 14)) | ||
464 | /* active low */ | ||
465 | irq_attr.polarity = 1; | ||
466 | else if (irq == 41) | ||
467 | /* fast_int_1 */ | ||
468 | irq_attr.polarity = 1; | ||
469 | else | ||
470 | /* active high */ | ||
471 | irq_attr.polarity = 0; | ||
472 | } else { | ||
473 | /* PNW and CLV go with active low */ | ||
474 | irq_attr.polarity = 1; | ||
475 | } | ||
476 | io_apic_set_pci_routing(NULL, irq, &irq_attr); | ||
477 | } | ||
478 | } else { | ||
452 | irq = 0; /* No irq */ | 479 | irq = 0; /* No irq */ |
480 | } | ||
453 | 481 | ||
454 | dev = get_device_id(pentry->type, pentry->name); | 482 | dev = get_device_id(pentry->type, pentry->name); |
455 | 483 | ||
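
Editor's note: the sfi.c hunks start bailing out when intel_mid_sfi_get_pdata() returns an error pointer. Kernel code encodes a negative errno in the last 4095 values of the pointer space via ERR_PTR(), and IS_ERR() detects it. A rough user-space imitation of that convention, with get_pdata() as a hypothetical provider used only for this demonstration:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)        { return (void *)error; }
static long PTR_ERR(const void *ptr)    { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical pdata provider; real code calls intel_mid_sfi_get_pdata(). */
static void *get_pdata(int fail)
{
        static int pdata = 42;

        return fail ? ERR_PTR(-22 /* -EINVAL */) : (void *)&pdata;
}

int main(void)
{
        void *pdata = get_pdata(1);

        if (IS_ERR(pdata)) {            /* same guard the sfi.c hunks add */
                printf("pdata error %ld, skipping this device\n", PTR_ERR(pdata));
                return 0;
        }
        printf("pdata ok: %d\n", *(int *)pdata);
        return 0;
}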
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c index e6cb80f620af..4d171e8640ef 100644 --- a/arch/x86/platform/iris/iris.c +++ b/arch/x86/platform/iris/iris.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
30 | #include <linux/init.h> | ||
31 | #include <linux/pm.h> | 30 | #include <linux/pm.h> |
32 | #include <asm/io.h> | 31 | #include <asm/io.h> |
33 | 32 | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index efe4d7220397..dfe605ac1bcd 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -433,15 +433,49 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) | |||
433 | return; | 433 | return; |
434 | } | 434 | } |
435 | 435 | ||
436 | static inline unsigned long cycles_2_us(unsigned long long cyc) | 436 | /* |
437 | * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative | ||
438 | * number, not an absolute. It converts a duration in cycles to a duration in | ||
439 | * ns. | ||
440 | */ | ||
441 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | ||
437 | { | 442 | { |
443 | struct cyc2ns_data *data = cyc2ns_read_begin(); | ||
438 | unsigned long long ns; | 444 | unsigned long long ns; |
439 | unsigned long us; | ||
440 | int cpu = smp_processor_id(); | ||
441 | 445 | ||
442 | ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR; | 446 | ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift); |
443 | us = ns / 1000; | 447 | |
444 | return us; | 448 | cyc2ns_read_end(data); |
449 | return ns; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * The reverse of the above; converts a duration in ns to a duration in cycles. | ||
454 | */ | ||
455 | static inline unsigned long long ns_2_cycles(unsigned long long ns) | ||
456 | { | ||
457 | struct cyc2ns_data *data = cyc2ns_read_begin(); | ||
458 | unsigned long long cyc; | ||
459 | |||
460 | cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul; | ||
461 | |||
462 | cyc2ns_read_end(data); | ||
463 | return cyc; | ||
464 | } | ||
465 | |||
466 | static inline unsigned long cycles_2_us(unsigned long long cyc) | ||
467 | { | ||
468 | return cycles_2_ns(cyc) / NSEC_PER_USEC; | ||
469 | } | ||
470 | |||
471 | static inline cycles_t sec_2_cycles(unsigned long sec) | ||
472 | { | ||
473 | return ns_2_cycles(sec * NSEC_PER_SEC); | ||
474 | } | ||
475 | |||
476 | static inline unsigned long long usec_2_cycles(unsigned long usec) | ||
477 | { | ||
478 | return ns_2_cycles(usec * NSEC_PER_USEC); | ||
445 | } | 479 | } |
446 | 480 | ||
447 | /* | 481 | /* |
@@ -668,16 +702,6 @@ static int wait_completion(struct bau_desc *bau_desc, | |||
668 | bcp, try); | 702 | bcp, try); |
669 | } | 703 | } |
670 | 704 | ||
671 | static inline cycles_t sec_2_cycles(unsigned long sec) | ||
672 | { | ||
673 | unsigned long ns; | ||
674 | cycles_t cyc; | ||
675 | |||
676 | ns = sec * 1000000000; | ||
677 | cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); | ||
678 | return cyc; | ||
679 | } | ||
680 | |||
681 | /* | 705 | /* |
682 | * Our retries are blocked by all destination sw ack resources being | 706 | * Our retries are blocked by all destination sw ack resources being |
683 | * in use, and a timeout is pending. In that case hardware immediately | 707 | * in use, and a timeout is pending. In that case hardware immediately |
@@ -1327,16 +1351,6 @@ static void ptc_seq_stop(struct seq_file *file, void *data) | |||
1327 | { | 1351 | { |
1328 | } | 1352 | } |
1329 | 1353 | ||
1330 | static inline unsigned long long usec_2_cycles(unsigned long microsec) | ||
1331 | { | ||
1332 | unsigned long ns; | ||
1333 | unsigned long long cyc; | ||
1334 | |||
1335 | ns = microsec * 1000; | ||
1336 | cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); | ||
1337 | return cyc; | ||
1338 | } | ||
1339 | |||
1340 | /* | 1354 | /* |
1341 | * Display the statistics thru /proc/sgi_uv/ptc_statistics | 1355 | * Display the statistics thru /proc/sgi_uv/ptc_statistics |
1342 | * 'data' points to the cpu number | 1356 | * 'data' points to the cpu number |
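
Editor's note: the new UV helpers convert between cycles and nanoseconds with the kernel's cyc2ns scale, ns = (cyc * cyc2ns_mul) >> cyc2ns_shift, and the inverse for ns_2_cycles(). A self-contained sketch of that fixed-point arithmetic, with made-up mul/shift values for a 2.4 GHz TSC; mul_u64_u32_shr() here is a portable stand-in for the kernel helper of the same name.

#include <stdio.h>
#include <stdint.h>

/* Portable stand-in for the kernel's mul_u64_u32_shr(): (a * mul) >> shift
 * with a 128-bit intermediate so the product cannot overflow. */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
        /* Made-up factors for a 2.4 GHz TSC: ns/cycle ~= 0.4167, encoded as
         * mul / 2^shift.  The kernel keeps per-CPU values in cyc2ns_data. */
        uint32_t cyc2ns_mul = 427;              /* ~0.4167 * 2^10 */
        unsigned int cyc2ns_shift = 10;

        uint64_t cycles = 2400000000ULL;        /* about one second of TSC */
        uint64_t ns  = mul_u64_u32_shr(cycles, cyc2ns_mul, cyc2ns_shift);
        uint64_t cyc = (ns << cyc2ns_shift) / cyc2ns_mul;   /* as ns_2_cycles() */

        printf("%llu cycles -> %llu ns -> %llu cycles\n",
               (unsigned long long)cycles, (unsigned long long)ns,
               (unsigned long long)cyc);
        return 0;
}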
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index a44f457e70a1..bad628a620c4 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c | |||
@@ -29,12 +29,10 @@ void __init reserve_real_mode(void) | |||
29 | void __init setup_real_mode(void) | 29 | void __init setup_real_mode(void) |
30 | { | 30 | { |
31 | u16 real_mode_seg; | 31 | u16 real_mode_seg; |
32 | u32 *rel; | 32 | const u32 *rel; |
33 | u32 count; | 33 | u32 count; |
34 | u32 *ptr; | ||
35 | u16 *seg; | ||
36 | int i; | ||
37 | unsigned char *base; | 34 | unsigned char *base; |
35 | unsigned long phys_base; | ||
38 | struct trampoline_header *trampoline_header; | 36 | struct trampoline_header *trampoline_header; |
39 | size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); | 37 | size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob); |
40 | #ifdef CONFIG_X86_64 | 38 | #ifdef CONFIG_X86_64 |
@@ -46,23 +44,23 @@ void __init setup_real_mode(void) | |||
46 | 44 | ||
47 | memcpy(base, real_mode_blob, size); | 45 | memcpy(base, real_mode_blob, size); |
48 | 46 | ||
49 | real_mode_seg = __pa(base) >> 4; | 47 | phys_base = __pa(base); |
48 | real_mode_seg = phys_base >> 4; | ||
49 | |||
50 | rel = (u32 *) real_mode_relocs; | 50 | rel = (u32 *) real_mode_relocs; |
51 | 51 | ||
52 | /* 16-bit segment relocations. */ | 52 | /* 16-bit segment relocations. */ |
53 | count = rel[0]; | 53 | count = *rel++; |
54 | rel = &rel[1]; | 54 | while (count--) { |
55 | for (i = 0; i < count; i++) { | 55 | u16 *seg = (u16 *) (base + *rel++); |
56 | seg = (u16 *) (base + rel[i]); | ||
57 | *seg = real_mode_seg; | 56 | *seg = real_mode_seg; |
58 | } | 57 | } |
59 | 58 | ||
60 | /* 32-bit linear relocations. */ | 59 | /* 32-bit linear relocations. */ |
61 | count = rel[i]; | 60 | count = *rel++; |
62 | rel = &rel[i + 1]; | 61 | while (count--) { |
63 | for (i = 0; i < count; i++) { | 62 | u32 *ptr = (u32 *) (base + *rel++); |
64 | ptr = (u32 *) (base + rel[i]); | 63 | *ptr += phys_base; |
65 | *ptr += __pa(base); | ||
66 | } | 64 | } |
67 | 65 | ||
68 | /* Must be performed *after* relocation. */ | 66 | /* Must be performed *after* relocation. */ |
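
Editor's note: the rewritten setup_real_mode() loop walks the relocation blob with a single moving pointer: a 32-bit count of 16-bit segment relocations, that many offsets, then a count of 32-bit linear relocations and their offsets. A standalone sketch of that walk on a tiny fabricated blob; the layout is inferred from the loop and the offsets below are illustrative.

#include <stdio.h>
#include <stdint.h>

static void apply_relocs(uint8_t *base, const uint32_t *rel,
                         uint16_t real_mode_seg, uint32_t phys_base)
{
        uint32_t count;

        /* 16-bit segment relocations */
        count = *rel++;
        while (count--) {
                uint16_t *seg = (uint16_t *)(base + *rel++);
                *seg = real_mode_seg;
        }

        /* 32-bit linear relocations */
        count = *rel++;
        while (count--) {
                uint32_t *ptr = (uint32_t *)(base + *rel++);
                *ptr += phys_base;
        }
}

int main(void)
{
        uint8_t blob[16] = { 0 };
        const uint32_t relocs[] = { 1, 0,       /* one 16-bit reloc at offset 0 */
                                    1, 4 };     /* one 32-bit reloc at offset 4 */

        apply_relocs(blob, relocs, 0x9000, 0x90000);
        printf("seg=%#x linear=%#x\n",
               (unsigned)*(uint16_t *)blob, (unsigned)*(uint32_t *)(blob + 4));
        return 0;
}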
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S index f932ea61d1c8..d66c607bdc58 100644 --- a/arch/x86/realmode/rm/reboot.S +++ b/arch/x86/realmode/rm/reboot.S | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/linkage.h> | 1 | #include <linux/linkage.h> |
2 | #include <linux/init.h> | ||
3 | #include <asm/segment.h> | 2 | #include <asm/segment.h> |
4 | #include <asm/page_types.h> | 3 | #include <asm/page_types.h> |
5 | #include <asm/processor-flags.h> | 4 | #include <asm/processor-flags.h> |
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S index c1b2791183e7..48ddd76bc4c3 100644 --- a/arch/x86/realmode/rm/trampoline_32.S +++ b/arch/x86/realmode/rm/trampoline_32.S | |||
@@ -20,7 +20,6 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/linkage.h> | 22 | #include <linux/linkage.h> |
23 | #include <linux/init.h> | ||
24 | #include <asm/segment.h> | 23 | #include <asm/segment.h> |
25 | #include <asm/page_types.h> | 24 | #include <asm/page_types.h> |
26 | #include "realmode.h" | 25 | #include "realmode.h" |
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index bb360dc39d21..dac7b20d2f9d 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S | |||
@@ -25,7 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/linkage.h> | 27 | #include <linux/linkage.h> |
28 | #include <linux/init.h> | ||
29 | #include <asm/pgtable_types.h> | 28 | #include <asm/pgtable_types.h> |
30 | #include <asm/page_types.h> | 29 | #include <asm/page_types.h> |
31 | #include <asm/msr.h> | 30 | #include <asm/msr.h> |
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index aabfb8380a1c..96bc506ac6de 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl | |||
@@ -357,3 +357,5 @@ | |||
357 | 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev | 357 | 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev |
358 | 349 i386 kcmp sys_kcmp | 358 | 349 i386 kcmp sys_kcmp |
359 | 350 i386 finit_module sys_finit_module | 359 | 350 i386 finit_module sys_finit_module |
360 | 351 i386 sched_setattr sys_sched_setattr | ||
361 | 352 i386 sched_getattr sys_sched_getattr | ||
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 38ae65dfd14f..a12bddc7ccea 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl | |||
@@ -320,6 +320,8 @@ | |||
320 | 311 64 process_vm_writev sys_process_vm_writev | 320 | 311 64 process_vm_writev sys_process_vm_writev |
321 | 312 common kcmp sys_kcmp | 321 | 312 common kcmp sys_kcmp |
322 | 313 common finit_module sys_finit_module | 322 | 313 common finit_module sys_finit_module |
323 | 314 common sched_setattr sys_sched_setattr | ||
324 | 315 common sched_getattr sys_sched_getattr | ||
323 | 325 | ||
324 | # | 326 | # |
325 | # x32-specific system call numbers start at 512 to avoid cache impact | 327 | # x32-specific system call numbers start at 512 to avoid cache impact |
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index f7bab68a4b83..11f9285a2ff6 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c | |||
@@ -722,15 +722,25 @@ static void percpu_init(void) | |||
722 | 722 | ||
723 | /* | 723 | /* |
724 | * Check to see if a symbol lies in the .data..percpu section. | 724 | * Check to see if a symbol lies in the .data..percpu section. |
725 | * For some as yet not understood reason the "__init_begin" | 725 | * |
726 | * symbol which immediately preceeds the .data..percpu section | 726 | * The linker incorrectly associates some symbols with the |
727 | * also shows up as it it were part of it so we do an explict | 727 | * .data..percpu section so we also need to check the symbol |
728 | * check for that symbol name and ignore it. | 728 | * name to make sure that we classify the symbol correctly. |
729 | * | ||
730 | * The GNU linker incorrectly associates: | ||
731 | * __init_begin | ||
732 | * __per_cpu_load | ||
733 | * | ||
734 | * The "gold" linker incorrectly associates: | ||
735 | * init_per_cpu__irq_stack_union | ||
736 | * init_per_cpu__gdt_page | ||
729 | */ | 737 | */ |
730 | static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) | 738 | static int is_percpu_sym(ElfW(Sym) *sym, const char *symname) |
731 | { | 739 | { |
732 | return (sym->st_shndx == per_cpu_shndx) && | 740 | return (sym->st_shndx == per_cpu_shndx) && |
733 | strcmp(symname, "__init_begin"); | 741 | strcmp(symname, "__init_begin") && |
742 | strcmp(symname, "__per_cpu_load") && | ||
743 | strncmp(symname, "init_per_cpu_", 13); | ||
734 | } | 744 | } |
735 | 745 | ||
736 | 746 | ||
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 2ada505067cc..eb5d7a56f8d4 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts) | |||
178 | 178 | ||
179 | ts->tv_nsec = 0; | 179 | ts->tv_nsec = 0; |
180 | do { | 180 | do { |
181 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 181 | seq = raw_read_seqcount_begin(&gtod->seq); |
182 | mode = gtod->clock.vclock_mode; | 182 | mode = gtod->clock.vclock_mode; |
183 | ts->tv_sec = gtod->wall_time_sec; | 183 | ts->tv_sec = gtod->wall_time_sec; |
184 | ns = gtod->wall_time_snsec; | 184 | ns = gtod->wall_time_snsec; |
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts) | |||
198 | 198 | ||
199 | ts->tv_nsec = 0; | 199 | ts->tv_nsec = 0; |
200 | do { | 200 | do { |
201 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 201 | seq = raw_read_seqcount_begin(&gtod->seq); |
202 | mode = gtod->clock.vclock_mode; | 202 | mode = gtod->clock.vclock_mode; |
203 | ts->tv_sec = gtod->monotonic_time_sec; | 203 | ts->tv_sec = gtod->monotonic_time_sec; |
204 | ns = gtod->monotonic_time_snsec; | 204 | ns = gtod->monotonic_time_snsec; |
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts) | |||
214 | { | 214 | { |
215 | unsigned long seq; | 215 | unsigned long seq; |
216 | do { | 216 | do { |
217 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 217 | seq = raw_read_seqcount_begin(&gtod->seq); |
218 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; | 218 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; |
219 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; | 219 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; |
220 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); | 220 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); |
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts) | |||
225 | { | 225 | { |
226 | unsigned long seq; | 226 | unsigned long seq; |
227 | do { | 227 | do { |
228 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 228 | seq = raw_read_seqcount_begin(&gtod->seq); |
229 | ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; | 229 | ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; |
230 | ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; | 230 | ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; |
231 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); | 231 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); |
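
Editor's note: the vDSO readers only change which accessor they call; raw_read_seqcount_begin() is the lockdep-free begin primitive, and the retry pattern itself stays the same. A toy single-threaded model of that seqcount read side follows; real kernel readers pair this with a writer that increments the sequence around updates to vsyscall_gtod_data.

#include <stdio.h>

struct gtod_data {
        unsigned int seq;
        long wall_sec;
        long wall_nsec;
};

static unsigned int read_begin(const struct gtod_data *g)
{
        unsigned int s;

        do {
                s = __atomic_load_n(&g->seq, __ATOMIC_ACQUIRE);
        } while (s & 1);                /* odd: writer in progress, spin */
        return s;
}

static int read_retry(const struct gtod_data *g, unsigned int s)
{
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
        return __atomic_load_n(&g->seq, __ATOMIC_RELAXED) != s;
}

int main(void)
{
        struct gtod_data g = { .seq = 0, .wall_sec = 100, .wall_nsec = 500 };
        unsigned int s;
        long sec, nsec;

        do {
                s = read_begin(&g);
                sec  = g.wall_sec;      /* snapshot the protected fields */
                nsec = g.wall_nsec;
        } while (read_retry(&g, s));

        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}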
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S index 01f5e3b4613c..1e13eb8c9656 100644 --- a/arch/x86/vdso/vdso.S +++ b/arch/x86/vdso/vdso.S | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <asm/page_types.h> | 1 | #include <asm/page_types.h> |
2 | #include <linux/linkage.h> | 2 | #include <linux/linkage.h> |
3 | #include <linux/init.h> | ||
4 | 3 | ||
5 | __PAGE_ALIGNED_DATA | 4 | __PAGE_ALIGNED_DATA |
6 | 5 | ||
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S index d6b9a7f42a8a..295f1c7543d8 100644 --- a/arch/x86/vdso/vdsox32.S +++ b/arch/x86/vdso/vdsox32.S | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <asm/page_types.h> | 1 | #include <asm/page_types.h> |
2 | #include <linux/linkage.h> | 2 | #include <linux/linkage.h> |
3 | #include <linux/init.h> | ||
4 | 3 | ||
5 | __PAGE_ALIGNED_DATA | 4 | __PAGE_ALIGNED_DATA |
6 | 5 | ||
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index ef021677d536..e1ee6b51dfc5 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h | |||
@@ -9,21 +9,14 @@ | |||
9 | #ifndef _XTENSA_SYSTEM_H | 9 | #ifndef _XTENSA_SYSTEM_H |
10 | #define _XTENSA_SYSTEM_H | 10 | #define _XTENSA_SYSTEM_H |
11 | 11 | ||
12 | #define smp_read_barrier_depends() do { } while(0) | ||
13 | #define read_barrier_depends() do { } while(0) | ||
14 | |||
15 | #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) | 12 | #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) |
16 | #define rmb() barrier() | 13 | #define rmb() barrier() |
17 | #define wmb() mb() | 14 | #define wmb() mb() |
18 | 15 | ||
19 | #ifdef CONFIG_SMP | 16 | #ifdef CONFIG_SMP |
20 | #error smp_* not defined | 17 | #error smp_* not defined |
21 | #else | ||
22 | #define smp_mb() barrier() | ||
23 | #define smp_rmb() barrier() | ||
24 | #define smp_wmb() barrier() | ||
25 | #endif | 18 | #endif |
26 | 19 | ||
27 | #define set_mb(var, value) do { var = value; mb(); } while (0) | 20 | #include <asm-generic/barrier.h> |
28 | 21 | ||
29 | #endif /* _XTENSA_SYSTEM_H */ | 22 | #endif /* _XTENSA_SYSTEM_H */ |