Diffstat (limited to 'arch/s390')
75 files changed, 1715 insertions, 1071 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c80235206c01..0d8cd9bbe101 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -54,6 +54,9 @@ config GENERIC_BUG | |||
54 | depends on BUG | 54 | depends on BUG |
55 | default y | 55 | default y |
56 | 56 | ||
57 | config GENERIC_BUG_RELATIVE_POINTERS | ||
58 | def_bool y | ||
59 | |||
57 | config NO_IOMEM | 60 | config NO_IOMEM |
58 | def_bool y | 61 | def_bool y |
59 | 62 | ||
@@ -87,6 +90,7 @@ config S390 | |||
87 | select HAVE_SYSCALL_TRACEPOINTS | 90 | select HAVE_SYSCALL_TRACEPOINTS |
88 | select HAVE_DYNAMIC_FTRACE | 91 | select HAVE_DYNAMIC_FTRACE |
89 | select HAVE_FUNCTION_GRAPH_TRACER | 92 | select HAVE_FUNCTION_GRAPH_TRACER |
93 | select HAVE_REGS_AND_STACK_ACCESS_API | ||
90 | select HAVE_DEFAULT_NO_SPIN_MUTEXES | 94 | select HAVE_DEFAULT_NO_SPIN_MUTEXES |
91 | select HAVE_OPROFILE | 95 | select HAVE_OPROFILE |
92 | select HAVE_KPROBES | 96 | select HAVE_KPROBES |
@@ -95,6 +99,9 @@ config S390 | |||
95 | select HAVE_ARCH_TRACEHOOK | 99 | select HAVE_ARCH_TRACEHOOK |
96 | select INIT_ALL_POSSIBLE | 100 | select INIT_ALL_POSSIBLE |
97 | select HAVE_PERF_EVENTS | 101 | select HAVE_PERF_EVENTS |
102 | select HAVE_KERNEL_GZIP | ||
103 | select HAVE_KERNEL_BZIP2 | ||
104 | select HAVE_KERNEL_LZMA | ||
98 | select ARCH_INLINE_SPIN_TRYLOCK | 105 | select ARCH_INLINE_SPIN_TRYLOCK |
99 | select ARCH_INLINE_SPIN_TRYLOCK_BH | 106 | select ARCH_INLINE_SPIN_TRYLOCK_BH |
100 | select ARCH_INLINE_SPIN_LOCK | 107 | select ARCH_INLINE_SPIN_LOCK |
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2283933a9a93..45e0c6199f36 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,4 +6,17 @@ config TRACE_IRQFLAGS_SUPPORT | |||
6 | 6 | ||
7 | source "lib/Kconfig.debug" | 7 | source "lib/Kconfig.debug" |
8 | 8 | ||
9 | config DEBUG_STRICT_USER_COPY_CHECKS | ||
10 | bool "Strict user copy size checks" | ||
11 | ---help--- | ||
12 | Enabling this option turns a certain set of sanity checks for user | ||
13 | copy operations into compile time warnings. | ||
14 | |||
15 | The copy_from_user() etc checks are there to help test if there | ||
16 | are sufficient security checks on the length argument of | ||
17 | the copy operation, by having gcc prove that the argument is | ||
18 | within bounds. | ||
19 | |||
20 | If unsure, or if you run an older (pre 4.4) gcc, say N. | ||
21 | |||
9 | endmenu | 22 | endmenu |
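
Illustration, not part of the patch: the help text above describes warnings gcc 4.4+ can emit when it cannot prove the length passed to copy_from_user() is bounded. A minimal kernel-style sketch of the pattern the option is meant to enforce (the function and parameter names here are made up for the example):

/* Hypothetical example -- not from this patch.  With strict user copy
 * checks enabled, gcc must be able to prove that "len" stays within
 * the destination buffer; an unchecked, user-controlled length would
 * instead produce a compile-time warning. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static int copy_cmd_from_user(char *dst, size_t dst_size,
			      const void __user *src, size_t len)
{
	if (len > dst_size)			/* bound the length first... */
		return -EINVAL;
	if (copy_from_user(dst, src, len))	/* ...then do the copy */
		return -EFAULT;
	return 0;
}
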
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index fc8fb20e7fc0..0da10746e0e5 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -14,6 +14,7 @@ | |||
14 | # | 14 | # |
15 | 15 | ||
16 | ifndef CONFIG_64BIT | 16 | ifndef CONFIG_64BIT |
17 | LD_BFD := elf32-s390 | ||
17 | LDFLAGS := -m elf_s390 | 18 | LDFLAGS := -m elf_s390 |
18 | KBUILD_CFLAGS += -m31 | 19 | KBUILD_CFLAGS += -m31 |
19 | KBUILD_AFLAGS += -m31 | 20 | KBUILD_AFLAGS += -m31 |
@@ -21,6 +22,7 @@ UTS_MACHINE := s390 | |||
21 | STACK_SIZE := 8192 | 22 | STACK_SIZE := 8192 |
22 | CHECKFLAGS += -D__s390__ -msize-long | 23 | CHECKFLAGS += -D__s390__ -msize-long |
23 | else | 24 | else |
25 | LD_BFD := elf64-s390 | ||
24 | LDFLAGS := -m elf64_s390 | 26 | LDFLAGS := -m elf64_s390 |
25 | MODFLAGS += -fpic -D__PIC__ | 27 | MODFLAGS += -fpic -D__PIC__ |
26 | KBUILD_CFLAGS += -m64 | 28 | KBUILD_CFLAGS += -m64 |
@@ -30,6 +32,8 @@ STACK_SIZE := 16384 | |||
30 | CHECKFLAGS += -D__s390__ -D__s390x__ | 32 | CHECKFLAGS += -D__s390__ -D__s390x__ |
31 | endif | 33 | endif |
32 | 34 | ||
35 | export LD_BFD | ||
36 | |||
33 | cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) | 37 | cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) |
34 | cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) | 38 | cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) |
35 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) | 39 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) |
@@ -85,7 +89,9 @@ KBUILD_AFLAGS += $(aflags-y) | |||
85 | OBJCOPYFLAGS := -O binary | 89 | OBJCOPYFLAGS := -O binary |
86 | LDFLAGS_vmlinux := -e start | 90 | LDFLAGS_vmlinux := -e start |
87 | 91 | ||
88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 92 | head-y := arch/s390/kernel/head.o |
93 | head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) | ||
94 | head-y += arch/s390/kernel/init_task.o | ||
89 | 95 | ||
90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 96 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ | 97 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ |
@@ -99,12 +105,12 @@ drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/ | |||
99 | 105 | ||
100 | boot := arch/s390/boot | 106 | boot := arch/s390/boot |
101 | 107 | ||
102 | all: image | 108 | all: image bzImage |
103 | 109 | ||
104 | install: vmlinux | 110 | install: vmlinux |
105 | $(Q)$(MAKE) $(build)=$(boot) $@ | 111 | $(Q)$(MAKE) $(build)=$(boot) $@ |
106 | 112 | ||
107 | image: vmlinux | 113 | image bzImage: vmlinux |
108 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 114 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
109 | 115 | ||
110 | zfcpdump: | 116 | zfcpdump: |
@@ -116,4 +122,5 @@ archclean: | |||
116 | # Don't use tabs in echo arguments | 122 | # Don't use tabs in echo arguments |
117 | define archhelp | 123 | define archhelp |
118 | echo '* image - Kernel image for IPL ($(boot)/image)' | 124 | echo '* image - Kernel image for IPL ($(boot)/image)' |
125 | echo '* bzImage - Compressed kernel image for IPL ($(boot)/bzImage)' | ||
119 | endef | 126 | endef |
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 4d97eef36b8d..8800cf090694 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -9,10 +9,18 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \ | |||
9 | EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. | 9 | EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. |
10 | 10 | ||
11 | targets := image | 11 | targets := image |
12 | targets += bzImage | ||
13 | subdir- := compressed | ||
12 | 14 | ||
13 | $(obj)/image: vmlinux FORCE | 15 | $(obj)/image: vmlinux FORCE |
14 | $(call if_changed,objcopy) | 16 | $(call if_changed,objcopy) |
15 | 17 | ||
18 | $(obj)/bzImage: $(obj)/compressed/vmlinux FORCE | ||
19 | $(call if_changed,objcopy) | ||
20 | |||
21 | $(obj)/compressed/vmlinux: FORCE | ||
22 | $(Q)$(MAKE) $(build)=$(obj)/compressed $@ | ||
23 | |||
16 | install: $(CONFIGURE) $(obj)/image | 24 | install: $(CONFIGURE) $(obj)/image |
17 | sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ | 25 | sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \ |
18 | System.map Kerntypes "$(INSTALL_PATH)" | 26 | System.map Kerntypes "$(INSTALL_PATH)" |
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
new file mode 100644
index 000000000000..6e4a67ad07e1
--- /dev/null
+++ b/arch/s390/boot/compressed/Makefile
@@ -0,0 +1,60 @@ | |||
1 | # | ||
2 | # linux/arch/s390/boot/compressed/Makefile | ||
3 | # | ||
4 | # create a compressed vmlinux image from the original vmlinux | ||
5 | # | ||
6 | |||
7 | BITS := $(if $(CONFIG_64BIT),64,31) | ||
8 | |||
9 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ | ||
10 | vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o | ||
11 | |||
12 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | ||
13 | KBUILD_CFLAGS += $(cflags-y) | ||
14 | KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) | ||
15 | KBUILD_CFLAGS += $(call cc-option,-ffreestanding) | ||
16 | |||
17 | GCOV_PROFILE := n | ||
18 | |||
19 | OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) | ||
20 | OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o | ||
21 | |||
22 | LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T | ||
23 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) | ||
24 | $(call if_changed,ld) | ||
25 | @: | ||
26 | |||
27 | sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p' | ||
28 | |||
29 | quiet_cmd_sizes = GEN $@ | ||
30 | cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@ | ||
31 | |||
32 | $(obj)/sizes.h: vmlinux | ||
33 | $(call if_changed,sizes) | ||
34 | |||
35 | AFLAGS_head$(BITS).o += -I$(obj) | ||
36 | $(obj)/head$(BITS).o: $(obj)/sizes.h | ||
37 | |||
38 | CFLAGS_misc.o += -I$(obj) | ||
39 | $(obj)/misc.o: $(obj)/sizes.h | ||
40 | |||
41 | OBJCOPYFLAGS_vmlinux.bin := -R .comment -S | ||
42 | $(obj)/vmlinux.bin: vmlinux | ||
43 | $(call if_changed,objcopy) | ||
44 | |||
45 | vmlinux.bin.all-y := $(obj)/vmlinux.bin | ||
46 | |||
47 | suffix-$(CONFIG_KERNEL_GZIP) := gz | ||
48 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 | ||
49 | suffix-$(CONFIG_KERNEL_LZMA) := lzma | ||
50 | |||
51 | $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) | ||
52 | $(call if_changed,gzip) | ||
53 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) | ||
54 | $(call if_changed,bzip2) | ||
55 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) | ||
56 | $(call if_changed,lzma) | ||
57 | |||
58 | LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T | ||
59 | $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) | ||
60 | $(call if_changed,ld) | ||
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
new file mode 100644
index 000000000000..2a5523a32bcc
--- /dev/null
+++ b/arch/s390/boot/compressed/head31.S
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Startup glue code to uncompress the kernel | ||
3 | * | ||
4 | * Copyright IBM Corp. 2010 | ||
5 | * | ||
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <asm/asm-offsets.h> | ||
11 | #include <asm/thread_info.h> | ||
12 | #include <asm/page.h> | ||
13 | #include "sizes.h" | ||
14 | |||
15 | __HEAD | ||
16 | .globl startup_continue | ||
17 | startup_continue: | ||
18 | basr %r13,0 # get base | ||
19 | .LPG1: | ||
20 | # setup stack | ||
21 | l %r15,.Lstack-.LPG1(%r13) | ||
22 | ahi %r15,-96 | ||
23 | l %r1,.Ldecompress-.LPG1(%r13) | ||
24 | basr %r14,%r1 | ||
25 | # setup registers for memory mover & branch to target | ||
26 | lr %r4,%r2 | ||
27 | l %r2,.Loffset-.LPG1(%r13) | ||
28 | la %r4,0(%r2,%r4) | ||
29 | l %r3,.Lmvsize-.LPG1(%r13) | ||
30 | lr %r5,%r3 | ||
31 | # move the memory mover someplace safe | ||
32 | la %r1,0x200 | ||
33 | mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) | ||
34 | # decompress image is started at 0x11000 | ||
35 | lr %r6,%r2 | ||
36 | br %r1 | ||
37 | mover: | ||
38 | mvcle %r2,%r4,0 | ||
39 | jo mover | ||
40 | br %r6 | ||
41 | mover_end: | ||
42 | |||
43 | .align 8 | ||
44 | .Lstack: | ||
45 | .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) | ||
46 | .Ldecompress: | ||
47 | .long decompress_kernel | ||
48 | .Loffset: | ||
49 | .long 0x11000 | ||
50 | .Lmvsize: | ||
51 | .long SZ__bss_start | ||
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
new file mode 100644
index 000000000000..2982cb140550
--- /dev/null
+++ b/arch/s390/boot/compressed/head64.S
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * Startup glue code to uncompress the kernel | ||
3 | * | ||
4 | * Copyright IBM Corp. 2010 | ||
5 | * | ||
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <asm/asm-offsets.h> | ||
11 | #include <asm/thread_info.h> | ||
12 | #include <asm/page.h> | ||
13 | #include "sizes.h" | ||
14 | |||
15 | __HEAD | ||
16 | .globl startup_continue | ||
17 | startup_continue: | ||
18 | basr %r13,0 # get base | ||
19 | .LPG1: | ||
20 | # setup stack | ||
21 | lg %r15,.Lstack-.LPG1(%r13) | ||
22 | aghi %r15,-160 | ||
23 | brasl %r14,decompress_kernel | ||
24 | # setup registers for memory mover & branch to target | ||
25 | lgr %r4,%r2 | ||
26 | lg %r2,.Loffset-.LPG1(%r13) | ||
27 | la %r4,0(%r2,%r4) | ||
28 | lg %r3,.Lmvsize-.LPG1(%r13) | ||
29 | lgr %r5,%r3 | ||
30 | # move the memory mover someplace safe | ||
31 | la %r1,0x200 | ||
32 | mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) | ||
33 | # decompress image is started at 0x11000 | ||
34 | lgr %r6,%r2 | ||
35 | br %r1 | ||
36 | mover: | ||
37 | mvcle %r2,%r4,0 | ||
38 | jo mover | ||
39 | br %r6 | ||
40 | mover_end: | ||
41 | |||
42 | .align 8 | ||
43 | .Lstack: | ||
44 | .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) | ||
45 | .Loffset: | ||
46 | .quad 0x11000 | ||
47 | .Lmvsize: | ||
48 | .quad SZ__bss_start | ||
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
new file mode 100644
index 000000000000..a97d69525829
--- /dev/null
+++ b/arch/s390/boot/compressed/misc.c
@@ -0,0 +1,158 @@ | |||
1 | /* | ||
2 | * Definitions and wrapper functions for kernel decompressor | ||
3 | * | ||
4 | * Copyright IBM Corp. 2010 | ||
5 | * | ||
6 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <asm/uaccess.h> | ||
10 | #include <asm/page.h> | ||
11 | #include <asm/ipl.h> | ||
12 | #include "sizes.h" | ||
13 | |||
14 | /* | ||
15 | * gzip declarations | ||
16 | */ | ||
17 | #define STATIC static | ||
18 | |||
19 | #undef memset | ||
20 | #undef memcpy | ||
21 | #undef memmove | ||
22 | #define memzero(s, n) memset((s), 0, (n)) | ||
23 | |||
24 | /* Symbols defined by linker scripts */ | ||
25 | extern char input_data[]; | ||
26 | extern int input_len; | ||
27 | extern int _text; | ||
28 | extern int _end; | ||
29 | |||
30 | static void error(char *m); | ||
31 | |||
32 | static unsigned long free_mem_ptr; | ||
33 | static unsigned long free_mem_end_ptr; | ||
34 | |||
35 | #ifdef CONFIG_HAVE_KERNEL_BZIP2 | ||
36 | #define HEAP_SIZE 0x400000 | ||
37 | #else | ||
38 | #define HEAP_SIZE 0x10000 | ||
39 | #endif | ||
40 | |||
41 | #ifdef CONFIG_KERNEL_GZIP | ||
42 | #include "../../../../lib/decompress_inflate.c" | ||
43 | #endif | ||
44 | |||
45 | #ifdef CONFIG_KERNEL_BZIP2 | ||
46 | #include "../../../../lib/decompress_bunzip2.c" | ||
47 | #endif | ||
48 | |||
49 | #ifdef CONFIG_KERNEL_LZMA | ||
50 | #include "../../../../lib/decompress_unlzma.c" | ||
51 | #endif | ||
52 | |||
53 | extern _sclp_print_early(const char *); | ||
54 | |||
55 | int puts(const char *s) | ||
56 | { | ||
57 | _sclp_print_early(s); | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | void *memset(void *s, int c, size_t n) | ||
62 | { | ||
63 | char *xs; | ||
64 | |||
65 | if (c == 0) | ||
66 | return __builtin_memset(s, 0, n); | ||
67 | |||
68 | xs = (char *) s; | ||
69 | if (n > 0) | ||
70 | do { | ||
71 | *xs++ = c; | ||
72 | } while (--n > 0); | ||
73 | return s; | ||
74 | } | ||
75 | |||
76 | void *memcpy(void *__dest, __const void *__src, size_t __n) | ||
77 | { | ||
78 | return __builtin_memcpy(__dest, __src, __n); | ||
79 | } | ||
80 | |||
81 | void *memmove(void *__dest, __const void *__src, size_t __n) | ||
82 | { | ||
83 | char *d; | ||
84 | const char *s; | ||
85 | |||
86 | if (__dest <= __src) | ||
87 | return __builtin_memcpy(__dest, __src, __n); | ||
88 | d = __dest + __n; | ||
89 | s = __src + __n; | ||
90 | while (__n--) | ||
91 | *--d = *--s; | ||
92 | return __dest; | ||
93 | } | ||
94 | |||
95 | static void error(char *x) | ||
96 | { | ||
97 | unsigned long long psw = 0x000a0000deadbeefULL; | ||
98 | |||
99 | puts("\n\n"); | ||
100 | puts(x); | ||
101 | puts("\n\n -- System halted"); | ||
102 | |||
103 | asm volatile("lpsw %0" : : "Q" (psw)); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Safe guard the ipl parameter block against a memory area that will be | ||
108 | * overwritten. The validity check for the ipl parameter block is complex | ||
109 | * (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to | ||
110 | * the ipl parameter block intersects with the passed memory area we can | ||
111 | * safely assume that we can read from that memory. In that case just copy | ||
112 | * the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter | ||
113 | * block. | ||
114 | */ | ||
115 | static void check_ipl_parmblock(void *start, unsigned long size) | ||
116 | { | ||
117 | void *src, *dst; | ||
118 | |||
119 | src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr; | ||
120 | if (src + PAGE_SIZE <= start || src >= start + size) | ||
121 | return; | ||
122 | dst = (void *) IPL_PARMBLOCK_ORIGIN; | ||
123 | memmove(dst, src, PAGE_SIZE); | ||
124 | S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN; | ||
125 | } | ||
126 | |||
127 | unsigned long decompress_kernel(void) | ||
128 | { | ||
129 | unsigned long output_addr; | ||
130 | unsigned char *output; | ||
131 | |||
132 | free_mem_ptr = (unsigned long)&_end; | ||
133 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | ||
134 | output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); | ||
135 | |||
136 | check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); | ||
137 | |||
138 | #ifdef CONFIG_BLK_DEV_INITRD | ||
139 | /* | ||
140 | * Move the initrd right behind the end of the decompressed | ||
141 | * kernel image. | ||
142 | */ | ||
143 | if (INITRD_START && INITRD_SIZE && | ||
144 | INITRD_START < (unsigned long) output + SZ__bss_start) { | ||
145 | check_ipl_parmblock(output + SZ__bss_start, | ||
146 | INITRD_START + INITRD_SIZE); | ||
147 | memmove(output + SZ__bss_start, | ||
148 | (void *) INITRD_START, INITRD_SIZE); | ||
149 | INITRD_START = (unsigned long) output + SZ__bss_start; | ||
150 | } | ||
151 | #endif | ||
152 | |||
153 | puts("Uncompressing Linux... "); | ||
154 | decompress(input_data, input_len, NULL, NULL, output, NULL, error); | ||
155 | puts("Ok, booting the kernel.\n"); | ||
156 | return (unsigned long) output; | ||
157 | } | ||
158 | |||
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
new file mode 100644
index 000000000000..d80f79d8dd9c
--- /dev/null
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -0,0 +1,55 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | ||
2 | |||
3 | #ifdef CONFIG_64BIT | ||
4 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | ||
5 | OUTPUT_ARCH(s390:64-bit) | ||
6 | #else | ||
7 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
8 | OUTPUT_ARCH(s390) | ||
9 | #endif | ||
10 | |||
11 | ENTRY(startup) | ||
12 | |||
13 | SECTIONS | ||
14 | { | ||
15 | /* Be careful parts of head_64.S assume startup_32 is at | ||
16 | * address 0. | ||
17 | */ | ||
18 | . = 0; | ||
19 | .head.text : { | ||
20 | _head = . ; | ||
21 | HEAD_TEXT | ||
22 | _ehead = . ; | ||
23 | } | ||
24 | .rodata.compressed : { | ||
25 | *(.rodata.compressed) | ||
26 | } | ||
27 | .text : { | ||
28 | _text = .; /* Text */ | ||
29 | *(.text) | ||
30 | *(.text.*) | ||
31 | _etext = . ; | ||
32 | } | ||
33 | .rodata : { | ||
34 | _rodata = . ; | ||
35 | *(.rodata) /* read-only data */ | ||
36 | *(.rodata.*) | ||
37 | _erodata = . ; | ||
38 | } | ||
39 | .data : { | ||
40 | _data = . ; | ||
41 | *(.data) | ||
42 | *(.data.*) | ||
43 | _edata = . ; | ||
44 | } | ||
45 | . = ALIGN(256); | ||
46 | .bss : { | ||
47 | _bss = . ; | ||
48 | *(.bss) | ||
49 | *(.bss.*) | ||
50 | *(COMMON) | ||
51 | . = ALIGN(8); /* For convenience during zeroing */ | ||
52 | _ebss = .; | ||
53 | } | ||
54 | _end = .; | ||
55 | } | ||
diff --git a/arch/s390/boot/compressed/vmlinux.scr b/arch/s390/boot/compressed/vmlinux.scr
new file mode 100644
index 000000000000..f02382ae5c48
--- /dev/null
+++ b/arch/s390/boot/compressed/vmlinux.scr
@@ -0,0 +1,10 @@ | |||
1 | SECTIONS | ||
2 | { | ||
3 | .rodata.compressed : { | ||
4 | input_len = .; | ||
5 | LONG(input_data_end - input_data) input_data = .; | ||
6 | *(.data) | ||
7 | output_len = . - 4; | ||
8 | input_data_end = .; | ||
9 | } | ||
10 | } | ||
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 6be4503201ac..58f46734465f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -78,14 +78,14 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key, | |||
78 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); | 78 | struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); |
79 | int ret; | 79 | int ret; |
80 | 80 | ||
81 | sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; | 81 | sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
82 | sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & | 82 | sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags & |
83 | CRYPTO_TFM_REQ_MASK); | 83 | CRYPTO_TFM_REQ_MASK); |
84 | 84 | ||
85 | ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); | 85 | ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len); |
86 | if (ret) { | 86 | if (ret) { |
87 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | 87 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
88 | tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags & | 88 | tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags & |
89 | CRYPTO_TFM_RES_MASK); | 89 | CRYPTO_TFM_RES_MASK); |
90 | } | 90 | } |
91 | return ret; | 91 | return ret; |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index b416aa11b91e..7ae71cc56973 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -36,6 +36,13 @@ CONFIG_LOCK_KERNEL=y | |||
36 | CONFIG_INIT_ENV_ARG_LIMIT=32 | 36 | CONFIG_INIT_ENV_ARG_LIMIT=32 |
37 | CONFIG_LOCALVERSION="" | 37 | CONFIG_LOCALVERSION="" |
38 | CONFIG_LOCALVERSION_AUTO=y | 38 | CONFIG_LOCALVERSION_AUTO=y |
39 | CONFIG_HAVE_KERNEL_GZIP=y | ||
40 | CONFIG_HAVE_KERNEL_BZIP2=y | ||
41 | CONFIG_HAVE_KERNEL_LZMA=y | ||
42 | CONFIG_KERNEL_GZIP=y | ||
43 | # CONFIG_KERNEL_BZIP2 is not set | ||
44 | # CONFIG_KERNEL_LZMA is not set | ||
45 | # CONFIG_KERNEL_LZO is not set | ||
39 | CONFIG_SWAP=y | 46 | CONFIG_SWAP=y |
40 | CONFIG_SYSVIPC=y | 47 | CONFIG_SYSVIPC=y |
41 | CONFIG_SYSVIPC_SYSCTL=y | 48 | CONFIG_SYSVIPC_SYSCTL=y |
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 2b92d501425f..87cf523192e9 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -488,7 +488,7 @@ out: | |||
488 | 488 | ||
489 | static int diag224(void *ptr) | 489 | static int diag224(void *ptr) |
490 | { | 490 | { |
491 | int rc = -ENOTSUPP; | 491 | int rc = -EOPNOTSUPP; |
492 | 492 | ||
493 | asm volatile( | 493 | asm volatile( |
494 | " diag %1,%2,0x224\n" | 494 | " diag %1,%2,0x224\n" |
@@ -507,7 +507,7 @@ static int diag224_get_name_table(void) | |||
507 | return -ENOMEM; | 507 | return -ENOMEM; |
508 | if (diag224(diag224_cpu_names)) { | 508 | if (diag224(diag224_cpu_names)) { |
509 | kfree(diag224_cpu_names); | 509 | kfree(diag224_cpu_names); |
510 | return -ENOTSUPP; | 510 | return -EOPNOTSUPP; |
511 | } | 511 | } |
512 | EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); | 512 | EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); |
513 | return 0; | 513 | return 0; |
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2a113d6a7dfd..451bfbb9db3d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -18,8 +18,6 @@ | |||
18 | 18 | ||
19 | #define ATOMIC_INIT(i) { (i) } | 19 | #define ATOMIC_INIT(i) { (i) } |
20 | 20 | ||
21 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
22 | |||
23 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | 21 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ |
24 | int old_val, new_val; \ | 22 | int old_val, new_val; \ |
25 | asm volatile( \ | 23 | asm volatile( \ |
@@ -35,26 +33,6 @@ | |||
35 | new_val; \ | 33 | new_val; \ |
36 | }) | 34 | }) |
37 | 35 | ||
38 | #else /* __GNUC__ */ | ||
39 | |||
40 | #define __CS_LOOP(ptr, op_val, op_string) ({ \ | ||
41 | int old_val, new_val; \ | ||
42 | asm volatile( \ | ||
43 | " l %0,0(%3)\n" \ | ||
44 | "0: lr %1,%0\n" \ | ||
45 | op_string " %1,%4\n" \ | ||
46 | " cs %0,%1,0(%3)\n" \ | ||
47 | " jl 0b" \ | ||
48 | : "=&d" (old_val), "=&d" (new_val), \ | ||
49 | "=m" (((atomic_t *)(ptr))->counter) \ | ||
50 | : "a" (ptr), "d" (op_val), \ | ||
51 | "m" (((atomic_t *)(ptr))->counter) \ | ||
52 | : "cc", "memory"); \ | ||
53 | new_val; \ | ||
54 | }) | ||
55 | |||
56 | #endif /* __GNUC__ */ | ||
57 | |||
58 | static inline int atomic_read(const atomic_t *v) | 36 | static inline int atomic_read(const atomic_t *v) |
59 | { | 37 | { |
60 | barrier(); | 38 | barrier(); |
@@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v) | |||
101 | 79 | ||
102 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | 80 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
103 | { | 81 | { |
104 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
105 | asm volatile( | 82 | asm volatile( |
106 | " cs %0,%2,%1" | 83 | " cs %0,%2,%1" |
107 | : "+d" (old), "=Q" (v->counter) | 84 | : "+d" (old), "=Q" (v->counter) |
108 | : "d" (new), "Q" (v->counter) | 85 | : "d" (new), "Q" (v->counter) |
109 | : "cc", "memory"); | 86 | : "cc", "memory"); |
110 | #else /* __GNUC__ */ | ||
111 | asm volatile( | ||
112 | " cs %0,%3,0(%2)" | ||
113 | : "+d" (old), "=m" (v->counter) | ||
114 | : "a" (v), "d" (new), "m" (v->counter) | ||
115 | : "cc", "memory"); | ||
116 | #endif /* __GNUC__ */ | ||
117 | return old; | 87 | return old; |
118 | } | 88 | } |
119 | 89 | ||
@@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
140 | 110 | ||
141 | #ifdef CONFIG_64BIT | 111 | #ifdef CONFIG_64BIT |
142 | 112 | ||
143 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
144 | |||
145 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | 113 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ |
146 | long long old_val, new_val; \ | 114 | long long old_val, new_val; \ |
147 | asm volatile( \ | 115 | asm volatile( \ |
@@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
157 | new_val; \ | 125 | new_val; \ |
158 | }) | 126 | }) |
159 | 127 | ||
160 | #else /* __GNUC__ */ | ||
161 | |||
162 | #define __CSG_LOOP(ptr, op_val, op_string) ({ \ | ||
163 | long long old_val, new_val; \ | ||
164 | asm volatile( \ | ||
165 | " lg %0,0(%3)\n" \ | ||
166 | "0: lgr %1,%0\n" \ | ||
167 | op_string " %1,%4\n" \ | ||
168 | " csg %0,%1,0(%3)\n" \ | ||
169 | " jl 0b" \ | ||
170 | : "=&d" (old_val), "=&d" (new_val), \ | ||
171 | "=m" (((atomic_t *)(ptr))->counter) \ | ||
172 | : "a" (ptr), "d" (op_val), \ | ||
173 | "m" (((atomic_t *)(ptr))->counter) \ | ||
174 | : "cc", "memory"); \ | ||
175 | new_val; \ | ||
176 | }) | ||
177 | |||
178 | #endif /* __GNUC__ */ | ||
179 | |||
180 | static inline long long atomic64_read(const atomic64_t *v) | 128 | static inline long long atomic64_read(const atomic64_t *v) |
181 | { | 129 | { |
182 | barrier(); | 130 | barrier(); |
@@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) | |||
214 | static inline long long atomic64_cmpxchg(atomic64_t *v, | 162 | static inline long long atomic64_cmpxchg(atomic64_t *v, |
215 | long long old, long long new) | 163 | long long old, long long new) |
216 | { | 164 | { |
217 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
218 | asm volatile( | 165 | asm volatile( |
219 | " csg %0,%2,%1" | 166 | " csg %0,%2,%1" |
220 | : "+d" (old), "=Q" (v->counter) | 167 | : "+d" (old), "=Q" (v->counter) |
221 | : "d" (new), "Q" (v->counter) | 168 | : "d" (new), "Q" (v->counter) |
222 | : "cc", "memory"); | 169 | : "cc", "memory"); |
223 | #else /* __GNUC__ */ | ||
224 | asm volatile( | ||
225 | " csg %0,%3,0(%2)" | ||
226 | : "+d" (old), "=m" (v->counter) | ||
227 | : "a" (v), "d" (new), "m" (v->counter) | ||
228 | : "cc", "memory"); | ||
229 | #endif /* __GNUC__ */ | ||
230 | return old; | 170 | return old; |
231 | } | 171 | } |
232 | 172 | ||
@@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v) | |||
243 | register_pair rp; | 183 | register_pair rp; |
244 | 184 | ||
245 | asm volatile( | 185 | asm volatile( |
246 | " lm %0,%N0,0(%1)" | 186 | " lm %0,%N0,%1" |
247 | : "=&d" (rp) | 187 | : "=&d" (rp) : "Q" (v->counter) ); |
248 | : "a" (&v->counter), "m" (v->counter) | ||
249 | ); | ||
250 | return rp.pair; | 188 | return rp.pair; |
251 | } | 189 | } |
252 | 190 | ||
@@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |||
255 | register_pair rp = {.pair = i}; | 193 | register_pair rp = {.pair = i}; |
256 | 194 | ||
257 | asm volatile( | 195 | asm volatile( |
258 | " stm %1,%N1,0(%2)" | 196 | " stm %1,%N1,%0" |
259 | : "=m" (v->counter) | 197 | : "=Q" (v->counter) : "d" (rp) ); |
260 | : "d" (rp), "a" (&v->counter) | ||
261 | ); | ||
262 | } | 198 | } |
263 | 199 | ||
264 | static inline long long atomic64_xchg(atomic64_t *v, long long new) | 200 | static inline long long atomic64_xchg(atomic64_t *v, long long new) |
@@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new) | |||
267 | register_pair rp_old; | 203 | register_pair rp_old; |
268 | 204 | ||
269 | asm volatile( | 205 | asm volatile( |
270 | " lm %0,%N0,0(%2)\n" | 206 | " lm %0,%N0,%1\n" |
271 | "0: cds %0,%3,0(%2)\n" | 207 | "0: cds %0,%2,%1\n" |
272 | " jl 0b\n" | 208 | " jl 0b\n" |
273 | : "=&d" (rp_old), "+m" (v->counter) | 209 | : "=&d" (rp_old), "=Q" (v->counter) |
274 | : "a" (&v->counter), "d" (rp_new) | 210 | : "d" (rp_new), "Q" (v->counter) |
275 | : "cc"); | 211 | : "cc"); |
276 | return rp_old.pair; | 212 | return rp_old.pair; |
277 | } | 213 | } |
@@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, | |||
283 | register_pair rp_new = {.pair = new}; | 219 | register_pair rp_new = {.pair = new}; |
284 | 220 | ||
285 | asm volatile( | 221 | asm volatile( |
286 | " cds %0,%3,0(%2)" | 222 | " cds %0,%2,%1" |
287 | : "+&d" (rp_old), "+m" (v->counter) | 223 | : "+&d" (rp_old), "=Q" (v->counter) |
288 | : "a" (&v->counter), "d" (rp_new) | 224 | : "d" (rp_new), "Q" (v->counter) |
289 | : "cc"); | 225 | : "cc"); |
290 | return rp_old.pair; | 226 | return rp_old.pair; |
291 | } | 227 | } |
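
For readers less used to the cs/csg instructions, the __CS_LOOP/__CSG_LOOP macros kept above are the classic compare-and-swap retry loop. A rough user-space C equivalent using the GCC __atomic builtins (illustration only, not the kernel implementation):

/* Portable sketch of the compare-and-swap retry loop behind __CS_LOOP;
 * illustration only.  Build with e.g. gcc -Wall cas_demo.c */
#include <stdio.h>

static int atomic_add_return_demo(int *counter, int delta)
{
	int old_val, new_val;

	old_val = __atomic_load_n(counter, __ATOMIC_RELAXED);
	do {
		new_val = old_val + delta;
		/* On failure, old_val is refreshed with the current value,
		 * mirroring the "jl 0b" retry after a failed cs. */
	} while (!__atomic_compare_exchange_n(counter, &old_val, new_val,
					      0, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new_val;
}

int main(void)
{
	int counter = 40;

	printf("%d\n", atomic_add_return_demo(&counter, 2));	/* prints 42 */
	return 0;
}
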
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b30606f6d523..2e05972c5085 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -71,8 +71,6 @@ extern const char _sb_findmap[]; | |||
71 | #define __BITOPS_AND "nr" | 71 | #define __BITOPS_AND "nr" |
72 | #define __BITOPS_XOR "xr" | 72 | #define __BITOPS_XOR "xr" |
73 | 73 | ||
74 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
75 | |||
76 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 74 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
77 | asm volatile( \ | 75 | asm volatile( \ |
78 | " l %0,%2\n" \ | 76 | " l %0,%2\n" \ |
@@ -85,22 +83,6 @@ extern const char _sb_findmap[]; | |||
85 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 83 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
86 | : "cc"); | 84 | : "cc"); |
87 | 85 | ||
88 | #else /* __GNUC__ */ | ||
89 | |||
90 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | ||
91 | asm volatile( \ | ||
92 | " l %0,0(%4)\n" \ | ||
93 | "0: lr %1,%0\n" \ | ||
94 | __op_string " %1,%3\n" \ | ||
95 | " cs %0,%1,0(%4)\n" \ | ||
96 | " jl 0b" \ | ||
97 | : "=&d" (__old), "=&d" (__new), \ | ||
98 | "=m" (*(unsigned long *) __addr) \ | ||
99 | : "d" (__val), "a" (__addr), \ | ||
100 | "m" (*(unsigned long *) __addr) : "cc"); | ||
101 | |||
102 | #endif /* __GNUC__ */ | ||
103 | |||
104 | #else /* __s390x__ */ | 86 | #else /* __s390x__ */ |
105 | 87 | ||
106 | #define __BITOPS_ALIGN 7 | 88 | #define __BITOPS_ALIGN 7 |
@@ -109,8 +91,6 @@ extern const char _sb_findmap[]; | |||
109 | #define __BITOPS_AND "ngr" | 91 | #define __BITOPS_AND "ngr" |
110 | #define __BITOPS_XOR "xgr" | 92 | #define __BITOPS_XOR "xgr" |
111 | 93 | ||
112 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
113 | |||
114 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | 94 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ |
115 | asm volatile( \ | 95 | asm volatile( \ |
116 | " lg %0,%2\n" \ | 96 | " lg %0,%2\n" \ |
@@ -123,23 +103,6 @@ extern const char _sb_findmap[]; | |||
123 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ | 103 | : "d" (__val), "Q" (*(unsigned long *) __addr) \ |
124 | : "cc"); | 104 | : "cc"); |
125 | 105 | ||
126 | #else /* __GNUC__ */ | ||
127 | |||
128 | #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ | ||
129 | asm volatile( \ | ||
130 | " lg %0,0(%4)\n" \ | ||
131 | "0: lgr %1,%0\n" \ | ||
132 | __op_string " %1,%3\n" \ | ||
133 | " csg %0,%1,0(%4)\n" \ | ||
134 | " jl 0b" \ | ||
135 | : "=&d" (__old), "=&d" (__new), \ | ||
136 | "=m" (*(unsigned long *) __addr) \ | ||
137 | : "d" (__val), "a" (__addr), \ | ||
138 | "m" (*(unsigned long *) __addr) : "cc"); | ||
139 | |||
140 | |||
141 | #endif /* __GNUC__ */ | ||
142 | |||
143 | #endif /* __s390x__ */ | 106 | #endif /* __s390x__ */ |
144 | 107 | ||
145 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) | 108 | #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) |
@@ -261,9 +224,8 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) | |||
261 | 224 | ||
262 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 225 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
263 | asm volatile( | 226 | asm volatile( |
264 | " oc 0(1,%1),0(%2)" | 227 | " oc %O0(1,%R0),%1" |
265 | : "=m" (*(char *) addr) : "a" (addr), | 228 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
266 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); | ||
267 | } | 229 | } |
268 | 230 | ||
269 | static inline void | 231 | static inline void |
@@ -290,9 +252,8 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) | |||
290 | 252 | ||
291 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 253 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
292 | asm volatile( | 254 | asm volatile( |
293 | " nc 0(1,%1),0(%2)" | 255 | " nc %O0(1,%R0),%1" |
294 | : "=m" (*(char *) addr) : "a" (addr), | 256 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); |
295 | "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); | ||
296 | } | 257 | } |
297 | 258 | ||
298 | static inline void | 259 | static inline void |
@@ -318,9 +279,8 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) | |||
318 | 279 | ||
319 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 280 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
320 | asm volatile( | 281 | asm volatile( |
321 | " xc 0(1,%1),0(%2)" | 282 | " xc %O0(1,%R0),%1" |
322 | : "=m" (*(char *) addr) : "a" (addr), | 283 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); |
323 | "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); | ||
324 | } | 284 | } |
325 | 285 | ||
326 | static inline void | 286 | static inline void |
@@ -349,10 +309,9 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
349 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 309 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
350 | ch = *(unsigned char *) addr; | 310 | ch = *(unsigned char *) addr; |
351 | asm volatile( | 311 | asm volatile( |
352 | " oc 0(1,%1),0(%2)" | 312 | " oc %O0(1,%R0),%1" |
353 | : "=m" (*(char *) addr) | 313 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
354 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), | 314 | : "cc", "memory"); |
355 | "m" (*(char *) addr) : "cc", "memory"); | ||
356 | return (ch >> (nr & 7)) & 1; | 315 | return (ch >> (nr & 7)) & 1; |
357 | } | 316 | } |
358 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) | 317 | #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) |
@@ -369,10 +328,9 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
369 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 328 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
370 | ch = *(unsigned char *) addr; | 329 | ch = *(unsigned char *) addr; |
371 | asm volatile( | 330 | asm volatile( |
372 | " nc 0(1,%1),0(%2)" | 331 | " nc %O0(1,%R0),%1" |
373 | : "=m" (*(char *) addr) | 332 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) |
374 | : "a" (addr), "a" (_ni_bitmap + (nr & 7)), | 333 | : "cc", "memory"); |
375 | "m" (*(char *) addr) : "cc", "memory"); | ||
376 | return (ch >> (nr & 7)) & 1; | 334 | return (ch >> (nr & 7)) & 1; |
377 | } | 335 | } |
378 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) | 336 | #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) |
@@ -389,10 +347,9 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
389 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); | 347 | addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); |
390 | ch = *(unsigned char *) addr; | 348 | ch = *(unsigned char *) addr; |
391 | asm volatile( | 349 | asm volatile( |
392 | " xc 0(1,%1),0(%2)" | 350 | " xc %O0(1,%R0),%1" |
393 | : "=m" (*(char *) addr) | 351 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
394 | : "a" (addr), "a" (_oi_bitmap + (nr & 7)), | 352 | : "cc", "memory"); |
395 | "m" (*(char *) addr) : "cc", "memory"); | ||
396 | return (ch >> (nr & 7)) & 1; | 353 | return (ch >> (nr & 7)) & 1; |
397 | } | 354 | } |
398 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) | 355 | #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) |
@@ -591,11 +548,11 @@ static inline unsigned long __load_ulong_le(const unsigned long *p, | |||
591 | p = (unsigned long *)((unsigned long) p + offset); | 548 | p = (unsigned long *)((unsigned long) p + offset); |
592 | #ifndef __s390x__ | 549 | #ifndef __s390x__ |
593 | asm volatile( | 550 | asm volatile( |
594 | " ic %0,0(%1)\n" | 551 | " ic %0,%O1(%R1)\n" |
595 | " icm %0,2,1(%1)\n" | 552 | " icm %0,2,%O1+1(%R1)\n" |
596 | " icm %0,4,2(%1)\n" | 553 | " icm %0,4,%O1+2(%R1)\n" |
597 | " icm %0,8,3(%1)" | 554 | " icm %0,8,%O1+3(%R1)" |
598 | : "=&d" (word) : "a" (p), "m" (*p) : "cc"); | 555 | : "=&d" (word) : "Q" (*p) : "cc"); |
599 | #else | 556 | #else |
600 | asm volatile( | 557 | asm volatile( |
601 | " lrvg %0,%1" | 558 | " lrvg %0,%1" |
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index b1066b9fb5f8..9beeb9db9b23 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -5,12 +5,6 @@ | |||
5 | 5 | ||
6 | #ifdef CONFIG_BUG | 6 | #ifdef CONFIG_BUG |
7 | 7 | ||
8 | #ifdef CONFIG_64BIT | ||
9 | #define S390_LONG ".quad" | ||
10 | #else | ||
11 | #define S390_LONG ".long" | ||
12 | #endif | ||
13 | |||
14 | #ifdef CONFIG_DEBUG_BUGVERBOSE | 8 | #ifdef CONFIG_DEBUG_BUGVERBOSE |
15 | 9 | ||
16 | #define __EMIT_BUG(x) do { \ | 10 | #define __EMIT_BUG(x) do { \ |
@@ -21,7 +15,7 @@ | |||
21 | "2: .asciz \""__FILE__"\"\n" \ | 15 | "2: .asciz \""__FILE__"\"\n" \ |
22 | ".previous\n" \ | 16 | ".previous\n" \ |
23 | ".section __bug_table,\"a\"\n" \ | 17 | ".section __bug_table,\"a\"\n" \ |
24 | "3:\t" S390_LONG "\t1b,2b\n" \ | 18 | "3: .long 1b-3b,2b-3b\n" \ |
25 | " .short %0,%1\n" \ | 19 | " .short %0,%1\n" \ |
26 | " .org 3b+%2\n" \ | 20 | " .org 3b+%2\n" \ |
27 | ".previous\n" \ | 21 | ".previous\n" \ |
@@ -37,7 +31,7 @@ | |||
37 | "0: j 0b+2\n" \ | 31 | "0: j 0b+2\n" \ |
38 | "1:\n" \ | 32 | "1:\n" \ |
39 | ".section __bug_table,\"a\"\n" \ | 33 | ".section __bug_table,\"a\"\n" \ |
40 | "2:\t" S390_LONG "\t1b\n" \ | 34 | "2: .long 1b-2b\n" \ |
41 | " .short %0\n" \ | 35 | " .short %0\n" \ |
42 | " .org 2b+%1\n" \ | 36 | " .org 2b+%1\n" \ |
43 | ".previous\n" \ | 37 | ".previous\n" \ |
diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h
index 2185a6d619d3..749a97e61bea 100644
--- a/arch/s390/include/asm/crw.h
+++ b/arch/s390/include/asm/crw.h
@@ -32,6 +32,7 @@ typedef void (*crw_handler_t)(struct crw *, struct crw *, int); | |||
32 | extern int crw_register_handler(int rsc, crw_handler_t handler); | 32 | extern int crw_register_handler(int rsc, crw_handler_t handler); |
33 | extern void crw_unregister_handler(int rsc); | 33 | extern void crw_unregister_handler(int rsc); |
34 | extern void crw_handle_channel_report(void); | 34 | extern void crw_handle_channel_report(void); |
35 | void crw_wait_for_channel_report(void); | ||
35 | 36 | ||
36 | #define NR_RSCS 16 | 37 | #define NR_RSCS 16 |
37 | 38 | ||
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 80ef58c61970..538e1b36a726 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -145,11 +145,11 @@ static inline int etr_setr(struct etr_eacr *ctrl) | |||
145 | int rc = -ENOSYS; | 145 | int rc = -ENOSYS; |
146 | 146 | ||
147 | asm volatile( | 147 | asm volatile( |
148 | " .insn s,0xb2160000,0(%2)\n" | 148 | " .insn s,0xb2160000,%1\n" |
149 | "0: la %0,0\n" | 149 | "0: la %0,0\n" |
150 | "1:\n" | 150 | "1:\n" |
151 | EX_TABLE(0b,1b) | 151 | EX_TABLE(0b,1b) |
152 | : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); | 152 | : "+d" (rc) : "Q" (*ctrl)); |
153 | return rc; | 153 | return rc; |
154 | } | 154 | } |
155 | 155 | ||
@@ -159,11 +159,11 @@ static inline int etr_stetr(struct etr_aib *aib) | |||
159 | int rc = -ENOSYS; | 159 | int rc = -ENOSYS; |
160 | 160 | ||
161 | asm volatile( | 161 | asm volatile( |
162 | " .insn s,0xb2170000,0(%2)\n" | 162 | " .insn s,0xb2170000,%1\n" |
163 | "0: la %0,0\n" | 163 | "0: la %0,0\n" |
164 | "1:\n" | 164 | "1:\n" |
165 | EX_TABLE(0b,1b) | 165 | EX_TABLE(0b,1b) |
166 | : "+d" (rc) : "m" (*aib), "a" (aib)); | 166 | : "+d" (rc) : "Q" (*aib)); |
167 | return rc; | 167 | return rc; |
168 | } | 168 | } |
169 | 169 | ||
@@ -174,11 +174,11 @@ static inline int etr_steai(struct etr_aib *aib, unsigned int func) | |||
174 | int rc = -ENOSYS; | 174 | int rc = -ENOSYS; |
175 | 175 | ||
176 | asm volatile( | 176 | asm volatile( |
177 | " .insn s,0xb2b30000,0(%2)\n" | 177 | " .insn s,0xb2b30000,%1\n" |
178 | "0: la %0,0\n" | 178 | "0: la %0,0\n" |
179 | "1:\n" | 179 | "1:\n" |
180 | EX_TABLE(0b,1b) | 180 | EX_TABLE(0b,1b) |
181 | : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); | 181 | : "+d" (rc) : "Q" (*aib), "d" (reg0)); |
182 | return rc; | 182 | return rc; |
183 | } | 183 | } |
184 | 184 | ||
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index c2fb432f576a..15b3ac253898 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,6 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | 10 | ||
11 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
12 | |||
13 | /* store then or system mask. */ | 11 | /* store then or system mask. */ |
14 | #define __raw_local_irq_stosm(__or) \ | 12 | #define __raw_local_irq_stosm(__or) \ |
15 | ({ \ | 13 | ({ \ |
@@ -36,40 +34,6 @@ | |||
36 | asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ | 34 | asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ |
37 | }) | 35 | }) |
38 | 36 | ||
39 | #else /* __GNUC__ */ | ||
40 | |||
41 | /* store then or system mask. */ | ||
42 | #define __raw_local_irq_stosm(__or) \ | ||
43 | ({ \ | ||
44 | unsigned long __mask; \ | ||
45 | asm volatile( \ | ||
46 | " stosm 0(%1),%2" \ | ||
47 | : "=m" (__mask) \ | ||
48 | : "a" (&__mask), "i" (__or) : "memory"); \ | ||
49 | __mask; \ | ||
50 | }) | ||
51 | |||
52 | /* store then and system mask. */ | ||
53 | #define __raw_local_irq_stnsm(__and) \ | ||
54 | ({ \ | ||
55 | unsigned long __mask; \ | ||
56 | asm volatile( \ | ||
57 | " stnsm 0(%1),%2" \ | ||
58 | : "=m" (__mask) \ | ||
59 | : "a" (&__mask), "i" (__and) : "memory"); \ | ||
60 | __mask; \ | ||
61 | }) | ||
62 | |||
63 | /* set system mask. */ | ||
64 | #define __raw_local_irq_ssm(__mask) \ | ||
65 | ({ \ | ||
66 | asm volatile( \ | ||
67 | " ssm 0(%0)" \ | ||
68 | : : "a" (&__mask), "m" (__mask) : "memory"); \ | ||
69 | }) | ||
70 | |||
71 | #endif /* __GNUC__ */ | ||
72 | |||
73 | /* interrupt control.. */ | 37 | /* interrupt control.. */ |
74 | static inline unsigned long raw_local_irq_enable(void) | 38 | static inline unsigned long raw_local_irq_enable(void) |
75 | { | 39 | { |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index c25dfac7dd76..05527c040b7a 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,141 +1,16 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-s390/lowcore.h | 2 | * Copyright IBM Corp. 1999,2010 |
3 | * | 3 | * Author(s): Hartmut Penner <hp@de.ibm.com>, |
4 | * S390 version | 4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, |
5 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Denis Joseph Barrow, |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com), | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | ||
9 | */ | 6 | */ |
10 | 7 | ||
11 | #ifndef _ASM_S390_LOWCORE_H | 8 | #ifndef _ASM_S390_LOWCORE_H |
12 | #define _ASM_S390_LOWCORE_H | 9 | #define _ASM_S390_LOWCORE_H |
13 | 10 | ||
14 | #define __LC_IPL_PARMBLOCK_PTR 0x0014 | ||
15 | #define __LC_EXT_PARAMS 0x0080 | ||
16 | #define __LC_CPU_ADDRESS 0x0084 | ||
17 | #define __LC_EXT_INT_CODE 0x0086 | ||
18 | |||
19 | #define __LC_SVC_ILC 0x0088 | ||
20 | #define __LC_SVC_INT_CODE 0x008a | ||
21 | #define __LC_PGM_ILC 0x008c | ||
22 | #define __LC_PGM_INT_CODE 0x008e | ||
23 | |||
24 | #define __LC_PER_ATMID 0x0096 | ||
25 | #define __LC_PER_ADDRESS 0x0098 | ||
26 | #define __LC_PER_ACCESS_ID 0x00a1 | ||
27 | #define __LC_AR_MODE_ID 0x00a3 | ||
28 | |||
29 | #define __LC_SUBCHANNEL_ID 0x00b8 | ||
30 | #define __LC_SUBCHANNEL_NR 0x00ba | ||
31 | #define __LC_IO_INT_PARM 0x00bc | ||
32 | #define __LC_IO_INT_WORD 0x00c0 | ||
33 | #define __LC_STFL_FAC_LIST 0x00c8 | ||
34 | #define __LC_MCCK_CODE 0x00e8 | ||
35 | |||
36 | #define __LC_DUMP_REIPL 0x0e00 | ||
37 | |||
38 | #ifndef __s390x__ | ||
39 | #define __LC_EXT_OLD_PSW 0x0018 | ||
40 | #define __LC_SVC_OLD_PSW 0x0020 | ||
41 | #define __LC_PGM_OLD_PSW 0x0028 | ||
42 | #define __LC_MCK_OLD_PSW 0x0030 | ||
43 | #define __LC_IO_OLD_PSW 0x0038 | ||
44 | #define __LC_EXT_NEW_PSW 0x0058 | ||
45 | #define __LC_SVC_NEW_PSW 0x0060 | ||
46 | #define __LC_PGM_NEW_PSW 0x0068 | ||
47 | #define __LC_MCK_NEW_PSW 0x0070 | ||
48 | #define __LC_IO_NEW_PSW 0x0078 | ||
49 | #define __LC_SAVE_AREA 0x0200 | ||
50 | #define __LC_RETURN_PSW 0x0240 | ||
51 | #define __LC_RETURN_MCCK_PSW 0x0248 | ||
52 | #define __LC_SYNC_ENTER_TIMER 0x0250 | ||
53 | #define __LC_ASYNC_ENTER_TIMER 0x0258 | ||
54 | #define __LC_EXIT_TIMER 0x0260 | ||
55 | #define __LC_USER_TIMER 0x0268 | ||
56 | #define __LC_SYSTEM_TIMER 0x0270 | ||
57 | #define __LC_STEAL_TIMER 0x0278 | ||
58 | #define __LC_LAST_UPDATE_TIMER 0x0280 | ||
59 | #define __LC_LAST_UPDATE_CLOCK 0x0288 | ||
60 | #define __LC_CURRENT 0x0290 | ||
61 | #define __LC_THREAD_INFO 0x0294 | ||
62 | #define __LC_KERNEL_STACK 0x0298 | ||
63 | #define __LC_ASYNC_STACK 0x029c | ||
64 | #define __LC_PANIC_STACK 0x02a0 | ||
65 | #define __LC_KERNEL_ASCE 0x02a4 | ||
66 | #define __LC_USER_ASCE 0x02a8 | ||
67 | #define __LC_USER_EXEC_ASCE 0x02ac | ||
68 | #define __LC_CPUID 0x02b0 | ||
69 | #define __LC_INT_CLOCK 0x02c8 | ||
70 | #define __LC_MACHINE_FLAGS 0x02d8 | ||
71 | #define __LC_FTRACE_FUNC 0x02dc | ||
72 | #define __LC_IRB 0x0300 | ||
73 | #define __LC_PFAULT_INTPARM 0x0080 | ||
74 | #define __LC_CPU_TIMER_SAVE_AREA 0x00d8 | ||
75 | #define __LC_CLOCK_COMP_SAVE_AREA 0x00e0 | ||
76 | #define __LC_PSW_SAVE_AREA 0x0100 | ||
77 | #define __LC_PREFIX_SAVE_AREA 0x0108 | ||
78 | #define __LC_AREGS_SAVE_AREA 0x0120 | ||
79 | #define __LC_FPREGS_SAVE_AREA 0x0160 | ||
80 | #define __LC_GPREGS_SAVE_AREA 0x0180 | ||
81 | #define __LC_CREGS_SAVE_AREA 0x01c0 | ||
82 | #else /* __s390x__ */ | ||
83 | #define __LC_LAST_BREAK 0x0110 | ||
84 | #define __LC_EXT_OLD_PSW 0x0130 | ||
85 | #define __LC_SVC_OLD_PSW 0x0140 | ||
86 | #define __LC_PGM_OLD_PSW 0x0150 | ||
87 | #define __LC_MCK_OLD_PSW 0x0160 | ||
88 | #define __LC_IO_OLD_PSW 0x0170 | ||
89 | #define __LC_RESTART_PSW 0x01a0 | ||
90 | #define __LC_EXT_NEW_PSW 0x01b0 | ||
91 | #define __LC_SVC_NEW_PSW 0x01c0 | ||
92 | #define __LC_PGM_NEW_PSW 0x01d0 | ||
93 | #define __LC_MCK_NEW_PSW 0x01e0 | ||
94 | #define __LC_IO_NEW_PSW 0x01f0 | ||
95 | #define __LC_SAVE_AREA 0x0200 | ||
96 | #define __LC_RETURN_PSW 0x0280 | ||
97 | #define __LC_RETURN_MCCK_PSW 0x0290 | ||
98 | #define __LC_SYNC_ENTER_TIMER 0x02a0 | ||
99 | #define __LC_ASYNC_ENTER_TIMER 0x02a8 | ||
100 | #define __LC_EXIT_TIMER 0x02b0 | ||
101 | #define __LC_USER_TIMER 0x02b8 | ||
102 | #define __LC_SYSTEM_TIMER 0x02c0 | ||
103 | #define __LC_STEAL_TIMER 0x02c8 | ||
104 | #define __LC_LAST_UPDATE_TIMER 0x02d0 | ||
105 | #define __LC_LAST_UPDATE_CLOCK 0x02d8 | ||
106 | #define __LC_CURRENT 0x02e0 | ||
107 | #define __LC_THREAD_INFO 0x02e8 | ||
108 | #define __LC_KERNEL_STACK 0x02f0 | ||
109 | #define __LC_ASYNC_STACK 0x02f8 | ||
110 | #define __LC_PANIC_STACK 0x0300 | ||
111 | #define __LC_KERNEL_ASCE 0x0308 | ||
112 | #define __LC_USER_ASCE 0x0310 | ||
113 | #define __LC_USER_EXEC_ASCE 0x0318 | ||
114 | #define __LC_CPUID 0x0320 | ||
115 | #define __LC_INT_CLOCK 0x0340 | ||
116 | #define __LC_VDSO_PER_CPU 0x0350 | ||
117 | #define __LC_MACHINE_FLAGS 0x0358 | ||
118 | #define __LC_FTRACE_FUNC 0x0360 | ||
119 | #define __LC_IRB 0x0380 | ||
120 | #define __LC_PASTE 0x03c0 | ||
121 | #define __LC_PFAULT_INTPARM 0x11b8 | ||
122 | #define __LC_FPREGS_SAVE_AREA 0x1200 | ||
123 | #define __LC_GPREGS_SAVE_AREA 0x1280 | ||
124 | #define __LC_PSW_SAVE_AREA 0x1300 | ||
125 | #define __LC_PREFIX_SAVE_AREA 0x1318 | ||
126 | #define __LC_FP_CREG_SAVE_AREA 0x131c | ||
127 | #define __LC_TODREG_SAVE_AREA 0x1324 | ||
128 | #define __LC_CPU_TIMER_SAVE_AREA 0x1328 | ||
129 | #define __LC_CLOCK_COMP_SAVE_AREA 0x1331 | ||
130 | #define __LC_AREGS_SAVE_AREA 0x1340 | ||
131 | #define __LC_CREGS_SAVE_AREA 0x1380 | ||
132 | #endif /* __s390x__ */ | ||
133 | |||
134 | #ifndef __ASSEMBLY__ | ||
135 | |||
136 | #include <asm/cpu.h> | ||
137 | #include <asm/ptrace.h> | ||
138 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <asm/ptrace.h> | ||
13 | #include <asm/cpu.h> | ||
139 | 14 | ||
140 | void restart_int_handler(void); | 15 | void restart_int_handler(void); |
141 | void ext_int_handler(void); | 16 | void ext_int_handler(void); |
@@ -144,7 +19,12 @@ void pgm_check_handler(void); | |||
144 | void mcck_int_handler(void); | 19 | void mcck_int_handler(void); |
145 | void io_int_handler(void); | 20 | void io_int_handler(void); |
146 | 21 | ||
147 | struct save_area_s390 { | 22 | #ifdef CONFIG_32BIT |
23 | |||
24 | #define LC_ORDER 0 | ||
25 | #define LC_PAGES 1 | ||
26 | |||
27 | struct save_area { | ||
148 | u32 ext_save; | 28 | u32 ext_save; |
149 | u64 timer; | 29 | u64 timer; |
150 | u64 clk_cmp; | 30 | u64 clk_cmp; |
@@ -156,54 +36,13 @@ struct save_area_s390 { | |||
156 | u64 fp_regs[4]; | 36 | u64 fp_regs[4]; |
157 | u32 gp_regs[16]; | 37 | u32 gp_regs[16]; |
158 | u32 ctrl_regs[16]; | 38 | u32 ctrl_regs[16]; |
159 | } __attribute__((packed)); | 39 | } __packed; |
160 | 40 | ||
161 | struct save_area_s390x { | 41 | struct _lowcore { |
162 | u64 fp_regs[16]; | ||
163 | u64 gp_regs[16]; | ||
164 | u8 psw[16]; | ||
165 | u8 pad1[8]; | ||
166 | u32 pref_reg; | ||
167 | u32 fp_ctrl_reg; | ||
168 | u8 pad2[4]; | ||
169 | u32 tod_reg; | ||
170 | u64 timer; | ||
171 | u64 clk_cmp; | ||
172 | u8 pad3[8]; | ||
173 | u32 acc_regs[16]; | ||
174 | u64 ctrl_regs[16]; | ||
175 | } __attribute__((packed)); | ||
176 | |||
177 | union save_area { | ||
178 | struct save_area_s390 s390; | ||
179 | struct save_area_s390x s390x; | ||
180 | }; | ||
181 | |||
182 | #define SAVE_AREA_BASE_S390 0xd4 | ||
183 | #define SAVE_AREA_BASE_S390X 0x1200 | ||
184 | |||
185 | #ifndef __s390x__ | ||
186 | #define SAVE_AREA_SIZE sizeof(struct save_area_s390) | ||
187 | #define SAVE_AREA_BASE SAVE_AREA_BASE_S390 | ||
188 | #else | ||
189 | #define SAVE_AREA_SIZE sizeof(struct save_area_s390x) | ||
190 | #define SAVE_AREA_BASE SAVE_AREA_BASE_S390X | ||
191 | #endif | ||
192 | |||
193 | #ifndef __s390x__ | ||
194 | #define LC_ORDER 0 | ||
195 | #else | ||
196 | #define LC_ORDER 1 | ||
197 | #endif | ||
198 | |||
199 | #define LC_PAGES (1UL << LC_ORDER) | ||
200 | |||
201 | struct _lowcore | ||
202 | { | ||
203 | #ifndef __s390x__ | ||
204 | /* 0x0000 - 0x01ff: defined by architecture */ | ||
205 | psw_t restart_psw; /* 0x0000 */ | 42 | psw_t restart_psw; /* 0x0000 */ |
206 | __u32 ccw2[4]; /* 0x0008 */ | 43 | psw_t restart_old_psw; /* 0x0008 */ |
44 | __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */ | ||
45 | __u32 ipl_parmblock_ptr; /* 0x0014 */ | ||
207 | psw_t external_old_psw; /* 0x0018 */ | 46 | psw_t external_old_psw; /* 0x0018 */ |
208 | psw_t svc_old_psw; /* 0x0020 */ | 47 | psw_t svc_old_psw; /* 0x0020 */ |
209 | psw_t program_old_psw; /* 0x0028 */ | 48 | psw_t program_old_psw; /* 0x0028 */ |
@@ -229,7 +68,9 @@ struct _lowcore | |||
229 | __u32 monitor_code; /* 0x009c */ | 68 | __u32 monitor_code; /* 0x009c */ |
230 | __u8 exc_access_id; /* 0x00a0 */ | 69 | __u8 exc_access_id; /* 0x00a0 */ |
231 | __u8 per_access_id; /* 0x00a1 */ | 70 | __u8 per_access_id; /* 0x00a1 */ |
232 | __u8 pad_0x00a2[0x00b8-0x00a2]; /* 0x00a2 */ | 71 | __u8 op_access_id; /* 0x00a2 */ |
72 | __u8 ar_access_id; /* 0x00a3 */ | ||
73 | __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ | ||
233 | __u16 subchannel_id; /* 0x00b8 */ | 74 | __u16 subchannel_id; /* 0x00b8 */ |
234 | __u16 subchannel_nr; /* 0x00ba */ | 75 | __u16 subchannel_nr; /* 0x00ba */ |
235 | __u32 io_int_parm; /* 0x00bc */ | 76 | __u32 io_int_parm; /* 0x00bc */ |
@@ -245,8 +86,9 @@ struct _lowcore | |||
245 | __u32 external_damage_code; /* 0x00f4 */ | 86 | __u32 external_damage_code; /* 0x00f4 */ |
246 | __u32 failing_storage_address; /* 0x00f8 */ | 87 | __u32 failing_storage_address; /* 0x00f8 */ |
247 | __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ | 88 | __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ |
248 | __u32 st_status_fixed_logout[4]; /* 0x0100 */ | 89 | psw_t psw_save_area; /* 0x0100 */ |
249 | __u8 pad_0x0110[0x0120-0x0110]; /* 0x0110 */ | 90 | __u32 prefixreg_save_area; /* 0x0108 */ |
91 | __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */ | ||
250 | 92 | ||
251 | /* CPU register save area: defined by architecture */ | 93 | /* CPU register save area: defined by architecture */ |
252 | __u32 access_regs_save_area[16]; /* 0x0120 */ | 94 | __u32 access_regs_save_area[16]; /* 0x0120 */ |
@@ -310,10 +152,32 @@ struct _lowcore | |||
310 | 152 | ||
311 | /* Align to the top 1k of prefix area */ | 153 | /* Align to the top 1k of prefix area */ |
312 | __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ | 154 | __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ |
313 | #else /* !__s390x__ */ | 155 | } __packed; |
314 | /* 0x0000 - 0x01ff: defined by architecture */ | 156 | |
315 | __u32 ccw1[2]; /* 0x0000 */ | 157 | #else /* CONFIG_32BIT */ |
316 | __u32 ccw2[4]; /* 0x0008 */ | 158 | |
159 | #define LC_ORDER 1 | ||
160 | #define LC_PAGES 2 | ||
161 | |||
162 | struct save_area { | ||
163 | u64 fp_regs[16]; | ||
164 | u64 gp_regs[16]; | ||
165 | u8 psw[16]; | ||
166 | u8 pad1[8]; | ||
167 | u32 pref_reg; | ||
168 | u32 fp_ctrl_reg; | ||
169 | u8 pad2[4]; | ||
170 | u32 tod_reg; | ||
171 | u64 timer; | ||
172 | u64 clk_cmp; | ||
173 | u8 pad3[8]; | ||
174 | u32 acc_regs[16]; | ||
175 | u64 ctrl_regs[16]; | ||
176 | } __packed; | ||
177 | |||
178 | struct _lowcore { | ||
179 | __u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */ | ||
180 | __u32 ipl_parmblock_ptr; /* 0x0014 */ | ||
317 | __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ | 181 | __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ |
318 | __u32 ext_params; /* 0x0080 */ | 182 | __u32 ext_params; /* 0x0080 */ |
319 | __u16 cpu_addr; /* 0x0084 */ | 183 | __u16 cpu_addr; /* 0x0084 */ |
@@ -344,7 +208,9 @@ struct _lowcore | |||
344 | __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ | 208 | __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ |
345 | __u32 external_damage_code; /* 0x00f4 */ | 209 | __u32 external_damage_code; /* 0x00f4 */ |
346 | addr_t failing_storage_address; /* 0x00f8 */ | 210 | addr_t failing_storage_address; /* 0x00f8 */ |
347 | __u8 pad_0x0100[0x0120-0x0100]; /* 0x0100 */ | 211 | __u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */ |
212 | __u64 breaking_event_addr; /* 0x0110 */ | ||
213 | __u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */ | ||
348 | psw_t restart_old_psw; /* 0x0120 */ | 214 | psw_t restart_old_psw; /* 0x0120 */ |
349 | psw_t external_old_psw; /* 0x0130 */ | 215 | psw_t external_old_psw; /* 0x0130 */ |
350 | psw_t svc_old_psw; /* 0x0140 */ | 216 | psw_t svc_old_psw; /* 0x0140 */ |
@@ -425,7 +291,7 @@ struct _lowcore | |||
425 | /* CPU register save area: defined by architecture */ | 291 | /* CPU register save area: defined by architecture */ |
426 | __u64 floating_pt_save_area[16]; /* 0x1200 */ | 292 | __u64 floating_pt_save_area[16]; /* 0x1200 */ |
427 | __u64 gpregs_save_area[16]; /* 0x1280 */ | 293 | __u64 gpregs_save_area[16]; /* 0x1280 */ |
428 | __u32 st_status_fixed_logout[4]; /* 0x1300 */ | 294 | psw_t psw_save_area; /* 0x1300 */ |
429 | __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ | 295 | __u8 pad_0x1310[0x1318-0x1310]; /* 0x1310 */ |
430 | __u32 prefixreg_save_area; /* 0x1318 */ | 296 | __u32 prefixreg_save_area; /* 0x1318 */ |
431 | __u32 fpt_creg_save_area; /* 0x131c */ | 297 | __u32 fpt_creg_save_area; /* 0x131c */ |
@@ -439,10 +305,12 @@ struct _lowcore | |||
439 | 305 | ||
440 | /* align to the top of the prefix area */ | 306 | /* align to the top of the prefix area */ |
441 | __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ | 307 | __u8 pad_0x1400[0x2000-0x1400]; /* 0x1400 */ |
442 | #endif /* !__s390x__ */ | 308 | } __packed; |
443 | } __attribute__((packed)); /* End structure*/ | 309 | |
310 | #endif /* CONFIG_32BIT */ | ||
444 | 311 | ||
445 | #define S390_lowcore (*((struct _lowcore *) 0)) | 312 | #define S390_lowcore (*((struct _lowcore *) 0)) |
313 | |||
446 | extern struct _lowcore *lowcore_ptr[]; | 314 | extern struct _lowcore *lowcore_ptr[]; |
447 | 315 | ||
448 | static inline void set_prefix(__u32 address) | 316 | static inline void set_prefix(__u32 address) |
@@ -458,6 +326,4 @@ static inline __u32 store_prefix(void) | |||
458 | return address; | 326 | return address; |
459 | } | 327 | } |
460 | 328 | ||
461 | #endif | 329 | #endif /* _ASM_S390_LOWCORE_H */ |
462 | |||
463 | #endif | ||
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 5e9daf5d7f22..af650fb47206 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -107,9 +107,6 @@ typedef pte_t *pgtable_t; | |||
107 | #define __pgd(x) ((pgd_t) { (x) } ) | 107 | #define __pgd(x) ((pgd_t) { (x) } ) |
108 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 108 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
109 | 109 | ||
110 | /* default storage key used for all pages */ | ||
111 | extern unsigned int default_storage_key; | ||
112 | |||
113 | static inline void | 110 | static inline void |
114 | page_set_storage_key(unsigned long addr, unsigned int skey) | 111 | page_set_storage_key(unsigned long addr, unsigned int skey) |
115 | { | 112 | { |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index e2fa79cf0614..9b5b9189c15e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -43,7 +43,7 @@ extern void vmem_map_init(void); | |||
43 | * The S390 doesn't have any external MMU info: the kernel page | 43 | * The S390 doesn't have any external MMU info: the kernel page |
44 | * tables contain all the necessary information. | 44 | * tables contain all the necessary information. |
45 | */ | 45 | */ |
46 | #define update_mmu_cache(vma, address, pte) do { } while (0) | 46 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * ZERO_PAGE is a global shared page that is always zero: used | 49 | * ZERO_PAGE is a global shared page that is always zero: used |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index b42715458312..73e259834e10 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | static inline void get_cpu_id(struct cpuid *ptr) | 29 | static inline void get_cpu_id(struct cpuid *ptr) |
30 | { | 30 | { |
31 | asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); | 31 | asm volatile("stidp %0" : "=Q" (*ptr)); |
32 | } | 32 | } |
33 | 33 | ||
34 | extern void s390_adjust_jiffies(void); | 34 | extern void s390_adjust_jiffies(void); |
@@ -184,9 +184,9 @@ static inline void psw_set_key(unsigned int key) | |||
184 | static inline void __load_psw(psw_t psw) | 184 | static inline void __load_psw(psw_t psw) |
185 | { | 185 | { |
186 | #ifndef __s390x__ | 186 | #ifndef __s390x__ |
187 | asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); | 187 | asm volatile("lpsw %0" : : "Q" (psw) : "cc"); |
188 | #else | 188 | #else |
189 | asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); | 189 | asm volatile("lpswe %0" : : "Q" (psw) : "cc"); |
190 | #endif | 190 | #endif |
191 | } | 191 | } |
192 | 192 | ||
@@ -206,17 +206,17 @@ static inline void __load_psw_mask (unsigned long mask) | |||
206 | asm volatile( | 206 | asm volatile( |
207 | " basr %0,0\n" | 207 | " basr %0,0\n" |
208 | "0: ahi %0,1f-0b\n" | 208 | "0: ahi %0,1f-0b\n" |
209 | " st %0,4(%1)\n" | 209 | " st %0,%O1+4(%R1)\n" |
210 | " lpsw 0(%1)\n" | 210 | " lpsw %1\n" |
211 | "1:" | 211 | "1:" |
212 | : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); | 212 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
213 | #else /* __s390x__ */ | 213 | #else /* __s390x__ */ |
214 | asm volatile( | 214 | asm volatile( |
215 | " larl %0,1f\n" | 215 | " larl %0,1f\n" |
216 | " stg %0,8(%1)\n" | 216 | " stg %0,%O1+8(%R1)\n" |
217 | " lpswe 0(%1)\n" | 217 | " lpswe %1\n" |
218 | "1:" | 218 | "1:" |
219 | : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); | 219 | : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); |
220 | #endif /* __s390x__ */ | 220 | #endif /* __s390x__ */ |
221 | } | 221 | } |
222 | 222 | ||
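The theme of this hunk, and of most hunks below, is replacing the old base-register addressing ("a" (&x) plus a hand-written displacement such as 0(%1)) with the "Q" memory constraint and the %O/%R operand modifiers. A minimal sketch of how the new form is read, assuming 64-bit gcc on s390 and a made-up two-word structure (not code from this patch):

/* "Q" describes a memory operand addressable as displacement(base);
 * %O1 expands to the displacement of operand 1 and %R1 to its base
 * register, so %O1+8(%R1) addresses the second word of the operand. */
struct demo { unsigned long w[2]; };

static inline unsigned long demo_load_second(struct demo *d)
{
	unsigned long val;

	asm volatile(
		"	lg	%0,%O1+8(%R1)\n"	/* val = d->w[1] */
		: "=d" (val) : "Q" (*d));
	return val;
}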
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 95dcf183a28d..dd2d913afcae 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h | |||
@@ -492,13 +492,24 @@ struct user_regs_struct | |||
492 | struct task_struct; | 492 | struct task_struct; |
493 | extern void user_enable_single_step(struct task_struct *); | 493 | extern void user_enable_single_step(struct task_struct *); |
494 | extern void user_disable_single_step(struct task_struct *); | 494 | extern void user_disable_single_step(struct task_struct *); |
495 | extern void show_regs(struct pt_regs * regs); | ||
495 | 496 | ||
496 | #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) | 497 | #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) |
497 | #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) | 498 | #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) |
498 | #define user_stack_pointer(regs)((regs)->gprs[15]) | 499 | #define user_stack_pointer(regs)((regs)->gprs[15]) |
499 | #define regs_return_value(regs)((regs)->gprs[2]) | 500 | #define regs_return_value(regs)((regs)->gprs[2]) |
500 | #define profile_pc(regs) instruction_pointer(regs) | 501 | #define profile_pc(regs) instruction_pointer(regs) |
501 | extern void show_regs(struct pt_regs * regs); | 502 | |
503 | int regs_query_register_offset(const char *name); | ||
504 | const char *regs_query_register_name(unsigned int offset); | ||
505 | unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); | ||
506 | unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); | ||
507 | |||
508 | static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) | ||
509 | { | ||
510 | return regs->gprs[15] & PSW_ADDR_INSN; | ||
511 | } | ||
512 | |||
502 | #endif /* __KERNEL__ */ | 513 | #endif /* __KERNEL__ */ |
503 | #endif /* __ASSEMBLY__ */ | 514 | #endif /* __ASSEMBLY__ */ |
504 | 515 | ||
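These prototypes back the HAVE_REGS_AND_STACK_ACCESS_API selection added in the Kconfig hunk. A hedged sketch of a consumer, kernel context and <asm/ptrace.h> assumed, with hypothetical demo_* names:

static unsigned long demo_read_gpr2(struct pt_regs *regs)
{
	/* A fixed offset is used here; regs_query_register_offset() can
	 * resolve an offset from a register name string instead. */
	return regs_get_register(regs, offsetof(struct pt_regs, gprs[2]));
}

static unsigned long demo_peek_stack(struct pt_regs *regs)
{
	/* word n above the kernel stack pointer, n = 0 here */
	return regs_get_kernel_stack_nth(regs, 0);
}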
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 79d849f014f0..c666bfe5e984 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h | |||
@@ -13,7 +13,8 @@ | |||
13 | #include <asm/cio.h> | 13 | #include <asm/cio.h> |
14 | #include <asm/ccwdev.h> | 14 | #include <asm/ccwdev.h> |
15 | 15 | ||
16 | #define QDIO_MAX_QUEUES_PER_IRQ 32 | 16 | /* only use 4 queues to save some cachelines */ |
17 | #define QDIO_MAX_QUEUES_PER_IRQ 4 | ||
17 | #define QDIO_MAX_BUFFERS_PER_Q 128 | 18 | #define QDIO_MAX_BUFFERS_PER_Q 128 |
18 | #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) | 19 | #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1) |
19 | #define QDIO_MAX_ELEMENTS_PER_BUFFER 16 | 20 | #define QDIO_MAX_ELEMENTS_PER_BUFFER 16 |
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h index 9d2a17971805..423fdda2322d 100644 --- a/arch/s390/include/asm/rwsem.h +++ b/arch/s390/include/asm/rwsem.h | |||
@@ -124,21 +124,21 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
124 | 124 | ||
125 | asm volatile( | 125 | asm volatile( |
126 | #ifndef __s390x__ | 126 | #ifndef __s390x__ |
127 | " l %0,0(%3)\n" | 127 | " l %0,%2\n" |
128 | "0: lr %1,%0\n" | 128 | "0: lr %1,%0\n" |
129 | " ahi %1,%5\n" | 129 | " ahi %1,%4\n" |
130 | " cs %0,%1,0(%3)\n" | 130 | " cs %0,%1,%2\n" |
131 | " jl 0b" | 131 | " jl 0b" |
132 | #else /* __s390x__ */ | 132 | #else /* __s390x__ */ |
133 | " lg %0,0(%3)\n" | 133 | " lg %0,%2\n" |
134 | "0: lgr %1,%0\n" | 134 | "0: lgr %1,%0\n" |
135 | " aghi %1,%5\n" | 135 | " aghi %1,%4\n" |
136 | " csg %0,%1,0(%3)\n" | 136 | " csg %0,%1,%2\n" |
137 | " jl 0b" | 137 | " jl 0b" |
138 | #endif /* __s390x__ */ | 138 | #endif /* __s390x__ */ |
139 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 139 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
140 | : "a" (&sem->count), "m" (sem->count), | 140 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) |
141 | "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); | 141 | : "cc", "memory"); |
142 | if (old < 0) | 142 | if (old < 0) |
143 | rwsem_down_read_failed(sem); | 143 | rwsem_down_read_failed(sem); |
144 | } | 144 | } |
@@ -152,25 +152,25 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
152 | 152 | ||
153 | asm volatile( | 153 | asm volatile( |
154 | #ifndef __s390x__ | 154 | #ifndef __s390x__ |
155 | " l %0,0(%3)\n" | 155 | " l %0,%2\n" |
156 | "0: ltr %1,%0\n" | 156 | "0: ltr %1,%0\n" |
157 | " jm 1f\n" | 157 | " jm 1f\n" |
158 | " ahi %1,%5\n" | 158 | " ahi %1,%4\n" |
159 | " cs %0,%1,0(%3)\n" | 159 | " cs %0,%1,%2\n" |
160 | " jl 0b\n" | 160 | " jl 0b\n" |
161 | "1:" | 161 | "1:" |
162 | #else /* __s390x__ */ | 162 | #else /* __s390x__ */ |
163 | " lg %0,0(%3)\n" | 163 | " lg %0,%2\n" |
164 | "0: ltgr %1,%0\n" | 164 | "0: ltgr %1,%0\n" |
165 | " jm 1f\n" | 165 | " jm 1f\n" |
166 | " aghi %1,%5\n" | 166 | " aghi %1,%4\n" |
167 | " csg %0,%1,0(%3)\n" | 167 | " csg %0,%1,%2\n" |
168 | " jl 0b\n" | 168 | " jl 0b\n" |
169 | "1:" | 169 | "1:" |
170 | #endif /* __s390x__ */ | 170 | #endif /* __s390x__ */ |
171 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 171 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
172 | : "a" (&sem->count), "m" (sem->count), | 172 | : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) |
173 | "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); | 173 | : "cc", "memory"); |
174 | return old >= 0 ? 1 : 0; | 174 | return old >= 0 ? 1 : 0; |
175 | } | 175 | } |
176 | 176 | ||
@@ -184,20 +184,20 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
184 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 184 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
185 | asm volatile( | 185 | asm volatile( |
186 | #ifndef __s390x__ | 186 | #ifndef __s390x__ |
187 | " l %0,0(%3)\n" | 187 | " l %0,%2\n" |
188 | "0: lr %1,%0\n" | 188 | "0: lr %1,%0\n" |
189 | " a %1,%5\n" | 189 | " a %1,%4\n" |
190 | " cs %0,%1,0(%3)\n" | 190 | " cs %0,%1,%2\n" |
191 | " jl 0b" | 191 | " jl 0b" |
192 | #else /* __s390x__ */ | 192 | #else /* __s390x__ */ |
193 | " lg %0,0(%3)\n" | 193 | " lg %0,%2\n" |
194 | "0: lgr %1,%0\n" | 194 | "0: lgr %1,%0\n" |
195 | " ag %1,%5\n" | 195 | " ag %1,%4\n" |
196 | " csg %0,%1,0(%3)\n" | 196 | " csg %0,%1,%2\n" |
197 | " jl 0b" | 197 | " jl 0b" |
198 | #endif /* __s390x__ */ | 198 | #endif /* __s390x__ */ |
199 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 199 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
200 | : "a" (&sem->count), "m" (sem->count), "m" (tmp) | 200 | : "Q" (sem->count), "m" (tmp) |
201 | : "cc", "memory"); | 201 | : "cc", "memory"); |
202 | if (old != 0) | 202 | if (old != 0) |
203 | rwsem_down_write_failed(sem); | 203 | rwsem_down_write_failed(sem); |
@@ -217,22 +217,22 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
217 | 217 | ||
218 | asm volatile( | 218 | asm volatile( |
219 | #ifndef __s390x__ | 219 | #ifndef __s390x__ |
220 | " l %0,0(%2)\n" | 220 | " l %0,%1\n" |
221 | "0: ltr %0,%0\n" | 221 | "0: ltr %0,%0\n" |
222 | " jnz 1f\n" | 222 | " jnz 1f\n" |
223 | " cs %0,%4,0(%2)\n" | 223 | " cs %0,%3,%1\n" |
224 | " jl 0b\n" | 224 | " jl 0b\n" |
225 | #else /* __s390x__ */ | 225 | #else /* __s390x__ */ |
226 | " lg %0,0(%2)\n" | 226 | " lg %0,%1\n" |
227 | "0: ltgr %0,%0\n" | 227 | "0: ltgr %0,%0\n" |
228 | " jnz 1f\n" | 228 | " jnz 1f\n" |
229 | " csg %0,%4,0(%2)\n" | 229 | " csg %0,%3,%1\n" |
230 | " jl 0b\n" | 230 | " jl 0b\n" |
231 | #endif /* __s390x__ */ | 231 | #endif /* __s390x__ */ |
232 | "1:" | 232 | "1:" |
233 | : "=&d" (old), "=m" (sem->count) | 233 | : "=&d" (old), "=Q" (sem->count) |
234 | : "a" (&sem->count), "m" (sem->count), | 234 | : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) |
235 | "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); | 235 | : "cc", "memory"); |
236 | return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; | 236 | return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; |
237 | } | 237 | } |
238 | 238 | ||
@@ -245,21 +245,20 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
245 | 245 | ||
246 | asm volatile( | 246 | asm volatile( |
247 | #ifndef __s390x__ | 247 | #ifndef __s390x__ |
248 | " l %0,0(%3)\n" | 248 | " l %0,%2\n" |
249 | "0: lr %1,%0\n" | 249 | "0: lr %1,%0\n" |
250 | " ahi %1,%5\n" | 250 | " ahi %1,%4\n" |
251 | " cs %0,%1,0(%3)\n" | 251 | " cs %0,%1,%2\n" |
252 | " jl 0b" | 252 | " jl 0b" |
253 | #else /* __s390x__ */ | 253 | #else /* __s390x__ */ |
254 | " lg %0,0(%3)\n" | 254 | " lg %0,%2\n" |
255 | "0: lgr %1,%0\n" | 255 | "0: lgr %1,%0\n" |
256 | " aghi %1,%5\n" | 256 | " aghi %1,%4\n" |
257 | " csg %0,%1,0(%3)\n" | 257 | " csg %0,%1,%2\n" |
258 | " jl 0b" | 258 | " jl 0b" |
259 | #endif /* __s390x__ */ | 259 | #endif /* __s390x__ */ |
260 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 260 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
261 | : "a" (&sem->count), "m" (sem->count), | 261 | : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) |
262 | "i" (-RWSEM_ACTIVE_READ_BIAS) | ||
263 | : "cc", "memory"); | 262 | : "cc", "memory"); |
264 | if (new < 0) | 263 | if (new < 0) |
265 | if ((new & RWSEM_ACTIVE_MASK) == 0) | 264 | if ((new & RWSEM_ACTIVE_MASK) == 0) |
@@ -276,20 +275,20 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
276 | tmp = -RWSEM_ACTIVE_WRITE_BIAS; | 275 | tmp = -RWSEM_ACTIVE_WRITE_BIAS; |
277 | asm volatile( | 276 | asm volatile( |
278 | #ifndef __s390x__ | 277 | #ifndef __s390x__ |
279 | " l %0,0(%3)\n" | 278 | " l %0,%2\n" |
280 | "0: lr %1,%0\n" | 279 | "0: lr %1,%0\n" |
281 | " a %1,%5\n" | 280 | " a %1,%4\n" |
282 | " cs %0,%1,0(%3)\n" | 281 | " cs %0,%1,%2\n" |
283 | " jl 0b" | 282 | " jl 0b" |
284 | #else /* __s390x__ */ | 283 | #else /* __s390x__ */ |
285 | " lg %0,0(%3)\n" | 284 | " lg %0,%2\n" |
286 | "0: lgr %1,%0\n" | 285 | "0: lgr %1,%0\n" |
287 | " ag %1,%5\n" | 286 | " ag %1,%4\n" |
288 | " csg %0,%1,0(%3)\n" | 287 | " csg %0,%1,%2\n" |
289 | " jl 0b" | 288 | " jl 0b" |
290 | #endif /* __s390x__ */ | 289 | #endif /* __s390x__ */ |
291 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 290 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
292 | : "a" (&sem->count), "m" (sem->count), "m" (tmp) | 291 | : "Q" (sem->count), "m" (tmp) |
293 | : "cc", "memory"); | 292 | : "cc", "memory"); |
294 | if (new < 0) | 293 | if (new < 0) |
295 | if ((new & RWSEM_ACTIVE_MASK) == 0) | 294 | if ((new & RWSEM_ACTIVE_MASK) == 0) |
@@ -306,20 +305,20 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
306 | tmp = -RWSEM_WAITING_BIAS; | 305 | tmp = -RWSEM_WAITING_BIAS; |
307 | asm volatile( | 306 | asm volatile( |
308 | #ifndef __s390x__ | 307 | #ifndef __s390x__ |
309 | " l %0,0(%3)\n" | 308 | " l %0,%2\n" |
310 | "0: lr %1,%0\n" | 309 | "0: lr %1,%0\n" |
311 | " a %1,%5\n" | 310 | " a %1,%4\n" |
312 | " cs %0,%1,0(%3)\n" | 311 | " cs %0,%1,%2\n" |
313 | " jl 0b" | 312 | " jl 0b" |
314 | #else /* __s390x__ */ | 313 | #else /* __s390x__ */ |
315 | " lg %0,0(%3)\n" | 314 | " lg %0,%2\n" |
316 | "0: lgr %1,%0\n" | 315 | "0: lgr %1,%0\n" |
317 | " ag %1,%5\n" | 316 | " ag %1,%4\n" |
318 | " csg %0,%1,0(%3)\n" | 317 | " csg %0,%1,%2\n" |
319 | " jl 0b" | 318 | " jl 0b" |
320 | #endif /* __s390x__ */ | 319 | #endif /* __s390x__ */ |
321 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 320 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
322 | : "a" (&sem->count), "m" (sem->count), "m" (tmp) | 321 | : "Q" (sem->count), "m" (tmp) |
323 | : "cc", "memory"); | 322 | : "cc", "memory"); |
324 | if (new > 1) | 323 | if (new > 1) |
325 | rwsem_downgrade_wake(sem); | 324 | rwsem_downgrade_wake(sem); |
@@ -334,20 +333,20 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) | |||
334 | 333 | ||
335 | asm volatile( | 334 | asm volatile( |
336 | #ifndef __s390x__ | 335 | #ifndef __s390x__ |
337 | " l %0,0(%3)\n" | 336 | " l %0,%2\n" |
338 | "0: lr %1,%0\n" | 337 | "0: lr %1,%0\n" |
339 | " ar %1,%5\n" | 338 | " ar %1,%4\n" |
340 | " cs %0,%1,0(%3)\n" | 339 | " cs %0,%1,%2\n" |
341 | " jl 0b" | 340 | " jl 0b" |
342 | #else /* __s390x__ */ | 341 | #else /* __s390x__ */ |
343 | " lg %0,0(%3)\n" | 342 | " lg %0,%2\n" |
344 | "0: lgr %1,%0\n" | 343 | "0: lgr %1,%0\n" |
345 | " agr %1,%5\n" | 344 | " agr %1,%4\n" |
346 | " csg %0,%1,0(%3)\n" | 345 | " csg %0,%1,%2\n" |
347 | " jl 0b" | 346 | " jl 0b" |
348 | #endif /* __s390x__ */ | 347 | #endif /* __s390x__ */ |
349 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 348 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
350 | : "a" (&sem->count), "m" (sem->count), "d" (delta) | 349 | : "Q" (sem->count), "d" (delta) |
351 | : "cc", "memory"); | 350 | : "cc", "memory"); |
352 | } | 351 | } |
353 | 352 | ||
@@ -360,20 +359,20 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |||
360 | 359 | ||
361 | asm volatile( | 360 | asm volatile( |
362 | #ifndef __s390x__ | 361 | #ifndef __s390x__ |
363 | " l %0,0(%3)\n" | 362 | " l %0,%2\n" |
364 | "0: lr %1,%0\n" | 363 | "0: lr %1,%0\n" |
365 | " ar %1,%5\n" | 364 | " ar %1,%4\n" |
366 | " cs %0,%1,0(%3)\n" | 365 | " cs %0,%1,%2\n" |
367 | " jl 0b" | 366 | " jl 0b" |
368 | #else /* __s390x__ */ | 367 | #else /* __s390x__ */ |
369 | " lg %0,0(%3)\n" | 368 | " lg %0,%2\n" |
370 | "0: lgr %1,%0\n" | 369 | "0: lgr %1,%0\n" |
371 | " agr %1,%5\n" | 370 | " agr %1,%4\n" |
372 | " csg %0,%1,0(%3)\n" | 371 | " csg %0,%1,%2\n" |
373 | " jl 0b" | 372 | " jl 0b" |
374 | #endif /* __s390x__ */ | 373 | #endif /* __s390x__ */ |
375 | : "=&d" (old), "=&d" (new), "=m" (sem->count) | 374 | : "=&d" (old), "=&d" (new), "=Q" (sem->count) |
376 | : "a" (&sem->count), "m" (sem->count), "d" (delta) | 375 | : "Q" (sem->count), "d" (delta) |
377 | : "cc", "memory"); | 376 | : "cc", "memory"); |
378 | return new; | 377 | return new; |
379 | } | 378 | } |
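Every rwsem helper above is the same load / modify / compare-and-swap retry loop; this patch only rewrites how the memory operand is described. A portable sketch of that loop using C11 atomics rather than the cs/csg assembly:

#include <stdatomic.h>

/* Sketch of the retry pattern: reread the counter whenever another
 * CPU changed it between the load and the compare-and-swap. */
static long demo_count_add(_Atomic long *count, long delta)
{
	long old = atomic_load(count);
	long new;

	do {
		new = old + delta;	/* the "ahi/a/ar" step */
	} while (!atomic_compare_exchange_weak(count, &old, new));
	return new;
}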
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 52a779c337e8..9ab6bd3a65d1 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -14,14 +14,14 @@ | |||
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | 16 | ||
17 | #include <asm/lowcore.h> | ||
18 | #include <asm/types.h> | ||
19 | |||
20 | #define PARMAREA 0x10400 | 17 | #define PARMAREA 0x10400 |
21 | #define MEMORY_CHUNKS 256 | 18 | #define MEMORY_CHUNKS 256 |
22 | 19 | ||
23 | #ifndef __ASSEMBLY__ | 20 | #ifndef __ASSEMBLY__ |
24 | 21 | ||
22 | #include <asm/lowcore.h> | ||
23 | #include <asm/types.h> | ||
24 | |||
25 | #ifndef __s390x__ | 25 | #ifndef __s390x__ |
26 | #define IPL_DEVICE (*(unsigned long *) (0x10404)) | 26 | #define IPL_DEVICE (*(unsigned long *) (0x10404)) |
27 | #define INITRD_START (*(unsigned long *) (0x1040C)) | 27 | #define INITRD_START (*(unsigned long *) (0x1040C)) |
@@ -71,9 +71,12 @@ extern unsigned int user_mode; | |||
71 | #define MACHINE_FLAG_KVM (1UL << 9) | 71 | #define MACHINE_FLAG_KVM (1UL << 9) |
72 | #define MACHINE_FLAG_HPAGE (1UL << 10) | 72 | #define MACHINE_FLAG_HPAGE (1UL << 10) |
73 | #define MACHINE_FLAG_PFMF (1UL << 11) | 73 | #define MACHINE_FLAG_PFMF (1UL << 11) |
74 | #define MACHINE_FLAG_LPAR (1UL << 12) | ||
74 | 75 | ||
75 | #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) | 76 | #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) |
76 | #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) | 77 | #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) |
78 | #define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) | ||
79 | |||
77 | #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) | 80 | #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) |
78 | 81 | ||
79 | #ifndef __s390x__ | 82 | #ifndef __s390x__ |
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index f72d611f7e13..e3bffd4e2d66 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h | |||
@@ -1,24 +1,19 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-s390/sigp.h | 2 | * Routines and structures for signalling other processors. |
3 | * | 3 | * |
4 | * S390 version | 4 | * Copyright IBM Corp. 1999,2010 |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Author(s): Denis Joseph Barrow, |
6 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, |
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
8 | * Heiko Carstens (heiko.carstens@de.ibm.com) | ||
9 | * | ||
10 | * sigp.h by D.J. Barrow (c) IBM 1999 | ||
11 | * contains routines / structures for signalling other S/390 processors in an | ||
12 | * SMP configuration. | ||
13 | */ | 8 | */ |
14 | 9 | ||
15 | #ifndef __SIGP__ | 10 | #ifndef __ASM_SIGP_H |
16 | #define __SIGP__ | 11 | #define __ASM_SIGP_H |
17 | 12 | ||
18 | #include <asm/system.h> | 13 | #include <asm/system.h> |
19 | 14 | ||
20 | /* get real cpu address from logical cpu number */ | 15 | /* Get real cpu address from logical cpu number. */ |
21 | extern int __cpu_logical_map[]; | 16 | extern unsigned short __cpu_logical_map[]; |
22 | 17 | ||
23 | static inline int cpu_logical_map(int cpu) | 18 | static inline int cpu_logical_map(int cpu) |
24 | { | 19 | { |
@@ -29,107 +24,108 @@ static inline int cpu_logical_map(int cpu) | |||
29 | #endif | 24 | #endif |
30 | } | 25 | } |
31 | 26 | ||
32 | typedef enum | 27 | enum { |
33 | { | 28 | sigp_sense = 1, |
34 | sigp_unassigned=0x0, | 29 | sigp_external_call = 2, |
35 | sigp_sense, | 30 | sigp_emergency_signal = 3, |
36 | sigp_external_call, | 31 | sigp_start = 4, |
37 | sigp_emergency_signal, | 32 | sigp_stop = 5, |
38 | sigp_start, | 33 | sigp_restart = 6, |
39 | sigp_stop, | 34 | sigp_stop_and_store_status = 9, |
40 | sigp_restart, | 35 | sigp_initial_cpu_reset = 11, |
41 | sigp_unassigned1, | 36 | sigp_cpu_reset = 12, |
42 | sigp_unassigned2, | 37 | sigp_set_prefix = 13, |
43 | sigp_stop_and_store_status, | 38 | sigp_store_status_at_address = 14, |
44 | sigp_unassigned3, | 39 | sigp_store_extended_status_at_address = 15, |
45 | sigp_initial_cpu_reset, | 40 | sigp_set_architecture = 18, |
46 | sigp_cpu_reset, | 41 | sigp_conditional_emergency_signal = 19, |
47 | sigp_set_prefix, | 42 | sigp_sense_running = 21, |
48 | sigp_store_status_at_address, | 43 | }; |
49 | sigp_store_extended_status_at_address | 44 | |
50 | } sigp_order_code; | 45 | enum { |
51 | 46 | sigp_order_code_accepted = 0, | |
52 | typedef __u32 sigp_status_word; | 47 | sigp_status_stored = 1, |
53 | 48 | sigp_busy = 2, | |
54 | typedef enum | 49 | sigp_not_operational = 3, |
55 | { | 50 | }; |
56 | sigp_order_code_accepted=0, | ||
57 | sigp_status_stored, | ||
58 | sigp_busy, | ||
59 | sigp_not_operational | ||
60 | } sigp_ccode; | ||
61 | |||
62 | 51 | ||
63 | /* | 52 | /* |
64 | * Definitions for the external call | 53 | * Definitions for external call. |
65 | */ | 54 | */ |
66 | 55 | enum { | |
67 | /* 'Bit' signals, asynchronous */ | 56 | ec_schedule = 0, |
68 | typedef enum | ||
69 | { | ||
70 | ec_schedule=0, | ||
71 | ec_call_function, | 57 | ec_call_function, |
72 | ec_call_function_single, | 58 | ec_call_function_single, |
73 | ec_bit_last | 59 | }; |
74 | } ec_bit_sig; | ||
75 | 60 | ||
76 | /* | 61 | /* |
77 | * Signal processor | 62 | * Signal processor. |
78 | */ | 63 | */ |
79 | static inline sigp_ccode | 64 | static inline int raw_sigp(u16 cpu, int order) |
80 | signal_processor(__u16 cpu_addr, sigp_order_code order_code) | ||
81 | { | 65 | { |
82 | register unsigned long reg1 asm ("1") = 0; | 66 | register unsigned long reg1 asm ("1") = 0; |
83 | sigp_ccode ccode; | 67 | int ccode; |
84 | 68 | ||
85 | asm volatile( | 69 | asm volatile( |
86 | " sigp %1,%2,0(%3)\n" | 70 | " sigp %1,%2,0(%3)\n" |
87 | " ipm %0\n" | 71 | " ipm %0\n" |
88 | " srl %0,28\n" | 72 | " srl %0,28\n" |
89 | : "=d" (ccode) | 73 | : "=d" (ccode) |
90 | : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), | 74 | : "d" (reg1), "d" (cpu), |
91 | "a" (order_code) : "cc" , "memory"); | 75 | "a" (order) : "cc" , "memory"); |
92 | return ccode; | 76 | return ccode; |
93 | } | 77 | } |
94 | 78 | ||
95 | /* | 79 | /* |
96 | * Signal processor with parameter | 80 | * Signal processor with parameter. |
97 | */ | 81 | */ |
98 | static inline sigp_ccode | 82 | static inline int raw_sigp_p(u32 parameter, u16 cpu, int order) |
99 | signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code) | ||
100 | { | 83 | { |
101 | register unsigned int reg1 asm ("1") = parameter; | 84 | register unsigned int reg1 asm ("1") = parameter; |
102 | sigp_ccode ccode; | 85 | int ccode; |
103 | 86 | ||
104 | asm volatile( | 87 | asm volatile( |
105 | " sigp %1,%2,0(%3)\n" | 88 | " sigp %1,%2,0(%3)\n" |
106 | " ipm %0\n" | 89 | " ipm %0\n" |
107 | " srl %0,28\n" | 90 | " srl %0,28\n" |
108 | : "=d" (ccode) | 91 | : "=d" (ccode) |
109 | : "d" (reg1), "d" (cpu_logical_map(cpu_addr)), | 92 | : "d" (reg1), "d" (cpu), |
110 | "a" (order_code) : "cc" , "memory"); | 93 | "a" (order) : "cc" , "memory"); |
111 | return ccode; | 94 | return ccode; |
112 | } | 95 | } |
113 | 96 | ||
114 | /* | 97 | /* |
115 | * Signal processor with parameter and return status | 98 | * Signal processor with parameter and return status. |
116 | */ | 99 | */ |
117 | static inline sigp_ccode | 100 | static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order) |
118 | signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr, | ||
119 | sigp_order_code order_code) | ||
120 | { | 101 | { |
121 | register unsigned int reg1 asm ("1") = parameter; | 102 | register unsigned int reg1 asm ("1") = parm; |
122 | sigp_ccode ccode; | 103 | int ccode; |
123 | 104 | ||
124 | asm volatile( | 105 | asm volatile( |
125 | " sigp %1,%2,0(%3)\n" | 106 | " sigp %1,%2,0(%3)\n" |
126 | " ipm %0\n" | 107 | " ipm %0\n" |
127 | " srl %0,28\n" | 108 | " srl %0,28\n" |
128 | : "=d" (ccode), "+d" (reg1) | 109 | : "=d" (ccode), "+d" (reg1) |
129 | : "d" (cpu_logical_map(cpu_addr)), "a" (order_code) | 110 | : "d" (cpu), "a" (order) |
130 | : "cc" , "memory"); | 111 | : "cc" , "memory"); |
131 | *statusptr = reg1; | 112 | *status = reg1; |
132 | return ccode; | 113 | return ccode; |
133 | } | 114 | } |
134 | 115 | ||
135 | #endif /* __SIGP__ */ | 116 | static inline int sigp(int cpu, int order) |
117 | { | ||
118 | return raw_sigp(cpu_logical_map(cpu), order); | ||
119 | } | ||
120 | |||
121 | static inline int sigp_p(u32 parameter, int cpu, int order) | ||
122 | { | ||
123 | return raw_sigp_p(parameter, cpu_logical_map(cpu), order); | ||
124 | } | ||
125 | |||
126 | static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order) | ||
127 | { | ||
128 | return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order); | ||
129 | } | ||
130 | |||
131 | #endif /* __ASM_SIGP_H */ | ||
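The rework separates raw_sigp*(), which takes a real CPU address, from the sigp*() wrappers, which translate a logical CPU number through cpu_logical_map() first. A small usage sketch; the retry-while-busy policy is illustrative, not something added by this patch:

/* Sketch: issue an order to a logical cpu, retrying while the target
 * still has a previous order pending. */
static int demo_sigp_retry(int cpu, int order)
{
	int cc;

	do {
		cc = sigp(cpu, order);
	} while (cc == sigp_busy);
	return cc;	/* accepted, status stored or not operational */
}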
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 2ab1141eeb50..edc03cb9cd79 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -29,7 +29,43 @@ extern int smp_cpu_polarization[]; | |||
29 | extern void arch_send_call_function_single_ipi(int cpu); | 29 | extern void arch_send_call_function_single_ipi(int cpu); |
30 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 30 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
31 | 31 | ||
32 | extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 32 | extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
33 | |||
34 | extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *); | ||
35 | extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp, | ||
36 | int from, int to); | ||
37 | extern void smp_restart_cpu(void); | ||
38 | |||
39 | /* | ||
40 | * returns 1 if (virtual) cpu is scheduled | ||
41 | * returns 0 otherwise | ||
42 | */ | ||
43 | static inline int smp_vcpu_scheduled(int cpu) | ||
44 | { | ||
45 | u32 status; | ||
46 | |||
47 | switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) { | ||
48 | case sigp_status_stored: | ||
49 | /* Check for running status */ | ||
50 | if (status & 0x400) | ||
51 | return 0; | ||
52 | break; | ||
53 | case sigp_not_operational: | ||
54 | return 0; | ||
55 | default: | ||
56 | break; | ||
57 | } | ||
58 | return 1; | ||
59 | } | ||
60 | |||
61 | #else /* CONFIG_SMP */ | ||
62 | |||
63 | static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | ||
64 | { | ||
65 | func(data); | ||
66 | } | ||
67 | |||
68 | #define smp_vcpu_scheduled (1) | ||
33 | 69 | ||
34 | #endif /* CONFIG_SMP */ | 70 | #endif /* CONFIG_SMP */ |
35 | 71 | ||
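smp_vcpu_scheduled() issues sigp_sense_running and tests the 0x400 status bit, which is set when the addressed virtual CPU is not currently backed by a physical CPU. A hedged sketch of the intended use in lock spinning code; demo_yield_to() is a hypothetical stand-in for a hypervisor yield primitive and an SMP build is assumed:

static void demo_wait_for_owner(int owner_cpu)
{
	if (smp_vcpu_scheduled(owner_cpu))
		cpu_relax();			/* owner runs, keep spinning */
	else
		demo_yield_to(owner_cpu);	/* owner preempted, give up the slice */
}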
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index a587907d77f3..56612fc8186e 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -13,8 +13,6 @@ | |||
13 | 13 | ||
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | 15 | ||
16 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
17 | |||
18 | static inline int | 16 | static inline int |
19 | _raw_compare_and_swap(volatile unsigned int *lock, | 17 | _raw_compare_and_swap(volatile unsigned int *lock, |
20 | unsigned int old, unsigned int new) | 18 | unsigned int old, unsigned int new) |
@@ -27,22 +25,6 @@ _raw_compare_and_swap(volatile unsigned int *lock, | |||
27 | return old; | 25 | return old; |
28 | } | 26 | } |
29 | 27 | ||
30 | #else /* __GNUC__ */ | ||
31 | |||
32 | static inline int | ||
33 | _raw_compare_and_swap(volatile unsigned int *lock, | ||
34 | unsigned int old, unsigned int new) | ||
35 | { | ||
36 | asm volatile( | ||
37 | " cs %0,%3,0(%4)" | ||
38 | : "=d" (old), "=m" (*lock) | ||
39 | : "0" (old), "d" (new), "a" (lock), "m" (*lock) | ||
40 | : "cc", "memory" ); | ||
41 | return old; | ||
42 | } | ||
43 | |||
44 | #endif /* __GNUC__ */ | ||
45 | |||
46 | /* | 28 | /* |
47 | * Simple spin lock operations. There are two variants, one clears IRQ's | 29 | * Simple spin lock operations. There are two variants, one clears IRQ's |
48 | * on the local processor, one does not. | 30 | * on the local processor, one does not. |
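With the pre-3.3 gcc fallback removed, _raw_compare_and_swap() is the one CS-based primitive left for the lock code to build on. A hedged sketch of a trylock on top of it; the real arch code uses its own owner encoding, the zero-means-free convention here is only for illustration:

/* Sketch: a lock word of 0 means free; taking the lock is a single
 * successful compare-and-swap from 0 to an owner tag. */
static int demo_trylock(volatile unsigned int *lock, unsigned int owner)
{
	return _raw_compare_and_swap(lock, 0, owner) == 0;
}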
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h index eb18dc1f327b..6bdee21c077e 100644 --- a/arch/s390/include/asm/swab.h +++ b/arch/s390/include/asm/swab.h | |||
@@ -47,11 +47,11 @@ static inline __u32 __arch_swab32p(const __u32 *x) | |||
47 | 47 | ||
48 | asm volatile( | 48 | asm volatile( |
49 | #ifndef __s390x__ | 49 | #ifndef __s390x__ |
50 | " icm %0,8,3(%1)\n" | 50 | " icm %0,8,%O1+3(%R1)\n" |
51 | " icm %0,4,2(%1)\n" | 51 | " icm %0,4,%O1+2(%R1)\n" |
52 | " icm %0,2,1(%1)\n" | 52 | " icm %0,2,%O1+1(%R1)\n" |
53 | " ic %0,0(%1)" | 53 | " ic %0,%1" |
54 | : "=&d" (result) : "a" (x), "m" (*x) : "cc"); | 54 | : "=&d" (result) : "Q" (*x) : "cc"); |
55 | #else /* __s390x__ */ | 55 | #else /* __s390x__ */ |
56 | " lrv %0,%1" | 56 | " lrv %0,%1" |
57 | : "=d" (result) : "m" (*x)); | 57 | : "=d" (result) : "m" (*x)); |
@@ -77,9 +77,9 @@ static inline __u16 __arch_swab16p(const __u16 *x) | |||
77 | 77 | ||
78 | asm volatile( | 78 | asm volatile( |
79 | #ifndef __s390x__ | 79 | #ifndef __s390x__ |
80 | " icm %0,2,1(%1)\n" | 80 | " icm %0,2,%O+1(%R1)\n" |
81 | " ic %0,0(%1)\n" | 81 | " ic %0,%1\n" |
82 | : "=&d" (result) : "a" (x), "m" (*x) : "cc"); | 82 | : "=&d" (result) : "Q" (*x) : "cc"); |
83 | #else /* __s390x__ */ | 83 | #else /* __s390x__ */ |
84 | " lrvh %0,%1" | 84 | " lrvh %0,%1" |
85 | : "=d" (result) : "m" (*x)); | 85 | : "=d" (result) : "m" (*x)); |
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index e0a73d3eb837..8429686951f9 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h | |||
@@ -15,6 +15,13 @@ | |||
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
17 | 17 | ||
18 | /* | ||
19 | * The syscall table always contains 32 bit pointers since we know that the | ||
20 | * address of the function to be called is (way) below 4GB. So the "int" | ||
21 | * type here is what we want [need] for both 32 bit and 64 bit systems. | ||
22 | */ | ||
23 | extern const unsigned int sys_call_table[]; | ||
24 | |||
18 | static inline long syscall_get_nr(struct task_struct *task, | 25 | static inline long syscall_get_nr(struct task_struct *task, |
19 | struct pt_regs *regs) | 26 | struct pt_regs *regs) |
20 | { | 27 | { |
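The comment explains why the table elements are 32-bit even on 64-bit kernels: the handlers are linked below 4GB, so the full pointer is recovered by zero extension. A hedged sketch of that widening step, with a made-up three-argument prototype standing in for the real handler signature:

typedef long (*demo_syscall_fn)(unsigned long, unsigned long, unsigned long);

static demo_syscall_fn demo_syscall_ptr(unsigned int nr)
{
	/* zero-extend the 32-bit table entry back to a function pointer */
	return (demo_syscall_fn)(unsigned long)sys_call_table[nr];
}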
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index 9d70057d828c..22bdb2a0ee5f 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h | |||
@@ -87,7 +87,8 @@ struct sysinfo_2_2_2 { | |||
87 | 87 | ||
88 | struct sysinfo_3_2_2 { | 88 | struct sysinfo_3_2_2 { |
89 | char reserved_0[31]; | 89 | char reserved_0[31]; |
90 | unsigned char count; | 90 | unsigned char :4; |
91 | unsigned char count:4; | ||
91 | struct { | 92 | struct { |
92 | char reserved_0[4]; | 93 | char reserved_0[4]; |
93 | unsigned short cpus_total; | 94 | unsigned short cpus_total; |
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 379661d2f81a..67ee6c3c6bb3 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -24,65 +24,65 @@ extern struct task_struct *__switch_to(void *, void *); | |||
24 | static inline void save_fp_regs(s390_fp_regs *fpregs) | 24 | static inline void save_fp_regs(s390_fp_regs *fpregs) |
25 | { | 25 | { |
26 | asm volatile( | 26 | asm volatile( |
27 | " std 0,8(%1)\n" | 27 | " std 0,%O0+8(%R0)\n" |
28 | " std 2,24(%1)\n" | 28 | " std 2,%O0+24(%R0)\n" |
29 | " std 4,40(%1)\n" | 29 | " std 4,%O0+40(%R0)\n" |
30 | " std 6,56(%1)" | 30 | " std 6,%O0+56(%R0)" |
31 | : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); | 31 | : "=Q" (*fpregs) : "Q" (*fpregs)); |
32 | if (!MACHINE_HAS_IEEE) | 32 | if (!MACHINE_HAS_IEEE) |
33 | return; | 33 | return; |
34 | asm volatile( | 34 | asm volatile( |
35 | " stfpc 0(%1)\n" | 35 | " stfpc %0\n" |
36 | " std 1,16(%1)\n" | 36 | " std 1,%O0+16(%R0)\n" |
37 | " std 3,32(%1)\n" | 37 | " std 3,%O0+32(%R0)\n" |
38 | " std 5,48(%1)\n" | 38 | " std 5,%O0+48(%R0)\n" |
39 | " std 7,64(%1)\n" | 39 | " std 7,%O0+64(%R0)\n" |
40 | " std 8,72(%1)\n" | 40 | " std 8,%O0+72(%R0)\n" |
41 | " std 9,80(%1)\n" | 41 | " std 9,%O0+80(%R0)\n" |
42 | " std 10,88(%1)\n" | 42 | " std 10,%O0+88(%R0)\n" |
43 | " std 11,96(%1)\n" | 43 | " std 11,%O0+96(%R0)\n" |
44 | " std 12,104(%1)\n" | 44 | " std 12,%O0+104(%R0)\n" |
45 | " std 13,112(%1)\n" | 45 | " std 13,%O0+112(%R0)\n" |
46 | " std 14,120(%1)\n" | 46 | " std 14,%O0+120(%R0)\n" |
47 | " std 15,128(%1)\n" | 47 | " std 15,%O0+128(%R0)\n" |
48 | : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); | 48 | : "=Q" (*fpregs) : "Q" (*fpregs)); |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline void restore_fp_regs(s390_fp_regs *fpregs) | 51 | static inline void restore_fp_regs(s390_fp_regs *fpregs) |
52 | { | 52 | { |
53 | asm volatile( | 53 | asm volatile( |
54 | " ld 0,8(%0)\n" | 54 | " ld 0,%O0+8(%R0)\n" |
55 | " ld 2,24(%0)\n" | 55 | " ld 2,%O0+24(%R0)\n" |
56 | " ld 4,40(%0)\n" | 56 | " ld 4,%O0+40(%R0)\n" |
57 | " ld 6,56(%0)" | 57 | " ld 6,%O0+56(%R0)" |
58 | : : "a" (fpregs), "m" (*fpregs)); | 58 | : : "Q" (*fpregs)); |
59 | if (!MACHINE_HAS_IEEE) | 59 | if (!MACHINE_HAS_IEEE) |
60 | return; | 60 | return; |
61 | asm volatile( | 61 | asm volatile( |
62 | " lfpc 0(%0)\n" | 62 | " lfpc %0\n" |
63 | " ld 1,16(%0)\n" | 63 | " ld 1,%O0+16(%R0)\n" |
64 | " ld 3,32(%0)\n" | 64 | " ld 3,%O0+32(%R0)\n" |
65 | " ld 5,48(%0)\n" | 65 | " ld 5,%O0+48(%R0)\n" |
66 | " ld 7,64(%0)\n" | 66 | " ld 7,%O0+64(%R0)\n" |
67 | " ld 8,72(%0)\n" | 67 | " ld 8,%O0+72(%R0)\n" |
68 | " ld 9,80(%0)\n" | 68 | " ld 9,%O0+80(%R0)\n" |
69 | " ld 10,88(%0)\n" | 69 | " ld 10,%O0+88(%R0)\n" |
70 | " ld 11,96(%0)\n" | 70 | " ld 11,%O0+96(%R0)\n" |
71 | " ld 12,104(%0)\n" | 71 | " ld 12,%O0+104(%R0)\n" |
72 | " ld 13,112(%0)\n" | 72 | " ld 13,%O0+112(%R0)\n" |
73 | " ld 14,120(%0)\n" | 73 | " ld 14,%O0+120(%R0)\n" |
74 | " ld 15,128(%0)\n" | 74 | " ld 15,%O0+128(%R0)\n" |
75 | : : "a" (fpregs), "m" (*fpregs)); | 75 | : : "Q" (*fpregs)); |
76 | } | 76 | } |
77 | 77 | ||
78 | static inline void save_access_regs(unsigned int *acrs) | 78 | static inline void save_access_regs(unsigned int *acrs) |
79 | { | 79 | { |
80 | asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); | 80 | asm volatile("stam 0,15,%0" : "=Q" (*acrs)); |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline void restore_access_regs(unsigned int *acrs) | 83 | static inline void restore_access_regs(unsigned int *acrs) |
84 | { | 84 | { |
85 | asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); | 85 | asm volatile("lam 0,15,%0" : : "Q" (*acrs)); |
86 | } | 86 | } |
87 | 87 | ||
88 | #define switch_to(prev,next,last) do { \ | 88 | #define switch_to(prev,next,last) do { \ |
@@ -139,48 +139,48 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size) | |||
139 | shift = (3 ^ (addr & 3)) << 3; | 139 | shift = (3 ^ (addr & 3)) << 3; |
140 | addr ^= addr & 3; | 140 | addr ^= addr & 3; |
141 | asm volatile( | 141 | asm volatile( |
142 | " l %0,0(%4)\n" | 142 | " l %0,%4\n" |
143 | "0: lr 0,%0\n" | 143 | "0: lr 0,%0\n" |
144 | " nr 0,%3\n" | 144 | " nr 0,%3\n" |
145 | " or 0,%2\n" | 145 | " or 0,%2\n" |
146 | " cs %0,0,0(%4)\n" | 146 | " cs %0,0,%4\n" |
147 | " jl 0b\n" | 147 | " jl 0b\n" |
148 | : "=&d" (old), "=m" (*(int *) addr) | 148 | : "=&d" (old), "=Q" (*(int *) addr) |
149 | : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), | 149 | : "d" (x << shift), "d" (~(255 << shift)), |
150 | "m" (*(int *) addr) : "memory", "cc", "0"); | 150 | "Q" (*(int *) addr) : "memory", "cc", "0"); |
151 | return old >> shift; | 151 | return old >> shift; |
152 | case 2: | 152 | case 2: |
153 | addr = (unsigned long) ptr; | 153 | addr = (unsigned long) ptr; |
154 | shift = (2 ^ (addr & 2)) << 3; | 154 | shift = (2 ^ (addr & 2)) << 3; |
155 | addr ^= addr & 2; | 155 | addr ^= addr & 2; |
156 | asm volatile( | 156 | asm volatile( |
157 | " l %0,0(%4)\n" | 157 | " l %0,%4\n" |
158 | "0: lr 0,%0\n" | 158 | "0: lr 0,%0\n" |
159 | " nr 0,%3\n" | 159 | " nr 0,%3\n" |
160 | " or 0,%2\n" | 160 | " or 0,%2\n" |
161 | " cs %0,0,0(%4)\n" | 161 | " cs %0,0,%4\n" |
162 | " jl 0b\n" | 162 | " jl 0b\n" |
163 | : "=&d" (old), "=m" (*(int *) addr) | 163 | : "=&d" (old), "=Q" (*(int *) addr) |
164 | : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), | 164 | : "d" (x << shift), "d" (~(65535 << shift)), |
165 | "m" (*(int *) addr) : "memory", "cc", "0"); | 165 | "Q" (*(int *) addr) : "memory", "cc", "0"); |
166 | return old >> shift; | 166 | return old >> shift; |
167 | case 4: | 167 | case 4: |
168 | asm volatile( | 168 | asm volatile( |
169 | " l %0,0(%3)\n" | 169 | " l %0,%3\n" |
170 | "0: cs %0,%2,0(%3)\n" | 170 | "0: cs %0,%2,%3\n" |
171 | " jl 0b\n" | 171 | " jl 0b\n" |
172 | : "=&d" (old), "=m" (*(int *) ptr) | 172 | : "=&d" (old), "=Q" (*(int *) ptr) |
173 | : "d" (x), "a" (ptr), "m" (*(int *) ptr) | 173 | : "d" (x), "Q" (*(int *) ptr) |
174 | : "memory", "cc"); | 174 | : "memory", "cc"); |
175 | return old; | 175 | return old; |
176 | #ifdef __s390x__ | 176 | #ifdef __s390x__ |
177 | case 8: | 177 | case 8: |
178 | asm volatile( | 178 | asm volatile( |
179 | " lg %0,0(%3)\n" | 179 | " lg %0,%3\n" |
180 | "0: csg %0,%2,0(%3)\n" | 180 | "0: csg %0,%2,%3\n" |
181 | " jl 0b\n" | 181 | " jl 0b\n" |
182 | : "=&d" (old), "=m" (*(long *) ptr) | 182 | : "=&d" (old), "=m" (*(long *) ptr) |
183 | : "d" (x), "a" (ptr), "m" (*(long *) ptr) | 183 | : "d" (x), "Q" (*(long *) ptr) |
184 | : "memory", "cc"); | 184 | : "memory", "cc"); |
185 | return old; | 185 | return old; |
186 | #endif /* __s390x__ */ | 186 | #endif /* __s390x__ */ |
@@ -215,20 +215,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
215 | shift = (3 ^ (addr & 3)) << 3; | 215 | shift = (3 ^ (addr & 3)) << 3; |
216 | addr ^= addr & 3; | 216 | addr ^= addr & 3; |
217 | asm volatile( | 217 | asm volatile( |
218 | " l %0,0(%4)\n" | 218 | " l %0,%2\n" |
219 | "0: nr %0,%5\n" | 219 | "0: nr %0,%5\n" |
220 | " lr %1,%0\n" | 220 | " lr %1,%0\n" |
221 | " or %0,%2\n" | 221 | " or %0,%2\n" |
222 | " or %1,%3\n" | 222 | " or %1,%3\n" |
223 | " cs %0,%1,0(%4)\n" | 223 | " cs %0,%1,%2\n" |
224 | " jnl 1f\n" | 224 | " jnl 1f\n" |
225 | " xr %1,%0\n" | 225 | " xr %1,%0\n" |
226 | " nr %1,%5\n" | 226 | " nr %1,%5\n" |
227 | " jnz 0b\n" | 227 | " jnz 0b\n" |
228 | "1:" | 228 | "1:" |
229 | : "=&d" (prev), "=&d" (tmp) | 229 | : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) |
230 | : "d" (old << shift), "d" (new << shift), "a" (ptr), | 230 | : "d" (old << shift), "d" (new << shift), |
231 | "d" (~(255 << shift)) | 231 | "d" (~(255 << shift)), "Q" (*(int *) ptr) |
232 | : "memory", "cc"); | 232 | : "memory", "cc"); |
233 | return prev >> shift; | 233 | return prev >> shift; |
234 | case 2: | 234 | case 2: |
@@ -236,33 +236,35 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
236 | shift = (2 ^ (addr & 2)) << 3; | 236 | shift = (2 ^ (addr & 2)) << 3; |
237 | addr ^= addr & 2; | 237 | addr ^= addr & 2; |
238 | asm volatile( | 238 | asm volatile( |
239 | " l %0,0(%4)\n" | 239 | " l %0,%2\n" |
240 | "0: nr %0,%5\n" | 240 | "0: nr %0,%5\n" |
241 | " lr %1,%0\n" | 241 | " lr %1,%0\n" |
242 | " or %0,%2\n" | 242 | " or %0,%2\n" |
243 | " or %1,%3\n" | 243 | " or %1,%3\n" |
244 | " cs %0,%1,0(%4)\n" | 244 | " cs %0,%1,%2\n" |
245 | " jnl 1f\n" | 245 | " jnl 1f\n" |
246 | " xr %1,%0\n" | 246 | " xr %1,%0\n" |
247 | " nr %1,%5\n" | 247 | " nr %1,%5\n" |
248 | " jnz 0b\n" | 248 | " jnz 0b\n" |
249 | "1:" | 249 | "1:" |
250 | : "=&d" (prev), "=&d" (tmp) | 250 | : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) |
251 | : "d" (old << shift), "d" (new << shift), "a" (ptr), | 251 | : "d" (old << shift), "d" (new << shift), |
252 | "d" (~(65535 << shift)) | 252 | "d" (~(65535 << shift)), "Q" (*(int *) ptr) |
253 | : "memory", "cc"); | 253 | : "memory", "cc"); |
254 | return prev >> shift; | 254 | return prev >> shift; |
255 | case 4: | 255 | case 4: |
256 | asm volatile( | 256 | asm volatile( |
257 | " cs %0,%2,0(%3)\n" | 257 | " cs %0,%3,%1\n" |
258 | : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) | 258 | : "=&d" (prev), "=Q" (*(int *) ptr) |
259 | : "0" (old), "d" (new), "Q" (*(int *) ptr) | ||
259 | : "memory", "cc"); | 260 | : "memory", "cc"); |
260 | return prev; | 261 | return prev; |
261 | #ifdef __s390x__ | 262 | #ifdef __s390x__ |
262 | case 8: | 263 | case 8: |
263 | asm volatile( | 264 | asm volatile( |
264 | " csg %0,%2,0(%3)\n" | 265 | " csg %0,%3,%1\n" |
265 | : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) | 266 | : "=&d" (prev), "=Q" (*(long *) ptr) |
267 | : "0" (old), "d" (new), "Q" (*(long *) ptr) | ||
266 | : "memory", "cc"); | 268 | : "memory", "cc"); |
267 | return prev; | 269 | return prev; |
268 | #endif /* __s390x__ */ | 270 | #endif /* __s390x__ */ |
@@ -302,17 +304,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
302 | #define __ctl_load(array, low, high) ({ \ | 304 | #define __ctl_load(array, low, high) ({ \ |
303 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 305 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
304 | asm volatile( \ | 306 | asm volatile( \ |
305 | " lctlg %1,%2,0(%0)\n" \ | 307 | " lctlg %1,%2,%0\n" \ |
306 | : : "a" (&array), "i" (low), "i" (high), \ | 308 | : : "Q" (*(addrtype *)(&array)), \ |
307 | "m" (*(addrtype *)(&array))); \ | 309 | "i" (low), "i" (high)); \ |
308 | }) | 310 | }) |
309 | 311 | ||
310 | #define __ctl_store(array, low, high) ({ \ | 312 | #define __ctl_store(array, low, high) ({ \ |
311 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 313 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
312 | asm volatile( \ | 314 | asm volatile( \ |
313 | " stctg %2,%3,0(%1)\n" \ | 315 | " stctg %1,%2,%0\n" \ |
314 | : "=m" (*(addrtype *)(&array)) \ | 316 | : "=Q" (*(addrtype *)(&array)) \ |
315 | : "a" (&array), "i" (low), "i" (high)); \ | 317 | : "i" (low), "i" (high)); \ |
316 | }) | 318 | }) |
317 | 319 | ||
318 | #else /* __s390x__ */ | 320 | #else /* __s390x__ */ |
@@ -320,17 +322,17 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | |||
320 | #define __ctl_load(array, low, high) ({ \ | 322 | #define __ctl_load(array, low, high) ({ \ |
321 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 323 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
322 | asm volatile( \ | 324 | asm volatile( \ |
323 | " lctl %1,%2,0(%0)\n" \ | 325 | " lctl %1,%2,%0\n" \ |
324 | : : "a" (&array), "i" (low), "i" (high), \ | 326 | : : "Q" (*(addrtype *)(&array)), \ |
325 | "m" (*(addrtype *)(&array))); \ | 327 | "i" (low), "i" (high)); \ |
326 | }) | 328 | }) |
327 | 329 | ||
328 | #define __ctl_store(array, low, high) ({ \ | 330 | #define __ctl_store(array, low, high) ({ \ |
329 | typedef struct { char _[sizeof(array)]; } addrtype; \ | 331 | typedef struct { char _[sizeof(array)]; } addrtype; \ |
330 | asm volatile( \ | 332 | asm volatile( \ |
331 | " stctl %2,%3,0(%1)\n" \ | 333 | " stctl %1,%2,%0\n" \ |
332 | : "=m" (*(addrtype *)(&array)) \ | 334 | : "=Q" (*(addrtype *)(&array)) \ |
333 | : "a" (&array), "i" (low), "i" (high)); \ | 335 | : "i" (low), "i" (high)); \ |
334 | }) | 336 | }) |
335 | 337 | ||
336 | #endif /* __s390x__ */ | 338 | #endif /* __s390x__ */ |
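The 1- and 2-byte __cmpxchg cases have no native instruction, so the assembly works on the containing aligned word: mask the target byte, merge old and new at the computed shift, and retry when an unrelated byte changed underneath. The same idea in portable C11, as a hedged sketch rather than the kernel code:

#include <stdatomic.h>
#include <stdint.h>

/* Emulate a byte-wide cmpxchg with a 32-bit CAS on the aligned word
 * containing the byte; the byte index is big-endian as on s390. */
static uint8_t demo_cmpxchg_u8(_Atomic uint32_t *word, int byte,
			       uint8_t old, uint8_t new)
{
	int shift = (3 - byte) << 3;
	uint32_t mask = 0xffu << shift;
	uint32_t cur = atomic_load(word);
	uint32_t next;

	do {
		if (((cur & mask) >> shift) != old)
			return (cur & mask) >> shift;	/* lost the race */
		next = (cur & ~mask) | ((uint32_t)new << shift);
	} while (!atomic_compare_exchange_weak(word, &cur, next));
	return old;
}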
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 66069e736842..34f0873d6525 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
@@ -73,7 +73,7 @@ struct thread_info { | |||
73 | /* how to get the thread information struct from C */ | 73 | /* how to get the thread information struct from C */ |
74 | static inline struct thread_info *current_thread_info(void) | 74 | static inline struct thread_info *current_thread_info(void) |
75 | { | 75 | { |
76 | return (struct thread_info *)((*(unsigned long *) __LC_KERNEL_STACK)-THREAD_SIZE); | 76 | return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE); |
77 | } | 77 | } |
78 | 78 | ||
79 | #define THREAD_SIZE_ORDER THREAD_ORDER | 79 | #define THREAD_SIZE_ORDER THREAD_ORDER |
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 68d9fea34b4b..f174bdaa6b59 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h | |||
@@ -20,10 +20,10 @@ static inline int set_clock(__u64 time) | |||
20 | int cc; | 20 | int cc; |
21 | 21 | ||
22 | asm volatile( | 22 | asm volatile( |
23 | " sck 0(%2)\n" | 23 | " sck %1\n" |
24 | " ipm %0\n" | 24 | " ipm %0\n" |
25 | " srl %0,28\n" | 25 | " srl %0,28\n" |
26 | : "=d" (cc) : "m" (time), "a" (&time) : "cc"); | 26 | : "=d" (cc) : "Q" (time) : "cc"); |
27 | return cc; | 27 | return cc; |
28 | } | 28 | } |
29 | 29 | ||
@@ -32,21 +32,21 @@ static inline int store_clock(__u64 *time) | |||
32 | int cc; | 32 | int cc; |
33 | 33 | ||
34 | asm volatile( | 34 | asm volatile( |
35 | " stck 0(%2)\n" | 35 | " stck %1\n" |
36 | " ipm %0\n" | 36 | " ipm %0\n" |
37 | " srl %0,28\n" | 37 | " srl %0,28\n" |
38 | : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); | 38 | : "=d" (cc), "=Q" (*time) : : "cc"); |
39 | return cc; | 39 | return cc; |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline void set_clock_comparator(__u64 time) | 42 | static inline void set_clock_comparator(__u64 time) |
43 | { | 43 | { |
44 | asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); | 44 | asm volatile("sckc %0" : : "Q" (time)); |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void store_clock_comparator(__u64 *time) | 47 | static inline void store_clock_comparator(__u64 *time) |
48 | { | 48 | { |
49 | asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); | 49 | asm volatile("stckc %0" : "=Q" (*time)); |
50 | } | 50 | } |
51 | 51 | ||
52 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ | 52 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ |
@@ -57,11 +57,7 @@ static inline unsigned long long get_clock (void) | |||
57 | { | 57 | { |
58 | unsigned long long clk; | 58 | unsigned long long clk; |
59 | 59 | ||
60 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
61 | asm volatile("stck %0" : "=Q" (clk) : : "cc"); | 60 | asm volatile("stck %0" : "=Q" (clk) : : "cc"); |
62 | #else /* __GNUC__ */ | ||
63 | asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); | ||
64 | #endif /* __GNUC__ */ | ||
65 | return clk; | 61 | return clk; |
66 | } | 62 | } |
67 | 63 | ||
@@ -69,13 +65,7 @@ static inline unsigned long long get_clock_xt(void) | |||
69 | { | 65 | { |
70 | unsigned char clk[16]; | 66 | unsigned char clk[16]; |
71 | 67 | ||
72 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
73 | asm volatile("stcke %0" : "=Q" (clk) : : "cc"); | 68 | asm volatile("stcke %0" : "=Q" (clk) : : "cc"); |
74 | #else /* __GNUC__ */ | ||
75 | asm volatile("stcke 0(%1)" : "=m" (clk) | ||
76 | : "a" (clk) : "cc"); | ||
77 | #endif /* __GNUC__ */ | ||
78 | |||
79 | return *((unsigned long long *)&clk[1]); | 69 | return *((unsigned long long *)&clk[1]); |
80 | } | 70 | } |
81 | 71 | ||
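get_clock() and get_clock_xt() now use the "Q" form unconditionally. As a side note for readers of the stored value: one TOD clock unit is 2^-12 microseconds, so the usual conversion to nanoseconds (not something added by this patch) looks like:

/* 1 TOD unit = 1000/4096 ns = 125/512 ns; split the multiplication so
 * large TOD values do not overflow 64 bits. */
static inline unsigned long long demo_tod_to_ns(unsigned long long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}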
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index cbf0a8745bf4..d6b1ed0ec52b 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h | |||
@@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) | |||
265 | return uaccess.copy_from_user(n, from, to); | 265 | return uaccess.copy_from_user(n, from, to); |
266 | } | 266 | } |
267 | 267 | ||
268 | extern void copy_from_user_overflow(void) | ||
269 | #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS | ||
270 | __compiletime_warning("copy_from_user() buffer size is not provably correct") | ||
271 | #endif | ||
272 | ; | ||
273 | |||
268 | /** | 274 | /** |
269 | * copy_from_user: - Copy a block of data from user space. | 275 | * copy_from_user: - Copy a block of data from user space. |
270 | * @to: Destination address, in kernel space. | 276 | * @to: Destination address, in kernel space. |
@@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) | |||
284 | static inline unsigned long __must_check | 290 | static inline unsigned long __must_check |
285 | copy_from_user(void *to, const void __user *from, unsigned long n) | 291 | copy_from_user(void *to, const void __user *from, unsigned long n) |
286 | { | 292 | { |
293 | unsigned int sz = __compiletime_object_size(to); | ||
294 | |||
287 | might_fault(); | 295 | might_fault(); |
296 | if (unlikely(sz != -1 && sz < n)) { | ||
297 | copy_from_user_overflow(); | ||
298 | return n; | ||
299 | } | ||
288 | if (access_ok(VERIFY_READ, from, n)) | 300 | if (access_ok(VERIFY_READ, from, n)) |
289 | n = __copy_from_user(to, from, n); | 301 | n = __copy_from_user(to, from, n); |
290 | else | 302 | else |
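The strict check only does something when gcc can prove the destination size: __compiletime_object_size(to) yields that size or -1, and a constant n larger than it leaves a call to copy_from_user_overflow() in the generated code, which carries the __compiletime_warning() above when CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is enabled. A hedged example of code that would trip it; the function and buffer are made up:

static int demo_read_request(void __user *uptr)
{
	char buf[16];

	/* constant length 32 > sizeof(buf): warns at build time with the
	 * option set, and the call returns n without copying at runtime */
	if (copy_from_user(buf, uptr, 32))
		return -EFAULT;
	return 0;
}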
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 7bdd7c8ebc91..4a76d9480cce 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #define VDSO32_LBASE 0 | 7 | #define VDSO32_LBASE 0 |
8 | #define VDSO64_LBASE 0 | 8 | #define VDSO64_LBASE 0 |
9 | 9 | ||
10 | #define VDSO_VERSION_STRING LINUX_2.6.26 | 10 | #define VDSO_VERSION_STRING LINUX_2.6.29 |
11 | 11 | ||
12 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
13 | 13 | ||
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 683f6381cc59..64230bc392fa 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -29,9 +29,12 @@ obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | |||
29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
30 | 30 | ||
31 | extra-y += head.o init_task.o vmlinux.lds | 31 | extra-y += head.o init_task.o vmlinux.lds |
32 | extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) | ||
32 | 33 | ||
33 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 34 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
34 | obj-$(CONFIG_SMP) += smp.o topology.o | 35 | obj-$(CONFIG_SMP) += smp.o topology.o |
36 | obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \ | ||
37 | switch_cpu.o) | ||
35 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o | 38 | obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o |
36 | obj-$(CONFIG_AUDIT) += audit.o | 39 | obj-$(CONFIG_AUDIT) += audit.o |
37 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 40 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 63e46433e81d..08db736dded0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -4,18 +4,27 @@ | |||
4 | * and format the required data. | 4 | * and format the required data. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/sched.h> | 7 | #define ASM_OFFSETS_C |
8 | |||
8 | #include <linux/kbuild.h> | 9 | #include <linux/kbuild.h> |
10 | #include <linux/sched.h> | ||
9 | #include <asm/vdso.h> | 11 | #include <asm/vdso.h> |
10 | #include <asm/sigp.h> | 12 | #include <asm/sigp.h> |
11 | 13 | ||
14 | /* | ||
15 | * Make sure that the compiler is new enough. We want a compiler that | ||
16 | * is known to work with the "Q" assembler constraint. | ||
17 | */ | ||
18 | #if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) | ||
19 | #error Your compiler is too old; please use version 3.3.3 or newer | ||
20 | #endif | ||
21 | |||
12 | int main(void) | 22 | int main(void) |
13 | { | 23 | { |
14 | DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); | 24 | DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); |
15 | DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); | 25 | DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); |
16 | DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); | 26 | DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); |
17 | DEFINE(__THREAD_mm_segment, | 27 | DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); |
18 | offsetof(struct task_struct, thread.mm_segment)); | ||
19 | BLANK(); | 28 | BLANK(); |
20 | DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); | 29 | DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); |
21 | BLANK(); | 30 | BLANK(); |
@@ -52,18 +61,94 @@ int main(void) | |||
52 | DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); | 61 | DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); |
53 | DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); | 62 | DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); |
54 | DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); | 63 | DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); |
55 | DEFINE(__VDSO_ECTG_BASE, | 64 | DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); |
56 | offsetof(struct vdso_per_cpu_data, ectg_timer_base)); | 65 | DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); |
57 | DEFINE(__VDSO_ECTG_USER, | ||
58 | offsetof(struct vdso_per_cpu_data, ectg_user_time)); | ||
59 | /* constants used by the vdso */ | 66 | /* constants used by the vdso */ |
60 | DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); | 67 | DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); |
61 | DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); | 68 | DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); |
62 | DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); | 69 | DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); |
70 | BLANK(); | ||
63 | /* constants for SIGP */ | 71 | /* constants for SIGP */ |
64 | DEFINE(__SIGP_STOP, sigp_stop); | 72 | DEFINE(__SIGP_STOP, sigp_stop); |
65 | DEFINE(__SIGP_RESTART, sigp_restart); | 73 | DEFINE(__SIGP_RESTART, sigp_restart); |
66 | DEFINE(__SIGP_SENSE, sigp_sense); | 74 | DEFINE(__SIGP_SENSE, sigp_sense); |
67 | DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); | 75 | DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); |
76 | BLANK(); | ||
77 | /* lowcore offsets */ | ||
78 | DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); | ||
79 | DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr)); | ||
80 | DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code)); | ||
81 | DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc)); | ||
82 | DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); | ||
83 | DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); | ||
84 | DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); | ||
85 | DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); | ||
86 | DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); | ||
87 | DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); | ||
88 | DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); | ||
89 | DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); | ||
90 | DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); | ||
91 | DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); | ||
92 | DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); | ||
93 | DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); | ||
94 | DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); | ||
95 | DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); | ||
96 | BLANK(); | ||
97 | DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); | ||
98 | DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); | ||
99 | DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); | ||
100 | DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); | ||
101 | DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); | ||
102 | DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); | ||
103 | DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); | ||
104 | DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); | ||
105 | DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); | ||
106 | DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); | ||
107 | DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); | ||
108 | DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); | ||
109 | DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area)); | ||
110 | DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); | ||
111 | DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); | ||
112 | DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); | ||
113 | DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); | ||
114 | DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); | ||
115 | DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); | ||
116 | DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); | ||
117 | DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer)); | ||
118 | DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); | ||
119 | DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); | ||
120 | DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); | ||
121 | DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); | ||
122 | DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); | ||
123 | DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); | ||
124 | DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); | ||
125 | DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); | ||
126 | DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); | ||
127 | DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce)); | ||
128 | DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id)); | ||
129 | DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); | ||
130 | DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); | ||
131 | DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); | ||
132 | DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); | ||
133 | DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); | ||
134 | DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); | ||
135 | DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); | ||
136 | DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area)); | ||
137 | DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area)); | ||
138 | DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); | ||
139 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); | ||
140 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); | ||
141 | #ifdef CONFIG_32BIT | ||
142 | DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params)); | ||
143 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); | ||
144 | #else /* CONFIG_32BIT */ | ||
145 | DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2)); | ||
146 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); | ||
147 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); | ||
148 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); | ||
149 | DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); | ||
150 | DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); | ||
151 | DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); | ||
152 | #endif /* CONFIG_32BIT */ | ||
68 | return 0; | 153 | return 0; |
69 | } | 154 | } |
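The DEFINE()/BLANK() entries above never run in the kernel: asm-offsets.c is compiled to assembler during the build and the emitted "->SYMBOL value" markers are scraped into the generated asm-offsets.h, which is why the files touched later in this patch (base.S, entry.S, head.S, ftrace.c) can swap <asm/lowcore.h> for <asm/asm-offsets.h> and still see the __LC_* lowcore offsets. Below is a minimal user-space sketch of the same trick; the struct and symbol names are stand-ins, and the DEFINE macro mirrors the kbuild helper rather than quoting it.

/* Minimal user-space sketch of the asm-offsets technique; the struct and
 * symbol names are stand-ins, not the kernel's. */
#include <stddef.h>

struct lowcore_sketch {			/* stand-in for struct _lowcore */
	unsigned int ext_params;
	unsigned short cpu_addr;
	unsigned short ext_int_code;
};

/* Emit "->SYM value" marker lines into the generated assembler output,
 * in the style of the kbuild DEFINE()/BLANK() helpers. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )

int main(void)
{
	DEFINE(__LC_EXT_PARAMS, offsetof(struct lowcore_sketch, ext_params));
	DEFINE(__LC_CPU_ADDRESS, offsetof(struct lowcore_sketch, cpu_addr));
	BLANK();
	return 0;
}

Compiling this with gcc -S and filtering the assembler output for lines starting with "->" yields the numeric offsets; the build then rewrites each marker into a #define, so assembler entry code can use __LC_CPU_ADDRESS as a plain immediate without pulling in the C headers.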
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index dc7e5259770f..15e46ca94335 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S | |||
@@ -6,8 +6,8 @@ | |||
6 | * Michael Holzheu <holzheu@de.ibm.com> | 6 | * Michael Holzheu <holzheu@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/asm-offsets.h> | ||
9 | #include <asm/ptrace.h> | 10 | #include <asm/ptrace.h> |
10 | #include <asm/lowcore.h> | ||
11 | 11 | ||
12 | #ifdef CONFIG_64BIT | 12 | #ifdef CONFIG_64BIT |
13 | 13 | ||
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index db943a7ec513..b39b27d68b45 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -86,10 +86,17 @@ enum { | |||
86 | U4_12, /* 4 bit unsigned value starting at 12 */ | 86 | U4_12, /* 4 bit unsigned value starting at 12 */ |
87 | U4_16, /* 4 bit unsigned value starting at 16 */ | 87 | U4_16, /* 4 bit unsigned value starting at 16 */ |
88 | U4_20, /* 4 bit unsigned value starting at 20 */ | 88 | U4_20, /* 4 bit unsigned value starting at 20 */ |
89 | U4_32, /* 4 bit unsigned value starting at 32 */ | ||
89 | U8_8, /* 8 bit unsigned value starting at 8 */ | 90 | U8_8, /* 8 bit unsigned value starting at 8 */ |
90 | U8_16, /* 8 bit unsigned value starting at 16 */ | 91 | U8_16, /* 8 bit unsigned value starting at 16 */ |
92 | U8_24, /* 8 bit unsigned value starting at 24 */ | ||
93 | U8_32, /* 8 bit unsigned value starting at 32 */ | ||
94 | I8_8, /* 8 bit signed value starting at 8 */ | ||
95 | I8_32, /* 8 bit signed value starting at 32 */ | ||
91 | I16_16, /* 16 bit signed value starting at 16 */ | 96 | I16_16, /* 16 bit signed value starting at 16 */ |
97 | I16_32, /* 16 bit signed value starting at 32 */ | ||
92 | U16_16, /* 16 bit unsigned value starting at 16 */ | 98 | U16_16, /* 16 bit unsigned value starting at 16 */ |
99 | U16_32, /* 16 bit unsigned value starting at 32 */ | ||
93 | J16_16, /* PC relative jump offset at 16 */ | 100 | J16_16, /* PC relative jump offset at 16 */ |
94 | J32_16, /* PC relative long offset at 16 */ | 101 | J32_16, /* PC relative long offset at 16 */ |
95 | I32_16, /* 32 bit signed value starting at 16 */ | 102 | I32_16, /* 32 bit signed value starting at 16 */ |
@@ -104,21 +111,37 @@ enum { | |||
104 | */ | 111 | */ |
105 | enum { | 112 | enum { |
106 | INSTR_INVALID, | 113 | INSTR_INVALID, |
107 | INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, | 114 | INSTR_E, |
108 | INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, | 115 | INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, |
116 | INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, | ||
117 | INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, | ||
118 | INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU, | ||
119 | INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, | ||
109 | INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, | 120 | INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, |
110 | INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, | 121 | INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, |
111 | INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, | 122 | INSTR_RRE_RR, INSTR_RRE_RR_OPT, |
112 | INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, | 123 | INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, |
124 | INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, | ||
125 | INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, | ||
126 | INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, | ||
113 | INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, | 127 | INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, |
114 | INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, | 128 | INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, |
115 | INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, | 129 | INSTR_RSI_RRP, |
116 | INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, | 130 | INSTR_RSL_R0RD, |
117 | INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, | 131 | INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, |
118 | INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, | 132 | INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, |
119 | INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, | 133 | INSTR_RS_RURD, |
120 | INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, | 134 | INSTR_RXE_FRRD, INSTR_RXE_RRRD, |
121 | INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, | 135 | INSTR_RXF_FRRDF, |
136 | INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD, | ||
137 | INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD, | ||
138 | INSTR_SIL_RDI, INSTR_SIL_RDU, | ||
139 | INSTR_SIY_IRD, INSTR_SIY_URD, | ||
140 | INSTR_SI_URD, | ||
141 | INSTR_SSE_RDRD, | ||
142 | INSTR_SSF_RRDRD, | ||
143 | INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, | ||
144 | INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, | ||
122 | INSTR_S_00, INSTR_S_RD, | 145 | INSTR_S_00, INSTR_S_RD, |
123 | }; | 146 | }; |
124 | 147 | ||
@@ -129,7 +152,7 @@ struct operand { | |||
129 | }; | 152 | }; |
130 | 153 | ||
131 | struct insn { | 154 | struct insn { |
132 | const char name[5]; | 155 | const char name[6]; |
133 | unsigned char opfrag; | 156 | unsigned char opfrag; |
134 | unsigned char format; | 157 | unsigned char format; |
135 | }; | 158 | }; |
@@ -170,11 +193,16 @@ static const struct operand operands[] = | |||
170 | [U4_12] = { 4, 12, 0 }, | 193 | [U4_12] = { 4, 12, 0 }, |
171 | [U4_16] = { 4, 16, 0 }, | 194 | [U4_16] = { 4, 16, 0 }, |
172 | [U4_20] = { 4, 20, 0 }, | 195 | [U4_20] = { 4, 20, 0 }, |
196 | [U4_32] = { 4, 32, 0 }, | ||
173 | [U8_8] = { 8, 8, 0 }, | 197 | [U8_8] = { 8, 8, 0 }, |
174 | [U8_16] = { 8, 16, 0 }, | 198 | [U8_16] = { 8, 16, 0 }, |
199 | [U8_24] = { 8, 24, 0 }, | ||
200 | [U8_32] = { 8, 32, 0 }, | ||
175 | [I16_16] = { 16, 16, OPERAND_SIGNED }, | 201 | [I16_16] = { 16, 16, OPERAND_SIGNED }, |
176 | [U16_16] = { 16, 16, 0 }, | 202 | [U16_16] = { 16, 16, 0 }, |
203 | [U16_32] = { 16, 32, 0 }, | ||
177 | [J16_16] = { 16, 16, OPERAND_PCREL }, | 204 | [J16_16] = { 16, 16, OPERAND_PCREL }, |
205 | [I16_32] = { 16, 32, OPERAND_SIGNED }, | ||
178 | [J32_16] = { 32, 16, OPERAND_PCREL }, | 206 | [J32_16] = { 32, 16, OPERAND_PCREL }, |
179 | [I32_16] = { 32, 16, OPERAND_SIGNED }, | 207 | [I32_16] = { 32, 16, OPERAND_SIGNED }, |
180 | [U32_16] = { 32, 16, 0 }, | 208 | [U32_16] = { 32, 16, 0 }, |
@@ -183,82 +211,93 @@ static const struct operand operands[] = | |||
183 | }; | 211 | }; |
184 | 212 | ||
185 | static const unsigned char formats[][7] = { | 213 | static const unsigned char formats[][7] = { |
186 | [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ | 214 | [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, |
187 | [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ | 215 | [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, |
188 | [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ | 216 | [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, |
189 | [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ | 217 | [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, |
190 | [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ | 218 | [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, |
191 | [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ | 219 | [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, |
192 | [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ | 220 | [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, |
193 | [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ | 221 | [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, |
194 | [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ | 222 | [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, |
195 | [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ | 223 | [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, |
196 | [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ | 224 | [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 }, |
197 | [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ | 225 | [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 }, |
198 | [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ | 226 | [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 }, |
199 | [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ | 227 | [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, |
200 | [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ | 228 | [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, |
201 | [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ | 229 | [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, |
202 | [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ | 230 | [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, |
203 | [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ | 231 | [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, |
204 | [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ | 232 | [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, |
205 | [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ | 233 | [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, |
206 | [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ | 234 | [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, |
207 | [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ | 235 | [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, |
208 | [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ | 236 | [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, |
209 | [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ | 237 | [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 }, |
210 | [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, /* e.g. idte */ | 238 | [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, |
211 | [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ | 239 | [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, |
212 | [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ | 240 | [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, |
213 | [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */ | 241 | [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, |
214 | [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ | 242 | [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, |
215 | [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ | 243 | [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 }, |
216 | [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ | 244 | [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 }, |
217 | [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ | 245 | [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, |
218 | [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ | 246 | [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, |
219 | [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ | 247 | [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, |
220 | [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ | 248 | [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, |
221 | [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ | 249 | [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, |
222 | [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ | 250 | [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, |
223 | [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ | 251 | [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, |
224 | [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ | 252 | [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, |
253 | [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, | ||
254 | [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, | ||
255 | [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, | ||
256 | [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, | ||
257 | [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, | ||
258 | [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, | ||
259 | [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, | ||
260 | [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, | ||
261 | [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, | ||
262 | [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, | ||
263 | [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, | ||
264 | [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, | ||
265 | [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, | ||
266 | [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, | ||
267 | [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, | ||
268 | [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, | ||
269 | [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, | ||
270 | [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, | ||
225 | [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, | 271 | [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, |
226 | /* e.g. icmh */ | 272 | [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, |
227 | [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ | 273 | [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, |
228 | [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ | 274 | [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, |
229 | [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ | 275 | [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, |
230 | [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ | 276 | [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, |
231 | [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ | 277 | [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, |
232 | [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ | 278 | [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, |
233 | [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */ | ||
234 | [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */ | ||
235 | [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */ | ||
236 | [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, | 279 | [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, |
237 | /* e.g. madb */ | 280 | [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 }, |
238 | [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ | 281 | [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 }, |
239 | [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ | 282 | [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 }, |
240 | [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ | 283 | [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, |
241 | [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ | 284 | [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, |
242 | [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ | 285 | [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 }, |
243 | [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ | 286 | [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 }, |
244 | [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ | 287 | [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 }, |
245 | [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ | 288 | [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, |
289 | [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, | ||
290 | [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, | ||
291 | [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, | ||
292 | [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, | ||
246 | [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, | 293 | [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, |
247 | /* e.g. mvc */ | ||
248 | [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, | 294 | [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, |
249 | /* e.g. srp */ | ||
250 | [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, | 295 | [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, |
251 | /* e.g. pack */ | ||
252 | [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, | ||
253 | /* e.g. mvck */ | ||
254 | [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, | 296 | [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, |
255 | /* e.g. plo */ | ||
256 | [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, | 297 | [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, |
257 | /* e.g. lmd */ | 298 | [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, |
258 | [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ | 299 | [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, |
259 | [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ | 300 | [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, |
260 | [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, | ||
261 | /* e.g. mvcos */ | ||
262 | }; | 301 | }; |
263 | 302 | ||
264 | static struct insn opcode[] = { | 303 | static struct insn opcode[] = { |
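Each operands[] entry above is a (bits, starting bit, flags) triple, and a formats[] row simply lists which operand slots an instruction format carries. The following stand-alone sketch shows how such a triple can be used to pull one field out of the instruction bytes; extract_field() is a hypothetical helper written for illustration, not the decoder's own routine, and it only handles the narrow fields used in the example.

/* Hypothetical helper for illustration only: extract one operand field
 * described by { bits, starting bit, signedness }, counting bits from the
 * start of the instruction exactly as the operands[] table does. */
#include <stdio.h>

struct operand_sketch {
	int bits;	/* field width in bits */
	int shift;	/* bit offset from the start of the instruction */
	int is_signed;	/* stands in for the OPERAND_SIGNED flag */
};

static int extract_field(const unsigned char *code, struct operand_sketch d)
{
	unsigned long val = 0;
	int i;

	for (i = 0; i < d.bits; i++) {
		int bit = d.shift + i;

		val = (val << 1) | ((code[bit / 8] >> (7 - bit % 8)) & 1);
	}
	if (d.is_signed && (val & (1UL << (d.bits - 1))))
		return (int)((long)val - (long)(1UL << d.bits));	/* sign-extend */
	return (int)val;
}

int main(void)
{
	/* "ahi %r1,-1" encodes as 0xa7 0x1a 0xff 0xff: an R_8 register
	 * field followed by an I16_16 immediate, per INSTR_RI_RI. */
	unsigned char insn[] = { 0xa7, 0x1a, 0xff, 0xff };
	struct operand_sketch r_8 = { 4, 8, 0 };
	struct operand_sketch i16_16 = { 16, 16, 1 };

	printf("ahi %%r%d,%d\n", extract_field(insn, r_8),
	       extract_field(insn, i16_16));
	return 0;
}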
@@ -454,6 +493,8 @@ static struct insn opcode[] = { | |||
454 | static struct insn opcode_01[] = { | 493 | static struct insn opcode_01[] = { |
455 | #ifdef CONFIG_64BIT | 494 | #ifdef CONFIG_64BIT |
456 | { "sam64", 0x0e, INSTR_E }, | 495 | { "sam64", 0x0e, INSTR_E }, |
496 | { "pfpo", 0x0a, INSTR_E }, | ||
497 | { "ptff", 0x04, INSTR_E }, | ||
457 | #endif | 498 | #endif |
458 | { "pr", 0x01, INSTR_E }, | 499 | { "pr", 0x01, INSTR_E }, |
459 | { "upt", 0x02, INSTR_E }, | 500 | { "upt", 0x02, INSTR_E }, |
@@ -519,6 +560,8 @@ static struct insn opcode_b2[] = { | |||
519 | { "cutfu", 0xa7, INSTR_RRF_M0RR }, | 560 | { "cutfu", 0xa7, INSTR_RRF_M0RR }, |
520 | { "stfle", 0xb0, INSTR_S_RD }, | 561 | { "stfle", 0xb0, INSTR_S_RD }, |
521 | { "lpswe", 0xb2, INSTR_S_RD }, | 562 | { "lpswe", 0xb2, INSTR_S_RD }, |
563 | { "srnmt", 0xb9, INSTR_S_RD }, | ||
564 | { "lfas", 0xbd, INSTR_S_RD }, | ||
522 | #endif | 565 | #endif |
523 | { "stidp", 0x02, INSTR_S_RD }, | 566 | { "stidp", 0x02, INSTR_S_RD }, |
524 | { "sck", 0x04, INSTR_S_RD }, | 567 | { "sck", 0x04, INSTR_S_RD }, |
@@ -589,7 +632,6 @@ static struct insn opcode_b2[] = { | |||
589 | { "clst", 0x5d, INSTR_RRE_RR }, | 632 | { "clst", 0x5d, INSTR_RRE_RR }, |
590 | { "srst", 0x5e, INSTR_RRE_RR }, | 633 | { "srst", 0x5e, INSTR_RRE_RR }, |
591 | { "cmpsc", 0x63, INSTR_RRE_RR }, | 634 | { "cmpsc", 0x63, INSTR_RRE_RR }, |
592 | { "cmpsc", 0x63, INSTR_RRE_RR }, | ||
593 | { "siga", 0x74, INSTR_S_RD }, | 635 | { "siga", 0x74, INSTR_S_RD }, |
594 | { "xsch", 0x76, INSTR_S_00 }, | 636 | { "xsch", 0x76, INSTR_S_00 }, |
595 | { "rp", 0x77, INSTR_S_RD }, | 637 | { "rp", 0x77, INSTR_S_RD }, |
@@ -630,6 +672,57 @@ static struct insn opcode_b3[] = { | |||
630 | { "cger", 0xc8, INSTR_RRF_U0RF }, | 672 | { "cger", 0xc8, INSTR_RRF_U0RF }, |
631 | { "cgdr", 0xc9, INSTR_RRF_U0RF }, | 673 | { "cgdr", 0xc9, INSTR_RRF_U0RF }, |
632 | { "cgxr", 0xca, INSTR_RRF_U0RF }, | 674 | { "cgxr", 0xca, INSTR_RRF_U0RF }, |
675 | { "lpdfr", 0x70, INSTR_RRE_FF }, | ||
676 | { "lndfr", 0x71, INSTR_RRE_FF }, | ||
677 | { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, | ||
678 | { "lcdfr", 0x73, INSTR_RRE_FF }, | ||
679 | { "ldgr", 0xc1, INSTR_RRE_FR }, | ||
680 | { "lgdr", 0xcd, INSTR_RRE_RF }, | ||
681 | { "adtr", 0xd2, INSTR_RRR_F0FF }, | ||
682 | { "axtr", 0xda, INSTR_RRR_F0FF }, | ||
683 | { "cdtr", 0xe4, INSTR_RRE_FF }, | ||
684 | { "cxtr", 0xec, INSTR_RRE_FF }, | ||
685 | { "kdtr", 0xe0, INSTR_RRE_FF }, | ||
686 | { "kxtr", 0xe8, INSTR_RRE_FF }, | ||
687 | { "cedtr", 0xf4, INSTR_RRE_FF }, | ||
688 | { "cextr", 0xfc, INSTR_RRE_FF }, | ||
689 | { "cdgtr", 0xf1, INSTR_RRE_FR }, | ||
690 | { "cxgtr", 0xf9, INSTR_RRE_FR }, | ||
691 | { "cdstr", 0xf3, INSTR_RRE_FR }, | ||
692 | { "cxstr", 0xfb, INSTR_RRE_FR }, | ||
693 | { "cdutr", 0xf2, INSTR_RRE_FR }, | ||
694 | { "cxutr", 0xfa, INSTR_RRE_FR }, | ||
695 | { "cgdtr", 0xe1, INSTR_RRF_U0RF }, | ||
696 | { "cgxtr", 0xe9, INSTR_RRF_U0RF }, | ||
697 | { "csdtr", 0xe3, INSTR_RRE_RF }, | ||
698 | { "csxtr", 0xeb, INSTR_RRE_RF }, | ||
699 | { "cudtr", 0xe2, INSTR_RRE_RF }, | ||
700 | { "cuxtr", 0xea, INSTR_RRE_RF }, | ||
701 | { "ddtr", 0xd1, INSTR_RRR_F0FF }, | ||
702 | { "dxtr", 0xd9, INSTR_RRR_F0FF }, | ||
703 | { "eedtr", 0xe5, INSTR_RRE_RF }, | ||
704 | { "eextr", 0xed, INSTR_RRE_RF }, | ||
705 | { "esdtr", 0xe7, INSTR_RRE_RF }, | ||
706 | { "esxtr", 0xef, INSTR_RRE_RF }, | ||
707 | { "iedtr", 0xf6, INSTR_RRF_F0FR }, | ||
708 | { "iextr", 0xfe, INSTR_RRF_F0FR }, | ||
709 | { "ltdtr", 0xd6, INSTR_RRE_FF }, | ||
710 | { "ltxtr", 0xde, INSTR_RRE_FF }, | ||
711 | { "fidtr", 0xd7, INSTR_RRF_UUFF }, | ||
712 | { "fixtr", 0xdf, INSTR_RRF_UUFF }, | ||
713 | { "ldetr", 0xd4, INSTR_RRF_0UFF }, | ||
714 | { "lxdtr", 0xdc, INSTR_RRF_0UFF }, | ||
715 | { "ledtr", 0xd5, INSTR_RRF_UUFF }, | ||
716 | { "ldxtr", 0xdd, INSTR_RRF_UUFF }, | ||
717 | { "mdtr", 0xd0, INSTR_RRR_F0FF }, | ||
718 | { "mxtr", 0xd8, INSTR_RRR_F0FF }, | ||
719 | { "qadtr", 0xf5, INSTR_RRF_FUFF }, | ||
720 | { "qaxtr", 0xfd, INSTR_RRF_FUFF }, | ||
721 | { "rrdtr", 0xf7, INSTR_RRF_FFRU }, | ||
722 | { "rrxtr", 0xff, INSTR_RRF_FFRU }, | ||
723 | { "sfasr", 0x85, INSTR_RRE_R0 }, | ||
724 | { "sdtr", 0xd3, INSTR_RRR_F0FF }, | ||
725 | { "sxtr", 0xdb, INSTR_RRR_F0FF }, | ||
633 | #endif | 726 | #endif |
634 | { "lpebr", 0x00, INSTR_RRE_FF }, | 727 | { "lpebr", 0x00, INSTR_RRE_FF }, |
635 | { "lnebr", 0x01, INSTR_RRE_FF }, | 728 | { "lnebr", 0x01, INSTR_RRE_FF }, |
@@ -780,6 +873,14 @@ static struct insn opcode_b9[] = { | |||
780 | { "cu24", 0xb1, INSTR_RRF_M0RR }, | 873 | { "cu24", 0xb1, INSTR_RRF_M0RR }, |
781 | { "cu41", 0xb2, INSTR_RRF_M0RR }, | 874 | { "cu41", 0xb2, INSTR_RRF_M0RR }, |
782 | { "cu42", 0xb3, INSTR_RRF_M0RR }, | 875 | { "cu42", 0xb3, INSTR_RRF_M0RR }, |
876 | { "crt", 0x72, INSTR_RRF_U0RR }, | ||
877 | { "cgrt", 0x60, INSTR_RRF_U0RR }, | ||
878 | { "clrt", 0x73, INSTR_RRF_U0RR }, | ||
879 | { "clgrt", 0x61, INSTR_RRF_U0RR }, | ||
880 | { "ptf", 0xa2, INSTR_RRE_R0 }, | ||
881 | { "pfmf", 0xaf, INSTR_RRE_RR }, | ||
882 | { "trte", 0xbf, INSTR_RRF_M0RR }, | ||
883 | { "trtre", 0xbd, INSTR_RRF_M0RR }, | ||
783 | #endif | 884 | #endif |
784 | { "kmac", 0x1e, INSTR_RRE_RR }, | 885 | { "kmac", 0x1e, INSTR_RRE_RR }, |
785 | { "lrvr", 0x1f, INSTR_RRE_RR }, | 886 | { "lrvr", 0x1f, INSTR_RRE_RR }, |
@@ -835,6 +936,43 @@ static struct insn opcode_c2[] = { | |||
835 | { "cfi", 0x0d, INSTR_RIL_RI }, | 936 | { "cfi", 0x0d, INSTR_RIL_RI }, |
836 | { "clgfi", 0x0e, INSTR_RIL_RU }, | 937 | { "clgfi", 0x0e, INSTR_RIL_RU }, |
837 | { "clfi", 0x0f, INSTR_RIL_RU }, | 938 | { "clfi", 0x0f, INSTR_RIL_RU }, |
939 | { "msfi", 0x01, INSTR_RIL_RI }, | ||
940 | { "msgfi", 0x00, INSTR_RIL_RI }, | ||
941 | #endif | ||
942 | { "", 0, INSTR_INVALID } | ||
943 | }; | ||
944 | |||
945 | static struct insn opcode_c4[] = { | ||
946 | #ifdef CONFIG_64BIT | ||
947 | { "lrl", 0x0d, INSTR_RIL_RP }, | ||
948 | { "lgrl", 0x08, INSTR_RIL_RP }, | ||
949 | { "lgfrl", 0x0c, INSTR_RIL_RP }, | ||
950 | { "lhrl", 0x05, INSTR_RIL_RP }, | ||
951 | { "lghrl", 0x04, INSTR_RIL_RP }, | ||
952 | { "llgfrl", 0x0e, INSTR_RIL_RP }, | ||
953 | { "llhrl", 0x02, INSTR_RIL_RP }, | ||
954 | { "llghrl", 0x06, INSTR_RIL_RP }, | ||
955 | { "strl", 0x0f, INSTR_RIL_RP }, | ||
956 | { "stgrl", 0x0b, INSTR_RIL_RP }, | ||
957 | { "sthrl", 0x07, INSTR_RIL_RP }, | ||
958 | #endif | ||
959 | { "", 0, INSTR_INVALID } | ||
960 | }; | ||
961 | |||
962 | static struct insn opcode_c6[] = { | ||
963 | #ifdef CONFIG_64BIT | ||
964 | { "crl", 0x0d, INSTR_RIL_RP }, | ||
965 | { "cgrl", 0x08, INSTR_RIL_RP }, | ||
966 | { "cgfrl", 0x0c, INSTR_RIL_RP }, | ||
967 | { "chrl", 0x05, INSTR_RIL_RP }, | ||
968 | { "cghrl", 0x04, INSTR_RIL_RP }, | ||
969 | { "clrl", 0x0f, INSTR_RIL_RP }, | ||
970 | { "clgrl", 0x0a, INSTR_RIL_RP }, | ||
971 | { "clgfrl", 0x0e, INSTR_RIL_RP }, | ||
972 | { "clhrl", 0x07, INSTR_RIL_RP }, | ||
973 | { "clghrl", 0x06, INSTR_RIL_RP }, | ||
974 | { "pfdrl", 0x02, INSTR_RIL_UP }, | ||
975 | { "exrl", 0x00, INSTR_RIL_RP }, | ||
838 | #endif | 976 | #endif |
839 | { "", 0, INSTR_INVALID } | 977 | { "", 0, INSTR_INVALID } |
840 | }; | 978 | }; |
@@ -842,6 +980,8 @@ static struct insn opcode_c2[] = { | |||
842 | static struct insn opcode_c8[] = { | 980 | static struct insn opcode_c8[] = { |
843 | #ifdef CONFIG_64BIT | 981 | #ifdef CONFIG_64BIT |
844 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, | 982 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, |
983 | { "ectg", 0x01, INSTR_SSF_RRDRD }, | ||
984 | { "csst", 0x02, INSTR_SSF_RRDRD }, | ||
845 | #endif | 985 | #endif |
846 | { "", 0, INSTR_INVALID } | 986 | { "", 0, INSTR_INVALID } |
847 | }; | 987 | }; |
@@ -917,6 +1057,12 @@ static struct insn opcode_e3[] = { | |||
917 | { "llgh", 0x91, INSTR_RXY_RRRD }, | 1057 | { "llgh", 0x91, INSTR_RXY_RRRD }, |
918 | { "llc", 0x94, INSTR_RXY_RRRD }, | 1058 | { "llc", 0x94, INSTR_RXY_RRRD }, |
919 | { "llh", 0x95, INSTR_RXY_RRRD }, | 1059 | { "llh", 0x95, INSTR_RXY_RRRD }, |
1060 | { "cgh", 0x34, INSTR_RXY_RRRD }, | ||
1061 | { "laey", 0x75, INSTR_RXY_RRRD }, | ||
1062 | { "ltgf", 0x32, INSTR_RXY_RRRD }, | ||
1063 | { "mfy", 0x5c, INSTR_RXY_RRRD }, | ||
1064 | { "mhy", 0x7c, INSTR_RXY_RRRD }, | ||
1065 | { "pfd", 0x36, INSTR_RXY_URRD }, | ||
920 | #endif | 1066 | #endif |
921 | { "lrv", 0x1e, INSTR_RXY_RRRD }, | 1067 | { "lrv", 0x1e, INSTR_RXY_RRRD }, |
922 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, | 1068 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, |
@@ -931,6 +1077,15 @@ static struct insn opcode_e3[] = { | |||
931 | static struct insn opcode_e5[] = { | 1077 | static struct insn opcode_e5[] = { |
932 | #ifdef CONFIG_64BIT | 1078 | #ifdef CONFIG_64BIT |
933 | { "strag", 0x02, INSTR_SSE_RDRD }, | 1079 | { "strag", 0x02, INSTR_SSE_RDRD }, |
1080 | { "chhsi", 0x54, INSTR_SIL_RDI }, | ||
1081 | { "chsi", 0x5c, INSTR_SIL_RDI }, | ||
1082 | { "cghsi", 0x58, INSTR_SIL_RDI }, | ||
1083 | { "clhhsi", 0x55, INSTR_SIL_RDU }, | ||
1084 | { "clfhsi", 0x5d, INSTR_SIL_RDU }, | ||
1085 | { "clghsi", 0x59, INSTR_SIL_RDU }, | ||
1086 | { "mvhhi", 0x44, INSTR_SIL_RDI }, | ||
1087 | { "mvhi", 0x4c, INSTR_SIL_RDI }, | ||
1088 | { "mvghi", 0x48, INSTR_SIL_RDI }, | ||
934 | #endif | 1089 | #endif |
935 | { "lasp", 0x00, INSTR_SSE_RDRD }, | 1090 | { "lasp", 0x00, INSTR_SSE_RDRD }, |
936 | { "tprot", 0x01, INSTR_SSE_RDRD }, | 1091 | { "tprot", 0x01, INSTR_SSE_RDRD }, |
@@ -977,6 +1132,11 @@ static struct insn opcode_eb[] = { | |||
977 | { "lmy", 0x98, INSTR_RSY_RRRD }, | 1132 | { "lmy", 0x98, INSTR_RSY_RRRD }, |
978 | { "lamy", 0x9a, INSTR_RSY_AARD }, | 1133 | { "lamy", 0x9a, INSTR_RSY_AARD }, |
979 | { "stamy", 0x9b, INSTR_RSY_AARD }, | 1134 | { "stamy", 0x9b, INSTR_RSY_AARD }, |
1135 | { "asi", 0x6a, INSTR_SIY_IRD }, | ||
1136 | { "agsi", 0x7a, INSTR_SIY_IRD }, | ||
1137 | { "alsi", 0x6e, INSTR_SIY_IRD }, | ||
1138 | { "algsi", 0x7e, INSTR_SIY_IRD }, | ||
1139 | { "ecag", 0x4c, INSTR_RSY_RRRD }, | ||
980 | #endif | 1140 | #endif |
981 | { "rll", 0x1d, INSTR_RSY_RRRD }, | 1141 | { "rll", 0x1d, INSTR_RSY_RRRD }, |
982 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, | 1142 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, |
@@ -988,6 +1148,30 @@ static struct insn opcode_ec[] = { | |||
988 | #ifdef CONFIG_64BIT | 1148 | #ifdef CONFIG_64BIT |
989 | { "brxhg", 0x44, INSTR_RIE_RRP }, | 1149 | { "brxhg", 0x44, INSTR_RIE_RRP }, |
990 | { "brxlg", 0x45, INSTR_RIE_RRP }, | 1150 | { "brxlg", 0x45, INSTR_RIE_RRP }, |
1151 | { "crb", 0xf6, INSTR_RRS_RRRDU }, | ||
1152 | { "cgrb", 0xe4, INSTR_RRS_RRRDU }, | ||
1153 | { "crj", 0x76, INSTR_RIE_RRPU }, | ||
1154 | { "cgrj", 0x64, INSTR_RIE_RRPU }, | ||
1155 | { "cib", 0xfe, INSTR_RIS_RURDI }, | ||
1156 | { "cgib", 0xfc, INSTR_RIS_RURDI }, | ||
1157 | { "cij", 0x7e, INSTR_RIE_RUPI }, | ||
1158 | { "cgij", 0x7c, INSTR_RIE_RUPI }, | ||
1159 | { "cit", 0x72, INSTR_RIE_R0IU }, | ||
1160 | { "cgit", 0x70, INSTR_RIE_R0IU }, | ||
1161 | { "clrb", 0xf7, INSTR_RRS_RRRDU }, | ||
1162 | { "clgrb", 0xe5, INSTR_RRS_RRRDU }, | ||
1163 | { "clrj", 0x77, INSTR_RIE_RRPU }, | ||
1164 | { "clgrj", 0x65, INSTR_RIE_RRPU }, | ||
1165 | { "clib", 0xff, INSTR_RIS_RURDU }, | ||
1166 | { "clgib", 0xfd, INSTR_RIS_RURDU }, | ||
1167 | { "clij", 0x7f, INSTR_RIE_RUPU }, | ||
1168 | { "clgij", 0x7d, INSTR_RIE_RUPU }, | ||
1169 | { "clfit", 0x73, INSTR_RIE_R0UU }, | ||
1170 | { "clgit", 0x71, INSTR_RIE_R0UU }, | ||
1171 | { "rnsbg", 0x54, INSTR_RIE_RRUUU }, | ||
1172 | { "rxsbg", 0x57, INSTR_RIE_RRUUU }, | ||
1173 | { "rosbg", 0x56, INSTR_RIE_RRUUU }, | ||
1174 | { "risbg", 0x55, INSTR_RIE_RRUUU }, | ||
991 | #endif | 1175 | #endif |
992 | { "", 0, INSTR_INVALID } | 1176 | { "", 0, INSTR_INVALID } |
993 | }; | 1177 | }; |
@@ -1004,6 +1188,16 @@ static struct insn opcode_ed[] = { | |||
1004 | { "ldy", 0x65, INSTR_RXY_FRRD }, | 1188 | { "ldy", 0x65, INSTR_RXY_FRRD }, |
1005 | { "stey", 0x66, INSTR_RXY_FRRD }, | 1189 | { "stey", 0x66, INSTR_RXY_FRRD }, |
1006 | { "stdy", 0x67, INSTR_RXY_FRRD }, | 1190 | { "stdy", 0x67, INSTR_RXY_FRRD }, |
1191 | { "sldt", 0x40, INSTR_RXF_FRRDF }, | ||
1192 | { "slxt", 0x48, INSTR_RXF_FRRDF }, | ||
1193 | { "srdt", 0x41, INSTR_RXF_FRRDF }, | ||
1194 | { "srxt", 0x49, INSTR_RXF_FRRDF }, | ||
1195 | { "tdcet", 0x50, INSTR_RXE_FRRD }, | ||
1196 | { "tdcdt", 0x54, INSTR_RXE_FRRD }, | ||
1197 | { "tdcxt", 0x58, INSTR_RXE_FRRD }, | ||
1198 | { "tdget", 0x51, INSTR_RXE_FRRD }, | ||
1199 | { "tdgdt", 0x55, INSTR_RXE_FRRD }, | ||
1200 | { "tdgxt", 0x59, INSTR_RXE_FRRD }, | ||
1007 | #endif | 1201 | #endif |
1008 | { "ldeb", 0x04, INSTR_RXE_FRRD }, | 1202 | { "ldeb", 0x04, INSTR_RXE_FRRD }, |
1009 | { "lxdb", 0x05, INSTR_RXE_FRRD }, | 1203 | { "lxdb", 0x05, INSTR_RXE_FRRD }, |
@@ -1037,6 +1231,7 @@ static struct insn opcode_ed[] = { | |||
1037 | { "mae", 0x2e, INSTR_RXF_FRRDF }, | 1231 | { "mae", 0x2e, INSTR_RXF_FRRDF }, |
1038 | { "mse", 0x2f, INSTR_RXF_FRRDF }, | 1232 | { "mse", 0x2f, INSTR_RXF_FRRDF }, |
1039 | { "sqe", 0x34, INSTR_RXE_FRRD }, | 1233 | { "sqe", 0x34, INSTR_RXE_FRRD }, |
1234 | { "sqd", 0x35, INSTR_RXE_FRRD }, | ||
1040 | { "mee", 0x37, INSTR_RXE_FRRD }, | 1235 | { "mee", 0x37, INSTR_RXE_FRRD }, |
1041 | { "mad", 0x3e, INSTR_RXF_FRRDF }, | 1236 | { "mad", 0x3e, INSTR_RXF_FRRDF }, |
1042 | { "msd", 0x3f, INSTR_RXF_FRRDF }, | 1237 | { "msd", 0x3f, INSTR_RXF_FRRDF }, |
@@ -1117,6 +1312,12 @@ static struct insn *find_insn(unsigned char *code) | |||
1117 | case 0xc2: | 1312 | case 0xc2: |
1118 | table = opcode_c2; | 1313 | table = opcode_c2; |
1119 | break; | 1314 | break; |
1315 | case 0xc4: | ||
1316 | table = opcode_c4; | ||
1317 | break; | ||
1318 | case 0xc6: | ||
1319 | table = opcode_c6; | ||
1320 | break; | ||
1120 | case 0xc8: | 1321 | case 0xc8: |
1121 | table = opcode_c8; | 1322 | table = opcode_c8; |
1122 | break; | 1323 | break; |
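The new opcode_c4 and opcode_c6 tables plug into the two-level scheme the switch above extends: the first instruction byte selects a table, the opcode fragment is compared against each entry's opfrag until the terminating "" entry is reached, and the matched entry's format index then drives operand decoding. Here is a small stand-alone sketch of that lookup; the table contents and the fixed low-nibble fragment extraction are simplifications for the 0xc6 case only, not the kernel's tables.

/* Stand-alone sketch of the two-level lookup: byte 0 selects a table,
 * then the table is scanned until an entry's opcode fragment matches. */
#include <stdio.h>

struct insn_sketch {
	const char *name;
	unsigned char opfrag;	/* opcode extension within the table */
};

static const struct insn_sketch opcode_c6_sketch[] = {
	{ "exrl",  0x00 },
	{ "pfdrl", 0x02 },
	{ "crl",   0x0d },
	{ NULL,    0x00 }	/* terminator, like the "" entries above */
};

static const char *find_insn_sketch(const unsigned char *code)
{
	const struct insn_sketch *table;
	unsigned char opfrag;
	int i;

	switch (code[0]) {
	case 0xc6:
		table = opcode_c6_sketch;
		opfrag = code[1] & 0x0f;	/* RIL: extension in bits 12-15 */
		break;
	default:
		return NULL;
	}
	for (i = 0; table[i].name; i++)
		if (table[i].opfrag == opfrag)
			return table[i].name;
	return NULL;
}

int main(void)
{
	/* exrl %r1,<target>: 0xc6 with extension 0 in the low nibble of byte 1 */
	unsigned char code[] = { 0xc6, 0x10, 0x00, 0x00, 0x00, 0x04 };
	const char *name = find_insn_sketch(code);

	printf("%s\n", name ? name : "unknown");
	return 0;
}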
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index e49e9e0c69fd..31d618a443af 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -214,10 +214,13 @@ static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); | |||
214 | 214 | ||
215 | static noinline __init void detect_machine_type(void) | 215 | static noinline __init void detect_machine_type(void) |
216 | { | 216 | { |
217 | /* No VM information? Looks like LPAR */ | 217 | /* Check current-configuration-level */ |
218 | if (stsi(&vmms, 3, 2, 2) == -ENOSYS) | 218 | if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) { |
219 | S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; | ||
219 | return; | 220 | return; |
220 | if (!vmms.count) | 221 | } |
222 | /* Get virtual-machine cpu information. */ | ||
223 | if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count) | ||
221 | return; | 224 | return; |
222 | 225 | ||
223 | /* Running under KVM? If not we assume z/VM */ | 226 | /* Running under KVM? If not we assume z/VM */ |
@@ -402,8 +405,19 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) | |||
402 | 405 | ||
403 | static void __init setup_boot_command_line(void) | 406 | static void __init setup_boot_command_line(void) |
404 | { | 407 | { |
408 | int i; | ||
409 | |||
410 | /* convert arch command line to ascii */ | ||
411 | for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++) | ||
412 | if (COMMAND_LINE[i] & 0x80) | ||
413 | break; | ||
414 | if (i < ARCH_COMMAND_LINE_SIZE) | ||
415 | EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); | ||
416 | COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0; | ||
417 | |||
405 | /* copy arch command line */ | 418 | /* copy arch command line */ |
406 | strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); | 419 | strlcpy(boot_command_line, strstrip(COMMAND_LINE), |
420 | ARCH_COMMAND_LINE_SIZE); | ||
407 | 421 | ||
408 | /* append IPL PARM data to the boot command line */ | 422 | /* append IPL PARM data to the boot command line */ |
409 | if (MACHINE_IS_VM) | 423 | if (MACHINE_IS_VM) |
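The reworked setup_boot_command_line() takes over the EBCDIC translation that used to live in head.S: it scans the buffer for any byte with the high-order bit set (plain ASCII never has it), converts the whole buffer if one is found, forces NUL termination, and strips surrounding whitespace before copying. Below is a hedged user-space sketch of that flow; the translation helper is deliberately tiny and partial, standing in for the kernel's full EBCASC table.

/* User-space sketch of the command-line sanitizing above.  The translation
 * helper is deliberately tiny and partial; the kernel converts with its
 * full EBCDIC-to-ASCII table via EBCASC(). */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 32

static unsigned char ebc_to_asc(unsigned char c)
{
	switch (c) {			/* just enough code points for the demo */
	case 0x40: return ' ';
	case 0x7e: return '=';
	case 0x84: return 'd';
	case 0x96: return 'o';
	case 0x99: return 'r';
	default:   return c;		/* leave everything else alone here */
	}
}

int main(void)
{
	/* "ro" in EBCDIC followed by trailing blanks */
	unsigned char cmdline[CMDLINE_SIZE] = { 0x99, 0x96, 0x40, 0x40, 0 };
	int i;

	/* Plain ASCII never sets the high-order bit, so finding one such
	 * byte means the buffer has not been converted yet. */
	for (i = 0; i < CMDLINE_SIZE; i++)
		if (cmdline[i] & 0x80)
			break;
	if (i < CMDLINE_SIZE)
		for (i = 0; i < CMDLINE_SIZE; i++)
			cmdline[i] = ebc_to_asc(cmdline[i]);
	cmdline[CMDLINE_SIZE - 1] = 0;

	/* Trim trailing blanks; the kernel goes through strstrip(), which
	 * also drops leading whitespace. */
	for (i = (int)strlen((char *)cmdline) - 1; i >= 0 && isspace(cmdline[i]); i--)
		cmdline[i] = 0;

	printf("command line: '%s'\n", cmdline);
	return 0;
}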
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index e8ef21c51bbe..4348f9bc5393 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <asm/cache.h> | 15 | #include <asm/cache.h> |
16 | #include <asm/lowcore.h> | ||
17 | #include <asm/errno.h> | 16 | #include <asm/errno.h> |
18 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
19 | #include <asm/thread_info.h> | 18 | #include <asm/thread_info.h> |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index f33658f09dd7..29fd0f1e6ec4 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -9,11 +9,9 @@ | |||
9 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 9 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/sys.h> | ||
13 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
15 | #include <asm/cache.h> | 14 | #include <asm/cache.h> |
16 | #include <asm/lowcore.h> | ||
17 | #include <asm/errno.h> | 15 | #include <asm/errno.h> |
18 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
19 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 5a82bc68193e..6a83d0581317 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <trace/syscall.h> | 15 | #include <trace/syscall.h> |
16 | #include <asm/lowcore.h> | 16 | #include <asm/asm-offsets.h> |
17 | 17 | ||
18 | #ifdef CONFIG_DYNAMIC_FTRACE | 18 | #ifdef CONFIG_DYNAMIC_FTRACE |
19 | 19 | ||
@@ -200,13 +200,3 @@ out: | |||
200 | return parent; | 200 | return parent; |
201 | } | 201 | } |
202 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 202 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
203 | |||
204 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
205 | |||
206 | extern unsigned int sys_call_table[]; | ||
207 | |||
208 | unsigned long __init arch_syscall_addr(int nr) | ||
209 | { | ||
210 | return (unsigned long)sys_call_table[nr]; | ||
211 | } | ||
212 | #endif | ||
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index c52b4f7742fa..ca4a62bd862f 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 1999,2009 | 2 | * Copyright IBM Corp. 1999,2010 |
3 | * | 3 | * |
4 | * Author(s): Hartmut Penner <hp@de.ibm.com> | 4 | * Author(s): Hartmut Penner <hp@de.ibm.com> |
5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -22,12 +22,9 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <asm/setup.h> | ||
26 | #include <asm/lowcore.h> | ||
27 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
28 | #include <asm/thread_info.h> | 26 | #include <asm/thread_info.h> |
29 | #include <asm/page.h> | 27 | #include <asm/page.h> |
30 | #include <asm/cpu.h> | ||
31 | 28 | ||
32 | #ifdef CONFIG_64BIT | 29 | #ifdef CONFIG_64BIT |
33 | #define ARCH_OFFSET 4 | 30 | #define ARCH_OFFSET 4 |
@@ -288,19 +285,7 @@ iplstart: | |||
288 | bz .Lagain1 # skip dataset trailer | 285 | bz .Lagain1 # skip dataset trailer |
289 | la %r5,0(%r4,%r2) | 286 | la %r5,0(%r4,%r2) |
290 | lr %r3,%r2 | 287 | lr %r3,%r2 |
291 | .Lidebc: | 288 | la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line |
292 | tm 0(%r5),0x80 # high order bit set ? | ||
293 | bo .Ldocv # yes -> convert from EBCDIC | ||
294 | ahi %r5,-1 | ||
295 | bct %r3,.Lidebc | ||
296 | b .Lnocv | ||
297 | .Ldocv: | ||
298 | l %r3,.Lcvtab | ||
299 | tr 0(256,%r4),0(%r3) # convert parameters to ascii | ||
300 | tr 256(256,%r4),0(%r3) | ||
301 | tr 512(256,%r4),0(%r3) | ||
302 | tr 768(122,%r4),0(%r3) | ||
303 | .Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line | ||
304 | mvc 0(256,%r3),0(%r4) | 289 | mvc 0(256,%r3),0(%r4) |
305 | mvc 256(256,%r3),256(%r4) | 290 | mvc 256(256,%r3),256(%r4) |
306 | mvc 512(256,%r3),512(%r4) | 291 | mvc 512(256,%r3),512(%r4) |
@@ -384,7 +369,6 @@ iplstart: | |||
384 | .Linitrd:.long _end + 0x400000 # default address of initrd | 369 | .Linitrd:.long _end + 0x400000 # default address of initrd |
385 | .Lparm: .long PARMAREA | 370 | .Lparm: .long PARMAREA |
386 | .Lstartup: .long startup | 371 | .Lstartup: .long startup |
387 | .Lcvtab:.long _ebcasc # ebcdic to ascii table | ||
388 | .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 | 372 | .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 |
389 | .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 | 373 | .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 |
390 | .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" | 374 | .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" |
@@ -417,13 +401,10 @@ start: | |||
417 | .sk8x8: | 401 | .sk8x8: |
418 | mvc 0(240,%r8),0(%r9) # copy iplparms into buffer | 402 | mvc 0(240,%r8),0(%r9) # copy iplparms into buffer |
419 | .gotr: | 403 | .gotr: |
420 | l %r10,.tbl # EBCDIC to ASCII table | ||
421 | tr 0(240,%r8),0(%r10) | ||
422 | slr %r0,%r0 | 404 | slr %r0,%r0 |
423 | st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) | 405 | st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) |
424 | st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) | 406 | st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) |
425 | j startup # continue with startup | 407 | j startup # continue with startup |
426 | .tbl: .long _ebcasc # translate table | ||
427 | .cmd: .long COMMAND_LINE # address of command line buffer | 408 | .cmd: .long COMMAND_LINE # address of command line buffer |
428 | .parm: .long PARMAREA | 409 | .parm: .long PARMAREA |
429 | .lowcase: | 410 | .lowcase: |
@@ -467,16 +448,15 @@ start: | |||
467 | # or linload or SALIPL | 448 | # or linload or SALIPL |
468 | # | 449 | # |
469 | .org 0x10000 | 450 | .org 0x10000 |
470 | startup:basr %r13,0 # get base | 451 | .globl startup |
452 | startup: | ||
453 | basr %r13,0 # get base | ||
471 | .LPG0: | 454 | .LPG0: |
472 | xc 0x200(256),0x200 # partially clear lowcore | 455 | xc 0x200(256),0x200 # partially clear lowcore |
473 | xc 0x300(256),0x300 | 456 | xc 0x300(256),0x300 |
474 | l %r1,5f-.LPG0(%r13) | 457 | stck __LC_LAST_UPDATE_CLOCK |
475 | stck 0(%r1) | 458 | spt 5f-.LPG0(%r13) |
476 | spt 6f-.LPG0(%r13) | 459 | mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) |
477 | mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1) | ||
478 | mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) | ||
479 | mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) | ||
480 | #ifndef CONFIG_MARCH_G5 | 460 | #ifndef CONFIG_MARCH_G5 |
481 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} | 461 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} |
482 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST | 462 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST |
@@ -494,7 +474,6 @@ startup:basr %r13,0 # get base | |||
494 | cl %r0,2f+12-.LPG0(%r13) | 474 | cl %r0,2f+12-.LPG0(%r13) |
495 | je 3f | 475 | je 3f |
496 | 1: l %r15,.Lstack-.LPG0(%r13) | 476 | 1: l %r15,.Lstack-.LPG0(%r13) |
497 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | ||
498 | ahi %r15,-96 | 477 | ahi %r15,-96 |
499 | la %r2,.Lals_string-.LPG0(%r13) | 478 | la %r2,.Lals_string-.LPG0(%r13) |
500 | l %r3,.Lsclp_print-.LPG0(%r13) | 479 | l %r3,.Lsclp_print-.LPG0(%r13) |
@@ -505,7 +484,7 @@ startup:basr %r13,0 # get base | |||
505 | .Lsclp_print: | 484 | .Lsclp_print: |
506 | .long _sclp_print_early | 485 | .long _sclp_print_early |
507 | .Lstack: | 486 | .Lstack: |
508 | .long init_thread_union | 487 | .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) |
509 | .align 16 | 488 | .align 16 |
510 | 2: .long 0x000a0000,0x8badcccc | 489 | 2: .long 0x000a0000,0x8badcccc |
511 | #if defined(CONFIG_64BIT) | 490 | #if defined(CONFIG_64BIT) |
@@ -532,13 +511,22 @@ startup:basr %r13,0 # get base | |||
532 | 3: | 511 | 3: |
533 | #endif | 512 | #endif |
534 | 513 | ||
514 | #ifdef CONFIG_64BIT | ||
515 | mvi __LC_AR_MODE_ID,1 # set esame flag | ||
516 | slr %r0,%r0 # set cpuid to zero | ||
517 | lhi %r1,2 # mode 2 = esame (dump) | ||
518 | sigp %r1,%r0,0x12 # switch to esame mode | ||
519 | sam64 # switch to 64 bit mode | ||
520 | jg startup_continue | ||
521 | #else | ||
522 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
535 | l %r13,4f-.LPG0(%r13) | 523 | l %r13,4f-.LPG0(%r13) |
536 | b 0(%r13) | 524 | b 0(%r13) |
537 | .align 4 | 525 | .align 8 |
538 | 4: .long startup_continue | 526 | 4: .long startup_continue |
539 | 5: .long sched_clock_base_cc | 527 | #endif |
540 | .align 8 | 528 | .align 8 |
541 | 6: .long 0x7fffffff,0xffffffff | 529 | 5: .long 0x7fffffff,0xffffffff |
542 | 530 | ||
543 | # | 531 | # |
544 | # params at 10400 (setup.h) | 532 | # params at 10400 (setup.h) |
@@ -552,8 +540,4 @@ startup:basr %r13,0 # get base | |||
552 | .byte "root=/dev/ram0 ro" | 540 | .byte "root=/dev/ram0 ro" |
553 | .byte 0 | 541 | .byte 0 |
554 | 542 | ||
555 | #ifdef CONFIG_64BIT | 543 | .org 0x11000 |
556 | #include "head64.S" | ||
557 | #else | ||
558 | #include "head31.S" | ||
559 | #endif | ||
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 602b508cd4c4..1bbcc499d455 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/head31.S | 2 | * arch/s390/kernel/head31.S |
3 | * | 3 | * |
4 | * Copyright (C) IBM Corp. 2005,2006 | 4 | * Copyright (C) IBM Corp. 2005,2010 |
5 | * | 5 | * |
6 | * Author(s): Hartmut Penner <hp@de.ibm.com> | 6 | * Author(s): Hartmut Penner <hp@de.ibm.com> |
7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -10,13 +10,19 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | .org 0x11000 | 13 | #include <linux/init.h> |
14 | #include <asm/asm-offsets.h> | ||
15 | #include <asm/thread_info.h> | ||
16 | #include <asm/page.h> | ||
14 | 17 | ||
18 | __HEAD | ||
19 | .globl startup_continue | ||
15 | startup_continue: | 20 | startup_continue: |
16 | basr %r13,0 # get base | 21 | basr %r13,0 # get base |
17 | .LPG1: | 22 | .LPG1: |
18 | 23 | ||
19 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | 24 | l %r1,.Lbase_cc-.LPG1(%r13) |
25 | mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK | ||
20 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | 26 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers |
21 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | 27 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area |
22 | # move IPL device to lowcore | 28 | # move IPL device to lowcore |
@@ -69,10 +75,12 @@ startup_continue: | |||
69 | .Lduald:.rept 8 | 75 | .Lduald:.rept 8 |
70 | .long 0x80000000,0,0,0 # invalid access-list entries | 76 | .long 0x80000000,0,0,0 # invalid access-list entries |
71 | .endr | 77 | .endr |
78 | .Lbase_cc: | ||
79 | .long sched_clock_base_cc | ||
72 | 80 | ||
73 | .org 0x12000 | ||
74 | .globl _ehead | 81 | .globl _ehead |
75 | _ehead: | 82 | _ehead: |
83 | |||
76 | #ifdef CONFIG_SHARED_KERNEL | 84 | #ifdef CONFIG_SHARED_KERNEL |
77 | .org 0x100000 | 85 | .org 0x100000 |
78 | #endif | 86 | #endif |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index d984a2a380c3..39580e768658 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/head64.S | 2 | * arch/s390/kernel/head64.S |
3 | * | 3 | * |
4 | * Copyright (C) IBM Corp. 1999,2006 | 4 | * Copyright (C) IBM Corp. 1999,2010 |
5 | * | 5 | * |
6 | * Author(s): Hartmut Penner <hp@de.ibm.com> | 6 | * Author(s): Hartmut Penner <hp@de.ibm.com> |
7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -10,80 +10,17 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | .org 0x11000 | 13 | #include <linux/init.h> |
14 | #include <asm/asm-offsets.h> | ||
15 | #include <asm/thread_info.h> | ||
16 | #include <asm/page.h> | ||
14 | 17 | ||
18 | __HEAD | ||
19 | .globl startup_continue | ||
15 | startup_continue: | 20 | startup_continue: |
16 | basr %r13,0 # get base | 21 | larl %r1,sched_clock_base_cc |
17 | .LPG1: sll %r13,1 # remove high order bit | 22 | mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK |
18 | srl %r13,1 | 23 | larl %r13,.LPG1 # get base |
19 | |||
20 | #ifdef CONFIG_ZFCPDUMP | ||
21 | |||
22 | # check if we have been ipled using zfcp dump: | ||
23 | |||
24 | tm 0xb9,0x01 # test if subchannel is enabled | ||
25 | jno .nodump # subchannel disabled | ||
26 | l %r1,0xb8 | ||
27 | la %r5,.Lipl_schib-.LPG1(%r13) | ||
28 | stsch 0(%r5) # get schib of subchannel | ||
29 | jne .nodump # schib not available | ||
30 | tm 5(%r5),0x01 # devno valid? | ||
31 | jno .nodump | ||
32 | tm 4(%r5),0x80 # qdio capable device? | ||
33 | jno .nodump | ||
34 | l %r2,20(%r0) # address of ipl parameter block | ||
35 | lhi %r3,0 | ||
36 | ic %r3,0x148(%r2) # get opt field | ||
37 | chi %r3,0x20 # load with dump? | ||
38 | jne .nodump | ||
39 | |||
40 | # store all prefix registers in case of load with dump: | ||
41 | |||
42 | la %r7,0 # base register for 0 page | ||
43 | la %r8,0 # first cpu | ||
44 | l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array | ||
45 | ahi %r11,4 # skip boot cpu | ||
46 | lr %r12,%r11 | ||
47 | ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array | ||
48 | stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr | ||
49 | 1: | ||
50 | cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ? | ||
51 | je 4f # if yes get next cpu | ||
52 | 2: | ||
53 | lr %r9,%r7 | ||
54 | sigp %r9,%r8,0x9 # stop & store status of cpu | ||
55 | brc 8,3f # accepted | ||
56 | brc 4,4f # status stored: next cpu | ||
57 | brc 2,2b # busy: try again | ||
58 | brc 1,4f # not op: next cpu | ||
59 | 3: | ||
60 | mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array | ||
61 | ahi %r11,4 # next element in prefix array | ||
62 | clr %r11,%r12 | ||
63 | je 5f # no more space in prefix array | ||
64 | 4: | ||
65 | ahi %r8,1 # next cpu (r8 += 1) | ||
66 | chi %r8,MAX_CPU_ADDRESS # is last possible cpu ? | ||
67 | jle 1b # jump if not last cpu | ||
68 | 5: | ||
69 | lhi %r1,2 # mode 2 = esame (dump) | ||
70 | j 6f | ||
71 | .align 4 | ||
72 | .Lipl_schib: | ||
73 | .rept 13 | ||
74 | .long 0 | ||
75 | .endr | ||
76 | .nodump: | ||
77 | lhi %r1,1 # mode 1 = esame (normal ipl) | ||
78 | 6: | ||
79 | #else | ||
80 | lhi %r1,1 # mode 1 = esame (normal ipl) | ||
81 | #endif /* CONFIG_ZFCPDUMP */ | ||
82 | mvi __LC_AR_MODE_ID,1 # set esame flag | ||
83 | slr %r0,%r0 # set cpuid to zero | ||
84 | sigp %r1,%r0,0x12 # switch to esame mode | ||
85 | sam64 # switch to 64 bit mode | ||
86 | llgfr %r13,%r13 # clear high-order half of base reg | ||
87 | lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half | 24 | lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half |
88 | lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | 25 | lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers |
89 | lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | 26 | lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area |
@@ -108,6 +45,7 @@ startup_continue: | |||
108 | lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, | 45 | lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, |
109 | # virtual and never return ... | 46 | # virtual and never return ... |
110 | .align 16 | 47 | .align 16 |
48 | .LPG1: | ||
111 | .Lentry:.quad 0x0000000180000000,_stext | 49 | .Lentry:.quad 0x0000000180000000,_stext |
112 | .Lctl: .quad 0x04350002 # cr0: various things | 50 | .Lctl: .quad 0x04350002 # cr0: various things |
113 | .quad 0 # cr1: primary space segment table | 51 | .quad 0 # cr1: primary space segment table |
@@ -130,12 +68,6 @@ startup_continue: | |||
130 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 | 68 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 |
131 | .Lnop: .long 0x07000700 | 69 | .Lnop: .long 0x07000700 |
132 | .Lzero64:.fill 16,4,0x0 | 70 | .Lzero64:.fill 16,4,0x0 |
133 | #ifdef CONFIG_ZFCPDUMP | ||
134 | .Lcurrent_cpu: | ||
135 | .long 0x0 | ||
136 | .Lpref_arr_ptr: | ||
137 | .long zfcpdump_prefix_array | ||
138 | #endif /* CONFIG_ZFCPDUMP */ | ||
139 | .Lparmaddr: | 71 | .Lparmaddr: |
140 | .quad PARMAREA | 72 | .quad PARMAREA |
141 | .align 64 | 73 | .align 64 |
@@ -146,9 +78,9 @@ startup_continue: | |||
146 | .long 0x80000000,0,0,0 # invalid access-list entries | 78 | .long 0x80000000,0,0,0 # invalid access-list entries |
147 | .endr | 79 | .endr |
148 | 80 | ||
149 | .org 0x12000 | ||
150 | .globl _ehead | 81 | .globl _ehead |
151 | _ehead: | 82 | _ehead: |
83 | |||
152 | #ifdef CONFIG_SHARED_KERNEL | 84 | #ifdef CONFIG_SHARED_KERNEL |
153 | .org 0x100000 | 85 | .org 0x100000 |
154 | #endif | 86 | #endif |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 4d73296fed74..7eedbbcb54aa 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -553,7 +553,7 @@ out: | |||
553 | return rc; | 553 | return rc; |
554 | } | 554 | } |
555 | 555 | ||
556 | static void ipl_run(struct shutdown_trigger *trigger) | 556 | static void __ipl_run(void *unused) |
557 | { | 557 | { |
558 | diag308(DIAG308_IPL, NULL); | 558 | diag308(DIAG308_IPL, NULL); |
559 | if (MACHINE_IS_VM) | 559 | if (MACHINE_IS_VM) |
@@ -562,6 +562,11 @@ static void ipl_run(struct shutdown_trigger *trigger) | |||
562 | reipl_ccw_dev(&ipl_info.data.ccw.dev_id); | 562 | reipl_ccw_dev(&ipl_info.data.ccw.dev_id); |
563 | } | 563 | } |
564 | 564 | ||
565 | static void ipl_run(struct shutdown_trigger *trigger) | ||
566 | { | ||
567 | smp_switch_to_ipl_cpu(__ipl_run, NULL); | ||
568 | } | ||
569 | |||
565 | static int __init ipl_init(void) | 570 | static int __init ipl_init(void) |
566 | { | 571 | { |
567 | int rc; | 572 | int rc; |
@@ -1039,7 +1044,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, | |||
1039 | sprintf(dst + pos, " PARM %s", vmparm); | 1044 | sprintf(dst + pos, " PARM %s", vmparm); |
1040 | } | 1045 | } |
1041 | 1046 | ||
1042 | static void reipl_run(struct shutdown_trigger *trigger) | 1047 | static void __reipl_run(void *unused) |
1043 | { | 1048 | { |
1044 | struct ccw_dev_id devid; | 1049 | struct ccw_dev_id devid; |
1045 | static char buf[128]; | 1050 | static char buf[128]; |
@@ -1087,6 +1092,11 @@ static void reipl_run(struct shutdown_trigger *trigger) | |||
1087 | disabled_wait((unsigned long) __builtin_return_address(0)); | 1092 | disabled_wait((unsigned long) __builtin_return_address(0)); |
1088 | } | 1093 | } |
1089 | 1094 | ||
1095 | static void reipl_run(struct shutdown_trigger *trigger) | ||
1096 | { | ||
1097 | smp_switch_to_ipl_cpu(__reipl_run, NULL); | ||
1098 | } | ||
1099 | |||
1090 | static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) | 1100 | static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) |
1091 | { | 1101 | { |
1092 | ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; | 1102 | ipb->hdr.len = IPL_PARM_BLK_CCW_LEN; |
@@ -1369,20 +1379,18 @@ static struct kobj_attribute dump_type_attr = | |||
1369 | 1379 | ||
1370 | static struct kset *dump_kset; | 1380 | static struct kset *dump_kset; |
1371 | 1381 | ||
1372 | static void dump_run(struct shutdown_trigger *trigger) | 1382 | static void __dump_run(void *unused) |
1373 | { | 1383 | { |
1374 | struct ccw_dev_id devid; | 1384 | struct ccw_dev_id devid; |
1375 | static char buf[100]; | 1385 | static char buf[100]; |
1376 | 1386 | ||
1377 | switch (dump_method) { | 1387 | switch (dump_method) { |
1378 | case DUMP_METHOD_CCW_CIO: | 1388 | case DUMP_METHOD_CCW_CIO: |
1379 | smp_send_stop(); | ||
1380 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; | 1389 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; |
1381 | devid.ssid = 0; | 1390 | devid.ssid = 0; |
1382 | reipl_ccw_dev(&devid); | 1391 | reipl_ccw_dev(&devid); |
1383 | break; | 1392 | break; |
1384 | case DUMP_METHOD_CCW_VM: | 1393 | case DUMP_METHOD_CCW_VM: |
1385 | smp_send_stop(); | ||
1386 | sprintf(buf, "STORE STATUS"); | 1394 | sprintf(buf, "STORE STATUS"); |
1387 | __cpcmd(buf, NULL, 0, NULL); | 1395 | __cpcmd(buf, NULL, 0, NULL); |
1388 | sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); | 1396 | sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); |
@@ -1396,10 +1404,17 @@ static void dump_run(struct shutdown_trigger *trigger) | |||
1396 | diag308(DIAG308_SET, dump_block_fcp); | 1404 | diag308(DIAG308_SET, dump_block_fcp); |
1397 | diag308(DIAG308_DUMP, NULL); | 1405 | diag308(DIAG308_DUMP, NULL); |
1398 | break; | 1406 | break; |
1399 | case DUMP_METHOD_NONE: | 1407 | default: |
1400 | return; | 1408 | break; |
1401 | } | 1409 | } |
1402 | printk(KERN_EMERG "Dump failed!\n"); | 1410 | } |
1411 | |||
1412 | static void dump_run(struct shutdown_trigger *trigger) | ||
1413 | { | ||
1414 | if (dump_method == DUMP_METHOD_NONE) | ||
1415 | return; | ||
1416 | smp_send_stop(); | ||
1417 | smp_switch_to_ipl_cpu(__dump_run, NULL); | ||
1403 | } | 1418 | } |
1404 | 1419 | ||
1405 | static int __init dump_ccw_init(void) | 1420 | static int __init dump_ccw_init(void) |
@@ -1577,7 +1592,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger) | |||
1577 | static int vmcmd_init(void) | 1592 | static int vmcmd_init(void) |
1578 | { | 1593 | { |
1579 | if (!MACHINE_IS_VM) | 1594 | if (!MACHINE_IS_VM) |
1580 | return -ENOTSUPP; | 1595 | return -EOPNOTSUPP; |
1581 | vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); | 1596 | vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); |
1582 | if (!vmcmd_kset) | 1597 | if (!vmcmd_kset) |
1583 | return -ENOMEM; | 1598 | return -ENOMEM; |
@@ -1595,7 +1610,7 @@ static void stop_run(struct shutdown_trigger *trigger) | |||
1595 | { | 1610 | { |
1596 | if (strcmp(trigger->name, ON_PANIC_STR) == 0) | 1611 | if (strcmp(trigger->name, ON_PANIC_STR) == 0) |
1597 | disabled_wait((unsigned long) __builtin_return_address(0)); | 1612 | disabled_wait((unsigned long) __builtin_return_address(0)); |
1598 | while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) | 1613 | while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) |
1599 | cpu_relax(); | 1614 | cpu_relax(); |
1600 | for (;;); | 1615 | for (;;); |
1601 | } | 1616 | } |
@@ -1902,7 +1917,6 @@ void __init ipl_update_parameters(void) | |||
1902 | void __init ipl_save_parameters(void) | 1917 | void __init ipl_save_parameters(void) |
1903 | { | 1918 | { |
1904 | struct cio_iplinfo iplinfo; | 1919 | struct cio_iplinfo iplinfo; |
1905 | unsigned int *ipl_ptr; | ||
1906 | void *src, *dst; | 1920 | void *src, *dst; |
1907 | 1921 | ||
1908 | if (cio_get_iplinfo(&iplinfo)) | 1922 | if (cio_get_iplinfo(&iplinfo)) |
@@ -1913,11 +1927,10 @@ void __init ipl_save_parameters(void) | |||
1913 | if (!iplinfo.is_qdio) | 1927 | if (!iplinfo.is_qdio) |
1914 | return; | 1928 | return; |
1915 | ipl_flags |= IPL_PARMBLOCK_VALID; | 1929 | ipl_flags |= IPL_PARMBLOCK_VALID; |
1916 | ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; | 1930 | src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr; |
1917 | src = (void *)(unsigned long)*ipl_ptr; | ||
1918 | dst = (void *)IPL_PARMBLOCK_ORIGIN; | 1931 | dst = (void *)IPL_PARMBLOCK_ORIGIN; |
1919 | memmove(dst, src, PAGE_SIZE); | 1932 | memmove(dst, src, PAGE_SIZE); |
1920 | *ipl_ptr = IPL_PARMBLOCK_ORIGIN; | 1933 | S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN; |
1921 | } | 1934 | } |
1922 | 1935 | ||
1923 | static LIST_HEAD(rcall); | 1936 | static LIST_HEAD(rcall); |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 131d7ee8b416..a922d51df6bf 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -54,11 +54,11 @@ void machine_shutdown(void) | |||
54 | { | 54 | { |
55 | } | 55 | } |
56 | 56 | ||
57 | void machine_kexec(struct kimage *image) | 57 | static void __machine_kexec(void *data) |
58 | { | 58 | { |
59 | relocate_kernel_t data_mover; | 59 | relocate_kernel_t data_mover; |
60 | struct kimage *image = data; | ||
60 | 61 | ||
61 | smp_send_stop(); | ||
62 | pfault_fini(); | 62 | pfault_fini(); |
63 | s390_reset_system(); | 63 | s390_reset_system(); |
64 | 64 | ||
@@ -68,3 +68,9 @@ void machine_kexec(struct kimage *image) | |||
68 | (*data_mover)(&image->head, image->start); | 68 | (*data_mover)(&image->head, image->start); |
69 | for (;;); | 69 | for (;;); |
70 | } | 70 | } |
71 | |||
72 | void machine_kexec(struct kimage *image) | ||
73 | { | ||
74 | smp_send_stop(); | ||
75 | smp_switch_to_ipl_cpu(__machine_kexec, image); | ||
76 | } | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 7cf464234419..33fdc5a79764 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -992,3 +992,61 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) | |||
992 | #endif | 992 | #endif |
993 | return &user_s390_view; | 993 | return &user_s390_view; |
994 | } | 994 | } |
995 | |||
996 | static const char *gpr_names[NUM_GPRS] = { | ||
997 | "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", | ||
998 | "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", | ||
999 | }; | ||
1000 | |||
1001 | unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) | ||
1002 | { | ||
1003 | if (offset >= NUM_GPRS) | ||
1004 | return 0; | ||
1005 | return regs->gprs[offset]; | ||
1006 | } | ||
1007 | |||
1008 | int regs_query_register_offset(const char *name) | ||
1009 | { | ||
1010 | unsigned long offset; | ||
1011 | |||
1012 | if (!name || *name != 'r') | ||
1013 | return -EINVAL; | ||
1014 | if (strict_strtoul(name + 1, 10, &offset)) | ||
1015 | return -EINVAL; | ||
1016 | if (offset >= NUM_GPRS) | ||
1017 | return -EINVAL; | ||
1018 | return offset; | ||
1019 | } | ||
1020 | |||
1021 | const char *regs_query_register_name(unsigned int offset) | ||
1022 | { | ||
1023 | if (offset >= NUM_GPRS) | ||
1024 | return NULL; | ||
1025 | return gpr_names[offset]; | ||
1026 | } | ||
1027 | |||
1028 | static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) | ||
1029 | { | ||
1030 | unsigned long ksp = kernel_stack_pointer(regs); | ||
1031 | |||
1032 | return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); | ||
1033 | } | ||
1034 | |||
1035 | /** | ||
1036 | * regs_get_kernel_stack_nth() - get Nth entry of the stack | ||
1037 | * @regs:pt_regs which contains kernel stack pointer. | ||
1038 | * @n:stack entry number. | ||
1039 | * | ||
1040 | * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which | ||
1041 | * is specified by @regs. If the @n th entry is NOT in the kernel stack, | ||
1042 | * this returns 0. | ||
1043 | */ | ||
1044 | unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) | ||
1045 | { | ||
1046 | unsigned long addr; | ||
1047 | |||
1048 | addr = kernel_stack_pointer(regs) + n * sizeof(long); | ||
1049 | if (!regs_within_kernel_stack(regs, addr)) | ||
1050 | return 0; | ||
1051 | return *(unsigned long *)addr; | ||
1052 | } | ||
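
The helpers added to ptrace.c above make up the s390 register and stack access API. A minimal usage sketch, assuming a hypothetical consumer such as a kprobe handler (the handler and the chosen register name are illustrative, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/ptrace.h>

	/* Hypothetical consumer: print one named GPR and the top stack slot. */
	static void show_regs_example(struct pt_regs *regs)
	{
		int off = regs_query_register_offset("r11");	/* -EINVAL if unknown */

		if (off >= 0)
			pr_info("%s = %lx\n", regs_query_register_name(off),
				regs_get_register(regs, off));
		/* Returns 0 when slot 0 lies outside the current kernel stack. */
		pr_info("stack[0] = %lx\n", regs_get_kernel_stack_nth(regs, 0));
	}
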
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 2f481cc3d1c9..cb899d9f8505 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -6,7 +6,7 @@ | |||
6 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) | 6 | * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/lowcore.h> | 9 | #include <asm/asm-offsets.h> |
10 | 10 | ||
11 | # | 11 | # |
12 | # do_reipl_asm | 12 | # do_reipl_asm |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 774147824c3d..5e73dee63baa 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -4,7 +4,7 @@ | |||
4 | * Denis Joseph Barrow, | 4 | * Denis Joseph Barrow, |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <asm/lowcore.h> | 7 | #include <asm/asm-offsets.h> |
8 | 8 | ||
9 | # | 9 | # |
10 | # do_reipl_asm | 10 | # do_reipl_asm |
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index e27ca63076d1..27af3bf3a009 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S | |||
@@ -9,8 +9,10 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | LC_EXT_NEW_PSW = 0x58 # addr of ext int handler | 11 | LC_EXT_NEW_PSW = 0x58 # addr of ext int handler |
12 | LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit | ||
12 | LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter | 13 | LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter |
13 | LC_EXT_INT_CODE = 0x86 # addr of ext int code | 14 | LC_EXT_INT_CODE = 0x86 # addr of ext int code |
15 | LC_AR_MODE_ID = 0xa3 | ||
14 | 16 | ||
15 | # | 17 | # |
16 | # Subroutine which waits synchronously until either an external interruption | 18 | # Subroutine which waits synchronously until either an external interruption |
@@ -30,8 +32,16 @@ _sclp_wait_int: | |||
30 | .LbaseS1: | 32 | .LbaseS1: |
31 | ahi %r15,-96 # create stack frame | 33 | ahi %r15,-96 # create stack frame |
32 | la %r8,LC_EXT_NEW_PSW # register int handler | 34 | la %r8,LC_EXT_NEW_PSW # register int handler |
33 | mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) | 35 | la %r9,.LextpswS1-.LbaseS1(%r13) |
34 | mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) | 36 | #ifdef CONFIG_64BIT |
37 | tm LC_AR_MODE_ID,1 | ||
38 | jno .Lesa1 | ||
39 | la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit | ||
40 | la %r9,.LextpswS1_64-.LbaseS1(%r13) | ||
41 | .Lesa1: | ||
42 | #endif | ||
43 | mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) | ||
44 | mvc 0(16,%r8),0(%r9) | ||
35 | lhi %r6,0x0200 # cr mask for ext int (cr0.54) | 45 | lhi %r6,0x0200 # cr mask for ext int (cr0.54) |
36 | ltr %r2,%r2 | 46 | ltr %r2,%r2 |
37 | jz .LsetctS1 | 47 | jz .LsetctS1 |
@@ -64,15 +74,19 @@ _sclp_wait_int: | |||
64 | .LtimeoutS1: | 74 | .LtimeoutS1: |
65 | lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting | 75 | lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting |
66 | # restore old handler | 76 | # restore old handler |
67 | mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) | 77 | mvc 0(16,%r8),.LoldpswS1-.LbaseS1(%r13) |
68 | lm %r6,%r15,120(%r15) # restore registers | 78 | lm %r6,%r15,120(%r15) # restore registers |
69 | br %r14 # return to caller | 79 | br %r14 # return to caller |
70 | 80 | ||
71 | .align 8 | 81 | .align 8 |
72 | .LoldpswS1: | 82 | .LoldpswS1: |
73 | .long 0, 0 # old ext int PSW | 83 | .long 0, 0, 0, 0 # old ext int PSW |
74 | .LextpswS1: | 84 | .LextpswS1: |
75 | .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int | 85 | .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int |
86 | #ifdef CONFIG_64BIT | ||
87 | .LextpswS1_64: | ||
88 | .quad 0x0000000180000000, .LwaitS1 # PSW to handle ext int, 64 bit | ||
89 | #endif | ||
76 | .LwaitpswS1: | 90 | .LwaitpswS1: |
77 | .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int | 91 | .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int |
78 | .LtimeS1: | 92 | .LtimeS1: |
@@ -250,6 +264,13 @@ _sclp_print: | |||
250 | _sclp_print_early: | 264 | _sclp_print_early: |
251 | stm %r6,%r15,24(%r15) # save registers | 265 | stm %r6,%r15,24(%r15) # save registers |
252 | ahi %r15,-96 # create stack frame | 266 | ahi %r15,-96 # create stack frame |
267 | #ifdef CONFIG_64BIT | ||
268 | tm LC_AR_MODE_ID,1 | ||
269 | jno .Lesa2 | ||
270 | ahi %r15,-80 | ||
271 | stmh %r6,%r15,96(%r15) # store upper register halves | ||
272 | .Lesa2: | ||
273 | #endif | ||
253 | lr %r10,%r2 # save string pointer | 274 | lr %r10,%r2 # save string pointer |
254 | lhi %r2,0 | 275 | lhi %r2,0 |
255 | bras %r14,_sclp_setup # enable console | 276 | bras %r14,_sclp_setup # enable console |
@@ -262,6 +283,13 @@ _sclp_print_early: | |||
262 | lhi %r2,1 | 283 | lhi %r2,1 |
263 | bras %r14,_sclp_setup # disable console | 284 | bras %r14,_sclp_setup # disable console |
264 | .LendS5: | 285 | .LendS5: |
286 | #ifdef CONFIG_64BIT | ||
287 | tm LC_AR_MODE_ID,1 | ||
288 | jno .Lesa3 | ||
289 | lmh %r6,%r15,96(%r15) # restore upper register halves | ||
290 | ahi %r15,80 | ||
291 | .Lesa3: | ||
292 | #endif | ||
265 | lm %r6,%r15,120(%r15) # restore registers | 293 | lm %r6,%r15,120(%r15) # restore registers |
266 | br %r14 | 294 | br %r14 |
267 | 295 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 8d8957b38ab3..77a63ae419f0 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -396,15 +396,12 @@ static void __init | |||
396 | setup_lowcore(void) | 396 | setup_lowcore(void) |
397 | { | 397 | { |
398 | struct _lowcore *lc; | 398 | struct _lowcore *lc; |
399 | int lc_pages; | ||
400 | 399 | ||
401 | /* | 400 | /* |
402 | * Setup lowcore for boot cpu | 401 | * Setup lowcore for boot cpu |
403 | */ | 402 | */ |
404 | lc_pages = sizeof(void *) == 8 ? 2 : 1; | 403 | BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); |
405 | lc = (struct _lowcore *) | 404 | lc = __alloc_bootmem(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); |
406 | __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); | ||
407 | memset(lc, 0, lc_pages * PAGE_SIZE); | ||
408 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 405 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; |
409 | lc->restart_psw.addr = | 406 | lc->restart_psw.addr = |
410 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 407 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
@@ -804,7 +801,7 @@ setup_arch(char **cmdline_p) | |||
804 | if (MACHINE_IS_VM) | 801 | if (MACHINE_IS_VM) |
805 | pr_info("Linux is running as a z/VM " | 802 | pr_info("Linux is running as a z/VM " |
806 | "guest operating system in 31-bit mode\n"); | 803 | "guest operating system in 31-bit mode\n"); |
807 | else | 804 | else if (MACHINE_IS_LPAR) |
808 | pr_info("Linux is running natively in 31-bit mode\n"); | 805 | pr_info("Linux is running natively in 31-bit mode\n"); |
809 | if (MACHINE_HAS_IEEE) | 806 | if (MACHINE_HAS_IEEE) |
810 | pr_info("The hardware system has IEEE compatible " | 807 | pr_info("The hardware system has IEEE compatible " |
@@ -818,7 +815,7 @@ setup_arch(char **cmdline_p) | |||
818 | "guest operating system in 64-bit mode\n"); | 815 | "guest operating system in 64-bit mode\n"); |
819 | else if (MACHINE_IS_KVM) | 816 | else if (MACHINE_IS_KVM) |
820 | pr_info("Linux is running under KVM in 64-bit mode\n"); | 817 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
821 | else | 818 | else if (MACHINE_IS_LPAR) |
822 | pr_info("Linux is running natively in 64-bit mode\n"); | 819 | pr_info("Linux is running natively in 64-bit mode\n"); |
823 | #endif /* CONFIG_64BIT */ | 820 | #endif /* CONFIG_64BIT */ |
824 | 821 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 76a6fdd46c45..8b10127c00ad 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
37 | #include <linux/timex.h> | 37 | #include <linux/timex.h> |
38 | #include <linux/bootmem.h> | 38 | #include <linux/bootmem.h> |
39 | #include <asm/asm-offsets.h> | ||
39 | #include <asm/ipl.h> | 40 | #include <asm/ipl.h> |
40 | #include <asm/setup.h> | 41 | #include <asm/setup.h> |
41 | #include <asm/sigp.h> | 42 | #include <asm/sigp.h> |
@@ -53,7 +54,7 @@ | |||
53 | #include "entry.h" | 54 | #include "entry.h" |
54 | 55 | ||
55 | /* logical cpu to cpu address */ | 56 | /* logical cpu to cpu address */ |
56 | int __cpu_logical_map[NR_CPUS]; | 57 | unsigned short __cpu_logical_map[NR_CPUS]; |
57 | 58 | ||
58 | static struct task_struct *current_set[NR_CPUS]; | 59 | static struct task_struct *current_set[NR_CPUS]; |
59 | 60 | ||
@@ -72,13 +73,13 @@ static int cpu_management; | |||
72 | 73 | ||
73 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 74 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
74 | 75 | ||
75 | static void smp_ext_bitcall(int, ec_bit_sig); | 76 | static void smp_ext_bitcall(int, int); |
76 | 77 | ||
77 | static int cpu_stopped(int cpu) | 78 | static int raw_cpu_stopped(int cpu) |
78 | { | 79 | { |
79 | __u32 status; | 80 | u32 status; |
80 | 81 | ||
81 | switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { | 82 | switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { |
82 | case sigp_status_stored: | 83 | case sigp_status_stored: |
83 | /* Check for stopped and check stop state */ | 84 | /* Check for stopped and check stop state */ |
84 | if (status & 0x50) | 85 | if (status & 0x50) |
@@ -90,6 +91,44 @@ static int cpu_stopped(int cpu) | |||
90 | return 0; | 91 | return 0; |
91 | } | 92 | } |
92 | 93 | ||
94 | static inline int cpu_stopped(int cpu) | ||
95 | { | ||
96 | return raw_cpu_stopped(cpu_logical_map(cpu)); | ||
97 | } | ||
98 | |||
99 | void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) | ||
100 | { | ||
101 | struct _lowcore *lc, *current_lc; | ||
102 | struct stack_frame *sf; | ||
103 | struct pt_regs *regs; | ||
104 | unsigned long sp; | ||
105 | |||
106 | if (smp_processor_id() == 0) | ||
107 | func(data); | ||
108 | __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY); | ||
109 | /* Disable lowcore protection */ | ||
110 | __ctl_clear_bit(0, 28); | ||
111 | current_lc = lowcore_ptr[smp_processor_id()]; | ||
112 | lc = lowcore_ptr[0]; | ||
113 | if (!lc) | ||
114 | lc = current_lc; | ||
115 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | ||
116 | lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; | ||
117 | if (!cpu_online(0)) | ||
118 | smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); | ||
119 | while (sigp(0, sigp_stop_and_store_status) == sigp_busy) | ||
120 | cpu_relax(); | ||
121 | sp = lc->panic_stack; | ||
122 | sp -= sizeof(struct pt_regs); | ||
123 | regs = (struct pt_regs *) sp; | ||
124 | memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); | ||
125 | regs->psw = lc->psw_save_area; | ||
126 | sp -= STACK_FRAME_OVERHEAD; | ||
127 | sf = (struct stack_frame *) sp; | ||
128 | sf->back_chain = regs->gprs[15]; | ||
129 | smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); | ||
130 | } | ||
131 | |||
93 | void smp_send_stop(void) | 132 | void smp_send_stop(void) |
94 | { | 133 | { |
95 | int cpu, rc; | 134 | int cpu, rc; |
@@ -103,7 +142,7 @@ void smp_send_stop(void) | |||
103 | if (cpu == smp_processor_id()) | 142 | if (cpu == smp_processor_id()) |
104 | continue; | 143 | continue; |
105 | do { | 144 | do { |
106 | rc = signal_processor(cpu, sigp_stop); | 145 | rc = sigp(cpu, sigp_stop); |
107 | } while (rc == sigp_busy); | 146 | } while (rc == sigp_busy); |
108 | 147 | ||
109 | while (!cpu_stopped(cpu)) | 148 | while (!cpu_stopped(cpu)) |
@@ -139,13 +178,13 @@ static void do_ext_call_interrupt(__u16 code) | |||
139 | * Send an external call sigp to another cpu and return without waiting | 178 | * Send an external call sigp to another cpu and return without waiting |
140 | * for its completion. | 179 | * for its completion. |
141 | */ | 180 | */ |
142 | static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | 181 | static void smp_ext_bitcall(int cpu, int sig) |
143 | { | 182 | { |
144 | /* | 183 | /* |
145 | * Set signaling bit in lowcore of target cpu and kick it | 184 | * Set signaling bit in lowcore of target cpu and kick it |
146 | */ | 185 | */ |
147 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); | 186 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
148 | while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) | 187 | while (sigp(cpu, sigp_emergency_signal) == sigp_busy) |
149 | udelay(10); | 188 | udelay(10); |
150 | } | 189 | } |
151 | 190 | ||
@@ -239,24 +278,8 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
239 | } | 278 | } |
240 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 279 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
241 | 280 | ||
242 | /* | ||
243 | * In early ipl state a temp. logically cpu number is needed, so the sigp | ||
244 | * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on | ||
245 | * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1. | ||
246 | */ | ||
247 | #define CPU_INIT_NO 1 | ||
248 | |||
249 | #ifdef CONFIG_ZFCPDUMP | 281 | #ifdef CONFIG_ZFCPDUMP |
250 | 282 | ||
251 | /* | ||
252 | * zfcpdump_prefix_array holds prefix registers for the following scenario: | ||
253 | * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to | ||
254 | * save its prefix registers, since they get lost, when switching from 31 bit | ||
255 | * to 64 bit. | ||
256 | */ | ||
257 | unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ | ||
258 | __attribute__((__section__(".data"))); | ||
259 | |||
260 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | 283 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
261 | { | 284 | { |
262 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 285 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
@@ -266,21 +289,15 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | |||
266 | "the dump\n", cpu, NR_CPUS - 1); | 289 | "the dump\n", cpu, NR_CPUS - 1); |
267 | return; | 290 | return; |
268 | } | 291 | } |
269 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); | 292 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); |
270 | __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; | 293 | while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) |
271 | while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == | ||
272 | sigp_busy) | ||
273 | cpu_relax(); | 294 | cpu_relax(); |
274 | memcpy(zfcpdump_save_areas[cpu], | 295 | memcpy(zfcpdump_save_areas[cpu], |
275 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | 296 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, |
276 | SAVE_AREA_SIZE); | 297 | sizeof(struct save_area)); |
277 | #ifdef CONFIG_64BIT | ||
278 | /* copy original prefix register */ | ||
279 | zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu]; | ||
280 | #endif | ||
281 | } | 298 | } |
282 | 299 | ||
283 | union save_area *zfcpdump_save_areas[NR_CPUS + 1]; | 300 | struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; |
284 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | 301 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); |
285 | 302 | ||
286 | #else | 303 | #else |
@@ -389,8 +406,7 @@ static void __init smp_detect_cpus(void) | |||
389 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { | 406 | for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { |
390 | if (cpu == boot_cpu_addr) | 407 | if (cpu == boot_cpu_addr) |
391 | continue; | 408 | continue; |
392 | __cpu_logical_map[CPU_INIT_NO] = cpu; | 409 | if (!raw_cpu_stopped(cpu)) |
393 | if (!cpu_stopped(CPU_INIT_NO)) | ||
394 | continue; | 410 | continue; |
395 | smp_get_save_area(c_cpus, cpu); | 411 | smp_get_save_area(c_cpus, cpu); |
396 | c_cpus++; | 412 | c_cpus++; |
@@ -413,8 +429,7 @@ static void __init smp_detect_cpus(void) | |||
413 | cpu_addr = info->cpu[cpu].address; | 429 | cpu_addr = info->cpu[cpu].address; |
414 | if (cpu_addr == boot_cpu_addr) | 430 | if (cpu_addr == boot_cpu_addr) |
415 | continue; | 431 | continue; |
416 | __cpu_logical_map[CPU_INIT_NO] = cpu_addr; | 432 | if (!raw_cpu_stopped(cpu_addr)) { |
417 | if (!cpu_stopped(CPU_INIT_NO)) { | ||
418 | s_cpus++; | 433 | s_cpus++; |
419 | continue; | 434 | continue; |
420 | } | 435 | } |
@@ -533,18 +548,18 @@ static void smp_free_lowcore(int cpu) | |||
533 | /* Upping and downing of CPUs */ | 548 | /* Upping and downing of CPUs */ |
534 | int __cpuinit __cpu_up(unsigned int cpu) | 549 | int __cpuinit __cpu_up(unsigned int cpu) |
535 | { | 550 | { |
536 | struct task_struct *idle; | ||
537 | struct _lowcore *cpu_lowcore; | 551 | struct _lowcore *cpu_lowcore; |
552 | struct task_struct *idle; | ||
538 | struct stack_frame *sf; | 553 | struct stack_frame *sf; |
539 | sigp_ccode ccode; | ||
540 | u32 lowcore; | 554 | u32 lowcore; |
555 | int ccode; | ||
541 | 556 | ||
542 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) | 557 | if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) |
543 | return -EIO; | 558 | return -EIO; |
544 | if (smp_alloc_lowcore(cpu)) | 559 | if (smp_alloc_lowcore(cpu)) |
545 | return -ENOMEM; | 560 | return -ENOMEM; |
546 | do { | 561 | do { |
547 | ccode = signal_processor(cpu, sigp_initial_cpu_reset); | 562 | ccode = sigp(cpu, sigp_initial_cpu_reset); |
548 | if (ccode == sigp_busy) | 563 | if (ccode == sigp_busy) |
549 | udelay(10); | 564 | udelay(10); |
550 | if (ccode == sigp_not_operational) | 565 | if (ccode == sigp_not_operational) |
@@ -552,7 +567,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
552 | } while (ccode == sigp_busy); | 567 | } while (ccode == sigp_busy); |
553 | 568 | ||
554 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; | 569 | lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; |
555 | while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) | 570 | while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) |
556 | udelay(10); | 571 | udelay(10); |
557 | 572 | ||
558 | idle = current_set[cpu]; | 573 | idle = current_set[cpu]; |
@@ -578,7 +593,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
578 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; | 593 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; |
579 | eieio(); | 594 | eieio(); |
580 | 595 | ||
581 | while (signal_processor(cpu, sigp_restart) == sigp_busy) | 596 | while (sigp(cpu, sigp_restart) == sigp_busy) |
582 | udelay(10); | 597 | udelay(10); |
583 | 598 | ||
584 | while (!cpu_online(cpu)) | 599 | while (!cpu_online(cpu)) |
@@ -640,7 +655,7 @@ void __cpu_die(unsigned int cpu) | |||
640 | /* Wait until target cpu is down */ | 655 | /* Wait until target cpu is down */ |
641 | while (!cpu_stopped(cpu)) | 656 | while (!cpu_stopped(cpu)) |
642 | cpu_relax(); | 657 | cpu_relax(); |
643 | while (signal_processor_p(0, cpu, sigp_set_prefix) == sigp_busy) | 658 | while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) |
644 | udelay(10); | 659 | udelay(10); |
645 | smp_free_lowcore(cpu); | 660 | smp_free_lowcore(cpu); |
646 | pr_info("Processor %d stopped\n", cpu); | 661 | pr_info("Processor %d stopped\n", cpu); |
@@ -649,7 +664,7 @@ void __cpu_die(unsigned int cpu) | |||
649 | void cpu_die(void) | 664 | void cpu_die(void) |
650 | { | 665 | { |
651 | idle_task_exit(); | 666 | idle_task_exit(); |
652 | while (signal_processor(smp_processor_id(), sigp_stop) == sigp_busy) | 667 | while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) |
653 | cpu_relax(); | 668 | cpu_relax(); |
654 | for (;;); | 669 | for (;;); |
655 | } | 670 | } |
@@ -765,7 +780,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev, | |||
765 | get_online_cpus(); | 780 | get_online_cpus(); |
766 | mutex_lock(&smp_cpu_state_mutex); | 781 | mutex_lock(&smp_cpu_state_mutex); |
767 | rc = -EBUSY; | 782 | rc = -EBUSY; |
768 | if (cpu_online(cpu)) | 783 | /* disallow configuration changes of online cpus and cpu 0 */ |
784 | if (cpu_online(cpu) || cpu == 0) | ||
769 | goto out; | 785 | goto out; |
770 | rc = 0; | 786 | rc = 0; |
771 | switch (val) { | 787 | switch (val) { |
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S new file mode 100644 index 000000000000..469f11b574fa --- /dev/null +++ b/arch/s390/kernel/switch_cpu.S | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * 31-bit switch cpu code | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <asm/asm-offsets.h> | ||
9 | #include <asm/ptrace.h> | ||
10 | |||
11 | # smp_switch_to_cpu switches to destination cpu and executes the passed function | ||
12 | # Parameter: %r2 - function to call | ||
13 | # %r3 - function parameter | ||
14 | # %r4 - stack pointer | ||
15 | # %r5 - current cpu | ||
16 | # %r6 - destination cpu | ||
17 | |||
18 | .section .text | ||
19 | .align 4 | ||
20 | .globl smp_switch_to_cpu | ||
21 | smp_switch_to_cpu: | ||
22 | stm %r6,%r15,__SF_GPRS(%r15) | ||
23 | lr %r1,%r15 | ||
24 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
25 | st %r1,__SF_BACKCHAIN(%r15) | ||
26 | basr %r13,0 | ||
27 | 0: la %r1,.gprregs_addr-0b(%r13) | ||
28 | l %r1,0(%r1) | ||
29 | stm %r0,%r15,0(%r1) | ||
30 | 1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ | ||
31 | brc 2,1b /* busy, try again */ | ||
32 | 2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ | ||
33 | brc 2,2b /* busy, try again */ | ||
34 | 3: j 3b | ||
35 | |||
36 | .globl smp_restart_cpu | ||
37 | smp_restart_cpu: | ||
38 | basr %r13,0 | ||
39 | 0: la %r1,.gprregs_addr-0b(%r13) | ||
40 | l %r1,0(%r1) | ||
41 | lm %r0,%r15,0(%r1) | ||
42 | 1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ | ||
43 | brc 10,1b /* busy, accepted (status 0), running */ | ||
44 | tmll %r0,0x40 /* Test if calling CPU is stopped */ | ||
45 | jz 1b | ||
46 | ltr %r4,%r4 /* New stack ? */ | ||
47 | jz 1f | ||
48 | lr %r15,%r4 | ||
49 | 1: basr %r14,%r2 | ||
50 | |||
51 | .gprregs_addr: | ||
52 | .long .gprregs | ||
53 | |||
54 | .section .data,"aw",@progbits | ||
55 | .gprregs: | ||
56 | .rept 16 | ||
57 | .long 0 | ||
58 | .endr | ||
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S new file mode 100644 index 000000000000..d94aacc898cb --- /dev/null +++ b/arch/s390/kernel/switch_cpu64.S | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * 64-bit switch cpu code | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <asm/asm-offsets.h> | ||
9 | #include <asm/ptrace.h> | ||
10 | |||
11 | # smp_switch_to_cpu switches to destination cpu and executes the passed function | ||
12 | # Parameter: %r2 - function to call | ||
13 | # %r3 - function parameter | ||
14 | # %r4 - stack pointer | ||
15 | # %r5 - current cpu | ||
16 | # %r6 - destination cpu | ||
17 | |||
18 | .section .text | ||
19 | .align 4 | ||
20 | .globl smp_switch_to_cpu | ||
21 | smp_switch_to_cpu: | ||
22 | stmg %r6,%r15,__SF_GPRS(%r15) | ||
23 | lgr %r1,%r15 | ||
24 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
25 | stg %r1,__SF_BACKCHAIN(%r15) | ||
26 | larl %r1,.gprregs | ||
27 | stmg %r0,%r15,0(%r1) | ||
28 | 1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ | ||
29 | brc 2,1b /* busy, try again */ | ||
30 | 2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ | ||
31 | brc 2,2b /* busy, try again */ | ||
32 | 3: j 3b | ||
33 | |||
34 | .globl smp_restart_cpu | ||
35 | smp_restart_cpu: | ||
36 | larl %r1,.gprregs | ||
37 | lmg %r0,%r15,0(%r1) | ||
38 | 1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ | ||
39 | brc 10,1b /* busy, accepted (status 0), running */ | ||
40 | tmll %r0,0x40 /* Test if calling CPU is stopped */ | ||
41 | jz 1b | ||
42 | ltgr %r4,%r4 /* New stack ? */ | ||
43 | jz 1f | ||
44 | lgr %r15,%r4 | ||
45 | 1: basr %r14,%r2 | ||
46 | |||
47 | .section .data,"aw",@progbits | ||
48 | .gprregs: | ||
49 | .rept 16 | ||
50 | .quad 0 | ||
51 | .endr | ||
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 0c26cc1898ec..b354427e03b7 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -176,7 +176,7 @@ pgm_check_entry: | |||
176 | cgr %r1,%r2 | 176 | cgr %r1,%r2 |
177 | je restore_registers /* r1 = r2 -> nothing to do */ | 177 | je restore_registers /* r1 = r2 -> nothing to do */ |
178 | larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ | 178 | larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ |
179 | mvc __LC_RESTART_PSW(16,%r0),0(%r4) | 179 | mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) |
180 | 3: | 180 | 3: |
181 | sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET | 181 | sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET |
182 | brc 8,4f /* accepted */ | 182 | brc 8,4f /* accepted */ |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 65065ac48ed3..a8f93f1705ad 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -51,14 +51,6 @@ | |||
51 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 51 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
52 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) | 52 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) |
53 | 53 | ||
54 | /* | ||
55 | * Create a small time difference between the timer interrupts | ||
56 | * on the different cpus to avoid lock contention. | ||
57 | */ | ||
58 | #define CPU_DEVIATION (smp_processor_id() << 12) | ||
59 | |||
60 | #define TICK_SIZE tick | ||
61 | |||
62 | u64 sched_clock_base_cc = -1; /* Force to data section. */ | 54 | u64 sched_clock_base_cc = -1; /* Force to data section. */ |
63 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); | 55 | EXPORT_SYMBOL_GPL(sched_clock_base_cc); |
64 | 56 | ||
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 5f99e66c51c3..6bc9c197aa91 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/security.h> | 23 | #include <linux/security.h> |
24 | #include <linux/bootmem.h> | 24 | #include <linux/bootmem.h> |
25 | #include <linux/compat.h> | 25 | #include <linux/compat.h> |
26 | #include <asm/asm-offsets.h> | ||
26 | #include <asm/pgtable.h> | 27 | #include <asm/pgtable.h> |
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 6ee55ae84ce2..a7251580891c 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig | |||
@@ -35,6 +35,7 @@ config KVM | |||
35 | 35 | ||
36 | # OK, it's a little counter-intuitive to do this, but it puts it neatly under | 36 | # OK, it's a little counter-intuitive to do this, but it puts it neatly under |
37 | # the virtualization menu. | 37 | # the virtualization menu. |
38 | source drivers/vhost/Kconfig | ||
38 | source drivers/virtio/Kconfig | 39 | source drivers/virtio/Kconfig |
39 | 40 | ||
40 | endif # VIRTUALIZATION | 41 | endif # VIRTUALIZATION |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 8300309698fa..9e4c84187cf5 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -39,7 +39,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
39 | vcpu->run->s390_reset_flags = 0; | 39 | vcpu->run->s390_reset_flags = 0; |
40 | break; | 40 | break; |
41 | default: | 41 | default: |
42 | return -ENOTSUPP; | 42 | return -EOPNOTSUPP; |
43 | } | 43 | } |
44 | 44 | ||
45 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 45 | atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
@@ -62,6 +62,6 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) | |||
62 | case 0x308: | 62 | case 0x308: |
63 | return __diag_ipl_functions(vcpu); | 63 | return __diag_ipl_functions(vcpu); |
64 | default: | 64 | default: |
65 | return -ENOTSUPP; | 65 | return -EOPNOTSUPP; |
66 | } | 66 | } |
67 | } | 67 | } |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index b40096494e46..3ddc30895e31 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -32,7 +32,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) | |||
32 | 32 | ||
33 | vcpu->stat.instruction_lctlg++; | 33 | vcpu->stat.instruction_lctlg++; |
34 | if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) | 34 | if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) |
35 | return -ENOTSUPP; | 35 | return -EOPNOTSUPP; |
36 | 36 | ||
37 | useraddr = disp2; | 37 | useraddr = disp2; |
38 | if (base2) | 38 | if (base2) |
@@ -138,7 +138,7 @@ static int handle_stop(struct kvm_vcpu *vcpu) | |||
138 | rc = __kvm_s390_vcpu_store_status(vcpu, | 138 | rc = __kvm_s390_vcpu_store_status(vcpu, |
139 | KVM_S390_STORE_STATUS_NOADDR); | 139 | KVM_S390_STORE_STATUS_NOADDR); |
140 | if (rc >= 0) | 140 | if (rc >= 0) |
141 | rc = -ENOTSUPP; | 141 | rc = -EOPNOTSUPP; |
142 | } | 142 | } |
143 | 143 | ||
144 | if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { | 144 | if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { |
@@ -150,7 +150,7 @@ static int handle_stop(struct kvm_vcpu *vcpu) | |||
150 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { | 150 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { |
151 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; | 151 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; |
152 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); | 152 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); |
153 | rc = -ENOTSUPP; | 153 | rc = -EOPNOTSUPP; |
154 | } | 154 | } |
155 | 155 | ||
156 | spin_unlock_bh(&vcpu->arch.local_int.lock); | 156 | spin_unlock_bh(&vcpu->arch.local_int.lock); |
@@ -171,9 +171,9 @@ static int handle_validity(struct kvm_vcpu *vcpu) | |||
171 | 2*PAGE_SIZE); | 171 | 2*PAGE_SIZE); |
172 | if (rc) | 172 | if (rc) |
173 | /* user will receive sigsegv, exit to user */ | 173 | /* user will receive sigsegv, exit to user */ |
174 | rc = -ENOTSUPP; | 174 | rc = -EOPNOTSUPP; |
175 | } else | 175 | } else |
176 | rc = -ENOTSUPP; | 176 | rc = -EOPNOTSUPP; |
177 | 177 | ||
178 | if (rc) | 178 | if (rc) |
179 | VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", | 179 | VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", |
@@ -189,7 +189,7 @@ static int handle_instruction(struct kvm_vcpu *vcpu) | |||
189 | handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; | 189 | handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8]; |
190 | if (handler) | 190 | if (handler) |
191 | return handler(vcpu); | 191 | return handler(vcpu); |
192 | return -ENOTSUPP; | 192 | return -EOPNOTSUPP; |
193 | } | 193 | } |
194 | 194 | ||
195 | static int handle_prog(struct kvm_vcpu *vcpu) | 195 | static int handle_prog(struct kvm_vcpu *vcpu) |
@@ -206,7 +206,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | |||
206 | rc = handle_instruction(vcpu); | 206 | rc = handle_instruction(vcpu); |
207 | rc2 = handle_prog(vcpu); | 207 | rc2 = handle_prog(vcpu); |
208 | 208 | ||
209 | if (rc == -ENOTSUPP) | 209 | if (rc == -EOPNOTSUPP) |
210 | vcpu->arch.sie_block->icptcode = 0x04; | 210 | vcpu->arch.sie_block->icptcode = 0x04; |
211 | if (rc) | 211 | if (rc) |
212 | return rc; | 212 | return rc; |
@@ -231,9 +231,9 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) | |||
231 | u8 code = vcpu->arch.sie_block->icptcode; | 231 | u8 code = vcpu->arch.sie_block->icptcode; |
232 | 232 | ||
233 | if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) | 233 | if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) |
234 | return -ENOTSUPP; | 234 | return -EOPNOTSUPP; |
235 | func = intercept_funcs[code >> 2]; | 235 | func = intercept_funcs[code >> 2]; |
236 | if (func) | 236 | if (func) |
237 | return func(vcpu); | 237 | return func(vcpu); |
238 | return -ENOTSUPP; | 238 | return -EOPNOTSUPP; |
239 | } | 239 | } |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 43486c2408e1..834774d8d5f3 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -10,12 +10,12 @@ | |||
10 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 10 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <asm/lowcore.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | #include <linux/hrtimer.h> | ||
16 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
17 | #include <linux/kvm_host.h> | 14 | #include <linux/kvm_host.h> |
15 | #include <linux/hrtimer.h> | ||
18 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
17 | #include <asm/asm-offsets.h> | ||
18 | #include <asm/uaccess.h> | ||
19 | #include "kvm-s390.h" | 19 | #include "kvm-s390.h" |
20 | #include "gaccess.h" | 20 | #include "gaccess.h" |
21 | 21 | ||
@@ -187,8 +187,8 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
187 | if (rc == -EFAULT) | 187 | if (rc == -EFAULT) |
188 | exception = 1; | 188 | exception = 1; |
189 | 189 | ||
190 | rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM, | 190 | rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2, |
191 | inti->ext.ext_params2); | 191 | inti->ext.ext_params2); |
192 | if (rc == -EFAULT) | 192 | if (rc == -EFAULT) |
193 | exception = 1; | 193 | exception = 1; |
194 | break; | 194 | break; |
@@ -342,7 +342,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | |||
342 | if (psw_interrupts_disabled(vcpu)) { | 342 | if (psw_interrupts_disabled(vcpu)) { |
343 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); | 343 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); |
344 | __unset_cpu_idle(vcpu); | 344 | __unset_cpu_idle(vcpu); |
345 | return -ENOTSUPP; /* disabled wait */ | 345 | return -EOPNOTSUPP; /* disabled wait */ |
346 | } | 346 | } |
347 | 347 | ||
348 | if (psw_extint_disabled(vcpu) || | 348 | if (psw_extint_disabled(vcpu) || |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f8bcaefd7d34..3fa0a10e4668 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <asm/asm-offsets.h> | ||
26 | #include <asm/lowcore.h> | 27 | #include <asm/lowcore.h> |
27 | #include <asm/pgtable.h> | 28 | #include <asm/pgtable.h> |
28 | #include <asm/nmi.h> | 29 | #include <asm/nmi.h> |
@@ -543,7 +544,7 @@ rerun_vcpu: | |||
543 | rc = -EINTR; | 544 | rc = -EINTR; |
544 | } | 545 | } |
545 | 546 | ||
546 | if (rc == -ENOTSUPP) { | 547 | if (rc == -EOPNOTSUPP) { |
547 | /* intercept cannot be handled in-kernel, prepare kvm-run */ | 548 | /* intercept cannot be handled in-kernel, prepare kvm-run */ |
548 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; | 549 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; |
549 | kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; | 550 | kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; |
@@ -603,45 +604,45 @@ int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
603 | } else | 604 | } else |
604 | prefix = 0; | 605 | prefix = 0; |
605 | 606 | ||
606 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs), | 607 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), |
607 | vcpu->arch.guest_fpregs.fprs, 128, prefix)) | 608 | vcpu->arch.guest_fpregs.fprs, 128, prefix)) |
608 | return -EFAULT; | 609 | return -EFAULT; |
609 | 610 | ||
610 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs), | 611 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), |
611 | vcpu->arch.guest_gprs, 128, prefix)) | 612 | vcpu->arch.guest_gprs, 128, prefix)) |
612 | return -EFAULT; | 613 | return -EFAULT; |
613 | 614 | ||
614 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw), | 615 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), |
615 | &vcpu->arch.sie_block->gpsw, 16, prefix)) | 616 | &vcpu->arch.sie_block->gpsw, 16, prefix)) |
616 | return -EFAULT; | 617 | return -EFAULT; |
617 | 618 | ||
618 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg), | 619 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), |
619 | &vcpu->arch.sie_block->prefix, 4, prefix)) | 620 | &vcpu->arch.sie_block->prefix, 4, prefix)) |
620 | return -EFAULT; | 621 | return -EFAULT; |
621 | 622 | ||
622 | if (__guestcopy(vcpu, | 623 | if (__guestcopy(vcpu, |
623 | addr + offsetof(struct save_area_s390x, fp_ctrl_reg), | 624 | addr + offsetof(struct save_area, fp_ctrl_reg), |
624 | &vcpu->arch.guest_fpregs.fpc, 4, prefix)) | 625 | &vcpu->arch.guest_fpregs.fpc, 4, prefix)) |
625 | return -EFAULT; | 626 | return -EFAULT; |
626 | 627 | ||
627 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg), | 628 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg), |
628 | &vcpu->arch.sie_block->todpr, 4, prefix)) | 629 | &vcpu->arch.sie_block->todpr, 4, prefix)) |
629 | return -EFAULT; | 630 | return -EFAULT; |
630 | 631 | ||
631 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer), | 632 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), |
632 | &vcpu->arch.sie_block->cputm, 8, prefix)) | 633 | &vcpu->arch.sie_block->cputm, 8, prefix)) |
633 | return -EFAULT; | 634 | return -EFAULT; |
634 | 635 | ||
635 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp), | 636 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), |
636 | &vcpu->arch.sie_block->ckc, 8, prefix)) | 637 | &vcpu->arch.sie_block->ckc, 8, prefix)) |
637 | return -EFAULT; | 638 | return -EFAULT; |
638 | 639 | ||
639 | if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs), | 640 | if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), |
640 | &vcpu->arch.guest_acrs, 64, prefix)) | 641 | &vcpu->arch.guest_acrs, 64, prefix)) |
641 | return -EFAULT; | 642 | return -EFAULT; |
642 | 643 | ||
643 | if (__guestcopy(vcpu, | 644 | if (__guestcopy(vcpu, |
644 | addr + offsetof(struct save_area_s390x, ctrl_regs), | 645 | addr + offsetof(struct save_area, ctrl_regs), |
645 | &vcpu->arch.sie_block->gcr, 128, prefix)) | 646 | &vcpu->arch.sie_block->gcr, 128, prefix)) |
646 | return -EFAULT; | 647 | return -EFAULT; |
647 | return 0; | 648 | return 0; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d426aac8095d..28c55677eb39 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -323,5 +323,5 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) | |||
323 | else | 323 | else |
324 | return handler(vcpu); | 324 | return handler(vcpu); |
325 | } | 325 | } |
326 | return -ENOTSUPP; | 326 | return -EOPNOTSUPP; |
327 | } | 327 | } |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 15ee1111de58..241a48459b66 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -172,7 +172,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) | |||
172 | rc = 0; /* order accepted */ | 172 | rc = 0; /* order accepted */ |
173 | break; | 173 | break; |
174 | default: | 174 | default: |
175 | rc = -ENOTSUPP; | 175 | rc = -EOPNOTSUPP; |
176 | } | 176 | } |
177 | return rc; | 177 | return rc; |
178 | } | 178 | } |
@@ -293,7 +293,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) | |||
293 | vcpu->stat.instruction_sigp_restart++; | 293 | vcpu->stat.instruction_sigp_restart++; |
294 | /* user space must know about restart */ | 294 | /* user space must know about restart */ |
295 | default: | 295 | default: |
296 | return -ENOTSUPP; | 296 | return -EOPNOTSUPP; |
297 | } | 297 | } |
298 | 298 | ||
299 | if (rc < 0) | 299 | if (rc < 0) |
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index 97975ec7a274..cd54a1c352af 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for s390-specific library files.. | 2 | # Makefile for s390-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o | 5 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o usercopy.o |
6 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o | 6 | obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o |
7 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o | 7 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o |
8 | lib-$(CONFIG_SMP) += spinlock.o | 8 | lib-$(CONFIG_SMP) += spinlock.o |
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index cff327f109a8..91754ffb9203 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
@@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp) | |||
43 | { | 43 | { |
44 | int count = spin_retry; | 44 | int count = spin_retry; |
45 | unsigned int cpu = ~smp_processor_id(); | 45 | unsigned int cpu = ~smp_processor_id(); |
46 | unsigned int owner; | ||
46 | 47 | ||
47 | while (1) { | 48 | while (1) { |
48 | if (count-- <= 0) { | 49 | owner = lp->owner_cpu; |
49 | unsigned int owner = lp->owner_cpu; | 50 | if (!owner || smp_vcpu_scheduled(~owner)) { |
50 | if (owner != 0) | 51 | for (count = spin_retry; count > 0; count--) { |
51 | _raw_yield_cpu(~owner); | 52 | if (arch_spin_is_locked(lp)) |
52 | count = spin_retry; | 53 | continue; |
54 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, | ||
55 | cpu) == 0) | ||
56 | return; | ||
57 | } | ||
58 | if (MACHINE_IS_LPAR) | ||
59 | continue; | ||
53 | } | 60 | } |
54 | if (arch_spin_is_locked(lp)) | 61 | owner = lp->owner_cpu; |
55 | continue; | 62 | if (owner) |
63 | _raw_yield_cpu(~owner); | ||
56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 64 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
57 | return; | 65 | return; |
58 | } | 66 | } |
@@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) | |||
63 | { | 71 | { |
64 | int count = spin_retry; | 72 | int count = spin_retry; |
65 | unsigned int cpu = ~smp_processor_id(); | 73 | unsigned int cpu = ~smp_processor_id(); |
74 | unsigned int owner; | ||
66 | 75 | ||
67 | local_irq_restore(flags); | 76 | local_irq_restore(flags); |
68 | while (1) { | 77 | while (1) { |
69 | if (count-- <= 0) { | 78 | owner = lp->owner_cpu; |
70 | unsigned int owner = lp->owner_cpu; | 79 | if (!owner || smp_vcpu_scheduled(~owner)) { |
71 | if (owner != 0) | 80 | for (count = spin_retry; count > 0; count--) { |
72 | _raw_yield_cpu(~owner); | 81 | if (arch_spin_is_locked(lp)) |
73 | count = spin_retry; | 82 | continue; |
83 | local_irq_disable(); | ||
84 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, | ||
85 | cpu) == 0) | ||
86 | return; | ||
87 | local_irq_restore(flags); | ||
88 | } | ||
89 | if (MACHINE_IS_LPAR) | ||
90 | continue; | ||
74 | } | 91 | } |
75 | if (arch_spin_is_locked(lp)) | 92 | owner = lp->owner_cpu; |
76 | continue; | 93 | if (owner) |
94 | _raw_yield_cpu(~owner); | ||
77 | local_irq_disable(); | 95 | local_irq_disable(); |
78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 96 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
79 | return; | 97 | return; |
@@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry); | |||
100 | void arch_spin_relax(arch_spinlock_t *lock) | 118 | void arch_spin_relax(arch_spinlock_t *lock) |
101 | { | 119 | { |
102 | unsigned int cpu = lock->owner_cpu; | 120 | unsigned int cpu = lock->owner_cpu; |
103 | if (cpu != 0) | 121 | if (cpu != 0) { |
104 | _raw_yield_cpu(~cpu); | 122 | if (MACHINE_IS_VM || MACHINE_IS_KVM || |
123 | !smp_vcpu_scheduled(~cpu)) | ||
124 | _raw_yield_cpu(~cpu); | ||
125 | } | ||
105 | } | 126 | } |
106 | EXPORT_SYMBOL(arch_spin_relax); | 127 | EXPORT_SYMBOL(arch_spin_relax); |
107 | 128 | ||
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c new file mode 100644 index 000000000000..14b363fec8a2 --- /dev/null +++ b/arch/s390/lib/usercopy.c | |||
@@ -0,0 +1,8 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/bug.h> | ||
3 | |||
4 | void copy_from_user_overflow(void) | ||
5 | { | ||
6 | WARN(1, "Buffer overflow detected!\n"); | ||
7 | } | ||
8 | EXPORT_SYMBOL(copy_from_user_overflow); | ||
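
copy_from_user_overflow() is the warning sink the user copy helpers branch to when the requested length is provably larger than the destination buffer, so the WARN above fires instead of a silent overflow. A hedged illustration of the coding pattern the check rewards, assuming the usual object-size test in copy_from_user(); the helper and buffer size are made up:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Hypothetical helper: bounding len against the destination keeps the
	 * object-size check satisfied; a provably oversized copy would instead
	 * end up in copy_from_user_overflow(). */
	static int copy_request(const void __user *ubuf, size_t len)
	{
		char kbuf[32];

		if (len > sizeof(kbuf))
			return -EINVAL;
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;
		return 0;
	}
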
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 5c8457129603..6409fd57eb04 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -309,7 +309,7 @@ query_segment_type (struct dcss_segment *seg) | |||
309 | } | 309 | } |
310 | #endif | 310 | #endif |
311 | if (qout->segcnt > 6) { | 311 | if (qout->segcnt > 6) { |
312 | rc = -ENOTSUPP; | 312 | rc = -EOPNOTSUPP; |
313 | goto out_free; | 313 | goto out_free; |
314 | } | 314 | } |
315 | 315 | ||
@@ -324,11 +324,11 @@ query_segment_type (struct dcss_segment *seg) | |||
324 | for (i=0; i<qout->segcnt; i++) { | 324 | for (i=0; i<qout->segcnt; i++) { |
325 | if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && | 325 | if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && |
326 | ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { | 326 | ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { |
327 | rc = -ENOTSUPP; | 327 | rc = -EOPNOTSUPP; |
328 | goto out_free; | 328 | goto out_free; |
329 | } | 329 | } |
330 | if (start != qout->range[i].start >> PAGE_SHIFT) { | 330 | if (start != qout->range[i].start >> PAGE_SHIFT) { |
331 | rc = -ENOTSUPP; | 331 | rc = -EOPNOTSUPP; |
332 | goto out_free; | 332 | goto out_free; |
333 | } | 333 | } |
334 | start = (qout->range[i].end >> PAGE_SHIFT) + 1; | 334 | start = (qout->range[i].end >> PAGE_SHIFT) + 1; |
@@ -357,7 +357,7 @@ query_segment_type (struct dcss_segment *seg) | |||
357 | * -ENOSYS : we are not running on VM | 357 | * -ENOSYS : we are not running on VM |
358 | * -EIO : could not perform query diagnose | 358 | * -EIO : could not perform query diagnose |
359 | * -ENOENT : no such segment | 359 | * -ENOENT : no such segment |
360 | * -ENOTSUPP: multi-part segment cannot be used with linux | 360 | * -EOPNOTSUPP: multi-part segment cannot be used with linux |
361 | * -ENOMEM : out of memory | 361 | * -ENOMEM : out of memory |
362 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h | 362 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h |
363 | */ | 363 | */ |
@@ -515,7 +515,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
515 | * -ENOSYS : we are not running on VM | 515 | * -ENOSYS : we are not running on VM |
516 | * -EIO : could not perform query or load diagnose | 516 | * -EIO : could not perform query or load diagnose |
517 | * -ENOENT : no such segment | 517 | * -ENOENT : no such segment |
518 | * -ENOTSUPP: multi-part segment cannot be used with linux | 518 | * -EOPNOTSUPP: multi-part segment cannot be used with linux |
519 | * -ENOSPC : segment cannot be used (overlaps with storage) | 519 | * -ENOSPC : segment cannot be used (overlaps with storage) |
520 | * -EBUSY : segment can temporarily not be used (overlaps with dcss) | 520 | * -EBUSY : segment can temporarily not be used (overlaps with dcss) |
521 | * -ERANGE : segment cannot be used (exceeds kernel mapping range) | 521 | * -ERANGE : segment cannot be used (exceeds kernel mapping range) |
@@ -742,7 +742,7 @@ void segment_warning(int rc, char *seg_name) | |||
742 | pr_err("Loading or querying DCSS %s resulted in a " | 742 | pr_err("Loading or querying DCSS %s resulted in a " |
743 | "hardware error\n", seg_name); | 743 | "hardware error\n", seg_name); |
744 | break; | 744 | break; |
745 | case -ENOTSUPP: | 745 | case -EOPNOTSUPP: |
746 | pr_err("DCSS %s has multiple page ranges and cannot be " | 746 | pr_err("DCSS %s has multiple page ranges and cannot be " |
747 | "loaded or queried\n", seg_name); | 747 | "loaded or queried\n", seg_name); |
748 | break; | 748 | break; |
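The ENOTSUPP to EOPNOTSUPP switch matters because ENOTSUPP is a kernel-internal value with no errno string, so a return code passed through to user space shows up as "Unknown error 524", while EOPNOTSUPP maps to "Operation not supported". A hedged caller sketch, assuming the existing segment_load()/segment_warning() interface as used by DCSS consumers such as dcssblk:

	/*
	 * Sketch only: load a DCSS and turn failures into log output.  For
	 * multi-part segments rc is now -EOPNOTSUPP instead of the
	 * kernel-internal -ENOTSUPP.
	 */
	static int load_dcss_sketch(char *name)
	{
		unsigned long start, end;
		int rc;

		rc = segment_load(name, SEGMENT_SHARED, &start, &end);
		if (rc < 0) {
			segment_warning(rc, name);
			return rc;
		}
		return 0;	/* rc >= 0 is the segment type */
	}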
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fc102e70d9c2..3040d7c78fe0 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/kprobes.h> | 30 | #include <linux/kprobes.h> |
31 | #include <linux/uaccess.h> | 31 | #include <linux/uaccess.h> |
32 | #include <linux/hugetlb.h> | 32 | #include <linux/hugetlb.h> |
33 | #include <asm/asm-offsets.h> | ||
33 | #include <asm/system.h> | 34 | #include <asm/system.h> |
34 | #include <asm/pgtable.h> | 35 | #include <asm/pgtable.h> |
35 | #include <asm/s390_ext.h> | 36 | #include <asm/s390_ext.h> |
@@ -59,15 +60,13 @@ static inline int notify_page_fault(struct pt_regs *regs) | |||
59 | { | 60 | { |
60 | int ret = 0; | 61 | int ret = 0; |
61 | 62 | ||
62 | #ifdef CONFIG_KPROBES | ||
63 | /* kprobe_running() needs smp_processor_id() */ | 63 | /* kprobe_running() needs smp_processor_id() */ |
64 | if (!user_mode(regs)) { | 64 | if (kprobes_built_in() && !user_mode(regs)) { |
65 | preempt_disable(); | 65 | preempt_disable(); |
66 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | 66 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) |
67 | ret = 1; | 67 | ret = 1; |
68 | preempt_enable(); | 68 | preempt_enable(); |
69 | } | 69 | } |
70 | #endif | ||
71 | return ret; | 70 | return ret; |
72 | } | 71 | } |
73 | 72 | ||
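Replacing the #ifdef CONFIG_KPROBES block with a kprobes_built_in() check keeps notify_page_fault() free of preprocessor conditionals while still letting the compiler discard the body when kprobes are disabled, because the helper folds to a constant. Roughly the pattern behind it (the real helper lives in <linux/kprobes.h>):

	/*
	 * Compile-time elimination pattern used above: with CONFIG_KPROBES
	 * off the helper returns 0, so "if (kprobes_built_in() && ...)" is
	 * dead code the compiler removes -- no #ifdef needed in the caller.
	 */
	#ifdef CONFIG_KPROBES
	static inline int kprobes_built_in(void) { return 1; }
	#else
	static inline int kprobes_built_in(void) { return 0; }
	#endif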
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 765647952221..d5865e4024ce 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -143,33 +143,34 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
143 | } | 143 | } |
144 | #endif | 144 | #endif |
145 | 145 | ||
146 | void free_initmem(void) | 146 | void free_init_pages(char *what, unsigned long begin, unsigned long end) |
147 | { | 147 | { |
148 | unsigned long addr; | 148 | unsigned long addr = begin; |
149 | 149 | ||
150 | addr = (unsigned long)(&__init_begin); | 150 | if (begin >= end) |
151 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | 151 | return; |
152 | for (; addr < end; addr += PAGE_SIZE) { | ||
152 | ClearPageReserved(virt_to_page(addr)); | 153 | ClearPageReserved(virt_to_page(addr)); |
153 | init_page_count(virt_to_page(addr)); | 154 | init_page_count(virt_to_page(addr)); |
154 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | 155 | memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM, |
156 | PAGE_SIZE); | ||
155 | free_page(addr); | 157 | free_page(addr); |
156 | totalram_pages++; | 158 | totalram_pages++; |
157 | } | 159 | } |
158 | printk ("Freeing unused kernel memory: %ldk freed\n", | 160 | printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); |
159 | ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); | 161 | } |
162 | |||
163 | void free_initmem(void) | ||
164 | { | ||
165 | free_init_pages("unused kernel memory", | ||
166 | (unsigned long)&__init_begin, | ||
167 | (unsigned long)&__init_end); | ||
160 | } | 168 | } |
161 | 169 | ||
162 | #ifdef CONFIG_BLK_DEV_INITRD | 170 | #ifdef CONFIG_BLK_DEV_INITRD |
163 | void free_initrd_mem(unsigned long start, unsigned long end) | 171 | void free_initrd_mem(unsigned long start, unsigned long end) |
164 | { | 172 | { |
165 | if (start < end) | 173 | free_init_pages("initrd memory", start, end); |
166 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
167 | for (; start < end; start += PAGE_SIZE) { | ||
168 | ClearPageReserved(virt_to_page(start)); | ||
169 | init_page_count(virt_to_page(start)); | ||
170 | free_page(start); | ||
171 | totalram_pages++; | ||
172 | } | ||
173 | } | 174 | } |
174 | #endif | 175 | #endif |
175 | 176 | ||
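free_initmem() and free_initrd_mem() now share free_init_pages(), which hands a page-aligned [begin, end) range back to the buddy allocator after poisoning it. An annotated per-page sketch of what the loop above does (comments added here, not taken from the source):

	static void free_one_init_page_sketch(unsigned long addr)
	{
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);	/* was marked reserved at boot */
		init_page_count(page);		/* reset refcount to 1 ...      */
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);		/* poison to catch late users   */
		free_page(addr);		/* ... which free_page() drops  */
		totalram_pages++;		/* page counts as available RAM */
	}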