author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-07 18:24:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-07 18:24:38 -0500
commit		280c84d1c1726be7ada045735858acdc8cfdd65a (patch)
tree		b9afa3fb97b08272b6952d5c8d1fe31f6a8092fa
parent		8efdf2b759409f85953b84d52a14ea4d39c80474 (diff)
parent		de9587a2f54d2d0063f0dbc775328129b9daaaa2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The bulk of the patches for the 3.13 merge window. Heiko spent quite
  a bit of work to improve the code generation for the kernel. That
  includes the exploitation of the interlocked-access facility for the
  atomics and bitops implementation and the improvement for the -march
  and -mtune compiler settings.

  Another important change is the removal of the user_mode=home option,
  user processes now always run in primary space. The storage keys are
  not initialized at system startup any more, with that the storage key
  removal work is complete. For the PCI support the hibernation hooks
  have been implemented.

  And as usual cleanup and fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (62 commits)
  s390/scm_blk: fix endless loop for requests != REQ_TYPE_FS
  s390/mm,tlb: correct tlb flush on page table upgrade
  s390/mm: page_table_realloc returns failure
  s390: allow to set gcc -mtune flag
  s390/percpu: remove this_cpu_xor() implementation
  s390/vtime: correct idle time calculation
  s390/time: fix get_tod_clock_ext inline assembly
  tty/hvc_iucv: remove redundant NULL check
  s390/dasd: Write to profile data area only if it is available
  s390: convert use of typedef ctl_table to struct ctl_table
  s390/pci: cleanup function information block
  s390/pci: remove CONFIG_PCI_DEBUG dependancy
  s390/pci: message cleanup
  Update default configuration
  s390: add a couple of useful defconfigs
  s390/percpu: make use of interlocked-access facility 1 instructions
  s390/percpu: use generic percpu ops for CONFIG_32BIT
  s390/compat: make psw32_user_bits a constant value again
  s390: fix handling of runtime instrumentation psw bit
  s390: fix save and restore of the floating-point-control register
  ...
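For readers unfamiliar with the interlocked-access facility mentioned above, the following sketch illustrates the general idea behind the atomics/bitops rework; it is not code from this merge, and the function name and operand constraints are assumptions for illustration only.

/*
 * Illustrative sketch, not taken from this merge: with interlocked-access
 * facility 1 (z196 and later) an atomic 64-bit OR can be issued as a single
 * "load and or" (LAOG) instruction instead of a compare-and-swap retry loop.
 * Function name and asm constraints are assumptions for illustration.
 */
static inline void atomic64_or_sketch(long *ptr, long mask)
{
	long old;

	asm volatile(
		"	laog	%0,%2,%1\n"
		: "=d" (old), "+Q" (*ptr)	/* old value out, memory operand updated */
		: "d" (mask)			/* bits to OR into *ptr */
		: "cc", "memory");
}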
-rw-r--r--  Documentation/s390/s390dbf.txt | 10
-rw-r--r--  arch/s390/Kconfig | 62
-rw-r--r--  arch/s390/Makefile | 22
-rw-r--r--  arch/s390/appldata/appldata_base.c | 18
-rw-r--r--  arch/s390/configs/default_defconfig | 655
-rw-r--r--  arch/s390/configs/gcov_defconfig | 618
-rw-r--r--  arch/s390/configs/performance_defconfig | 610
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 86
-rw-r--r--  arch/s390/crypto/aes_s390.c | 15
-rw-r--r--  arch/s390/defconfig | 4
-rw-r--r--  arch/s390/include/asm/atomic.h | 190
-rw-r--r--  arch/s390/include/asm/bitops.h | 1008
-rw-r--r--  arch/s390/include/asm/compat.h | 5
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 112
-rw-r--r--  arch/s390/include/asm/debug.h | 5
-rw-r--r--  arch/s390/include/asm/dis.h | 52
-rw-r--r--  arch/s390/include/asm/fcx.h | 38
-rw-r--r--  arch/s390/include/asm/ipl.h | 10
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 10
-rw-r--r--  arch/s390/include/asm/page.h | 7
-rw-r--r--  arch/s390/include/asm/pci_debug.h | 5
-rw-r--r--  arch/s390/include/asm/pci_insn.h | 15
-rw-r--r--  arch/s390/include/asm/percpu.h | 137
-rw-r--r--  arch/s390/include/asm/processor.h | 18
-rw-r--r--  arch/s390/include/asm/ptrace.h | 7
-rw-r--r--  arch/s390/include/asm/setup.h | 7
-rw-r--r--  arch/s390/include/asm/smp.h | 1
-rw-r--r--  arch/s390/include/asm/switch_to.h | 124
-rw-r--r--  arch/s390/include/asm/timex.h | 6
-rw-r--r--  arch/s390/include/asm/uaccess.h | 18
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/sigcontext.h | 1
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/bitmap.c | 54
-rw-r--r--  arch/s390/kernel/cache.c | 5
-rw-r--r--  arch/s390/kernel/compat_linux.c | 4
-rw-r--r--  arch/s390/kernel/compat_linux.h | 1
-rw-r--r--  arch/s390/kernel/compat_signal.c | 89
-rw-r--r--  arch/s390/kernel/crash_dump.c | 35
-rw-r--r--  arch/s390/kernel/debug.c | 2
-rw-r--r--  arch/s390/kernel/dis.c | 81
-rw-r--r--  arch/s390/kernel/dumpstack.c | 1
-rw-r--r--  arch/s390/kernel/early.c | 2
-rw-r--r--  arch/s390/kernel/entry.h | 1
-rw-r--r--  arch/s390/kernel/ftrace.c | 9
-rw-r--r--  arch/s390/kernel/head.S | 2
-rw-r--r--  arch/s390/kernel/ipl.c | 4
-rw-r--r--  arch/s390/kernel/kprobes.c | 13
-rw-r--r--  arch/s390/kernel/pgm_check.S | 2
-rw-r--r--  arch/s390/kernel/process.c | 15
-rw-r--r--  arch/s390/kernel/ptrace.c | 70
-rw-r--r--  arch/s390/kernel/runtime_instr.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 59
-rw-r--r--  arch/s390/kernel/signal.c | 49
-rw-r--r--  arch/s390/kernel/smp.c | 21
-rw-r--r--  arch/s390/kernel/vdso.c | 9
-rw-r--r--  arch/s390/kernel/vtime.c | 4
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 23
-rw-r--r--  arch/s390/kvm/trace.h | 1
-rw-r--r--  arch/s390/lib/Makefile | 2
-rw-r--r--  arch/s390/lib/find.c | 77
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c | 30
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 2
-rw-r--r--  arch/s390/lib/uaccess_std.c | 305
-rw-r--r--  arch/s390/math-emu/math.c | 2
-rw-r--r--  arch/s390/mm/cmm.c | 12
-rw-r--r--  arch/s390/mm/fault.c | 46
-rw-r--r--  arch/s390/mm/gup.c | 83
-rw-r--r--  arch/s390/mm/mmap.c | 12
-rw-r--r--  arch/s390/mm/pageattr.c | 4
-rw-r--r--  arch/s390/mm/pgtable.c | 43
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 6
-rw-r--r--  arch/s390/pci/pci.c | 108
-rw-r--r--  arch/s390/pci/pci_clp.c | 33
-rw-r--r--  arch/s390/pci/pci_dma.c | 18
-rw-r--r--  arch/s390/pci/pci_event.c | 35
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 3
-rw-r--r--  drivers/s390/block/scm_blk.c | 6
-rw-r--r--  drivers/s390/block/scm_blk.h | 2
-rw-r--r--  drivers/s390/char/monwriter.c | 2
-rw-r--r--  drivers/s390/char/raw3270.c | 4
-rw-r--r--  drivers/s390/char/zcore.c | 20
-rw-r--r--  drivers/s390/cio/airq.c | 19
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 29
-rw-r--r--  drivers/s390/cio/eadm_sch.h | 4
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 8
-rw-r--r--  drivers/s390/cio/qdio_main.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h | 12
-rw-r--r--  drivers/s390/net/claw.h | 8
-rw-r--r--  drivers/s390/net/ctcm_dbug.c | 2
-rw-r--r--  drivers/s390/net/lcs.h | 8
-rw-r--r--  drivers/s390/net/netiucv.c | 8
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 4
-rw-r--r--  drivers/tty/hvc/hvc_iucv.c | 3
96 files changed, 3427 insertions, 1983 deletions
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index fcaf0b4efba2..3da163383c93 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -158,6 +158,16 @@ Return Value: none
 Description: Sets new actual debug level if new_level is valid.
 
 ---------------------------------------------------------------------------
+bool debug_level_enabled (debug_info_t * id, int level);
+
+Parameter:    id:    handle for debug log
+              level: debug level
+
+Return Value: True if level is less or equal to the current debug level.
+
+Description:  Returns true if debug events for the specified level would be
+              logged. Otherwise returns false.
+---------------------------------------------------------------------------
 void debug_stop_all(void);
 
 Parameter: none
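A minimal usage sketch of the helper documented above; the debug handle, function name, and event text are made up for illustration, only debug_level_enabled() and debug_text_event() are the documented s390dbf calls.

#include <asm/debug.h>

static debug_info_t *my_dbf;	/* assumed to be set up elsewhere via debug_register() */

static void my_trace_completion(void)
{
	/* Skip the event entirely unless level 4 would actually be logged. */
	if (debug_level_enabled(my_dbf, 4))
		debug_text_event(my_dbf, 4, "request completed");
}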
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 7143793859fa..f75d7e517927 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
 	select CLONE_BACKWARDS2
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_DEVICES if !SMP
+	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -237,6 +238,67 @@ config MARCH_ZEC12
 
 endchoice
 
+config MARCH_G5_TUNE
+	def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT
+
+config MARCH_Z900_TUNE
+	def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
+
+config MARCH_Z990_TUNE
+	def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT
+
+config MARCH_Z9_109_TUNE
+	def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT
+
+config MARCH_Z10_TUNE
+	def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT
+
+config MARCH_Z196_TUNE
+	def_bool TUNE_Z196 || MARCH_Z196 && TUNE_DEFAULT
+
+config MARCH_ZEC12_TUNE
+	def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+
+choice
+	prompt "Tune code generation"
+	default TUNE_DEFAULT
+	help
+	  Cause the compiler to tune (-mtune) the generated code for a machine.
+	  This will make the code run faster on the selected machine but
+	  somewhat slower on other machines.
+	  This option only changes how the compiler emits instructions, not the
+	  selection of instructions itself, so the resulting kernel will run on
+	  all other machines.
+
+config TUNE_DEFAULT
+	bool "Default"
+	help
+	  Tune the generated code for the target processor for which the kernel
+	  will be compiled.
+
+config TUNE_G5
+	bool "System/390 model G5 and G6"
+
+config TUNE_Z900
+	bool "IBM zSeries model z800 and z900"
+
+config TUNE_Z990
+	bool "IBM zSeries model z890 and z990"
+
+config TUNE_Z9_109
+	bool "IBM System z9"
+
+config TUNE_Z10
+	bool "IBM System z10"
+
+config TUNE_Z196
+	bool "IBM zEnterprise 114 and 196"
+
+config TUNE_ZEC12
+	bool "IBM zBC12 and zEC12"
+
+endchoice
+
 config 64BIT
 	def_bool y
 	prompt "64 bit kernel"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index a7d68a467ce8..874e6d6e9c5f 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,13 +35,21 @@ endif
 
 export LD_BFD
 
-cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
-cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
-cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
-cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
-cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
-cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
-cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
+cflags-$(CONFIG_MARCH_G5) += -march=g5
+cflags-$(CONFIG_MARCH_Z900) += -march=z900
+cflags-$(CONFIG_MARCH_Z990) += -march=z990
+cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
+cflags-$(CONFIG_MARCH_Z10) += -march=z10
+cflags-$(CONFIG_MARCH_Z196) += -march=z196
+cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12
+
+cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
+cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
+cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
+cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
+cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
+cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
+cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
 
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE	:=arch/s390/boot/image
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 87a22092b68f..4c4a1cef5208 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -48,9 +48,9 @@ static struct platform_device *appldata_pdev;
  * /proc entries (sysctl)
  */
 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
-static int appldata_timer_handler(ctl_table *ctl, int write,
+static int appldata_timer_handler(struct ctl_table *ctl, int write,
 				  void __user *buffer, size_t *lenp, loff_t *ppos);
-static int appldata_interval_handler(ctl_table *ctl, int write,
+static int appldata_interval_handler(struct ctl_table *ctl, int write,
 				     void __user *buffer,
 				     size_t *lenp, loff_t *ppos);
 
@@ -201,10 +201,10 @@ static void __appldata_vtimer_setup(int cmd)
  * Start/Stop timer, show status of timer (0 = not active, 1 = active)
  */
 static int
-appldata_timer_handler(ctl_table *ctl, int write,
+appldata_timer_handler(struct ctl_table *ctl, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int len;
+	unsigned int len;
 	char buf[2];
 
 	if (!*lenp || *ppos) {
@@ -243,10 +243,11 @@ out:
  * current timer interval.
  */
 static int
-appldata_interval_handler(ctl_table *ctl, int write,
+appldata_interval_handler(struct ctl_table *ctl, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int len, interval;
+	unsigned int len;
+	int interval;
 	char buf[16];
 
 	if (!*lenp || *ppos) {
@@ -286,11 +287,12 @@ out:
  * monitoring (0 = not in process, 1 = in process)
  */
 static int
-appldata_generic_handler(ctl_table *ctl, int write,
+appldata_generic_handler(struct ctl_table *ctl, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct appldata_ops *ops = NULL, *tmp_ops;
-	int rc, len, found;
+	unsigned int len;
+	int rc, found;
 	char buf[2];
 	struct list_head *lh;
 
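As a reference for the ctl_table conversion above, a bare-bones handler with the updated prototype might look like the following; this is a hypothetical handler, not part of the merge, and only the struct ctl_table signature and the unsigned length follow the change shown.

/* Minimal sketch of a sysctl proc handler after the conversion:
 * the ctl_table typedef is gone and the copied length is unsigned. */
static int example_handler(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	char buf[2];

	if (!*lenp || *ppos) {		/* nothing left to read or write */
		*lenp = 0;
		return 0;
	}
	len = min_t(unsigned int, *lenp, sizeof(buf));
	if (write && copy_from_user(buf, buffer, len))
		return -EFAULT;
	*lenp = len;
	*ppos += len;
	return 0;
}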
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
new file mode 100644
index 000000000000..e0af2ee58751
--- /dev/null
+++ b/arch/s390/configs/default_defconfig
@@ -0,0 +1,655 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_AUDIT=y
5CONFIG_NO_HZ=y
6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y
9CONFIG_TASKSTATS=y
10CONFIG_TASK_DELAY_ACCT=y
11CONFIG_TASK_XACCT=y
12CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_RCU_FAST_NO_HZ=y
14CONFIG_IKCONFIG=y
15CONFIG_IKCONFIG_PROC=y
16CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y
19CONFIG_CGROUP_CPUACCT=y
20CONFIG_RESOURCE_COUNTERS=y
21CONFIG_CGROUP_PERF=y
22CONFIG_CFS_BANDWIDTH=y
23CONFIG_RT_GROUP_SCHED=y
24CONFIG_BLK_CGROUP=y
25CONFIG_SCHED_AUTOGROUP=y
26CONFIG_BLK_DEV_INITRD=y
27# CONFIG_COMPAT_BRK is not set
28CONFIG_PROFILING=y
29CONFIG_OPROFILE=m
30CONFIG_KPROBES=y
31CONFIG_JUMP_LABEL=y
32CONFIG_MODULES=y
33CONFIG_MODULE_FORCE_LOAD=y
34CONFIG_MODULE_UNLOAD=y
35CONFIG_MODULE_FORCE_UNLOAD=y
36CONFIG_MODVERSIONS=y
37CONFIG_MODULE_SRCVERSION_ALL=y
38CONFIG_BLK_DEV_INTEGRITY=y
39CONFIG_BLK_DEV_THROTTLING=y
40CONFIG_PARTITION_ADVANCED=y
41CONFIG_IBM_PARTITION=y
42CONFIG_BSD_DISKLABEL=y
43CONFIG_MINIX_SUBPARTITION=y
44CONFIG_SOLARIS_X86_PARTITION=y
45CONFIG_UNIXWARE_DISKLABEL=y
46CONFIG_CFQ_GROUP_IOSCHED=y
47CONFIG_DEFAULT_DEADLINE=y
48CONFIG_MARCH_Z9_109=y
49CONFIG_PREEMPT=y
50CONFIG_HZ_100=y
51CONFIG_MEMORY_HOTPLUG=y
52CONFIG_MEMORY_HOTREMOVE=y
53CONFIG_KSM=y
54CONFIG_TRANSPARENT_HUGEPAGE=y
55CONFIG_PCI=y
56CONFIG_PCI_DEBUG=y
57CONFIG_HOTPLUG_PCI=y
58CONFIG_HOTPLUG_PCI_S390=y
59CONFIG_CHSC_SCH=y
60CONFIG_CRASH_DUMP=y
61CONFIG_ZFCPDUMP=y
62# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
63CONFIG_BINFMT_MISC=m
64CONFIG_HIBERNATION=y
65CONFIG_PACKET=y
66CONFIG_PACKET_DIAG=m
67CONFIG_UNIX=y
68CONFIG_UNIX_DIAG=m
69CONFIG_XFRM_USER=m
70CONFIG_NET_KEY=m
71CONFIG_INET=y
72CONFIG_IP_MULTICAST=y
73CONFIG_IP_ADVANCED_ROUTER=y
74CONFIG_IP_MULTIPLE_TABLES=y
75CONFIG_IP_ROUTE_MULTIPATH=y
76CONFIG_IP_ROUTE_VERBOSE=y
77CONFIG_NET_IPIP=m
78CONFIG_NET_IPGRE_DEMUX=m
79CONFIG_NET_IPGRE=m
80CONFIG_NET_IPGRE_BROADCAST=y
81CONFIG_IP_MROUTE=y
82CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
83CONFIG_IP_PIMSM_V1=y
84CONFIG_IP_PIMSM_V2=y
85CONFIG_SYN_COOKIES=y
86CONFIG_NET_IPVTI=m
87CONFIG_INET_AH=m
88CONFIG_INET_ESP=m
89CONFIG_INET_IPCOMP=m
90CONFIG_INET_XFRM_MODE_TRANSPORT=m
91CONFIG_INET_XFRM_MODE_TUNNEL=m
92CONFIG_INET_XFRM_MODE_BEET=m
93CONFIG_INET_DIAG=m
94CONFIG_INET_UDP_DIAG=m
95CONFIG_TCP_CONG_ADVANCED=y
96CONFIG_TCP_CONG_HSTCP=m
97CONFIG_TCP_CONG_HYBLA=m
98CONFIG_TCP_CONG_SCALABLE=m
99CONFIG_TCP_CONG_LP=m
100CONFIG_TCP_CONG_VENO=m
101CONFIG_TCP_CONG_YEAH=m
102CONFIG_TCP_CONG_ILLINOIS=m
103CONFIG_IPV6=y
104CONFIG_IPV6_PRIVACY=y
105CONFIG_IPV6_ROUTER_PREF=y
106CONFIG_INET6_AH=m
107CONFIG_INET6_ESP=m
108CONFIG_INET6_IPCOMP=m
109CONFIG_IPV6_MIP6=m
110CONFIG_INET6_XFRM_MODE_TRANSPORT=m
111CONFIG_INET6_XFRM_MODE_TUNNEL=m
112CONFIG_INET6_XFRM_MODE_BEET=m
113CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
114CONFIG_IPV6_SIT=m
115CONFIG_IPV6_GRE=m
116CONFIG_IPV6_MULTIPLE_TABLES=y
117CONFIG_IPV6_SUBTREES=y
118CONFIG_NETFILTER=y
119CONFIG_NF_CONNTRACK=m
120CONFIG_NF_CONNTRACK_SECMARK=y
121CONFIG_NF_CONNTRACK_EVENTS=y
122CONFIG_NF_CONNTRACK_TIMEOUT=y
123CONFIG_NF_CONNTRACK_TIMESTAMP=y
124CONFIG_NF_CT_PROTO_DCCP=m
125CONFIG_NF_CT_PROTO_UDPLITE=m
126CONFIG_NF_CONNTRACK_AMANDA=m
127CONFIG_NF_CONNTRACK_FTP=m
128CONFIG_NF_CONNTRACK_H323=m
129CONFIG_NF_CONNTRACK_IRC=m
130CONFIG_NF_CONNTRACK_NETBIOS_NS=m
131CONFIG_NF_CONNTRACK_SNMP=m
132CONFIG_NF_CONNTRACK_PPTP=m
133CONFIG_NF_CONNTRACK_SANE=m
134CONFIG_NF_CONNTRACK_SIP=m
135CONFIG_NF_CONNTRACK_TFTP=m
136CONFIG_NF_CT_NETLINK=m
137CONFIG_NF_CT_NETLINK_TIMEOUT=m
138CONFIG_NETFILTER_TPROXY=m
139CONFIG_NETFILTER_XT_SET=m
140CONFIG_NETFILTER_XT_TARGET_AUDIT=m
141CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
142CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
143CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
144CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
145CONFIG_NETFILTER_XT_TARGET_CT=m
146CONFIG_NETFILTER_XT_TARGET_DSCP=m
147CONFIG_NETFILTER_XT_TARGET_HMARK=m
148CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
149CONFIG_NETFILTER_XT_TARGET_LOG=m
150CONFIG_NETFILTER_XT_TARGET_MARK=m
151CONFIG_NETFILTER_XT_TARGET_NFLOG=m
152CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
153CONFIG_NETFILTER_XT_TARGET_TEE=m
154CONFIG_NETFILTER_XT_TARGET_TPROXY=m
155CONFIG_NETFILTER_XT_TARGET_TRACE=m
156CONFIG_NETFILTER_XT_TARGET_SECMARK=m
157CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
158CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
159CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
160CONFIG_NETFILTER_XT_MATCH_BPF=m
161CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
162CONFIG_NETFILTER_XT_MATCH_COMMENT=m
163CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
164CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
165CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
166CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
167CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
168CONFIG_NETFILTER_XT_MATCH_CPU=m
169CONFIG_NETFILTER_XT_MATCH_DCCP=m
170CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
171CONFIG_NETFILTER_XT_MATCH_DSCP=m
172CONFIG_NETFILTER_XT_MATCH_ESP=m
173CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
174CONFIG_NETFILTER_XT_MATCH_HELPER=m
175CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
176CONFIG_NETFILTER_XT_MATCH_IPVS=m
177CONFIG_NETFILTER_XT_MATCH_LENGTH=m
178CONFIG_NETFILTER_XT_MATCH_LIMIT=m
179CONFIG_NETFILTER_XT_MATCH_MAC=m
180CONFIG_NETFILTER_XT_MATCH_MARK=m
181CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
182CONFIG_NETFILTER_XT_MATCH_NFACCT=m
183CONFIG_NETFILTER_XT_MATCH_OSF=m
184CONFIG_NETFILTER_XT_MATCH_OWNER=m
185CONFIG_NETFILTER_XT_MATCH_POLICY=m
186CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
187CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
188CONFIG_NETFILTER_XT_MATCH_QUOTA=m
189CONFIG_NETFILTER_XT_MATCH_RATEEST=m
190CONFIG_NETFILTER_XT_MATCH_REALM=m
191CONFIG_NETFILTER_XT_MATCH_RECENT=m
192CONFIG_NETFILTER_XT_MATCH_SOCKET=m
193CONFIG_NETFILTER_XT_MATCH_STATE=m
194CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
195CONFIG_NETFILTER_XT_MATCH_STRING=m
196CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
197CONFIG_NETFILTER_XT_MATCH_TIME=m
198CONFIG_NETFILTER_XT_MATCH_U32=m
199CONFIG_IP_SET=m
200CONFIG_IP_SET_BITMAP_IP=m
201CONFIG_IP_SET_BITMAP_IPMAC=m
202CONFIG_IP_SET_BITMAP_PORT=m
203CONFIG_IP_SET_HASH_IP=m
204CONFIG_IP_SET_HASH_IPPORT=m
205CONFIG_IP_SET_HASH_IPPORTIP=m
206CONFIG_IP_SET_HASH_IPPORTNET=m
207CONFIG_IP_SET_HASH_NET=m
208CONFIG_IP_SET_HASH_NETPORT=m
209CONFIG_IP_SET_HASH_NETIFACE=m
210CONFIG_IP_SET_LIST_SET=m
211CONFIG_IP_VS=m
212CONFIG_IP_VS_PROTO_TCP=y
213CONFIG_IP_VS_PROTO_UDP=y
214CONFIG_IP_VS_PROTO_ESP=y
215CONFIG_IP_VS_PROTO_AH=y
216CONFIG_IP_VS_RR=m
217CONFIG_IP_VS_WRR=m
218CONFIG_IP_VS_LC=m
219CONFIG_IP_VS_WLC=m
220CONFIG_IP_VS_LBLC=m
221CONFIG_IP_VS_LBLCR=m
222CONFIG_IP_VS_DH=m
223CONFIG_IP_VS_SH=m
224CONFIG_IP_VS_SED=m
225CONFIG_IP_VS_NQ=m
226CONFIG_IP_VS_FTP=m
227CONFIG_IP_VS_PE_SIP=m
228CONFIG_NF_CONNTRACK_IPV4=m
229# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
230CONFIG_IP_NF_IPTABLES=m
231CONFIG_IP_NF_MATCH_AH=m
232CONFIG_IP_NF_MATCH_ECN=m
233CONFIG_IP_NF_MATCH_RPFILTER=m
234CONFIG_IP_NF_MATCH_TTL=m
235CONFIG_IP_NF_FILTER=m
236CONFIG_IP_NF_TARGET_REJECT=m
237CONFIG_IP_NF_TARGET_ULOG=m
238CONFIG_NF_NAT_IPV4=m
239CONFIG_IP_NF_TARGET_MASQUERADE=m
240CONFIG_IP_NF_TARGET_NETMAP=m
241CONFIG_IP_NF_TARGET_REDIRECT=m
242CONFIG_IP_NF_MANGLE=m
243CONFIG_IP_NF_TARGET_CLUSTERIP=m
244CONFIG_IP_NF_TARGET_ECN=m
245CONFIG_IP_NF_TARGET_TTL=m
246CONFIG_IP_NF_RAW=m
247CONFIG_IP_NF_SECURITY=m
248CONFIG_IP_NF_ARPTABLES=m
249CONFIG_IP_NF_ARPFILTER=m
250CONFIG_IP_NF_ARP_MANGLE=m
251CONFIG_NF_CONNTRACK_IPV6=m
252CONFIG_IP6_NF_IPTABLES=m
253CONFIG_IP6_NF_MATCH_AH=m
254CONFIG_IP6_NF_MATCH_EUI64=m
255CONFIG_IP6_NF_MATCH_FRAG=m
256CONFIG_IP6_NF_MATCH_OPTS=m
257CONFIG_IP6_NF_MATCH_HL=m
258CONFIG_IP6_NF_MATCH_IPV6HEADER=m
259CONFIG_IP6_NF_MATCH_MH=m
260CONFIG_IP6_NF_MATCH_RPFILTER=m
261CONFIG_IP6_NF_MATCH_RT=m
262CONFIG_IP6_NF_TARGET_HL=m
263CONFIG_IP6_NF_FILTER=m
264CONFIG_IP6_NF_TARGET_REJECT=m
265CONFIG_IP6_NF_MANGLE=m
266CONFIG_IP6_NF_RAW=m
267CONFIG_IP6_NF_SECURITY=m
268CONFIG_NF_NAT_IPV6=m
269CONFIG_IP6_NF_TARGET_MASQUERADE=m
270CONFIG_IP6_NF_TARGET_NPT=m
271CONFIG_NET_SCTPPROBE=m
272CONFIG_RDS=m
273CONFIG_RDS_RDMA=m
274CONFIG_RDS_TCP=m
275CONFIG_RDS_DEBUG=y
276CONFIG_L2TP=m
277CONFIG_L2TP_DEBUGFS=m
278CONFIG_L2TP_V3=y
279CONFIG_L2TP_IP=m
280CONFIG_L2TP_ETH=m
281CONFIG_BRIDGE=m
282CONFIG_VLAN_8021Q=m
283CONFIG_VLAN_8021Q_GVRP=y
284CONFIG_NET_SCHED=y
285CONFIG_NET_SCH_CBQ=m
286CONFIG_NET_SCH_HTB=m
287CONFIG_NET_SCH_HFSC=m
288CONFIG_NET_SCH_PRIO=m
289CONFIG_NET_SCH_MULTIQ=m
290CONFIG_NET_SCH_RED=m
291CONFIG_NET_SCH_SFB=m
292CONFIG_NET_SCH_SFQ=m
293CONFIG_NET_SCH_TEQL=m
294CONFIG_NET_SCH_TBF=m
295CONFIG_NET_SCH_GRED=m
296CONFIG_NET_SCH_DSMARK=m
297CONFIG_NET_SCH_NETEM=m
298CONFIG_NET_SCH_DRR=m
299CONFIG_NET_SCH_MQPRIO=m
300CONFIG_NET_SCH_CHOKE=m
301CONFIG_NET_SCH_QFQ=m
302CONFIG_NET_SCH_CODEL=m
303CONFIG_NET_SCH_FQ_CODEL=m
304CONFIG_NET_SCH_INGRESS=m
305CONFIG_NET_SCH_PLUG=m
306CONFIG_NET_CLS_BASIC=m
307CONFIG_NET_CLS_TCINDEX=m
308CONFIG_NET_CLS_ROUTE4=m
309CONFIG_NET_CLS_FW=m
310CONFIG_NET_CLS_U32=m
311CONFIG_CLS_U32_PERF=y
312CONFIG_CLS_U32_MARK=y
313CONFIG_NET_CLS_RSVP=m
314CONFIG_NET_CLS_RSVP6=m
315CONFIG_NET_CLS_FLOW=m
316CONFIG_NET_CLS_CGROUP=y
317CONFIG_NET_CLS_ACT=y
318CONFIG_NET_ACT_POLICE=m
319CONFIG_NET_ACT_GACT=m
320CONFIG_GACT_PROB=y
321CONFIG_NET_ACT_MIRRED=m
322CONFIG_NET_ACT_IPT=m
323CONFIG_NET_ACT_NAT=m
324CONFIG_NET_ACT_PEDIT=m
325CONFIG_NET_ACT_SIMP=m
326CONFIG_NET_ACT_SKBEDIT=m
327CONFIG_NET_ACT_CSUM=m
328CONFIG_DNS_RESOLVER=y
329CONFIG_BPF_JIT=y
330CONFIG_NET_PKTGEN=m
331CONFIG_NET_TCPPROBE=m
332CONFIG_DEVTMPFS=y
333CONFIG_CONNECTOR=y
334CONFIG_BLK_DEV_LOOP=m
335CONFIG_BLK_DEV_CRYPTOLOOP=m
336CONFIG_BLK_DEV_NBD=m
337CONFIG_BLK_DEV_OSD=m
338CONFIG_BLK_DEV_RAM=y
339CONFIG_BLK_DEV_RAM_SIZE=32768
340CONFIG_BLK_DEV_XIP=y
341CONFIG_CDROM_PKTCDVD=m
342CONFIG_ATA_OVER_ETH=m
343CONFIG_VIRTIO_BLK=y
344CONFIG_ENCLOSURE_SERVICES=m
345CONFIG_RAID_ATTRS=m
346CONFIG_SCSI=y
347CONFIG_SCSI_TGT=m
348CONFIG_BLK_DEV_SD=y
349CONFIG_CHR_DEV_ST=m
350CONFIG_CHR_DEV_OSST=m
351CONFIG_BLK_DEV_SR=m
352CONFIG_CHR_DEV_SG=y
353CONFIG_CHR_DEV_SCH=m
354CONFIG_SCSI_ENCLOSURE=m
355CONFIG_SCSI_MULTI_LUN=y
356CONFIG_SCSI_CONSTANTS=y
357CONFIG_SCSI_LOGGING=y
358CONFIG_SCSI_SPI_ATTRS=m
359CONFIG_SCSI_SAS_LIBSAS=m
360CONFIG_SCSI_SRP_ATTRS=m
361CONFIG_SCSI_SRP_TGT_ATTRS=y
362CONFIG_ISCSI_TCP=m
363CONFIG_LIBFCOE=m
364CONFIG_SCSI_DEBUG=m
365CONFIG_ZFCP=y
366CONFIG_SCSI_VIRTIO=m
367CONFIG_SCSI_DH=m
368CONFIG_SCSI_DH_RDAC=m
369CONFIG_SCSI_DH_HP_SW=m
370CONFIG_SCSI_DH_EMC=m
371CONFIG_SCSI_DH_ALUA=m
372CONFIG_SCSI_OSD_INITIATOR=m
373CONFIG_SCSI_OSD_ULD=m
374CONFIG_MD=y
375CONFIG_BLK_DEV_MD=y
376CONFIG_MD_LINEAR=m
377CONFIG_MD_RAID0=m
378CONFIG_MD_MULTIPATH=m
379CONFIG_MD_FAULTY=m
380CONFIG_BLK_DEV_DM=m
381CONFIG_DM_CRYPT=m
382CONFIG_DM_SNAPSHOT=m
383CONFIG_DM_MIRROR=m
384CONFIG_DM_RAID=m
385CONFIG_DM_LOG_USERSPACE=m
386CONFIG_DM_ZERO=m
387CONFIG_DM_MULTIPATH=m
388CONFIG_DM_MULTIPATH_QL=m
389CONFIG_DM_MULTIPATH_ST=m
390CONFIG_DM_DELAY=m
391CONFIG_DM_UEVENT=y
392CONFIG_DM_FLAKEY=m
393CONFIG_DM_VERITY=m
394CONFIG_DM_SWITCH=m
395CONFIG_NETDEVICES=y
396CONFIG_BONDING=m
397CONFIG_DUMMY=m
398CONFIG_EQUALIZER=m
399CONFIG_IFB=m
400CONFIG_MACVLAN=m
401CONFIG_MACVTAP=m
402CONFIG_VXLAN=m
403CONFIG_TUN=m
404CONFIG_VETH=m
405CONFIG_VIRTIO_NET=m
406CONFIG_NLMON=m
407CONFIG_VHOST_NET=m
408# CONFIG_NET_VENDOR_ARC is not set
409# CONFIG_NET_CADENCE is not set
410# CONFIG_NET_VENDOR_CHELSIO is not set
411# CONFIG_NET_VENDOR_INTEL is not set
412# CONFIG_NET_VENDOR_MARVELL is not set
413CONFIG_MLX4_EN=m
414# CONFIG_NET_VENDOR_NATSEMI is not set
415CONFIG_PPP=m
416CONFIG_PPP_BSDCOMP=m
417CONFIG_PPP_DEFLATE=m
418CONFIG_PPP_MPPE=m
419CONFIG_PPPOE=m
420CONFIG_PPTP=m
421CONFIG_PPPOL2TP=m
422CONFIG_PPP_ASYNC=m
423CONFIG_PPP_SYNC_TTY=m
424# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
425# CONFIG_INPUT_KEYBOARD is not set
426# CONFIG_INPUT_MOUSE is not set
427# CONFIG_SERIO is not set
428CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
429CONFIG_LEGACY_PTY_COUNT=0
430CONFIG_HW_RANDOM_VIRTIO=m
431CONFIG_RAW_DRIVER=m
432CONFIG_HANGCHECK_TIMER=m
433CONFIG_TN3270_FS=y
434CONFIG_WATCHDOG=y
435CONFIG_WATCHDOG_NOWAYOUT=y
436CONFIG_SOFT_WATCHDOG=m
437CONFIG_ZVM_WATCHDOG=m
438# CONFIG_HID is not set
439# CONFIG_USB_SUPPORT is not set
440CONFIG_INFINIBAND=m
441CONFIG_INFINIBAND_USER_ACCESS=m
442CONFIG_MLX4_INFINIBAND=m
443CONFIG_VIRTIO_BALLOON=m
444# CONFIG_IOMMU_SUPPORT is not set
445CONFIG_EXT2_FS=y
446CONFIG_EXT2_FS_XATTR=y
447CONFIG_EXT2_FS_POSIX_ACL=y
448CONFIG_EXT2_FS_SECURITY=y
449CONFIG_EXT2_FS_XIP=y
450CONFIG_EXT3_FS=y
451# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
452CONFIG_EXT3_FS_POSIX_ACL=y
453CONFIG_EXT3_FS_SECURITY=y
454CONFIG_EXT4_FS=y
455CONFIG_EXT4_FS_POSIX_ACL=y
456CONFIG_EXT4_FS_SECURITY=y
457CONFIG_JBD_DEBUG=y
458CONFIG_JBD2_DEBUG=y
459CONFIG_JFS_FS=m
460CONFIG_JFS_POSIX_ACL=y
461CONFIG_JFS_SECURITY=y
462CONFIG_JFS_STATISTICS=y
463CONFIG_XFS_FS=m
464CONFIG_XFS_QUOTA=y
465CONFIG_XFS_POSIX_ACL=y
466CONFIG_XFS_RT=y
467CONFIG_XFS_DEBUG=y
468CONFIG_GFS2_FS=m
469CONFIG_OCFS2_FS=m
470CONFIG_BTRFS_FS=m
471CONFIG_BTRFS_FS_POSIX_ACL=y
472CONFIG_NILFS2_FS=m
473CONFIG_FANOTIFY=y
474CONFIG_QUOTA_NETLINK_INTERFACE=y
475CONFIG_QFMT_V1=m
476CONFIG_QFMT_V2=m
477CONFIG_AUTOFS4_FS=m
478CONFIG_FUSE_FS=m
479CONFIG_CUSE=m
480CONFIG_FSCACHE=m
481CONFIG_CACHEFILES=m
482CONFIG_ISO9660_FS=y
483CONFIG_JOLIET=y
484CONFIG_ZISOFS=y
485CONFIG_UDF_FS=m
486CONFIG_MSDOS_FS=m
487CONFIG_VFAT_FS=m
488CONFIG_NTFS_FS=m
489CONFIG_NTFS_RW=y
490CONFIG_PROC_KCORE=y
491CONFIG_TMPFS=y
492CONFIG_TMPFS_POSIX_ACL=y
493CONFIG_HUGETLBFS=y
494CONFIG_CONFIGFS_FS=m
495CONFIG_ECRYPT_FS=m
496CONFIG_CRAMFS=m
497CONFIG_SQUASHFS=m
498CONFIG_SQUASHFS_XATTR=y
499CONFIG_SQUASHFS_LZO=y
500CONFIG_SQUASHFS_XZ=y
501CONFIG_ROMFS_FS=m
502CONFIG_NFS_FS=m
503CONFIG_NFS_V3_ACL=y
504CONFIG_NFS_V4=m
505CONFIG_NFS_SWAP=y
506CONFIG_NFSD=m
507CONFIG_NFSD_V3_ACL=y
508CONFIG_NFSD_V4=y
509CONFIG_NFSD_V4_SECURITY_LABEL=y
510CONFIG_CIFS=m
511CONFIG_CIFS_STATS=y
512CONFIG_CIFS_STATS2=y
513CONFIG_CIFS_WEAK_PW_HASH=y
514CONFIG_CIFS_UPCALL=y
515CONFIG_CIFS_XATTR=y
516CONFIG_CIFS_POSIX=y
517# CONFIG_CIFS_DEBUG is not set
518CONFIG_CIFS_DFS_UPCALL=y
519CONFIG_NLS_DEFAULT="utf8"
520CONFIG_NLS_CODEPAGE_437=m
521CONFIG_NLS_CODEPAGE_850=m
522CONFIG_NLS_ASCII=m
523CONFIG_NLS_ISO8859_1=m
524CONFIG_NLS_ISO8859_15=m
525CONFIG_NLS_UTF8=m
526CONFIG_DLM=m
527CONFIG_PRINTK_TIME=y
528CONFIG_DYNAMIC_DEBUG=y
529CONFIG_DEBUG_INFO=y
530# CONFIG_ENABLE_MUST_CHECK is not set
531CONFIG_FRAME_WARN=1024
532CONFIG_READABLE_ASM=y
533CONFIG_UNUSED_SYMBOLS=y
534CONFIG_MAGIC_SYSRQ=y
535CONFIG_DEBUG_KERNEL=y
536CONFIG_DEBUG_PAGEALLOC=y
537CONFIG_SLUB_DEBUG_ON=y
538CONFIG_SLUB_STATS=y
539CONFIG_DEBUG_STACK_USAGE=y
540CONFIG_DEBUG_VM=y
541CONFIG_DEBUG_VM_RB=y
542CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
543CONFIG_DEBUG_PER_CPU_MAPS=y
544CONFIG_TIMER_STATS=y
545CONFIG_DEBUG_RT_MUTEXES=y
546CONFIG_RT_MUTEX_TESTER=y
547CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
548CONFIG_PROVE_LOCKING=y
549CONFIG_LOCK_STAT=y
550CONFIG_DEBUG_LOCKDEP=y
551CONFIG_DEBUG_ATOMIC_SLEEP=y
552CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
553CONFIG_DEBUG_WRITECOUNT=y
554CONFIG_DEBUG_LIST=y
555CONFIG_DEBUG_SG=y
556CONFIG_DEBUG_NOTIFIERS=y
557CONFIG_DEBUG_CREDENTIALS=y
558CONFIG_PROVE_RCU=y
559CONFIG_RCU_TORTURE_TEST=m
560CONFIG_RCU_CPU_STALL_TIMEOUT=300
561CONFIG_NOTIFIER_ERROR_INJECTION=m
562CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
563CONFIG_PM_NOTIFIER_ERROR_INJECT=m
564CONFIG_FAULT_INJECTION=y
565CONFIG_FAILSLAB=y
566CONFIG_FAIL_PAGE_ALLOC=y
567CONFIG_FAIL_MAKE_REQUEST=y
568CONFIG_FAIL_IO_TIMEOUT=y
569CONFIG_FAULT_INJECTION_DEBUG_FS=y
570CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
571CONFIG_LATENCYTOP=y
572CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
573CONFIG_BLK_DEV_IO_TRACE=y
574# CONFIG_KPROBE_EVENT is not set
575CONFIG_LKDTM=m
576CONFIG_KPROBES_SANITY_TEST=y
577CONFIG_RBTREE_TEST=m
578CONFIG_INTERVAL_TREE_TEST=m
579CONFIG_ATOMIC64_SELFTEST=y
580CONFIG_DMA_API_DEBUG=y
581# CONFIG_STRICT_DEVMEM is not set
582CONFIG_S390_PTDUMP=y
583CONFIG_ENCRYPTED_KEYS=m
584CONFIG_KEYS_DEBUG_PROC_KEYS=y
585CONFIG_SECURITY=y
586CONFIG_SECURITY_NETWORK=y
587CONFIG_SECURITY_SELINUX=y
588CONFIG_SECURITY_SELINUX_BOOTPARAM=y
589CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
590CONFIG_SECURITY_SELINUX_DISABLE=y
591CONFIG_IMA=y
592CONFIG_IMA_APPRAISE=y
593CONFIG_CRYPTO_USER=m
594# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
595CONFIG_CRYPTO_CRYPTD=m
596CONFIG_CRYPTO_TEST=m
597CONFIG_CRYPTO_CCM=m
598CONFIG_CRYPTO_GCM=m
599CONFIG_CRYPTO_CTS=m
600CONFIG_CRYPTO_LRW=m
601CONFIG_CRYPTO_PCBC=m
602CONFIG_CRYPTO_XTS=m
603CONFIG_CRYPTO_XCBC=m
604CONFIG_CRYPTO_VMAC=m
605CONFIG_CRYPTO_CRC32=m
606CONFIG_CRYPTO_MICHAEL_MIC=m
607CONFIG_CRYPTO_RMD128=m
608CONFIG_CRYPTO_RMD160=m
609CONFIG_CRYPTO_RMD256=m
610CONFIG_CRYPTO_RMD320=m
611CONFIG_CRYPTO_SHA512=m
612CONFIG_CRYPTO_TGR192=m
613CONFIG_CRYPTO_WP512=m
614CONFIG_CRYPTO_ANUBIS=m
615CONFIG_CRYPTO_BLOWFISH=m
616CONFIG_CRYPTO_CAMELLIA=m
617CONFIG_CRYPTO_CAST5=m
618CONFIG_CRYPTO_CAST6=m
619CONFIG_CRYPTO_FCRYPT=m
620CONFIG_CRYPTO_KHAZAD=m
621CONFIG_CRYPTO_SALSA20=m
622CONFIG_CRYPTO_SEED=m
623CONFIG_CRYPTO_SERPENT=m
624CONFIG_CRYPTO_TEA=m
625CONFIG_CRYPTO_TWOFISH=m
626CONFIG_CRYPTO_ZLIB=y
627CONFIG_CRYPTO_LZO=m
628CONFIG_CRYPTO_LZ4=m
629CONFIG_CRYPTO_LZ4HC=m
630CONFIG_CRYPTO_USER_API_HASH=m
631CONFIG_CRYPTO_USER_API_SKCIPHER=m
632CONFIG_ZCRYPT=m
633CONFIG_CRYPTO_SHA1_S390=m
634CONFIG_CRYPTO_SHA256_S390=m
635CONFIG_CRYPTO_SHA512_S390=m
636CONFIG_CRYPTO_DES_S390=m
637CONFIG_CRYPTO_AES_S390=m
638CONFIG_CRYPTO_GHASH_S390=m
639CONFIG_ASYMMETRIC_KEY_TYPE=m
640CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
641CONFIG_PUBLIC_KEY_ALGO_RSA=m
642CONFIG_X509_CERTIFICATE_PARSER=m
643CONFIG_CRC7=m
644CONFIG_CRC8=m
645CONFIG_XZ_DEC_X86=y
646CONFIG_XZ_DEC_POWERPC=y
647CONFIG_XZ_DEC_IA64=y
648CONFIG_XZ_DEC_ARM=y
649CONFIG_XZ_DEC_ARMTHUMB=y
650CONFIG_XZ_DEC_SPARC=y
651CONFIG_CORDIC=m
652CONFIG_CMM=m
653CONFIG_APPLDATA_BASE=y
654CONFIG_KVM=m
655CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
new file mode 100644
index 000000000000..b9f6b4cab927
--- /dev/null
+++ b/arch/s390/configs/gcov_defconfig
@@ -0,0 +1,618 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_AUDIT=y
5CONFIG_NO_HZ=y
6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y
9CONFIG_TASKSTATS=y
10CONFIG_TASK_DELAY_ACCT=y
11CONFIG_TASK_XACCT=y
12CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_RCU_FAST_NO_HZ=y
14CONFIG_IKCONFIG=y
15CONFIG_IKCONFIG_PROC=y
16CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y
19CONFIG_CGROUP_CPUACCT=y
20CONFIG_RESOURCE_COUNTERS=y
21CONFIG_CGROUP_PERF=y
22CONFIG_BLK_CGROUP=y
23CONFIG_SCHED_AUTOGROUP=y
24CONFIG_BLK_DEV_INITRD=y
25# CONFIG_COMPAT_BRK is not set
26CONFIG_PROFILING=y
27CONFIG_OPROFILE=m
28CONFIG_KPROBES=y
29CONFIG_JUMP_LABEL=y
30CONFIG_GCOV_KERNEL=y
31CONFIG_GCOV_PROFILE_ALL=y
32CONFIG_MODULES=y
33CONFIG_MODULE_FORCE_LOAD=y
34CONFIG_MODULE_UNLOAD=y
35CONFIG_MODULE_FORCE_UNLOAD=y
36CONFIG_MODVERSIONS=y
37CONFIG_MODULE_SRCVERSION_ALL=y
38CONFIG_BLK_DEV_INTEGRITY=y
39CONFIG_BLK_DEV_THROTTLING=y
40CONFIG_PARTITION_ADVANCED=y
41CONFIG_IBM_PARTITION=y
42CONFIG_BSD_DISKLABEL=y
43CONFIG_MINIX_SUBPARTITION=y
44CONFIG_SOLARIS_X86_PARTITION=y
45CONFIG_UNIXWARE_DISKLABEL=y
46CONFIG_CFQ_GROUP_IOSCHED=y
47CONFIG_DEFAULT_DEADLINE=y
48CONFIG_MARCH_Z9_109=y
49CONFIG_HZ_100=y
50CONFIG_MEMORY_HOTPLUG=y
51CONFIG_MEMORY_HOTREMOVE=y
52CONFIG_KSM=y
53CONFIG_TRANSPARENT_HUGEPAGE=y
54CONFIG_PCI=y
55CONFIG_HOTPLUG_PCI=y
56CONFIG_HOTPLUG_PCI_S390=y
57CONFIG_CHSC_SCH=y
58CONFIG_CRASH_DUMP=y
59CONFIG_ZFCPDUMP=y
60# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
61CONFIG_BINFMT_MISC=m
62CONFIG_HIBERNATION=y
63CONFIG_PACKET=y
64CONFIG_PACKET_DIAG=m
65CONFIG_UNIX=y
66CONFIG_UNIX_DIAG=m
67CONFIG_XFRM_USER=m
68CONFIG_NET_KEY=m
69CONFIG_INET=y
70CONFIG_IP_MULTICAST=y
71CONFIG_IP_ADVANCED_ROUTER=y
72CONFIG_IP_MULTIPLE_TABLES=y
73CONFIG_IP_ROUTE_MULTIPATH=y
74CONFIG_IP_ROUTE_VERBOSE=y
75CONFIG_NET_IPIP=m
76CONFIG_NET_IPGRE_DEMUX=m
77CONFIG_NET_IPGRE=m
78CONFIG_NET_IPGRE_BROADCAST=y
79CONFIG_IP_MROUTE=y
80CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
81CONFIG_IP_PIMSM_V1=y
82CONFIG_IP_PIMSM_V2=y
83CONFIG_SYN_COOKIES=y
84CONFIG_NET_IPVTI=m
85CONFIG_INET_AH=m
86CONFIG_INET_ESP=m
87CONFIG_INET_IPCOMP=m
88CONFIG_INET_XFRM_MODE_TRANSPORT=m
89CONFIG_INET_XFRM_MODE_TUNNEL=m
90CONFIG_INET_XFRM_MODE_BEET=m
91CONFIG_INET_DIAG=m
92CONFIG_INET_UDP_DIAG=m
93CONFIG_TCP_CONG_ADVANCED=y
94CONFIG_TCP_CONG_HSTCP=m
95CONFIG_TCP_CONG_HYBLA=m
96CONFIG_TCP_CONG_SCALABLE=m
97CONFIG_TCP_CONG_LP=m
98CONFIG_TCP_CONG_VENO=m
99CONFIG_TCP_CONG_YEAH=m
100CONFIG_TCP_CONG_ILLINOIS=m
101CONFIG_IPV6=y
102CONFIG_IPV6_PRIVACY=y
103CONFIG_IPV6_ROUTER_PREF=y
104CONFIG_INET6_AH=m
105CONFIG_INET6_ESP=m
106CONFIG_INET6_IPCOMP=m
107CONFIG_IPV6_MIP6=m
108CONFIG_INET6_XFRM_MODE_TRANSPORT=m
109CONFIG_INET6_XFRM_MODE_TUNNEL=m
110CONFIG_INET6_XFRM_MODE_BEET=m
111CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
112CONFIG_IPV6_SIT=m
113CONFIG_IPV6_GRE=m
114CONFIG_IPV6_MULTIPLE_TABLES=y
115CONFIG_IPV6_SUBTREES=y
116CONFIG_NETFILTER=y
117CONFIG_NF_CONNTRACK=m
118CONFIG_NF_CONNTRACK_SECMARK=y
119CONFIG_NF_CONNTRACK_EVENTS=y
120CONFIG_NF_CONNTRACK_TIMEOUT=y
121CONFIG_NF_CONNTRACK_TIMESTAMP=y
122CONFIG_NF_CT_PROTO_DCCP=m
123CONFIG_NF_CT_PROTO_UDPLITE=m
124CONFIG_NF_CONNTRACK_AMANDA=m
125CONFIG_NF_CONNTRACK_FTP=m
126CONFIG_NF_CONNTRACK_H323=m
127CONFIG_NF_CONNTRACK_IRC=m
128CONFIG_NF_CONNTRACK_NETBIOS_NS=m
129CONFIG_NF_CONNTRACK_SNMP=m
130CONFIG_NF_CONNTRACK_PPTP=m
131CONFIG_NF_CONNTRACK_SANE=m
132CONFIG_NF_CONNTRACK_SIP=m
133CONFIG_NF_CONNTRACK_TFTP=m
134CONFIG_NF_CT_NETLINK=m
135CONFIG_NF_CT_NETLINK_TIMEOUT=m
136CONFIG_NETFILTER_TPROXY=m
137CONFIG_NETFILTER_XT_SET=m
138CONFIG_NETFILTER_XT_TARGET_AUDIT=m
139CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
140CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
141CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
142CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
143CONFIG_NETFILTER_XT_TARGET_CT=m
144CONFIG_NETFILTER_XT_TARGET_DSCP=m
145CONFIG_NETFILTER_XT_TARGET_HMARK=m
146CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
147CONFIG_NETFILTER_XT_TARGET_LOG=m
148CONFIG_NETFILTER_XT_TARGET_MARK=m
149CONFIG_NETFILTER_XT_TARGET_NFLOG=m
150CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
151CONFIG_NETFILTER_XT_TARGET_TEE=m
152CONFIG_NETFILTER_XT_TARGET_TPROXY=m
153CONFIG_NETFILTER_XT_TARGET_TRACE=m
154CONFIG_NETFILTER_XT_TARGET_SECMARK=m
155CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
156CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
157CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
158CONFIG_NETFILTER_XT_MATCH_BPF=m
159CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
160CONFIG_NETFILTER_XT_MATCH_COMMENT=m
161CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
162CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
163CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
164CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
165CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
166CONFIG_NETFILTER_XT_MATCH_CPU=m
167CONFIG_NETFILTER_XT_MATCH_DCCP=m
168CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
169CONFIG_NETFILTER_XT_MATCH_DSCP=m
170CONFIG_NETFILTER_XT_MATCH_ESP=m
171CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
172CONFIG_NETFILTER_XT_MATCH_HELPER=m
173CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
174CONFIG_NETFILTER_XT_MATCH_IPVS=m
175CONFIG_NETFILTER_XT_MATCH_LENGTH=m
176CONFIG_NETFILTER_XT_MATCH_LIMIT=m
177CONFIG_NETFILTER_XT_MATCH_MAC=m
178CONFIG_NETFILTER_XT_MATCH_MARK=m
179CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
180CONFIG_NETFILTER_XT_MATCH_NFACCT=m
181CONFIG_NETFILTER_XT_MATCH_OSF=m
182CONFIG_NETFILTER_XT_MATCH_OWNER=m
183CONFIG_NETFILTER_XT_MATCH_POLICY=m
184CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
185CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
186CONFIG_NETFILTER_XT_MATCH_QUOTA=m
187CONFIG_NETFILTER_XT_MATCH_RATEEST=m
188CONFIG_NETFILTER_XT_MATCH_REALM=m
189CONFIG_NETFILTER_XT_MATCH_RECENT=m
190CONFIG_NETFILTER_XT_MATCH_SOCKET=m
191CONFIG_NETFILTER_XT_MATCH_STATE=m
192CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
193CONFIG_NETFILTER_XT_MATCH_STRING=m
194CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
195CONFIG_NETFILTER_XT_MATCH_TIME=m
196CONFIG_NETFILTER_XT_MATCH_U32=m
197CONFIG_IP_SET=m
198CONFIG_IP_SET_BITMAP_IP=m
199CONFIG_IP_SET_BITMAP_IPMAC=m
200CONFIG_IP_SET_BITMAP_PORT=m
201CONFIG_IP_SET_HASH_IP=m
202CONFIG_IP_SET_HASH_IPPORT=m
203CONFIG_IP_SET_HASH_IPPORTIP=m
204CONFIG_IP_SET_HASH_IPPORTNET=m
205CONFIG_IP_SET_HASH_NET=m
206CONFIG_IP_SET_HASH_NETPORT=m
207CONFIG_IP_SET_HASH_NETIFACE=m
208CONFIG_IP_SET_LIST_SET=m
209CONFIG_IP_VS=m
210CONFIG_IP_VS_PROTO_TCP=y
211CONFIG_IP_VS_PROTO_UDP=y
212CONFIG_IP_VS_PROTO_ESP=y
213CONFIG_IP_VS_PROTO_AH=y
214CONFIG_IP_VS_RR=m
215CONFIG_IP_VS_WRR=m
216CONFIG_IP_VS_LC=m
217CONFIG_IP_VS_WLC=m
218CONFIG_IP_VS_LBLC=m
219CONFIG_IP_VS_LBLCR=m
220CONFIG_IP_VS_DH=m
221CONFIG_IP_VS_SH=m
222CONFIG_IP_VS_SED=m
223CONFIG_IP_VS_NQ=m
224CONFIG_IP_VS_FTP=m
225CONFIG_IP_VS_PE_SIP=m
226CONFIG_NF_CONNTRACK_IPV4=m
227# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
228CONFIG_IP_NF_IPTABLES=m
229CONFIG_IP_NF_MATCH_AH=m
230CONFIG_IP_NF_MATCH_ECN=m
231CONFIG_IP_NF_MATCH_RPFILTER=m
232CONFIG_IP_NF_MATCH_TTL=m
233CONFIG_IP_NF_FILTER=m
234CONFIG_IP_NF_TARGET_REJECT=m
235CONFIG_IP_NF_TARGET_ULOG=m
236CONFIG_NF_NAT_IPV4=m
237CONFIG_IP_NF_TARGET_MASQUERADE=m
238CONFIG_IP_NF_TARGET_NETMAP=m
239CONFIG_IP_NF_TARGET_REDIRECT=m
240CONFIG_IP_NF_MANGLE=m
241CONFIG_IP_NF_TARGET_CLUSTERIP=m
242CONFIG_IP_NF_TARGET_ECN=m
243CONFIG_IP_NF_TARGET_TTL=m
244CONFIG_IP_NF_RAW=m
245CONFIG_IP_NF_SECURITY=m
246CONFIG_IP_NF_ARPTABLES=m
247CONFIG_IP_NF_ARPFILTER=m
248CONFIG_IP_NF_ARP_MANGLE=m
249CONFIG_NF_CONNTRACK_IPV6=m
250CONFIG_IP6_NF_IPTABLES=m
251CONFIG_IP6_NF_MATCH_AH=m
252CONFIG_IP6_NF_MATCH_EUI64=m
253CONFIG_IP6_NF_MATCH_FRAG=m
254CONFIG_IP6_NF_MATCH_OPTS=m
255CONFIG_IP6_NF_MATCH_HL=m
256CONFIG_IP6_NF_MATCH_IPV6HEADER=m
257CONFIG_IP6_NF_MATCH_MH=m
258CONFIG_IP6_NF_MATCH_RPFILTER=m
259CONFIG_IP6_NF_MATCH_RT=m
260CONFIG_IP6_NF_TARGET_HL=m
261CONFIG_IP6_NF_FILTER=m
262CONFIG_IP6_NF_TARGET_REJECT=m
263CONFIG_IP6_NF_MANGLE=m
264CONFIG_IP6_NF_RAW=m
265CONFIG_IP6_NF_SECURITY=m
266CONFIG_NF_NAT_IPV6=m
267CONFIG_IP6_NF_TARGET_MASQUERADE=m
268CONFIG_IP6_NF_TARGET_NPT=m
269CONFIG_NET_SCTPPROBE=m
270CONFIG_RDS=m
271CONFIG_RDS_RDMA=m
272CONFIG_RDS_TCP=m
273CONFIG_L2TP=m
274CONFIG_L2TP_DEBUGFS=m
275CONFIG_L2TP_V3=y
276CONFIG_L2TP_IP=m
277CONFIG_L2TP_ETH=m
278CONFIG_BRIDGE=m
279CONFIG_VLAN_8021Q=m
280CONFIG_VLAN_8021Q_GVRP=y
281CONFIG_NET_SCHED=y
282CONFIG_NET_SCH_CBQ=m
283CONFIG_NET_SCH_HTB=m
284CONFIG_NET_SCH_HFSC=m
285CONFIG_NET_SCH_PRIO=m
286CONFIG_NET_SCH_MULTIQ=m
287CONFIG_NET_SCH_RED=m
288CONFIG_NET_SCH_SFB=m
289CONFIG_NET_SCH_SFQ=m
290CONFIG_NET_SCH_TEQL=m
291CONFIG_NET_SCH_TBF=m
292CONFIG_NET_SCH_GRED=m
293CONFIG_NET_SCH_DSMARK=m
294CONFIG_NET_SCH_NETEM=m
295CONFIG_NET_SCH_DRR=m
296CONFIG_NET_SCH_MQPRIO=m
297CONFIG_NET_SCH_CHOKE=m
298CONFIG_NET_SCH_QFQ=m
299CONFIG_NET_SCH_CODEL=m
300CONFIG_NET_SCH_FQ_CODEL=m
301CONFIG_NET_SCH_INGRESS=m
302CONFIG_NET_SCH_PLUG=m
303CONFIG_NET_CLS_BASIC=m
304CONFIG_NET_CLS_TCINDEX=m
305CONFIG_NET_CLS_ROUTE4=m
306CONFIG_NET_CLS_FW=m
307CONFIG_NET_CLS_U32=m
308CONFIG_CLS_U32_PERF=y
309CONFIG_CLS_U32_MARK=y
310CONFIG_NET_CLS_RSVP=m
311CONFIG_NET_CLS_RSVP6=m
312CONFIG_NET_CLS_FLOW=m
313CONFIG_NET_CLS_CGROUP=y
314CONFIG_NET_CLS_ACT=y
315CONFIG_NET_ACT_POLICE=m
316CONFIG_NET_ACT_GACT=m
317CONFIG_GACT_PROB=y
318CONFIG_NET_ACT_MIRRED=m
319CONFIG_NET_ACT_IPT=m
320CONFIG_NET_ACT_NAT=m
321CONFIG_NET_ACT_PEDIT=m
322CONFIG_NET_ACT_SIMP=m
323CONFIG_NET_ACT_SKBEDIT=m
324CONFIG_NET_ACT_CSUM=m
325CONFIG_DNS_RESOLVER=y
326CONFIG_BPF_JIT=y
327CONFIG_NET_PKTGEN=m
328CONFIG_NET_TCPPROBE=m
329CONFIG_DEVTMPFS=y
330CONFIG_CONNECTOR=y
331CONFIG_BLK_DEV_LOOP=m
332CONFIG_BLK_DEV_CRYPTOLOOP=m
333CONFIG_BLK_DEV_NBD=m
334CONFIG_BLK_DEV_OSD=m
335CONFIG_BLK_DEV_RAM=y
336CONFIG_BLK_DEV_RAM_SIZE=32768
337CONFIG_BLK_DEV_XIP=y
338CONFIG_CDROM_PKTCDVD=m
339CONFIG_ATA_OVER_ETH=m
340CONFIG_VIRTIO_BLK=y
341CONFIG_ENCLOSURE_SERVICES=m
342CONFIG_RAID_ATTRS=m
343CONFIG_SCSI=y
344CONFIG_SCSI_TGT=m
345CONFIG_BLK_DEV_SD=y
346CONFIG_CHR_DEV_ST=m
347CONFIG_CHR_DEV_OSST=m
348CONFIG_BLK_DEV_SR=m
349CONFIG_CHR_DEV_SG=y
350CONFIG_CHR_DEV_SCH=m
351CONFIG_SCSI_ENCLOSURE=m
352CONFIG_SCSI_MULTI_LUN=y
353CONFIG_SCSI_CONSTANTS=y
354CONFIG_SCSI_LOGGING=y
355CONFIG_SCSI_SPI_ATTRS=m
356CONFIG_SCSI_SAS_LIBSAS=m
357CONFIG_SCSI_SRP_ATTRS=m
358CONFIG_SCSI_SRP_TGT_ATTRS=y
359CONFIG_ISCSI_TCP=m
360CONFIG_LIBFCOE=m
361CONFIG_SCSI_DEBUG=m
362CONFIG_ZFCP=y
363CONFIG_SCSI_VIRTIO=m
364CONFIG_SCSI_DH=m
365CONFIG_SCSI_DH_RDAC=m
366CONFIG_SCSI_DH_HP_SW=m
367CONFIG_SCSI_DH_EMC=m
368CONFIG_SCSI_DH_ALUA=m
369CONFIG_SCSI_OSD_INITIATOR=m
370CONFIG_SCSI_OSD_ULD=m
371CONFIG_MD=y
372CONFIG_BLK_DEV_MD=y
373CONFIG_MD_LINEAR=m
374CONFIG_MD_RAID0=m
375CONFIG_MD_MULTIPATH=m
376CONFIG_MD_FAULTY=m
377CONFIG_BLK_DEV_DM=m
378CONFIG_DM_CRYPT=m
379CONFIG_DM_SNAPSHOT=m
380CONFIG_DM_MIRROR=m
381CONFIG_DM_RAID=m
382CONFIG_DM_LOG_USERSPACE=m
383CONFIG_DM_ZERO=m
384CONFIG_DM_MULTIPATH=m
385CONFIG_DM_MULTIPATH_QL=m
386CONFIG_DM_MULTIPATH_ST=m
387CONFIG_DM_DELAY=m
388CONFIG_DM_UEVENT=y
389CONFIG_DM_FLAKEY=m
390CONFIG_DM_VERITY=m
391CONFIG_DM_SWITCH=m
392CONFIG_NETDEVICES=y
393CONFIG_BONDING=m
394CONFIG_DUMMY=m
395CONFIG_EQUALIZER=m
396CONFIG_IFB=m
397CONFIG_MACVLAN=m
398CONFIG_MACVTAP=m
399CONFIG_VXLAN=m
400CONFIG_TUN=m
401CONFIG_VETH=m
402CONFIG_VIRTIO_NET=m
403CONFIG_NLMON=m
404CONFIG_VHOST_NET=m
405# CONFIG_NET_VENDOR_ARC is not set
406# CONFIG_NET_CADENCE is not set
407# CONFIG_NET_VENDOR_CHELSIO is not set
408# CONFIG_NET_VENDOR_INTEL is not set
409# CONFIG_NET_VENDOR_MARVELL is not set
410CONFIG_MLX4_EN=m
411# CONFIG_NET_VENDOR_NATSEMI is not set
412CONFIG_PPP=m
413CONFIG_PPP_BSDCOMP=m
414CONFIG_PPP_DEFLATE=m
415CONFIG_PPP_MPPE=m
416CONFIG_PPPOE=m
417CONFIG_PPTP=m
418CONFIG_PPPOL2TP=m
419CONFIG_PPP_ASYNC=m
420CONFIG_PPP_SYNC_TTY=m
421# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
422# CONFIG_INPUT_KEYBOARD is not set
423# CONFIG_INPUT_MOUSE is not set
424# CONFIG_SERIO is not set
425CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
426CONFIG_LEGACY_PTY_COUNT=0
427CONFIG_HW_RANDOM_VIRTIO=m
428CONFIG_RAW_DRIVER=m
429CONFIG_HANGCHECK_TIMER=m
430CONFIG_TN3270_FS=y
431CONFIG_WATCHDOG=y
432CONFIG_WATCHDOG_NOWAYOUT=y
433CONFIG_SOFT_WATCHDOG=m
434CONFIG_ZVM_WATCHDOG=m
435# CONFIG_HID is not set
436# CONFIG_USB_SUPPORT is not set
437CONFIG_INFINIBAND=m
438CONFIG_INFINIBAND_USER_ACCESS=m
439CONFIG_MLX4_INFINIBAND=m
440CONFIG_VIRTIO_BALLOON=m
441# CONFIG_IOMMU_SUPPORT is not set
442CONFIG_EXT2_FS=y
443CONFIG_EXT2_FS_XATTR=y
444CONFIG_EXT2_FS_POSIX_ACL=y
445CONFIG_EXT2_FS_SECURITY=y
446CONFIG_EXT2_FS_XIP=y
447CONFIG_EXT3_FS=y
448# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
449CONFIG_EXT3_FS_POSIX_ACL=y
450CONFIG_EXT3_FS_SECURITY=y
451CONFIG_EXT4_FS=y
452CONFIG_EXT4_FS_POSIX_ACL=y
453CONFIG_EXT4_FS_SECURITY=y
454CONFIG_JBD_DEBUG=y
455CONFIG_JBD2_DEBUG=y
456CONFIG_JFS_FS=m
457CONFIG_JFS_POSIX_ACL=y
458CONFIG_JFS_SECURITY=y
459CONFIG_JFS_STATISTICS=y
460CONFIG_XFS_FS=m
461CONFIG_XFS_QUOTA=y
462CONFIG_XFS_POSIX_ACL=y
463CONFIG_XFS_RT=y
464CONFIG_GFS2_FS=m
465CONFIG_OCFS2_FS=m
466CONFIG_BTRFS_FS=m
467CONFIG_BTRFS_FS_POSIX_ACL=y
468CONFIG_NILFS2_FS=m
469CONFIG_FANOTIFY=y
470CONFIG_QUOTA_NETLINK_INTERFACE=y
471CONFIG_QFMT_V1=m
472CONFIG_QFMT_V2=m
473CONFIG_AUTOFS4_FS=m
474CONFIG_FUSE_FS=m
475CONFIG_CUSE=m
476CONFIG_FSCACHE=m
477CONFIG_CACHEFILES=m
478CONFIG_ISO9660_FS=y
479CONFIG_JOLIET=y
480CONFIG_ZISOFS=y
481CONFIG_UDF_FS=m
482CONFIG_MSDOS_FS=m
483CONFIG_VFAT_FS=m
484CONFIG_NTFS_FS=m
485CONFIG_NTFS_RW=y
486CONFIG_PROC_KCORE=y
487CONFIG_TMPFS=y
488CONFIG_TMPFS_POSIX_ACL=y
489CONFIG_HUGETLBFS=y
490CONFIG_CONFIGFS_FS=m
491CONFIG_ECRYPT_FS=m
492CONFIG_CRAMFS=m
493CONFIG_SQUASHFS=m
494CONFIG_SQUASHFS_XATTR=y
495CONFIG_SQUASHFS_LZO=y
496CONFIG_SQUASHFS_XZ=y
497CONFIG_ROMFS_FS=m
498CONFIG_NFS_FS=m
499CONFIG_NFS_V3_ACL=y
500CONFIG_NFS_V4=m
501CONFIG_NFS_SWAP=y
502CONFIG_NFSD=m
503CONFIG_NFSD_V3_ACL=y
504CONFIG_NFSD_V4=y
505CONFIG_NFSD_V4_SECURITY_LABEL=y
506CONFIG_CIFS=m
507CONFIG_CIFS_STATS=y
508CONFIG_CIFS_STATS2=y
509CONFIG_CIFS_WEAK_PW_HASH=y
510CONFIG_CIFS_UPCALL=y
511CONFIG_CIFS_XATTR=y
512CONFIG_CIFS_POSIX=y
513# CONFIG_CIFS_DEBUG is not set
514CONFIG_CIFS_DFS_UPCALL=y
515CONFIG_NLS_DEFAULT="utf8"
516CONFIG_NLS_CODEPAGE_437=m
517CONFIG_NLS_CODEPAGE_850=m
518CONFIG_NLS_ASCII=m
519CONFIG_NLS_ISO8859_1=m
520CONFIG_NLS_ISO8859_15=m
521CONFIG_NLS_UTF8=m
522CONFIG_DLM=m
523CONFIG_PRINTK_TIME=y
524CONFIG_DEBUG_INFO=y
525# CONFIG_ENABLE_MUST_CHECK is not set
526CONFIG_FRAME_WARN=1024
527CONFIG_UNUSED_SYMBOLS=y
528CONFIG_MAGIC_SYSRQ=y
529CONFIG_DEBUG_KERNEL=y
530CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
531CONFIG_TIMER_STATS=y
532CONFIG_RCU_TORTURE_TEST=m
533CONFIG_RCU_CPU_STALL_TIMEOUT=60
534CONFIG_NOTIFIER_ERROR_INJECTION=m
535CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
536CONFIG_PM_NOTIFIER_ERROR_INJECT=m
537CONFIG_LATENCYTOP=y
538CONFIG_BLK_DEV_IO_TRACE=y
539# CONFIG_KPROBE_EVENT is not set
540CONFIG_LKDTM=m
541CONFIG_RBTREE_TEST=m
542CONFIG_INTERVAL_TREE_TEST=m
543CONFIG_ATOMIC64_SELFTEST=y
544# CONFIG_STRICT_DEVMEM is not set
545CONFIG_S390_PTDUMP=y
546CONFIG_ENCRYPTED_KEYS=m
547CONFIG_KEYS_DEBUG_PROC_KEYS=y
548CONFIG_SECURITY=y
549CONFIG_SECURITY_NETWORK=y
550CONFIG_SECURITY_SELINUX=y
551CONFIG_SECURITY_SELINUX_BOOTPARAM=y
552CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
553CONFIG_SECURITY_SELINUX_DISABLE=y
554CONFIG_IMA=y
555CONFIG_IMA_APPRAISE=y
556CONFIG_CRYPTO_USER=m
557# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
558CONFIG_CRYPTO_CRYPTD=m
559CONFIG_CRYPTO_TEST=m
560CONFIG_CRYPTO_CCM=m
561CONFIG_CRYPTO_GCM=m
562CONFIG_CRYPTO_CTS=m
563CONFIG_CRYPTO_LRW=m
564CONFIG_CRYPTO_PCBC=m
565CONFIG_CRYPTO_XTS=m
566CONFIG_CRYPTO_XCBC=m
567CONFIG_CRYPTO_VMAC=m
568CONFIG_CRYPTO_CRC32=m
569CONFIG_CRYPTO_MICHAEL_MIC=m
570CONFIG_CRYPTO_RMD128=m
571CONFIG_CRYPTO_RMD160=m
572CONFIG_CRYPTO_RMD256=m
573CONFIG_CRYPTO_RMD320=m
574CONFIG_CRYPTO_SHA512=m
575CONFIG_CRYPTO_TGR192=m
576CONFIG_CRYPTO_WP512=m
577CONFIG_CRYPTO_ANUBIS=m
578CONFIG_CRYPTO_BLOWFISH=m
579CONFIG_CRYPTO_CAMELLIA=m
580CONFIG_CRYPTO_CAST5=m
581CONFIG_CRYPTO_CAST6=m
582CONFIG_CRYPTO_FCRYPT=m
583CONFIG_CRYPTO_KHAZAD=m
584CONFIG_CRYPTO_SALSA20=m
585CONFIG_CRYPTO_SEED=m
586CONFIG_CRYPTO_SERPENT=m
587CONFIG_CRYPTO_TEA=m
588CONFIG_CRYPTO_TWOFISH=m
589CONFIG_CRYPTO_ZLIB=y
590CONFIG_CRYPTO_LZO=m
591CONFIG_CRYPTO_LZ4=m
592CONFIG_CRYPTO_LZ4HC=m
593CONFIG_CRYPTO_USER_API_HASH=m
594CONFIG_CRYPTO_USER_API_SKCIPHER=m
595CONFIG_ZCRYPT=m
596CONFIG_CRYPTO_SHA1_S390=m
597CONFIG_CRYPTO_SHA256_S390=m
598CONFIG_CRYPTO_SHA512_S390=m
599CONFIG_CRYPTO_DES_S390=m
600CONFIG_CRYPTO_AES_S390=m
601CONFIG_CRYPTO_GHASH_S390=m
602CONFIG_ASYMMETRIC_KEY_TYPE=m
603CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
604CONFIG_PUBLIC_KEY_ALGO_RSA=m
605CONFIG_X509_CERTIFICATE_PARSER=m
606CONFIG_CRC7=m
607CONFIG_CRC8=m
608CONFIG_XZ_DEC_X86=y
609CONFIG_XZ_DEC_POWERPC=y
610CONFIG_XZ_DEC_IA64=y
611CONFIG_XZ_DEC_ARM=y
612CONFIG_XZ_DEC_ARMTHUMB=y
613CONFIG_XZ_DEC_SPARC=y
614CONFIG_CORDIC=m
615CONFIG_CMM=m
616CONFIG_APPLDATA_BASE=y
617CONFIG_KVM=m
618CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
new file mode 100644
index 000000000000..91087b43e8fa
--- /dev/null
+++ b/arch/s390/configs/performance_defconfig
@@ -0,0 +1,610 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_AUDIT=y
5CONFIG_NO_HZ=y
6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y
9CONFIG_TASKSTATS=y
10CONFIG_TASK_DELAY_ACCT=y
11CONFIG_TASK_XACCT=y
12CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_RCU_FAST_NO_HZ=y
14CONFIG_IKCONFIG=y
15CONFIG_IKCONFIG_PROC=y
16CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y
19CONFIG_CGROUP_CPUACCT=y
20CONFIG_RESOURCE_COUNTERS=y
21CONFIG_CGROUP_PERF=y
22CONFIG_BLK_CGROUP=y
23CONFIG_SCHED_AUTOGROUP=y
24CONFIG_BLK_DEV_INITRD=y
25# CONFIG_COMPAT_BRK is not set
26CONFIG_PROFILING=y
27CONFIG_OPROFILE=m
28CONFIG_KPROBES=y
29CONFIG_JUMP_LABEL=y
30CONFIG_MODULES=y
31CONFIG_MODULE_FORCE_LOAD=y
32CONFIG_MODULE_UNLOAD=y
33CONFIG_MODULE_FORCE_UNLOAD=y
34CONFIG_MODVERSIONS=y
35CONFIG_MODULE_SRCVERSION_ALL=y
36CONFIG_BLK_DEV_INTEGRITY=y
37CONFIG_BLK_DEV_THROTTLING=y
38CONFIG_PARTITION_ADVANCED=y
39CONFIG_IBM_PARTITION=y
40CONFIG_BSD_DISKLABEL=y
41CONFIG_MINIX_SUBPARTITION=y
42CONFIG_SOLARIS_X86_PARTITION=y
43CONFIG_UNIXWARE_DISKLABEL=y
44CONFIG_CFQ_GROUP_IOSCHED=y
45CONFIG_DEFAULT_DEADLINE=y
46CONFIG_MARCH_Z9_109=y
47CONFIG_HZ_100=y
48CONFIG_MEMORY_HOTPLUG=y
49CONFIG_MEMORY_HOTREMOVE=y
50CONFIG_KSM=y
51CONFIG_TRANSPARENT_HUGEPAGE=y
52CONFIG_PCI=y
53CONFIG_HOTPLUG_PCI=y
54CONFIG_HOTPLUG_PCI_S390=y
55CONFIG_CHSC_SCH=y
56CONFIG_CRASH_DUMP=y
57CONFIG_ZFCPDUMP=y
58# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
59CONFIG_BINFMT_MISC=m
60CONFIG_HIBERNATION=y
61CONFIG_PACKET=y
62CONFIG_PACKET_DIAG=m
63CONFIG_UNIX=y
64CONFIG_UNIX_DIAG=m
65CONFIG_XFRM_USER=m
66CONFIG_NET_KEY=m
67CONFIG_INET=y
68CONFIG_IP_MULTICAST=y
69CONFIG_IP_ADVANCED_ROUTER=y
70CONFIG_IP_MULTIPLE_TABLES=y
71CONFIG_IP_ROUTE_MULTIPATH=y
72CONFIG_IP_ROUTE_VERBOSE=y
73CONFIG_NET_IPIP=m
74CONFIG_NET_IPGRE_DEMUX=m
75CONFIG_NET_IPGRE=m
76CONFIG_NET_IPGRE_BROADCAST=y
77CONFIG_IP_MROUTE=y
78CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
79CONFIG_IP_PIMSM_V1=y
80CONFIG_IP_PIMSM_V2=y
81CONFIG_SYN_COOKIES=y
82CONFIG_NET_IPVTI=m
83CONFIG_INET_AH=m
84CONFIG_INET_ESP=m
85CONFIG_INET_IPCOMP=m
86CONFIG_INET_XFRM_MODE_TRANSPORT=m
87CONFIG_INET_XFRM_MODE_TUNNEL=m
88CONFIG_INET_XFRM_MODE_BEET=m
89CONFIG_INET_DIAG=m
90CONFIG_INET_UDP_DIAG=m
91CONFIG_TCP_CONG_ADVANCED=y
92CONFIG_TCP_CONG_HSTCP=m
93CONFIG_TCP_CONG_HYBLA=m
94CONFIG_TCP_CONG_SCALABLE=m
95CONFIG_TCP_CONG_LP=m
96CONFIG_TCP_CONG_VENO=m
97CONFIG_TCP_CONG_YEAH=m
98CONFIG_TCP_CONG_ILLINOIS=m
99CONFIG_IPV6=y
100CONFIG_IPV6_PRIVACY=y
101CONFIG_IPV6_ROUTER_PREF=y
102CONFIG_INET6_AH=m
103CONFIG_INET6_ESP=m
104CONFIG_INET6_IPCOMP=m
105CONFIG_IPV6_MIP6=m
106CONFIG_INET6_XFRM_MODE_TRANSPORT=m
107CONFIG_INET6_XFRM_MODE_TUNNEL=m
108CONFIG_INET6_XFRM_MODE_BEET=m
109CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
110CONFIG_IPV6_SIT=m
111CONFIG_IPV6_GRE=m
112CONFIG_IPV6_MULTIPLE_TABLES=y
113CONFIG_IPV6_SUBTREES=y
114CONFIG_NETFILTER=y
115CONFIG_NF_CONNTRACK=m
116CONFIG_NF_CONNTRACK_SECMARK=y
117CONFIG_NF_CONNTRACK_EVENTS=y
118CONFIG_NF_CONNTRACK_TIMEOUT=y
119CONFIG_NF_CONNTRACK_TIMESTAMP=y
120CONFIG_NF_CT_PROTO_DCCP=m
121CONFIG_NF_CT_PROTO_UDPLITE=m
122CONFIG_NF_CONNTRACK_AMANDA=m
123CONFIG_NF_CONNTRACK_FTP=m
124CONFIG_NF_CONNTRACK_H323=m
125CONFIG_NF_CONNTRACK_IRC=m
126CONFIG_NF_CONNTRACK_NETBIOS_NS=m
127CONFIG_NF_CONNTRACK_SNMP=m
128CONFIG_NF_CONNTRACK_PPTP=m
129CONFIG_NF_CONNTRACK_SANE=m
130CONFIG_NF_CONNTRACK_SIP=m
131CONFIG_NF_CONNTRACK_TFTP=m
132CONFIG_NF_CT_NETLINK=m
133CONFIG_NF_CT_NETLINK_TIMEOUT=m
134CONFIG_NETFILTER_TPROXY=m
135CONFIG_NETFILTER_XT_SET=m
136CONFIG_NETFILTER_XT_TARGET_AUDIT=m
137CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
138CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
139CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
140CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
141CONFIG_NETFILTER_XT_TARGET_CT=m
142CONFIG_NETFILTER_XT_TARGET_DSCP=m
143CONFIG_NETFILTER_XT_TARGET_HMARK=m
144CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
145CONFIG_NETFILTER_XT_TARGET_LOG=m
146CONFIG_NETFILTER_XT_TARGET_MARK=m
147CONFIG_NETFILTER_XT_TARGET_NFLOG=m
148CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
149CONFIG_NETFILTER_XT_TARGET_TEE=m
150CONFIG_NETFILTER_XT_TARGET_TPROXY=m
151CONFIG_NETFILTER_XT_TARGET_TRACE=m
152CONFIG_NETFILTER_XT_TARGET_SECMARK=m
153CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
154CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
155CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
156CONFIG_NETFILTER_XT_MATCH_BPF=m
157CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
158CONFIG_NETFILTER_XT_MATCH_COMMENT=m
159CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
160CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
161CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
162CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
163CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
164CONFIG_NETFILTER_XT_MATCH_CPU=m
165CONFIG_NETFILTER_XT_MATCH_DCCP=m
166CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
167CONFIG_NETFILTER_XT_MATCH_DSCP=m
168CONFIG_NETFILTER_XT_MATCH_ESP=m
169CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
170CONFIG_NETFILTER_XT_MATCH_HELPER=m
171CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
172CONFIG_NETFILTER_XT_MATCH_IPVS=m
173CONFIG_NETFILTER_XT_MATCH_LENGTH=m
174CONFIG_NETFILTER_XT_MATCH_LIMIT=m
175CONFIG_NETFILTER_XT_MATCH_MAC=m
176CONFIG_NETFILTER_XT_MATCH_MARK=m
177CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
178CONFIG_NETFILTER_XT_MATCH_NFACCT=m
179CONFIG_NETFILTER_XT_MATCH_OSF=m
180CONFIG_NETFILTER_XT_MATCH_OWNER=m
181CONFIG_NETFILTER_XT_MATCH_POLICY=m
182CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
183CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
184CONFIG_NETFILTER_XT_MATCH_QUOTA=m
185CONFIG_NETFILTER_XT_MATCH_RATEEST=m
186CONFIG_NETFILTER_XT_MATCH_REALM=m
187CONFIG_NETFILTER_XT_MATCH_RECENT=m
188CONFIG_NETFILTER_XT_MATCH_SOCKET=m
189CONFIG_NETFILTER_XT_MATCH_STATE=m
190CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
191CONFIG_NETFILTER_XT_MATCH_STRING=m
192CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
193CONFIG_NETFILTER_XT_MATCH_TIME=m
194CONFIG_NETFILTER_XT_MATCH_U32=m
195CONFIG_IP_SET=m
196CONFIG_IP_SET_BITMAP_IP=m
197CONFIG_IP_SET_BITMAP_IPMAC=m
198CONFIG_IP_SET_BITMAP_PORT=m
199CONFIG_IP_SET_HASH_IP=m
200CONFIG_IP_SET_HASH_IPPORT=m
201CONFIG_IP_SET_HASH_IPPORTIP=m
202CONFIG_IP_SET_HASH_IPPORTNET=m
203CONFIG_IP_SET_HASH_NET=m
204CONFIG_IP_SET_HASH_NETPORT=m
205CONFIG_IP_SET_HASH_NETIFACE=m
206CONFIG_IP_SET_LIST_SET=m
207CONFIG_IP_VS=m
208CONFIG_IP_VS_PROTO_TCP=y
209CONFIG_IP_VS_PROTO_UDP=y
210CONFIG_IP_VS_PROTO_ESP=y
211CONFIG_IP_VS_PROTO_AH=y
212CONFIG_IP_VS_RR=m
213CONFIG_IP_VS_WRR=m
214CONFIG_IP_VS_LC=m
215CONFIG_IP_VS_WLC=m
216CONFIG_IP_VS_LBLC=m
217CONFIG_IP_VS_LBLCR=m
218CONFIG_IP_VS_DH=m
219CONFIG_IP_VS_SH=m
220CONFIG_IP_VS_SED=m
221CONFIG_IP_VS_NQ=m
222CONFIG_IP_VS_FTP=m
223CONFIG_IP_VS_PE_SIP=m
224CONFIG_NF_CONNTRACK_IPV4=m
225# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
226CONFIG_IP_NF_IPTABLES=m
227CONFIG_IP_NF_MATCH_AH=m
228CONFIG_IP_NF_MATCH_ECN=m
229CONFIG_IP_NF_MATCH_RPFILTER=m
230CONFIG_IP_NF_MATCH_TTL=m
231CONFIG_IP_NF_FILTER=m
232CONFIG_IP_NF_TARGET_REJECT=m
233CONFIG_IP_NF_TARGET_ULOG=m
234CONFIG_NF_NAT_IPV4=m
235CONFIG_IP_NF_TARGET_MASQUERADE=m
236CONFIG_IP_NF_TARGET_NETMAP=m
237CONFIG_IP_NF_TARGET_REDIRECT=m
238CONFIG_IP_NF_MANGLE=m
239CONFIG_IP_NF_TARGET_CLUSTERIP=m
240CONFIG_IP_NF_TARGET_ECN=m
241CONFIG_IP_NF_TARGET_TTL=m
242CONFIG_IP_NF_RAW=m
243CONFIG_IP_NF_SECURITY=m
244CONFIG_IP_NF_ARPTABLES=m
245CONFIG_IP_NF_ARPFILTER=m
246CONFIG_IP_NF_ARP_MANGLE=m
247CONFIG_NF_CONNTRACK_IPV6=m
248CONFIG_IP6_NF_IPTABLES=m
249CONFIG_IP6_NF_MATCH_AH=m
250CONFIG_IP6_NF_MATCH_EUI64=m
251CONFIG_IP6_NF_MATCH_FRAG=m
252CONFIG_IP6_NF_MATCH_OPTS=m
253CONFIG_IP6_NF_MATCH_HL=m
254CONFIG_IP6_NF_MATCH_IPV6HEADER=m
255CONFIG_IP6_NF_MATCH_MH=m
256CONFIG_IP6_NF_MATCH_RPFILTER=m
257CONFIG_IP6_NF_MATCH_RT=m
258CONFIG_IP6_NF_TARGET_HL=m
259CONFIG_IP6_NF_FILTER=m
260CONFIG_IP6_NF_TARGET_REJECT=m
261CONFIG_IP6_NF_MANGLE=m
262CONFIG_IP6_NF_RAW=m
263CONFIG_IP6_NF_SECURITY=m
264CONFIG_NF_NAT_IPV6=m
265CONFIG_IP6_NF_TARGET_MASQUERADE=m
266CONFIG_IP6_NF_TARGET_NPT=m
267CONFIG_NET_SCTPPROBE=m
268CONFIG_RDS=m
269CONFIG_RDS_RDMA=m
270CONFIG_RDS_TCP=m
271CONFIG_L2TP=m
272CONFIG_L2TP_DEBUGFS=m
273CONFIG_L2TP_V3=y
274CONFIG_L2TP_IP=m
275CONFIG_L2TP_ETH=m
276CONFIG_BRIDGE=m
277CONFIG_VLAN_8021Q=m
278CONFIG_VLAN_8021Q_GVRP=y
279CONFIG_NET_SCHED=y
280CONFIG_NET_SCH_CBQ=m
281CONFIG_NET_SCH_HTB=m
282CONFIG_NET_SCH_HFSC=m
283CONFIG_NET_SCH_PRIO=m
284CONFIG_NET_SCH_MULTIQ=m
285CONFIG_NET_SCH_RED=m
286CONFIG_NET_SCH_SFB=m
287CONFIG_NET_SCH_SFQ=m
288CONFIG_NET_SCH_TEQL=m
289CONFIG_NET_SCH_TBF=m
290CONFIG_NET_SCH_GRED=m
291CONFIG_NET_SCH_DSMARK=m
292CONFIG_NET_SCH_NETEM=m
293CONFIG_NET_SCH_DRR=m
294CONFIG_NET_SCH_MQPRIO=m
295CONFIG_NET_SCH_CHOKE=m
296CONFIG_NET_SCH_QFQ=m
297CONFIG_NET_SCH_CODEL=m
298CONFIG_NET_SCH_FQ_CODEL=m
299CONFIG_NET_SCH_INGRESS=m
300CONFIG_NET_SCH_PLUG=m
301CONFIG_NET_CLS_BASIC=m
302CONFIG_NET_CLS_TCINDEX=m
303CONFIG_NET_CLS_ROUTE4=m
304CONFIG_NET_CLS_FW=m
305CONFIG_NET_CLS_U32=m
306CONFIG_CLS_U32_PERF=y
307CONFIG_CLS_U32_MARK=y
308CONFIG_NET_CLS_RSVP=m
309CONFIG_NET_CLS_RSVP6=m
310CONFIG_NET_CLS_FLOW=m
311CONFIG_NET_CLS_CGROUP=y
312CONFIG_NET_CLS_ACT=y
313CONFIG_NET_ACT_POLICE=m
314CONFIG_NET_ACT_GACT=m
315CONFIG_GACT_PROB=y
316CONFIG_NET_ACT_MIRRED=m
317CONFIG_NET_ACT_IPT=m
318CONFIG_NET_ACT_NAT=m
319CONFIG_NET_ACT_PEDIT=m
320CONFIG_NET_ACT_SIMP=m
321CONFIG_NET_ACT_SKBEDIT=m
322CONFIG_NET_ACT_CSUM=m
323CONFIG_DNS_RESOLVER=y
324CONFIG_BPF_JIT=y
325CONFIG_NET_PKTGEN=m
326CONFIG_NET_TCPPROBE=m
327CONFIG_DEVTMPFS=y
328CONFIG_CONNECTOR=y
329CONFIG_BLK_DEV_LOOP=m
330CONFIG_BLK_DEV_CRYPTOLOOP=m
331CONFIG_BLK_DEV_NBD=m
332CONFIG_BLK_DEV_OSD=m
333CONFIG_BLK_DEV_RAM=y
334CONFIG_BLK_DEV_RAM_SIZE=32768
335CONFIG_BLK_DEV_XIP=y
336CONFIG_CDROM_PKTCDVD=m
337CONFIG_ATA_OVER_ETH=m
338CONFIG_VIRTIO_BLK=y
339CONFIG_ENCLOSURE_SERVICES=m
340CONFIG_RAID_ATTRS=m
341CONFIG_SCSI=y
342CONFIG_SCSI_TGT=m
343CONFIG_BLK_DEV_SD=y
344CONFIG_CHR_DEV_ST=m
345CONFIG_CHR_DEV_OSST=m
346CONFIG_BLK_DEV_SR=m
347CONFIG_CHR_DEV_SG=y
348CONFIG_CHR_DEV_SCH=m
349CONFIG_SCSI_ENCLOSURE=m
350CONFIG_SCSI_MULTI_LUN=y
351CONFIG_SCSI_CONSTANTS=y
352CONFIG_SCSI_LOGGING=y
353CONFIG_SCSI_SPI_ATTRS=m
354CONFIG_SCSI_SAS_LIBSAS=m
355CONFIG_SCSI_SRP_ATTRS=m
356CONFIG_SCSI_SRP_TGT_ATTRS=y
357CONFIG_ISCSI_TCP=m
358CONFIG_LIBFCOE=m
359CONFIG_SCSI_DEBUG=m
360CONFIG_ZFCP=y
361CONFIG_SCSI_VIRTIO=m
362CONFIG_SCSI_DH=m
363CONFIG_SCSI_DH_RDAC=m
364CONFIG_SCSI_DH_HP_SW=m
365CONFIG_SCSI_DH_EMC=m
366CONFIG_SCSI_DH_ALUA=m
367CONFIG_SCSI_OSD_INITIATOR=m
368CONFIG_SCSI_OSD_ULD=m
369CONFIG_MD=y
370CONFIG_BLK_DEV_MD=y
371CONFIG_MD_LINEAR=m
372CONFIG_MD_RAID0=m
373CONFIG_MD_MULTIPATH=m
374CONFIG_MD_FAULTY=m
375CONFIG_BLK_DEV_DM=m
376CONFIG_DM_CRYPT=m
377CONFIG_DM_SNAPSHOT=m
378CONFIG_DM_MIRROR=m
379CONFIG_DM_RAID=m
380CONFIG_DM_LOG_USERSPACE=m
381CONFIG_DM_ZERO=m
382CONFIG_DM_MULTIPATH=m
383CONFIG_DM_MULTIPATH_QL=m
384CONFIG_DM_MULTIPATH_ST=m
385CONFIG_DM_DELAY=m
386CONFIG_DM_UEVENT=y
387CONFIG_DM_FLAKEY=m
388CONFIG_DM_VERITY=m
389CONFIG_DM_SWITCH=m
390CONFIG_NETDEVICES=y
391CONFIG_BONDING=m
392CONFIG_DUMMY=m
393CONFIG_EQUALIZER=m
394CONFIG_IFB=m
395CONFIG_MACVLAN=m
396CONFIG_MACVTAP=m
397CONFIG_VXLAN=m
398CONFIG_TUN=m
399CONFIG_VETH=m
400CONFIG_VIRTIO_NET=m
401CONFIG_NLMON=m
402CONFIG_VHOST_NET=m
403# CONFIG_NET_VENDOR_ARC is not set
404# CONFIG_NET_CADENCE is not set
405# CONFIG_NET_VENDOR_CHELSIO is not set
406# CONFIG_NET_VENDOR_INTEL is not set
407# CONFIG_NET_VENDOR_MARVELL is not set
408CONFIG_MLX4_EN=m
409# CONFIG_NET_VENDOR_NATSEMI is not set
410CONFIG_PPP=m
411CONFIG_PPP_BSDCOMP=m
412CONFIG_PPP_DEFLATE=m
413CONFIG_PPP_MPPE=m
414CONFIG_PPPOE=m
415CONFIG_PPTP=m
416CONFIG_PPPOL2TP=m
417CONFIG_PPP_ASYNC=m
418CONFIG_PPP_SYNC_TTY=m
419# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
420# CONFIG_INPUT_KEYBOARD is not set
421# CONFIG_INPUT_MOUSE is not set
422# CONFIG_SERIO is not set
423CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
424CONFIG_LEGACY_PTY_COUNT=0
425CONFIG_HW_RANDOM_VIRTIO=m
426CONFIG_RAW_DRIVER=m
427CONFIG_HANGCHECK_TIMER=m
428CONFIG_TN3270_FS=y
429CONFIG_WATCHDOG=y
430CONFIG_WATCHDOG_NOWAYOUT=y
431CONFIG_SOFT_WATCHDOG=m
432CONFIG_ZVM_WATCHDOG=m
433# CONFIG_HID is not set
434# CONFIG_USB_SUPPORT is not set
435CONFIG_INFINIBAND=m
436CONFIG_INFINIBAND_USER_ACCESS=m
437CONFIG_MLX4_INFINIBAND=m
438CONFIG_VIRTIO_BALLOON=m
439# CONFIG_IOMMU_SUPPORT is not set
440CONFIG_EXT2_FS=y
441CONFIG_EXT2_FS_XATTR=y
442CONFIG_EXT2_FS_POSIX_ACL=y
443CONFIG_EXT2_FS_SECURITY=y
444CONFIG_EXT2_FS_XIP=y
445CONFIG_EXT3_FS=y
446# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
447CONFIG_EXT3_FS_POSIX_ACL=y
448CONFIG_EXT3_FS_SECURITY=y
449CONFIG_EXT4_FS=y
450CONFIG_EXT4_FS_POSIX_ACL=y
451CONFIG_EXT4_FS_SECURITY=y
452CONFIG_JBD_DEBUG=y
453CONFIG_JBD2_DEBUG=y
454CONFIG_JFS_FS=m
455CONFIG_JFS_POSIX_ACL=y
456CONFIG_JFS_SECURITY=y
457CONFIG_JFS_STATISTICS=y
458CONFIG_XFS_FS=m
459CONFIG_XFS_QUOTA=y
460CONFIG_XFS_POSIX_ACL=y
461CONFIG_XFS_RT=y
462CONFIG_GFS2_FS=m
463CONFIG_OCFS2_FS=m
464CONFIG_BTRFS_FS=m
465CONFIG_BTRFS_FS_POSIX_ACL=y
466CONFIG_NILFS2_FS=m
467CONFIG_FANOTIFY=y
468CONFIG_QUOTA_NETLINK_INTERFACE=y
469CONFIG_QFMT_V1=m
470CONFIG_QFMT_V2=m
471CONFIG_AUTOFS4_FS=m
472CONFIG_FUSE_FS=m
473CONFIG_CUSE=m
474CONFIG_FSCACHE=m
475CONFIG_CACHEFILES=m
476CONFIG_ISO9660_FS=y
477CONFIG_JOLIET=y
478CONFIG_ZISOFS=y
479CONFIG_UDF_FS=m
480CONFIG_MSDOS_FS=m
481CONFIG_VFAT_FS=m
482CONFIG_NTFS_FS=m
483CONFIG_NTFS_RW=y
484CONFIG_PROC_KCORE=y
485CONFIG_TMPFS=y
486CONFIG_TMPFS_POSIX_ACL=y
487CONFIG_HUGETLBFS=y
488CONFIG_CONFIGFS_FS=m
489CONFIG_ECRYPT_FS=m
490CONFIG_CRAMFS=m
491CONFIG_SQUASHFS=m
492CONFIG_SQUASHFS_XATTR=y
493CONFIG_SQUASHFS_LZO=y
494CONFIG_SQUASHFS_XZ=y
495CONFIG_ROMFS_FS=m
496CONFIG_NFS_FS=m
497CONFIG_NFS_V3_ACL=y
498CONFIG_NFS_V4=m
499CONFIG_NFS_SWAP=y
500CONFIG_NFSD=m
501CONFIG_NFSD_V3_ACL=y
502CONFIG_NFSD_V4=y
503CONFIG_NFSD_V4_SECURITY_LABEL=y
504CONFIG_CIFS=m
505CONFIG_CIFS_STATS=y
506CONFIG_CIFS_STATS2=y
507CONFIG_CIFS_WEAK_PW_HASH=y
508CONFIG_CIFS_UPCALL=y
509CONFIG_CIFS_XATTR=y
510CONFIG_CIFS_POSIX=y
511# CONFIG_CIFS_DEBUG is not set
512CONFIG_CIFS_DFS_UPCALL=y
513CONFIG_NLS_DEFAULT="utf8"
514CONFIG_NLS_CODEPAGE_437=m
515CONFIG_NLS_CODEPAGE_850=m
516CONFIG_NLS_ASCII=m
517CONFIG_NLS_ISO8859_1=m
518CONFIG_NLS_ISO8859_15=m
519CONFIG_NLS_UTF8=m
520CONFIG_DLM=m
521CONFIG_PRINTK_TIME=y
522CONFIG_DEBUG_INFO=y
523# CONFIG_ENABLE_MUST_CHECK is not set
524CONFIG_FRAME_WARN=1024
525CONFIG_UNUSED_SYMBOLS=y
526CONFIG_MAGIC_SYSRQ=y
527CONFIG_DEBUG_KERNEL=y
528CONFIG_TIMER_STATS=y
529CONFIG_RCU_TORTURE_TEST=m
530CONFIG_RCU_CPU_STALL_TIMEOUT=60
531CONFIG_LATENCYTOP=y
532CONFIG_BLK_DEV_IO_TRACE=y
533# CONFIG_KPROBE_EVENT is not set
534CONFIG_LKDTM=m
535CONFIG_ATOMIC64_SELFTEST=y
536# CONFIG_STRICT_DEVMEM is not set
537CONFIG_S390_PTDUMP=y
538CONFIG_ENCRYPTED_KEYS=m
539CONFIG_KEYS_DEBUG_PROC_KEYS=y
540CONFIG_SECURITY=y
541CONFIG_SECURITY_NETWORK=y
542CONFIG_SECURITY_SELINUX=y
543CONFIG_SECURITY_SELINUX_BOOTPARAM=y
544CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
545CONFIG_SECURITY_SELINUX_DISABLE=y
546CONFIG_IMA=y
547CONFIG_IMA_APPRAISE=y
548CONFIG_CRYPTO_USER=m
549# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
550CONFIG_CRYPTO_CRYPTD=m
551CONFIG_CRYPTO_TEST=m
552CONFIG_CRYPTO_CCM=m
553CONFIG_CRYPTO_GCM=m
554CONFIG_CRYPTO_CTS=m
555CONFIG_CRYPTO_LRW=m
556CONFIG_CRYPTO_PCBC=m
557CONFIG_CRYPTO_XTS=m
558CONFIG_CRYPTO_XCBC=m
559CONFIG_CRYPTO_VMAC=m
560CONFIG_CRYPTO_CRC32=m
561CONFIG_CRYPTO_MICHAEL_MIC=m
562CONFIG_CRYPTO_RMD128=m
563CONFIG_CRYPTO_RMD160=m
564CONFIG_CRYPTO_RMD256=m
565CONFIG_CRYPTO_RMD320=m
566CONFIG_CRYPTO_SHA512=m
567CONFIG_CRYPTO_TGR192=m
568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_ANUBIS=m
570CONFIG_CRYPTO_BLOWFISH=m
571CONFIG_CRYPTO_CAMELLIA=m
572CONFIG_CRYPTO_CAST5=m
573CONFIG_CRYPTO_CAST6=m
574CONFIG_CRYPTO_FCRYPT=m
575CONFIG_CRYPTO_KHAZAD=m
576CONFIG_CRYPTO_SALSA20=m
577CONFIG_CRYPTO_SEED=m
578CONFIG_CRYPTO_SERPENT=m
579CONFIG_CRYPTO_TEA=m
580CONFIG_CRYPTO_TWOFISH=m
581CONFIG_CRYPTO_ZLIB=y
582CONFIG_CRYPTO_LZO=m
583CONFIG_CRYPTO_LZ4=m
584CONFIG_CRYPTO_LZ4HC=m
585CONFIG_CRYPTO_USER_API_HASH=m
586CONFIG_CRYPTO_USER_API_SKCIPHER=m
587CONFIG_ZCRYPT=m
588CONFIG_CRYPTO_SHA1_S390=m
589CONFIG_CRYPTO_SHA256_S390=m
590CONFIG_CRYPTO_SHA512_S390=m
591CONFIG_CRYPTO_DES_S390=m
592CONFIG_CRYPTO_AES_S390=m
593CONFIG_CRYPTO_GHASH_S390=m
594CONFIG_ASYMMETRIC_KEY_TYPE=m
595CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
596CONFIG_PUBLIC_KEY_ALGO_RSA=m
597CONFIG_X509_CERTIFICATE_PARSER=m
598CONFIG_CRC7=m
599CONFIG_CRC8=m
600CONFIG_XZ_DEC_X86=y
601CONFIG_XZ_DEC_POWERPC=y
602CONFIG_XZ_DEC_IA64=y
603CONFIG_XZ_DEC_ARM=y
604CONFIG_XZ_DEC_ARMTHUMB=y
605CONFIG_XZ_DEC_SPARC=y
606CONFIG_CORDIC=m
607CONFIG_CMM=m
608CONFIG_APPLDATA_BASE=y
609CONFIG_KVM=m
610CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
new file mode 100644
index 000000000000..d725c4d956e4
--- /dev/null
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -0,0 +1,86 @@
1# CONFIG_SWAP is not set
2CONFIG_NO_HZ=y
3CONFIG_HIGH_RES_TIMERS=y
4CONFIG_RCU_FAST_NO_HZ=y
5CONFIG_BLK_DEV_INITRD=y
6CONFIG_CC_OPTIMIZE_FOR_SIZE=y
7# CONFIG_COMPAT_BRK is not set
8CONFIG_PARTITION_ADVANCED=y
9CONFIG_IBM_PARTITION=y
10CONFIG_DEFAULT_DEADLINE=y
11CONFIG_MARCH_Z9_109=y
12# CONFIG_COMPAT is not set
13CONFIG_NR_CPUS=2
14# CONFIG_HOTPLUG_CPU is not set
15CONFIG_HZ_100=y
16# CONFIG_COMPACTION is not set
17# CONFIG_MIGRATION is not set
18# CONFIG_CHECK_STACK is not set
19# CONFIG_CHSC_SCH is not set
20# CONFIG_SCM_BUS is not set
21CONFIG_CRASH_DUMP=y
22CONFIG_ZFCPDUMP=y
23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
24# CONFIG_SECCOMP is not set
25# CONFIG_IUCV is not set
26CONFIG_ATM=y
27CONFIG_ATM_LANE=y
28CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
29CONFIG_DEVTMPFS=y
30# CONFIG_FIRMWARE_IN_KERNEL is not set
31# CONFIG_BLK_DEV_XPRAM is not set
32# CONFIG_DCSSBLK is not set
33# CONFIG_DASD is not set
34CONFIG_ENCLOSURE_SERVICES=y
35CONFIG_SCSI=y
36CONFIG_BLK_DEV_SD=y
37CONFIG_SCSI_ENCLOSURE=y
38CONFIG_SCSI_MULTI_LUN=y
39CONFIG_SCSI_CONSTANTS=y
40CONFIG_SCSI_LOGGING=y
41CONFIG_SCSI_SRP_ATTRS=y
42CONFIG_ZFCP=y
43# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
44# CONFIG_INPUT_KEYBOARD is not set
45# CONFIG_INPUT_MOUSE is not set
46# CONFIG_SERIO is not set
47# CONFIG_HVC_IUCV is not set
48CONFIG_RAW_DRIVER=y
49# CONFIG_SCLP_ASYNC is not set
50# CONFIG_HMC_DRV is not set
51# CONFIG_S390_TAPE is not set
52# CONFIG_VMCP is not set
53# CONFIG_MONWRITER is not set
54# CONFIG_S390_VMUR is not set
55# CONFIG_HID is not set
56CONFIG_MEMSTICK=y
57CONFIG_MEMSTICK_DEBUG=y
58CONFIG_MEMSTICK_UNSAFE_RESUME=y
59CONFIG_MSPRO_BLOCK=y
60# CONFIG_IOMMU_SUPPORT is not set
61CONFIG_EXT2_FS=y
62CONFIG_EXT3_FS=y
63# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
64CONFIG_EXT4_FS=y
65CONFIG_EXT4_FS_POSIX_ACL=y
66CONFIG_EXT4_FS_SECURITY=y
67# CONFIG_INOTIFY_USER is not set
68CONFIG_CONFIGFS_FS=y
69CONFIG_PRINTK_TIME=y
70CONFIG_DEBUG_INFO=y
71CONFIG_DEBUG_FS=y
72CONFIG_DEBUG_KERNEL=y
73# CONFIG_SCHED_DEBUG is not set
74CONFIG_RCU_CPU_STALL_TIMEOUT=60
75# CONFIG_FTRACE is not set
76# CONFIG_STRICT_DEVMEM is not set
77CONFIG_XZ_DEC_X86=y
78CONFIG_XZ_DEC_POWERPC=y
79CONFIG_XZ_DEC_IA64=y
80CONFIG_XZ_DEC_ARM=y
81CONFIG_XZ_DEC_ARMTHUMB=y
82CONFIG_XZ_DEC_SPARC=y
83# CONFIG_PFAULT is not set
84# CONFIG_S390_HYPFS_FS is not set
85# CONFIG_VIRTUALIZATION is not set
86# CONFIG_S390_GUEST is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b4dbade8ca24..46cae138ece2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -725,6 +725,8 @@ static struct crypto_alg xts_aes_alg = {
725 } 725 }
726}; 726};
727 727
728static int xts_aes_alg_reg;
729
728static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 730static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
729 unsigned int key_len) 731 unsigned int key_len)
730{ 732{
@@ -846,6 +848,8 @@ static struct crypto_alg ctr_aes_alg = {
846 } 848 }
847}; 849};
848 850
851static int ctr_aes_alg_reg;
852
849static int __init aes_s390_init(void) 853static int __init aes_s390_init(void)
850{ 854{
851 int ret; 855 int ret;
@@ -884,6 +888,7 @@ static int __init aes_s390_init(void)
884 ret = crypto_register_alg(&xts_aes_alg); 888 ret = crypto_register_alg(&xts_aes_alg);
885 if (ret) 889 if (ret)
886 goto xts_aes_err; 890 goto xts_aes_err;
891 xts_aes_alg_reg = 1;
887 } 892 }
888 893
889 if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT, 894 if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -902,6 +907,7 @@ static int __init aes_s390_init(void)
902 free_page((unsigned long) ctrblk); 907 free_page((unsigned long) ctrblk);
903 goto ctr_aes_err; 908 goto ctr_aes_err;
904 } 909 }
910 ctr_aes_alg_reg = 1;
905 } 911 }
906 912
907out: 913out:
@@ -921,9 +927,12 @@ aes_err:
921 927
922static void __exit aes_s390_fini(void) 928static void __exit aes_s390_fini(void)
923{ 929{
924 crypto_unregister_alg(&ctr_aes_alg); 930 if (ctr_aes_alg_reg) {
925 free_page((unsigned long) ctrblk); 931 crypto_unregister_alg(&ctr_aes_alg);
926 crypto_unregister_alg(&xts_aes_alg); 932 free_page((unsigned long) ctrblk);
933 }
934 if (xts_aes_alg_reg)
935 crypto_unregister_alg(&xts_aes_alg);
927 crypto_unregister_alg(&cbc_aes_alg); 936 crypto_unregister_alg(&cbc_aes_alg);
928 crypto_unregister_alg(&ecb_aes_alg); 937 crypto_unregister_alg(&ecb_aes_alg);
929 crypto_unregister_alg(&aes_alg); 938 crypto_unregister_alg(&aes_alg);
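The aes_s390.c hunks above fix module unload for machines that lack the XTS or CTR cipher functions: aes_s390_init() now records which of the optional algorithms it actually registered, and aes_s390_fini() unregisters (and, for CTR, frees ctrblk) only when the matching flag is set, instead of unconditionally unregistering algorithms that were never added. A minimal sketch of that init/exit pattern follows; example_alg, example_alg_registered and has_required_facility() are placeholder names for illustration, not identifiers from the driver.

#include <linux/module.h>
#include <linux/crypto.h>

static struct crypto_alg example_alg;	/* cra_* fields omitted in this sketch */
static int example_alg_registered;

static bool has_required_facility(void)
{
	return true;	/* stand-in for a facility/availability check */
}

static int __init example_init(void)
{
	int ret = 0;

	if (has_required_facility()) {
		ret = crypto_register_alg(&example_alg);
		if (ret)
			return ret;
		example_alg_registered = 1;	/* remember what was registered */
	}
	return ret;
}

static void __exit example_exit(void)
{
	/* unregister only what example_init() actually registered */
	if (example_alg_registered)
		crypto_unregister_alg(&example_alg);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");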
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d204c65bf722..33f57514f424 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,13 +38,14 @@ CONFIG_MODULE_UNLOAD=y
38CONFIG_MODVERSIONS=y 38CONFIG_MODVERSIONS=y
39CONFIG_PARTITION_ADVANCED=y 39CONFIG_PARTITION_ADVANCED=y
40CONFIG_IBM_PARTITION=y 40CONFIG_IBM_PARTITION=y
41# CONFIG_EFI_PARTITION is not set
42CONFIG_DEFAULT_DEADLINE=y 41CONFIG_DEFAULT_DEADLINE=y
42CONFIG_MARCH_Z196=y
43CONFIG_HZ_100=y 43CONFIG_HZ_100=y
44CONFIG_MEMORY_HOTPLUG=y 44CONFIG_MEMORY_HOTPLUG=y
45CONFIG_MEMORY_HOTREMOVE=y 45CONFIG_MEMORY_HOTREMOVE=y
46CONFIG_KSM=y 46CONFIG_KSM=y
47CONFIG_TRANSPARENT_HUGEPAGE=y 47CONFIG_TRANSPARENT_HUGEPAGE=y
48CONFIG_CMA=y
48CONFIG_CRASH_DUMP=y 49CONFIG_CRASH_DUMP=y
49CONFIG_BINFMT_MISC=m 50CONFIG_BINFMT_MISC=m
50CONFIG_HIBERNATION=y 51CONFIG_HIBERNATION=y
@@ -152,6 +153,7 @@ CONFIG_CRYPTO_CMAC=m
152CONFIG_CRYPTO_XCBC=m 153CONFIG_CRYPTO_XCBC=m
153CONFIG_CRYPTO_VMAC=m 154CONFIG_CRYPTO_VMAC=m
154CONFIG_CRYPTO_CRC32=m 155CONFIG_CRYPTO_CRC32=m
156CONFIG_CRYPTO_CRCT10DIF=m
155CONFIG_CRYPTO_MD4=m 157CONFIG_CRYPTO_MD4=m
156CONFIG_CRYPTO_MICHAEL_MIC=m 158CONFIG_CRYPTO_MICHAEL_MIC=m
157CONFIG_CRYPTO_RMD128=m 159CONFIG_CRYPTO_RMD128=m
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832daa5f..fa9aaf7144b7 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -19,21 +19,50 @@
19 19
20#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
21 21
22#define __CS_LOOP(ptr, op_val, op_string) ({ \ 22#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
23
24#define __ATOMIC_OR "lao"
25#define __ATOMIC_AND "lan"
26#define __ATOMIC_ADD "laa"
27
28#define __ATOMIC_LOOP(ptr, op_val, op_string) \
29({ \
30 int old_val; \
31 \
32 typecheck(atomic_t *, ptr); \
33 asm volatile( \
34 op_string " %0,%2,%1\n" \
35 : "=d" (old_val), "+Q" ((ptr)->counter) \
36 : "d" (op_val) \
37 : "cc", "memory"); \
38 old_val; \
39})
40
41#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
42
43#define __ATOMIC_OR "or"
44#define __ATOMIC_AND "nr"
45#define __ATOMIC_ADD "ar"
46
47#define __ATOMIC_LOOP(ptr, op_val, op_string) \
48({ \
23 int old_val, new_val; \ 49 int old_val, new_val; \
50 \
51 typecheck(atomic_t *, ptr); \
24 asm volatile( \ 52 asm volatile( \
25 " l %0,%2\n" \ 53 " l %0,%2\n" \
26 "0: lr %1,%0\n" \ 54 "0: lr %1,%0\n" \
27 op_string " %1,%3\n" \ 55 op_string " %1,%3\n" \
28 " cs %0,%1,%2\n" \ 56 " cs %0,%1,%2\n" \
29 " jl 0b" \ 57 " jl 0b" \
30 : "=&d" (old_val), "=&d" (new_val), \ 58 : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
31 "=Q" (((atomic_t *)(ptr))->counter) \ 59 : "d" (op_val) \
32 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
33 : "cc", "memory"); \ 60 : "cc", "memory"); \
34 new_val; \ 61 old_val; \
35}) 62})
36 63
64#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
65
37static inline int atomic_read(const atomic_t *v) 66static inline int atomic_read(const atomic_t *v)
38{ 67{
39 int c; 68 int c;
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
53 82
54static inline int atomic_add_return(int i, atomic_t *v) 83static inline int atomic_add_return(int i, atomic_t *v)
55{ 84{
56 return __CS_LOOP(v, i, "ar"); 85 return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
57} 86}
58#define atomic_add(_i, _v) atomic_add_return(_i, _v)
59#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
60#define atomic_inc(_v) atomic_add_return(1, _v)
61#define atomic_inc_return(_v) atomic_add_return(1, _v)
62#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
63 87
64static inline int atomic_sub_return(int i, atomic_t *v) 88static inline void atomic_add(int i, atomic_t *v)
65{ 89{
66 return __CS_LOOP(v, i, "sr"); 90#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
91 if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
92 asm volatile(
93 "asi %0,%1\n"
94 : "+Q" (v->counter)
95 : "i" (i)
96 : "cc", "memory");
97 } else {
98 atomic_add_return(i, v);
99 }
100#else
101 atomic_add_return(i, v);
102#endif
67} 103}
68#define atomic_sub(_i, _v) atomic_sub_return(_i, _v) 104
105#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
106#define atomic_inc(_v) atomic_add(1, _v)
107#define atomic_inc_return(_v) atomic_add_return(1, _v)
108#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
109#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
110#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
69#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0) 111#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
70#define atomic_dec(_v) atomic_sub_return(1, _v) 112#define atomic_dec(_v) atomic_sub(1, _v)
71#define atomic_dec_return(_v) atomic_sub_return(1, _v) 113#define atomic_dec_return(_v) atomic_sub_return(1, _v)
72#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) 114#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
73 115
74static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) 116static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
75{ 117{
76 __CS_LOOP(v, ~mask, "nr"); 118 __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
77} 119}
78 120
79static inline void atomic_set_mask(unsigned long mask, atomic_t *v) 121static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
80{ 122{
81 __CS_LOOP(v, mask, "or"); 123 __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
82} 124}
83 125
84#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 126#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
87{ 129{
88 asm volatile( 130 asm volatile(
89 " cs %0,%2,%1" 131 " cs %0,%2,%1"
90 : "+d" (old), "=Q" (v->counter) 132 : "+d" (old), "+Q" (v->counter)
91 : "d" (new), "Q" (v->counter) 133 : "d" (new)
92 : "cc", "memory"); 134 : "cc", "memory");
93 return old; 135 return old;
94} 136}
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
109} 151}
110 152
111 153
112#undef __CS_LOOP 154#undef __ATOMIC_LOOP
113 155
114#define ATOMIC64_INIT(i) { (i) } 156#define ATOMIC64_INIT(i) { (i) }
115 157
116#ifdef CONFIG_64BIT 158#ifdef CONFIG_64BIT
117 159
118#define __CSG_LOOP(ptr, op_val, op_string) ({ \ 160#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
161
162#define __ATOMIC64_OR "laog"
163#define __ATOMIC64_AND "lang"
164#define __ATOMIC64_ADD "laag"
165
166#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
167({ \
168 long long old_val; \
169 \
170 typecheck(atomic64_t *, ptr); \
171 asm volatile( \
172 op_string " %0,%2,%1\n" \
173 : "=d" (old_val), "+Q" ((ptr)->counter) \
174 : "d" (op_val) \
175 : "cc", "memory"); \
176 old_val; \
177})
178
179#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
180
181#define __ATOMIC64_OR "ogr"
182#define __ATOMIC64_AND "ngr"
183#define __ATOMIC64_ADD "agr"
184
185#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
186({ \
119 long long old_val, new_val; \ 187 long long old_val, new_val; \
188 \
189 typecheck(atomic64_t *, ptr); \
120 asm volatile( \ 190 asm volatile( \
121 " lg %0,%2\n" \ 191 " lg %0,%2\n" \
122 "0: lgr %1,%0\n" \ 192 "0: lgr %1,%0\n" \
123 op_string " %1,%3\n" \ 193 op_string " %1,%3\n" \
124 " csg %0,%1,%2\n" \ 194 " csg %0,%1,%2\n" \
125 " jl 0b" \ 195 " jl 0b" \
126 : "=&d" (old_val), "=&d" (new_val), \ 196 : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
127 "=Q" (((atomic_t *)(ptr))->counter) \ 197 : "d" (op_val) \
128 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
129 : "cc", "memory"); \ 198 : "cc", "memory"); \
130 new_val; \ 199 old_val; \
131}) 200})
132 201
202#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
203
133static inline long long atomic64_read(const atomic64_t *v) 204static inline long long atomic64_read(const atomic64_t *v)
134{ 205{
135 long long c; 206 long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
149 220
150static inline long long atomic64_add_return(long long i, atomic64_t *v) 221static inline long long atomic64_add_return(long long i, atomic64_t *v)
151{ 222{
152 return __CSG_LOOP(v, i, "agr"); 223 return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
153}
154
155static inline long long atomic64_sub_return(long long i, atomic64_t *v)
156{
157 return __CSG_LOOP(v, i, "sgr");
158} 224}
159 225
160static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) 226static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
161{ 227{
162 __CSG_LOOP(v, ~mask, "ngr"); 228 __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
163} 229}
164 230
165static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) 231static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
166{ 232{
167 __CSG_LOOP(v, mask, "ogr"); 233 __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
168} 234}
169 235
170#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 236#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
174{ 240{
175 asm volatile( 241 asm volatile(
176 " csg %0,%2,%1" 242 " csg %0,%2,%1"
177 : "+d" (old), "=Q" (v->counter) 243 : "+d" (old), "+Q" (v->counter)
178 : "d" (new), "Q" (v->counter) 244 : "d" (new)
179 : "cc", "memory"); 245 : "cc", "memory");
180 return old; 246 return old;
181} 247}
182 248
183#undef __CSG_LOOP 249#undef __ATOMIC64_LOOP
184 250
185#else /* CONFIG_64BIT */ 251#else /* CONFIG_64BIT */
186 252
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
216 " lm %0,%N0,%1\n" 282 " lm %0,%N0,%1\n"
217 "0: cds %0,%2,%1\n" 283 "0: cds %0,%2,%1\n"
218 " jl 0b\n" 284 " jl 0b\n"
219 : "=&d" (rp_old), "=Q" (v->counter) 285 : "=&d" (rp_old), "+Q" (v->counter)
220 : "d" (rp_new), "Q" (v->counter) 286 : "d" (rp_new)
221 : "cc"); 287 : "cc");
222 return rp_old.pair; 288 return rp_old.pair;
223} 289}
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
230 296
231 asm volatile( 297 asm volatile(
232 " cds %0,%2,%1" 298 " cds %0,%2,%1"
233 : "+&d" (rp_old), "=Q" (v->counter) 299 : "+&d" (rp_old), "+Q" (v->counter)
234 : "d" (rp_new), "Q" (v->counter) 300 : "d" (rp_new)
235 : "cc"); 301 : "cc");
236 return rp_old.pair; 302 return rp_old.pair;
237} 303}
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
248 return new; 314 return new;
249} 315}
250 316
251static inline long long atomic64_sub_return(long long i, atomic64_t *v)
252{
253 long long old, new;
254
255 do {
256 old = atomic64_read(v);
257 new = old - i;
258 } while (atomic64_cmpxchg(v, old, new) != old);
259 return new;
260}
261
262static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) 317static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
263{ 318{
264 long long old, new; 319 long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
281 336
282#endif /* CONFIG_64BIT */ 337#endif /* CONFIG_64BIT */
283 338
284static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) 339static inline void atomic64_add(long long i, atomic64_t *v)
340{
341#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
342 if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
343 asm volatile(
344 "agsi %0,%1\n"
345 : "+Q" (v->counter)
346 : "i" (i)
347 : "cc", "memory");
348 } else {
349 atomic64_add_return(i, v);
350 }
351#else
352 atomic64_add_return(i, v);
353#endif
354}
355
356static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
285{ 357{
286 long long c, old; 358 long long c, old;
287 359
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
289 for (;;) { 361 for (;;) {
290 if (unlikely(c == u)) 362 if (unlikely(c == u))
291 break; 363 break;
292 old = atomic64_cmpxchg(v, c, c + a); 364 old = atomic64_cmpxchg(v, c, c + i);
293 if (likely(old == c)) 365 if (likely(old == c))
294 break; 366 break;
295 c = old; 367 c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
314 return dec; 386 return dec;
315} 387}
316 388
317#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
318#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) 389#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
319#define atomic64_inc(_v) atomic64_add_return(1, _v) 390#define atomic64_inc(_v) atomic64_add(1, _v)
320#define atomic64_inc_return(_v) atomic64_add_return(1, _v) 391#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
321#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) 392#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
322#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) 393#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
394#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
323#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) 395#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
324#define atomic64_dec(_v) atomic64_sub_return(1, _v) 396#define atomic64_dec(_v) atomic64_sub(1, _v)
325#define atomic64_dec_return(_v) atomic64_sub_return(1, _v) 397#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
326#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) 398#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
327#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 399#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
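The atomic.h rework above has one central idea: with the z196 interlocked-access facility, "laa"/"lao"/"lan" (and their 64-bit forms) update the counter and return its old value in a single instruction, while older machines keep a load plus compare-and-swap loop that now also yields the old value. Either way atomic_add_return() becomes "old value + i", and atomic_add() with a small compile-time constant can use the "asi"/"agsi" add-immediate instructions and discard the result. The same old-value semantics can be expressed with compiler builtins; the sketch below only illustrates the arithmetic, it is not how the s390 header is implemented.

#include <stdio.h>

int main(void)
{
	int counter = 40;
	int i = 2;

	/* __atomic_fetch_add() returns the old value, like "laa" does */
	int old = __atomic_fetch_add(&counter, i, __ATOMIC_SEQ_CST);
	int add_return = old + i;	/* mirrors __ATOMIC_LOOP(...) + i */

	printf("old=%d counter=%d add_return=%d\n", old, counter, add_return);
	/* prints: old=40 counter=42 add_return=42 */
	return 0;
}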
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 10135a38673c..6e6ad0680829 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,10 +1,40 @@
1/* 1/*
2 * S390 version 2 * Copyright IBM Corp. 1999,2013
3 * Copyright IBM Corp. 1999
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5 * 3 *
6 * Derived from "include/asm-i386/bitops.h" 4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Copyright (C) 1992, Linus Torvalds 5 *
6 * The description below was taken in large parts from the powerpc
7 * bitops header file:
8 * Within a word, bits are numbered LSB first. Lots of places make
9 * this assumption by directly testing bits with (val & (1<<nr)).
10 * This can cause confusion for large (> 1 word) bitmaps on a
11 * big-endian system because, unlike little endian, the number of each
12 * bit depends on the word size.
13 *
14 * The bitop functions are defined to work on unsigned longs, so for an
15 * s390x system the bits end up numbered:
16 *  |63..............0|127............64|191...........128|255...........192|
17 * and on s390:
18 *  |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
19 *
20 * There are a few little-endian macros used mostly for filesystem
21 * bitmaps; these work on similar bit array layouts, but
22 * byte-oriented:
23 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
24 *
25 * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
26 * number field needs to be reversed compared to the big-endian bit
27 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
28 *
29 * We also have special functions which work with an MSB0 encoding:
30 * on an s390x system the bits are numbered:
31 * |0..............63|64............127|128...........191|192...........255|
32 * and on s390:
33 *  |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
34 *
35 * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
36 * number field needs to be reversed compared to the LSB0 encoded bit
37 * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
8 * 38 *
9 */ 39 */
10 40
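The new header comment, together with the __bitops_word() and __bitops_byte() helpers later in this patch, comes down to simple address arithmetic: the containing word is nr / BITS_PER_LONG, the mask within it is 1UL << (nr & (BITS_PER_LONG - 1)), and the byte-oriented instructions reach the right byte on big-endian s390 by XOR-ing the bit number with 0x38 (BITS_PER_LONG - 8). The short userspace program below just works that arithmetic through for bit 70 on a 64-bit system; it is an illustration, not code from the patch.

#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
	unsigned long nr = 70;	/* bit 70 of the bitmap */

	/* word that contains the bit, and the mask within that word */
	unsigned long word_index = nr / BITS_PER_LONG;		 /* 1 */
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1)); /* 1 << 6 */

	/* byte-oriented access on big-endian s390x: reverse bits 3-5 of
	 * the bit number by XOR with 0x38, as in __bitops_byte() */
	unsigned long byte_offset = (nr ^ (BITS_PER_LONG - 8)) >> 3;

	printf("word=%lu mask=%#lx byte=%lu bit_in_byte=%lu\n",
	       word_index, mask, byte_offset, nr & 7);
	/* prints: word=1 mask=0x40 byte=15 bit_in_byte=6 */
	return 0;
}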
@@ -15,556 +45,348 @@
15#error only <linux/bitops.h> can be included directly 45#error only <linux/bitops.h> can be included directly
16#endif 46#endif
17 47
48#include <linux/typecheck.h>
18#include <linux/compiler.h> 49#include <linux/compiler.h>
19 50
20/*
21 * 32 bit bitops format:
22 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
23 * bit 32 is the LSB of *(addr+4). That combined with the
24 * big endian byte order on S390 give the following bit
25 * order in memory:
26 * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
27 * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
28 * after that follows the next long with bit numbers
29 * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
30 * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
31 * The reason for this bit ordering is the fact that
32 * in the architecture independent code bits operations
33 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
34 * with operation of the form "set_bit(bitnr, flags)".
35 *
36 * 64 bit bitops format:
37 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
38 * bit 64 is the LSB of *(addr+8). That combined with the
39 * big endian byte order on S390 give the following bit
40 * order in memory:
41 * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
42 * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
43 * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
44 * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
45 * after that follows the next long with bit numbers
46 * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
47 * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
48 * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
49 * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
50 * The reason for this bit ordering is the fact that
51 * in the architecture independent code bits operations
52 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
53 * with operation of the form "set_bit(bitnr, flags)".
54 */
55
56/* bitmap tables from arch/s390/kernel/bitmap.c */
57extern const char _oi_bitmap[];
58extern const char _ni_bitmap[];
59extern const char _zb_findmap[];
60extern const char _sb_findmap[];
61
62#ifndef CONFIG_64BIT 51#ifndef CONFIG_64BIT
63 52
64#define __BITOPS_OR "or" 53#define __BITOPS_OR "or"
65#define __BITOPS_AND "nr" 54#define __BITOPS_AND "nr"
66#define __BITOPS_XOR "xr" 55#define __BITOPS_XOR "xr"
67 56
68#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 57#define __BITOPS_LOOP(__addr, __val, __op_string) \
58({ \
59 unsigned long __old, __new; \
60 \
61 typecheck(unsigned long *, (__addr)); \
69 asm volatile( \ 62 asm volatile( \
70 " l %0,%2\n" \ 63 " l %0,%2\n" \
71 "0: lr %1,%0\n" \ 64 "0: lr %1,%0\n" \
72 __op_string " %1,%3\n" \ 65 __op_string " %1,%3\n" \
73 " cs %0,%1,%2\n" \ 66 " cs %0,%1,%2\n" \
74 " jl 0b" \ 67 " jl 0b" \
75 : "=&d" (__old), "=&d" (__new), \ 68 : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
76 "=Q" (*(unsigned long *) __addr) \ 69 : "d" (__val) \
77 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 70 : "cc"); \
78 : "cc"); 71 __old; \
72})
79 73
80#else /* CONFIG_64BIT */ 74#else /* CONFIG_64BIT */
81 75
76#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
77
78#define __BITOPS_OR "laog"
79#define __BITOPS_AND "lang"
80#define __BITOPS_XOR "laxg"
81
82#define __BITOPS_LOOP(__addr, __val, __op_string) \
83({ \
84 unsigned long __old; \
85 \
86 typecheck(unsigned long *, (__addr)); \
87 asm volatile( \
88 __op_string " %0,%2,%1\n" \
89 : "=d" (__old), "+Q" (*(__addr)) \
90 : "d" (__val) \
91 : "cc"); \
92 __old; \
93})
94
95#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
96
82#define __BITOPS_OR "ogr" 97#define __BITOPS_OR "ogr"
83#define __BITOPS_AND "ngr" 98#define __BITOPS_AND "ngr"
84#define __BITOPS_XOR "xgr" 99#define __BITOPS_XOR "xgr"
85 100
86#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 101#define __BITOPS_LOOP(__addr, __val, __op_string) \
102({ \
103 unsigned long __old, __new; \
104 \
105 typecheck(unsigned long *, (__addr)); \
87 asm volatile( \ 106 asm volatile( \
88 " lg %0,%2\n" \ 107 " lg %0,%2\n" \
89 "0: lgr %1,%0\n" \ 108 "0: lgr %1,%0\n" \
90 __op_string " %1,%3\n" \ 109 __op_string " %1,%3\n" \
91 " csg %0,%1,%2\n" \ 110 " csg %0,%1,%2\n" \
92 " jl 0b" \ 111 " jl 0b" \
93 : "=&d" (__old), "=&d" (__new), \ 112 : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
94 "=Q" (*(unsigned long *) __addr) \ 113 : "d" (__val) \
95 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 114 : "cc"); \
96 : "cc"); 115 __old; \
116})
117
118#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
97 119
98#endif /* CONFIG_64BIT */ 120#endif /* CONFIG_64BIT */
99 121
100#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) 122#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
101 123
102#ifdef CONFIG_SMP 124static inline unsigned long *
103/* 125__bitops_word(unsigned long nr, volatile unsigned long *ptr)
104 * SMP safe set_bit routine based on compare and swap (CS) 126{
105 */ 127 unsigned long addr;
106static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr) 128
129 addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
130 return (unsigned long *)addr;
131}
132
133static inline unsigned char *
134__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
107{ 135{
108 unsigned long addr, old, new, mask; 136 return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
137}
138
139static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
140{
141 unsigned long *addr = __bitops_word(nr, ptr);
142 unsigned long mask;
109 143
110 addr = (unsigned long) ptr; 144#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
111 /* calculate address for CS */ 145 if (__builtin_constant_p(nr)) {
112 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; 146 unsigned char *caddr = __bitops_byte(nr, ptr);
113 /* make OR mask */ 147
148 asm volatile(
149 "oi %0,%b1\n"
150 : "+Q" (*caddr)
151 : "i" (1 << (nr & 7))
152 : "cc");
153 return;
154 }
155#endif
114 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 156 mask = 1UL << (nr & (BITS_PER_LONG - 1));
115 /* Do the atomic update. */ 157 __BITOPS_LOOP(addr, mask, __BITOPS_OR);
116 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
117} 158}
118 159
119/* 160static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
120 * SMP safe clear_bit routine based on compare and swap (CS)
121 */
122static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
123{ 161{
124 unsigned long addr, old, new, mask; 162 unsigned long *addr = __bitops_word(nr, ptr);
163 unsigned long mask;
164
165#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
166 if (__builtin_constant_p(nr)) {
167 unsigned char *caddr = __bitops_byte(nr, ptr);
125 168
126 addr = (unsigned long) ptr; 169 asm volatile(
127 /* calculate address for CS */ 170 "ni %0,%b1\n"
128 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; 171 : "+Q" (*caddr)
129 /* make AND mask */ 172 : "i" (~(1 << (nr & 7)))
173 : "cc");
174 return;
175 }
176#endif
130 mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); 177 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
131 /* Do the atomic update. */ 178 __BITOPS_LOOP(addr, mask, __BITOPS_AND);
132 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
133} 179}
134 180
135/* 181static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
136 * SMP safe change_bit routine based on compare and swap (CS)
137 */
138static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
139{ 182{
140 unsigned long addr, old, new, mask; 183 unsigned long *addr = __bitops_word(nr, ptr);
184 unsigned long mask;
185
186#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
187 if (__builtin_constant_p(nr)) {
188 unsigned char *caddr = __bitops_byte(nr, ptr);
141 189
142 addr = (unsigned long) ptr; 190 asm volatile(
143 /* calculate address for CS */ 191 "xi %0,%b1\n"
144 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3; 192 : "+Q" (*caddr)
145 /* make XOR mask */ 193 : "i" (1 << (nr & 7))
194 : "cc");
195 return;
196 }
197#endif
146 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 198 mask = 1UL << (nr & (BITS_PER_LONG - 1));
147 /* Do the atomic update. */ 199 __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
148 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
149} 200}
150 201
151/*
152 * SMP safe test_and_set_bit routine based on compare and swap (CS)
153 */
154static inline int 202static inline int
155test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr) 203test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
156{ 204{
157 unsigned long addr, old, new, mask; 205 unsigned long *addr = __bitops_word(nr, ptr);
206 unsigned long old, mask;
158 207
159 addr = (unsigned long) ptr;
160 /* calculate address for CS */
161 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
162 /* make OR/test mask */
163 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 208 mask = 1UL << (nr & (BITS_PER_LONG - 1));
164 /* Do the atomic update. */ 209 old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
165 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
166 barrier(); 210 barrier();
167 return (old & mask) != 0; 211 return (old & mask) != 0;
168} 212}
169 213
170/*
171 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
172 */
173static inline int 214static inline int
174test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr) 215test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
175{ 216{
176 unsigned long addr, old, new, mask; 217 unsigned long *addr = __bitops_word(nr, ptr);
218 unsigned long old, mask;
177 219
178 addr = (unsigned long) ptr;
179 /* calculate address for CS */
180 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
181 /* make AND/test mask */
182 mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); 220 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
183 /* Do the atomic update. */ 221 old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
184 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
185 barrier(); 222 barrier();
186 return (old ^ new) != 0; 223 return (old & ~mask) != 0;
187} 224}
188 225
189/*
190 * SMP safe test_and_change_bit routine based on compare and swap (CS)
191 */
192static inline int 226static inline int
193test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr) 227test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
194{ 228{
195 unsigned long addr, old, new, mask; 229 unsigned long *addr = __bitops_word(nr, ptr);
230 unsigned long old, mask;
196 231
197 addr = (unsigned long) ptr;
198 /* calculate address for CS */
199 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
200 /* make XOR/test mask */
201 mask = 1UL << (nr & (BITS_PER_LONG - 1)); 232 mask = 1UL << (nr & (BITS_PER_LONG - 1));
202 /* Do the atomic update. */ 233 old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
203 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
204 barrier(); 234 barrier();
205 return (old & mask) != 0; 235 return (old & mask) != 0;
206} 236}
207#endif /* CONFIG_SMP */
208 237
209/*
210 * fast, non-SMP set_bit routine
211 */
212static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) 238static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
213{ 239{
214 unsigned long addr; 240 unsigned char *addr = __bitops_byte(nr, ptr);
215
216 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
217 asm volatile(
218 " oc %O0(1,%R0),%1"
219 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
220}
221
222static inline void
223__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
224{
225 unsigned long addr;
226 241
227 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 242 *addr |= 1 << (nr & 7);
228 *(unsigned char *) addr |= 1 << (nr & 7);
229} 243}
230 244
231#define set_bit_simple(nr,addr) \
232(__builtin_constant_p((nr)) ? \
233 __constant_set_bit((nr),(addr)) : \
234 __set_bit((nr),(addr)) )
235
236/*
237 * fast, non-SMP clear_bit routine
238 */
239static inline void 245static inline void
240__clear_bit(unsigned long nr, volatile unsigned long *ptr) 246__clear_bit(unsigned long nr, volatile unsigned long *ptr)
241{ 247{
242 unsigned long addr; 248 unsigned char *addr = __bitops_byte(nr, ptr);
243
244 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
245 asm volatile(
246 " nc %O0(1,%R0),%1"
247 : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
248}
249
250static inline void
251__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
252{
253 unsigned long addr;
254 249
255 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 250 *addr &= ~(1 << (nr & 7));
256 *(unsigned char *) addr &= ~(1 << (nr & 7));
257} 251}
258 252
259#define clear_bit_simple(nr,addr) \
260(__builtin_constant_p((nr)) ? \
261 __constant_clear_bit((nr),(addr)) : \
262 __clear_bit((nr),(addr)) )
263
264/*
265 * fast, non-SMP change_bit routine
266 */
267static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) 253static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
268{ 254{
269 unsigned long addr; 255 unsigned char *addr = __bitops_byte(nr, ptr);
270
271 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
272 asm volatile(
273 " xc %O0(1,%R0),%1"
274 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
275}
276
277static inline void
278__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
279{
280 unsigned long addr;
281 256
282 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 257 *addr ^= 1 << (nr & 7);
283 *(unsigned char *) addr ^= 1 << (nr & 7);
284} 258}
285 259
286#define change_bit_simple(nr,addr) \
287(__builtin_constant_p((nr)) ? \
288 __constant_change_bit((nr),(addr)) : \
289 __change_bit((nr),(addr)) )
290
291/*
292 * fast, non-SMP test_and_set_bit routine
293 */
294static inline int 260static inline int
295test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) 261__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
296{ 262{
297 unsigned long addr; 263 unsigned char *addr = __bitops_byte(nr, ptr);
298 unsigned char ch; 264 unsigned char ch;
299 265
300 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 266 ch = *addr;
301 ch = *(unsigned char *) addr; 267 *addr |= 1 << (nr & 7);
302 asm volatile(
303 " oc %O0(1,%R0),%1"
304 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
305 : "cc", "memory");
306 return (ch >> (nr & 7)) & 1; 268 return (ch >> (nr & 7)) & 1;
307} 269}
308#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
309 270
310/*
311 * fast, non-SMP test_and_clear_bit routine
312 */
313static inline int 271static inline int
314test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) 272__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
315{ 273{
316 unsigned long addr; 274 unsigned char *addr = __bitops_byte(nr, ptr);
317 unsigned char ch; 275 unsigned char ch;
318 276
319 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 277 ch = *addr;
320 ch = *(unsigned char *) addr; 278 *addr &= ~(1 << (nr & 7));
321 asm volatile(
322 " nc %O0(1,%R0),%1"
323 : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
324 : "cc", "memory");
325 return (ch >> (nr & 7)) & 1; 279 return (ch >> (nr & 7)) & 1;
326} 280}
327#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
328 281
329/*
330 * fast, non-SMP test_and_change_bit routine
331 */
332static inline int 282static inline int
333test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) 283__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
334{ 284{
335 unsigned long addr; 285 unsigned char *addr = __bitops_byte(nr, ptr);
336 unsigned char ch; 286 unsigned char ch;
337 287
338 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 288 ch = *addr;
339 ch = *(unsigned char *) addr; 289 *addr ^= 1 << (nr & 7);
340 asm volatile(
341 " xc %O0(1,%R0),%1"
342 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
343 : "cc", "memory");
344 return (ch >> (nr & 7)) & 1; 290 return (ch >> (nr & 7)) & 1;
345} 291}
346#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
347
348#ifdef CONFIG_SMP
349#define set_bit set_bit_cs
350#define clear_bit clear_bit_cs
351#define change_bit change_bit_cs
352#define test_and_set_bit test_and_set_bit_cs
353#define test_and_clear_bit test_and_clear_bit_cs
354#define test_and_change_bit test_and_change_bit_cs
355#else
356#define set_bit set_bit_simple
357#define clear_bit clear_bit_simple
358#define change_bit change_bit_simple
359#define test_and_set_bit test_and_set_bit_simple
360#define test_and_clear_bit test_and_clear_bit_simple
361#define test_and_change_bit test_and_change_bit_simple
362#endif
363
364
365/*
366 * This routine doesn't need to be atomic.
367 */
368 292
369static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr) 293static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
370{ 294{
371 unsigned long addr; 295 const volatile unsigned char *addr;
372 unsigned char ch;
373
374 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
375 ch = *(volatile unsigned char *) addr;
376 return (ch >> (nr & 7)) & 1;
377}
378 296
379static inline int 297 addr = ((const volatile unsigned char *)ptr);
380__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { 298 addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
381 return (((volatile char *) addr) 299 return (*addr >> (nr & 7)) & 1;
382 [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
383} 300}
384 301
385#define test_bit(nr,addr) \
386(__builtin_constant_p((nr)) ? \
387 __constant_test_bit((nr),(addr)) : \
388 __test_bit((nr),(addr)) )
389
390/* 302/*
391 * Optimized find bit helper functions. 303 * Functions which use MSB0 bit numbering.
392 */ 304 * On an s390x system the bits are numbered:
393 305 * |0..............63|64............127|128...........191|192...........255|
394/** 306 * and on s390:
395 * __ffz_word_loop - find byte offset of first long != -1UL 307 * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
396 * @addr: pointer to array of unsigned long
397 * @size: size of the array in bits
398 */ 308 */
399static inline unsigned long __ffz_word_loop(const unsigned long *addr, 309unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
400 unsigned long size) 310unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
401{ 311 unsigned long offset);
402 typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
403 unsigned long bytes = 0;
404
405 asm volatile(
406#ifndef CONFIG_64BIT
407 " ahi %1,-1\n"
408 " sra %1,5\n"
409 " jz 1f\n"
410 "0: c %2,0(%0,%3)\n"
411 " jne 1f\n"
412 " la %0,4(%0)\n"
413 " brct %1,0b\n"
414 "1:\n"
415#else
416 " aghi %1,-1\n"
417 " srag %1,%1,6\n"
418 " jz 1f\n"
419 "0: cg %2,0(%0,%3)\n"
420 " jne 1f\n"
421 " la %0,8(%0)\n"
422 " brct %1,0b\n"
423 "1:\n"
424#endif
425 : "+&a" (bytes), "+&d" (size)
426 : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
427 : "cc" );
428 return bytes;
429}
430 312
431/** 313static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
432 * __ffs_word_loop - find byte offset of first long != 0UL
433 * @addr: pointer to array of unsigned long
434 * @size: size of the array in bits
435 */
436static inline unsigned long __ffs_word_loop(const unsigned long *addr,
437 unsigned long size)
438{ 314{
439 typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype; 315 return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
440 unsigned long bytes = 0;
441
442 asm volatile(
443#ifndef CONFIG_64BIT
444 " ahi %1,-1\n"
445 " sra %1,5\n"
446 " jz 1f\n"
447 "0: c %2,0(%0,%3)\n"
448 " jne 1f\n"
449 " la %0,4(%0)\n"
450 " brct %1,0b\n"
451 "1:\n"
452#else
453 " aghi %1,-1\n"
454 " srag %1,%1,6\n"
455 " jz 1f\n"
456 "0: cg %2,0(%0,%3)\n"
457 " jne 1f\n"
458 " la %0,8(%0)\n"
459 " brct %1,0b\n"
460 "1:\n"
461#endif
462 : "+&a" (bytes), "+&a" (size)
463 : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
464 : "cc" );
465 return bytes;
466} 316}
467 317
468/** 318static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
469 * __ffz_word - add number of the first unset bit
470 * @nr: base value the bit number is added to
471 * @word: the word that is searched for unset bits
472 */
473static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
474{ 319{
475#ifdef CONFIG_64BIT 320 return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
476 if ((word & 0xffffffff) == 0xffffffff) {
477 word >>= 32;
478 nr += 32;
479 }
480#endif
481 if ((word & 0xffff) == 0xffff) {
482 word >>= 16;
483 nr += 16;
484 }
485 if ((word & 0xff) == 0xff) {
486 word >>= 8;
487 nr += 8;
488 }
489 return nr + _zb_findmap[(unsigned char) word];
490} 321}
491 322
492/** 323static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
493 * __ffs_word - add number of the first set bit
494 * @nr: base value the bit number is added to
495 * @word: the word that is searched for set bits
496 */
497static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
498{ 324{
499#ifdef CONFIG_64BIT 325 return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
500 if ((word & 0xffffffff) == 0) {
501 word >>= 32;
502 nr += 32;
503 }
504#endif
505 if ((word & 0xffff) == 0) {
506 word >>= 16;
507 nr += 16;
508 }
509 if ((word & 0xff) == 0) {
510 word >>= 8;
511 nr += 8;
512 }
513 return nr + _sb_findmap[(unsigned char) word];
514} 326}
515 327
516 328static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
517/**
518 * __load_ulong_be - load big endian unsigned long
519 * @p: pointer to array of unsigned long
520 * @offset: byte offset of source value in the array
521 */
522static inline unsigned long __load_ulong_be(const unsigned long *p,
523 unsigned long offset)
524{ 329{
525 p = (unsigned long *)((unsigned long) p + offset); 330 return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
526 return *p;
527} 331}
528 332
529/** 333static inline int test_bit_inv(unsigned long nr,
530 * __load_ulong_le - load little endian unsigned long 334 const volatile unsigned long *ptr)
531 * @p: pointer to array of unsigned long
532 * @offset: byte offset of source value in the array
533 */
534static inline unsigned long __load_ulong_le(const unsigned long *p,
535 unsigned long offset)
536{ 335{
537 unsigned long word; 336 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
538
539 p = (unsigned long *)((unsigned long) p + offset);
540#ifndef CONFIG_64BIT
541 asm volatile(
542 " ic %0,%O1(%R1)\n"
543 " icm %0,2,%O1+1(%R1)\n"
544 " icm %0,4,%O1+2(%R1)\n"
545 " icm %0,8,%O1+3(%R1)"
546 : "=&d" (word) : "Q" (*p) : "cc");
547#else
548 asm volatile(
549 " lrvg %0,%1"
550 : "=d" (word) : "m" (*p) );
551#endif
552 return word;
553} 337}
554 338
555/* 339#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
556 * The various find bit functions.
557 */
558 340
559/* 341/**
560 * ffz - find first zero in word. 342 * __flogr - find leftmost one
561 * @word: The word to search 343 * @word - The word to search
562 * 344 *
563 * Undefined if no zero exists, so code should check against ~0UL first. 345 * Returns the bit number of the most significant bit set,
564 */ 346 * where the most significant bit has bit number 0.
565static inline unsigned long ffz(unsigned long word) 347 * If no bit is set this function returns 64.
566{ 348 */
567 return __ffz_word(0, word); 349static inline unsigned char __flogr(unsigned long word)
350{
351 if (__builtin_constant_p(word)) {
352 unsigned long bit = 0;
353
354 if (!word)
355 return 64;
356 if (!(word & 0xffffffff00000000UL)) {
357 word <<= 32;
358 bit += 32;
359 }
360 if (!(word & 0xffff000000000000UL)) {
361 word <<= 16;
362 bit += 16;
363 }
364 if (!(word & 0xff00000000000000UL)) {
365 word <<= 8;
366 bit += 8;
367 }
368 if (!(word & 0xf000000000000000UL)) {
369 word <<= 4;
370 bit += 4;
371 }
372 if (!(word & 0xc000000000000000UL)) {
373 word <<= 2;
374 bit += 2;
375 }
376 if (!(word & 0x8000000000000000UL)) {
377 word <<= 1;
378 bit += 1;
379 }
380 return bit;
381 } else {
382 register unsigned long bit asm("4") = word;
383 register unsigned long out asm("5");
384
385 asm volatile(
386 " flogr %[bit],%[bit]\n"
387 : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
388 return bit;
389 }
568} 390}
569 391
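__flogr() above returns the MSB0 position of the leftmost set bit (0 for the most significant bit, 64 if the word is zero); the helpers that follow derive __ffs(), ffs(), __fls() and fls64() from it by isolating a bit and XOR-ing the result with BITS_PER_LONG - 1 to convert back to LSB0 numbering. The derivation can be checked on any machine by modelling flogr with __builtin_clzll, as in the sketch below (it assumes a 64-bit unsigned long and is not code from the patch).

#include <stdio.h>

#define BITS_PER_LONG 64

/* flogr(x) = count of leading zero bits, 64 when x == 0 */
static unsigned long flogr(unsigned long word)
{
	return word ? (unsigned long)__builtin_clzll(word) : 64;
}

int main(void)
{
	unsigned long word = 0x90;	/* bits 4 and 7 set (LSB0 numbering) */
	unsigned long mask = 2 * BITS_PER_LONG - 1;

	/* __ffs: isolate the lowest set bit, then convert MSB0 -> LSB0 */
	unsigned long ffs_bit = flogr(-word & word) ^ (BITS_PER_LONG - 1);

	/* fls64: 1-based position of the highest set bit, 0 for word == 0 */
	unsigned long fls64_val = (1 + (flogr(word) ^ (BITS_PER_LONG - 1))) & mask;

	printf("__ffs(0x90)=%lu fls64(0x90)=%lu\n", ffs_bit, fls64_val);
	/* prints: __ffs(0x90)=4 fls64(0x90)=8 */
	return 0;
}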
570/** 392/**
@@ -573,337 +395,83 @@ static inline unsigned long ffz(unsigned long word)
573 * 395 *
574 * Undefined if no bit exists, so code should check against 0 first. 396 * Undefined if no bit exists, so code should check against 0 first.
575 */ 397 */
576static inline unsigned long __ffs (unsigned long word) 398static inline unsigned long __ffs(unsigned long word)
577{ 399{
578 return __ffs_word(0, word); 400 return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
579} 401}
580 402
581/** 403/**
582 * ffs - find first bit set 404 * ffs - find first bit set
583 * @x: the word to search 405 * @word: the word to search
584 * 406 *
585 * This is defined the same way as 407 * This is defined the same way as the libc and
586 * the libc and compiler builtin ffs routines, therefore 408 * compiler builtin ffs routines (man ffs).
587 * differs in spirit from the above ffz (man ffs).
588 */ 409 */
589static inline int ffs(int x) 410static inline int ffs(int word)
590{ 411{
591 if (!x) 412 unsigned long mask = 2 * BITS_PER_LONG - 1;
592 return 0; 413 unsigned int val = (unsigned int)word;
593 return __ffs_word(1, x); 414
415 return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
594} 416}
595 417
596/** 418/**
597 * find_first_zero_bit - find the first zero bit in a memory region 419 * __fls - find last (most-significant) set bit in a long word
598 * @addr: The address to start the search at 420 * @word: the word to search
599 * @size: The maximum size to search
600 * 421 *
601 * Returns the bit-number of the first zero bit, not the number of the byte 422 * Undefined if no set bit exists, so code should check against 0 first.
602 * containing a bit.
603 */ 423 */
604static inline unsigned long find_first_zero_bit(const unsigned long *addr, 424static inline unsigned long __fls(unsigned long word)
605 unsigned long size)
606{ 425{
607 unsigned long bytes, bits; 426 return __flogr(word) ^ (BITS_PER_LONG - 1);
608
609 if (!size)
610 return 0;
611 bytes = __ffz_word_loop(addr, size);
612 bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
613 return (bits < size) ? bits : size;
614} 427}
615#define find_first_zero_bit find_first_zero_bit
616 428
617/** 429/**
618 * find_first_bit - find the first set bit in a memory region 430 * fls64 - find last set bit in a 64-bit word
619 * @addr: The address to start the search at 431 * @word: the word to search
620 * @size: The maximum size to search
621 * 432 *
622 * Returns the bit-number of the first set bit, not the number of the byte 433 * This is defined in a similar way as the libc and compiler builtin
623 * containing a bit. 434 * ffsll, but returns the position of the most significant set bit.
624 */ 435 *
625static inline unsigned long find_first_bit(const unsigned long * addr, 436 * fls64(value) returns 0 if value is 0 or the position of the last
626 unsigned long size) 437 * set bit if value is nonzero. The last (most significant) bit is
627{ 438 * at position 64.
628 unsigned long bytes, bits;
629
630 if (!size)
631 return 0;
632 bytes = __ffs_word_loop(addr, size);
633 bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
634 return (bits < size) ? bits : size;
635}
636#define find_first_bit find_first_bit
637
638/*
639 * Big endian variant whichs starts bit counting from left using
640 * the flogr (find leftmost one) instruction.
641 */
642static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
643{
644 register unsigned long bit asm("2") = val;
645 register unsigned long out asm("3");
646
647 asm volatile (
648 " .insn rre,0xb9830000,%[bit],%[bit]\n"
649 : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
650 return nr + bit;
651}
652
653/*
654 * 64 bit special left bitops format:
655 * order in memory:
656 * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
657 * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
658 * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
659 * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
660 * after that follows the next long with bit numbers
661 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
662 * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
663 * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
664 * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
665 * The reason for this bit ordering is the fact that
666 * the hardware sets bits in a bitmap starting at bit 0
667 * and we don't want to scan the bitmap from the 'wrong
668 * end'.
669 */ 439 */
670static inline unsigned long find_first_bit_left(const unsigned long *addr, 440static inline int fls64(unsigned long word)
671 unsigned long size)
672{
673 unsigned long bytes, bits;
674
675 if (!size)
676 return 0;
677 bytes = __ffs_word_loop(addr, size);
678 bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
679 return (bits < size) ? bits : size;
680}
681
682static inline int find_next_bit_left(const unsigned long *addr,
683 unsigned long size,
684 unsigned long offset)
685{ 441{
686 const unsigned long *p; 442 unsigned long mask = 2 * BITS_PER_LONG - 1;
687 unsigned long bit, set;
688
689 if (offset >= size)
690 return size;
691 bit = offset & (BITS_PER_LONG - 1);
692 offset -= bit;
693 size -= offset;
694 p = addr + offset / BITS_PER_LONG;
695 if (bit) {
696 set = __flo_word(0, *p & (~0UL >> bit));
697 if (set >= size)
698 return size + offset;
699 if (set < BITS_PER_LONG)
700 return set + offset;
701 offset += BITS_PER_LONG;
702 size -= BITS_PER_LONG;
703 p++;
704 }
705 return offset + find_first_bit_left(p, size);
706}
707
708#define for_each_set_bit_left(bit, addr, size) \
709 for ((bit) = find_first_bit_left((addr), (size)); \
710 (bit) < (size); \
711 (bit) = find_next_bit_left((addr), (size), (bit) + 1))
712
713/* same as for_each_set_bit() but use bit as value to start with */
714#define for_each_set_bit_left_cont(bit, addr, size) \
715 for ((bit) = find_next_bit_left((addr), (size), (bit)); \
716 (bit) < (size); \
717 (bit) = find_next_bit_left((addr), (size), (bit) + 1))
718 443
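
The removed "left" bitops above number bits from the most-significant end of each 64-bit word, matching how the hardware fills such bitmaps starting at bit 0. A small userspace sketch of that numbering (plain C with __builtin_clzll(); the kernel code uses flogr):

#include <stdio.h>

#define BITS_PER_LONG 64

/* "Left" bit n lives at machine bit (63 - n % 64) of word n / 64. */
static void set_bit_left(unsigned long nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (BITS_PER_LONG - 1 - nr % BITS_PER_LONG);
}

static unsigned long find_first_bit_left(const unsigned long *addr, unsigned long size)
{
        unsigned long i, bit;

        for (i = 0; i * BITS_PER_LONG < size; i++) {
                if (!addr[i])
                        continue;
                bit = i * BITS_PER_LONG + __builtin_clzll(addr[i]);
                return bit < size ? bit : size;
        }
        return size;
}

int main(void)
{
        unsigned long map[2] = { 0, 0 };

        set_bit_left(0x43, map);        /* bit 0x43: word 1, third bit from the left */
        printf("%lx %lx %lu\n", map[0], map[1], find_first_bit_left(map, 128));
        /* prints: 0 1000000000000000 67 */
        return 0;
}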
719/** 444 return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
720 * find_next_zero_bit - find the first zero bit in a memory region
721 * @addr: The address to base the search on
722 * @offset: The bitnumber to start searching at
723 * @size: The maximum size to search
724 */
725static inline int find_next_zero_bit (const unsigned long * addr,
726 unsigned long size,
727 unsigned long offset)
728{
729 const unsigned long *p;
730 unsigned long bit, set;
731
732 if (offset >= size)
733 return size;
734 bit = offset & (BITS_PER_LONG - 1);
735 offset -= bit;
736 size -= offset;
737 p = addr + offset / BITS_PER_LONG;
738 if (bit) {
739 /*
740 * __ffz_word returns BITS_PER_LONG
741 * if no zero bit is present in the word.
742 */
743 set = __ffz_word(bit, *p >> bit);
744 if (set >= size)
745 return size + offset;
746 if (set < BITS_PER_LONG)
747 return set + offset;
748 offset += BITS_PER_LONG;
749 size -= BITS_PER_LONG;
750 p++;
751 }
752 return offset + find_first_zero_bit(p, size);
753} 445}
754#define find_next_zero_bit find_next_zero_bit
755 446
756/** 447/**
757 * find_next_bit - find the first set bit in a memory region 448 * fls - find last (most-significant) bit set
758 * @addr: The address to base the search on 449 * @word: the word to search
759 * @offset: The bitnumber to start searching at 450 *
760 * @size: The maximum size to search 451 * This is defined the same way as ffs.
452 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
761 */ 453 */
762static inline int find_next_bit (const unsigned long * addr, 454static inline int fls(int word)
763 unsigned long size,
764 unsigned long offset)
765{ 455{
766 const unsigned long *p; 456 return fls64((unsigned int)word);
767 unsigned long bit, set;
768
769 if (offset >= size)
770 return size;
771 bit = offset & (BITS_PER_LONG - 1);
772 offset -= bit;
773 size -= offset;
774 p = addr + offset / BITS_PER_LONG;
775 if (bit) {
776 /*
777 * __ffs_word returns BITS_PER_LONG
778 * if no one bit is present in the word.
779 */
780 set = __ffs_word(0, *p & (~0UL << bit));
781 if (set >= size)
782 return size + offset;
783 if (set < BITS_PER_LONG)
784 return set + offset;
785 offset += BITS_PER_LONG;
786 size -= BITS_PER_LONG;
787 p++;
788 }
789 return offset + find_first_bit(p, size);
790} 457}
791#define find_next_bit find_next_bit
792 458
793/* 459#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
794 * Every architecture must define this function. It's the fastest
795 * way of searching a 140-bit bitmap where the first 100 bits are
796 * unlikely to be set. It's guaranteed that at least one of the 140
797 * bits is cleared.
798 */
799static inline int sched_find_first_bit(unsigned long *b)
800{
801 return find_first_bit(b, 140);
802}
803 460
804#include <asm-generic/bitops/fls.h> 461#include <asm-generic/bitops/__ffs.h>
462#include <asm-generic/bitops/ffs.h>
805#include <asm-generic/bitops/__fls.h> 463#include <asm-generic/bitops/__fls.h>
464#include <asm-generic/bitops/fls.h>
806#include <asm-generic/bitops/fls64.h> 465#include <asm-generic/bitops/fls64.h>
807 466
467#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
468
469#include <asm-generic/bitops/ffz.h>
470#include <asm-generic/bitops/find.h>
808#include <asm-generic/bitops/hweight.h> 471#include <asm-generic/bitops/hweight.h>
809#include <asm-generic/bitops/lock.h> 472#include <asm-generic/bitops/lock.h>
810 473#include <asm-generic/bitops/sched.h>
811/*
812 * ATTENTION: intel byte ordering convention for ext2 and minix !!
813 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
814 * bit 32 is the LSB of (addr+4).
815 * That combined with the little endian byte order of Intel gives the
816 * following bit order in memory:
817 * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
818 * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
819 */
820
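
The little-endian bit numbering described in the comment above boils down to: le bit n lives in byte n / 8 at bit n % 8, independent of the host word size. A short illustration:

#include <stdio.h>
#include <string.h>

static void set_bit_le(unsigned long nr, void *addr)
{
        ((unsigned char *)addr)[nr / 8] |= 1U << (nr % 8);
}

int main(void)
{
        unsigned char map[8];

        memset(map, 0, sizeof(map));
        set_bit_le(0, map);     /* LSB of the first byte  */
        set_bit_le(15, map);    /* MSB of the second byte */
        printf("%02x %02x\n", map[0], map[1]);  /* prints: 01 80 */
        return 0;
}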
821static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
822{
823 unsigned long bytes, bits;
824
825 if (!size)
826 return 0;
827 bytes = __ffz_word_loop(vaddr, size);
828 bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
829 return (bits < size) ? bits : size;
830}
831#define find_first_zero_bit_le find_first_zero_bit_le
832
833static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
834 unsigned long offset)
835{
836 unsigned long *addr = vaddr, *p;
837 unsigned long bit, set;
838
839 if (offset >= size)
840 return size;
841 bit = offset & (BITS_PER_LONG - 1);
842 offset -= bit;
843 size -= offset;
844 p = addr + offset / BITS_PER_LONG;
845 if (bit) {
846 /*
847 * s390 version of ffz returns BITS_PER_LONG
848 * if no zero bit is present in the word.
849 */
850 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
851 if (set >= size)
852 return size + offset;
853 if (set < BITS_PER_LONG)
854 return set + offset;
855 offset += BITS_PER_LONG;
856 size -= BITS_PER_LONG;
857 p++;
858 }
859 return offset + find_first_zero_bit_le(p, size);
860}
861#define find_next_zero_bit_le find_next_zero_bit_le
862
863static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
864{
865 unsigned long bytes, bits;
866
867 if (!size)
868 return 0;
869 bytes = __ffs_word_loop(vaddr, size);
870 bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
871 return (bits < size) ? bits : size;
872}
873#define find_first_bit_le find_first_bit_le
874
875static inline int find_next_bit_le(void *vaddr, unsigned long size,
876 unsigned long offset)
877{
878 unsigned long *addr = vaddr, *p;
879 unsigned long bit, set;
880
881 if (offset >= size)
882 return size;
883 bit = offset & (BITS_PER_LONG - 1);
884 offset -= bit;
885 size -= offset;
886 p = addr + offset / BITS_PER_LONG;
887 if (bit) {
888 /*
889 * s390 version of ffz returns BITS_PER_LONG
890 * if no zero bit is present in the word.
891 */
892 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
893 if (set >= size)
894 return size + offset;
895 if (set < BITS_PER_LONG)
896 return set + offset;
897 offset += BITS_PER_LONG;
898 size -= BITS_PER_LONG;
899 p++;
900 }
901 return offset + find_first_bit_le(p, size);
902}
903#define find_next_bit_le find_next_bit_le
904
905#include <asm-generic/bitops/le.h> 474#include <asm-generic/bitops/le.h>
906
907#include <asm-generic/bitops/ext2-atomic-setbit.h> 475#include <asm-generic/bitops/ext2-atomic-setbit.h>
908 476
909#endif /* _S390_BITOPS_H */ 477#endif /* _S390_BITOPS_H */
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index c1e7c646727c..4bf9da03591e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -22,6 +22,7 @@
22#define PSW32_MASK_ASC 0x0000C000UL 22#define PSW32_MASK_ASC 0x0000C000UL
23#define PSW32_MASK_CC 0x00003000UL 23#define PSW32_MASK_CC 0x00003000UL
24#define PSW32_MASK_PM 0x00000f00UL 24#define PSW32_MASK_PM 0x00000f00UL
25#define PSW32_MASK_RI 0x00000080UL
25 26
26#define PSW32_MASK_USER 0x0000FF00UL 27#define PSW32_MASK_USER 0x0000FF00UL
27 28
@@ -35,7 +36,9 @@
35#define PSW32_ASC_SECONDARY 0x00008000UL 36#define PSW32_ASC_SECONDARY 0x00008000UL
36#define PSW32_ASC_HOME 0x0000C000UL 37#define PSW32_ASC_HOME 0x0000C000UL
37 38
38extern u32 psw32_user_bits; 39#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
40 PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
41 PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
39 42
40#define COMPAT_USER_HZ 100 43#define COMPAT_USER_HZ 100
41#define COMPAT_UTS_MACHINE "s390\0\0\0\0" 44#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index debfda33d1f8..9b69c0befdca 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,69 +8,59 @@
8#define __ASM_CTL_REG_H 8#define __ASM_CTL_REG_H
9 9
10#ifdef CONFIG_64BIT 10#ifdef CONFIG_64BIT
11 11# define __CTL_LOAD "lctlg"
12#define __ctl_load(array, low, high) ({ \ 12# define __CTL_STORE "stctg"
13 typedef struct { char _[sizeof(array)]; } addrtype; \ 13#else
14 asm volatile( \ 14# define __CTL_LOAD "lctl"
15 " lctlg %1,%2,%0\n" \ 15# define __CTL_STORE "stctl"
16 : : "Q" (*(addrtype *)(&array)), \ 16#endif
17 "i" (low), "i" (high)); \ 17
18 }) 18#define __ctl_load(array, low, high) { \
19 19 typedef struct { char _[sizeof(array)]; } addrtype; \
20#define __ctl_store(array, low, high) ({ \ 20 \
21 typedef struct { char _[sizeof(array)]; } addrtype; \ 21 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
22 asm volatile( \ 22 asm volatile( \
23 " stctg %1,%2,%0\n" \ 23 __CTL_LOAD " %1,%2,%0\n" \
24 : "=Q" (*(addrtype *)(&array)) \ 24 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
25 : "i" (low), "i" (high)); \ 25}
26 }) 26
27 27#define __ctl_store(array, low, high) { \
28#else /* CONFIG_64BIT */ 28 typedef struct { char _[sizeof(array)]; } addrtype; \
29 29 \
30#define __ctl_load(array, low, high) ({ \ 30 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
31 typedef struct { char _[sizeof(array)]; } addrtype; \ 31 asm volatile( \
32 asm volatile( \ 32 __CTL_STORE " %1,%2,%0\n" \
33 " lctl %1,%2,%0\n" \ 33 : "=Q" (*(addrtype *)(&array)) \
34 : : "Q" (*(addrtype *)(&array)), \ 34 : "i" (low), "i" (high)); \
35 "i" (low), "i" (high)); \ 35}
36}) 36
37 37static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
38#define __ctl_store(array, low, high) ({ \ 38{
39 typedef struct { char _[sizeof(array)]; } addrtype; \ 39 unsigned long reg;
40 asm volatile( \ 40
41 " stctl %1,%2,%0\n" \ 41 __ctl_store(reg, cr, cr);
42 : "=Q" (*(addrtype *)(&array)) \ 42 reg |= 1UL << bit;
43 : "i" (low), "i" (high)); \ 43 __ctl_load(reg, cr, cr);
44 }) 44}
45 45
46#endif /* CONFIG_64BIT */ 46static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
47 47{
48#define __ctl_set_bit(cr, bit) ({ \ 48 unsigned long reg;
49 unsigned long __dummy; \ 49
50 __ctl_store(__dummy, cr, cr); \ 50 __ctl_store(reg, cr, cr);
51 __dummy |= 1UL << (bit); \ 51 reg &= ~(1UL << bit);
52 __ctl_load(__dummy, cr, cr); \ 52 __ctl_load(reg, cr, cr);
53}) 53}
54 54
55#define __ctl_clear_bit(cr, bit) ({ \ 55void smp_ctl_set_bit(int cr, int bit);
56 unsigned long __dummy; \ 56void smp_ctl_clear_bit(int cr, int bit);
57 __ctl_store(__dummy, cr, cr); \
58 __dummy &= ~(1UL << (bit)); \
59 __ctl_load(__dummy, cr, cr); \
60})
61 57
62#ifdef CONFIG_SMP 58#ifdef CONFIG_SMP
63 59# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
64extern void smp_ctl_set_bit(int cr, int bit); 60# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
65extern void smp_ctl_clear_bit(int cr, int bit);
66#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
67#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
68
69#else 61#else
70 62# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
71#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit) 63# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
72#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit) 64#endif
73
74#endif /* CONFIG_SMP */
75 65
76#endif /* __ASM_CTL_REG_H */ 66#endif /* __ASM_CTL_REG_H */
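
The new __ctl_set_bit()/__ctl_clear_bit() inlines are a plain read-modify-write of one control register via __ctl_store()/__ctl_load(). A userspace model of that pattern, with the control registers faked by an array (the real code of course uses the stctg/lctlg instructions and the BUILD_BUG_ON size check):

#include <stdio.h>

static unsigned long fake_cr[16];       /* stand-in for the 16 control registers */

static void ctl_store(unsigned long *reg, unsigned int cr) { *reg = fake_cr[cr]; }
static void ctl_load(unsigned long reg, unsigned int cr)   { fake_cr[cr] = reg; }

static void ctl_set_bit(unsigned int cr, unsigned int bit)
{
        unsigned long reg;

        ctl_store(&reg, cr);
        reg |= 1UL << bit;
        ctl_load(reg, cr);
}

int main(void)
{
        ctl_set_bit(0, 17);                     /* e.g. some facility enable bit */
        printf("cr0 = %016lx\n", fake_cr[0]);   /* prints: cr0 = 0000000000020000 */
        return 0;
}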
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 188c5052a20a..530c15eb01e9 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
107void debug_set_critical(void); 107void debug_set_critical(void);
108void debug_stop_all(void); 108void debug_stop_all(void);
109 109
110static inline bool debug_level_enabled(debug_info_t* id, int level)
111{
112 return level <= id->level;
113}
114
110static inline debug_entry_t* 115static inline debug_entry_t*
111debug_event(debug_info_t* id, int level, void* data, int length) 116debug_event(debug_info_t* id, int level, void* data, int length)
112{ 117{
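
The new debug_level_enabled() helper lets callers skip building debug events whose level is above the debug_info's current level. A tiny model of the check:

#include <stdio.h>
#include <stdbool.h>

struct debug_info { int level; };

static bool debug_level_enabled(const struct debug_info *id, int level)
{
        return level <= id->level;
}

int main(void)
{
        struct debug_info dbf = { .level = 3 };

        printf("%d %d\n", debug_level_enabled(&dbf, 1), debug_level_enabled(&dbf, 5));
        /* prints: 1 0 */
        return 0;
}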
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644
index 000000000000..04a83f5773cd
--- /dev/null
+++ b/arch/s390/include/asm/dis.h
@@ -0,0 +1,52 @@
1/*
2 * Disassemble s390 instructions.
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 */
7
8#ifndef __ASM_S390_DIS_H__
9#define __ASM_S390_DIS_H__
10
11/* Type of operand */
12#define OPERAND_GPR 0x1 /* Operand printed as %rx */
13#define OPERAND_FPR 0x2 /* Operand printed as %fx */
14#define OPERAND_AR 0x4 /* Operand printed as %ax */
15#define OPERAND_CR 0x8 /* Operand printed as %cx */
16#define OPERAND_DISP 0x10 /* Operand printed as displacement */
17#define OPERAND_BASE 0x20 /* Operand printed as base register */
18#define OPERAND_INDEX 0x40 /* Operand printed as index register */
19#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
20#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
21#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
22
23
24struct s390_operand {
25 int bits; /* The number of bits in the operand. */
26 int shift; /* The number of bits to shift. */
27 int flags; /* One bit syntax flags. */
28};
29
30struct s390_insn {
31 const char name[5];
32 unsigned char opfrag;
33 unsigned char format;
34};
35
36
37static inline int insn_length(unsigned char code)
38{
39 return ((((int) code + 64) >> 7) + 1) << 1;
40}
41
42void show_code(struct pt_regs *regs);
43void print_fn_code(unsigned char *code, unsigned long len);
44int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
45struct s390_insn *find_insn(unsigned char *code);
46
47static inline int is_known_insn(unsigned char *code)
48{
49 return !!find_insn(code);
50}
51
52#endif /* __ASM_S390_DIS_H__ */
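
insn_length() above encodes the s390 rule that the two top bits of the first opcode byte select the instruction length: 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. A quick worked check of the formula:

#include <stdio.h>

static int insn_length(unsigned char code)
{
        return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
        /* 0x07 (bcr), 0xa7 (RI format), 0xc0 (larl), 0xe3 (RXY format) */
        printf("%d %d %d %d\n",
               insn_length(0x07), insn_length(0xa7),
               insn_length(0xc0), insn_length(0xe3));
        /* prints: 2 4 6 6 */
        return 0;
}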
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index ef6170995076..7ecb92b469b6 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -12,9 +12,9 @@
12 12
13#define TCW_FORMAT_DEFAULT 0 13#define TCW_FORMAT_DEFAULT 0
14#define TCW_TIDAW_FORMAT_DEFAULT 0 14#define TCW_TIDAW_FORMAT_DEFAULT 0
15#define TCW_FLAGS_INPUT_TIDA 1 << (23 - 5) 15#define TCW_FLAGS_INPUT_TIDA (1 << (23 - 5))
16#define TCW_FLAGS_TCCB_TIDA 1 << (23 - 6) 16#define TCW_FLAGS_TCCB_TIDA (1 << (23 - 6))
17#define TCW_FLAGS_OUTPUT_TIDA 1 << (23 - 7) 17#define TCW_FLAGS_OUTPUT_TIDA (1 << (23 - 7))
18#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9) 18#define TCW_FLAGS_TIDAW_FORMAT(x) ((x) & 3) << (23 - 9)
19#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3) 19#define TCW_FLAGS_GET_TIDAW_FORMAT(x) (((x) >> (23 - 9)) & 3)
20 20
@@ -54,11 +54,11 @@ struct tcw {
54 u32 intrg; 54 u32 intrg;
55} __attribute__ ((packed, aligned(64))); 55} __attribute__ ((packed, aligned(64)));
56 56
57#define TIDAW_FLAGS_LAST 1 << (7 - 0) 57#define TIDAW_FLAGS_LAST (1 << (7 - 0))
58#define TIDAW_FLAGS_SKIP 1 << (7 - 1) 58#define TIDAW_FLAGS_SKIP (1 << (7 - 1))
59#define TIDAW_FLAGS_DATA_INT 1 << (7 - 2) 59#define TIDAW_FLAGS_DATA_INT (1 << (7 - 2))
60#define TIDAW_FLAGS_TTIC 1 << (7 - 3) 60#define TIDAW_FLAGS_TTIC (1 << (7 - 3))
61#define TIDAW_FLAGS_INSERT_CBC 1 << (7 - 4) 61#define TIDAW_FLAGS_INSERT_CBC (1 << (7 - 4))
62 62
63/** 63/**
64 * struct tidaw - Transport-Indirect-Addressing Word (TIDAW) 64 * struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
106 u8 sense[32]; 106 u8 sense[32];
107} __attribute__ ((packed)); 107} __attribute__ ((packed));
108 108
109#define TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0) 109#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0))
110#define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1) 110#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1))
111#define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2) 111#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2))
112 112
113/** 113/**
114 * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA) 114 * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
140#define TSB_FORMAT_DDPC 2 140#define TSB_FORMAT_DDPC 2
141#define TSB_FORMAT_INTRG 3 141#define TSB_FORMAT_INTRG 3
142 142
143#define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0) 143#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0))
144#define TSB_FLAGS_COUNT_VALID 1 << (7 - 1) 144#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1))
145#define TSB_FLAGS_CACHE_MISS 1 << (7 - 2) 145#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2))
146#define TSB_FLAGS_TIME_VALID 1 << (7 - 3) 146#define TSB_FLAGS_TIME_VALID (1 << (7 - 3))
147#define TSB_FLAGS_FORMAT(x) ((x) & 7) 147#define TSB_FLAGS_FORMAT(x) ((x) & 7)
148#define TSB_FORMAT(t) ((t)->flags & 7) 148#define TSB_FORMAT(t) ((t)->flags & 7)
149 149
@@ -179,9 +179,9 @@ struct tsb {
179#define DCW_INTRG_RCQ_PRIMARY 1 179#define DCW_INTRG_RCQ_PRIMARY 1
180#define DCW_INTRG_RCQ_SECONDARY 2 180#define DCW_INTRG_RCQ_SECONDARY 2
181 181
182#define DCW_INTRG_FLAGS_MPM 1 < (7 - 0) 182#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0))
183#define DCW_INTRG_FLAGS_PPR 1 < (7 - 1) 183#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1))
184#define DCW_INTRG_FLAGS_CRIT 1 < (7 - 2) 184#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2))
185 185
186/** 186/**
187 * struct dcw_intrg_data - Interrogate DCW data 187 * struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
216 u8 prog_data[0]; 216 u8 prog_data[0];
217} __attribute__ ((packed)); 217} __attribute__ ((packed));
218 218
219#define DCW_FLAGS_CC 1 << (7 - 1) 219#define DCW_FLAGS_CC (1 << (7 - 1))
220 220
221#define DCW_CMD_WRITE 0x01 221#define DCW_CMD_WRITE 0x01
222#define DCW_CMD_READ 0x02 222#define DCW_CMD_READ 0x02
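
The parenthesisation fix above also repairs the DCW_INTRG_FLAGS_* macros, which used "<" where "<<" was meant and therefore all evaluated to 1. A small before/after check:

#include <stdio.h>

#define OLD_DCW_INTRG_FLAGS_MPM   1 < (7 - 0)
#define OLD_DCW_INTRG_FLAGS_PPR   1 < (7 - 1)
#define NEW_DCW_INTRG_FLAGS_MPM   (1 << (7 - 0))
#define NEW_DCW_INTRG_FLAGS_PPR   (1 << (7 - 1))

int main(void)
{
        printf("old: %d %d  new: %d %d\n",
               OLD_DCW_INTRG_FLAGS_MPM, OLD_DCW_INTRG_FLAGS_PPR,
               NEW_DCW_INTRG_FLAGS_MPM, NEW_DCW_INTRG_FLAGS_PPR);
        /* prints: old: 1 1  new: 128 64 */
        return 0;
}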
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 2bd6cb897b90..2fcccc0c997c 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -7,6 +7,7 @@
7#ifndef _ASM_S390_IPL_H 7#ifndef _ASM_S390_IPL_H
8#define _ASM_S390_IPL_H 8#define _ASM_S390_IPL_H
9 9
10#include <asm/lowcore.h>
10#include <asm/types.h> 11#include <asm/types.h>
11#include <asm/cio.h> 12#include <asm/cio.h>
12#include <asm/setup.h> 13#include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
86 */ 87 */
87extern u32 ipl_flags; 88extern u32 ipl_flags;
88extern u32 dump_prefix_page; 89extern u32 dump_prefix_page;
89extern unsigned int zfcpdump_prefix_array[]; 90
91struct dump_save_areas {
92 struct save_area **areas;
93 int count;
94};
95
96extern struct dump_save_areas dump_save_areas;
97struct save_area *dump_save_area_create(int cpu);
90 98
91extern void do_reipl(void); 99extern void do_reipl(void);
92extern void do_halt(void); 100extern void do_halt(void);
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 9f973d8de90e..5d1f950704dc 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
40 pgd_t *pgd = mm->pgd; 40 pgd_t *pgd = mm->pgd;
41 41
42 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 42 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
43 if (s390_user_mode != HOME_SPACE_MODE) { 43 /* Load primary space page table origin. */
44 /* Load primary space page table origin. */ 44 asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
45 asm volatile(LCTL_OPCODE" 1,1,%0\n"
46 : : "m" (S390_lowcore.user_asce) );
47 } else
48 /* Load home space page table origin. */
49 asm volatile(LCTL_OPCODE" 13,13,%0"
50 : : "m" (S390_lowcore.user_asce) );
51 set_fs(current->thread.mm_segment); 45 set_fs(current->thread.mm_segment);
52} 46}
53 47
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 1e51f2915b2e..316c8503a3b4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,7 +30,12 @@
30#include <asm/setup.h> 30#include <asm/setup.h>
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32 32
33void storage_key_init_range(unsigned long start, unsigned long end); 33static inline void storage_key_init_range(unsigned long start, unsigned long end)
34{
35#if PAGE_DEFAULT_KEY
36 __storage_key_init_range(start, end);
37#endif
38}
34 39
35static inline void clear_page(void *page) 40static inline void clear_page(void *page)
36{ 41{
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 1ca5d1047c71..ac24b26fc065 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -6,14 +6,9 @@
6extern debug_info_t *pci_debug_msg_id; 6extern debug_info_t *pci_debug_msg_id;
7extern debug_info_t *pci_debug_err_id; 7extern debug_info_t *pci_debug_err_id;
8 8
9#ifdef CONFIG_PCI_DEBUG
10#define zpci_dbg(imp, fmt, args...) \ 9#define zpci_dbg(imp, fmt, args...) \
11 debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args) 10 debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
12 11
13#else /* !CONFIG_PCI_DEBUG */
14#define zpci_dbg(imp, fmt, args...) do { } while (0)
15#endif
16
17#define zpci_err(text...) \ 12#define zpci_err(text...) \
18 do { \ 13 do { \
19 char debug_buffer[16]; \ 14 char debug_buffer[16]; \
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index df6eac9f0cb4..649eb62c52b3 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -54,11 +54,9 @@
54struct zpci_fib { 54struct zpci_fib {
55 u32 fmt : 8; /* format */ 55 u32 fmt : 8; /* format */
56 u32 : 24; 56 u32 : 24;
57 u32 reserved1; 57 u32 : 32;
58 u8 fc; /* function controls */ 58 u8 fc; /* function controls */
59 u8 reserved2; 59 u64 : 56;
60 u16 reserved3;
61 u32 reserved4;
62 u64 pba; /* PCI base address */ 60 u64 pba; /* PCI base address */
63 u64 pal; /* PCI address limit */ 61 u64 pal; /* PCI address limit */
64 u64 iota; /* I/O Translation Anchor */ 62 u64 iota; /* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
70 u32 sum : 1; /* Adapter int summary bit enabled */ 68 u32 sum : 1; /* Adapter int summary bit enabled */
71 u32 : 1; 69 u32 : 1;
72 u32 aisbo : 6; /* Adapter int summary bit offset */ 70 u32 aisbo : 6; /* Adapter int summary bit offset */
73 u32 reserved5; 71 u32 : 32;
74 u64 aibv; /* Adapter int bit vector address */ 72 u64 aibv; /* Adapter int bit vector address */
75 u64 aisb; /* Adapter int summary bit address */ 73 u64 aisb; /* Adapter int summary bit address */
76 u64 fmb_addr; /* Function measurement block address and key */ 74 u64 fmb_addr; /* Function measurement block address and key */
77 u64 reserved6; 75 u32 : 32;
78 u64 reserved7; 76 u32 gd;
79} __packed; 77} __packed __aligned(8);
80
81 78
82int zpci_mod_fc(u64 req, struct zpci_fib *fib); 79int zpci_mod_fc(u64 req, struct zpci_fib *fib);
83int zpci_refresh_trans(u64 fn, u64 addr, u64 range); 80int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
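
The zpci_fib cleanup replaces the named reservedN members with unnamed bit-fields, which still reserve the same space. A cut-down userspace check of the first few members, relying on GCC's packed bit-field layout and shown only to illustrate the idea:

#include <stdio.h>
#include <stddef.h>

struct fib_head {
        unsigned int fmt : 8;
        unsigned int : 24;
        unsigned int : 32;
        unsigned char fc;
        unsigned long long : 56;
        unsigned long long pba;
} __attribute__((packed, aligned(8)));

int main(void)
{
        /* pba still lands at offset 16, as with the old reserved members */
        printf("%zu %zu\n", offsetof(struct fib_head, pba), sizeof(struct fib_head));
        /* prints: 16 24 */
        return 0;
}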
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 86fe0ee2cee5..fa91e0097458 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,16 +10,22 @@
10 */ 10 */
11#define __my_cpu_offset S390_lowcore.percpu_offset 11#define __my_cpu_offset S390_lowcore.percpu_offset
12 12
13#ifdef CONFIG_64BIT
14
13/* 15/*
14 * For 64 bit module code, the module may be more than 4G above the 16 * For 64 bit module code, the module may be more than 4G above the
15 * per cpu area, use weak definitions to force the compiler to 17 * per cpu area, use weak definitions to force the compiler to
16 * generate external references. 18 * generate external references.
17 */ 19 */
18#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE) 20#if defined(CONFIG_SMP) && defined(MODULE)
19#define ARCH_NEEDS_WEAK_PER_CPU 21#define ARCH_NEEDS_WEAK_PER_CPU
20#endif 22#endif
21 23
22#define arch_this_cpu_to_op(pcp, val, op) \ 24/*
25 * We use a compare-and-swap loop since that uses less cpu cycles than
26 * disabling and enabling interrupts like the generic variant would do.
27 */
28#define arch_this_cpu_to_op_simple(pcp, val, op) \
23({ \ 29({ \
24 typedef typeof(pcp) pcp_op_T__; \ 30 typedef typeof(pcp) pcp_op_T__; \
25 pcp_op_T__ old__, new__, prev__; \ 31 pcp_op_T__ old__, new__, prev__; \
@@ -30,42 +36,101 @@
30 do { \ 36 do { \
31 old__ = prev__; \ 37 old__ = prev__; \
32 new__ = old__ op (val); \ 38 new__ = old__ op (val); \
33 switch (sizeof(*ptr__)) { \ 39 prev__ = cmpxchg(ptr__, old__, new__); \
34 case 8: \
35 prev__ = cmpxchg64(ptr__, old__, new__); \
36 break; \
37 default: \
38 prev__ = cmpxchg(ptr__, old__, new__); \
39 } \
40 } while (prev__ != old__); \ 40 } while (prev__ != old__); \
41 preempt_enable(); \ 41 preempt_enable(); \
42 new__; \ 42 new__; \
43}) 43})
44 44
45#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) 45#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
46#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) 46#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
47#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +) 47#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
48#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +) 48#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
49#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
50#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
51#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
52#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
53
54#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
55
56#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
57#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
58#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
59#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
60#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
61#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &)
62#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
63#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |)
64
65#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
66
67#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \
68{ \
69 typedef typeof(pcp) pcp_op_T__; \
70 pcp_op_T__ val__ = (val); \
71 pcp_op_T__ old__, *ptr__; \
72 preempt_disable(); \
73 ptr__ = __this_cpu_ptr(&(pcp)); \
74 if (__builtin_constant_p(val__) && \
75 ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
76 asm volatile( \
77 op2 " %[ptr__],%[val__]\n" \
78 : [ptr__] "+Q" (*ptr__) \
79 : [val__] "i" ((szcast)val__) \
80 : "cc"); \
81 } else { \
82 asm volatile( \
83 op1 " %[old__],%[val__],%[ptr__]\n" \
84 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
85 : [val__] "d" (val__) \
86 : "cc"); \
87 } \
88 preempt_enable(); \
89}
49 90
50#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) 91#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
51#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) 92#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
52#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
53#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
54 93
55#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &) 94#define arch_this_cpu_add_return(pcp, val, op) \
56#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &) 95({ \
57#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &) 96 typedef typeof(pcp) pcp_op_T__; \
58#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &) 97 pcp_op_T__ val__ = (val); \
98 pcp_op_T__ old__, *ptr__; \
99 preempt_disable(); \
100 ptr__ = __this_cpu_ptr(&(pcp)); \
101 asm volatile( \
102 op " %[old__],%[val__],%[ptr__]\n" \
103 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
104 : [val__] "d" (val__) \
105 : "cc"); \
106 preempt_enable(); \
107 old__ + val__; \
108})
59 109
60#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |) 110#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
61#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |) 111#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
62#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
63#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
64 112
65#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 113#define arch_this_cpu_to_op(pcp, val, op) \
66#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 114{ \
67#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 115 typedef typeof(pcp) pcp_op_T__; \
68#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 116 pcp_op_T__ val__ = (val); \
117 pcp_op_T__ old__, *ptr__; \
118 preempt_disable(); \
119 ptr__ = __this_cpu_ptr(&(pcp)); \
120 asm volatile( \
121 op " %[old__],%[val__],%[ptr__]\n" \
122 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
123 : [val__] "d" (val__) \
124 : "cc"); \
125 preempt_enable(); \
126}
127
128#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan")
129#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang")
130#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao")
131#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog")
132
133#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
69 134
70#define arch_this_cpu_cmpxchg(pcp, oval, nval) \ 135#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
71({ \ 136({ \
@@ -74,13 +139,7 @@
74 pcp_op_T__ *ptr__; \ 139 pcp_op_T__ *ptr__; \
75 preempt_disable(); \ 140 preempt_disable(); \
76 ptr__ = __this_cpu_ptr(&(pcp)); \ 141 ptr__ = __this_cpu_ptr(&(pcp)); \
77 switch (sizeof(*ptr__)) { \ 142 ret__ = cmpxchg(ptr__, oval, nval); \
78 case 8: \
79 ret__ = cmpxchg64(ptr__, oval, nval); \
80 break; \
81 default: \
82 ret__ = cmpxchg(ptr__, oval, nval); \
83 } \
84 preempt_enable(); \ 143 preempt_enable(); \
85 ret__; \ 144 ret__; \
86}) 145})
@@ -104,9 +163,7 @@
104#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval) 163#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
105#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval) 164#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
106#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval) 165#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
107#ifdef CONFIG_64BIT
108#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval) 166#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
109#endif
110 167
111#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \ 168#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
112({ \ 169({ \
@@ -124,9 +181,9 @@
124}) 181})
125 182
126#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double 183#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
127#ifdef CONFIG_64BIT
128#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double 184#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
129#endif 185
186#endif /* CONFIG_64BIT */
130 187
131#include <asm-generic/percpu.h> 188#include <asm-generic/percpu.h>
132 189
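
arch_this_cpu_to_op_simple() above is a classic compare-and-swap retry loop. A userspace model using the GCC atomic builtins, with preempt_disable()/enable() and the per-cpu pointer lookup left out:

#include <stdio.h>

static long percpu_counter;             /* stand-in for one per-cpu slot */

static long this_cpu_add_return(long *ptr, long val)
{
        long old, new;

        old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
        do {
                new = old + val;
                /* on failure, old is reloaded with the current value */
        } while (!__atomic_compare_exchange_n(ptr, &old, new, 0,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
        return new;
}

int main(void)
{
        printf("%ld\n", this_cpu_add_return(&percpu_counter, 5));       /* prints: 5 */
        printf("%ld\n", this_cpu_add_return(&percpu_counter, 7));       /* prints: 12 */
        return 0;
}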
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ca7821f07260..0a876bc543d3 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -134,19 +134,17 @@ struct stack_frame {
134 * Do necessary setup to start up a new thread. 134 * Do necessary setup to start up a new thread.
135 */ 135 */
136#define start_thread(regs, new_psw, new_stackp) do { \ 136#define start_thread(regs, new_psw, new_stackp) do { \
137 regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \ 137 regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
138 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 138 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
139 regs->gprs[15] = new_stackp; \ 139 regs->gprs[15] = new_stackp; \
140 execve_tail(); \ 140 execve_tail(); \
141} while (0) 141} while (0)
142 142
143#define start_thread31(regs, new_psw, new_stackp) do { \ 143#define start_thread31(regs, new_psw, new_stackp) do { \
144 regs->psw.mask = psw_user_bits | PSW_MASK_BA; \ 144 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
145 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 145 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
146 regs->gprs[15] = new_stackp; \ 146 regs->gprs[15] = new_stackp; \
147 __tlb_flush_mm(current->mm); \
148 crst_table_downgrade(current->mm, 1UL << 31); \ 147 crst_table_downgrade(current->mm, 1UL << 31); \
149 update_mm(current->mm, current); \
150 execve_tail(); \ 148 execve_tail(); \
151} while (0) 149} while (0)
152 150
@@ -169,17 +167,15 @@ extern void release_thread(struct task_struct *);
169 */ 167 */
170extern unsigned long thread_saved_pc(struct task_struct *t); 168extern unsigned long thread_saved_pc(struct task_struct *t);
171 169
172extern void show_code(struct pt_regs *regs);
173extern void print_fn_code(unsigned char *code, unsigned long len);
174extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
175 unsigned int len);
176
177unsigned long get_wchan(struct task_struct *p); 170unsigned long get_wchan(struct task_struct *p);
178#define task_pt_regs(tsk) ((struct pt_regs *) \ 171#define task_pt_regs(tsk) ((struct pt_regs *) \
179 (task_stack_page(tsk) + THREAD_SIZE) - 1) 172 (task_stack_page(tsk) + THREAD_SIZE) - 1)
180#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr) 173#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
181#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15]) 174#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
182 175
176/* Has task runtime instrumentation enabled ? */
177#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
178
183static inline unsigned short stap(void) 179static inline unsigned short stap(void)
184{ 180{
185 unsigned short cpu_address; 181 unsigned short cpu_address;
@@ -348,9 +344,9 @@ __set_psw_mask(unsigned long mask)
348} 344}
349 345
350#define local_mcck_enable() \ 346#define local_mcck_enable() \
351 __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK) 347 __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
352#define local_mcck_disable() \ 348#define local_mcck_disable() \
353 __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT) 349 __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
354 350
355/* 351/*
356 * Basic Machine Check/Program Check Handler. 352 * Basic Machine Check/Program Check Handler.
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 52b56533c57c..9c82cebddabd 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -10,8 +10,11 @@
10 10
11#ifndef __ASSEMBLY__ 11#ifndef __ASSEMBLY__
12 12
13extern long psw_kernel_bits; 13#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
14extern long psw_user_bits; 14 PSW_MASK_EA | PSW_MASK_BA)
15#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
16 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
17 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
15 18
16/* 19/*
17 * The pt_regs struct defines the way the registers are stored on 20 * The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 59880dbaf360..df802ee14af6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
48void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, 48void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
49 unsigned long size); 49 unsigned long size);
50 50
51#define PRIMARY_SPACE_MODE 0
52#define ACCESS_REGISTER_MODE 1
53#define SECONDARY_SPACE_MODE 2
54#define HOME_SPACE_MODE 3
55
56extern unsigned int s390_user_mode;
57
58/* 51/*
59 * Machine features detected in head.S 52 * Machine features detected in head.S
60 */ 53 */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index b64f15c3b4cc..ac9bed8e103f 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -14,7 +14,6 @@
14#define raw_smp_processor_id() (S390_lowcore.cpu_nr) 14#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
15 15
16extern struct mutex smp_cpu_state_mutex; 16extern struct mutex smp_cpu_state_mutex;
17extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
18 17
19extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); 18extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
20 19
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 6dbd559763c9..29c81f82705e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -13,58 +13,94 @@
13extern struct task_struct *__switch_to(void *, void *); 13extern struct task_struct *__switch_to(void *, void *);
14extern void update_cr_regs(struct task_struct *task); 14extern void update_cr_regs(struct task_struct *task);
15 15
16static inline void save_fp_regs(s390_fp_regs *fpregs) 16static inline int test_fp_ctl(u32 fpc)
17{ 17{
18 u32 orig_fpc;
19 int rc;
20
21 if (!MACHINE_HAS_IEEE)
22 return 0;
23
18 asm volatile( 24 asm volatile(
19 " std 0,%O0+8(%R0)\n" 25 " efpc %1\n"
20 " std 2,%O0+24(%R0)\n" 26 " sfpc %2\n"
21 " std 4,%O0+40(%R0)\n" 27 "0: sfpc %1\n"
22 " std 6,%O0+56(%R0)" 28 " la %0,0\n"
23 : "=Q" (*fpregs) : "Q" (*fpregs)); 29 "1:\n"
30 EX_TABLE(0b,1b)
31 : "=d" (rc), "=d" (orig_fpc)
32 : "d" (fpc), "0" (-EINVAL));
33 return rc;
34}
35
36static inline void save_fp_ctl(u32 *fpc)
37{
24 if (!MACHINE_HAS_IEEE) 38 if (!MACHINE_HAS_IEEE)
25 return; 39 return;
40
26 asm volatile( 41 asm volatile(
27 " stfpc %0\n" 42 " stfpc %0\n"
28 " std 1,%O0+16(%R0)\n" 43 : "+Q" (*fpc));
29 " std 3,%O0+32(%R0)\n"
30 " std 5,%O0+48(%R0)\n"
31 " std 7,%O0+64(%R0)\n"
32 " std 8,%O0+72(%R0)\n"
33 " std 9,%O0+80(%R0)\n"
34 " std 10,%O0+88(%R0)\n"
35 " std 11,%O0+96(%R0)\n"
36 " std 12,%O0+104(%R0)\n"
37 " std 13,%O0+112(%R0)\n"
38 " std 14,%O0+120(%R0)\n"
39 " std 15,%O0+128(%R0)\n"
40 : "=Q" (*fpregs) : "Q" (*fpregs));
41} 44}
42 45
43static inline void restore_fp_regs(s390_fp_regs *fpregs) 46static inline int restore_fp_ctl(u32 *fpc)
44{ 47{
48 int rc;
49
50 if (!MACHINE_HAS_IEEE)
51 return 0;
52
45 asm volatile( 53 asm volatile(
46 " ld 0,%O0+8(%R0)\n" 54 "0: lfpc %1\n"
47 " ld 2,%O0+24(%R0)\n" 55 " la %0,0\n"
48 " ld 4,%O0+40(%R0)\n" 56 "1:\n"
49 " ld 6,%O0+56(%R0)" 57 EX_TABLE(0b,1b)
50 : : "Q" (*fpregs)); 58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
59 return rc;
60}
61
62static inline void save_fp_regs(freg_t *fprs)
63{
64 asm volatile("std 0,%0" : "=Q" (fprs[0]));
65 asm volatile("std 2,%0" : "=Q" (fprs[2]));
66 asm volatile("std 4,%0" : "=Q" (fprs[4]));
67 asm volatile("std 6,%0" : "=Q" (fprs[6]));
51 if (!MACHINE_HAS_IEEE) 68 if (!MACHINE_HAS_IEEE)
52 return; 69 return;
53 asm volatile( 70 asm volatile("std 1,%0" : "=Q" (fprs[1]));
54 " lfpc %0\n" 71 asm volatile("std 3,%0" : "=Q" (fprs[3]));
55 " ld 1,%O0+16(%R0)\n" 72 asm volatile("std 5,%0" : "=Q" (fprs[5]));
56 " ld 3,%O0+32(%R0)\n" 73 asm volatile("std 7,%0" : "=Q" (fprs[7]));
57 " ld 5,%O0+48(%R0)\n" 74 asm volatile("std 8,%0" : "=Q" (fprs[8]));
58 " ld 7,%O0+64(%R0)\n" 75 asm volatile("std 9,%0" : "=Q" (fprs[9]));
59 " ld 8,%O0+72(%R0)\n" 76 asm volatile("std 10,%0" : "=Q" (fprs[10]));
60 " ld 9,%O0+80(%R0)\n" 77 asm volatile("std 11,%0" : "=Q" (fprs[11]));
61 " ld 10,%O0+88(%R0)\n" 78 asm volatile("std 12,%0" : "=Q" (fprs[12]));
62 " ld 11,%O0+96(%R0)\n" 79 asm volatile("std 13,%0" : "=Q" (fprs[13]));
63 " ld 12,%O0+104(%R0)\n" 80 asm volatile("std 14,%0" : "=Q" (fprs[14]));
64 " ld 13,%O0+112(%R0)\n" 81 asm volatile("std 15,%0" : "=Q" (fprs[15]));
65 " ld 14,%O0+120(%R0)\n" 82}
66 " ld 15,%O0+128(%R0)\n" 83
67 : : "Q" (*fpregs)); 84static inline void restore_fp_regs(freg_t *fprs)
85{
86 asm volatile("ld 0,%0" : : "Q" (fprs[0]));
87 asm volatile("ld 2,%0" : : "Q" (fprs[2]));
88 asm volatile("ld 4,%0" : : "Q" (fprs[4]));
89 asm volatile("ld 6,%0" : : "Q" (fprs[6]));
90 if (!MACHINE_HAS_IEEE)
91 return;
92 asm volatile("ld 1,%0" : : "Q" (fprs[1]));
93 asm volatile("ld 3,%0" : : "Q" (fprs[3]));
94 asm volatile("ld 5,%0" : : "Q" (fprs[5]));
95 asm volatile("ld 7,%0" : : "Q" (fprs[7]));
96 asm volatile("ld 8,%0" : : "Q" (fprs[8]));
97 asm volatile("ld 9,%0" : : "Q" (fprs[9]));
98 asm volatile("ld 10,%0" : : "Q" (fprs[10]));
99 asm volatile("ld 11,%0" : : "Q" (fprs[11]));
100 asm volatile("ld 12,%0" : : "Q" (fprs[12]));
101 asm volatile("ld 13,%0" : : "Q" (fprs[13]));
102 asm volatile("ld 14,%0" : : "Q" (fprs[14]));
103 asm volatile("ld 15,%0" : : "Q" (fprs[15]));
68} 104}
69 105
70static inline void save_access_regs(unsigned int *acrs) 106static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
83 119
84#define switch_to(prev,next,last) do { \ 120#define switch_to(prev,next,last) do { \
85 if (prev->mm) { \ 121 if (prev->mm) { \
86 save_fp_regs(&prev->thread.fp_regs); \ 122 save_fp_ctl(&prev->thread.fp_regs.fpc); \
123 save_fp_regs(prev->thread.fp_regs.fprs); \
87 save_access_regs(&prev->thread.acrs[0]); \ 124 save_access_regs(&prev->thread.acrs[0]); \
88 save_ri_cb(prev->thread.ri_cb); \ 125 save_ri_cb(prev->thread.ri_cb); \
89 } \ 126 } \
90 if (next->mm) { \ 127 if (next->mm) { \
91 restore_fp_regs(&next->thread.fp_regs); \ 128 restore_fp_ctl(&next->thread.fp_regs.fpc); \
129 restore_fp_regs(next->thread.fp_regs.fprs); \
92 restore_access_regs(&next->thread.acrs[0]); \ 130 restore_access_regs(&next->thread.acrs[0]); \
93 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 131 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
94 update_cr_regs(next); \ 132 update_cr_regs(next); \
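
The split into save/restore_fp_ctl() and save/restore_fp_regs() exists because loading the floating-point-control word is the step that can fail; callers such as the signal-restore path validate or load it first and only then touch the data registers. A userspace sketch of that ordering (the validity test is an arbitrary stand-in, not the hardware's rules):

#include <stdio.h>
#include <string.h>

struct fake_fpu {
        unsigned int fpc;
        double fprs[16];
};

static struct fake_fpu hw;              /* stand-in for the FPU state */

static int restore_fp_ctl(const unsigned int *fpc)
{
        if (*fpc & 0x00000070)          /* pretend these bits are invalid */
                return -1;              /* the real helper returns -EINVAL */
        hw.fpc = *fpc;
        return 0;
}

static void restore_fp_regs(const double *fprs)
{
        memcpy(hw.fprs, fprs, sizeof(hw.fprs));
}

int main(void)
{
        struct fake_fpu user = { .fpc = 0x00000070, .fprs = { 1.0 } };

        if (restore_fp_ctl(&user.fpc))  /* bad fpc: leave the registers alone */
                printf("invalid fpc, frame rejected\n");
        else
                restore_fp_regs(user.fprs);
        return 0;
}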
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 819b94d22720..8beee1cceba4 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,9 +71,11 @@ static inline void local_tick_enable(unsigned long long comp)
71 71
72typedef unsigned long long cycles_t; 72typedef unsigned long long cycles_t;
73 73
74static inline void get_tod_clock_ext(char *clk) 74static inline void get_tod_clock_ext(char clk[16])
75{ 75{
76 asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); 76 typedef struct { char _[sizeof(clk)]; } addrtype;
77
78 asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
77} 79}
78 80
79static inline unsigned long long get_tod_clock(void) 81static inline unsigned long long get_tod_clock(void)
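
A side note on the addrtype trick in get_tod_clock_ext(): the dummy type is there to tell the compiler how many bytes the inline assembly writes, but sizeof applied to an array parameter yields the pointer size because the array decays. A two-line demonstration of that C rule:

#include <stdio.h>

static void show(char clk[16])
{
        /* clk has decayed to char *, so this is sizeof(char *) */
        printf("%zu\n", sizeof(clk));
}

int main(void)
{
        char clk[16];

        printf("%zu ", sizeof(clk));    /* 16: here clk really is an array */
        show(clk);                      /* prints: 16 8 (on an LP64 target) */
        return 0;
}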
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9c33ed4e666f..79330af9a5f8 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
94 94
95struct uaccess_ops { 95struct uaccess_ops {
96 size_t (*copy_from_user)(size_t, const void __user *, void *); 96 size_t (*copy_from_user)(size_t, const void __user *, void *);
97 size_t (*copy_from_user_small)(size_t, const void __user *, void *);
98 size_t (*copy_to_user)(size_t, void __user *, const void *); 97 size_t (*copy_to_user)(size_t, void __user *, const void *);
99 size_t (*copy_to_user_small)(size_t, void __user *, const void *);
100 size_t (*copy_in_user)(size_t, void __user *, const void __user *); 98 size_t (*copy_in_user)(size_t, void __user *, const void __user *);
101 size_t (*clear_user)(size_t, void __user *); 99 size_t (*clear_user)(size_t, void __user *);
102 size_t (*strnlen_user)(size_t, const char __user *); 100 size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
106}; 104};
107 105
108extern struct uaccess_ops uaccess; 106extern struct uaccess_ops uaccess;
109extern struct uaccess_ops uaccess_std;
110extern struct uaccess_ops uaccess_mvcos; 107extern struct uaccess_ops uaccess_mvcos;
111extern struct uaccess_ops uaccess_mvcos_switch;
112extern struct uaccess_ops uaccess_pt; 108extern struct uaccess_ops uaccess_pt;
113 109
114extern int __handle_fault(unsigned long, unsigned long, int); 110extern int __handle_fault(unsigned long, unsigned long, int);
115 111
116static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 112static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
117{ 113{
118 size = uaccess.copy_to_user_small(size, ptr, x); 114 size = uaccess.copy_to_user(size, ptr, x);
119 return size ? -EFAULT : size; 115 return size ? -EFAULT : size;
120} 116}
121 117
122static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) 118static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
123{ 119{
124 size = uaccess.copy_from_user_small(size, ptr, x); 120 size = uaccess.copy_from_user(size, ptr, x);
125 return size ? -EFAULT : size; 121 return size ? -EFAULT : size;
126} 122}
127 123
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
226static inline unsigned long __must_check 222static inline unsigned long __must_check
227__copy_to_user(void __user *to, const void *from, unsigned long n) 223__copy_to_user(void __user *to, const void *from, unsigned long n)
228{ 224{
229 if (__builtin_constant_p(n) && (n <= 256)) 225 return uaccess.copy_to_user(n, to, from);
230 return uaccess.copy_to_user_small(n, to, from);
231 else
232 return uaccess.copy_to_user(n, to, from);
233} 226}
234 227
235#define __copy_to_user_inatomic __copy_to_user 228#define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
275static inline unsigned long __must_check 268static inline unsigned long __must_check
276__copy_from_user(void *to, const void __user *from, unsigned long n) 269__copy_from_user(void *to, const void __user *from, unsigned long n)
277{ 270{
278 if (__builtin_constant_p(n) && (n <= 256)) 271 return uaccess.copy_from_user(n, from, to);
279 return uaccess.copy_from_user_small(n, from, to);
280 else
281 return uaccess.copy_from_user(n, from, to);
282} 272}
283 273
284extern void copy_from_user_overflow(void) 274extern void copy_from_user_overflow(void)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 7a84619e315e..7e0b498a2c2b 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -199,6 +199,7 @@ typedef union
199typedef struct 199typedef struct
200{ 200{
201 __u32 fpc; 201 __u32 fpc;
202 __u32 pad;
202 freg_t fprs[NUM_FPRS]; 203 freg_t fprs[NUM_FPRS];
203} s390_fp_regs; 204} s390_fp_regs;
204 205
@@ -206,7 +207,6 @@ typedef struct
206#define FPC_FLAGS_MASK 0x00F80000 207#define FPC_FLAGS_MASK 0x00F80000
207#define FPC_DXC_MASK 0x0000FF00 208#define FPC_DXC_MASK 0x0000FF00
208#define FPC_RM_MASK 0x00000003 209#define FPC_RM_MASK 0x00000003
209#define FPC_VALID_MASK 0xF8F8FF03
210 210
211/* this typedef defines what a Program Status Word looks like */            211/* this typedef defines what a Program Status Word looks like */
212typedef struct 212typedef struct
@@ -263,7 +263,7 @@ typedef struct
263#define PSW_MASK_EA 0x0000000100000000UL 263#define PSW_MASK_EA 0x0000000100000000UL
264#define PSW_MASK_BA 0x0000000080000000UL 264#define PSW_MASK_BA 0x0000000080000000UL
265 265
266#define PSW_MASK_USER 0x0000FF8180000000UL 266#define PSW_MASK_USER 0x0000FF0180000000UL
267 267
268#define PSW_ADDR_AMODE 0x0000000000000000UL 268#define PSW_ADDR_AMODE 0x0000000000000000UL
269#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL 269#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index 584787f6ce44..b30de9c01bbe 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -49,6 +49,7 @@ typedef struct
49typedef struct 49typedef struct
50{ 50{
51 unsigned int fpc; 51 unsigned int fpc;
52 unsigned int pad;
52 double fprs[__NUM_FPRS]; 53 double fprs[__NUM_FPRS];
53} _s390_fp_regs; 54} _s390_fp_regs;
54 55
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4bb2a4656163..2403303cfed7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
28 28
29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
30 30
31obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o 31obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o
32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 33obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
deleted file mode 100644
index 102da5e23037..000000000000
--- a/arch/s390/kernel/bitmap.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
3 * See include/asm/{bitops.h|posix_types.h} for details
4 *
5 * Copyright IBM Corp. 1999, 2009
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 */
8
9#include <linux/bitops.h>
10#include <linux/module.h>
11
12const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
13EXPORT_SYMBOL(_oi_bitmap);
14
15const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
16EXPORT_SYMBOL(_ni_bitmap);
17
18const char _zb_findmap[] = {
19 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
20 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
21 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
22 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
23 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
24 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
25 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
26 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
27 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
28 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
29 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
30 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
31 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
32 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
33 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
34 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
35EXPORT_SYMBOL(_zb_findmap);
36
37const char _sb_findmap[] = {
38 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
39 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
40 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
41 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
42 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
43 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
44 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
45 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
46 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
47 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
48 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
49 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
50 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
51 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
52 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
53 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
54EXPORT_SYMBOL(_sb_findmap);
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index dd62071624be..3a414c0f93ed 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -146,15 +146,14 @@ static void __init cache_build_info(void)
146 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 146 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
147 for (level = 0; level < CACHE_MAX_LEVEL; level++) { 147 for (level = 0; level < CACHE_MAX_LEVEL; level++) {
148 switch (ct.ci[level].scope) { 148 switch (ct.ci[level].scope) {
149 case CACHE_SCOPE_NOTEXISTS:
150 case CACHE_SCOPE_RESERVED:
151 return;
152 case CACHE_SCOPE_SHARED: 149 case CACHE_SCOPE_SHARED:
153 private = 0; 150 private = 0;
154 break; 151 break;
155 case CACHE_SCOPE_PRIVATE: 152 case CACHE_SCOPE_PRIVATE:
156 private = 1; 153 private = 1;
157 break; 154 break;
155 default:
156 return;
158 } 157 }
159 if (ct.ci[level].type == CACHE_TYPE_SEPARATE) { 158 if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
160 rc = cache_add(level, private, CACHE_TYPE_DATA); 159 rc = cache_add(level, private, CACHE_TYPE_DATA);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1f1b8c70ab97..e030d2bdec1b 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -58,10 +58,6 @@
58 58
59#include "compat_linux.h" 59#include "compat_linux.h"
60 60
61u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
62 PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
63 PSW32_MASK_PSTATE | PSW32_ASC_HOME;
64
65/* For this source file, we want overflow handling. */ 61/* For this source file, we want overflow handling. */
66 62
67#undef high2lowuid 63#undef high2lowuid
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 976518c0592a..1bfda3eca379 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -27,6 +27,7 @@ typedef union
27typedef struct 27typedef struct
28{ 28{
29 unsigned int fpc; 29 unsigned int fpc;
30 unsigned int pad;
30 freg_t32 fprs[__NUM_FPRS]; 31 freg_t32 fprs[__NUM_FPRS];
31} _s390_fp_regs32; 32} _s390_fp_regs32;
32 33
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index adaa9e9478d8..5a3ab5c191fd 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -153,57 +153,66 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
153 153
154static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) 154static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
155{ 155{
156 _s390_regs_common32 regs32; 156 _sigregs32 user_sregs;
157 int err, i; 157 int i;
158 158
159 regs32.psw.mask = psw32_user_bits | 159 user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
160 ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER); 160 user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
161 regs32.psw.addr = (__u32) regs->psw.addr | 161 user_sregs.regs.psw.mask |= PSW32_USER_BITS;
162 user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
162 (__u32)(regs->psw.mask & PSW_MASK_BA); 163 (__u32)(regs->psw.mask & PSW_MASK_BA);
163 for (i = 0; i < NUM_GPRS; i++) 164 for (i = 0; i < NUM_GPRS; i++)
164 regs32.gprs[i] = (__u32) regs->gprs[i]; 165 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
165 save_access_regs(current->thread.acrs); 166 save_access_regs(current->thread.acrs);
166 memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs)); 167 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
167 err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32)); 168 sizeof(user_sregs.regs.acrs));
168 if (err) 169 save_fp_ctl(&current->thread.fp_regs.fpc);
169 return err; 170 save_fp_regs(current->thread.fp_regs.fprs);
170 save_fp_regs(&current->thread.fp_regs); 171 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
171 /* s390_fp_regs and _s390_fp_regs32 are the same ! */ 172 sizeof(user_sregs.fpregs));
172 return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs, 173 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
173 sizeof(_s390_fp_regs32)); 174 return -EFAULT;
175 return 0;
174} 176}
175 177
176static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) 178static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
177{ 179{
178 _s390_regs_common32 regs32; 180 _sigregs32 user_sregs;
179 int err, i; 181 int i;
180 182
181 /* Always make any pending restarted system call return -EINTR */ 183 /* Always make any pending restarted system call return -EINTR */
182 current_thread_info()->restart_block.fn = do_no_restart_syscall; 184 current_thread_info()->restart_block.fn = do_no_restart_syscall;
183 185
184 err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32)); 186 if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
185 if (err) 187 return -EFAULT;
186 return err; 188
189 if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
190 return -EINVAL;
191
192 /* Loading the floating-point-control word can fail. Do that first. */
193 if (restore_fp_ctl(&user_sregs.fpregs.fpc))
194 return -EINVAL;
195
196 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
187 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 197 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
188 (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | 198 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
189 (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); 199 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
200 (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
190 /* Check for invalid user address space control. */ 201 /* Check for invalid user address space control. */
191 if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) 202 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
192 regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | 203 regs->psw.mask = PSW_ASC_PRIMARY |
193 (regs->psw.mask & ~PSW_MASK_ASC); 204 (regs->psw.mask & ~PSW_MASK_ASC);
194 regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); 205 regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
195 for (i = 0; i < NUM_GPRS; i++) 206 for (i = 0; i < NUM_GPRS; i++)
196 regs->gprs[i] = (__u64) regs32.gprs[i]; 207 regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
197 memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs)); 208 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
209 sizeof(current->thread.acrs));
198 restore_access_regs(current->thread.acrs); 210 restore_access_regs(current->thread.acrs);
199 211
200 err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs, 212 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
201 sizeof(_s390_fp_regs32)); 213 sizeof(current->thread.fp_regs));
202 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
203 if (err)
204 return err;
205 214
206 restore_fp_regs(&current->thread.fp_regs); 215 restore_fp_regs(current->thread.fp_regs.fprs);
207 clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ 216 clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
208 return 0; 217 return 0;
209} 218}
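
The rewritten helpers build the complete _sigregs32 image in a local buffer, transfer it with a single __copy_to_user()/__copy_from_user() call, and translate any shortfall into -EFAULT themselves, since those primitives return the number of bytes left uncopied rather than an errno (the old code passed that raw count back to its callers). A self-contained sketch of the convention; copy_to_user_sketch() is a stand-in, not the kernel primitive:

#include <errno.h>
#include <string.h>

/* Stand-in for __copy_to_user(): returns the number of bytes NOT copied. */
static unsigned long copy_to_user_sketch(void *to, const void *from,
					 unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

static int put_sigframe(void *user_ptr, const void *frame, unsigned long size)
{
	if (copy_to_user_sketch(user_ptr, frame, size))
		return -EFAULT;		/* map a partial copy to an errno */
	return 0;
}
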
@@ -215,18 +224,18 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
215 224
216 for (i = 0; i < NUM_GPRS; i++) 225 for (i = 0; i < NUM_GPRS; i++)
217 gprs_high[i] = regs->gprs[i] >> 32; 226 gprs_high[i] = regs->gprs[i] >> 32;
218 227 if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
219 return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high)); 228 return -EFAULT;
229 return 0;
220} 230}
221 231
222static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs) 232static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
223{ 233{
224 __u32 gprs_high[NUM_GPRS]; 234 __u32 gprs_high[NUM_GPRS];
225 int err, i; 235 int i;
226 236
227 err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high)); 237 if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
228 if (err) 238 return -EFAULT;
229 return err;
230 for (i = 0; i < NUM_GPRS; i++) 239 for (i = 0; i < NUM_GPRS; i++)
231 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 240 *(__u32 *)&regs->gprs[i] = gprs_high[i];
232 return 0; 241 return 0;
@@ -348,7 +357,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
348 regs->gprs[15] = (__force __u64) frame; 357 regs->gprs[15] = (__force __u64) frame;
349 /* Force 31 bit amode and default user address space control. */ 358 /* Force 31 bit amode and default user address space control. */
350 regs->psw.mask = PSW_MASK_BA | 359 regs->psw.mask = PSW_MASK_BA |
351 (psw_user_bits & PSW_MASK_ASC) | 360 (PSW_USER_BITS & PSW_MASK_ASC) |
352 (regs->psw.mask & ~PSW_MASK_ASC); 361 (regs->psw.mask & ~PSW_MASK_ASC);
353 regs->psw.addr = (__force __u64) ka->sa.sa_handler; 362 regs->psw.addr = (__force __u64) ka->sa.sa_handler;
354 363
@@ -415,7 +424,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
415 regs->gprs[15] = (__force __u64) frame; 424 regs->gprs[15] = (__force __u64) frame;
416 /* Force 31 bit amode and default user address space control. */ 425 /* Force 31 bit amode and default user address space control. */
417 regs->psw.mask = PSW_MASK_BA | 426 regs->psw.mask = PSW_MASK_BA |
418 (psw_user_bits & PSW_MASK_ASC) | 427 (PSW_USER_BITS & PSW_MASK_ASC) |
419 (regs->psw.mask & ~PSW_MASK_ASC); 428 (regs->psw.mask & ~PSW_MASK_ASC);
420 regs->psw.addr = (__u64 __force) ka->sa.sa_handler; 429 regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
421 430
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 7dd21720e5b0..f45b2ab0cb81 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -22,6 +22,32 @@
22#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) 22#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
23#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) 23#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
24 24
25struct dump_save_areas dump_save_areas;
26
27/*
28 * Allocate and add a save area for a CPU
29 */
30struct save_area *dump_save_area_create(int cpu)
31{
32 struct save_area **save_areas, *save_area;
33
34 save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
35 if (!save_area)
36 return NULL;
37 if (cpu + 1 > dump_save_areas.count) {
38 dump_save_areas.count = cpu + 1;
39 save_areas = krealloc(dump_save_areas.areas,
40 dump_save_areas.count * sizeof(void *),
41 GFP_KERNEL | __GFP_ZERO);
42 if (!save_areas) {
43 kfree(save_area);
44 return NULL;
45 }
46 dump_save_areas.areas = save_areas;
47 }
48 dump_save_areas.areas[cpu] = save_area;
49 return save_area;
50}
25 51
26/* 52/*
27 * Return physical address for virtual address 53 * Return physical address for virtual address
@@ -45,7 +71,6 @@ static inline void *load_real_addr(void *addr)
45static int copy_from_realmem(void *dest, void *src, size_t count) 71static int copy_from_realmem(void *dest, void *src, size_t count)
46{ 72{
47 unsigned long size; 73 unsigned long size;
48 int rc;
49 74
50 if (!count) 75 if (!count)
51 return 0; 76 return 0;
@@ -451,8 +476,8 @@ static int get_cpu_cnt(void)
451{ 476{
452 int i, cpus = 0; 477 int i, cpus = 0;
453 478
454 for (i = 0; zfcpdump_save_areas[i]; i++) { 479 for (i = 0; i < dump_save_areas.count; i++) {
455 if (zfcpdump_save_areas[i]->pref_reg == 0) 480 if (dump_save_areas.areas[i]->pref_reg == 0)
456 continue; 481 continue;
457 cpus++; 482 cpus++;
458 } 483 }
@@ -523,8 +548,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
523 548
524 ptr = nt_prpsinfo(ptr); 549 ptr = nt_prpsinfo(ptr);
525 550
526 for (i = 0; zfcpdump_save_areas[i]; i++) { 551 for (i = 0; i < dump_save_areas.count; i++) {
527 sa = zfcpdump_save_areas[i]; 552 sa = dump_save_areas.areas[i];
528 if (sa->pref_reg == 0) 553 if (sa->pref_reg == 0)
529 continue; 554 continue;
530 ptr = fill_cpu_elf_notes(ptr, sa); 555 ptr = fill_cpu_elf_notes(ptr, sa);
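
dump_save_area_create() replaces the fixed-size zfcpdump_save_areas[NR_CPUS + 1] array (dropped from smp.c further down) with a pointer array that is grown on demand via krealloc() with zeroed new slots, freeing the freshly allocated save area again if the resize fails. A userspace sketch of the same grow-on-demand pattern; the names are placeholders:

#include <stdlib.h>
#include <string.h>

struct area { char regs[512]; };

static struct area **areas;
static int area_count;

struct area *area_create(int cpu)
{
	struct area **grown, *area;

	area = malloc(sizeof(*area));
	if (!area)
		return NULL;
	if (cpu + 1 > area_count) {
		grown = realloc(areas, (cpu + 1) * sizeof(*areas));
		if (!grown) {
			free(area);	/* do not leak the new save area */
			return NULL;
		}
		memset(grown + area_count, 0,
		       (cpu + 1 - area_count) * sizeof(*grown));
		areas = grown;
		area_count = cpu + 1;
	}
	areas[cpu] = area;
	return area;
}
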
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 17d62fe5d7b7..ee8390da6ea7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -889,7 +889,7 @@ static int debug_active=1;
889 * if debug_active is already off 889 * if debug_active is already off
890 */ 890 */
891static int 891static int
892s390dbf_procactive(ctl_table *table, int write, 892s390dbf_procactive(struct ctl_table *table, int write,
893 void __user *buffer, size_t *lenp, loff_t *ppos) 893 void __user *buffer, size_t *lenp, loff_t *ppos)
894{ 894{
895 if (!write || debug_stoppable || !debug_active) 895 if (!write || debug_stoppable || !debug_active)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index be87d3e05a5b..993efe6a887c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -23,6 +23,7 @@
23#include <linux/kdebug.h> 23#include <linux/kdebug.h>
24 24
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <asm/dis.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <linux/atomic.h> 28#include <linux/atomic.h>
28#include <asm/mathemu.h> 29#include <asm/mathemu.h>
@@ -37,17 +38,6 @@
37#define ONELONG "%016lx: " 38#define ONELONG "%016lx: "
38#endif /* CONFIG_64BIT */ 39#endif /* CONFIG_64BIT */
39 40
40#define OPERAND_GPR 0x1 /* Operand printed as %rx */
41#define OPERAND_FPR 0x2 /* Operand printed as %fx */
42#define OPERAND_AR 0x4 /* Operand printed as %ax */
43#define OPERAND_CR 0x8 /* Operand printed as %cx */
44#define OPERAND_DISP 0x10 /* Operand printed as displacement */
45#define OPERAND_BASE 0x20 /* Operand printed as base register */
46#define OPERAND_INDEX 0x40 /* Operand printed as index register */
47#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
48#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
49#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
50
51enum { 41enum {
52 UNUSED, /* Indicates the end of the operand list */ 42 UNUSED, /* Indicates the end of the operand list */
53 R_8, /* GPR starting at position 8 */ 43 R_8, /* GPR starting at position 8 */
@@ -155,19 +145,7 @@ enum {
155 INSTR_S_00, INSTR_S_RD, 145 INSTR_S_00, INSTR_S_RD,
156}; 146};
157 147
158struct operand { 148static const struct s390_operand operands[] =
159 int bits; /* The number of bits in the operand. */
160 int shift; /* The number of bits to shift. */
161 int flags; /* One bit syntax flags. */
162};
163
164struct insn {
165 const char name[5];
166 unsigned char opfrag;
167 unsigned char format;
168};
169
170static const struct operand operands[] =
171{ 149{
172 [UNUSED] = { 0, 0, 0 }, 150 [UNUSED] = { 0, 0, 0 },
173 [R_8] = { 4, 8, OPERAND_GPR }, 151 [R_8] = { 4, 8, OPERAND_GPR },
@@ -479,7 +457,7 @@ static char *long_insn_name[] = {
479 [LONG_INSN_PCISTB] = "pcistb", 457 [LONG_INSN_PCISTB] = "pcistb",
480}; 458};
481 459
482static struct insn opcode[] = { 460static struct s390_insn opcode[] = {
483#ifdef CONFIG_64BIT 461#ifdef CONFIG_64BIT
484 { "bprp", 0xc5, INSTR_MII_UPI }, 462 { "bprp", 0xc5, INSTR_MII_UPI },
485 { "bpp", 0xc7, INSTR_SMI_U0RDP }, 463 { "bpp", 0xc7, INSTR_SMI_U0RDP },
@@ -668,7 +646,7 @@ static struct insn opcode[] = {
668 { "", 0, INSTR_INVALID } 646 { "", 0, INSTR_INVALID }
669}; 647};
670 648
671static struct insn opcode_01[] = { 649static struct s390_insn opcode_01[] = {
672#ifdef CONFIG_64BIT 650#ifdef CONFIG_64BIT
673 { "ptff", 0x04, INSTR_E }, 651 { "ptff", 0x04, INSTR_E },
674 { "pfpo", 0x0a, INSTR_E }, 652 { "pfpo", 0x0a, INSTR_E },
@@ -684,7 +662,7 @@ static struct insn opcode_01[] = {
684 { "", 0, INSTR_INVALID } 662 { "", 0, INSTR_INVALID }
685}; 663};
686 664
687static struct insn opcode_a5[] = { 665static struct s390_insn opcode_a5[] = {
688#ifdef CONFIG_64BIT 666#ifdef CONFIG_64BIT
689 { "iihh", 0x00, INSTR_RI_RU }, 667 { "iihh", 0x00, INSTR_RI_RU },
690 { "iihl", 0x01, INSTR_RI_RU }, 668 { "iihl", 0x01, INSTR_RI_RU },
@@ -706,7 +684,7 @@ static struct insn opcode_a5[] = {
706 { "", 0, INSTR_INVALID } 684 { "", 0, INSTR_INVALID }
707}; 685};
708 686
709static struct insn opcode_a7[] = { 687static struct s390_insn opcode_a7[] = {
710#ifdef CONFIG_64BIT 688#ifdef CONFIG_64BIT
711 { "tmhh", 0x02, INSTR_RI_RU }, 689 { "tmhh", 0x02, INSTR_RI_RU },
712 { "tmhl", 0x03, INSTR_RI_RU }, 690 { "tmhl", 0x03, INSTR_RI_RU },
@@ -728,7 +706,7 @@ static struct insn opcode_a7[] = {
728 { "", 0, INSTR_INVALID } 706 { "", 0, INSTR_INVALID }
729}; 707};
730 708
731static struct insn opcode_aa[] = { 709static struct s390_insn opcode_aa[] = {
732#ifdef CONFIG_64BIT 710#ifdef CONFIG_64BIT
733 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, 711 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
734 { "rion", 0x01, INSTR_RI_RI }, 712 { "rion", 0x01, INSTR_RI_RI },
@@ -739,7 +717,7 @@ static struct insn opcode_aa[] = {
739 { "", 0, INSTR_INVALID } 717 { "", 0, INSTR_INVALID }
740}; 718};
741 719
742static struct insn opcode_b2[] = { 720static struct s390_insn opcode_b2[] = {
743#ifdef CONFIG_64BIT 721#ifdef CONFIG_64BIT
744 { "stckf", 0x7c, INSTR_S_RD }, 722 { "stckf", 0x7c, INSTR_S_RD },
745 { "lpp", 0x80, INSTR_S_RD }, 723 { "lpp", 0x80, INSTR_S_RD },
@@ -851,7 +829,7 @@ static struct insn opcode_b2[] = {
851 { "", 0, INSTR_INVALID } 829 { "", 0, INSTR_INVALID }
852}; 830};
853 831
854static struct insn opcode_b3[] = { 832static struct s390_insn opcode_b3[] = {
855#ifdef CONFIG_64BIT 833#ifdef CONFIG_64BIT
856 { "maylr", 0x38, INSTR_RRF_F0FF }, 834 { "maylr", 0x38, INSTR_RRF_F0FF },
857 { "mylr", 0x39, INSTR_RRF_F0FF }, 835 { "mylr", 0x39, INSTR_RRF_F0FF },
@@ -1034,7 +1012,7 @@ static struct insn opcode_b3[] = {
1034 { "", 0, INSTR_INVALID } 1012 { "", 0, INSTR_INVALID }
1035}; 1013};
1036 1014
1037static struct insn opcode_b9[] = { 1015static struct s390_insn opcode_b9[] = {
1038#ifdef CONFIG_64BIT 1016#ifdef CONFIG_64BIT
1039 { "lpgr", 0x00, INSTR_RRE_RR }, 1017 { "lpgr", 0x00, INSTR_RRE_RR },
1040 { "lngr", 0x01, INSTR_RRE_RR }, 1018 { "lngr", 0x01, INSTR_RRE_RR },
@@ -1167,7 +1145,7 @@ static struct insn opcode_b9[] = {
1167 { "", 0, INSTR_INVALID } 1145 { "", 0, INSTR_INVALID }
1168}; 1146};
1169 1147
1170static struct insn opcode_c0[] = { 1148static struct s390_insn opcode_c0[] = {
1171#ifdef CONFIG_64BIT 1149#ifdef CONFIG_64BIT
1172 { "lgfi", 0x01, INSTR_RIL_RI }, 1150 { "lgfi", 0x01, INSTR_RIL_RI },
1173 { "xihf", 0x06, INSTR_RIL_RU }, 1151 { "xihf", 0x06, INSTR_RIL_RU },
@@ -1187,7 +1165,7 @@ static struct insn opcode_c0[] = {
1187 { "", 0, INSTR_INVALID } 1165 { "", 0, INSTR_INVALID }
1188}; 1166};
1189 1167
1190static struct insn opcode_c2[] = { 1168static struct s390_insn opcode_c2[] = {
1191#ifdef CONFIG_64BIT 1169#ifdef CONFIG_64BIT
1192 { "msgfi", 0x00, INSTR_RIL_RI }, 1170 { "msgfi", 0x00, INSTR_RIL_RI },
1193 { "msfi", 0x01, INSTR_RIL_RI }, 1171 { "msfi", 0x01, INSTR_RIL_RI },
@@ -1205,7 +1183,7 @@ static struct insn opcode_c2[] = {
1205 { "", 0, INSTR_INVALID } 1183 { "", 0, INSTR_INVALID }
1206}; 1184};
1207 1185
1208static struct insn opcode_c4[] = { 1186static struct s390_insn opcode_c4[] = {
1209#ifdef CONFIG_64BIT 1187#ifdef CONFIG_64BIT
1210 { "llhrl", 0x02, INSTR_RIL_RP }, 1188 { "llhrl", 0x02, INSTR_RIL_RP },
1211 { "lghrl", 0x04, INSTR_RIL_RP }, 1189 { "lghrl", 0x04, INSTR_RIL_RP },
@@ -1222,7 +1200,7 @@ static struct insn opcode_c4[] = {
1222 { "", 0, INSTR_INVALID } 1200 { "", 0, INSTR_INVALID }
1223}; 1201};
1224 1202
1225static struct insn opcode_c6[] = { 1203static struct s390_insn opcode_c6[] = {
1226#ifdef CONFIG_64BIT 1204#ifdef CONFIG_64BIT
1227 { "exrl", 0x00, INSTR_RIL_RP }, 1205 { "exrl", 0x00, INSTR_RIL_RP },
1228 { "pfdrl", 0x02, INSTR_RIL_UP }, 1206 { "pfdrl", 0x02, INSTR_RIL_UP },
@@ -1240,7 +1218,7 @@ static struct insn opcode_c6[] = {
1240 { "", 0, INSTR_INVALID } 1218 { "", 0, INSTR_INVALID }
1241}; 1219};
1242 1220
1243static struct insn opcode_c8[] = { 1221static struct s390_insn opcode_c8[] = {
1244#ifdef CONFIG_64BIT 1222#ifdef CONFIG_64BIT
1245 { "mvcos", 0x00, INSTR_SSF_RRDRD }, 1223 { "mvcos", 0x00, INSTR_SSF_RRDRD },
1246 { "ectg", 0x01, INSTR_SSF_RRDRD }, 1224 { "ectg", 0x01, INSTR_SSF_RRDRD },
@@ -1251,7 +1229,7 @@ static struct insn opcode_c8[] = {
1251 { "", 0, INSTR_INVALID } 1229 { "", 0, INSTR_INVALID }
1252}; 1230};
1253 1231
1254static struct insn opcode_cc[] = { 1232static struct s390_insn opcode_cc[] = {
1255#ifdef CONFIG_64BIT 1233#ifdef CONFIG_64BIT
1256 { "brcth", 0x06, INSTR_RIL_RP }, 1234 { "brcth", 0x06, INSTR_RIL_RP },
1257 { "aih", 0x08, INSTR_RIL_RI }, 1235 { "aih", 0x08, INSTR_RIL_RI },
@@ -1263,7 +1241,7 @@ static struct insn opcode_cc[] = {
1263 { "", 0, INSTR_INVALID } 1241 { "", 0, INSTR_INVALID }
1264}; 1242};
1265 1243
1266static struct insn opcode_e3[] = { 1244static struct s390_insn opcode_e3[] = {
1267#ifdef CONFIG_64BIT 1245#ifdef CONFIG_64BIT
1268 { "ltg", 0x02, INSTR_RXY_RRRD }, 1246 { "ltg", 0x02, INSTR_RXY_RRRD },
1269 { "lrag", 0x03, INSTR_RXY_RRRD }, 1247 { "lrag", 0x03, INSTR_RXY_RRRD },
@@ -1369,7 +1347,7 @@ static struct insn opcode_e3[] = {
1369 { "", 0, INSTR_INVALID } 1347 { "", 0, INSTR_INVALID }
1370}; 1348};
1371 1349
1372static struct insn opcode_e5[] = { 1350static struct s390_insn opcode_e5[] = {
1373#ifdef CONFIG_64BIT 1351#ifdef CONFIG_64BIT
1374 { "strag", 0x02, INSTR_SSE_RDRD }, 1352 { "strag", 0x02, INSTR_SSE_RDRD },
1375 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1353 { "mvhhi", 0x44, INSTR_SIL_RDI },
@@ -1391,7 +1369,7 @@ static struct insn opcode_e5[] = {
1391 { "", 0, INSTR_INVALID } 1369 { "", 0, INSTR_INVALID }
1392}; 1370};
1393 1371
1394static struct insn opcode_eb[] = { 1372static struct s390_insn opcode_eb[] = {
1395#ifdef CONFIG_64BIT 1373#ifdef CONFIG_64BIT
1396 { "lmg", 0x04, INSTR_RSY_RRRD }, 1374 { "lmg", 0x04, INSTR_RSY_RRRD },
1397 { "srag", 0x0a, INSTR_RSY_RRRD }, 1375 { "srag", 0x0a, INSTR_RSY_RRRD },
@@ -1465,7 +1443,7 @@ static struct insn opcode_eb[] = {
1465 { "", 0, INSTR_INVALID } 1443 { "", 0, INSTR_INVALID }
1466}; 1444};
1467 1445
1468static struct insn opcode_ec[] = { 1446static struct s390_insn opcode_ec[] = {
1469#ifdef CONFIG_64BIT 1447#ifdef CONFIG_64BIT
1470 { "brxhg", 0x44, INSTR_RIE_RRP }, 1448 { "brxhg", 0x44, INSTR_RIE_RRP },
1471 { "brxlg", 0x45, INSTR_RIE_RRP }, 1449 { "brxlg", 0x45, INSTR_RIE_RRP },
@@ -1504,7 +1482,7 @@ static struct insn opcode_ec[] = {
1504 { "", 0, INSTR_INVALID } 1482 { "", 0, INSTR_INVALID }
1505}; 1483};
1506 1484
1507static struct insn opcode_ed[] = { 1485static struct s390_insn opcode_ed[] = {
1508#ifdef CONFIG_64BIT 1486#ifdef CONFIG_64BIT
1509 { "mayl", 0x38, INSTR_RXF_FRRDF }, 1487 { "mayl", 0x38, INSTR_RXF_FRRDF },
1510 { "myl", 0x39, INSTR_RXF_FRRDF }, 1488 { "myl", 0x39, INSTR_RXF_FRRDF },
@@ -1572,7 +1550,7 @@ static struct insn opcode_ed[] = {
1572 1550
1573/* Extracts an operand value from an instruction. */ 1551/* Extracts an operand value from an instruction. */
1574static unsigned int extract_operand(unsigned char *code, 1552static unsigned int extract_operand(unsigned char *code,
1575 const struct operand *operand) 1553 const struct s390_operand *operand)
1576{ 1554{
1577 unsigned int val; 1555 unsigned int val;
1578 int bits; 1556 int bits;
@@ -1608,16 +1586,11 @@ static unsigned int extract_operand(unsigned char *code,
1608 return val; 1586 return val;
1609} 1587}
1610 1588
1611static inline int insn_length(unsigned char code) 1589struct s390_insn *find_insn(unsigned char *code)
1612{
1613 return ((((int) code + 64) >> 7) + 1) << 1;
1614}
1615
1616static struct insn *find_insn(unsigned char *code)
1617{ 1590{
1618 unsigned char opfrag = code[1]; 1591 unsigned char opfrag = code[1];
1619 unsigned char opmask; 1592 unsigned char opmask;
1620 struct insn *table; 1593 struct s390_insn *table;
1621 1594
1622 switch (code[0]) { 1595 switch (code[0]) {
1623 case 0x01: 1596 case 0x01:
@@ -1706,7 +1679,7 @@ static struct insn *find_insn(unsigned char *code)
1706 */ 1679 */
1707int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len) 1680int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
1708{ 1681{
1709 struct insn *insn; 1682 struct s390_insn *insn;
1710 1683
1711 insn = find_insn(instruction); 1684 insn = find_insn(instruction);
1712 if (!insn) 1685 if (!insn)
@@ -1722,9 +1695,9 @@ EXPORT_SYMBOL_GPL(insn_to_mnemonic);
1722 1695
1723static int print_insn(char *buffer, unsigned char *code, unsigned long addr) 1696static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1724{ 1697{
1725 struct insn *insn; 1698 struct s390_insn *insn;
1726 const unsigned char *ops; 1699 const unsigned char *ops;
1727 const struct operand *operand; 1700 const struct s390_operand *operand;
1728 unsigned int value; 1701 unsigned int value;
1729 char separator; 1702 char separator;
1730 char *ptr; 1703 char *ptr;
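
The private insn_length() helper dropped from this file moves to <asm/dis.h> (now included at the top), and the kprobes hunks below switch from the open-coded ((opcode >> 14) + 3) & -2 to it. The formula relies on the s390 encoding rule that the two most significant bits of the first opcode byte select the instruction length: 00 means 2 bytes, 01 and 10 mean 4 bytes, 11 means 6 bytes. A standalone check of the arithmetic:

#include <assert.h>

static int insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	unsigned int c, expect;

	for (c = 0; c < 256; c++) {
		switch (c >> 6) {	/* two most significant opcode bits */
		case 0:
			expect = 2;
			break;
		case 1:
		case 2:
			expect = 4;
			break;
		default:
			expect = 6;
			break;
		}
		assert(insn_length((unsigned char) c) == expect);
	}
	return 0;
}
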
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 99e7f6035895..e6af9406987c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -15,6 +15,7 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/debug.h> 17#include <asm/debug.h>
18#include <asm/dis.h>
18#include <asm/ipl.h> 19#include <asm/ipl.h>
19 20
20#ifndef CONFIG_64BIT 21#ifndef CONFIG_64BIT
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index dc8770d7173c..96543ac400a7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -206,6 +206,7 @@ static noinline __init void clear_bss_section(void)
206 */ 206 */
207static noinline __init void init_kernel_storage_key(void) 207static noinline __init void init_kernel_storage_key(void)
208{ 208{
209#if PAGE_DEFAULT_KEY
209 unsigned long end_pfn, init_pfn; 210 unsigned long end_pfn, init_pfn;
210 211
211 end_pfn = PFN_UP(__pa(&_end)); 212 end_pfn = PFN_UP(__pa(&_end));
@@ -213,6 +214,7 @@ static noinline __init void init_kernel_storage_key(void)
213 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) 214 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
214 page_set_storage_key(init_pfn << PAGE_SHIFT, 215 page_set_storage_key(init_pfn << PAGE_SHIFT,
215 PAGE_DEFAULT_KEY, 0); 216 PAGE_DEFAULT_KEY, 0);
217#endif
216} 218}
217 219
218static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE); 220static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e9b04c33d383..cb533f78c09e 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -23,7 +23,6 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
23 23
24void do_protection_exception(struct pt_regs *regs); 24void do_protection_exception(struct pt_regs *regs);
25void do_dat_exception(struct pt_regs *regs); 25void do_dat_exception(struct pt_regs *regs);
26void do_asce_exception(struct pt_regs *regs);
27 26
28void addressing_exception(struct pt_regs *regs); 27void addressing_exception(struct pt_regs *regs);
29void data_exception(struct pt_regs *regs); 28void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1014ad5f7693..224db03e9518 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -151,14 +151,13 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
151 if (unlikely(atomic_read(&current->tracing_graph_pause))) 151 if (unlikely(atomic_read(&current->tracing_graph_pause)))
152 goto out; 152 goto out;
153 ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; 153 ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
154 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
155 goto out;
156 trace.func = ip; 154 trace.func = ip;
155 trace.depth = current->curr_ret_stack + 1;
157 /* Only trace if the calling function expects to. */ 156 /* Only trace if the calling function expects to. */
158 if (!ftrace_graph_entry(&trace)) { 157 if (!ftrace_graph_entry(&trace))
159 current->curr_ret_stack--; 158 goto out;
159 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
160 goto out; 160 goto out;
161 }
162 parent = (unsigned long) return_to_handler; 161 parent = (unsigned long) return_to_handler;
163out: 162out:
164 return parent; 163 return parent;
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fd8db63dfc94..429afcc480cb 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,7 +437,7 @@ ENTRY(startup_kdump)
437 437
438#if defined(CONFIG_64BIT) 438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_ZEC12) 439#if defined(CONFIG_MARCH_ZEC12)
440 .long 3, 0xc100efe3, 0xf46ce000, 0x00400000 440 .long 3, 0xc100efe3, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_Z196) 441#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100efe3, 0xf46c0000 442 .long 2, 0xc100efe3, 0xf46c0000
443#elif defined(CONFIG_MARCH_Z10) 443#elif defined(CONFIG_MARCH_Z10)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index feb719d3c851..633ca7504536 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
2051 __ctl_clear_bit(0,28); 2051 __ctl_clear_bit(0,28);
2052 2052
2053 /* Set new machine check handler */ 2053 /* Set new machine check handler */
2054 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2054 S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
2055 S390_lowcore.mcck_new_psw.addr = 2055 S390_lowcore.mcck_new_psw.addr =
2056 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; 2056 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
2057 2057
2058 /* Set new program check handler */ 2058 /* Set new program check handler */
2059 S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT; 2059 S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
2060 S390_lowcore.program_new_psw.addr = 2060 S390_lowcore.program_new_psw.addr =
2061 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; 2061 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
2062 2062
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d86e64eddb42..59a9c35c4598 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -26,11 +26,12 @@
26#include <linux/stop_machine.h> 26#include <linux/stop_machine.h>
27#include <linux/kdebug.h> 27#include <linux/kdebug.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <asm/cacheflush.h>
30#include <asm/sections.h>
31#include <linux/module.h> 29#include <linux/module.h>
32#include <linux/slab.h> 30#include <linux/slab.h>
33#include <linux/hardirq.h> 31#include <linux/hardirq.h>
32#include <asm/cacheflush.h>
33#include <asm/sections.h>
34#include <asm/dis.h>
34 35
35DEFINE_PER_CPU(struct kprobe *, current_kprobe); 36DEFINE_PER_CPU(struct kprobe *, current_kprobe);
36DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 37DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -59,6 +60,8 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
59 60
60static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn) 61static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
61{ 62{
63 if (!is_known_insn((unsigned char *)insn))
64 return -EINVAL;
62 switch (insn[0] >> 8) { 65 switch (insn[0] >> 8) {
63 case 0x0c: /* bassm */ 66 case 0x0c: /* bassm */
64 case 0x0b: /* bsm */ 67 case 0x0b: /* bsm */
@@ -208,7 +211,7 @@ static void __kprobes copy_instruction(struct kprobe *p)
208 s64 disp, new_disp; 211 s64 disp, new_disp;
209 u64 addr, new_addr; 212 u64 addr, new_addr;
210 213
211 memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2); 214 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
212 if (!is_insn_relative_long(p->ainsn.insn)) 215 if (!is_insn_relative_long(p->ainsn.insn))
213 return; 216 return;
214 /* 217 /*
@@ -252,7 +255,7 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
252 p->ainsn.insn = NULL; 255 p->ainsn.insn = NULL;
253 if (is_kernel_addr(p->addr)) 256 if (is_kernel_addr(p->addr))
254 p->ainsn.insn = get_dmainsn_slot(); 257 p->ainsn.insn = get_dmainsn_slot();
255 if (is_module_addr(p->addr)) 258 else if (is_module_addr(p->addr))
256 p->ainsn.insn = get_insn_slot(); 259 p->ainsn.insn = get_insn_slot();
257 return p->ainsn.insn ? 0 : -ENOMEM; 260 return p->ainsn.insn ? 0 : -ENOMEM;
258} 261}
@@ -608,7 +611,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
608 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; 611 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
609 612
610 if (fixup & FIXUP_BRANCH_NOT_TAKEN) { 613 if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
611 int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2; 614 int ilen = insn_length(p->ainsn.insn[0] >> 8);
612 if (ip - (unsigned long) p->ainsn.insn == ilen) 615 if (ip - (unsigned long) p->ainsn.insn == ilen)
613 ip = (unsigned long) p->addr + ilen; 616 ip = (unsigned long) p->addr + ilen;
614 } 617 }
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 14bdecb61923..4a460c44e17e 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */ 78PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */ 79PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */ 80PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_64BIT(do_asce_exception) /* 38 */ 81PGM_CHECK_DEFAULT /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 82PGM_CHECK_64BIT(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 83PGM_CHECK_64BIT(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */ 84PGM_CHECK_64BIT(do_dat_exception) /* 3b */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index c5dbb335716d..7ed0d4e2a435 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -139,7 +139,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
139 if (unlikely(p->flags & PF_KTHREAD)) { 139 if (unlikely(p->flags & PF_KTHREAD)) {
140 /* kernel thread */ 140 /* kernel thread */
141 memset(&frame->childregs, 0, sizeof(struct pt_regs)); 141 memset(&frame->childregs, 0, sizeof(struct pt_regs));
142 frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | 142 frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
143 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 143 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
144 frame->childregs.psw.addr = PSW_ADDR_AMODE | 144 frame->childregs.psw.addr = PSW_ADDR_AMODE |
145 (unsigned long) kernel_thread_starter; 145 (unsigned long) kernel_thread_starter;
@@ -165,7 +165,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
165 * save fprs to current->thread.fp_regs to merge them with 165 * save fprs to current->thread.fp_regs to merge them with
166 * the emulated registers and then copy the result to the child. 166 * the emulated registers and then copy the result to the child.
167 */ 167 */
168 save_fp_regs(&current->thread.fp_regs); 168 save_fp_ctl(&current->thread.fp_regs.fpc);
169 save_fp_regs(current->thread.fp_regs.fprs);
169 memcpy(&p->thread.fp_regs, &current->thread.fp_regs, 170 memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
170 sizeof(s390_fp_regs)); 171 sizeof(s390_fp_regs));
171 /* Set a new TLS ? */ 172 /* Set a new TLS ? */
@@ -173,7 +174,9 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
173 p->thread.acrs[0] = frame->childregs.gprs[6]; 174 p->thread.acrs[0] = frame->childregs.gprs[6];
174#else /* CONFIG_64BIT */ 175#else /* CONFIG_64BIT */
175 /* Save the fpu registers to new thread structure. */ 176 /* Save the fpu registers to new thread structure. */
176 save_fp_regs(&p->thread.fp_regs); 177 save_fp_ctl(&p->thread.fp_regs.fpc);
178 save_fp_regs(p->thread.fp_regs.fprs);
179 p->thread.fp_regs.pad = 0;
177 /* Set a new TLS ? */ 180 /* Set a new TLS ? */
178 if (clone_flags & CLONE_SETTLS) { 181 if (clone_flags & CLONE_SETTLS) {
179 unsigned long tls = frame->childregs.gprs[6]; 182 unsigned long tls = frame->childregs.gprs[6];
@@ -205,10 +208,12 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
205 * save fprs to current->thread.fp_regs to merge them with 208 * save fprs to current->thread.fp_regs to merge them with
206 * the emulated registers and then copy the result to the dump. 209 * the emulated registers and then copy the result to the dump.
207 */ 210 */
208 save_fp_regs(&current->thread.fp_regs); 211 save_fp_ctl(&current->thread.fp_regs.fpc);
212 save_fp_regs(current->thread.fp_regs.fprs);
209 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs)); 213 memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
210#else /* CONFIG_64BIT */ 214#else /* CONFIG_64BIT */
211 save_fp_regs(fpregs); 215 save_fp_ctl(&fpregs->fpc);
216 save_fp_regs(fpregs->fprs);
212#endif /* CONFIG_64BIT */ 217#endif /* CONFIG_64BIT */
213 return 1; 218 return 1;
214} 219}
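
Throughout this series the old save_fp_regs(&fp_regs)/restore_fp_regs(&fp_regs) pair is split into separate control-word and register-file helpers, save_fp_ctl(&fp_regs.fpc) plus save_fp_regs(fp_regs.fprs), so the floating-point-control word can be validated and loaded independently of the sixteen registers. The sketch below shows only the new calling convention; the real helpers are short inline-assembly wrappers (stfpc/lfpc, std/ld) not reproduced here:

struct fp_regs_sketch {
	unsigned int fpc;
	unsigned int pad;
	double fprs[16];
};

static void save_fp_ctl_sketch(unsigned int *fpc)
{
	*fpc = 0;		/* stands in for "stfpc" */
}

static void save_fp_regs_sketch(double *fprs)
{
	fprs[0] = 0.0;		/* stands in for "std %f0..%f15" */
}

static void save_thread_fpu(struct fp_regs_sketch *fp)
{
	save_fp_ctl_sketch(&fp->fpc);	/* control word first ... */
	save_fp_regs_sketch(fp->fprs);	/* ... then the register file */
}
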
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9556905bd3ce..e65c91c591e8 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -198,9 +198,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
198 * psw and gprs are stored on the stack 198 * psw and gprs are stored on the stack
199 */ 199 */
200 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); 200 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
201 if (addr == (addr_t) &dummy->regs.psw.mask) 201 if (addr == (addr_t) &dummy->regs.psw.mask) {
202 /* Return a clean psw mask. */ 202 /* Return a clean psw mask. */
203 tmp = psw_user_bits | (tmp & PSW_MASK_USER); 203 tmp &= PSW_MASK_USER | PSW_MASK_RI;
204 tmp |= PSW_USER_BITS;
205 }
204 206
205 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { 207 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
206 /* 208 /*
@@ -239,8 +241,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
239 offset = addr - (addr_t) &dummy->regs.fp_regs; 241 offset = addr - (addr_t) &dummy->regs.fp_regs;
240 tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); 242 tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
241 if (addr == (addr_t) &dummy->regs.fp_regs.fpc) 243 if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
242 tmp &= (unsigned long) FPC_VALID_MASK 244 tmp <<= BITS_PER_LONG - 32;
243 << (BITS_PER_LONG - 32);
244 245
245 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 246 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
246 /* 247 /*
@@ -321,11 +322,15 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
321 /* 322 /*
322 * psw and gprs are stored on the stack 323 * psw and gprs are stored on the stack
323 */ 324 */
324 if (addr == (addr_t) &dummy->regs.psw.mask && 325 if (addr == (addr_t) &dummy->regs.psw.mask) {
325 ((data & ~PSW_MASK_USER) != psw_user_bits || 326 unsigned long mask = PSW_MASK_USER;
326 ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) 327
327 /* Invalid psw mask. */ 328 mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
328 return -EINVAL; 329 if ((data & ~mask) != PSW_USER_BITS)
330 return -EINVAL;
331 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
332 return -EINVAL;
333 }
329 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 334 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
330 335
331 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 336 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -363,10 +368,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
363 /* 368 /*
364 * floating point regs. are stored in the thread structure 369 * floating point regs. are stored in the thread structure
365 */ 370 */
366 if (addr == (addr_t) &dummy->regs.fp_regs.fpc && 371 if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
367 (data & ~((unsigned long) FPC_VALID_MASK 372 if ((unsigned int) data != 0 ||
368 << (BITS_PER_LONG - 32))) != 0) 373 test_fp_ctl(data >> (BITS_PER_LONG - 32)))
369 return -EINVAL; 374 return -EINVAL;
370 offset = addr - (addr_t) &dummy->regs.fp_regs; 375 offset = addr - (addr_t) &dummy->regs.fp_regs;
371 *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; 376 *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
372 377
@@ -557,7 +562,8 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
557 if (addr == (addr_t) &dummy32->regs.psw.mask) { 562 if (addr == (addr_t) &dummy32->regs.psw.mask) {
558 /* Fake a 31 bit psw mask. */ 563 /* Fake a 31 bit psw mask. */
559 tmp = (__u32)(regs->psw.mask >> 32); 564 tmp = (__u32)(regs->psw.mask >> 32);
560 tmp = psw32_user_bits | (tmp & PSW32_MASK_USER); 565 tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
566 tmp |= PSW32_USER_BITS;
561 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 567 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
562 /* Fake a 31 bit psw address. */ 568 /* Fake a 31 bit psw address. */
563 tmp = (__u32) regs->psw.addr | 569 tmp = (__u32) regs->psw.addr |
@@ -654,13 +660,16 @@ static int __poke_user_compat(struct task_struct *child,
654 * psw, gprs, acrs and orig_gpr2 are stored on the stack 660 * psw, gprs, acrs and orig_gpr2 are stored on the stack
655 */ 661 */
656 if (addr == (addr_t) &dummy32->regs.psw.mask) { 662 if (addr == (addr_t) &dummy32->regs.psw.mask) {
663 __u32 mask = PSW32_MASK_USER;
664
665 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
657 /* Build a 64 bit psw mask from 31 bit mask. */ 666 /* Build a 64 bit psw mask from 31 bit mask. */
658 if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits) 667 if ((tmp & ~mask) != PSW32_USER_BITS)
659 /* Invalid psw mask. */ 668 /* Invalid psw mask. */
660 return -EINVAL; 669 return -EINVAL;
661 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 670 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
662 (regs->psw.mask & PSW_MASK_BA) | 671 (regs->psw.mask & PSW_MASK_BA) |
663 (__u64)(tmp & PSW32_MASK_USER) << 32; 672 (__u64)(tmp & mask) << 32;
664 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 673 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
665 /* Build a 64 bit psw address from 31 bit address. */ 674 /* Build a 64 bit psw address from 31 bit address. */
666 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; 675 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
@@ -696,8 +705,7 @@ static int __poke_user_compat(struct task_struct *child,
696 * floating point regs. are stored in the thread structure 705 * floating point regs. are stored in the thread structure
697 */ 706 */
698 if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && 707 if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
699 (tmp & ~FPC_VALID_MASK) != 0) 708 test_fp_ctl(tmp))
700 /* Invalid floating point control. */
701 return -EINVAL; 709 return -EINVAL;
702 offset = addr - (addr_t) &dummy32->regs.fp_regs; 710 offset = addr - (addr_t) &dummy32->regs.fp_regs;
703 *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; 711 *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
@@ -895,8 +903,10 @@ static int s390_fpregs_get(struct task_struct *target,
895 const struct user_regset *regset, unsigned int pos, 903 const struct user_regset *regset, unsigned int pos,
896 unsigned int count, void *kbuf, void __user *ubuf) 904 unsigned int count, void *kbuf, void __user *ubuf)
897{ 905{
898 if (target == current) 906 if (target == current) {
899 save_fp_regs(&target->thread.fp_regs); 907 save_fp_ctl(&target->thread.fp_regs.fpc);
908 save_fp_regs(target->thread.fp_regs.fprs);
909 }
900 910
901 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 911 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
902 &target->thread.fp_regs, 0, -1); 912 &target->thread.fp_regs, 0, -1);
@@ -909,19 +919,21 @@ static int s390_fpregs_set(struct task_struct *target,
909{ 919{
910 int rc = 0; 920 int rc = 0;
911 921
912 if (target == current) 922 if (target == current) {
913 save_fp_regs(&target->thread.fp_regs); 923 save_fp_ctl(&target->thread.fp_regs.fpc);
924 save_fp_regs(target->thread.fp_regs.fprs);
925 }
914 926
915 /* If setting FPC, must validate it first. */ 927 /* If setting FPC, must validate it first. */
916 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { 928 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
917 u32 fpc[2] = { target->thread.fp_regs.fpc, 0 }; 929 u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
918 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc, 930 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
919 0, offsetof(s390_fp_regs, fprs)); 931 0, offsetof(s390_fp_regs, fprs));
920 if (rc) 932 if (rc)
921 return rc; 933 return rc;
922 if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0) 934 if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
923 return -EINVAL; 935 return -EINVAL;
924 target->thread.fp_regs.fpc = fpc[0]; 936 target->thread.fp_regs.fpc = ufpc[0];
925 } 937 }
926 938
927 if (rc == 0 && count > 0) 939 if (rc == 0 && count > 0)
@@ -929,8 +941,10 @@ static int s390_fpregs_set(struct task_struct *target,
929 target->thread.fp_regs.fprs, 941 target->thread.fp_regs.fprs,
930 offsetof(s390_fp_regs, fprs), -1); 942 offsetof(s390_fp_regs, fprs), -1);
931 943
932 if (rc == 0 && target == current) 944 if (rc == 0 && target == current) {
933 restore_fp_regs(&target->thread.fp_regs); 945 restore_fp_ctl(&target->thread.fp_regs.fpc);
946 restore_fp_regs(target->thread.fp_regs.fprs);
947 }
934 948
935 return rc; 949 return rc;
936} 950}
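
The FPC_VALID_MASK checks give way to test_fp_ctl(), which, as the call sites here show, returns nonzero when the candidate floating-point-control word is unacceptable, so ptrace validation and the later restore path agree on what is allowed. A simplified sketch of the poke-side flow; fp_ctl_rejected() is a placeholder for test_fp_ctl(), and the real code stores the whole user word rather than just the shifted control bits:

#include <errno.h>

/* Placeholder for test_fp_ctl(): nonzero means the value is rejected. */
static int fp_ctl_rejected(unsigned int fpc)
{
	(void) fpc;
	return 0;
}

static int poke_fpc(unsigned int *thread_fpc, unsigned long long data)
{
	unsigned int fpc = data >> 32;	/* fpc travels in the upper half */

	if ((unsigned int) data != 0 || fp_ctl_rejected(fpc))
		return -EINVAL;		/* reject before storing anything */
	*thread_fpc = fpc;
	return 0;
}
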
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index e1c9d1c292fa..d817cce7e72d 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
40static void init_runtime_instr_cb(struct runtime_instr_cb *cb) 40static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
41{ 41{
42 cb->buf_limit = 0xfff; 42 cb->buf_limit = 0xfff;
43 if (s390_user_mode == HOME_SPACE_MODE)
44 cb->home_space = 1;
45 cb->int_requested = 1; 43 cb->int_requested = 1;
46 cb->pstate = 1; 44 cb->pstate = 1;
47 cb->pstate_set_buf = 1; 45 cb->pstate_set_buf = 1;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index aeed8a61fa0d..ffe1c53264a7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -64,12 +64,6 @@
64#include <asm/sclp.h> 64#include <asm/sclp.h>
65#include "entry.h" 65#include "entry.h"
66 66
67long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
68 PSW_MASK_EA | PSW_MASK_BA;
69long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
70 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
71 PSW_MASK_PSTATE | PSW_ASC_HOME;
72
73/* 67/*
74 * User copy operations. 68 * User copy operations.
75 */ 69 */
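
With the run-time psw_kernel_bits/psw_user_bits variables removed here, every user in this series switches to the PSW_KERNEL_BITS and PSW_USER_BITS constants (see the setup_lowcore(), smp.c and signal hunks). The constants are defined in a header outside this diff; the following is a plausible sketch, following the removed initializers and the now-fixed split of kernel in home space, user in primary space:

/* Sketch, not the verbatim header: the PSW templates no longer need to be
 * patched at boot, so they can be compile-time constants.
 */
#define PSW_KERNEL_BITS	(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
			 PSW_MASK_EA | PSW_MASK_BA)
#define PSW_USER_BITS	(PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
			 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
			 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
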
@@ -300,43 +294,14 @@ static int __init parse_vmalloc(char *arg)
300} 294}
301early_param("vmalloc", parse_vmalloc); 295early_param("vmalloc", parse_vmalloc);
302 296
303unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
304EXPORT_SYMBOL_GPL(s390_user_mode);
305
306static void __init set_user_mode_primary(void)
307{
308 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
309 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
310#ifdef CONFIG_COMPAT
311 psw32_user_bits =
312 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
313#endif
314 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
315}
316
317static int __init early_parse_user_mode(char *p) 297static int __init early_parse_user_mode(char *p)
318{ 298{
319 if (p && strcmp(p, "primary") == 0) 299 if (!p || strcmp(p, "primary") == 0)
320 s390_user_mode = PRIMARY_SPACE_MODE; 300 return 0;
321 else if (!p || strcmp(p, "home") == 0) 301 return 1;
322 s390_user_mode = HOME_SPACE_MODE;
323 else
324 return 1;
325 return 0;
326} 302}
327early_param("user_mode", early_parse_user_mode); 303early_param("user_mode", early_parse_user_mode);
328 304
329static void __init setup_addressing_mode(void)
330{
331 if (s390_user_mode != PRIMARY_SPACE_MODE)
332 return;
333 set_user_mode_primary();
334 if (MACHINE_HAS_MVCOS)
335 pr_info("Address spaces switched, mvcos available\n");
336 else
337 pr_info("Address spaces switched, mvcos not available\n");
338}
339
340void *restart_stack __attribute__((__section__(".data"))); 305void *restart_stack __attribute__((__section__(".data")));
341 306
342static void __init setup_lowcore(void) 307static void __init setup_lowcore(void)
@@ -348,24 +313,24 @@ static void __init setup_lowcore(void)
348 */ 313 */
349 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); 314 BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
350 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 315 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
351 lc->restart_psw.mask = psw_kernel_bits; 316 lc->restart_psw.mask = PSW_KERNEL_BITS;
352 lc->restart_psw.addr = 317 lc->restart_psw.addr =
353 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 318 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
354 lc->external_new_psw.mask = psw_kernel_bits | 319 lc->external_new_psw.mask = PSW_KERNEL_BITS |
355 PSW_MASK_DAT | PSW_MASK_MCHECK; 320 PSW_MASK_DAT | PSW_MASK_MCHECK;
356 lc->external_new_psw.addr = 321 lc->external_new_psw.addr =
357 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 322 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
358 lc->svc_new_psw.mask = psw_kernel_bits | 323 lc->svc_new_psw.mask = PSW_KERNEL_BITS |
359 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 324 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
360 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 325 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
361 lc->program_new_psw.mask = psw_kernel_bits | 326 lc->program_new_psw.mask = PSW_KERNEL_BITS |
362 PSW_MASK_DAT | PSW_MASK_MCHECK; 327 PSW_MASK_DAT | PSW_MASK_MCHECK;
363 lc->program_new_psw.addr = 328 lc->program_new_psw.addr =
364 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; 329 PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
365 lc->mcck_new_psw.mask = psw_kernel_bits; 330 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
366 lc->mcck_new_psw.addr = 331 lc->mcck_new_psw.addr =
367 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 332 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
368 lc->io_new_psw.mask = psw_kernel_bits | 333 lc->io_new_psw.mask = PSW_KERNEL_BITS |
369 PSW_MASK_DAT | PSW_MASK_MCHECK; 334 PSW_MASK_DAT | PSW_MASK_MCHECK;
370 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 335 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
371 lc->clock_comparator = -1ULL; 336 lc->clock_comparator = -1ULL;
@@ -1043,10 +1008,7 @@ void __init setup_arch(char **cmdline_p)
1043 init_mm.end_data = (unsigned long) &_edata; 1008 init_mm.end_data = (unsigned long) &_edata;
1044 init_mm.brk = (unsigned long) &_end; 1009 init_mm.brk = (unsigned long) &_end;
1045 1010
1046 if (MACHINE_HAS_MVCOS) 1011 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
1047 memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
1048 else
1049 memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
1050 1012
1051 parse_early_param(); 1013 parse_early_param();
1052 detect_memory_layout(memory_chunk, memory_end); 1014 detect_memory_layout(memory_chunk, memory_end);
@@ -1054,7 +1016,6 @@ void __init setup_arch(char **cmdline_p)
1054 setup_ipl(); 1016 setup_ipl();
1055 reserve_oldmem(); 1017 reserve_oldmem();
1056 setup_memory_end(); 1018 setup_memory_end();
1057 setup_addressing_mode();
1058 reserve_crashkernel(); 1019 reserve_crashkernel();
1059 setup_memory(); 1020 setup_memory();
1060 setup_resources(); 1021 setup_resources();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c45becf82e01..fb535874a246 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -57,40 +57,48 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
57 57
58 /* Copy a 'clean' PSW mask to the user to avoid leaking 58 /* Copy a 'clean' PSW mask to the user to avoid leaking
59 information about whether PER is currently on. */ 59 information about whether PER is currently on. */
60 user_sregs.regs.psw.mask = psw_user_bits | 60 user_sregs.regs.psw.mask = PSW_USER_BITS |
61 (regs->psw.mask & PSW_MASK_USER); 61 (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
62 user_sregs.regs.psw.addr = regs->psw.addr; 62 user_sregs.regs.psw.addr = regs->psw.addr;
63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 63 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
64 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 64 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
65 sizeof(sregs->regs.acrs)); 65 sizeof(user_sregs.regs.acrs));
66 /* 66 /*
67 * We have to store the fp registers to current->thread.fp_regs 67 * We have to store the fp registers to current->thread.fp_regs
68 * to merge them with the emulated registers. 68 * to merge them with the emulated registers.
69 */ 69 */
70 save_fp_regs(&current->thread.fp_regs); 70 save_fp_ctl(&current->thread.fp_regs.fpc);
71 save_fp_regs(current->thread.fp_regs.fprs);
71 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, 72 memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
72 sizeof(s390_fp_regs)); 73 sizeof(user_sregs.fpregs));
73 return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs)); 74 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
75 return -EFAULT;
76 return 0;
74} 77}
75 78
76/* Returns positive number on error */
77static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) 79static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
78{ 80{
79 int err;
80 _sigregs user_sregs; 81 _sigregs user_sregs;
81 82
82 /* Always make any pending restarted system call return -EINTR */ 83 /* Always make any pending restarted system call return -EINTR */

83 current_thread_info()->restart_block.fn = do_no_restart_syscall; 84 current_thread_info()->restart_block.fn = do_no_restart_syscall;
84 85
85 err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); 86 if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
86 if (err) 87 return -EFAULT;
87 return err; 88
88 /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ 89 if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
90 return -EINVAL;
91
92 /* Loading the floating-point-control word can fail. Do that first. */
93 if (restore_fp_ctl(&user_sregs.fpregs.fpc))
94 return -EINVAL;
95
96 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
89 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 97 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
90 (user_sregs.regs.psw.mask & PSW_MASK_USER); 98 (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
91 /* Check for invalid user address space control. */ 99 /* Check for invalid user address space control. */
92 if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) 100 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
93 regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | 101 regs->psw.mask = PSW_ASC_PRIMARY |
94 (regs->psw.mask & ~PSW_MASK_ASC); 102 (regs->psw.mask & ~PSW_MASK_ASC);
95 /* Check for invalid amode */ 103 /* Check for invalid amode */
96 if (regs->psw.mask & PSW_MASK_EA) 104 if (regs->psw.mask & PSW_MASK_EA)
@@ -98,14 +106,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
98 regs->psw.addr = user_sregs.regs.psw.addr; 106 regs->psw.addr = user_sregs.regs.psw.addr;
99 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs)); 107 memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
100 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 108 memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
101 sizeof(sregs->regs.acrs)); 109 sizeof(current->thread.acrs));
102 restore_access_regs(current->thread.acrs); 110 restore_access_regs(current->thread.acrs);
103 111
104 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, 112 memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
105 sizeof(s390_fp_regs)); 113 sizeof(current->thread.fp_regs));
106 current->thread.fp_regs.fpc &= FPC_VALID_MASK;
107 114
108 restore_fp_regs(&current->thread.fp_regs); 115 restore_fp_regs(current->thread.fp_regs.fprs);
109 clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ 116 clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
110 return 0; 117 return 0;
111} 118}
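
restore_sigregs() now performs every check that can fail before any task state is written back: the copy from user space, the rule that PSW_MASK_RI is only honoured for tasks that enabled runtime instrumentation, and the restore_fp_ctl() validation of the floating-point-control word all come first, so a malformed signal frame can no longer leave the task half-restored. A condensed, self-contained sketch of that ordering; all names are stand-ins, not kernel interfaces:

#include <errno.h>
#include <string.h>

struct frame_sketch {
	unsigned long psw_mask;
	unsigned int fpc;
	unsigned long gprs[16];
};

#define RI_BIT_SKETCH	(1UL << 20)	/* stands in for PSW_MASK_RI */

static struct frame_sketch task_state;
static int task_uses_ri;

static int restore_frame(const struct frame_sketch *user_copy,
			 int (*fpc_rejected)(unsigned int))
{
	struct frame_sketch frame;

	memcpy(&frame, user_copy, sizeof(frame)); /* __copy_from_user() here */
	if (!task_uses_ri && (frame.psw_mask & RI_BIT_SKETCH))
		return -EINVAL;		/* RI bit only for RI-enabled tasks */
	if (fpc_rejected(frame.fpc))
		return -EINVAL;		/* bad fpc rejected up front */
	task_state = frame;		/* commit only after all checks pass */
	return 0;
}
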
@@ -224,7 +231,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
224 regs->gprs[15] = (unsigned long) frame; 231 regs->gprs[15] = (unsigned long) frame;
225 /* Force default amode and default user address space control. */ 232 /* Force default amode and default user address space control. */
226 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 233 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
227 (psw_user_bits & PSW_MASK_ASC) | 234 (PSW_USER_BITS & PSW_MASK_ASC) |
228 (regs->psw.mask & ~PSW_MASK_ASC); 235 (regs->psw.mask & ~PSW_MASK_ASC);
229 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 236 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
230 237
@@ -295,7 +302,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
295 regs->gprs[15] = (unsigned long) frame; 302 regs->gprs[15] = (unsigned long) frame;
296 /* Force default amode and default user address space control. */ 303 /* Force default amode and default user address space control. */
297 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | 304 regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
298 (psw_user_bits & PSW_MASK_ASC) | 305 (PSW_USER_BITS & PSW_MASK_ASC) |
299 (regs->psw.mask & ~PSW_MASK_ASC); 306 (regs->psw.mask & ~PSW_MASK_ASC);
300 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; 307 regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
301 308
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1a4313a1b60f..dc4a53465060 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -283,7 +283,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
283 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; 283 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
284 unsigned long source_cpu = stap(); 284 unsigned long source_cpu = stap();
285 285
286 __load_psw_mask(psw_kernel_bits); 286 __load_psw_mask(PSW_KERNEL_BITS);
287 if (pcpu->address == source_cpu) 287 if (pcpu->address == source_cpu)
288 func(data); /* should not return */ 288 func(data); /* should not return */
289 /* Stop target cpu (if func returns this stops the current cpu). */ 289 /* Stop target cpu (if func returns this stops the current cpu). */
@@ -395,7 +395,7 @@ void smp_send_stop(void)
395 int cpu; 395 int cpu;
396 396
397 /* Disable all interrupts/machine checks */ 397 /* Disable all interrupts/machine checks */
398 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 398 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
399 trace_hardirqs_off(); 399 trace_hardirqs_off();
400 400
401 debug_set_critical(); 401 debug_set_critical();
@@ -533,9 +533,6 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
533 533
534#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) 534#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
535 535
536struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
537EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
538
539static void __init smp_get_save_area(int cpu, u16 address) 536static void __init smp_get_save_area(int cpu, u16 address)
540{ 537{
541 void *lc = pcpu_devices[0].lowcore; 538 void *lc = pcpu_devices[0].lowcore;
@@ -546,15 +543,9 @@ static void __init smp_get_save_area(int cpu, u16 address)
546 if (!OLDMEM_BASE && (address == boot_cpu_address || 543 if (!OLDMEM_BASE && (address == boot_cpu_address ||
547 ipl_info.type != IPL_TYPE_FCP_DUMP)) 544 ipl_info.type != IPL_TYPE_FCP_DUMP))
548 return; 545 return;
549 if (cpu >= NR_CPUS) { 546 save_area = dump_save_area_create(cpu);
550 pr_warning("CPU %i exceeds the maximum %i and is excluded "
551 "from the dump\n", cpu, NR_CPUS - 1);
552 return;
553 }
554 save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
555 if (!save_area) 547 if (!save_area)
556 panic("could not allocate memory for save area\n"); 548 panic("could not allocate memory for save area\n");
557 zfcpdump_save_areas[cpu] = save_area;
558#ifdef CONFIG_CRASH_DUMP 549#ifdef CONFIG_CRASH_DUMP
559 if (address == boot_cpu_address) { 550 if (address == boot_cpu_address) {
560 /* Copy the registers of the boot cpu. */ 551 /* Copy the registers of the boot cpu. */
@@ -693,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
693 S390_lowcore.restart_source = -1UL; 684 S390_lowcore.restart_source = -1UL;
694 restore_access_regs(S390_lowcore.access_regs_save_area); 685 restore_access_regs(S390_lowcore.access_regs_save_area);
695 __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 686 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
696 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 687 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
697 cpu_init(); 688 cpu_init();
698 preempt_disable(); 689 preempt_disable();
699 init_cpu_timer(); 690 init_cpu_timer();
@@ -929,7 +920,7 @@ static ssize_t show_idle_count(struct device *dev,
929 idle_count = ACCESS_ONCE(idle->idle_count); 920 idle_count = ACCESS_ONCE(idle->idle_count);
930 if (ACCESS_ONCE(idle->clock_idle_enter)) 921 if (ACCESS_ONCE(idle->clock_idle_enter))
931 idle_count++; 922 idle_count++;
932 } while ((sequence & 1) || (idle->sequence != sequence)); 923 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
933 return sprintf(buf, "%llu\n", idle_count); 924 return sprintf(buf, "%llu\n", idle_count);
934} 925}
935static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); 926static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -947,7 +938,7 @@ static ssize_t show_idle_time(struct device *dev,
947 idle_time = ACCESS_ONCE(idle->idle_time); 938 idle_time = ACCESS_ONCE(idle->idle_time);
948 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 939 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
949 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 940 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
950 } while ((sequence & 1) || (idle->sequence != sequence)); 941 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
951 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; 942 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
952 return sprintf(buf, "%llu\n", idle_time >> 12); 943 return sprintf(buf, "%llu\n", idle_time >> 12);
953} 944}
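The three hunks above share one fix: the lockless readers of the per-cpu idle data now re-read idle->sequence through ACCESS_ONCE() in the loop condition, so the compiler cannot satisfy the retry check from a value it already holds in a register. A minimal user-space sketch of the same retry pattern follows; the type and helper names are invented for illustration, and the single-threaded demo leaves out the memory barriers a real reader/writer pair would need.

#include <stdio.h>

/* Illustrative stand-in for the kernel's ACCESS_ONCE(); GNU C typeof. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct idle_data {
	unsigned int sequence;		/* odd while the writer is updating */
	unsigned long long idle_time;
};

static struct idle_data idle;		/* updated from another CPU in the real code */

static unsigned long long read_idle_time(void)
{
	unsigned long long t;
	unsigned int seq;

	do {
		seq = ACCESS_ONCE(idle.sequence);
		t = ACCESS_ONCE(idle.idle_time);
		/*
		 * Re-reading the sequence through ACCESS_ONCE() is the point
		 * of the fix: without it the compiler may compare against the
		 * value loaded at the top of the loop and never notice a
		 * concurrent update.
		 */
	} while ((seq & 1) || (ACCESS_ONCE(idle.sequence) != seq));
	return t;
}

int main(void)
{
	idle.idle_time = 42;
	printf("%llu\n", read_idle_time());
	return 0;
}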
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 05d75c413137..a84476f2a9bb 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,8 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
84 */ 84 */
85static void vdso_init_data(struct vdso_data *vd) 85static void vdso_init_data(struct vdso_data *vd)
86{ 86{
87 vd->ectg_available = 87 vd->ectg_available = test_facility(31);
88 s390_user_mode != HOME_SPACE_MODE && test_facility(31);
89} 88}
90 89
91#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
@@ -102,7 +101,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
102 101
103 lowcore->vdso_per_cpu_data = __LC_PASTE; 102 lowcore->vdso_per_cpu_data = __LC_PASTE;
104 103
105 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 104 if (!vdso_enabled)
106 return 0; 105 return 0;
107 106
108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); 107 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +146,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
147 unsigned long segment_table, page_table, page_frame; 146 unsigned long segment_table, page_table, page_frame;
148 u32 *psal, *aste; 147 u32 *psal, *aste;
149 148
150 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 149 if (!vdso_enabled)
151 return; 150 return;
152 151
153 psal = (u32 *)(addr_t) lowcore->paste[4]; 152 psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +164,7 @@ static void vdso_init_cr5(void)
165{ 164{
166 unsigned long cr5; 165 unsigned long cr5;
167 166
168 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled) 167 if (!vdso_enabled)
169 return; 168 return;
170 cr5 = offsetof(struct _lowcore, paste); 169 cr5 = offsetof(struct _lowcore, paste);
171 __ctl_load(cr5, 5, 5); 170 __ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index abcfab55f99b..8c34363d6f1e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
161 trace_hardirqs_on(); 161 trace_hardirqs_on();
162 162
163 /* Wait for external, I/O or machine check interrupt. */ 163 /* Wait for external, I/O or machine check interrupt. */
164 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | 164 psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 165 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
166 idle->nohz_delay = 0; 166 idle->nohz_delay = 0;
167 167
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
191 sequence = ACCESS_ONCE(idle->sequence); 191 sequence = ACCESS_ONCE(idle->sequence);
192 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 192 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
193 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 193 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
194 } while ((sequence & 1) || (idle->sequence != sequence)); 194 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
195 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; 195 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
196} 196}
197 197
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 776dafe918db..ed8064cb5c49 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -343,10 +343,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
343 343
344void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 344void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
345{ 345{
346 save_fp_regs(&vcpu->arch.host_fpregs); 346 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
347 save_fp_regs(vcpu->arch.host_fpregs.fprs);
347 save_access_regs(vcpu->arch.host_acrs); 348 save_access_regs(vcpu->arch.host_acrs);
348 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; 349 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
349 restore_fp_regs(&vcpu->arch.guest_fpregs); 350 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
350 restore_access_regs(vcpu->run->s.regs.acrs); 351 restore_access_regs(vcpu->run->s.regs.acrs);
351 gmap_enable(vcpu->arch.gmap); 352 gmap_enable(vcpu->arch.gmap);
352 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 353 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -356,9 +357,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
356{ 357{
357 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 358 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
358 gmap_disable(vcpu->arch.gmap); 359 gmap_disable(vcpu->arch.gmap);
359 save_fp_regs(&vcpu->arch.guest_fpregs); 360 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
361 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
360 save_access_regs(vcpu->run->s.regs.acrs); 362 save_access_regs(vcpu->run->s.regs.acrs);
361 restore_fp_regs(&vcpu->arch.host_fpregs); 363 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
364 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
362 restore_access_regs(vcpu->arch.host_acrs); 365 restore_access_regs(vcpu->arch.host_acrs);
363} 366}
364 367
@@ -618,9 +621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
618 621
619int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 622int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
620{ 623{
624 if (test_fp_ctl(fpu->fpc))
625 return -EINVAL;
621 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); 626 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
622 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK; 627 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
623 restore_fp_regs(&vcpu->arch.guest_fpregs); 628 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
629 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
624 return 0; 630 return 0;
625} 631}
626 632
@@ -876,7 +882,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
876 * copying in vcpu load/put. Lets update our copies before we save 882 * copying in vcpu load/put. Lets update our copies before we save
877 * it into the save area 883 * it into the save area
878 */ 884 */
879 save_fp_regs(&vcpu->arch.guest_fpregs); 885 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
886 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
880 save_access_regs(vcpu->run->s.regs.acrs); 887 save_access_regs(vcpu->run->s.regs.acrs);
881 888
882 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), 889 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
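The KVM hunks split the old save_fp_regs()/restore_fp_regs() pair into separate operations on the floating-point-control word and the floating-point registers, and they validate a user-supplied FPC with test_fp_ctl() so that kvm_arch_vcpu_ioctl_set_fpu() fails with -EINVAL instead of silently masking reserved bits. Below is a compile-and-run sketch of that validate-before-load flow; set_guest_fpu() and the mask-based test_fp_ctl() body are stand-ins, not the kernel implementation, and the mask value is borrowed from the FPC_VALID_MASK definition that the math-emu hunk further down re-introduces.

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define FPC_VALID_BITS 0xf8f8ff03U	/* assumed set of architected FPC bits */

/* Stand-in for the arch helper: reject FPC values with unsupported bits. */
static int test_fp_ctl(uint32_t fpc)
{
	return (fpc & ~FPC_VALID_BITS) ? -EINVAL : 0;
}

struct guest_fpregs {
	uint32_t fpc;
	uint64_t fprs[16];
};

/* Mirrors the flow of kvm_arch_vcpu_ioctl_set_fpu(): validate, copy, load. */
static int set_guest_fpu(struct guest_fpregs *guest, uint32_t fpc,
			 const uint64_t fprs[16])
{
	if (test_fp_ctl(fpc))
		return -EINVAL;			/* bad FPC: refuse, don't mask */
	memcpy(guest->fprs, fprs, sizeof(guest->fprs));
	guest->fpc = fpc;
	/* the real code now calls restore_fp_ctl() and restore_fp_regs() */
	return 0;
}

int main(void)
{
	struct guest_fpregs g;
	uint64_t fprs[16] = { 0 };

	return set_guest_fpu(&g, 0x00000001, fprs) ? 1 : 0;
}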
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index c2f582bb1cb2..0c991c6748ab 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -4,6 +4,7 @@
4#include <linux/tracepoint.h> 4#include <linux/tracepoint.h>
5#include <asm/sigp.h> 5#include <asm/sigp.h>
6#include <asm/debug.h> 6#include <asm/debug.h>
7#include <asm/dis.h>
7 8
8#undef TRACE_SYSTEM 9#undef TRACE_SYSTEM
9#define TRACE_SYSTEM kvm 10#define TRACE_SYSTEM kvm
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 20b0e97a7df2..b068729e50ac 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for s390-specific library files.. 2# Makefile for s390-specific library files..
3# 3#
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_pt.o find.o
6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
7obj-$(CONFIG_64BIT) += mem64.o 7obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_64BIT) += uaccess_mvcos.o 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644
index 000000000000..620d34d6487e
--- /dev/null
+++ b/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
1/*
2 * MSB0 numbered special bitops handling.
3 *
4 * On s390x the bits are numbered:
5 * |0..............63|64............127|128...........191|192...........255|
6 * and on s390:
7 * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
8 *
9 * The reason for this bit numbering is the fact that the hardware sets bits
10 * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
11 * from the 'wrong end'.
12 */
13
14#include <linux/compiler.h>
15#include <linux/bitops.h>
16#include <linux/export.h>
17
18unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
19{
20 const unsigned long *p = addr;
21 unsigned long result = 0;
22 unsigned long tmp;
23
24 while (size & ~(BITS_PER_LONG - 1)) {
25 if ((tmp = *(p++)))
26 goto found;
27 result += BITS_PER_LONG;
28 size -= BITS_PER_LONG;
29 }
30 if (!size)
31 return result;
32 tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
33 if (!tmp) /* Are any bits set? */
34 return result + size; /* Nope. */
35found:
36 return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
37}
38EXPORT_SYMBOL(find_first_bit_inv);
39
40unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
41 unsigned long offset)
42{
43 const unsigned long *p = addr + (offset / BITS_PER_LONG);
44 unsigned long result = offset & ~(BITS_PER_LONG - 1);
45 unsigned long tmp;
46
47 if (offset >= size)
48 return size;
49 size -= result;
50 offset %= BITS_PER_LONG;
51 if (offset) {
52 tmp = *(p++);
53 tmp &= (~0UL >> offset);
54 if (size < BITS_PER_LONG)
55 goto found_first;
56 if (tmp)
57 goto found_middle;
58 size -= BITS_PER_LONG;
59 result += BITS_PER_LONG;
60 }
61 while (size & ~(BITS_PER_LONG-1)) {
62 if ((tmp = *(p++)))
63 goto found_middle;
64 result += BITS_PER_LONG;
65 size -= BITS_PER_LONG;
66 }
67 if (!size)
68 return result;
69 tmp = *p;
70found_first:
71 tmp &= (~0UL << (BITS_PER_LONG - size));
72 if (!tmp) /* Are any bits set? */
73 return result + size; /* Nope. */
74found_middle:
75 return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
76}
77EXPORT_SYMBOL(find_next_bit_inv);
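The header comment above explains the MSB0 convention these helpers implement: bit 0 is the most significant bit of the first word, so a forward scan of the bitmap matches the order in which the hardware sets bits. Here is a small portable sketch of the same idea using __builtin_clzl instead of the kernel's __fls()-based expression; the function name is illustrative and, unlike the kernel version, the sketch assumes size is a multiple of BITS_PER_LONG and does not mask stray bits in a final partial word.

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/*
 * MSB0 ("inverted") numbering: bit 0 is the MSB of word 0, bit
 * BITS_PER_LONG - 1 its LSB, bit BITS_PER_LONG the MSB of word 1, and so on.
 */
static unsigned long first_bit_inv(const unsigned long *addr, unsigned long size)
{
	unsigned long i, words = size / BITS_PER_LONG;

	for (i = 0; i < words; i++)
		if (addr[i])
			/* The count of leading zero bits is exactly the MSB0
			 * number of the first set bit within the word. */
			return i * BITS_PER_LONG + __builtin_clzl(addr[i]);
	return size;	/* no bit set */
}

int main(void)
{
	unsigned long map[2] = { 0, 1UL << (BITS_PER_LONG - 3) };

	/* MSB0 bit 2 of word 1: prints 66 with 64-bit longs, 34 with 32-bit */
	printf("%lu\n", first_bit_inv(map, 2 * BITS_PER_LONG));
	return 0;
}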
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 1829742bf479..4b7993bf69b9 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -65,13 +65,6 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
65 return size; 65 return size;
66} 66}
67 67
68static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
69{
70 if (size <= 256)
71 return copy_from_user_std(size, ptr, x);
72 return copy_from_user_mvcos(size, ptr, x);
73}
74
75static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 68static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
76{ 69{
77 register unsigned long reg0 asm("0") = 0x810000UL; 70 register unsigned long reg0 asm("0") = 0x810000UL;
@@ -101,14 +94,6 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
101 return size; 94 return size;
102} 95}
103 96
104static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
105 const void *x)
106{
107 if (size <= 256)
108 return copy_to_user_std(size, ptr, x);
109 return copy_to_user_mvcos(size, ptr, x);
110}
111
112static size_t copy_in_user_mvcos(size_t size, void __user *to, 97static size_t copy_in_user_mvcos(size_t size, void __user *to,
113 const void __user *from) 98 const void __user *from)
114{ 99{
@@ -201,23 +186,8 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
201} 186}
202 187
203struct uaccess_ops uaccess_mvcos = { 188struct uaccess_ops uaccess_mvcos = {
204 .copy_from_user = copy_from_user_mvcos_check,
205 .copy_from_user_small = copy_from_user_std,
206 .copy_to_user = copy_to_user_mvcos_check,
207 .copy_to_user_small = copy_to_user_std,
208 .copy_in_user = copy_in_user_mvcos,
209 .clear_user = clear_user_mvcos,
210 .strnlen_user = strnlen_user_std,
211 .strncpy_from_user = strncpy_from_user_std,
212 .futex_atomic_op = futex_atomic_op_std,
213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
214};
215
216struct uaccess_ops uaccess_mvcos_switch = {
217 .copy_from_user = copy_from_user_mvcos, 189 .copy_from_user = copy_from_user_mvcos,
218 .copy_from_user_small = copy_from_user_mvcos,
219 .copy_to_user = copy_to_user_mvcos, 190 .copy_to_user = copy_to_user_mvcos,
220 .copy_to_user_small = copy_to_user_mvcos,
221 .copy_in_user = copy_in_user_mvcos, 191 .copy_in_user = copy_in_user_mvcos,
222 .clear_user = clear_user_mvcos, 192 .clear_user = clear_user_mvcos,
223 .strnlen_user = strnlen_user_mvcos, 193 .strnlen_user = strnlen_user_mvcos,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 1694d738b175..97e03caf7825 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -461,9 +461,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
461 461
462struct uaccess_ops uaccess_pt = { 462struct uaccess_ops uaccess_pt = {
463 .copy_from_user = copy_from_user_pt, 463 .copy_from_user = copy_from_user_pt,
464 .copy_from_user_small = copy_from_user_pt,
465 .copy_to_user = copy_to_user_pt, 464 .copy_to_user = copy_to_user_pt,
466 .copy_to_user_small = copy_to_user_pt,
467 .copy_in_user = copy_in_user_pt, 465 .copy_in_user = copy_in_user_pt,
468 .clear_user = clear_user_pt, 466 .clear_user = clear_user_pt,
469 .strnlen_user = strnlen_user_pt, 467 .strnlen_user = strnlen_user_pt,
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644
index 4a75d475b06a..000000000000
--- a/arch/s390/lib/uaccess_std.c
+++ /dev/null
@@ -1,305 +0,0 @@
1/*
2 * Standard user space access functions based on mvcp/mvcs and doing
3 * interesting things in the secondary space mode.
4 *
5 * Copyright IBM Corp. 2006
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Gerald Schaefer (gerald.schaefer@de.ibm.com)
8 */
9
10#include <linux/errno.h>
11#include <linux/mm.h>
12#include <linux/uaccess.h>
13#include <asm/futex.h>
14#include "uaccess.h"
15
16#ifndef CONFIG_64BIT
17#define AHI "ahi"
18#define ALR "alr"
19#define CLR "clr"
20#define LHI "lhi"
21#define SLR "slr"
22#else
23#define AHI "aghi"
24#define ALR "algr"
25#define CLR "clgr"
26#define LHI "lghi"
27#define SLR "slgr"
28#endif
29
30size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
31{
32 unsigned long tmp1, tmp2;
33
34 tmp1 = -256UL;
35 asm volatile(
36 "0: mvcp 0(%0,%2),0(%1),%3\n"
37 "10:jz 8f\n"
38 "1:"ALR" %0,%3\n"
39 " la %1,256(%1)\n"
40 " la %2,256(%2)\n"
41 "2: mvcp 0(%0,%2),0(%1),%3\n"
42 "11:jnz 1b\n"
43 " j 8f\n"
44 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
45 " "LHI" %3,-4096\n"
46 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
47 " "SLR" %4,%1\n"
48 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
49 " jnh 5f\n"
50 "4: mvcp 0(%4,%2),0(%1),%3\n"
51 "12:"SLR" %0,%4\n"
52 " "ALR" %2,%4\n"
53 "5:"LHI" %4,-1\n"
54 " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
55 " bras %3,7f\n" /* memset loop */
56 " xc 0(1,%2),0(%2)\n"
57 "6: xc 0(256,%2),0(%2)\n"
58 " la %2,256(%2)\n"
59 "7:"AHI" %4,-256\n"
60 " jnm 6b\n"
61 " ex %4,0(%3)\n"
62 " j 9f\n"
63 "8:"SLR" %0,%0\n"
64 "9: \n"
65 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
66 EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
67 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
68 : : "cc", "memory");
69 return size;
70}
71
72static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
73 void *x)
74{
75 if (size <= 1024)
76 return copy_from_user_std(size, ptr, x);
77 return copy_from_user_pt(size, ptr, x);
78}
79
80size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
81{
82 unsigned long tmp1, tmp2;
83
84 tmp1 = -256UL;
85 asm volatile(
86 "0: mvcs 0(%0,%1),0(%2),%3\n"
87 "7: jz 5f\n"
88 "1:"ALR" %0,%3\n"
89 " la %1,256(%1)\n"
90 " la %2,256(%2)\n"
91 "2: mvcs 0(%0,%1),0(%2),%3\n"
92 "8: jnz 1b\n"
93 " j 5f\n"
94 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
95 " "LHI" %3,-4096\n"
96 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
97 " "SLR" %4,%1\n"
98 " "CLR" %0,%4\n" /* copy crosses next page boundary? */
99 " jnh 6f\n"
100 "4: mvcs 0(%4,%1),0(%2),%3\n"
101 "9:"SLR" %0,%4\n"
102 " j 6f\n"
103 "5:"SLR" %0,%0\n"
104 "6: \n"
105 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
106 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
107 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
108 : : "cc", "memory");
109 return size;
110}
111
112static size_t copy_to_user_std_check(size_t size, void __user *ptr,
113 const void *x)
114{
115 if (size <= 1024)
116 return copy_to_user_std(size, ptr, x);
117 return copy_to_user_pt(size, ptr, x);
118}
119
120static size_t copy_in_user_std(size_t size, void __user *to,
121 const void __user *from)
122{
123 unsigned long tmp1;
124
125 asm volatile(
126 " sacf 256\n"
127 " "AHI" %0,-1\n"
128 " jo 5f\n"
129 " bras %3,3f\n"
130 "0:"AHI" %0,257\n"
131 "1: mvc 0(1,%1),0(%2)\n"
132 " la %1,1(%1)\n"
133 " la %2,1(%2)\n"
134 " "AHI" %0,-1\n"
135 " jnz 1b\n"
136 " j 5f\n"
137 "2: mvc 0(256,%1),0(%2)\n"
138 " la %1,256(%1)\n"
139 " la %2,256(%2)\n"
140 "3:"AHI" %0,-256\n"
141 " jnm 2b\n"
142 "4: ex %0,1b-0b(%3)\n"
143 "5: "SLR" %0,%0\n"
144 "6: sacf 0\n"
145 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
146 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
147 : : "cc", "memory");
148 return size;
149}
150
151static size_t clear_user_std(size_t size, void __user *to)
152{
153 unsigned long tmp1, tmp2;
154
155 asm volatile(
156 " sacf 256\n"
157 " "AHI" %0,-1\n"
158 " jo 5f\n"
159 " bras %3,3f\n"
160 " xc 0(1,%1),0(%1)\n"
161 "0:"AHI" %0,257\n"
162 " la %2,255(%1)\n" /* %2 = ptr + 255 */
163 " srl %2,12\n"
164 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
165 " "SLR" %2,%1\n"
166 " "CLR" %0,%2\n" /* clear crosses next page boundary? */
167 " jnh 5f\n"
168 " "AHI" %2,-1\n"
169 "1: ex %2,0(%3)\n"
170 " "AHI" %2,1\n"
171 " "SLR" %0,%2\n"
172 " j 5f\n"
173 "2: xc 0(256,%1),0(%1)\n"
174 " la %1,256(%1)\n"
175 "3:"AHI" %0,-256\n"
176 " jnm 2b\n"
177 "4: ex %0,0(%3)\n"
178 "5: "SLR" %0,%0\n"
179 "6: sacf 0\n"
180 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
181 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
182 : : "cc", "memory");
183 return size;
184}
185
186size_t strnlen_user_std(size_t size, const char __user *src)
187{
188 register unsigned long reg0 asm("0") = 0UL;
189 unsigned long tmp1, tmp2;
190
191 if (unlikely(!size))
192 return 0;
193 asm volatile(
194 " la %2,0(%1)\n"
195 " la %3,0(%0,%1)\n"
196 " "SLR" %0,%0\n"
197 " sacf 256\n"
198 "0: srst %3,%2\n"
199 " jo 0b\n"
200 " la %0,1(%3)\n" /* strnlen_user results includes \0 */
201 " "SLR" %0,%1\n"
202 "1: sacf 0\n"
203 EX_TABLE(0b,1b)
204 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
205 : "d" (reg0) : "cc", "memory");
206 return size;
207}
208
209size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
210{
211 size_t done, len, offset, len_str;
212
213 if (unlikely(!count))
214 return 0;
215 done = 0;
216 do {
217 offset = (size_t)src & ~PAGE_MASK;
218 len = min(count - done, PAGE_SIZE - offset);
219 if (copy_from_user_std(len, src, dst))
220 return -EFAULT;
221 len_str = strnlen(dst, len);
222 done += len_str;
223 src += len_str;
224 dst += len_str;
225 } while ((len_str == len) && (done < count));
226 return done;
227}
228
229#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
230 asm volatile( \
231 " sacf 256\n" \
232 "0: l %1,0(%6)\n" \
233 "1:"insn \
234 "2: cs %1,%2,0(%6)\n" \
235 "3: jl 1b\n" \
236 " lhi %0,0\n" \
237 "4: sacf 0\n" \
238 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
239 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
240 "=m" (*uaddr) \
241 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
242 "m" (*uaddr) : "cc");
243
244int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
245{
246 int oldval = 0, newval, ret;
247
248 switch (op) {
249 case FUTEX_OP_SET:
250 __futex_atomic_op("lr %2,%5\n",
251 ret, oldval, newval, uaddr, oparg);
252 break;
253 case FUTEX_OP_ADD:
254 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
255 ret, oldval, newval, uaddr, oparg);
256 break;
257 case FUTEX_OP_OR:
258 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
259 ret, oldval, newval, uaddr, oparg);
260 break;
261 case FUTEX_OP_ANDN:
262 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
263 ret, oldval, newval, uaddr, oparg);
264 break;
265 case FUTEX_OP_XOR:
266 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
267 ret, oldval, newval, uaddr, oparg);
268 break;
269 default:
270 ret = -ENOSYS;
271 }
272 *old = oldval;
273 return ret;
274}
275
276int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
277 u32 oldval, u32 newval)
278{
279 int ret;
280
281 asm volatile(
282 " sacf 256\n"
283 "0: cs %1,%4,0(%5)\n"
284 "1: la %0,0\n"
285 "2: sacf 0\n"
286 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
287 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
288 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
289 : "cc", "memory" );
290 *uval = oldval;
291 return ret;
292}
293
294struct uaccess_ops uaccess_std = {
295 .copy_from_user = copy_from_user_std_check,
296 .copy_from_user_small = copy_from_user_std,
297 .copy_to_user = copy_to_user_std_check,
298 .copy_to_user_small = copy_to_user_std,
299 .copy_in_user = copy_in_user_std,
300 .clear_user = clear_user_std,
301 .strnlen_user = strnlen_user_std,
302 .strncpy_from_user = strncpy_from_user_std,
303 .futex_atomic_op = futex_atomic_op_std,
304 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
305};
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 58bff541fde9..a6ba0d724335 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -19,6 +19,8 @@
19#include <math-emu/double.h> 19#include <math-emu/double.h>
20#include <math-emu/quad.h> 20#include <math-emu/quad.h>
21 21
22#define FPC_VALID_MASK 0xF8F8FF03
23
22/* 24/*
23 * I miss a macro to round a floating point number to the 25 * I miss a macro to round a floating point number to the
24 * nearest integer in the same floating point format. 26 * nearest integer in the same floating point format.
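For readers wondering what the magic constant covers: assuming the usual z/Architecture floating-point-control layout (my gloss, not something the patch states), 0xF8F8FF03 is the union of the IEEE exception mask byte, the IEEE flag byte, the data-exception code and the two rounding-mode bits. The decomposition below is only a worked check of that assumption.

#include <assert.h>

#define FPC_EXC_MASKS	0xF8000000U	/* assumed: IEEE exception masks (bits 0-4)  */
#define FPC_EXC_FLAGS	0x00F80000U	/* assumed: IEEE exception flags (bits 8-12) */
#define FPC_DXC		0x0000FF00U	/* assumed: data-exception code (bits 16-23) */
#define FPC_RND_MODE	0x00000003U	/* assumed: rounding mode (bits 30-31)       */

int main(void)
{
	assert((FPC_EXC_MASKS | FPC_EXC_FLAGS | FPC_DXC | FPC_RND_MODE)
	       == 0xF8F8FF03U);
	return 0;
}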
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 9d84a1feefef..79ddd580d605 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -253,12 +253,12 @@ static int cmm_skip_blanks(char *cp, char **endp)
253 253
254static struct ctl_table cmm_table[]; 254static struct ctl_table cmm_table[];
255 255
256static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer, 256static int cmm_pages_handler(struct ctl_table *ctl, int write,
257 size_t *lenp, loff_t *ppos) 257 void __user *buffer, size_t *lenp, loff_t *ppos)
258{ 258{
259 char buf[16], *p; 259 char buf[16], *p;
260 unsigned int len;
260 long nr; 261 long nr;
261 int len;
262 262
263 if (!*lenp || (*ppos && !write)) { 263 if (!*lenp || (*ppos && !write)) {
264 *lenp = 0; 264 *lenp = 0;
@@ -293,12 +293,12 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
293 return 0; 293 return 0;
294} 294}
295 295
296static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer, 296static int cmm_timeout_handler(struct ctl_table *ctl, int write,
297 size_t *lenp, loff_t *ppos) 297 void __user *buffer, size_t *lenp, loff_t *ppos)
298{ 298{
299 char buf[64], *p; 299 char buf[64], *p;
300 long nr, seconds; 300 long nr, seconds;
301 int len; 301 unsigned int len;
302 302
303 if (!*lenp || (*ppos && !write)) { 303 if (!*lenp || (*ppos && !write)) {
304 *lenp = 0; 304 *lenp = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fc6679210d83..d95265b2719f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -115,13 +115,8 @@ static inline int user_space_fault(unsigned long trans_exc_code)
115 if (trans_exc_code == 2) 115 if (trans_exc_code == 2)
116 /* Access via secondary space, set_fs setting decides */ 116 /* Access via secondary space, set_fs setting decides */
117 return current->thread.mm_segment.ar4; 117 return current->thread.mm_segment.ar4;
118 if (s390_user_mode == HOME_SPACE_MODE)
119 /* User space if the access has been done via home space. */
120 return trans_exc_code == 3;
121 /* 118 /*
122 * If the user space is not the home space the kernel runs in home 119 * Access via primary space or access register is from user space
123 * space. Access via secondary space has already been covered,
124 * access via primary space or access register is from user space
125 * and access via home space is from the kernel. 120 * and access via home space is from the kernel.
126 */ 121 */
127 return trans_exc_code != 3; 122 return trans_exc_code != 3;
@@ -428,50 +423,13 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
428 do_fault_error(regs, fault); 423 do_fault_error(regs, fault);
429} 424}
430 425
431#ifdef CONFIG_64BIT
432void __kprobes do_asce_exception(struct pt_regs *regs)
433{
434 struct mm_struct *mm = current->mm;
435 struct vm_area_struct *vma;
436 unsigned long trans_exc_code;
437
438 /*
439 * The instruction that caused the program check has
440 * been nullified. Don't signal single step via SIGTRAP.
441 */
442 clear_tsk_thread_flag(current, TIF_PER_TRAP);
443
444 trans_exc_code = regs->int_parm_long;
445 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
446 goto no_context;
447
448 down_read(&mm->mmap_sem);
449 vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
450 up_read(&mm->mmap_sem);
451
452 if (vma) {
453 update_mm(mm, current);
454 return;
455 }
456
457 /* User mode accesses just cause a SIGSEGV */
458 if (user_mode(regs)) {
459 do_sigsegv(regs, SEGV_MAPERR);
460 return;
461 }
462
463no_context:
464 do_no_context(regs);
465}
466#endif
467
468int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) 426int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
469{ 427{
470 struct pt_regs regs; 428 struct pt_regs regs;
471 int access, fault; 429 int access, fault;
472 430
473 /* Emulate a uaccess fault from kernel mode. */ 431 /* Emulate a uaccess fault from kernel mode. */
474 regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; 432 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
475 if (!irqs_disabled()) 433 if (!irqs_disabled())
476 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; 434 regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
477 regs.psw.addr = (unsigned long) __builtin_return_address(0); 435 regs.psw.addr = (unsigned long) __builtin_return_address(0);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5d758db27bdc..639fce464008 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -180,9 +180,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
180 addr = start; 180 addr = start;
181 len = (unsigned long) nr_pages << PAGE_SHIFT; 181 len = (unsigned long) nr_pages << PAGE_SHIFT;
182 end = start + len; 182 end = start + len;
183 if ((end < start) || (end > TASK_SIZE)) 183 if ((end <= start) || (end > TASK_SIZE))
184 return 0; 184 return 0;
185 185 /*
186 * local_irq_save() doesn't prevent pagetable teardown, but does
187 * prevent the pagetables from being freed on s390.
188 *
189 * So long as we atomically load page table pointers versus teardown,
190 * we can follow the address down to the the page and take a ref on it.
191 */
186 local_irq_save(flags); 192 local_irq_save(flags);
187 pgdp = pgd_offset(mm, addr); 193 pgdp = pgd_offset(mm, addr);
188 do { 194 do {
@@ -219,63 +225,22 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
219 struct page **pages) 225 struct page **pages)
220{ 226{
221 struct mm_struct *mm = current->mm; 227 struct mm_struct *mm = current->mm;
222 unsigned long addr, len, end; 228 int nr, ret;
223 unsigned long next;
224 pgd_t *pgdp, pgd;
225 int nr = 0;
226 229
227 start &= PAGE_MASK; 230 start &= PAGE_MASK;
228 addr = start; 231 nr = __get_user_pages_fast(start, nr_pages, write, pages);
229 len = (unsigned long) nr_pages << PAGE_SHIFT; 232 if (nr == nr_pages)
230 end = start + len; 233 return nr;
231 if ((end < start) || (end > TASK_SIZE)) 234
232 goto slow_irqon; 235 /* Try to get the remaining pages with get_user_pages */
233 236 start += nr << PAGE_SHIFT;
234 /* 237 pages += nr;
235 * local_irq_disable() doesn't prevent pagetable teardown, but does 238 down_read(&mm->mmap_sem);
236 * prevent the pagetables from being freed on s390. 239 ret = get_user_pages(current, mm, start,
237 * 240 nr_pages - nr, write, 0, pages, NULL);
238 * So long as we atomically load page table pointers versus teardown, 241 up_read(&mm->mmap_sem);
239 * we can follow the address down to the the page and take a ref on it. 242 /* Have to be a bit careful with return values */
240 */ 243 if (nr > 0)
241 local_irq_disable(); 244 ret = (ret < 0) ? nr : ret + nr;
242 pgdp = pgd_offset(mm, addr); 245 return ret;
243 do {
244 pgd = *pgdp;
245 barrier();
246 next = pgd_addr_end(addr, end);
247 if (pgd_none(pgd))
248 goto slow;
249 if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
250 goto slow;
251 } while (pgdp++, addr = next, addr != end);
252 local_irq_enable();
253
254 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
255 return nr;
256
257 {
258 int ret;
259slow:
260 local_irq_enable();
261slow_irqon:
262 /* Try to get the remaining pages with get_user_pages */
263 start += nr << PAGE_SHIFT;
264 pages += nr;
265
266 down_read(&mm->mmap_sem);
267 ret = get_user_pages(current, mm, start,
268 (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
269 up_read(&mm->mmap_sem);
270
271 /* Have to be a bit careful with return values */
272 if (nr > 0) {
273 if (ret < 0)
274 ret = nr;
275 else
276 ret += nr;
277 }
278
279 return ret;
280 }
281} 246}
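After this rewrite get_user_pages_fast() is just the irq-save fast path plus a get_user_pages() fallback for whatever was not pinned; the only subtle part left is merging the two results. A tiny standalone rendition of that "careful with return values" logic follows (the function name is made up):

#include <stdio.h>

/*
 * nr  - pages pinned by the fast path (>= 0)
 * ret - result of the slow-path fallback: pages pinned, or a negative errno
 *
 * If the fast path pinned anything, report at least those pages and only
 * add the slow-path count when it succeeded; otherwise pass the error up.
 */
static long combine_gup_results(long nr, long ret)
{
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}

int main(void)
{
	printf("%ld\n", combine_gup_results(3, 5));	/* 8   */
	printf("%ld\n", combine_gup_results(3, -14));	/* 3   */
	printf("%ld\n", combine_gup_results(0, -14));	/* -14 */
	return 0;
}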
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 40023290ee5b..6bcb045d2bd2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,18 +101,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
101 101
102int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) 102int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
103{ 103{
104 int rc;
105
106 if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) 104 if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
107 return 0; 105 return 0;
108 if (!(flags & MAP_FIXED)) 106 if (!(flags & MAP_FIXED))
109 addr = 0; 107 addr = 0;
110 if ((addr + len) >= TASK_SIZE) { 108 if ((addr + len) >= TASK_SIZE)
111 rc = crst_table_upgrade(current->mm, 1UL << 53); 109 return crst_table_upgrade(current->mm, 1UL << 53);
112 if (rc)
113 return rc;
114 update_mm(current->mm, current);
115 }
116 return 0; 110 return 0;
117} 111}
118 112
@@ -132,7 +126,6 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
132 rc = crst_table_upgrade(mm, 1UL << 53); 126 rc = crst_table_upgrade(mm, 1UL << 53);
133 if (rc) 127 if (rc)
134 return (unsigned long) rc; 128 return (unsigned long) rc;
135 update_mm(mm, current);
136 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 129 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
137 } 130 }
138 return area; 131 return area;
@@ -155,7 +148,6 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
155 rc = crst_table_upgrade(mm, 1UL << 53); 148 rc = crst_table_upgrade(mm, 1UL << 53);
156 if (rc) 149 if (rc)
157 return (unsigned long) rc; 150 return (unsigned long) rc;
158 update_mm(mm, current);
159 area = arch_get_unmapped_area_topdown(filp, addr, len, 151 area = arch_get_unmapped_area_topdown(filp, addr, len,
160 pgoff, flags); 152 pgoff, flags);
161 } 153 }
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 990397420e6b..8400f494623f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,6 +9,7 @@
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10#include <asm/page.h> 10#include <asm/page.h>
11 11
12#if PAGE_DEFAULT_KEY
12static inline unsigned long sske_frame(unsigned long addr, unsigned char skey) 13static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
13{ 14{
14 asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0" 15 asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -16,7 +17,7 @@ static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
16 return addr; 17 return addr;
17} 18}
18 19
19void storage_key_init_range(unsigned long start, unsigned long end) 20void __storage_key_init_range(unsigned long start, unsigned long end)
20{ 21{
21 unsigned long boundary, size; 22 unsigned long boundary, size;
22 23
@@ -36,6 +37,7 @@ void storage_key_init_range(unsigned long start, unsigned long end)
36 start += PAGE_SIZE; 37 start += PAGE_SIZE;
37 } 38 }
38} 39}
40#endif
39 41
40static pte_t *walk_page_table(unsigned long addr) 42static pte_t *walk_page_table(unsigned long addr)
41{ 43{
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index de8cbc30dcd1..0a2e5e086749 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -48,12 +48,23 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
48} 48}
49 49
50#ifdef CONFIG_64BIT 50#ifdef CONFIG_64BIT
51static void __crst_table_upgrade(void *arg)
52{
53 struct mm_struct *mm = arg;
54
55 if (current->active_mm == mm)
56 update_mm(mm, current);
57 __tlb_flush_local();
58}
59
51int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 60int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
52{ 61{
53 unsigned long *table, *pgd; 62 unsigned long *table, *pgd;
54 unsigned long entry; 63 unsigned long entry;
64 int flush;
55 65
56 BUG_ON(limit > (1UL << 53)); 66 BUG_ON(limit > (1UL << 53));
67 flush = 0;
57repeat: 68repeat:
58 table = crst_table_alloc(mm); 69 table = crst_table_alloc(mm);
59 if (!table) 70 if (!table)
@@ -79,12 +90,15 @@ repeat:
79 mm->pgd = (pgd_t *) table; 90 mm->pgd = (pgd_t *) table;
80 mm->task_size = mm->context.asce_limit; 91 mm->task_size = mm->context.asce_limit;
81 table = NULL; 92 table = NULL;
93 flush = 1;
82 } 94 }
83 spin_unlock_bh(&mm->page_table_lock); 95 spin_unlock_bh(&mm->page_table_lock);
84 if (table) 96 if (table)
85 crst_table_free(mm, table); 97 crst_table_free(mm, table);
86 if (mm->context.asce_limit < limit) 98 if (mm->context.asce_limit < limit)
87 goto repeat; 99 goto repeat;
100 if (flush)
101 on_each_cpu(__crst_table_upgrade, mm, 0);
88 return 0; 102 return 0;
89} 103}
90 104
@@ -92,6 +106,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
92{ 106{
93 pgd_t *pgd; 107 pgd_t *pgd;
94 108
109 if (current->active_mm == mm)
110 __tlb_flush_mm(mm);
95 while (mm->context.asce_limit > limit) { 111 while (mm->context.asce_limit > limit) {
96 pgd = mm->pgd; 112 pgd = mm->pgd;
97 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 113 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -114,6 +130,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
114 mm->task_size = mm->context.asce_limit; 130 mm->task_size = mm->context.asce_limit;
115 crst_table_free(mm, (unsigned long *) pgd); 131 crst_table_free(mm, (unsigned long *) pgd);
116 } 132 }
133 if (current->active_mm == mm)
134 update_mm(mm, current);
117} 135}
118#endif 136#endif
119 137
@@ -1087,10 +1105,9 @@ again:
1087 continue; 1105 continue;
1088 /* Allocate new page table with pgstes */ 1106 /* Allocate new page table with pgstes */
1089 new = page_table_alloc_pgste(mm, addr); 1107 new = page_table_alloc_pgste(mm, addr);
1090 if (!new) { 1108 if (!new)
1091 mm->context.has_pgste = 0; 1109 return -ENOMEM;
1092 continue; 1110
1093 }
1094 spin_lock(&mm->page_table_lock); 1111 spin_lock(&mm->page_table_lock);
1095 if (likely((unsigned long *) pmd_deref(*pmd) == table)) { 1112 if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
1096 /* Nuke pmd entry pointing to the "short" page table */ 1113 /* Nuke pmd entry pointing to the "short" page table */
@@ -1128,13 +1145,15 @@ static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
1128 if (pud_none_or_clear_bad(pud)) 1145 if (pud_none_or_clear_bad(pud))
1129 continue; 1146 continue;
1130 next = page_table_realloc_pmd(tlb, mm, pud, addr, next); 1147 next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
1148 if (unlikely(IS_ERR_VALUE(next)))
1149 return next;
1131 } while (pud++, addr = next, addr != end); 1150 } while (pud++, addr = next, addr != end);
1132 1151
1133 return addr; 1152 return addr;
1134} 1153}
1135 1154
1136static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm, 1155static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1137 unsigned long addr, unsigned long end) 1156 unsigned long addr, unsigned long end)
1138{ 1157{
1139 unsigned long next; 1158 unsigned long next;
1140 pgd_t *pgd; 1159 pgd_t *pgd;
@@ -1145,7 +1164,11 @@ static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1145 if (pgd_none_or_clear_bad(pgd)) 1164 if (pgd_none_or_clear_bad(pgd))
1146 continue; 1165 continue;
1147 next = page_table_realloc_pud(tlb, mm, pgd, addr, next); 1166 next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
1167 if (unlikely(IS_ERR_VALUE(next)))
1168 return next;
1148 } while (pgd++, addr = next, addr != end); 1169 } while (pgd++, addr = next, addr != end);
1170
1171 return 0;
1149} 1172}
1150 1173
1151/* 1174/*
@@ -1157,10 +1180,6 @@ int s390_enable_sie(void)
1157 struct mm_struct *mm = tsk->mm; 1180 struct mm_struct *mm = tsk->mm;
1158 struct mmu_gather tlb; 1181 struct mmu_gather tlb;
1159 1182
1160 /* Do we have switched amode? If no, we cannot do sie */
1161 if (s390_user_mode == HOME_SPACE_MODE)
1162 return -EINVAL;
1163
1164 /* Do we have pgstes? if yes, we are done */ 1183 /* Do we have pgstes? if yes, we are done */
1165 if (mm_has_pgste(tsk->mm)) 1184 if (mm_has_pgste(tsk->mm))
1166 return 0; 1185 return 0;
@@ -1169,9 +1188,9 @@ int s390_enable_sie(void)
1169 /* split thp mappings and disable thp for future mappings */ 1188 /* split thp mappings and disable thp for future mappings */
1170 thp_split_mm(mm); 1189 thp_split_mm(mm);
1171 /* Reallocate the page tables with pgstes */ 1190 /* Reallocate the page tables with pgstes */
1172 mm->context.has_pgste = 1;
1173 tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE); 1191 tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
1174 page_table_realloc(&tlb, mm, 0, TASK_SIZE); 1192 if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
1193 mm->context.has_pgste = 1;
1175 tlb_finish_mmu(&tlb, 0, TASK_SIZE); 1194 tlb_finish_mmu(&tlb, 0, TASK_SIZE);
1176 up_write(&mm->mmap_sem); 1195 up_write(&mm->mmap_sem);
1177 return mm->context.has_pgste ? 0 : -ENOMEM; 1196 return mm->context.has_pgste ? 0 : -ENOMEM;
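page_table_realloc() and its helpers now report failure by returning -ENOMEM encoded in their unsigned long "next address" return value and testing it with IS_ERR_VALUE(), instead of silently clearing mm->context.has_pgste. A user-space sketch of that encoding, with the kernel's usual MAX_ERRNO convention assumed and the function name invented:

#include <stdio.h>

#define MAX_ERRNO	4095
/* Rendition of the kernel's IS_ERR_VALUE(): the top 4095 values of the
 * unsigned long range are reserved for negative errno codes. */
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* Returns the next address to process, or a negative errno in disguise. */
static unsigned long realloc_step(int fail)
{
	if (fail)
		return (unsigned long)-12;	/* -ENOMEM */
	return 0x10000;
}

int main(void)
{
	unsigned long next = realloc_step(1);

	if (IS_ERR_VALUE(next))
		printf("error %ld\n", (long)next);	/* error -12 */
	else
		printf("next 0x%lx\n", next);
	return 0;
}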
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index a5df511e27a2..16871da37371 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -12,8 +12,8 @@
12#include <linux/random.h> 12#include <linux/random.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/processor.h>
16#include <asm/facility.h> 15#include <asm/facility.h>
16#include <asm/dis.h>
17 17
18/* 18/*
19 * Conventions: 19 * Conventions:
@@ -156,8 +156,8 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
156 EMIT6(0xeb8ff058, 0x0024); 156 EMIT6(0xeb8ff058, 0x0024);
157 /* lgr %r14,%r15 */ 157 /* lgr %r14,%r15 */
158 EMIT4(0xb90400ef); 158 EMIT4(0xb90400ef);
159 /* ahi %r15,<offset> */ 159 /* aghi %r15,<offset> */
160 EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80); 160 EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
161 /* stg %r14,152(%r15) */ 161 /* stg %r14,152(%r15) */
162 EMIT6(0xe3e0f098, 0x0024); 162 EMIT6(0xe3e0f098, 0x0024);
163 } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL)) 163 } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index f17a8343e360..0c9a17780e4b 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -120,26 +120,17 @@ EXPORT_SYMBOL_GPL(pci_proc_domain);
120static int zpci_set_airq(struct zpci_dev *zdev) 120static int zpci_set_airq(struct zpci_dev *zdev)
121{ 121{
122 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); 122 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
123 struct zpci_fib *fib; 123 struct zpci_fib fib = {0};
124 int rc;
125
126 fib = (void *) get_zeroed_page(GFP_KERNEL);
127 if (!fib)
128 return -ENOMEM;
129 124
130 fib->isc = PCI_ISC; 125 fib.isc = PCI_ISC;
131 fib->sum = 1; /* enable summary notifications */ 126 fib.sum = 1; /* enable summary notifications */
132 fib->noi = airq_iv_end(zdev->aibv); 127 fib.noi = airq_iv_end(zdev->aibv);
133 fib->aibv = (unsigned long) zdev->aibv->vector; 128 fib.aibv = (unsigned long) zdev->aibv->vector;
134 fib->aibvo = 0; /* each zdev has its own interrupt vector */ 129 fib.aibvo = 0; /* each zdev has its own interrupt vector */
135 fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8; 130 fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
136 fib->aisbo = zdev->aisb & 63; 131 fib.aisbo = zdev->aisb & 63;
137 132
138 rc = zpci_mod_fc(req, fib); 133 return zpci_mod_fc(req, &fib);
139 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
140
141 free_page((unsigned long) fib);
142 return rc;
143} 134}
144 135
145struct mod_pci_args { 136struct mod_pci_args {
@@ -152,22 +143,14 @@ struct mod_pci_args {
152static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args) 143static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
153{ 144{
154 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn); 145 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
155 struct zpci_fib *fib; 146 struct zpci_fib fib = {0};
156 int rc;
157
158 /* The FIB must be available even if it's not used */
159 fib = (void *) get_zeroed_page(GFP_KERNEL);
160 if (!fib)
161 return -ENOMEM;
162 147
163 fib->pba = args->base; 148 fib.pba = args->base;
164 fib->pal = args->limit; 149 fib.pal = args->limit;
165 fib->iota = args->iota; 150 fib.iota = args->iota;
166 fib->fmb_addr = args->fmb_addr; 151 fib.fmb_addr = args->fmb_addr;
167 152
168 rc = zpci_mod_fc(req, fib); 153 return zpci_mod_fc(req, &fib);
169 free_page((unsigned long) fib);
170 return rc;
171} 154}
172 155
173/* Modify PCI: Register I/O address translation parameters */ 156/* Modify PCI: Register I/O address translation parameters */
@@ -424,7 +407,6 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
424 struct msi_msg msg; 407 struct msi_msg msg;
425 int rc; 408 int rc;
426 409
427 pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
428 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) 410 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
429 return -EINVAL; 411 return -EINVAL;
430 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); 412 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
@@ -489,7 +471,6 @@ out_msi:
489out_si: 471out_si:
490 airq_iv_free_bit(zpci_aisb_iv, aisb); 472 airq_iv_free_bit(zpci_aisb_iv, aisb);
491out: 473out:
492 dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
493 return rc; 474 return rc;
494} 475}
495 476
@@ -499,14 +480,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
499 struct msi_desc *msi; 480 struct msi_desc *msi;
500 int rc; 481 int rc;
501 482
502 pr_info("%s: on pdev: %p\n", __func__, pdev);
503
504 /* Disable adapter interrupts */ 483 /* Disable adapter interrupts */
505 rc = zpci_clear_airq(zdev); 484 rc = zpci_clear_airq(zdev);
506 if (rc) { 485 if (rc)
507 dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
508 return; 486 return;
509 }
510 487
511 /* Release MSI interrupts */ 488 /* Release MSI interrupts */
512 list_for_each_entry(msi, &pdev->msi_list, list) { 489 list_for_each_entry(msi, &pdev->msi_list, list) {
@@ -625,8 +602,11 @@ static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned lo
625 r->name = name; 602 r->name = name;
626 603
627 rc = request_resource(&iomem_resource, r); 604 rc = request_resource(&iomem_resource, r);
628 if (rc) 605 if (rc) {
629 pr_debug("request resource %pR failed\n", r); 606 kfree(r->name);
607 kfree(r);
608 return ERR_PTR(-ENOMEM);
609 }
630 return r; 610 return r;
631} 611}
632 612
@@ -708,6 +688,47 @@ void pcibios_disable_device(struct pci_dev *pdev)
708 zdev->pdev = NULL; 688 zdev->pdev = NULL;
709} 689}
710 690
691#ifdef CONFIG_HIBERNATE_CALLBACKS
692static int zpci_restore(struct device *dev)
693{
694 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
695 int ret = 0;
696
697 if (zdev->state != ZPCI_FN_STATE_ONLINE)
698 goto out;
699
700 ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
701 if (ret)
702 goto out;
703
704 zpci_map_resources(zdev);
705 zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
706 zdev->start_dma + zdev->iommu_size - 1,
707 (u64) zdev->dma_table);
708
709out:
710 return ret;
711}
712
713static int zpci_freeze(struct device *dev)
714{
715 struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
716
717 if (zdev->state != ZPCI_FN_STATE_ONLINE)
718 return 0;
719
720 zpci_unregister_ioat(zdev, 0);
721 return clp_disable_fh(zdev);
722}
723
724struct dev_pm_ops pcibios_pm_ops = {
725 .thaw_noirq = zpci_restore,
726 .freeze_noirq = zpci_freeze,
727 .restore_noirq = zpci_restore,
728 .poweroff_noirq = zpci_freeze,
729};
730#endif /* CONFIG_HIBERNATE_CALLBACKS */
731
711static int zpci_scan_bus(struct zpci_dev *zdev) 732static int zpci_scan_bus(struct zpci_dev *zdev)
712{ 733{
713 struct resource *res; 734 struct resource *res;
@@ -781,7 +802,6 @@ int zpci_enable_device(struct zpci_dev *zdev)
781 rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES); 802 rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
782 if (rc) 803 if (rc)
783 goto out; 804 goto out;
784 pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
785 805
786 rc = zpci_dma_init_device(zdev); 806 rc = zpci_dma_init_device(zdev);
787 if (rc) 807 if (rc)
@@ -901,10 +921,6 @@ static int __init pci_base_init(void)
901 || !test_facility(71) || !test_facility(72)) 921 || !test_facility(71) || !test_facility(72))
902 return 0; 922 return 0;
903 923
904 pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
905 test_facility(69), test_facility(70),
906 test_facility(71));
907
908 rc = zpci_debug_init(); 924 rc = zpci_debug_init();
909 if (rc) 925 if (rc)
910 goto out; 926 goto out;
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 475563c3d1e4..84147984224a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -16,6 +16,16 @@
16#include <asm/pci_debug.h> 16#include <asm/pci_debug.h>
17#include <asm/pci_clp.h> 17#include <asm/pci_clp.h>
18 18
19static inline void zpci_err_clp(unsigned int rsp, int rc)
20{
21 struct {
22 unsigned int rsp;
23 int rc;
24 } __packed data = {rsp, rc};
25
26 zpci_err_hex(&data, sizeof(data));
27}
28
19/* 29/*
20 * Call Logical Processor 30 * Call Logical Processor
21 * Retry logic is handled by the caller. 31 * Retry logic is handled by the caller.
@@ -54,7 +64,6 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
54 zdev->msi_addr = response->msia; 64 zdev->msi_addr = response->msia;
55 zdev->fmb_update = response->mui; 65 zdev->fmb_update = response->mui;
56 66
57 pr_debug("Supported number of MSI vectors: %u\n", response->noi);
58 switch (response->version) { 67 switch (response->version) {
59 case 1: 68 case 1:
60 zdev->max_bus_speed = PCIE_SPEED_5_0GT; 69 zdev->max_bus_speed = PCIE_SPEED_5_0GT;
@@ -84,8 +93,8 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
84 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) 93 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
85 clp_store_query_pci_fngrp(zdev, &rrb->response); 94 clp_store_query_pci_fngrp(zdev, &rrb->response);
86 else { 95 else {
87 pr_err("Query PCI FNGRP failed with response: %x cc: %d\n", 96 zpci_err("Q PCI FGRP:\n");
88 rrb->response.hdr.rsp, rc); 97 zpci_err_clp(rrb->response.hdr.rsp, rc);
89 rc = -EIO; 98 rc = -EIO;
90 } 99 }
91 clp_free_block(rrb); 100 clp_free_block(rrb);
@@ -131,8 +140,8 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
131 if (rrb->response.pfgid) 140 if (rrb->response.pfgid)
132 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid); 141 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
133 } else { 142 } else {
134 pr_err("Query PCI failed with response: %x cc: %d\n", 143 zpci_err("Q PCI FN:\n");
135 rrb->response.hdr.rsp, rc); 144 zpci_err_clp(rrb->response.hdr.rsp, rc);
136 rc = -EIO; 145 rc = -EIO;
137 } 146 }
138out: 147out:
@@ -206,8 +215,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
206 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) 215 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
207 *fh = rrb->response.fh; 216 *fh = rrb->response.fh;
208 else { 217 else {
209 zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc, 218 zpci_err("Set PCI FN:\n");
210 rrb->response.hdr.rsp); 219 zpci_err_clp(rrb->response.hdr.rsp, rc);
211 rc = -EIO; 220 rc = -EIO;
212 } 221 }
213 clp_free_block(rrb); 222 clp_free_block(rrb);
@@ -262,8 +271,8 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
262 /* Get PCI function handle list */ 271 /* Get PCI function handle list */
263 rc = clp_instr(rrb); 272 rc = clp_instr(rrb);
264 if (rc || rrb->response.hdr.rsp != CLP_RC_OK) { 273 if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
265 pr_err("List PCI failed with response: 0x%x cc: %d\n", 274 zpci_err("List PCI FN:\n");
266 rrb->response.hdr.rsp, rc); 275 zpci_err_clp(rrb->response.hdr.rsp, rc);
267 rc = -EIO; 276 rc = -EIO;
268 goto out; 277 goto out;
269 } 278 }
@@ -273,17 +282,11 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
273 282
274 entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) / 283 entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
275 rrb->response.entry_size; 284 rrb->response.entry_size;
276 pr_info("Detected number of PCI functions: %u\n", entries);
277 285
278 /* Store the returned resume token as input for the next call */
279 resume_token = rrb->response.resume_token; 286 resume_token = rrb->response.resume_token;
280
281 for (i = 0; i < entries; i++) 287 for (i = 0; i < entries; i++)
282 cb(&rrb->response.fh_list[i]); 288 cb(&rrb->response.fh_list[i]);
283 } while (resume_token); 289 } while (resume_token);
284
285 pr_debug("Maximum number of supported PCI functions: %u\n",
286 rrb->response.max_fn);
287out: 290out:
288 return rc; 291 return rc;
289} 292}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7e5573acb063..9b83d080902d 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -145,10 +145,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
145 return -EINVAL; 145 return -EINVAL;
146 146
147 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); 147 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
148 if (!zdev->dma_table) { 148 if (!zdev->dma_table)
149 dev_err(&zdev->pdev->dev, "Missing DMA table\n");
150 goto no_refresh; 149 goto no_refresh;
151 }
152 150
153 for (i = 0; i < nr_pages; i++) { 151 for (i = 0; i < nr_pages; i++) {
154 dma_update_cpu_trans(zdev, page_addr, dma_addr, flags); 152 dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
@@ -280,11 +278,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
280 size = nr_pages * PAGE_SIZE; 278 size = nr_pages * PAGE_SIZE;
281 279
282 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; 280 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
283 if (dma_addr + size > zdev->end_dma) { 281 if (dma_addr + size > zdev->end_dma)
284 dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
285 dma_addr, size, zdev->end_dma);
286 goto out_free; 282 goto out_free;
287 }
288 283
289 if (direction == DMA_NONE || direction == DMA_TO_DEVICE) 284 if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
290 flags |= ZPCI_TABLE_PROTECTED; 285 flags |= ZPCI_TABLE_PROTECTED;
@@ -297,7 +292,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
297out_free: 292out_free:
298 dma_free_iommu(zdev, iommu_page_index, nr_pages); 293 dma_free_iommu(zdev, iommu_page_index, nr_pages);
299out_err: 294out_err:
300 dev_err(dev, "Failed to map addr: %lx\n", pa); 295 zpci_err("map error:\n");
296 zpci_err_hex(&pa, sizeof(pa));
301 return DMA_ERROR_CODE; 297 return DMA_ERROR_CODE;
302} 298}
303 299
@@ -312,8 +308,10 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
312 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 308 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
313 dma_addr = dma_addr & PAGE_MASK; 309 dma_addr = dma_addr & PAGE_MASK;
314 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, 310 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
315 ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) 311 ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
316 dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr); 312 zpci_err("unmap error:\n");
313 zpci_err_hex(&dma_addr, sizeof(dma_addr));
314 }
317 315
318 atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages); 316 atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
319 iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; 317 iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 0aecaf954845..278e671ec9ac 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <asm/pci_debug.h>
 
 /* Content Code Description for PCI Function Error */
 struct zpci_ccdf_err {
@@ -41,25 +42,15 @@ struct zpci_ccdf_avail {
 	u16 pec;			/* PCI event code */
 } __packed;
 
-static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
-{
-	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-
-	zpci_err("SEI error CCD:\n");
-	zpci_err_hex(ccdf, sizeof(*ccdf));
-	dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
-}
-
 static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+	struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
 
-	pr_err("%s%s: availability event: fh: 0x%x  fid: 0x%x  event code: 0x%x  reason:",
-		(zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
-		(zdev) ? dev_name(&zdev->pdev->dev) : "?",
-		ccdf->fh, ccdf->fid, ccdf->pec);
-	print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
-		16, 1, ccdf, sizeof(*ccdf), false);
+	pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
+		pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+	zpci_err("avail CCDF:\n");
+	zpci_err_hex(ccdf, sizeof(*ccdf));
 
 	switch (ccdf->pec) {
 	case 0x0301:
@@ -79,14 +70,16 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
 void zpci_event_error(void *data)
 {
 	struct zpci_ccdf_err *ccdf = data;
-	struct zpci_dev *zdev;
+	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+	zpci_err("error CCDF:\n");
+	zpci_err_hex(ccdf, sizeof(*ccdf));
 
-	zpci_event_log_err(ccdf);
-	zdev = get_zdev_by_fid(ccdf->fid);
-	if (!zdev) {
-		pr_err("Error event for unknown fid: %x", ccdf->fid);
+	if (!zdev)
 		return;
-	}
+
+	pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
+		pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
 }
 
 void zpci_event_availability(void *data)
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 66e505ca24ef..3c7eb5dd91c6 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -133,7 +133,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 {
 	struct slot *slot = hotplug_slot->private;
 
-	pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
 	kfree(slot->hotplug_slot->info);
 	kfree(slot->hotplug_slot);
 	kfree(slot);
@@ -183,10 +182,9 @@ int zpci_init_slot(struct zpci_dev *zdev)
 	snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
 	rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
 			     ZPCI_DEVFN, name);
-	if (rc) {
-		pr_err("pci_hp_register failed with error %d\n", rc);
+	if (rc)
 		goto error_reg;
-	}
+
 	list_add(&slot->slot_list, &s390_hotplug_slot_list);
 	return 0;
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 451bf99582ff..244f77f844f0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -698,10 +698,11 @@ static void dasd_profile_start(struct dasd_block *block,
 	}
 
 	spin_lock(&block->profile.lock);
-	if (block->profile.data)
+	if (block->profile.data) {
 		block->profile.data->dasd_io_nr_req[counter]++;
 	if (rq_data_dir(req) == READ)
 		block->profile.data->dasd_read_nr_req[counter]++;
+	}
 	spin_unlock(&block->profile.lock);
 
 	/*
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 5d73e6e49af6..548209a9c43c 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -223,8 +223,12 @@ static void scm_blk_request(struct request_queue *rq)
 	int ret;
 
 	while ((req = blk_peek_request(rq))) {
-		if (req->cmd_type != REQ_TYPE_FS)
+		if (req->cmd_type != REQ_TYPE_FS) {
+			blk_start_request(req);
+			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
+			blk_end_request_all(req, -EIO);
 			continue;
+		}
 
 		if (!scm_permit_request(bdev, req)) {
 			scm_ensure_queue_restart(bdev);
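This is the "fix endless loop for requests != REQ_TYPE_FS" change from the shortlog: blk_peek_request() keeps returning the same request until it has been started, so the old bare continue made scm_blk_request() spin forever on a non-filesystem request. The fix dequeues the request and completes it with -EIO. A minimal sketch of the corrected dispatch-loop pattern (the function name is illustrative, the block-layer calls are the real ones of that era):

#include <linux/blkdev.h>

/* Sketch of the single-queue dispatch pattern: a request must be
 * started (dequeued) before the loop can make progress, otherwise
 * blk_peek_request() hands back the same request on every iteration. */
static void example_blk_request(struct request_queue *rq)
{
	struct request *req;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);		/* take it off the queue */
			blk_end_request_all(req, -EIO);	/* and fail it */
			continue;
		}
		blk_start_request(req);
		/* ... hand the request to the hardware ... */
	}
}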
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 8b387b32fd62..e59331e6c2e5 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -107,7 +107,7 @@ extern debug_info_t *scm_debug;
 
 static inline void SCM_LOG_HEX(int level, void *data, int length)
 {
-	if (level > scm_debug->level)
+	if (!debug_level_enabled(scm_debug, level))
 		return;
 	while (length > 0) {
 		debug_event(scm_debug, level, data, length);
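scm_blk.h is the first of many hunks below (eadm, qdio, zcrypt, claw, ctcm, lcs, netiucv, qeth, zfcp) that replace open-coded "level <= dbf->level" checks and per-driver *_dbf_passes() helpers with the new debug_level_enabled() function of the s390 debug feature. Judging by the helpers being deleted, it is a thin comparison against the debug area's current level; a sketch of the helper and of a typical caller (example_dbf and EXAMPLE_DBF_TEXT are illustrative names):

/* Sketch: nonzero if an event of the given level would actually be
 * written into the debug feature 'id'; lower numbers mean higher priority. */
static inline int debug_level_enabled(debug_info_t *id, int level)
{
	return level <= id->level;
}

/* Typical caller: skip the expensive snprintf when the level is filtered. */
#define EXAMPLE_DBF_TEXT(level, text...)				\
do {									\
	if (debug_level_enabled(example_dbf, level)) {			\
		char buf[16];						\
		snprintf(buf, sizeof(buf), text);			\
		debug_text_event(example_dbf, level, buf);		\
	}								\
} while (0)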
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 4600aa10a1c6..668b32b0dc1d 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
 	struct appldata_product_id id;
 	int rc;
 
-	strcpy(id.prod_nr, "LNXAPPL");
+	strncpy(id.prod_nr, "LNXAPPL", 7);
 	id.prod_fn = myhdr->applid;
 	id.record_nr = myhdr->record_num;
 	id.version_nr = myhdr->version;
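The prod_nr field of struct appldata_product_id is a fixed-width identifier without a terminating NUL, so strcpy() of the 7-character literal also stored the trailing '\0' one byte past the field; strncpy() with an explicit length of 7 fills exactly the field. A small self-contained illustration, assuming a 7-byte field as in the appldata interface (the struct and function names here are made up for the example):

#include <string.h>

struct example_product_id {
	char prod_nr[7];	/* fixed-width id, no terminator */
	unsigned short prod_fn;
};

static void fill_id(struct example_product_id *id)
{
	/* strcpy(id->prod_nr, "LNXAPPL") would write 8 bytes (including the
	 * '\0') into a 7-byte field, clobbering the first byte of prod_fn.
	 * strncpy with the field size copies exactly the 7 characters. */
	strncpy(id->prod_nr, "LNXAPPL", sizeof(id->prod_nr));
	id->prod_fn = 0x02;
}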
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 24a08e8f19e1..2cdec21e8924 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -615,10 +615,10 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
 
 	if (rp->state != RAW3270_STATE_RESET)
 		return;
-	if (rq && rq->rc) {
+	if (rq->rc) {
 		/* Reset command failed. */
 		rp->state = RAW3270_STATE_INIT;
-	} else if (0 && MACHINE_IS_VM) {
+	} else if (MACHINE_IS_VM) {
 		raw3270_size_device_vm(rp);
 		raw3270_size_device_done(rp);
 	} else
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 794820a123d0..ffb1fcf0bf5b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -151,7 +151,7 @@ static int __init init_cpu_info(enum arch_id arch)
 
 	/* get info for boot cpu from lowcore, stored in the HSA */
 
-	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+	sa = dump_save_area_create(0);
 	if (!sa)
 		return -ENOMEM;
 	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
@@ -159,7 +159,6 @@ static int __init init_cpu_info(enum arch_id arch)
 		kfree(sa);
 		return -EIO;
 	}
-	zfcpdump_save_areas[0] = sa;
 	return 0;
 }
 
@@ -246,24 +245,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
 static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
 {
 	unsigned long end;
-	int i = 0;
+	int i;
 
 	if (count == 0)
 		return 0;
 
 	end = start + count;
-	while (zfcpdump_save_areas[i]) {
+	for (i = 0; i < dump_save_areas.count; i++) {
 		unsigned long cp_start, cp_end;	/* copy range */
 		unsigned long sa_start, sa_end;	/* save area range */
 		unsigned long prefix;
 		unsigned long sa_off, len, buf_off;
+		struct save_area *save_area = dump_save_areas.areas[i];
 
-		prefix = zfcpdump_save_areas[i]->pref_reg;
+		prefix = save_area->pref_reg;
 		sa_start = prefix + sys_info.sa_base;
 		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
 
 		if ((end < sa_start) || (start > sa_end))
-			goto next;
+			continue;
 		cp_start = max(start, sa_start);
 		cp_end = min(end, sa_end);
 
@@ -272,10 +272,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
 		len = cp_end - cp_start;
 
 		TRACE("copy_lc for: %lx\n", start);
-		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+		if (copy_lc(buf + buf_off, save_area, sa_off, len))
 			return -EFAULT;
-next:
-		i++;
 	}
 	return 0;
 }
@@ -637,8 +635,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
 	hdr->num_pages = mem_size / PAGE_SIZE;
 	hdr->tod = get_tod_clock();
 	get_cpu_id(&hdr->cpu_id);
-	for (i = 0; zfcpdump_save_areas[i]; i++) {
-		prefix = zfcpdump_save_areas[i]->pref_reg;
+	for (i = 0; i < dump_save_areas.count; i++) {
+		prefix = dump_save_areas.areas[i]->pref_reg;
 		hdr->real_cpu_cnt++;
 		if (!prefix)
 			continue;
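Together with the init_cpu_info() hunk above, zcore now iterates the new dump_save_areas descriptor, which carries an explicit count, instead of walking the NULL-terminated zfcpdump_save_areas[] array, and the boot CPU's save area comes from dump_save_area_create() rather than a bare kmalloc(). The exact definitions live in the s390 arch code; a rough sketch of the shape this code assumes (only the names used in the diff are taken from it, everything else is illustrative):

/* Sketch only: a growable collection of CPU register save areas,
 * indexed 0..count-1 rather than terminated by a NULL pointer. */
struct dump_save_areas {
	struct save_area **areas;	/* one save area per CPU */
	int count;			/* number of valid entries */
};

extern struct dump_save_areas dump_save_areas;

/* Allocate and register the save area for CPU 'cpu', growing the
 * 'areas' array as needed; returns NULL on allocation failure. */
struct save_area *dump_save_area_create(int cpu);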
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index d028fd800c9c..f055df0b167f 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -194,15 +194,14 @@ EXPORT_SYMBOL(airq_iv_release);
  */
 unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
 {
-	const unsigned long be_to_le = BITS_PER_LONG - 1;
 	unsigned long bit;
 
 	if (!iv->avail)
 		return -1UL;
 	spin_lock(&iv->lock);
-	bit = find_first_bit_left(iv->avail, iv->bits);
+	bit = find_first_bit_inv(iv->avail, iv->bits);
 	if (bit < iv->bits) {
-		clear_bit(bit ^ be_to_le, iv->avail);
+		clear_bit_inv(bit, iv->avail);
 		if (bit >= iv->end)
 			iv->end = bit + 1;
 	} else
@@ -220,19 +219,17 @@ EXPORT_SYMBOL(airq_iv_alloc_bit);
  */
 void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
 {
-	const unsigned long be_to_le = BITS_PER_LONG - 1;
-
 	if (!iv->avail)
 		return;
 	spin_lock(&iv->lock);
 	/* Clear (possibly left over) interrupt bit */
-	clear_bit(bit ^ be_to_le, iv->vector);
+	clear_bit_inv(bit, iv->vector);
 	/* Make the bit position available again */
-	set_bit(bit ^ be_to_le, iv->avail);
+	set_bit_inv(bit, iv->avail);
 	if (bit == iv->end - 1) {
 		/* Find new end of bit-field */
 		while (--iv->end > 0)
-			if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+			if (!test_bit_inv(iv->end - 1, iv->avail))
 				break;
 	}
 	spin_unlock(&iv->lock);
@@ -251,15 +248,13 @@ EXPORT_SYMBOL(airq_iv_free_bit);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
 			   unsigned long end)
 {
-	const unsigned long be_to_le = BITS_PER_LONG - 1;
 	unsigned long bit;
 
 	/* Find non-zero bit starting from 'ivs->next'. */
-	bit = find_next_bit_left(iv->vector, end, start);
+	bit = find_next_bit_inv(iv->vector, end, start);
 	if (bit >= end)
 		return -1UL;
-	/* Clear interrupt bit (find left uses big-endian bit numbers) */
-	clear_bit(bit ^ be_to_le, iv->vector);
+	clear_bit_inv(bit, iv->vector);
 	return bit;
 }
 EXPORT_SYMBOL(airq_iv_scan);
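The adapter-interrupt vector code used to translate between the MSB-0 bit numbering of the find_*_bit_left helpers and the LSB-0 numbering of the generic atomic bitops by XOR-ing the bit number with BITS_PER_LONG - 1; the *_inv bitops introduced by this series fold that conversion into the accessor itself. A sketch of how such wrappers can be expressed on top of the generic bitops, mirroring the pattern removed above (not necessarily the exact asm/bitops.h implementation, which may use the interlocked-access instructions directly):

/* Sketch: MSB-0 ("inverted") bit accessors expressed via the standard
 * LSB-0 atomic bitops.  Bit 0 is the most significant bit of the first
 * unsigned long, matching the numbering used by the interruption hardware. */
static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline int test_bit_inv(unsigned long nr,
			       const volatile unsigned long *ptr)
{
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}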
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index d9eddcba7e88..aca7bfc113aa 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/kernel_stat.h>
+#include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;
 
 static void EADM_LOG_HEX(int level, void *data, int length)
 {
-	if (level > eadm_debug->level)
+	if (!debug_level_enabled(eadm_debug, level))
 		return;
 	while (length > 0) {
 		debug_event(eadm_debug, level, data, length);
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 	}
 	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
 	private->state = EADM_IDLE;
+
+	if (private->completion)
+		complete(private->completion);
 }
 
 static struct subchannel *eadm_get_idle_sch(void)
@@ -255,13 +259,32 @@ out:
 
 static void eadm_quiesce(struct subchannel *sch)
 {
+	struct eadm_private *private = get_eadm_private(sch);
+	DECLARE_COMPLETION_ONSTACK(completion);
 	int ret;
 
+	spin_lock_irq(sch->lock);
+	if (private->state != EADM_BUSY)
+		goto disable;
+
+	if (eadm_subchannel_clear(sch))
+		goto disable;
+
+	private->completion = &completion;
+	spin_unlock_irq(sch->lock);
+
+	wait_for_completion_io(&completion);
+
+	spin_lock_irq(sch->lock);
+	private->completion = NULL;
+
+disable:
+	eadm_subchannel_set_timeout(sch, 0);
 	do {
-		spin_lock_irq(sch->lock);
 		ret = cio_disable_subchannel(sch);
-		spin_unlock_irq(sch->lock);
 	} while (ret == -EBUSY);
+
+	spin_unlock_irq(sch->lock);
 }
 
 static int eadm_subchannel_remove(struct subchannel *sch)
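eadm_quiesce() no longer busy-loops on cio_disable_subchannel() while a request is in flight: it issues a clear, publishes an on-stack completion through eadm_private, and sleeps until eadm_subchannel_irq() (earlier in this file) calls complete(). That is the usual on-stack completion handshake; a stripped-down sketch of the pattern outside the CIO specifics (all names here are illustrative):

#include <linux/completion.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t lock;
	bool busy;
	struct completion *done;	/* set only while a waiter exists */
};

/* Interrupt/callback side: signal the waiter, if any. */
static void example_irq(struct example_dev *dev)
{
	spin_lock(&dev->lock);
	dev->busy = false;
	if (dev->done)
		complete(dev->done);
	spin_unlock(&dev->lock);
}

/* Teardown side: wait for the pending operation instead of polling. */
static void example_quiesce(struct example_dev *dev)
{
	DECLARE_COMPLETION_ONSTACK(done);

	spin_lock_irq(&dev->lock);
	if (!dev->busy) {
		spin_unlock_irq(&dev->lock);
		return;
	}
	dev->done = &done;
	spin_unlock_irq(&dev->lock);

	wait_for_completion(&done);	/* eadm uses wait_for_completion_io() */

	spin_lock_irq(&dev->lock);
	dev->done = NULL;
	spin_unlock_irq(&dev->lock);
}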
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
index 2779be093982..9664e4653f98 100644
--- a/drivers/s390/cio/eadm_sch.h
+++ b/drivers/s390/cio/eadm_sch.h
@@ -1,6 +1,7 @@
 #ifndef EADM_SCH_H
 #define EADM_SCH_H
 
+#include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/timer.h>
 #include <linux/list.h>
@@ -9,9 +10,10 @@
 struct eadm_private {
 	union orb orb;
 	enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+	struct completion *completion;
+	struct subchannel *sch;
 	struct timer_list timer;
 	struct list_head head;
-	struct subchannel *sch;
 } __aligned(8);
 
 #define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 647b422bb22a..dfac9bfefea3 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -16,12 +16,6 @@
 extern debug_info_t *qdio_dbf_setup;
 extern debug_info_t *qdio_dbf_error;
 
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-	return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR		3	/* error conditions */
 #define DBF_WARN	4	/* warning conditions */
 #define DBF_INFO	6	/* informational */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
 #define DBF_DEV_EVENT(level, device, text...) \
 	do { \
 		char debug_buffer[QDIO_DBF_LEN]; \
-		if (qdio_dbf_passes(device->debug_area, level)) { \
+		if (debug_level_enabled(device->debug_area, level)) { \
 			snprintf(debug_buffer, QDIO_DBF_LEN, text); \
 			debug_text_event(device->debug_area, level, debug_buffer); \
 		} \
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index bbd3e511c771..3e602e8affa7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -528,7 +528,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	case SLSB_P_INPUT_PRIMED:
 		inbound_primed(q, count);
 		q->first_to_check = add_buf(q->first_to_check, count);
-		if (atomic_sub(count, &q->nr_buf_used) == 0)
+		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
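atomic_sub() returns void, so comparing its result against zero could never reflect the counter actually reaching zero; atomic_sub_return() performs the same subtraction and hands back the new value, which is what the inbound-queue-full accounting needs. A minimal illustration of the distinction:

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t nr_buf_used = ATOMIC_INIT(128);

static void consume(int count)
{
	/* atomic_sub(count, &nr_buf_used) only updates the counter and
	 * returns void; to act on the post-subtraction value, use the
	 * _return variant, which is a full atomic read-modify-write. */
	if (atomic_sub_return(count, &nr_buf_used) == 0)
		pr_debug("queue is now empty\n");
}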
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 841ea72e4a4e..28d9349de1ad 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -11,12 +11,6 @@
 /* that gives us 15 characters in the text event views */
 #define ZCRYPT_DBF_LEN	16
 
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-	return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR		3	/* error conditions */
 #define DBF_WARN	4	/* warning conditions */
 #define DBF_INFO	6	/* informational */
@@ -25,7 +19,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_COMMON(level, text...) \
 	do { \
-		if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+		if (debug_level_enabled(zcrypt_dbf_common, level)) { \
 			char debug_buffer[ZCRYPT_DBF_LEN]; \
 			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
 			debug_text_event(zcrypt_dbf_common, level, \
@@ -35,7 +29,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_DEVICES(level, text...) \
 	do { \
-		if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+		if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
 			char debug_buffer[ZCRYPT_DBF_LEN]; \
 			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
 			debug_text_event(zcrypt_dbf_devices, level, \
@@ -45,7 +39,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_DEV(level, device, text...) \
 	do { \
-		if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+		if (debug_level_enabled(device->dbf_area, level)) { \
 			char debug_buffer[ZCRYPT_DBF_LEN]; \
 			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
 			debug_text_event(device->dbf_area, level, \
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1bc5904df19f..3339b9b607b3 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -114,15 +114,9 @@ do { \
 	debug_event(claw_dbf_##name,level,(void*)(addr),len); \
 } while (0)
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-	return (level <= dbf_grp->level);
-}
-
 #define CLAW_DBF_TEXT_(level,name,text...) \
 	do { \
-		if (claw_dbf_passes(claw_dbf_##name, level)) { \
+		if (debug_level_enabled(claw_dbf_##name, level)) { \
 			sprintf(debug_buffer, text); \
 			debug_text_event(claw_dbf_##name, level, \
 				debug_buffer); \
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 6514e1cb3f1c..8363f1c966ef 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -66,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
 	char dbf_txt_buf[64];
 	va_list args;
 
-	if (level > (ctcm_dbf[dbf_nix].id)->level)
+	if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
 		return;
 	va_start(args, fmt);
 	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 8c03392ac833..150fcb4cebc3 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -16,15 +16,9 @@ do { \
 	debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
 } while (0)
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-	return (level <= dbf_grp->level);
-}
-
 #define LCS_DBF_TEXT_(level,name,text...) \
 	do { \
-		if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+		if (debug_level_enabled(lcs_dbf_##name, level)) { \
 			sprintf(debug_buffer, text); \
 			debug_text_event(lcs_dbf_##name, level, debug_buffer); \
 		} \
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 279ad504ec3c..9b333fcf1a4c 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -105,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
 
 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-	return (level <= dbf_grp->level);
-}
-
 #define IUCV_DBF_TEXT_(name, level, text...) \
 	do { \
-		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+		if (debug_level_enabled(iucv_dbf_##name, level)) { \
 			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
 			sprintf(__buf, text); \
 			debug_text_event(iucv_dbf_##name, level, __buf); \
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 0a328d0d11be..d7b66a28fe75 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5096,7 +5096,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
 	char dbf_txt_buf[32];
 	va_list args;
 
-	if (level > id->level)
+	if (!debug_level_enabled(id, level))
 		return;
 	va_start(args, fmt);
 	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3ac7a4b30dd9..0be3d48681ae 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -278,7 +278,7 @@ struct zfcp_dbf {
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
-	if (level <= req->adapter->dbf->hba->level)
+	if (debug_level_enabled(req->adapter->dbf->hba, level))
 		zfcp_dbf_hba_fsf_res(tag, req);
 }
 
@@ -317,7 +317,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
 		scmd->device->host->hostdata[0];
 
-	if (level <= adapter->dbf->scsi->level)
+	if (debug_level_enabled(adapter->dbf->scsi, level))
 		zfcp_dbf_scsi(tag, scmd, req);
 }
 
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index fd17a9b804b8..db19a38c8c69 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1354,8 +1354,7 @@ out_error_memory:
 	mempool_destroy(hvc_iucv_mempool);
 	kmem_cache_destroy(hvc_iucv_buffer_cache);
 out_error:
-	if (hvc_iucv_filter)
-		kfree(hvc_iucv_filter);
+	kfree(hvc_iucv_filter);
 	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
 	return rc;
 }
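The "remove redundant NULL check" patch from the shortlog relies on kfree(NULL) being defined as a no-op, so the guard adds nothing; this is the same pattern checkpatch.pl warns about. A tiny illustration (the wrapper function is made up):

#include <linux/slab.h>

static void example_cleanup(char *buf)
{
	/* kfree(NULL) is a no-op, so
	 *
	 *	if (buf)
	 *		kfree(buf);
	 *
	 * collapses to the single call below. */
	kfree(buf);
}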