aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-04-27 12:15:31 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-04-27 12:15:31 -0400
commitda8ac5e0fab11d0e84be4e49aaaa828c52d17097 (patch)
treeeade52afcbb5eb31d2d8869fc66e8223a7681a6f
parent32f15dc5e6252f03aa2e04a2b140827a8297f21f (diff)
parentcb629a01bb5bca951287e761c590a5686c6ca416 (diff)
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (38 commits) [S390] SPIN_LOCK_UNLOCKED cleanup in drivers/s390 [S390] Clean up smp code in preparation for some larger changes. [S390] Remove debugging junk. [S390] Switch etr from tasklet to workqueue. [S390] split page_test_and_clear_dirty. [S390] Processor degradation notification. [S390] vtime: cleanup per_cpu usage. [S390] crypto: cleanup. [S390] sclp: fix coding style. [S390] vmlogrdr: stop IUCV connection in vmlogrdr_release. [S390] sclp: initialize early. [S390] ctc: kmalloc->kzalloc/casting cleanups. [S390] zfcpdump support. [S390] dasd: Add ipldev parameter. [S390] dasd: Add sysfs attribute status and generate uevents. [S390] Improved kernel stack overflow checking. [S390] Get rid of console setup functions. [S390] No execute support cleanup. [S390] Minor fault path optimization. [S390] Use generic bug. ...
-rw-r--r--Documentation/s390/crypto/crypto-API.txt83
-rw-r--r--Documentation/s390/zfcpdump.txt87
-rw-r--r--arch/s390/Kconfig13
-rw-r--r--arch/s390/Makefile5
-rw-r--r--arch/s390/appldata/appldata_base.c38
-rw-r--r--arch/s390/crypto/sha1_s390.c129
-rw-r--r--arch/s390/crypto/sha256_s390.c38
-rw-r--r--arch/s390/defconfig3
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/compat_linux.c60
-rw-r--r--arch/s390/kernel/compat_signal.c14
-rw-r--r--arch/s390/kernel/dis.c1278
-rw-r--r--arch/s390/kernel/early.c7
-rw-r--r--arch/s390/kernel/entry.S87
-rw-r--r--arch/s390/kernel/entry64.S100
-rw-r--r--arch/s390/kernel/head64.S72
-rw-r--r--arch/s390/kernel/ipl.c253
-rw-r--r--arch/s390/kernel/module.c4
-rw-r--r--arch/s390/kernel/process.c82
-rw-r--r--arch/s390/kernel/setup.c38
-rw-r--r--arch/s390/kernel/signal.c10
-rw-r--r--arch/s390/kernel/smp.c369
-rw-r--r--arch/s390/kernel/sys_s390.c20
-rw-r--r--arch/s390/kernel/syscalls.S14
-rw-r--r--arch/s390/kernel/time.c34
-rw-r--r--arch/s390/kernel/traps.c72
-rw-r--r--arch/s390/kernel/vmlinux.lds.S10
-rw-r--r--arch/s390/kernel/vtime.c16
-rw-r--r--arch/s390/mm/fault.c331
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_devmap.c58
-rw-r--r--drivers/s390/char/Makefile5
-rw-r--r--drivers/s390/char/con3215.c7
-rw-r--r--drivers/s390/char/con3270.c7
-rw-r--r--drivers/s390/char/sclp.c10
-rw-r--r--drivers/s390/char/sclp.h72
-rw-r--r--drivers/s390/char/sclp_chp.c196
-rw-r--r--drivers/s390/char/sclp_config.c75
-rw-r--r--drivers/s390/char/sclp_cpi.c4
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/sclp_rw.c16
-rw-r--r--drivers/s390/char/sclp_sdias.c255
-rw-r--r--drivers/s390/char/sclp_tty.c6
-rw-r--r--drivers/s390/char/sclp_vt220.c8
-rw-r--r--drivers/s390/char/vmlogrdr.c9
-rw-r--r--drivers/s390/char/zcore.c651
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/ccwgroup.c33
-rw-r--r--drivers/s390/cio/chp.c683
-rw-r--r--drivers/s390/cio/chp.h53
-rw-r--r--drivers/s390/cio/chsc.c1024
-rw-r--r--drivers/s390/cio/chsc.h42
-rw-r--r--drivers/s390/cio/cio.c52
-rw-r--r--drivers/s390/cio/cio.h17
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/cio/css.c201
-rw-r--r--drivers/s390/cio/css.h16
-rw-r--r--drivers/s390/cio/device.c246
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c7
-rw-r--r--drivers/s390/cio/idset.c112
-rw-r--r--drivers/s390/cio/idset.h25
-rw-r--r--drivers/s390/cio/ioasm.h5
-rw-r--r--drivers/s390/net/ctcmain.c23
-rw-r--r--drivers/s390/s390mach.c25
-rw-r--r--drivers/s390/sysinfo.c18
-rw-r--r--include/asm-generic/pgtable.h11
-rw-r--r--include/asm-s390/bug.h69
-rw-r--r--include/asm-s390/ccwgroup.h1
-rw-r--r--include/asm-s390/chpid.h53
-rw-r--r--include/asm-s390/cio.h8
-rw-r--r--include/asm-s390/ipl.h35
-rw-r--r--include/asm-s390/lowcore.h46
-rw-r--r--include/asm-s390/pgtable.h15
-rw-r--r--include/asm-s390/processor.h2
-rw-r--r--include/asm-s390/sclp.h14
-rw-r--r--include/asm-s390/setup.h2
-rw-r--r--include/asm-s390/smp.h6
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--mm/rmap.c8
80 files changed, 5364 insertions, 2155 deletions
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
deleted file mode 100644
index 71ae6ca9f2c2..000000000000
--- a/Documentation/s390/crypto/crypto-API.txt
+++ /dev/null
@@ -1,83 +0,0 @@
1crypto-API support for z990 Message Security Assist (MSA) instructions
2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3
4AUTHOR: Thomas Spatzier (tspat@de.ibm.com)
5
6
71. Introduction crypto-API
8~~~~~~~~~~~~~~~~~~~~~~~~~~
9See Documentation/crypto/api-intro.txt for an introduction/description of the
10kernel crypto API.
11According to api-intro.txt support for z990 crypto instructions has been added
12in the algorithm api layer of the crypto API. Several files containing z990
13optimized implementations of crypto algorithms are placed in the
14arch/s390/crypto directory.
15
16
172. Probing for availability of MSA
18~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19It should be possible to use Kernels with the z990 crypto implementations both
20on machines with MSA available and on those without MSA (pre z990 or z990
21without MSA). Therefore a simple probing mechanism has been implemented:
22In the init function of each crypto module the availability of MSA and of the
23respective crypto algorithm in particular will be tested. If the algorithm is
24available the module will load and register its algorithm with the crypto API.
25
26If the respective crypto algorithm is not available, the init function will
27return -ENOSYS. In that case a fallback to the standard software implementation
28of the crypto algorithm must be taken ( -> the standard crypto modules are
29also built when compiling the kernel).
30
31
323. Ensuring z990 crypto module preference
33~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
34If z990 crypto instructions are available the optimized modules should be
35preferred instead of standard modules.
36
373.1. compiled-in modules
38~~~~~~~~~~~~~~~~~~~~~~~~
39For compiled-in modules it has to be ensured that the z990 modules are linked
40before the standard crypto modules. Then, on system startup the init functions
41of z990 crypto modules will be called first and query for availability of z990
42crypto instructions. If instruction is available, the z990 module will register
43its crypto algorithm implementation -> the load of the standard module will fail
44since the algorithm is already registered.
45If z990 crypto instruction is not available the load of the z990 module will
46fail -> the standard module will load and register its algorithm.
47
483.2. dynamic modules
49~~~~~~~~~~~~~~~~~~~~
50A system administrator has to take care of giving preference to z990 crypto
51modules. If MSA is available appropriate lines have to be added to
52/etc/modprobe.conf.
53
54Example: z990 crypto instruction for SHA1 algorithm is available
55
56 add the following line to /etc/modprobe.conf (assuming the
57 z990 crypto modules for SHA1 is called sha1_z990):
58
59 alias sha1 sha1_z990
60
61 -> when the sha1 algorithm is requested through the crypto API
62 (which has a module autoloader) the z990 module will be loaded.
63
64TBD: a userspace module probing mechanism
65 something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
66 -> try module sha1_z990, if it fails to load standard module sha1
67 the 'probe' statement is currently not supported in modprobe.conf
68
69
704. Currently implemented z990 crypto algorithms
71~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
72The following crypto algorithms with z990 MSA support are currently implemented.
73The name of each algorithm under which it is registered in crypto API and the
74name of the respective module is given in square brackets.
75
76- SHA1 Digest Algorithm [sha1 -> sha1_z990]
77- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
78- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
79- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
80
81In order to load, for example, the sha1_z990 module when the sha1 algorithm is
82requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
83
diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt
new file mode 100644
index 000000000000..cf45d27c4608
--- /dev/null
+++ b/Documentation/s390/zfcpdump.txt
@@ -0,0 +1,87 @@
1s390 SCSI dump tool (zfcpdump)
2
3System z machines (z900 or higher) provide hardware support for creating system
4dumps on SCSI disks. The dump process is initiated by booting a dump tool, which
5has to create a dump of the current (probably crashed) Linux image. In order to
6not overwrite memory of the crashed Linux with data of the dump tool, the
7hardware saves some memory plus the register sets of the boot cpu before the
8dump tool is loaded. There exists an SCLP hardware interface to obtain the saved
9memory afterwards. Currently 32 MB are saved.
10
11This zfcpdump implementation consists of a Linux dump kernel together with
12a userspace dump tool, which are loaded together into the saved memory region
13below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in
14the s390-tools package) to make the device bootable. The operator of a Linux
15system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump
16resides on.
17
18The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
19which exports memory and registers of the crashed Linux in an s390
20standalone dump format. It can be used in the same way as e.g. /dev/mem. The
21dump format defines a 4K header followed by plain uncompressed memory. The
22register sets are stored in the prefix pages of the respective cpus. To build a
23dump enabled kernel with the zcore driver, the kernel config option
24CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of
25memory, which has been saved by hardware is read by the driver via the SCLP
26hardware interface. The second part is just copied from the non overwritten real
27memory.
28
29The userspace application of zfcpdump can reside e.g. in an intitramfs or an
30initrd. It reads from zcore/mem and writes the system dump to a file on a
31SCSI disk.
32
33To build a zfcpdump kernel use the following settings in your kernel
34configuration:
35 * CONFIG_ZFCPDUMP=y
36 * Enable ZFCP driver
37 * Enable SCSI driver
38 * Enable ext2 and ext3 filesystems
39 * Disable as many features as possible to keep the kernel small.
40 E.g. network support is not needed at all.
41
42To use the zfcpdump userspace application in an initramfs you have to do the
43following:
44
45 * Copy the zfcpdump executable somewhere into your Linux tree.
46 E.g. to "arch/s390/boot/zfcpdump. If you do not want to include
47 shared libraries, compile the tool with the "-static" gcc option.
48 * If you want to include e2fsck, add it to your source tree, too. The zfcpdump
49 application attempts to start /sbin/e2fsck from the ramdisk.
50 * Use an initramfs config file like the following:
51
52 dir /dev 755 0 0
53 nod /dev/console 644 0 0 c 5 1
54 nod /dev/null 644 0 0 c 1 3
55 nod /dev/sda1 644 0 0 b 8 1
56 nod /dev/sda2 644 0 0 b 8 2
57 nod /dev/sda3 644 0 0 b 8 3
58 nod /dev/sda4 644 0 0 b 8 4
59 nod /dev/sda5 644 0 0 b 8 5
60 nod /dev/sda6 644 0 0 b 8 6
61 nod /dev/sda7 644 0 0 b 8 7
62 nod /dev/sda8 644 0 0 b 8 8
63 nod /dev/sda9 644 0 0 b 8 9
64 nod /dev/sda10 644 0 0 b 8 10
65 nod /dev/sda11 644 0 0 b 8 11
66 nod /dev/sda12 644 0 0 b 8 12
67 nod /dev/sda13 644 0 0 b 8 13
68 nod /dev/sda14 644 0 0 b 8 14
69 nod /dev/sda15 644 0 0 b 8 15
70 file /init arch/s390/boot/zfcpdump 755 0 0
71 file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0
72 dir /proc 755 0 0
73 dir /sys 755 0 0
74 dir /mnt 755 0 0
75 dir /sbin 755 0 0
76
77 * Issue "make image" to build the zfcpdump image with initramfs.
78
79In a Linux distribution the zfcpdump enabled kernel image must be copied to
80/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the
81dump kernel when preparing a SCSI dump disk.
82
83If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd".
84
85For more information on how to use zfcpdump refer to the s390 'Using the Dump
86Tools book', which is available from
87http://www.ibm.com/developerworks/linux/linux390.
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0f293aa7b0fa..e6ec418093e5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -41,6 +41,11 @@ config GENERIC_HWEIGHT
41config GENERIC_TIME 41config GENERIC_TIME
42 def_bool y 42 def_bool y
43 43
44config GENERIC_BUG
45 bool
46 depends on BUG
47 default y
48
44config NO_IOMEM 49config NO_IOMEM
45 def_bool y 50 def_bool y
46 51
@@ -514,6 +519,14 @@ config KEXEC
514 current kernel, and to start another kernel. It is like a reboot 519 current kernel, and to start another kernel. It is like a reboot
515 but is independent of hardware/microcode support. 520 but is independent of hardware/microcode support.
516 521
522config ZFCPDUMP
523 tristate "zfcpdump support"
524 select SMP
525 default n
526 help
527 Select this option if you want to build an zfcpdump enabled kernel.
528 Refer to "Documentation/s390/zfcpdump.txt" for more details on this.
529
517endmenu 530endmenu
518 531
519source "net/Kconfig" 532source "net/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b1e558496469..68441e0e74b6 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -67,8 +67,10 @@ endif
67 67
68ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) 68ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
69cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) 69cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
70ifneq ($(call cc-option-yn,-mstack-size=8192),y)
70cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) 71cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
71endif 72endif
73endif
72 74
73ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) 75ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
74cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack 76cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
@@ -103,6 +105,9 @@ install: vmlinux
103image: vmlinux 105image: vmlinux
104 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 106 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
105 107
108zfcpdump:
109 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
110
106archclean: 111archclean:
107 $(Q)$(MAKE) $(clean)=$(boot) 112 $(Q)$(MAKE) $(clean)=$(boot)
108 113
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 0c3cf4b16ae4..ee89b33145d5 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
668EXPORT_SYMBOL_GPL(appldata_unregister_ops); 668EXPORT_SYMBOL_GPL(appldata_unregister_ops);
669EXPORT_SYMBOL_GPL(appldata_diag); 669EXPORT_SYMBOL_GPL(appldata_diag);
670 670
671#ifdef MODULE
672/*
673 * Kernel symbols needed by appldata_mem and appldata_os modules.
674 * However, if this file is compiled as a module (for testing only), these
675 * symbols are not exported. In this case, we define them locally and export
676 * those.
677 */
678void si_swapinfo(struct sysinfo *val)
679{
680 val->freeswap = -1ul;
681 val->totalswap = -1ul;
682}
683
684unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
685 -1 - FIXED_1/200};
686int nr_threads = -1;
687
688void get_full_page_state(struct page_state *ps)
689{
690 memset(ps, -1, sizeof(struct page_state));
691}
692
693unsigned long nr_running(void)
694{
695 return -1;
696}
697
698unsigned long nr_iowait(void)
699{
700 return -1;
701}
702
703/*unsigned long nr_context_switches(void)
704{
705 return -1;
706}*/
707#endif /* MODULE */
708EXPORT_SYMBOL_GPL(si_swapinfo); 671EXPORT_SYMBOL_GPL(si_swapinfo);
709EXPORT_SYMBOL_GPL(nr_threads); 672EXPORT_SYMBOL_GPL(nr_threads);
710EXPORT_SYMBOL_GPL(nr_running); 673EXPORT_SYMBOL_GPL(nr_running);
711EXPORT_SYMBOL_GPL(nr_iowait); 674EXPORT_SYMBOL_GPL(nr_iowait);
712//EXPORT_SYMBOL_GPL(nr_context_switches);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 969639f31977..af4460ec381f 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -25,99 +25,100 @@
25 */ 25 */
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/mm.h>
29#include <linux/crypto.h> 28#include <linux/crypto.h>
30#include <asm/scatterlist.h> 29
31#include <asm/byteorder.h>
32#include "crypt_s390.h" 30#include "crypt_s390.h"
33 31
34#define SHA1_DIGEST_SIZE 20 32#define SHA1_DIGEST_SIZE 20
35#define SHA1_BLOCK_SIZE 64 33#define SHA1_BLOCK_SIZE 64
36 34
37struct crypt_s390_sha1_ctx { 35struct s390_sha1_ctx {
38 u64 count; 36 u64 count; /* message length */
39 u32 state[5]; 37 u32 state[5];
40 u32 buf_len; 38 u8 buf[2 * SHA1_BLOCK_SIZE];
41 u8 buffer[2 * SHA1_BLOCK_SIZE];
42}; 39};
43 40
44static void sha1_init(struct crypto_tfm *tfm) 41static void sha1_init(struct crypto_tfm *tfm)
45{ 42{
46 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); 43 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
47 44
48 ctx->state[0] = 0x67452301; 45 sctx->state[0] = 0x67452301;
49 ctx->state[1] = 0xEFCDAB89; 46 sctx->state[1] = 0xEFCDAB89;
50 ctx->state[2] = 0x98BADCFE; 47 sctx->state[2] = 0x98BADCFE;
51 ctx->state[3] = 0x10325476; 48 sctx->state[3] = 0x10325476;
52 ctx->state[4] = 0xC3D2E1F0; 49 sctx->state[4] = 0xC3D2E1F0;
53 50 sctx->count = 0;
54 ctx->count = 0;
55 ctx->buf_len = 0;
56} 51}
57 52
58static void sha1_update(struct crypto_tfm *tfm, const u8 *data, 53static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
59 unsigned int len) 54 unsigned int len)
60{ 55{
61 struct crypt_s390_sha1_ctx *sctx; 56 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
62 long imd_len; 57 unsigned int index;
63 58 int ret;
64 sctx = crypto_tfm_ctx(tfm); 59
65 sctx->count += len * 8; /* message bit length */ 60 /* how much is already in the buffer? */
66 61 index = sctx->count & 0x3f;
67 /* anything in buffer yet? -> must be completed */ 62
68 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { 63 sctx->count += len;
69 /* complete full block and hash */ 64
70 memcpy(sctx->buffer + sctx->buf_len, data, 65 if (index + len < SHA1_BLOCK_SIZE)
71 SHA1_BLOCK_SIZE - sctx->buf_len); 66 goto store;
72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 67
73 SHA1_BLOCK_SIZE); 68 /* process one stored block */
74 data += SHA1_BLOCK_SIZE - sctx->buf_len; 69 if (index) {
75 len -= SHA1_BLOCK_SIZE - sctx->buf_len; 70 memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
76 sctx->buf_len = 0; 71 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
72 SHA1_BLOCK_SIZE);
73 BUG_ON(ret != SHA1_BLOCK_SIZE);
74 data += SHA1_BLOCK_SIZE - index;
75 len -= SHA1_BLOCK_SIZE - index;
77 } 76 }
78 77
79 /* rest of data contains full blocks? */ 78 /* process as many blocks as possible */
80 imd_len = len & ~0x3ful; 79 if (len >= SHA1_BLOCK_SIZE) {
81 if (imd_len) { 80 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 81 len & ~(SHA1_BLOCK_SIZE - 1));
83 data += imd_len; 82 BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
84 len -= imd_len; 83 data += ret;
84 len -= ret;
85 } 85 }
86 /* anything left? store in buffer */
87 if (len) {
88 memcpy(sctx->buffer + sctx->buf_len , data, len);
89 sctx->buf_len += len;
90 }
91}
92 86
87store:
88 /* anything left? */
89 if (len)
90 memcpy(sctx->buf + index , data, len);
91}
93 92
94static void pad_message(struct crypt_s390_sha1_ctx* sctx) 93/* Add padding and return the message digest. */
94static void sha1_final(struct crypto_tfm *tfm, u8 *out)
95{ 95{
96 int index; 96 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
97 u64 bits;
98 unsigned int index, end;
99 int ret;
100
101 /* must perform manual padding */
102 index = sctx->count & 0x3f;
103 end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
97 104
98 index = sctx->buf_len;
99 sctx->buf_len = (sctx->buf_len < 56) ?
100 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
101 /* start pad with 1 */ 105 /* start pad with 1 */
102 sctx->buffer[index] = 0x80; 106 sctx->buf[index] = 0x80;
107
103 /* pad with zeros */ 108 /* pad with zeros */
104 index++; 109 index++;
105 memset(sctx->buffer + index, 0x00, sctx->buf_len - index); 110 memset(sctx->buf + index, 0x00, end - index - 8);
106 /* append length */
107 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
108 sizeof sctx->count);
109}
110 111
111/* Add padding and return the message digest. */ 112 /* append message length */
112static void sha1_final(struct crypto_tfm *tfm, u8 *out) 113 bits = sctx->count * 8;
113{ 114 memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
114 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 115
116 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
117 BUG_ON(ret != end);
115 118
116 /* must perform manual padding */
117 pad_message(sctx);
118 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
119 /* copy digest to out */ 119 /* copy digest to out */
120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
121
121 /* wipe context */ 122 /* wipe context */
122 memset(sctx, 0, sizeof *sctx); 123 memset(sctx, 0, sizeof *sctx);
123} 124}
@@ -128,7 +129,7 @@ static struct crypto_alg alg = {
128 .cra_priority = CRYPT_S390_PRIORITY, 129 .cra_priority = CRYPT_S390_PRIORITY,
129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 130 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
130 .cra_blocksize = SHA1_BLOCK_SIZE, 131 .cra_blocksize = SHA1_BLOCK_SIZE,
131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), 132 .cra_ctxsize = sizeof(struct s390_sha1_ctx),
132 .cra_module = THIS_MODULE, 133 .cra_module = THIS_MODULE,
133 .cra_list = LIST_HEAD_INIT(alg.cra_list), 134 .cra_list = LIST_HEAD_INIT(alg.cra_list),
134 .cra_u = { .digest = { 135 .cra_u = { .digest = {
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 78436c696d37..2ced3330bce0 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -26,7 +26,7 @@
26#define SHA256_BLOCK_SIZE 64 26#define SHA256_BLOCK_SIZE 64
27 27
28struct s390_sha256_ctx { 28struct s390_sha256_ctx {
29 u64 count; 29 u64 count; /* message length */
30 u32 state[8]; 30 u32 state[8];
31 u8 buf[2 * SHA256_BLOCK_SIZE]; 31 u8 buf[2 * SHA256_BLOCK_SIZE];
32}; 32};
@@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
54 int ret; 54 int ret;
55 55
56 /* how much is already in the buffer? */ 56 /* how much is already in the buffer? */
57 index = sctx->count / 8 & 0x3f; 57 index = sctx->count & 0x3f;
58 58
59 /* update message bit length */ 59 sctx->count += len;
60 sctx->count += len * 8;
61 60
62 if ((index + len) < SHA256_BLOCK_SIZE) 61 if ((index + len) < SHA256_BLOCK_SIZE)
63 goto store; 62 goto store;
@@ -87,12 +86,17 @@ store:
87 memcpy(sctx->buf + index , data, len); 86 memcpy(sctx->buf + index , data, len);
88} 87}
89 88
90static void pad_message(struct s390_sha256_ctx* sctx) 89/* Add padding and return the message digest */
90static void sha256_final(struct crypto_tfm *tfm, u8 *out)
91{ 91{
92 int index, end; 92 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
93 u64 bits;
94 unsigned int index, end;
95 int ret;
93 96
94 index = sctx->count / 8 & 0x3f; 97 /* must perform manual padding */
95 end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE; 98 index = sctx->count & 0x3f;
99 end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
96 100
97 /* start pad with 1 */ 101 /* start pad with 1 */
98 sctx->buf[index] = 0x80; 102 sctx->buf[index] = 0x80;
@@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx)
102 memset(sctx->buf + index, 0x00, end - index - 8); 106 memset(sctx->buf + index, 0x00, end - index - 8);
103 107
104 /* append message length */ 108 /* append message length */
105 memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count); 109 bits = sctx->count * 8;
106 110 memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
107 sctx->count = end * 8;
108}
109
110/* Add padding and return the message digest */
111static void sha256_final(struct crypto_tfm *tfm, u8 *out)
112{
113 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
114
115 /* must perform manual padding */
116 pad_message(sctx);
117 111
118 crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, 112 ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
119 sctx->count / 8); 113 BUG_ON(ret != end);
120 114
121 /* copy digest to out */ 115 /* copy digest to out */
122 memcpy(out, sctx->state, SHA256_DIGEST_SIZE); 116 memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 741d2bbb2b37..0e4da8a7d826 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
12# CONFIG_ARCH_HAS_ILOG2_U64 is not set 12# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_GENERIC_HWEIGHT=y 13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_TIME=y 14CONFIG_GENERIC_TIME=y
15CONFIG_GENERIC_BUG=y
15CONFIG_NO_IOMEM=y 16CONFIG_NO_IOMEM=y
16CONFIG_S390=y 17CONFIG_S390=y
17CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -166,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y
166CONFIG_NO_IDLE_HZ_INIT=y 167CONFIG_NO_IDLE_HZ_INIT=y
167CONFIG_S390_HYPFS_FS=y 168CONFIG_S390_HYPFS_FS=y
168CONFIG_KEXEC=y 169CONFIG_KEXEC=y
170# CONFIG_ZFCPDUMP is not set
169 171
170# 172#
171# Networking 173# Networking
@@ -705,6 +707,7 @@ CONFIG_DEBUG_MUTEXES=y
705CONFIG_DEBUG_SPINLOCK_SLEEP=y 707CONFIG_DEBUG_SPINLOCK_SLEEP=y
706# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 708# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
707# CONFIG_DEBUG_KOBJECT is not set 709# CONFIG_DEBUG_KOBJECT is not set
710CONFIG_DEBUG_BUGVERBOSE=y
708# CONFIG_DEBUG_INFO is not set 711# CONFIG_DEBUG_INFO is not set
709# CONFIG_DEBUG_VM is not set 712# CONFIG_DEBUG_VM is not set
710# CONFIG_DEBUG_LIST is not set 713# CONFIG_DEBUG_LIST is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5492d25d7d69..3195d375bd51 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional
6 6
7obj-y := bitmap.o traps.o time.o process.o base.o early.o \ 7obj-y := bitmap.o traps.o time.o process.o base.o early.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o irq.o ipl.o 9 semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 664c669b1856..5236fdb17fcb 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
495 * sys32_execve() executes a new program after the asm stub has set 495 * sys32_execve() executes a new program after the asm stub has set
496 * things up for us. This should basically do what I want it to. 496 * things up for us. This should basically do what I want it to.
497 */ 497 */
498asmlinkage long 498asmlinkage long sys32_execve(void)
499sys32_execve(struct pt_regs regs)
500{ 499{
501 int error; 500 struct pt_regs *regs = task_pt_regs(current);
502 char * filename; 501 char *filename;
502 unsigned long result;
503 int rc;
503 504
504 filename = getname(compat_ptr(regs.orig_gpr2)); 505 filename = getname(compat_ptr(regs->orig_gpr2));
505 error = PTR_ERR(filename); 506 if (IS_ERR(filename)) {
506 if (IS_ERR(filename)) 507 result = PTR_ERR(filename);
507 goto out; 508 goto out;
508 error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
509 compat_ptr(regs.gprs[4]), &regs);
510 if (error == 0)
511 {
512 task_lock(current);
513 current->ptrace &= ~PT_DTRACE;
514 task_unlock(current);
515 current->thread.fp_regs.fpc=0;
516 asm volatile("sfpc %0,0" : : "d" (0));
517 } 509 }
510 rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]),
511 compat_ptr(regs->gprs[4]), regs);
512 if (rc) {
513 result = rc;
514 goto out_putname;
515 }
516 task_lock(current);
517 current->ptrace &= ~PT_DTRACE;
518 task_unlock(current);
519 current->thread.fp_regs.fpc=0;
520 asm volatile("sfpc %0,0" : : "d" (0));
521 result = regs->gprs[2];
522out_putname:
518 putname(filename); 523 putname(filename);
519out: 524out:
520 return error; 525 return result;
521} 526}
522 527
523 528
@@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
918 return sys_write(fd, buf, count); 923 return sys_write(fd, buf, count);
919} 924}
920 925
921asmlinkage long sys32_clone(struct pt_regs regs) 926asmlinkage long sys32_clone(void)
922{ 927{
923 unsigned long clone_flags; 928 struct pt_regs *regs = task_pt_regs(current);
924 unsigned long newsp; 929 unsigned long clone_flags;
930 unsigned long newsp;
925 int __user *parent_tidptr, *child_tidptr; 931 int __user *parent_tidptr, *child_tidptr;
926 932
927 clone_flags = regs.gprs[3] & 0xffffffffUL; 933 clone_flags = regs->gprs[3] & 0xffffffffUL;
928 newsp = regs.orig_gpr2 & 0x7fffffffUL; 934 newsp = regs->orig_gpr2 & 0x7fffffffUL;
929 parent_tidptr = compat_ptr(regs.gprs[4]); 935 parent_tidptr = compat_ptr(regs->gprs[4]);
930 child_tidptr = compat_ptr(regs.gprs[5]); 936 child_tidptr = compat_ptr(regs->gprs[5]);
931 if (!newsp) 937 if (!newsp)
932 newsp = regs.gprs[15]; 938 newsp = regs->gprs[15];
933 return do_fork(clone_flags, newsp, &regs, 0, 939 return do_fork(clone_flags, newsp, regs, 0,
934 parent_tidptr, child_tidptr); 940 parent_tidptr, child_tidptr);
935} 941}
936 942
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 887a9881d0d0..80a54a0149ab 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
255} 255}
256 256
257asmlinkage long 257asmlinkage long
258sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, 258sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss)
259 struct pt_regs *regs)
260{ 259{
260 struct pt_regs *regs = task_pt_regs(current);
261 stack_t kss, koss; 261 stack_t kss, koss;
262 unsigned long ss_sp; 262 unsigned long ss_sp;
263 int ret, err = 0; 263 int ret, err = 0;
@@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
344 return 0; 344 return 0;
345} 345}
346 346
347asmlinkage long sys32_sigreturn(struct pt_regs *regs) 347asmlinkage long sys32_sigreturn(void)
348{ 348{
349 struct pt_regs *regs = task_pt_regs(current);
349 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; 350 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
350 sigset_t set; 351 sigset_t set;
351 352
@@ -370,8 +371,9 @@ badframe:
370 return 0; 371 return 0;
371} 372}
372 373
373asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) 374asmlinkage long sys32_rt_sigreturn(void)
374{ 375{
376 struct pt_regs *regs = task_pt_regs(current);
375 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; 377 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
376 sigset_t set; 378 sigset_t set;
377 stack_t st; 379 stack_t st;
@@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
407 return regs->gprs[2]; 409 return regs->gprs[2];
408 410
409badframe: 411badframe:
410 force_sig(SIGSEGV, current); 412 force_sig(SIGSEGV, current);
411 return 0; 413 return 0;
412} 414}
413 415
414/* 416/*
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
new file mode 100644
index 000000000000..dabaf98943d0
--- /dev/null
+++ b/arch/s390/kernel/dis.c
@@ -0,0 +1,1278 @@
1/*
2 * arch/s390/kernel/dis.c
3 *
4 * Disassemble s390 instructions.
5 *
6 * Copyright IBM Corp. 2007
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 */
9
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/errno.h>
14#include <linux/ptrace.h>
15#include <linux/timer.h>
16#include <linux/mm.h>
17#include <linux/smp.h>
18#include <linux/smp_lock.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/module.h>
23#include <linux/kallsyms.h>
24#include <linux/reboot.h>
25#include <linux/kprobes.h>
26
27#include <asm/system.h>
28#include <asm/uaccess.h>
29#include <asm/io.h>
30#include <asm/atomic.h>
31#include <asm/mathemu.h>
32#include <asm/cpcmd.h>
33#include <asm/s390_ext.h>
34#include <asm/lowcore.h>
35#include <asm/debug.h>
36#include <asm/kdebug.h>
37
38#ifndef CONFIG_64BIT
39#define ONELONG "%08lx: "
40#else /* CONFIG_64BIT */
41#define ONELONG "%016lx: "
42#endif /* CONFIG_64BIT */
43
44#define OPERAND_GPR 0x1 /* Operand printed as %rx */
45#define OPERAND_FPR 0x2 /* Operand printed as %fx */
46#define OPERAND_AR 0x4 /* Operand printed as %ax */
47#define OPERAND_CR 0x8 /* Operand printed as %cx */
48#define OPERAND_DISP 0x10 /* Operand printed as displacement */
49#define OPERAND_BASE 0x20 /* Operand printed as base register */
50#define OPERAND_INDEX 0x40 /* Operand printed as index register */
51#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
52#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
53#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
54
55enum {
56 UNUSED, /* Indicates the end of the operand list */
57 R_8, /* GPR starting at position 8 */
58 R_12, /* GPR starting at position 12 */
59 R_16, /* GPR starting at position 16 */
60 R_20, /* GPR starting at position 20 */
61 R_24, /* GPR starting at position 24 */
62 R_28, /* GPR starting at position 28 */
63 R_32, /* GPR starting at position 32 */
64 F_8, /* FPR starting at position 8 */
65 F_12, /* FPR starting at position 12 */
66 F_16, /* FPR starting at position 16 */
67 F_20, /* FPR starting at position 16 */
68 F_24, /* FPR starting at position 24 */
69 F_28, /* FPR starting at position 28 */
70 F_32, /* FPR starting at position 32 */
71 A_8, /* Access reg. starting at position 8 */
72 A_12, /* Access reg. starting at position 12 */
73 A_24, /* Access reg. starting at position 24 */
74 A_28, /* Access reg. starting at position 28 */
75 C_8, /* Control reg. starting at position 8 */
76 C_12, /* Control reg. starting at position 12 */
77 B_16, /* Base register starting at position 16 */
78 B_32, /* Base register starting at position 32 */
79 X_12, /* Index register starting at position 12 */
80 D_20, /* Displacement starting at position 20 */
81 D_36, /* Displacement starting at position 36 */
82 D20_20, /* 20 bit displacement starting at 20 */
83 L4_8, /* 4 bit length starting at position 8 */
84 L4_12, /* 4 bit length starting at position 12 */
85 L8_8, /* 8 bit length starting at position 8 */
86 U4_8, /* 4 bit unsigned value starting at 8 */
87 U4_12, /* 4 bit unsigned value starting at 12 */
88 U4_16, /* 4 bit unsigned value starting at 16 */
89 U4_20, /* 4 bit unsigned value starting at 20 */
90 U8_8, /* 8 bit unsigned value starting at 8 */
91 U8_16, /* 8 bit unsigned value starting at 16 */
92 I16_16, /* 16 bit signed value starting at 16 */
93 U16_16, /* 16 bit unsigned value starting at 16 */
94 J16_16, /* PC relative jump offset at 16 */
95 J32_16, /* PC relative long offset at 16 */
96 I32_16, /* 32 bit signed value starting at 16 */
97 U32_16, /* 32 bit unsigned value starting at 16 */
98 M_16, /* 4 bit optional mask starting at 16 */
99 RO_28, /* optional GPR starting at position 28 */
100};
101
102/*
103 * Enumeration of the different instruction formats.
104 * For details consult the principles of operation.
105 */
106enum {
107 INSTR_INVALID,
108 INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU,
109 INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
110 INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
111 INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR,
112 INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR,
113 INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
114 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
115 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP,
116 INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD,
117 INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD,
118 INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD,
119 INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD,
120 INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD,
121 INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD,
122 INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
123 INSTR_S_00, INSTR_S_RD,
124};
125
126struct operand {
127 int bits; /* The number of bits in the operand. */
128 int shift; /* The number of bits to shift. */
129 int flags; /* One bit syntax flags. */
130};
131
132struct insn {
133 const char name[5];
134 unsigned char opfrag;
135 unsigned char format;
136};
137
138static const struct operand operands[] =
139{
140 [UNUSED] = { 0, 0, 0 },
141 [R_8] = { 4, 8, OPERAND_GPR },
142 [R_12] = { 4, 12, OPERAND_GPR },
143 [R_16] = { 4, 16, OPERAND_GPR },
144 [R_20] = { 4, 20, OPERAND_GPR },
145 [R_24] = { 4, 24, OPERAND_GPR },
146 [R_28] = { 4, 28, OPERAND_GPR },
147 [R_32] = { 4, 32, OPERAND_GPR },
148 [F_8] = { 4, 8, OPERAND_FPR },
149 [F_12] = { 4, 12, OPERAND_FPR },
150 [F_16] = { 4, 16, OPERAND_FPR },
151 [F_20] = { 4, 16, OPERAND_FPR },
152 [F_24] = { 4, 24, OPERAND_FPR },
153 [F_28] = { 4, 28, OPERAND_FPR },
154 [F_32] = { 4, 32, OPERAND_FPR },
155 [A_8] = { 4, 8, OPERAND_AR },
156 [A_12] = { 4, 12, OPERAND_AR },
157 [A_24] = { 4, 24, OPERAND_AR },
158 [A_28] = { 4, 28, OPERAND_AR },
159 [C_8] = { 4, 8, OPERAND_CR },
160 [C_12] = { 4, 12, OPERAND_CR },
161 [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
162 [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
163 [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
164 [D_20] = { 12, 20, OPERAND_DISP },
165 [D_36] = { 12, 36, OPERAND_DISP },
166 [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
167 [L4_8] = { 4, 8, OPERAND_LENGTH },
168 [L4_12] = { 4, 12, OPERAND_LENGTH },
169 [L8_8] = { 8, 8, OPERAND_LENGTH },
170 [U4_8] = { 4, 8, 0 },
171 [U4_12] = { 4, 12, 0 },
172 [U4_16] = { 4, 16, 0 },
173 [U4_20] = { 4, 20, 0 },
174 [U8_8] = { 8, 8, 0 },
175 [U8_16] = { 8, 16, 0 },
176 [I16_16] = { 16, 16, OPERAND_SIGNED },
177 [U16_16] = { 16, 16, 0 },
178 [J16_16] = { 16, 16, OPERAND_PCREL },
179 [J32_16] = { 32, 16, OPERAND_PCREL },
180 [I32_16] = { 32, 16, OPERAND_SIGNED },
181 [U32_16] = { 32, 16, 0 },
182 [M_16] = { 4, 16, 0 },
183 [RO_28] = { 4, 28, OPERAND_GPR }
184};
185
186static const unsigned char formats[][7] = {
187 [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */
188 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */
189 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */
190 [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */
191 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */
192 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */
193 [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */
194 [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */
195 [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */
196 [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */
197 [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */
198 [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */
199 [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */
200 [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */
201 [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */
202 [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */
203 [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */
204 [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */
205 [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */
206 [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */
207 [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */
208 [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */
209 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
210 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
211 [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */
212 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */
213 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */
214 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */
215 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */
216 [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */
217 [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */
218 [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */
219 [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */
220 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */
221 [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */
222 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */
223 [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */
224 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */
225 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */
226 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
227 /* e.g. icmh */
228 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */
229 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */
230 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */
231 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */
232 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */
233 [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */
234 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */
235 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */
236 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */
237 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
238 /* e.g. madb */
239 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */
240 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */
241 [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */
242 [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */
243 [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */
244 [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */
245 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */
246 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */
247 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
248 /* e.g. mvc */
249 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
250 /* e.g. srp */
251 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
252 /* e.g. pack */
253 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
254 /* e.g. mvck */
255 [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
256 /* e.g. plo */
257 [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
258 /* e.g. lmd */
259 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */
260 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */
261 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
262 /* e.g. mvcos */
263};
264
265static struct insn opcode[] = {
266#ifdef CONFIG_64BIT
267 { "lmd", 0xef, INSTR_SS_RRRDRD3 },
268#endif
269 { "spm", 0x04, INSTR_RR_R0 },
270 { "balr", 0x05, INSTR_RR_RR },
271 { "bctr", 0x06, INSTR_RR_RR },
272 { "bcr", 0x07, INSTR_RR_UR },
273 { "svc", 0x0a, INSTR_RR_U0 },
274 { "bsm", 0x0b, INSTR_RR_RR },
275 { "bassm", 0x0c, INSTR_RR_RR },
276 { "basr", 0x0d, INSTR_RR_RR },
277 { "mvcl", 0x0e, INSTR_RR_RR },
278 { "clcl", 0x0f, INSTR_RR_RR },
279 { "lpr", 0x10, INSTR_RR_RR },
280 { "lnr", 0x11, INSTR_RR_RR },
281 { "ltr", 0x12, INSTR_RR_RR },
282 { "lcr", 0x13, INSTR_RR_RR },
283 { "nr", 0x14, INSTR_RR_RR },
284 { "clr", 0x15, INSTR_RR_RR },
285 { "or", 0x16, INSTR_RR_RR },
286 { "xr", 0x17, INSTR_RR_RR },
287 { "lr", 0x18, INSTR_RR_RR },
288 { "cr", 0x19, INSTR_RR_RR },
289 { "ar", 0x1a, INSTR_RR_RR },
290 { "sr", 0x1b, INSTR_RR_RR },
291 { "mr", 0x1c, INSTR_RR_RR },
292 { "dr", 0x1d, INSTR_RR_RR },
293 { "alr", 0x1e, INSTR_RR_RR },
294 { "slr", 0x1f, INSTR_RR_RR },
295 { "lpdr", 0x20, INSTR_RR_FF },
296 { "lndr", 0x21, INSTR_RR_FF },
297 { "ltdr", 0x22, INSTR_RR_FF },
298 { "lcdr", 0x23, INSTR_RR_FF },
299 { "hdr", 0x24, INSTR_RR_FF },
300 { "ldxr", 0x25, INSTR_RR_FF },
301 { "lrdr", 0x25, INSTR_RR_FF },
302 { "mxr", 0x26, INSTR_RR_FF },
303 { "mxdr", 0x27, INSTR_RR_FF },
304 { "ldr", 0x28, INSTR_RR_FF },
305 { "cdr", 0x29, INSTR_RR_FF },
306 { "adr", 0x2a, INSTR_RR_FF },
307 { "sdr", 0x2b, INSTR_RR_FF },
308 { "mdr", 0x2c, INSTR_RR_FF },
309 { "ddr", 0x2d, INSTR_RR_FF },
310 { "awr", 0x2e, INSTR_RR_FF },
311 { "swr", 0x2f, INSTR_RR_FF },
312 { "lper", 0x30, INSTR_RR_FF },
313 { "lner", 0x31, INSTR_RR_FF },
314 { "lter", 0x32, INSTR_RR_FF },
315 { "lcer", 0x33, INSTR_RR_FF },
316 { "her", 0x34, INSTR_RR_FF },
317 { "ledr", 0x35, INSTR_RR_FF },
318 { "lrer", 0x35, INSTR_RR_FF },
319 { "axr", 0x36, INSTR_RR_FF },
320 { "sxr", 0x37, INSTR_RR_FF },
321 { "ler", 0x38, INSTR_RR_FF },
322 { "cer", 0x39, INSTR_RR_FF },
323 { "aer", 0x3a, INSTR_RR_FF },
324 { "ser", 0x3b, INSTR_RR_FF },
325 { "mder", 0x3c, INSTR_RR_FF },
326 { "mer", 0x3c, INSTR_RR_FF },
327 { "der", 0x3d, INSTR_RR_FF },
328 { "aur", 0x3e, INSTR_RR_FF },
329 { "sur", 0x3f, INSTR_RR_FF },
330 { "sth", 0x40, INSTR_RX_RRRD },
331 { "la", 0x41, INSTR_RX_RRRD },
332 { "stc", 0x42, INSTR_RX_RRRD },
333 { "ic", 0x43, INSTR_RX_RRRD },
334 { "ex", 0x44, INSTR_RX_RRRD },
335 { "bal", 0x45, INSTR_RX_RRRD },
336 { "bct", 0x46, INSTR_RX_RRRD },
337 { "bc", 0x47, INSTR_RX_URRD },
338 { "lh", 0x48, INSTR_RX_RRRD },
339 { "ch", 0x49, INSTR_RX_RRRD },
340 { "ah", 0x4a, INSTR_RX_RRRD },
341 { "sh", 0x4b, INSTR_RX_RRRD },
342 { "mh", 0x4c, INSTR_RX_RRRD },
343 { "bas", 0x4d, INSTR_RX_RRRD },
344 { "cvd", 0x4e, INSTR_RX_RRRD },
345 { "cvb", 0x4f, INSTR_RX_RRRD },
346 { "st", 0x50, INSTR_RX_RRRD },
347 { "lae", 0x51, INSTR_RX_RRRD },
348 { "n", 0x54, INSTR_RX_RRRD },
349 { "cl", 0x55, INSTR_RX_RRRD },
350 { "o", 0x56, INSTR_RX_RRRD },
351 { "x", 0x57, INSTR_RX_RRRD },
352 { "l", 0x58, INSTR_RX_RRRD },
353 { "c", 0x59, INSTR_RX_RRRD },
354 { "a", 0x5a, INSTR_RX_RRRD },
355 { "s", 0x5b, INSTR_RX_RRRD },
356 { "m", 0x5c, INSTR_RX_RRRD },
357 { "d", 0x5d, INSTR_RX_RRRD },
358 { "al", 0x5e, INSTR_RX_RRRD },
359 { "sl", 0x5f, INSTR_RX_RRRD },
360 { "std", 0x60, INSTR_RX_FRRD },
361 { "mxd", 0x67, INSTR_RX_FRRD },
362 { "ld", 0x68, INSTR_RX_FRRD },
363 { "cd", 0x69, INSTR_RX_FRRD },
364 { "ad", 0x6a, INSTR_RX_FRRD },
365 { "sd", 0x6b, INSTR_RX_FRRD },
366 { "md", 0x6c, INSTR_RX_FRRD },
367 { "dd", 0x6d, INSTR_RX_FRRD },
368 { "aw", 0x6e, INSTR_RX_FRRD },
369 { "sw", 0x6f, INSTR_RX_FRRD },
370 { "ste", 0x70, INSTR_RX_FRRD },
371 { "ms", 0x71, INSTR_RX_RRRD },
372 { "le", 0x78, INSTR_RX_FRRD },
373 { "ce", 0x79, INSTR_RX_FRRD },
374 { "ae", 0x7a, INSTR_RX_FRRD },
375 { "se", 0x7b, INSTR_RX_FRRD },
376 { "mde", 0x7c, INSTR_RX_FRRD },
377 { "me", 0x7c, INSTR_RX_FRRD },
378 { "de", 0x7d, INSTR_RX_FRRD },
379 { "au", 0x7e, INSTR_RX_FRRD },
380 { "su", 0x7f, INSTR_RX_FRRD },
381 { "ssm", 0x80, INSTR_S_RD },
382 { "lpsw", 0x82, INSTR_S_RD },
383 { "diag", 0x83, INSTR_RS_RRRD },
384 { "brxh", 0x84, INSTR_RSI_RRP },
385 { "brxle", 0x85, INSTR_RSI_RRP },
386 { "bxh", 0x86, INSTR_RS_RRRD },
387 { "bxle", 0x87, INSTR_RS_RRRD },
388 { "srl", 0x88, INSTR_RS_R0RD },
389 { "sll", 0x89, INSTR_RS_R0RD },
390 { "sra", 0x8a, INSTR_RS_R0RD },
391 { "sla", 0x8b, INSTR_RS_R0RD },
392 { "srdl", 0x8c, INSTR_RS_R0RD },
393 { "sldl", 0x8d, INSTR_RS_R0RD },
394 { "srda", 0x8e, INSTR_RS_R0RD },
395 { "slda", 0x8f, INSTR_RS_R0RD },
396 { "stm", 0x90, INSTR_RS_RRRD },
397 { "tm", 0x91, INSTR_SI_URD },
398 { "mvi", 0x92, INSTR_SI_URD },
399 { "ts", 0x93, INSTR_S_RD },
400 { "ni", 0x94, INSTR_SI_URD },
401 { "cli", 0x95, INSTR_SI_URD },
402 { "oi", 0x96, INSTR_SI_URD },
403 { "xi", 0x97, INSTR_SI_URD },
404 { "lm", 0x98, INSTR_RS_RRRD },
405 { "trace", 0x99, INSTR_RS_RRRD },
406 { "lam", 0x9a, INSTR_RS_AARD },
407 { "stam", 0x9b, INSTR_RS_AARD },
408 { "mvcle", 0xa8, INSTR_RS_RRRD },
409 { "clcle", 0xa9, INSTR_RS_RRRD },
410 { "stnsm", 0xac, INSTR_SI_URD },
411 { "stosm", 0xad, INSTR_SI_URD },
412 { "sigp", 0xae, INSTR_RS_RRRD },
413 { "mc", 0xaf, INSTR_SI_URD },
414 { "lra", 0xb1, INSTR_RX_RRRD },
415 { "stctl", 0xb6, INSTR_RS_CCRD },
416 { "lctl", 0xb7, INSTR_RS_CCRD },
417 { "cs", 0xba, INSTR_RS_RRRD },
418 { "cds", 0xbb, INSTR_RS_RRRD },
419 { "clm", 0xbd, INSTR_RS_RURD },
420 { "stcm", 0xbe, INSTR_RS_RURD },
421 { "icm", 0xbf, INSTR_RS_RURD },
422 { "mvn", 0xd1, INSTR_SS_L0RDRD },
423 { "mvc", 0xd2, INSTR_SS_L0RDRD },
424 { "mvz", 0xd3, INSTR_SS_L0RDRD },
425 { "nc", 0xd4, INSTR_SS_L0RDRD },
426 { "clc", 0xd5, INSTR_SS_L0RDRD },
427 { "oc", 0xd6, INSTR_SS_L0RDRD },
428 { "xc", 0xd7, INSTR_SS_L0RDRD },
429 { "mvck", 0xd9, INSTR_SS_RRRDRD },
430 { "mvcp", 0xda, INSTR_SS_RRRDRD },
431 { "mvcs", 0xdb, INSTR_SS_RRRDRD },
432 { "tr", 0xdc, INSTR_SS_L0RDRD },
433 { "trt", 0xdd, INSTR_SS_L0RDRD },
434 { "ed", 0xde, INSTR_SS_L0RDRD },
435 { "edmk", 0xdf, INSTR_SS_L0RDRD },
436 { "pku", 0xe1, INSTR_SS_L0RDRD },
437 { "unpku", 0xe2, INSTR_SS_L0RDRD },
438 { "mvcin", 0xe8, INSTR_SS_L0RDRD },
439 { "pka", 0xe9, INSTR_SS_L0RDRD },
440 { "unpka", 0xea, INSTR_SS_L0RDRD },
441 { "plo", 0xee, INSTR_SS_RRRDRD2 },
442 { "srp", 0xf0, INSTR_SS_LIRDRD },
443 { "mvo", 0xf1, INSTR_SS_LLRDRD },
444 { "pack", 0xf2, INSTR_SS_LLRDRD },
445 { "unpk", 0xf3, INSTR_SS_LLRDRD },
446 { "zap", 0xf8, INSTR_SS_LLRDRD },
447 { "cp", 0xf9, INSTR_SS_LLRDRD },
448 { "ap", 0xfa, INSTR_SS_LLRDRD },
449 { "sp", 0xfb, INSTR_SS_LLRDRD },
450 { "mp", 0xfc, INSTR_SS_LLRDRD },
451 { "dp", 0xfd, INSTR_SS_LLRDRD },
452 { "", 0, INSTR_INVALID }
453};
454
455static struct insn opcode_01[] = {
456#ifdef CONFIG_64BIT
457 { "sam64", 0x0e, INSTR_E },
458#endif
459 { "pr", 0x01, INSTR_E },
460 { "upt", 0x02, INSTR_E },
461 { "sckpf", 0x07, INSTR_E },
462 { "tam", 0x0b, INSTR_E },
463 { "sam24", 0x0c, INSTR_E },
464 { "sam31", 0x0d, INSTR_E },
465 { "trap2", 0xff, INSTR_E },
466 { "", 0, INSTR_INVALID }
467};
468
469static struct insn opcode_a5[] = {
470#ifdef CONFIG_64BIT
471 { "iihh", 0x00, INSTR_RI_RU },
472 { "iihl", 0x01, INSTR_RI_RU },
473 { "iilh", 0x02, INSTR_RI_RU },
474 { "iill", 0x03, INSTR_RI_RU },
475 { "nihh", 0x04, INSTR_RI_RU },
476 { "nihl", 0x05, INSTR_RI_RU },
477 { "nilh", 0x06, INSTR_RI_RU },
478 { "nill", 0x07, INSTR_RI_RU },
479 { "oihh", 0x08, INSTR_RI_RU },
480 { "oihl", 0x09, INSTR_RI_RU },
481 { "oilh", 0x0a, INSTR_RI_RU },
482 { "oill", 0x0b, INSTR_RI_RU },
483 { "llihh", 0x0c, INSTR_RI_RU },
484 { "llihl", 0x0d, INSTR_RI_RU },
485 { "llilh", 0x0e, INSTR_RI_RU },
486 { "llill", 0x0f, INSTR_RI_RU },
487#endif
488 { "", 0, INSTR_INVALID }
489};
490
491static struct insn opcode_a7[] = {
492#ifdef CONFIG_64BIT
493 { "tmhh", 0x02, INSTR_RI_RU },
494 { "tmhl", 0x03, INSTR_RI_RU },
495 { "brctg", 0x07, INSTR_RI_RP },
496 { "lghi", 0x09, INSTR_RI_RI },
497 { "aghi", 0x0b, INSTR_RI_RI },
498 { "mghi", 0x0d, INSTR_RI_RI },
499 { "cghi", 0x0f, INSTR_RI_RI },
500#endif
501 { "tmlh", 0x00, INSTR_RI_RU },
502 { "tmll", 0x01, INSTR_RI_RU },
503 { "brc", 0x04, INSTR_RI_UP },
504 { "bras", 0x05, INSTR_RI_RP },
505 { "brct", 0x06, INSTR_RI_RP },
506 { "lhi", 0x08, INSTR_RI_RI },
507 { "ahi", 0x0a, INSTR_RI_RI },
508 { "mhi", 0x0c, INSTR_RI_RI },
509 { "chi", 0x0e, INSTR_RI_RI },
510 { "", 0, INSTR_INVALID }
511};
512
513static struct insn opcode_b2[] = {
514#ifdef CONFIG_64BIT
515 { "sske", 0x2b, INSTR_RRF_M0RR },
516 { "stckf", 0x7c, INSTR_S_RD },
517 { "cu21", 0xa6, INSTR_RRF_M0RR },
518 { "cuutf", 0xa6, INSTR_RRF_M0RR },
519 { "cu12", 0xa7, INSTR_RRF_M0RR },
520 { "cutfu", 0xa7, INSTR_RRF_M0RR },
521 { "stfle", 0xb0, INSTR_S_RD },
522 { "lpswe", 0xb2, INSTR_S_RD },
523#endif
524 { "stidp", 0x02, INSTR_S_RD },
525 { "sck", 0x04, INSTR_S_RD },
526 { "stck", 0x05, INSTR_S_RD },
527 { "sckc", 0x06, INSTR_S_RD },
528 { "stckc", 0x07, INSTR_S_RD },
529 { "spt", 0x08, INSTR_S_RD },
530 { "stpt", 0x09, INSTR_S_RD },
531 { "spka", 0x0a, INSTR_S_RD },
532 { "ipk", 0x0b, INSTR_S_00 },
533 { "ptlb", 0x0d, INSTR_S_00 },
534 { "spx", 0x10, INSTR_S_RD },
535 { "stpx", 0x11, INSTR_S_RD },
536 { "stap", 0x12, INSTR_S_RD },
537 { "sie", 0x14, INSTR_S_RD },
538 { "pc", 0x18, INSTR_S_RD },
539 { "sac", 0x19, INSTR_S_RD },
540 { "cfc", 0x1a, INSTR_S_RD },
541 { "ipte", 0x21, INSTR_RRE_RR },
542 { "ipm", 0x22, INSTR_RRE_R0 },
543 { "ivsk", 0x23, INSTR_RRE_RR },
544 { "iac", 0x24, INSTR_RRE_R0 },
545 { "ssar", 0x25, INSTR_RRE_R0 },
546 { "epar", 0x26, INSTR_RRE_R0 },
547 { "esar", 0x27, INSTR_RRE_R0 },
548 { "pt", 0x28, INSTR_RRE_RR },
549 { "iske", 0x29, INSTR_RRE_RR },
550 { "rrbe", 0x2a, INSTR_RRE_RR },
551 { "sske", 0x2b, INSTR_RRE_RR },
552 { "tb", 0x2c, INSTR_RRE_0R },
553 { "dxr", 0x2d, INSTR_RRE_F0 },
554 { "pgin", 0x2e, INSTR_RRE_RR },
555 { "pgout", 0x2f, INSTR_RRE_RR },
556 { "csch", 0x30, INSTR_S_00 },
557 { "hsch", 0x31, INSTR_S_00 },
558 { "msch", 0x32, INSTR_S_RD },
559 { "ssch", 0x33, INSTR_S_RD },
560 { "stsch", 0x34, INSTR_S_RD },
561 { "tsch", 0x35, INSTR_S_RD },
562 { "tpi", 0x36, INSTR_S_RD },
563 { "sal", 0x37, INSTR_S_00 },
564 { "rsch", 0x38, INSTR_S_00 },
565 { "stcrw", 0x39, INSTR_S_RD },
566 { "stcps", 0x3a, INSTR_S_RD },
567 { "rchp", 0x3b, INSTR_S_00 },
568 { "schm", 0x3c, INSTR_S_00 },
569 { "bakr", 0x40, INSTR_RRE_RR },
570 { "cksm", 0x41, INSTR_RRE_RR },
571 { "sqdr", 0x44, INSTR_RRE_F0 },
572 { "sqer", 0x45, INSTR_RRE_F0 },
573 { "stura", 0x46, INSTR_RRE_RR },
574 { "msta", 0x47, INSTR_RRE_R0 },
575 { "palb", 0x48, INSTR_RRE_00 },
576 { "ereg", 0x49, INSTR_RRE_RR },
577 { "esta", 0x4a, INSTR_RRE_RR },
578 { "lura", 0x4b, INSTR_RRE_RR },
579 { "tar", 0x4c, INSTR_RRE_AR },
580 { "cpya", INSTR_RRE_AA },
581 { "sar", 0x4e, INSTR_RRE_AR },
582 { "ear", 0x4f, INSTR_RRE_RA },
583 { "csp", 0x50, INSTR_RRE_RR },
584 { "msr", 0x52, INSTR_RRE_RR },
585 { "mvpg", 0x54, INSTR_RRE_RR },
586 { "mvst", 0x55, INSTR_RRE_RR },
587 { "cuse", 0x57, INSTR_RRE_RR },
588 { "bsg", 0x58, INSTR_RRE_RR },
589 { "bsa", 0x5a, INSTR_RRE_RR },
590 { "clst", 0x5d, INSTR_RRE_RR },
591 { "srst", 0x5e, INSTR_RRE_RR },
592 { "cmpsc", 0x63, INSTR_RRE_RR },
593 { "cmpsc", 0x63, INSTR_RRE_RR },
594 { "siga", 0x74, INSTR_S_RD },
595 { "xsch", 0x76, INSTR_S_00 },
596 { "rp", 0x77, INSTR_S_RD },
597 { "stcke", 0x78, INSTR_S_RD },
598 { "sacf", 0x79, INSTR_S_RD },
599 { "stsi", 0x7d, INSTR_S_RD },
600 { "srnm", 0x99, INSTR_S_RD },
601 { "stfpc", 0x9c, INSTR_S_RD },
602 { "lfpc", 0x9d, INSTR_S_RD },
603 { "tre", 0xa5, INSTR_RRE_RR },
604 { "cuutf", 0xa6, INSTR_RRE_RR },
605 { "cutfu", 0xa7, INSTR_RRE_RR },
606 { "stfl", 0xb1, INSTR_S_RD },
607 { "trap4", 0xff, INSTR_S_RD },
608 { "", 0, INSTR_INVALID }
609};
610
611static struct insn opcode_b3[] = {
612#ifdef CONFIG_64BIT
613 { "maylr", 0x38, INSTR_RRF_F0FF },
614 { "mylr", 0x39, INSTR_RRF_F0FF },
615 { "mayr", 0x3a, INSTR_RRF_F0FF },
616 { "myr", 0x3b, INSTR_RRF_F0FF },
617 { "mayhr", 0x3c, INSTR_RRF_F0FF },
618 { "myhr", 0x3d, INSTR_RRF_F0FF },
619 { "cegbr", 0xa4, INSTR_RRE_RR },
620 { "cdgbr", 0xa5, INSTR_RRE_RR },
621 { "cxgbr", 0xa6, INSTR_RRE_RR },
622 { "cgebr", 0xa8, INSTR_RRF_U0RF },
623 { "cgdbr", 0xa9, INSTR_RRF_U0RF },
624 { "cgxbr", 0xaa, INSTR_RRF_U0RF },
625 { "cfer", 0xb8, INSTR_RRF_U0RF },
626 { "cfdr", 0xb9, INSTR_RRF_U0RF },
627 { "cfxr", 0xba, INSTR_RRF_U0RF },
628 { "cegr", 0xc4, INSTR_RRE_RR },
629 { "cdgr", 0xc5, INSTR_RRE_RR },
630 { "cxgr", 0xc6, INSTR_RRE_RR },
631 { "cger", 0xc8, INSTR_RRF_U0RF },
632 { "cgdr", 0xc9, INSTR_RRF_U0RF },
633 { "cgxr", 0xca, INSTR_RRF_U0RF },
634#endif
635 { "lpebr", 0x00, INSTR_RRE_FF },
636 { "lnebr", 0x01, INSTR_RRE_FF },
637 { "ltebr", 0x02, INSTR_RRE_FF },
638 { "lcebr", 0x03, INSTR_RRE_FF },
639 { "ldebr", 0x04, INSTR_RRE_FF },
640 { "lxdbr", 0x05, INSTR_RRE_FF },
641 { "lxebr", 0x06, INSTR_RRE_FF },
642 { "mxdbr", 0x07, INSTR_RRE_FF },
643 { "kebr", 0x08, INSTR_RRE_FF },
644 { "cebr", 0x09, INSTR_RRE_FF },
645 { "aebr", 0x0a, INSTR_RRE_FF },
646 { "sebr", 0x0b, INSTR_RRE_FF },
647 { "mdebr", 0x0c, INSTR_RRE_FF },
648 { "debr", 0x0d, INSTR_RRE_FF },
649 { "maebr", 0x0e, INSTR_RRF_F0FF },
650 { "msebr", 0x0f, INSTR_RRF_F0FF },
651 { "lpdbr", 0x10, INSTR_RRE_FF },
652 { "lndbr", 0x11, INSTR_RRE_FF },
653 { "ltdbr", 0x12, INSTR_RRE_FF },
654 { "lcdbr", 0x13, INSTR_RRE_FF },
655 { "sqebr", 0x14, INSTR_RRE_FF },
656 { "sqdbr", 0x15, INSTR_RRE_FF },
657 { "sqxbr", 0x16, INSTR_RRE_FF },
658 { "meebr", 0x17, INSTR_RRE_FF },
659 { "kdbr", 0x18, INSTR_RRE_FF },
660 { "cdbr", 0x19, INSTR_RRE_FF },
661 { "adbr", 0x1a, INSTR_RRE_FF },
662 { "sdbr", 0x1b, INSTR_RRE_FF },
663 { "mdbr", 0x1c, INSTR_RRE_FF },
664 { "ddbr", 0x1d, INSTR_RRE_FF },
665 { "madbr", 0x1e, INSTR_RRF_F0FF },
666 { "msdbr", 0x1f, INSTR_RRF_F0FF },
667 { "lder", 0x24, INSTR_RRE_FF },
668 { "lxdr", 0x25, INSTR_RRE_FF },
669 { "lxer", 0x26, INSTR_RRE_FF },
670 { "maer", 0x2e, INSTR_RRF_F0FF },
671 { "mser", 0x2f, INSTR_RRF_F0FF },
672 { "sqxr", 0x36, INSTR_RRE_FF },
673 { "meer", 0x37, INSTR_RRE_FF },
674 { "madr", 0x3e, INSTR_RRF_F0FF },
675 { "msdr", 0x3f, INSTR_RRF_F0FF },
676 { "lpxbr", 0x40, INSTR_RRE_FF },
677 { "lnxbr", 0x41, INSTR_RRE_FF },
678 { "ltxbr", 0x42, INSTR_RRE_FF },
679 { "lcxbr", 0x43, INSTR_RRE_FF },
680 { "ledbr", 0x44, INSTR_RRE_FF },
681 { "ldxbr", 0x45, INSTR_RRE_FF },
682 { "lexbr", 0x46, INSTR_RRE_FF },
683 { "fixbr", 0x47, INSTR_RRF_U0FF },
684 { "kxbr", 0x48, INSTR_RRE_FF },
685 { "cxbr", 0x49, INSTR_RRE_FF },
686 { "axbr", 0x4a, INSTR_RRE_FF },
687 { "sxbr", 0x4b, INSTR_RRE_FF },
688 { "mxbr", 0x4c, INSTR_RRE_FF },
689 { "dxbr", 0x4d, INSTR_RRE_FF },
690 { "tbedr", 0x50, INSTR_RRF_U0FF },
691 { "tbdr", 0x51, INSTR_RRF_U0FF },
692 { "diebr", 0x53, INSTR_RRF_FUFF },
693 { "fiebr", 0x57, INSTR_RRF_U0FF },
694 { "thder", 0x58, INSTR_RRE_RR },
695 { "thdr", 0x59, INSTR_RRE_RR },
696 { "didbr", 0x5b, INSTR_RRF_FUFF },
697 { "fidbr", 0x5f, INSTR_RRF_U0FF },
698 { "lpxr", 0x60, INSTR_RRE_FF },
699 { "lnxr", 0x61, INSTR_RRE_FF },
700 { "ltxr", 0x62, INSTR_RRE_FF },
701 { "lcxr", 0x63, INSTR_RRE_FF },
702 { "lxr", 0x65, INSTR_RRE_RR },
703 { "lexr", 0x66, INSTR_RRE_FF },
704 { "fixr", 0x67, INSTR_RRF_U0FF },
705 { "cxr", 0x69, INSTR_RRE_FF },
706 { "lzer", 0x74, INSTR_RRE_R0 },
707 { "lzdr", 0x75, INSTR_RRE_R0 },
708 { "lzxr", 0x76, INSTR_RRE_R0 },
709 { "fier", 0x77, INSTR_RRF_U0FF },
710 { "fidr", 0x7f, INSTR_RRF_U0FF },
711 { "sfpc", 0x84, INSTR_RRE_RR_OPT },
712 { "efpc", 0x8c, INSTR_RRE_RR_OPT },
713 { "cefbr", 0x94, INSTR_RRE_RF },
714 { "cdfbr", 0x95, INSTR_RRE_RF },
715 { "cxfbr", 0x96, INSTR_RRE_RF },
716 { "cfebr", 0x98, INSTR_RRF_U0RF },
717 { "cfdbr", 0x99, INSTR_RRF_U0RF },
718 { "cfxbr", 0x9a, INSTR_RRF_U0RF },
719 { "cefr", 0xb4, INSTR_RRE_RF },
720 { "cdfr", 0xb5, INSTR_RRE_RF },
721 { "cxfr", 0xb6, INSTR_RRE_RF },
722 { "", 0, INSTR_INVALID }
723};
724
725static struct insn opcode_b9[] = {
726#ifdef CONFIG_64BIT
727 { "lpgr", 0x00, INSTR_RRE_RR },
728 { "lngr", 0x01, INSTR_RRE_RR },
729 { "ltgr", 0x02, INSTR_RRE_RR },
730 { "lcgr", 0x03, INSTR_RRE_RR },
731 { "lgr", 0x04, INSTR_RRE_RR },
732 { "lurag", 0x05, INSTR_RRE_RR },
733 { "lgbr", 0x06, INSTR_RRE_RR },
734 { "lghr", 0x07, INSTR_RRE_RR },
735 { "agr", 0x08, INSTR_RRE_RR },
736 { "sgr", 0x09, INSTR_RRE_RR },
737 { "algr", 0x0a, INSTR_RRE_RR },
738 { "slgr", 0x0b, INSTR_RRE_RR },
739 { "msgr", 0x0c, INSTR_RRE_RR },
740 { "dsgr", 0x0d, INSTR_RRE_RR },
741 { "eregg", 0x0e, INSTR_RRE_RR },
742 { "lrvgr", 0x0f, INSTR_RRE_RR },
743 { "lpgfr", 0x10, INSTR_RRE_RR },
744 { "lngfr", 0x11, INSTR_RRE_RR },
745 { "ltgfr", 0x12, INSTR_RRE_RR },
746 { "lcgfr", 0x13, INSTR_RRE_RR },
747 { "lgfr", 0x14, INSTR_RRE_RR },
748 { "llgfr", 0x16, INSTR_RRE_RR },
749 { "llgtr", 0x17, INSTR_RRE_RR },
750 { "agfr", 0x18, INSTR_RRE_RR },
751 { "sgfr", 0x19, INSTR_RRE_RR },
752 { "algfr", 0x1a, INSTR_RRE_RR },
753 { "slgfr", 0x1b, INSTR_RRE_RR },
754 { "msgfr", 0x1c, INSTR_RRE_RR },
755 { "dsgfr", 0x1d, INSTR_RRE_RR },
756 { "cgr", 0x20, INSTR_RRE_RR },
757 { "clgr", 0x21, INSTR_RRE_RR },
758 { "sturg", 0x25, INSTR_RRE_RR },
759 { "lbr", 0x26, INSTR_RRE_RR },
760 { "lhr", 0x27, INSTR_RRE_RR },
761 { "cgfr", 0x30, INSTR_RRE_RR },
762 { "clgfr", 0x31, INSTR_RRE_RR },
763 { "bctgr", 0x46, INSTR_RRE_RR },
764 { "ngr", 0x80, INSTR_RRE_RR },
765 { "ogr", 0x81, INSTR_RRE_RR },
766 { "xgr", 0x82, INSTR_RRE_RR },
767 { "flogr", 0x83, INSTR_RRE_RR },
768 { "llgcr", 0x84, INSTR_RRE_RR },
769 { "llghr", 0x85, INSTR_RRE_RR },
770 { "mlgr", 0x86, INSTR_RRE_RR },
771 { "dlgr", 0x87, INSTR_RRE_RR },
772 { "alcgr", 0x88, INSTR_RRE_RR },
773 { "slbgr", 0x89, INSTR_RRE_RR },
774 { "cspg", 0x8a, INSTR_RRE_RR },
775 { "idte", 0x8e, INSTR_RRF_R0RR },
776 { "llcr", 0x94, INSTR_RRE_RR },
777 { "llhr", 0x95, INSTR_RRE_RR },
778 { "esea", 0x9d, INSTR_RRE_R0 },
779 { "lptea", 0xaa, INSTR_RRF_RURR },
780 { "cu14", 0xb0, INSTR_RRF_M0RR },
781 { "cu24", 0xb1, INSTR_RRF_M0RR },
782 { "cu41", 0xb2, INSTR_RRF_M0RR },
783 { "cu42", 0xb3, INSTR_RRF_M0RR },
784#endif
785 { "kmac", 0x1e, INSTR_RRE_RR },
786 { "lrvr", 0x1f, INSTR_RRE_RR },
787 { "km", 0x2e, INSTR_RRE_RR },
788 { "kmc", 0x2f, INSTR_RRE_RR },
789 { "kimd", 0x3e, INSTR_RRE_RR },
790 { "klmd", 0x3f, INSTR_RRE_RR },
791 { "epsw", 0x8d, INSTR_RRE_RR },
792 { "trtt", 0x90, INSTR_RRE_RR },
793 { "trtt", 0x90, INSTR_RRF_M0RR },
794 { "trto", 0x91, INSTR_RRE_RR },
795 { "trto", 0x91, INSTR_RRF_M0RR },
796 { "trot", 0x92, INSTR_RRE_RR },
797 { "trot", 0x92, INSTR_RRF_M0RR },
798 { "troo", 0x93, INSTR_RRE_RR },
799 { "troo", 0x93, INSTR_RRF_M0RR },
800 { "mlr", 0x96, INSTR_RRE_RR },
801 { "dlr", 0x97, INSTR_RRE_RR },
802 { "alcr", 0x98, INSTR_RRE_RR },
803 { "slbr", 0x99, INSTR_RRE_RR },
804 { "", 0, INSTR_INVALID }
805};
806
807static struct insn opcode_c0[] = {
808#ifdef CONFIG_64BIT
809 { "lgfi", 0x01, INSTR_RIL_RI },
810 { "xihf", 0x06, INSTR_RIL_RU },
811 { "xilf", 0x07, INSTR_RIL_RU },
812 { "iihf", 0x08, INSTR_RIL_RU },
813 { "iilf", 0x09, INSTR_RIL_RU },
814 { "nihf", 0x0a, INSTR_RIL_RU },
815 { "nilf", 0x0b, INSTR_RIL_RU },
816 { "oihf", 0x0c, INSTR_RIL_RU },
817 { "oilf", 0x0d, INSTR_RIL_RU },
818 { "llihf", 0x0e, INSTR_RIL_RU },
819 { "llilf", 0x0f, INSTR_RIL_RU },
820#endif
821 { "larl", 0x00, INSTR_RIL_RP },
822 { "brcl", 0x04, INSTR_RIL_UP },
823 { "brasl", 0x05, INSTR_RIL_RP },
824 { "", 0, INSTR_INVALID }
825};
826
827static struct insn opcode_c2[] = {
828#ifdef CONFIG_64BIT
829 { "slgfi", 0x04, INSTR_RIL_RU },
830 { "slfi", 0x05, INSTR_RIL_RU },
831 { "agfi", 0x08, INSTR_RIL_RI },
832 { "afi", 0x09, INSTR_RIL_RI },
833 { "algfi", 0x0a, INSTR_RIL_RU },
834 { "alfi", 0x0b, INSTR_RIL_RU },
835 { "cgfi", 0x0c, INSTR_RIL_RI },
836 { "cfi", 0x0d, INSTR_RIL_RI },
837 { "clgfi", 0x0e, INSTR_RIL_RU },
838 { "clfi", 0x0f, INSTR_RIL_RU },
839#endif
840 { "", 0, INSTR_INVALID }
841};
842
/*
 * Secondary decode table for primary opcode byte 0xc8.  Only mvcos
 * (move with optional specifications, 64-bit only) lives here.
 */
static struct insn opcode_c8[] = {
#ifdef CONFIG_64BIT
	{ "mvcos", 0x00, INSTR_SSF_RRDRD },
#endif
	{ "", 0, INSTR_INVALID }
};
849
/*
 * Secondary decode table for primary opcode byte 0xe3 (RXY format:
 * register + indexed storage operand with long displacement).  For
 * this opcode the discriminating fragment is the sixth instruction
 * byte, selected by find_insn().
 */
static struct insn opcode_e3[] = {
#ifdef CONFIG_64BIT
	{ "ltg", 0x02, INSTR_RXY_RRRD },
	{ "lrag", 0x03, INSTR_RXY_RRRD },
	{ "lg", 0x04, INSTR_RXY_RRRD },
	{ "cvby", 0x06, INSTR_RXY_RRRD },
	{ "ag", 0x08, INSTR_RXY_RRRD },
	{ "sg", 0x09, INSTR_RXY_RRRD },
	{ "alg", 0x0a, INSTR_RXY_RRRD },
	{ "slg", 0x0b, INSTR_RXY_RRRD },
	{ "msg", 0x0c, INSTR_RXY_RRRD },
	{ "dsg", 0x0d, INSTR_RXY_RRRD },
	{ "cvbg", 0x0e, INSTR_RXY_RRRD },
	{ "lrvg", 0x0f, INSTR_RXY_RRRD },
	{ "lt", 0x12, INSTR_RXY_RRRD },
	{ "lray", 0x13, INSTR_RXY_RRRD },
	{ "lgf", 0x14, INSTR_RXY_RRRD },
	{ "lgh", 0x15, INSTR_RXY_RRRD },
	{ "llgf", 0x16, INSTR_RXY_RRRD },
	{ "llgt", 0x17, INSTR_RXY_RRRD },
	{ "agf", 0x18, INSTR_RXY_RRRD },
	{ "sgf", 0x19, INSTR_RXY_RRRD },
	{ "algf", 0x1a, INSTR_RXY_RRRD },
	{ "slgf", 0x1b, INSTR_RXY_RRRD },
	{ "msgf", 0x1c, INSTR_RXY_RRRD },
	{ "dsgf", 0x1d, INSTR_RXY_RRRD },
	{ "cg", 0x20, INSTR_RXY_RRRD },
	{ "clg", 0x21, INSTR_RXY_RRRD },
	{ "stg", 0x24, INSTR_RXY_RRRD },
	{ "cvdy", 0x26, INSTR_RXY_RRRD },
	{ "cvdg", 0x2e, INSTR_RXY_RRRD },
	{ "strvg", 0x2f, INSTR_RXY_RRRD },
	{ "cgf", 0x30, INSTR_RXY_RRRD },
	{ "clgf", 0x31, INSTR_RXY_RRRD },
	{ "strvh", 0x3f, INSTR_RXY_RRRD },
	{ "bctg", 0x46, INSTR_RXY_RRRD },
	{ "sty", 0x50, INSTR_RXY_RRRD },
	{ "msy", 0x51, INSTR_RXY_RRRD },
	{ "ny", 0x54, INSTR_RXY_RRRD },
	{ "cly", 0x55, INSTR_RXY_RRRD },
	{ "oy", 0x56, INSTR_RXY_RRRD },
	{ "xy", 0x57, INSTR_RXY_RRRD },
	{ "ly", 0x58, INSTR_RXY_RRRD },
	{ "cy", 0x59, INSTR_RXY_RRRD },
	{ "ay", 0x5a, INSTR_RXY_RRRD },
	{ "sy", 0x5b, INSTR_RXY_RRRD },
	{ "aly", 0x5e, INSTR_RXY_RRRD },
	{ "sly", 0x5f, INSTR_RXY_RRRD },
	{ "sthy", 0x70, INSTR_RXY_RRRD },
	{ "lay", 0x71, INSTR_RXY_RRRD },
	{ "stcy", 0x72, INSTR_RXY_RRRD },
	{ "icy", 0x73, INSTR_RXY_RRRD },
	{ "lb", 0x76, INSTR_RXY_RRRD },
	{ "lgb", 0x77, INSTR_RXY_RRRD },
	{ "lhy", 0x78, INSTR_RXY_RRRD },
	{ "chy", 0x79, INSTR_RXY_RRRD },
	{ "ahy", 0x7a, INSTR_RXY_RRRD },
	{ "shy", 0x7b, INSTR_RXY_RRRD },
	{ "ng", 0x80, INSTR_RXY_RRRD },
	{ "og", 0x81, INSTR_RXY_RRRD },
	{ "xg", 0x82, INSTR_RXY_RRRD },
	{ "mlg", 0x86, INSTR_RXY_RRRD },
	{ "dlg", 0x87, INSTR_RXY_RRRD },
	{ "alcg", 0x88, INSTR_RXY_RRRD },
	{ "slbg", 0x89, INSTR_RXY_RRRD },
	{ "stpq", 0x8e, INSTR_RXY_RRRD },
	{ "lpq", 0x8f, INSTR_RXY_RRRD },
	{ "llgc", 0x90, INSTR_RXY_RRRD },
	{ "llgh", 0x91, INSTR_RXY_RRRD },
	{ "llc", 0x94, INSTR_RXY_RRRD },
	{ "llh", 0x95, INSTR_RXY_RRRD },
#endif
	{ "lrv", 0x1e, INSTR_RXY_RRRD },
	{ "lrvh", 0x1f, INSTR_RXY_RRRD },
	{ "strv", 0x3e, INSTR_RXY_RRRD },
	{ "ml", 0x96, INSTR_RXY_RRRD },
	{ "dl", 0x97, INSTR_RXY_RRRD },
	{ "alc", 0x98, INSTR_RXY_RRRD },
	{ "slb", 0x99, INSTR_RXY_RRRD },
	{ "", 0, INSTR_INVALID }
};
931
/*
 * Secondary decode table for primary opcode byte 0xe5 (SSE format:
 * two storage operands).  The discriminating fragment is the second
 * instruction byte.
 */
static struct insn opcode_e5[] = {
#ifdef CONFIG_64BIT
	{ "strag", 0x02, INSTR_SSE_RDRD },
#endif
	{ "lasp", 0x00, INSTR_SSE_RDRD },
	{ "tprot", 0x01, INSTR_SSE_RDRD },
	{ "mvcsk", 0x0e, INSTR_SSE_RDRD },
	{ "mvcdk", 0x0f, INSTR_SSE_RDRD },
	{ "", 0, INSTR_INVALID }
};
942
/*
 * Secondary decode table for primary opcode byte 0xeb (mostly RSY and
 * SIY long-displacement formats).  The discriminating fragment is the
 * sixth instruction byte, selected by find_insn().  Note that icmh is
 * listed twice on purpose, once per candidate operand format; the
 * format mask decides which entry matches.
 */
static struct insn opcode_eb[] = {
#ifdef CONFIG_64BIT
	{ "lmg", 0x04, INSTR_RSY_RRRD },
	{ "srag", 0x0a, INSTR_RSY_RRRD },
	{ "slag", 0x0b, INSTR_RSY_RRRD },
	{ "srlg", 0x0c, INSTR_RSY_RRRD },
	{ "sllg", 0x0d, INSTR_RSY_RRRD },
	{ "tracg", 0x0f, INSTR_RSY_RRRD },
	{ "csy", 0x14, INSTR_RSY_RRRD },
	{ "rllg", 0x1c, INSTR_RSY_RRRD },
	{ "clmh", 0x20, INSTR_RSY_RURD },
	{ "clmy", 0x21, INSTR_RSY_RURD },
	{ "stmg", 0x24, INSTR_RSY_RRRD },
	{ "stctg", 0x25, INSTR_RSY_CCRD },
	{ "stmh", 0x26, INSTR_RSY_RRRD },
	{ "stcmh", 0x2c, INSTR_RSY_RURD },
	{ "stcmy", 0x2d, INSTR_RSY_RURD },
	{ "lctlg", 0x2f, INSTR_RSY_CCRD },
	{ "csg", 0x30, INSTR_RSY_RRRD },
	{ "cdsy", 0x31, INSTR_RSY_RRRD },
	{ "cdsg", 0x3e, INSTR_RSY_RRRD },
	{ "bxhg", 0x44, INSTR_RSY_RRRD },
	{ "bxleg", 0x45, INSTR_RSY_RRRD },
	{ "tmy", 0x51, INSTR_SIY_URD },
	{ "mviy", 0x52, INSTR_SIY_URD },
	{ "niy", 0x54, INSTR_SIY_URD },
	{ "cliy", 0x55, INSTR_SIY_URD },
	{ "oiy", 0x56, INSTR_SIY_URD },
	{ "xiy", 0x57, INSTR_SIY_URD },
	{ "icmh", 0x80, INSTR_RSE_RURD },
	{ "icmh", 0x80, INSTR_RSY_RURD },
	{ "icmy", 0x81, INSTR_RSY_RURD },
	{ "clclu", 0x8f, INSTR_RSY_RRRD },
	{ "stmy", 0x90, INSTR_RSY_RRRD },
	{ "lmh", 0x96, INSTR_RSY_RRRD },
	{ "lmy", 0x98, INSTR_RSY_RRRD },
	{ "lamy", 0x9a, INSTR_RSY_AARD },
	{ "stamy", 0x9b, INSTR_RSY_AARD },
#endif
	{ "rll", 0x1d, INSTR_RSY_RRRD },
	{ "mvclu", 0x8e, INSTR_RSY_RRRD },
	{ "tp", 0xc0, INSTR_RSL_R0RD },
	{ "", 0, INSTR_INVALID }
};
987
/*
 * Secondary decode table for primary opcode byte 0xec (RIE format,
 * branch-relative-on-index, 64-bit only).  Fragment is instruction
 * byte six.
 */
static struct insn opcode_ec[] = {
#ifdef CONFIG_64BIT
	{ "brxhg", 0x44, INSTR_RIE_RRP },
	{ "brxlg", 0x45, INSTR_RIE_RRP },
#endif
	{ "", 0, INSTR_INVALID }
};
995
/*
 * Secondary decode table for primary opcode byte 0xed (floating point,
 * RXE/RXF/RXY formats).  Fragment is instruction byte six, selected by
 * find_insn().
 */
static struct insn opcode_ed[] = {
#ifdef CONFIG_64BIT
	{ "mayl", 0x38, INSTR_RXF_FRRDF },
	{ "myl", 0x39, INSTR_RXF_FRRDF },
	{ "may", 0x3a, INSTR_RXF_FRRDF },
	{ "my", 0x3b, INSTR_RXF_FRRDF },
	{ "mayh", 0x3c, INSTR_RXF_FRRDF },
	{ "myh", 0x3d, INSTR_RXF_FRRDF },
	{ "ley", 0x64, INSTR_RXY_FRRD },
	{ "ldy", 0x65, INSTR_RXY_FRRD },
	{ "stey", 0x66, INSTR_RXY_FRRD },
	{ "stdy", 0x67, INSTR_RXY_FRRD },
#endif
	{ "ldeb", 0x04, INSTR_RXE_FRRD },
	{ "lxdb", 0x05, INSTR_RXE_FRRD },
	{ "lxeb", 0x06, INSTR_RXE_FRRD },
	{ "mxdb", 0x07, INSTR_RXE_FRRD },
	{ "keb", 0x08, INSTR_RXE_FRRD },
	{ "ceb", 0x09, INSTR_RXE_FRRD },
	{ "aeb", 0x0a, INSTR_RXE_FRRD },
	{ "seb", 0x0b, INSTR_RXE_FRRD },
	{ "mdeb", 0x0c, INSTR_RXE_FRRD },
	{ "deb", 0x0d, INSTR_RXE_FRRD },
	{ "maeb", 0x0e, INSTR_RXF_FRRDF },
	{ "mseb", 0x0f, INSTR_RXF_FRRDF },
	{ "tceb", 0x10, INSTR_RXE_FRRD },
	{ "tcdb", 0x11, INSTR_RXE_FRRD },
	{ "tcxb", 0x12, INSTR_RXE_FRRD },
	{ "sqeb", 0x14, INSTR_RXE_FRRD },
	{ "sqdb", 0x15, INSTR_RXE_FRRD },
	{ "meeb", 0x17, INSTR_RXE_FRRD },
	{ "kdb", 0x18, INSTR_RXE_FRRD },
	{ "cdb", 0x19, INSTR_RXE_FRRD },
	{ "adb", 0x1a, INSTR_RXE_FRRD },
	{ "sdb", 0x1b, INSTR_RXE_FRRD },
	{ "mdb", 0x1c, INSTR_RXE_FRRD },
	{ "ddb", 0x1d, INSTR_RXE_FRRD },
	{ "madb", 0x1e, INSTR_RXF_FRRDF },
	{ "msdb", 0x1f, INSTR_RXF_FRRDF },
	{ "lde", 0x24, INSTR_RXE_FRRD },
	{ "lxd", 0x25, INSTR_RXE_FRRD },
	{ "lxe", 0x26, INSTR_RXE_FRRD },
	{ "mae", 0x2e, INSTR_RXF_FRRDF },
	{ "mse", 0x2f, INSTR_RXF_FRRDF },
	{ "sqe", 0x34, INSTR_RXE_FRRD },
	{ "mee", 0x37, INSTR_RXE_FRRD },
	{ "mad", 0x3e, INSTR_RXF_FRRDF },
	{ "msd", 0x3f, INSTR_RXF_FRRDF },
	{ "", 0, INSTR_INVALID }
};
1046
/*
 * Extracts an operand value from an instruction.
 *
 * "code" points at the first byte of the instruction; "operand"
 * describes the field: bit offset from the start of the instruction
 * ("shift"), width in bits ("bits") and interpretation flags.  Returns
 * the decoded value (sign extended into the unsigned return type for
 * signed / pc-relative operands).
 */
static unsigned int extract_operand(unsigned char *code,
				    const struct operand *operand)
{
	unsigned int val;
	int bits;

	/* Extract fragments of the operand byte for byte. */
	code += operand->shift / 8;
	bits = (operand->shift & 7) + operand->bits;
	val = 0;
	do {
		val <<= 8;
		val |= (unsigned int) *code++;
		bits -= 8;
	} while (bits > 0);
	/* bits is now <= 0; shift out the surplus low-order bits. */
	val >>= -bits;
	/* Mask to the field width.  Done as two shifts so that a full
	 * 32-bit field does not produce an undefined 1U << 32. */
	val &= ((1U << (operand->bits - 1)) << 1) - 1;

	/* Check for special long displacement case: the 20-bit field is
	 * stored with its high-order byte in the low 8 extracted bits;
	 * move it back on top of the low-order 12 bits. */
	if (operand->bits == 20 && operand->shift == 20)
		val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;

	/* Sign extend value if the operand is signed or pc relative. */
	if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
	    (val & (1U << (operand->bits - 1))))
		val |= (-1U << (operand->bits - 1)) << 1;

	/* Double value if the operand is pc relative: the encoded
	 * offset counts halfwords, callers want bytes. */
	if (operand->flags & OPERAND_PCREL)
		val <<= 1;

	/* Length x in an instructions has real length x + 1. */
	if (operand->flags & OPERAND_LENGTH)
		val++;
	return val;
}
1084
/*
 * Return the length in bytes of the instruction whose first byte is
 * "code".  The two most significant bits of the first opcode byte
 * determine the length: 00 -> 2 bytes, 01 or 10 -> 4 bytes,
 * 11 -> 6 bytes.
 */
static inline int insn_length(unsigned char code)
{
	if (code < 0x40)
		return 2;
	if (code < 0xc0)
		return 4;
	return 6;
}
1089
1090static struct insn *find_insn(unsigned char *code)
1091{
1092 unsigned char opfrag = code[1];
1093 unsigned char opmask;
1094 struct insn *table;
1095
1096 switch (code[0]) {
1097 case 0x01:
1098 table = opcode_01;
1099 break;
1100 case 0xa5:
1101 table = opcode_a5;
1102 break;
1103 case 0xa7:
1104 table = opcode_a7;
1105 break;
1106 case 0xb2:
1107 table = opcode_b2;
1108 break;
1109 case 0xb3:
1110 table = opcode_b3;
1111 break;
1112 case 0xb9:
1113 table = opcode_b9;
1114 break;
1115 case 0xc0:
1116 table = opcode_c0;
1117 break;
1118 case 0xc2:
1119 table = opcode_c2;
1120 break;
1121 case 0xc8:
1122 table = opcode_c8;
1123 break;
1124 case 0xe3:
1125 table = opcode_e3;
1126 opfrag = code[5];
1127 break;
1128 case 0xe5:
1129 table = opcode_e5;
1130 break;
1131 case 0xeb:
1132 table = opcode_eb;
1133 opfrag = code[5];
1134 break;
1135 case 0xec:
1136 table = opcode_ec;
1137 opfrag = code[5];
1138 break;
1139 case 0xed:
1140 table = opcode_ed;
1141 opfrag = code[5];
1142 break;
1143 default:
1144 table = opcode;
1145 opfrag = code[0];
1146 break;
1147 }
1148 while (table->format != INSTR_INVALID) {
1149 opmask = formats[table->format][0];
1150 if (table->opfrag == (opfrag & opmask))
1151 return table;
1152 table++;
1153 }
1154 return NULL;
1155}
1156
1157static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1158{
1159 struct insn *insn;
1160 const unsigned char *ops;
1161 const struct operand *operand;
1162 unsigned int value;
1163 char separator;
1164 char *ptr;
1165
1166 ptr = buffer;
1167 insn = find_insn(code);
1168 if (insn) {
1169 ptr += sprintf(ptr, "%.5s\t", insn->name);
1170 /* Extract the operands. */
1171 separator = 0;
1172 for (ops = formats[insn->format] + 1; *ops != 0; ops++) {
1173 operand = operands + *ops;
1174 value = extract_operand(code, operand);
1175 if ((operand->flags & OPERAND_INDEX) && value == 0)
1176 continue;
1177 if ((operand->flags & OPERAND_BASE) &&
1178 value == 0 && separator == '(') {
1179 separator = ',';
1180 continue;
1181 }
1182 if (separator)
1183 ptr += sprintf(ptr, "%c", separator);
1184 if (operand->flags & OPERAND_GPR)
1185 ptr += sprintf(ptr, "%%r%i", value);
1186 else if (operand->flags & OPERAND_FPR)
1187 ptr += sprintf(ptr, "%%f%i", value);
1188 else if (operand->flags & OPERAND_AR)
1189 ptr += sprintf(ptr, "%%a%i", value);
1190 else if (operand->flags & OPERAND_CR)
1191 ptr += sprintf(ptr, "%%c%i", value);
1192 else if (operand->flags & OPERAND_PCREL)
1193 ptr += sprintf(ptr, "%lx", value + addr);
1194 else if (operand->flags & OPERAND_SIGNED)
1195 ptr += sprintf(ptr, "%i", value);
1196 else
1197 ptr += sprintf(ptr, "%u", value);
1198 if (operand->flags & OPERAND_DISP)
1199 separator = '(';
1200 else if (operand->flags & OPERAND_BASE) {
1201 ptr += sprintf(ptr, ")");
1202 separator = ',';
1203 } else
1204 separator = ',';
1205 }
1206 } else
1207 ptr += sprintf(ptr, "unknown");
1208 return (int) (ptr - buffer);
1209}
1210
1211void show_code(struct pt_regs *regs)
1212{
1213 char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
1214 unsigned char code[64];
1215 char buffer[64], *ptr;
1216 mm_segment_t old_fs;
1217 unsigned long addr;
1218 int start, end, opsize, hops, i;
1219
1220 /* Get a snapshot of the 64 bytes surrounding the fault address. */
1221 old_fs = get_fs();
1222 set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
1223 for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
1224 addr = regs->psw.addr - 34 + start;
1225 if (__copy_from_user(code + start - 2,
1226 (char __user *) addr, 2))
1227 break;
1228 }
1229 for (end = 32; end < 64; end += 2) {
1230 addr = regs->psw.addr + end - 32;
1231 if (__copy_from_user(code + end,
1232 (char __user *) addr, 2))
1233 break;
1234 }
1235 set_fs(old_fs);
1236 /* Code snapshot useable ? */
1237 if ((regs->psw.addr & 1) || start >= end) {
1238 printk("%s Code: Bad PSW.\n", mode);
1239 return;
1240 }
1241 /* Find a starting point for the disassembly. */
1242 while (start < 32) {
1243 hops = 0;
1244 for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) {
1245 if (!find_insn(code + start + i))
1246 break;
1247 i += insn_length(code[start + i]);
1248 }
1249 if (start + i == 32)
1250 /* Looks good, sequence ends at PSW. */
1251 break;
1252 start += 2;
1253 }
1254 /* Decode the instructions. */
1255 ptr = buffer;
1256 ptr += sprintf(ptr, "%s Code:", mode);
1257 hops = 0;
1258 while (start < end && hops < 8) {
1259 *ptr++ = (start == 32) ? '>' : ' ';
1260 addr = regs->psw.addr + start - 32;
1261 ptr += sprintf(ptr, ONELONG, addr);
1262 opsize = insn_length(code[start]);
1263 if (start + opsize >= end)
1264 break;
1265 for (i = 0; i < opsize; i++)
1266 ptr += sprintf(ptr, "%02x", code[start + i]);
1267 *ptr++ = '\t';
1268 if (i < 6)
1269 *ptr++ = '\t';
1270 ptr += print_insn(ptr, code + start, addr);
1271 start += opsize;
1272 printk(buffer);
1273 ptr = buffer;
1274 ptr += sprintf(ptr, "\n ");
1275 hops++;
1276 }
1277 printk("\n");
1278}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5e47936573f2..50538e545618 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize)
253 break; 253 break;
254#endif 254#endif
255 /* 255 /*
256 * Finish memory detection at the first hole, unless 256 * Finish memory detection at the first hole
257 * - we reached the hsa -> skip it. 257 * if storage size is unknown.
258 * - we know there must be more.
259 */ 258 */
260 if (cc == -1UL && !memsize && old_addr != ADDR2G) 259 if (cc == -1UL && !memsize)
261 break; 260 break;
262 if (memsize && addr >= memsize) 261 if (memsize && addr >= memsize)
263 break; 262 break;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dddc3de30401..c8a2212014e0 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -249,8 +249,6 @@ sysc_do_restart:
249 bnz BASED(sysc_tracesys) 249 bnz BASED(sysc_tracesys)
250 basr %r14,%r8 # call sys_xxxx 250 basr %r14,%r8 # call sys_xxxx
251 st %r2,SP_R2(%r15) # store return value (change R2 on stack) 251 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
252 # ATTENTION: check sys_execve_glue before
253 # changing anything here !!
254 252
255sysc_return: 253sysc_return:
256 tm SP_PSW+1(%r15),0x01 # returning to user ? 254 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -381,50 +379,37 @@ ret_from_fork:
381 b BASED(sysc_return) 379 b BASED(sysc_return)
382 380
383# 381#
384# clone, fork, vfork, exec and sigreturn need glue, 382# kernel_execve function needs to deal with pt_regs that is not
385# because they all expect pt_regs as parameter, 383# at the usual place
386# but are called with different parameter.
387# return-address is set up above
388# 384#
389sys_clone_glue: 385 .globl kernel_execve
390 la %r2,SP_PTREGS(%r15) # load pt_regs 386kernel_execve:
391 l %r1,BASED(.Lclone) 387 stm %r12,%r15,48(%r15)
392 br %r1 # branch to sys_clone 388 lr %r14,%r15
393 389 l %r13,__LC_SVC_NEW_PSW+4
394sys_fork_glue: 390 s %r15,BASED(.Lc_spsize)
395 la %r2,SP_PTREGS(%r15) # load pt_regs 391 st %r14,__SF_BACKCHAIN(%r15)
396 l %r1,BASED(.Lfork) 392 la %r12,SP_PTREGS(%r15)
397 br %r1 # branch to sys_fork 393 xc 0(__PT_SIZE,%r12),0(%r12)
398 394 l %r1,BASED(.Ldo_execve)
399sys_vfork_glue: 395 lr %r5,%r12
400 la %r2,SP_PTREGS(%r15) # load pt_regs 396 basr %r14,%r1
401 l %r1,BASED(.Lvfork) 397 ltr %r2,%r2
402 br %r1 # branch to sys_vfork 398 be BASED(0f)
403 399 a %r15,BASED(.Lc_spsize)
404sys_execve_glue: 400 lm %r12,%r15,48(%r15)
405 la %r2,SP_PTREGS(%r15) # load pt_regs 401 br %r14
406 l %r1,BASED(.Lexecve) 402 # execve succeeded.
407 lr %r12,%r14 # save return address 4030: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
408 basr %r14,%r1 # call sys_execve 404 l %r15,__LC_KERNEL_STACK # load ksp
409 ltr %r2,%r2 # check if execve failed 405 s %r15,BASED(.Lc_spsize) # make room for registers & psw
410 bnz 0(%r12) # it did fail -> store result in gpr2 406 l %r9,__LC_THREAD_INFO
411 b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8 407 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
412 # in system_call/sysc_tracesys 408 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
413 409 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
414sys_sigreturn_glue: 410 l %r1,BASED(.Lexecve_tail)
415 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter 411 basr %r14,%r1
416 l %r1,BASED(.Lsigreturn) 412 b BASED(sysc_return)
417 br %r1 # branch to sys_sigreturn
418
419sys_rt_sigreturn_glue:
420 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
421 l %r1,BASED(.Lrt_sigreturn)
422 br %r1 # branch to sys_sigreturn
423
424sys_sigaltstack_glue:
425 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
426 l %r1,BASED(.Lsigaltstack)
427 br %r1 # branch to sys_sigreturn
428 413
429/* 414/*
430 * Program check handler routine 415 * Program check handler routine
@@ -1031,19 +1016,11 @@ cleanup_io_leave_insn:
1031.Ldo_extint: .long do_extint 1016.Ldo_extint: .long do_extint
1032.Ldo_signal: .long do_signal 1017.Ldo_signal: .long do_signal
1033.Lhandle_per: .long do_single_step 1018.Lhandle_per: .long do_single_step
1019.Ldo_execve: .long do_execve
1020.Lexecve_tail: .long execve_tail
1034.Ljump_table: .long pgm_check_table 1021.Ljump_table: .long pgm_check_table
1035.Lschedule: .long schedule 1022.Lschedule: .long schedule
1036.Lclone: .long sys_clone
1037.Lexecve: .long sys_execve
1038.Lfork: .long sys_fork
1039.Lrt_sigreturn: .long sys_rt_sigreturn
1040.Lrt_sigsuspend:
1041 .long sys_rt_sigsuspend
1042.Lsigreturn: .long sys_sigreturn
1043.Lsigsuspend: .long sys_sigsuspend
1044.Lsigaltstack: .long sys_sigaltstack
1045.Ltrace: .long syscall_trace 1023.Ltrace: .long syscall_trace
1046.Lvfork: .long sys_vfork
1047.Lschedtail: .long schedule_tail 1024.Lschedtail: .long schedule_tail
1048.Lsysc_table: .long sys_call_table 1025.Lsysc_table: .long sys_call_table
1049#ifdef CONFIG_TRACE_IRQFLAGS 1026#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 0f758c329a5d..93745fd8f555 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -244,8 +244,6 @@ sysc_noemu:
244 jnz sysc_tracesys 244 jnz sysc_tracesys
245 basr %r14,%r8 # call sys_xxxx 245 basr %r14,%r8 # call sys_xxxx
246 stg %r2,SP_R2(%r15) # store return value (change R2 on stack) 246 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
247 # ATTENTION: check sys_execve_glue before
248 # changing anything here !!
249 247
250sysc_return: 248sysc_return:
251 tm SP_PSW+1(%r15),0x01 # returning to user ? 249 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -371,77 +369,35 @@ ret_from_fork:
371 j sysc_return 369 j sysc_return
372 370
373# 371#
374# clone, fork, vfork, exec and sigreturn need glue, 372# kernel_execve function needs to deal with pt_regs that is not
375# because they all expect pt_regs as parameter, 373# at the usual place
376# but are called with different parameter.
377# return-address is set up above
378# 374#
379sys_clone_glue: 375 .globl kernel_execve
380 la %r2,SP_PTREGS(%r15) # load pt_regs 376kernel_execve:
381 jg sys_clone # branch to sys_clone 377 stmg %r12,%r15,96(%r15)
382 378 lgr %r14,%r15
383#ifdef CONFIG_COMPAT 379 aghi %r15,-SP_SIZE
384sys32_clone_glue: 380 stg %r14,__SF_BACKCHAIN(%r15)
385 la %r2,SP_PTREGS(%r15) # load pt_regs 381 la %r12,SP_PTREGS(%r15)
386 jg sys32_clone # branch to sys32_clone 382 xc 0(__PT_SIZE,%r12),0(%r12)
387#endif 383 lgr %r5,%r12
388 384 brasl %r14,do_execve
389sys_fork_glue: 385 ltgfr %r2,%r2
390 la %r2,SP_PTREGS(%r15) # load pt_regs 386 je 0f
391 jg sys_fork # branch to sys_fork 387 aghi %r15,SP_SIZE
392 388 lmg %r12,%r15,96(%r15)
393sys_vfork_glue: 389 br %r14
394 la %r2,SP_PTREGS(%r15) # load pt_regs 390 # execve succeeded.
395 jg sys_vfork # branch to sys_vfork 3910: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
396 392 lg %r15,__LC_KERNEL_STACK # load ksp
397sys_execve_glue: 393 aghi %r15,-SP_SIZE # make room for registers & psw
398 la %r2,SP_PTREGS(%r15) # load pt_regs 394 lg %r13,__LC_SVC_NEW_PSW+8
399 lgr %r12,%r14 # save return address 395 lg %r9,__LC_THREAD_INFO
400 brasl %r14,sys_execve # call sys_execve 396 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
401 ltgr %r2,%r2 # check if execve failed 397 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
402 bnz 0(%r12) # it did fail -> store result in gpr2 398 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
403 b 6(%r12) # SKIP STG 2,SP_R2(15) in 399 brasl %r14,execve_tail
404 # system_call/sysc_tracesys 400 j sysc_return
405#ifdef CONFIG_COMPAT
406sys32_execve_glue:
407 la %r2,SP_PTREGS(%r15) # load pt_regs
408 lgr %r12,%r14 # save return address
409 brasl %r14,sys32_execve # call sys32_execve
410 ltgr %r2,%r2 # check if execve failed
411 bnz 0(%r12) # it did fail -> store result in gpr2
412 b 6(%r12) # SKIP STG 2,SP_R2(15) in
413 # system_call/sysc_tracesys
414#endif
415
416sys_sigreturn_glue:
417 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
418 jg sys_sigreturn # branch to sys_sigreturn
419
420#ifdef CONFIG_COMPAT
421sys32_sigreturn_glue:
422 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
423 jg sys32_sigreturn # branch to sys32_sigreturn
424#endif
425
426sys_rt_sigreturn_glue:
427 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
428 jg sys_rt_sigreturn # branch to sys_sigreturn
429
430#ifdef CONFIG_COMPAT
431sys32_rt_sigreturn_glue:
432 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
433 jg sys32_rt_sigreturn # branch to sys32_sigreturn
434#endif
435
436sys_sigaltstack_glue:
437 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
438 jg sys_sigaltstack # branch to sys_sigreturn
439
440#ifdef CONFIG_COMPAT
441sys32_sigaltstack_glue:
442 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
443 jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
444#endif
445 401
446/* 402/*
447 * Program check handler routine 403 * Program check handler routine
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 37010709fe68..a87b1976d409 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -39,7 +39,69 @@ startup_continue:
39 basr %r13,0 # get base 39 basr %r13,0 # get base
40.LPG1: sll %r13,1 # remove high order bit 40.LPG1: sll %r13,1 # remove high order bit
41 srl %r13,1 41 srl %r13,1
42 lhi %r1,1 # mode 1 = esame 42
43#ifdef CONFIG_ZFCPDUMP
44
45 # check if we have been ipled using zfcp dump:
46
47 tm 0xb9,0x01 # test if subchannel is enabled
48 jno .nodump # subchannel disabled
49 l %r1,0xb8
50 la %r5,.Lipl_schib-.LPG1(%r13)
51 stsch 0(%r5) # get schib of subchannel
52 jne .nodump # schib not available
53 tm 5(%r5),0x01 # devno valid?
54 jno .nodump
55 tm 4(%r5),0x80 # qdio capable device?
56 jno .nodump
57 l %r2,20(%r0) # address of ipl parameter block
58 lhi %r3,0
59 ic %r3,0x148(%r2) # get opt field
60 chi %r3,0x20 # load with dump?
61 jne .nodump
62
63 # store all prefix registers in case of load with dump:
64
65 la %r7,0 # base register for 0 page
66 la %r8,0 # first cpu
67 l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
68 ahi %r11,4 # skip boot cpu
69 lr %r12,%r11
70 ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
71 stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
721:
73 cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
74 je 4f # if yes get next cpu
752:
76 lr %r9,%r7
77 sigp %r9,%r8,0x9 # stop & store status of cpu
78 brc 8,3f # accepted
79 brc 4,4f # status stored: next cpu
80 brc 2,2b # busy: try again
81 brc 1,4f # not op: next cpu
823:
83 mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
84 ahi %r11,4 # next element in prefix array
85 clr %r11,%r12
86 je 5f # no more space in prefix array
874:
88 ahi %r8,1 # next cpu (r8 += 1)
89 cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
90 jl 1b # jump if not last cpu
915:
92 lhi %r1,2 # mode 2 = esame (dump)
93 j 6f
94 .align 4
95.Lipl_schib:
96 .rept 13
97 .long 0
98 .endr
99.nodump:
100 lhi %r1,1 # mode 1 = esame (normal ipl)
1016:
102#else
103 lhi %r1,1 # mode 1 = esame (normal ipl)
104#endif /* CONFIG_ZFCPDUMP */
43 mvi __LC_AR_MODE_ID,1 # set esame flag 105 mvi __LC_AR_MODE_ID,1 # set esame flag
44 slr %r0,%r0 # set cpuid to zero 106 slr %r0,%r0 # set cpuid to zero
45 sigp %r1,%r0,0x12 # switch to esame mode 107 sigp %r1,%r0,0x12 # switch to esame mode
@@ -149,6 +211,14 @@ startup_continue:
149.L4malign:.quad 0xffffffffffc00000 211.L4malign:.quad 0xffffffffffc00000
150.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 212.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
151.Lnop: .long 0x07000700 213.Lnop: .long 0x07000700
214#ifdef CONFIG_ZFCPDUMP
215.Lcurrent_cpu:
216 .long 0x0
217.Llast_cpu:
218 .long 0x0000ffff
219.Lpref_arr_ptr:
220 .long zfcpdump_prefix_array
221#endif /* CONFIG_ZFCPDUMP */
152.Lparmaddr: 222.Lparmaddr:
153 .quad PARMAREA 223 .quad PARMAREA
154 .align 64 224 .align 64
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f731185bf2bd..06833ac2b115 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -29,36 +29,21 @@
29#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) 29#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
30#define SCCB_FLAG (s390_readinfo_sccb.flags) 30#define SCCB_FLAG (s390_readinfo_sccb.flags)
31 31
32enum ipl_type { 32#define IPL_UNKNOWN_STR "unknown"
33 IPL_TYPE_NONE = 1, 33#define IPL_CCW_STR "ccw"
34 IPL_TYPE_UNKNOWN = 2, 34#define IPL_FCP_STR "fcp"
35 IPL_TYPE_CCW = 4, 35#define IPL_FCP_DUMP_STR "fcp_dump"
36 IPL_TYPE_FCP = 8, 36#define IPL_NSS_STR "nss"
37 IPL_TYPE_NSS = 16,
38};
39
40#define IPL_NONE_STR "none"
41#define IPL_UNKNOWN_STR "unknown"
42#define IPL_CCW_STR "ccw"
43#define IPL_FCP_STR "fcp"
44#define IPL_NSS_STR "nss"
45
46/*
47 * Must be in data section since the bss section
48 * is not cleared when these are accessed.
49 */
50u16 ipl_devno __attribute__((__section__(".data"))) = 0;
51u32 ipl_flags __attribute__((__section__(".data"))) = 0;
52 37
53static char *ipl_type_str(enum ipl_type type) 38static char *ipl_type_str(enum ipl_type type)
54{ 39{
55 switch (type) { 40 switch (type) {
56 case IPL_TYPE_NONE:
57 return IPL_NONE_STR;
58 case IPL_TYPE_CCW: 41 case IPL_TYPE_CCW:
59 return IPL_CCW_STR; 42 return IPL_CCW_STR;
60 case IPL_TYPE_FCP: 43 case IPL_TYPE_FCP:
61 return IPL_FCP_STR; 44 return IPL_FCP_STR;
45 case IPL_TYPE_FCP_DUMP:
46 return IPL_FCP_DUMP_STR;
62 case IPL_TYPE_NSS: 47 case IPL_TYPE_NSS:
63 return IPL_NSS_STR; 48 return IPL_NSS_STR;
64 case IPL_TYPE_UNKNOWN: 49 case IPL_TYPE_UNKNOWN:
@@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type)
67 } 52 }
68} 53}
69 54
55enum dump_type {
56 DUMP_TYPE_NONE = 1,
57 DUMP_TYPE_CCW = 2,
58 DUMP_TYPE_FCP = 4,
59};
60
61#define DUMP_NONE_STR "none"
62#define DUMP_CCW_STR "ccw"
63#define DUMP_FCP_STR "fcp"
64
65static char *dump_type_str(enum dump_type type)
66{
67 switch (type) {
68 case DUMP_TYPE_NONE:
69 return DUMP_NONE_STR;
70 case DUMP_TYPE_CCW:
71 return DUMP_CCW_STR;
72 case DUMP_TYPE_FCP:
73 return DUMP_FCP_STR;
74 default:
75 return NULL;
76 }
77}
78
79/*
80 * Must be in data section since the bss section
81 * is not cleared when these are accessed.
82 */
83static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
84u32 ipl_flags __attribute__((__section__(".data"))) = 0;
85
70enum ipl_method { 86enum ipl_method {
71 IPL_METHOD_NONE, 87 REIPL_METHOD_CCW_CIO,
72 IPL_METHOD_CCW_CIO, 88 REIPL_METHOD_CCW_DIAG,
73 IPL_METHOD_CCW_DIAG, 89 REIPL_METHOD_CCW_VM,
74 IPL_METHOD_CCW_VM, 90 REIPL_METHOD_FCP_RO_DIAG,
75 IPL_METHOD_FCP_RO_DIAG, 91 REIPL_METHOD_FCP_RW_DIAG,
76 IPL_METHOD_FCP_RW_DIAG, 92 REIPL_METHOD_FCP_RO_VM,
77 IPL_METHOD_FCP_RO_VM, 93 REIPL_METHOD_FCP_DUMP,
78 IPL_METHOD_NSS, 94 REIPL_METHOD_NSS,
95 REIPL_METHOD_DEFAULT,
96};
97
98enum dump_method {
99 DUMP_METHOD_NONE,
100 DUMP_METHOD_CCW_CIO,
101 DUMP_METHOD_CCW_DIAG,
102 DUMP_METHOD_CCW_VM,
103 DUMP_METHOD_FCP_DIAG,
79}; 104};
80 105
81enum shutdown_action { 106enum shutdown_action {
@@ -107,15 +132,15 @@ static int diag308_set_works = 0;
107static int reipl_capabilities = IPL_TYPE_UNKNOWN; 132static int reipl_capabilities = IPL_TYPE_UNKNOWN;
108 133
109static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 134static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
110static enum ipl_method reipl_method = IPL_METHOD_NONE; 135static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
111static struct ipl_parameter_block *reipl_block_fcp; 136static struct ipl_parameter_block *reipl_block_fcp;
112static struct ipl_parameter_block *reipl_block_ccw; 137static struct ipl_parameter_block *reipl_block_ccw;
113 138
114static char reipl_nss_name[NSS_NAME_SIZE + 1]; 139static char reipl_nss_name[NSS_NAME_SIZE + 1];
115 140
116static int dump_capabilities = IPL_TYPE_NONE; 141static int dump_capabilities = DUMP_TYPE_NONE;
117static enum ipl_type dump_type = IPL_TYPE_NONE; 142static enum dump_type dump_type = DUMP_TYPE_NONE;
118static enum ipl_method dump_method = IPL_METHOD_NONE; 143static enum dump_method dump_method = DUMP_METHOD_NONE;
119static struct ipl_parameter_block *dump_block_fcp; 144static struct ipl_parameter_block *dump_block_fcp;
120static struct ipl_parameter_block *dump_block_ccw; 145static struct ipl_parameter_block *dump_block_ccw;
121 146
@@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr)
134 : "d" (subcode) : "cc", "memory"); 159 : "d" (subcode) : "cc", "memory");
135 return _rc; 160 return _rc;
136} 161}
162EXPORT_SYMBOL_GPL(diag308);
137 163
138/* SYSFS */ 164/* SYSFS */
139 165
@@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs)
197 * ipl section 223 * ipl section
198 */ 224 */
199 225
200static enum ipl_type ipl_get_type(void) 226static __init enum ipl_type get_ipl_type(void)
201{ 227{
202 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 228 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
203 229
@@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void)
211 return IPL_TYPE_UNKNOWN; 237 return IPL_TYPE_UNKNOWN;
212 if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) 238 if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
213 return IPL_TYPE_UNKNOWN; 239 return IPL_TYPE_UNKNOWN;
240 if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
241 return IPL_TYPE_FCP_DUMP;
214 return IPL_TYPE_FCP; 242 return IPL_TYPE_FCP;
215} 243}
216 244
245void __init setup_ipl_info(void)
246{
247 ipl_info.type = get_ipl_type();
248 switch (ipl_info.type) {
249 case IPL_TYPE_CCW:
250 ipl_info.data.ccw.dev_id.devno = ipl_devno;
251 ipl_info.data.ccw.dev_id.ssid = 0;
252 break;
253 case IPL_TYPE_FCP:
254 case IPL_TYPE_FCP_DUMP:
255 ipl_info.data.fcp.dev_id.devno =
256 IPL_PARMBLOCK_START->ipl_info.fcp.devno;
257 ipl_info.data.fcp.dev_id.ssid = 0;
258 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
259 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
260 break;
261 case IPL_TYPE_NSS:
262 strncpy(ipl_info.data.nss.name, kernel_nss_name,
263 sizeof(ipl_info.data.nss.name));
264 break;
265 case IPL_TYPE_UNKNOWN:
266 default:
267 /* We have no info to copy */
268 break;
269 }
270}
271
272struct ipl_info ipl_info;
273EXPORT_SYMBOL_GPL(ipl_info);
274
217static ssize_t ipl_type_show(struct subsystem *subsys, char *page) 275static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
218{ 276{
219 return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); 277 return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
220} 278}
221 279
222static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 280static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
@@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
225{ 283{
226 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 284 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
227 285
228 switch (ipl_get_type()) { 286 switch (ipl_info.type) {
229 case IPL_TYPE_CCW: 287 case IPL_TYPE_CCW:
230 return sprintf(page, "0.0.%04x\n", ipl_devno); 288 return sprintf(page, "0.0.%04x\n", ipl_devno);
231 case IPL_TYPE_FCP: 289 case IPL_TYPE_FCP:
290 case IPL_TYPE_FCP_DUMP:
232 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); 291 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
233 default: 292 default:
234 return 0; 293 return 0;
@@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type)
485 switch(type) { 544 switch(type) {
486 case IPL_TYPE_CCW: 545 case IPL_TYPE_CCW:
487 if (MACHINE_IS_VM) 546 if (MACHINE_IS_VM)
488 reipl_method = IPL_METHOD_CCW_VM; 547 reipl_method = REIPL_METHOD_CCW_VM;
489 else 548 else
490 reipl_method = IPL_METHOD_CCW_CIO; 549 reipl_method = REIPL_METHOD_CCW_CIO;
491 break; 550 break;
492 case IPL_TYPE_FCP: 551 case IPL_TYPE_FCP:
493 if (diag308_set_works) 552 if (diag308_set_works)
494 reipl_method = IPL_METHOD_FCP_RW_DIAG; 553 reipl_method = REIPL_METHOD_FCP_RW_DIAG;
495 else if (MACHINE_IS_VM) 554 else if (MACHINE_IS_VM)
496 reipl_method = IPL_METHOD_FCP_RO_VM; 555 reipl_method = REIPL_METHOD_FCP_RO_VM;
497 else 556 else
498 reipl_method = IPL_METHOD_FCP_RO_DIAG; 557 reipl_method = REIPL_METHOD_FCP_RO_DIAG;
558 break;
559 case IPL_TYPE_FCP_DUMP:
560 reipl_method = REIPL_METHOD_FCP_DUMP;
499 break; 561 break;
500 case IPL_TYPE_NSS: 562 case IPL_TYPE_NSS:
501 reipl_method = IPL_METHOD_NSS; 563 reipl_method = REIPL_METHOD_NSS;
564 break;
565 case IPL_TYPE_UNKNOWN:
566 reipl_method = REIPL_METHOD_DEFAULT;
502 break; 567 break;
503 default: 568 default:
504 reipl_method = IPL_METHOD_NONE; 569 BUG();
505 } 570 }
506 reipl_type = type; 571 reipl_type = type;
507 return 0; 572 return 0;
@@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = {
579 644
580/* dump type */ 645/* dump type */
581 646
582static int dump_set_type(enum ipl_type type) 647static int dump_set_type(enum dump_type type)
583{ 648{
584 if (!(dump_capabilities & type)) 649 if (!(dump_capabilities & type))
585 return -EINVAL; 650 return -EINVAL;
586 switch(type) { 651 switch(type) {
587 case IPL_TYPE_CCW: 652 case DUMP_TYPE_CCW:
588 if (MACHINE_IS_VM) 653 if (MACHINE_IS_VM)
589 dump_method = IPL_METHOD_CCW_VM; 654 dump_method = DUMP_METHOD_CCW_VM;
590 else 655 else
591 dump_method = IPL_METHOD_CCW_CIO; 656 dump_method = DUMP_METHOD_CCW_CIO;
592 break; 657 break;
593 case IPL_TYPE_FCP: 658 case DUMP_TYPE_FCP:
594 dump_method = IPL_METHOD_FCP_RW_DIAG; 659 dump_method = DUMP_METHOD_FCP_DIAG;
595 break; 660 break;
596 default: 661 default:
597 dump_method = IPL_METHOD_NONE; 662 dump_method = DUMP_METHOD_NONE;
598 } 663 }
599 dump_type = type; 664 dump_type = type;
600 return 0; 665 return 0;
@@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type)
602 667
603static ssize_t dump_type_show(struct subsystem *subsys, char *page) 668static ssize_t dump_type_show(struct subsystem *subsys, char *page)
604{ 669{
605 return sprintf(page, "%s\n", ipl_type_str(dump_type)); 670 return sprintf(page, "%s\n", dump_type_str(dump_type));
606} 671}
607 672
608static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, 673static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
@@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
610{ 675{
611 int rc = -EINVAL; 676 int rc = -EINVAL;
612 677
613 if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) 678 if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
614 rc = dump_set_type(IPL_TYPE_NONE); 679 rc = dump_set_type(DUMP_TYPE_NONE);
615 else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) 680 else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
616 rc = dump_set_type(IPL_TYPE_CCW); 681 rc = dump_set_type(DUMP_TYPE_CCW);
617 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) 682 else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
618 rc = dump_set_type(IPL_TYPE_FCP); 683 rc = dump_set_type(DUMP_TYPE_FCP);
619 return (rc != 0) ? rc : len; 684 return (rc != 0) ? rc : len;
620} 685}
621 686
@@ -664,14 +729,14 @@ void do_reipl(void)
664 char loadparm[LOADPARM_LEN + 1]; 729 char loadparm[LOADPARM_LEN + 1];
665 730
666 switch (reipl_method) { 731 switch (reipl_method) {
667 case IPL_METHOD_CCW_CIO: 732 case REIPL_METHOD_CCW_CIO:
668 devid.devno = reipl_block_ccw->ipl_info.ccw.devno; 733 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
669 if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) 734 if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
670 diag308(DIAG308_IPL, NULL); 735 diag308(DIAG308_IPL, NULL);
671 devid.ssid = 0; 736 devid.ssid = 0;
672 reipl_ccw_dev(&devid); 737 reipl_ccw_dev(&devid);
673 break; 738 break;
674 case IPL_METHOD_CCW_VM: 739 case REIPL_METHOD_CCW_VM:
675 reipl_get_ascii_loadparm(loadparm); 740 reipl_get_ascii_loadparm(loadparm);
676 if (strlen(loadparm) == 0) 741 if (strlen(loadparm) == 0)
677 sprintf(buf, "IPL %X", 742 sprintf(buf, "IPL %X",
@@ -681,30 +746,32 @@ void do_reipl(void)
681 reipl_block_ccw->ipl_info.ccw.devno, loadparm); 746 reipl_block_ccw->ipl_info.ccw.devno, loadparm);
682 __cpcmd(buf, NULL, 0, NULL); 747 __cpcmd(buf, NULL, 0, NULL);
683 break; 748 break;
684 case IPL_METHOD_CCW_DIAG: 749 case REIPL_METHOD_CCW_DIAG:
685 diag308(DIAG308_SET, reipl_block_ccw); 750 diag308(DIAG308_SET, reipl_block_ccw);
686 diag308(DIAG308_IPL, NULL); 751 diag308(DIAG308_IPL, NULL);
687 break; 752 break;
688 case IPL_METHOD_FCP_RW_DIAG: 753 case REIPL_METHOD_FCP_RW_DIAG:
689 diag308(DIAG308_SET, reipl_block_fcp); 754 diag308(DIAG308_SET, reipl_block_fcp);
690 diag308(DIAG308_IPL, NULL); 755 diag308(DIAG308_IPL, NULL);
691 break; 756 break;
692 case IPL_METHOD_FCP_RO_DIAG: 757 case REIPL_METHOD_FCP_RO_DIAG:
693 diag308(DIAG308_IPL, NULL); 758 diag308(DIAG308_IPL, NULL);
694 break; 759 break;
695 case IPL_METHOD_FCP_RO_VM: 760 case REIPL_METHOD_FCP_RO_VM:
696 __cpcmd("IPL", NULL, 0, NULL); 761 __cpcmd("IPL", NULL, 0, NULL);
697 break; 762 break;
698 case IPL_METHOD_NSS: 763 case REIPL_METHOD_NSS:
699 sprintf(buf, "IPL %s", reipl_nss_name); 764 sprintf(buf, "IPL %s", reipl_nss_name);
700 __cpcmd(buf, NULL, 0, NULL); 765 __cpcmd(buf, NULL, 0, NULL);
701 break; 766 break;
702 case IPL_METHOD_NONE: 767 case REIPL_METHOD_DEFAULT:
703 default:
704 if (MACHINE_IS_VM) 768 if (MACHINE_IS_VM)
705 __cpcmd("IPL", NULL, 0, NULL); 769 __cpcmd("IPL", NULL, 0, NULL);
706 diag308(DIAG308_IPL, NULL); 770 diag308(DIAG308_IPL, NULL);
707 break; 771 break;
772 case REIPL_METHOD_FCP_DUMP:
773 default:
774 break;
708 } 775 }
709 signal_processor(smp_processor_id(), sigp_stop_and_store_status); 776 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
710} 777}
@@ -715,28 +782,28 @@ static void do_dump(void)
715 static char buf[100]; 782 static char buf[100];
716 783
717 switch (dump_method) { 784 switch (dump_method) {
718 case IPL_METHOD_CCW_CIO: 785 case DUMP_METHOD_CCW_CIO:
719 smp_send_stop(); 786 smp_send_stop();
720 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 787 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
721 devid.ssid = 0; 788 devid.ssid = 0;
722 reipl_ccw_dev(&devid); 789 reipl_ccw_dev(&devid);
723 break; 790 break;
724 case IPL_METHOD_CCW_VM: 791 case DUMP_METHOD_CCW_VM:
725 smp_send_stop(); 792 smp_send_stop();
726 sprintf(buf, "STORE STATUS"); 793 sprintf(buf, "STORE STATUS");
727 __cpcmd(buf, NULL, 0, NULL); 794 __cpcmd(buf, NULL, 0, NULL);
728 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); 795 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
729 __cpcmd(buf, NULL, 0, NULL); 796 __cpcmd(buf, NULL, 0, NULL);
730 break; 797 break;
731 case IPL_METHOD_CCW_DIAG: 798 case DUMP_METHOD_CCW_DIAG:
732 diag308(DIAG308_SET, dump_block_ccw); 799 diag308(DIAG308_SET, dump_block_ccw);
733 diag308(DIAG308_DUMP, NULL); 800 diag308(DIAG308_DUMP, NULL);
734 break; 801 break;
735 case IPL_METHOD_FCP_RW_DIAG: 802 case DUMP_METHOD_FCP_DIAG:
736 diag308(DIAG308_SET, dump_block_fcp); 803 diag308(DIAG308_SET, dump_block_fcp);
737 diag308(DIAG308_DUMP, NULL); 804 diag308(DIAG308_DUMP, NULL);
738 break; 805 break;
739 case IPL_METHOD_NONE: 806 case DUMP_METHOD_NONE:
740 default: 807 default:
741 return; 808 return;
742 } 809 }
@@ -777,12 +844,13 @@ static int __init ipl_init(void)
777 rc = firmware_register(&ipl_subsys); 844 rc = firmware_register(&ipl_subsys);
778 if (rc) 845 if (rc)
779 return rc; 846 return rc;
780 switch (ipl_get_type()) { 847 switch (ipl_info.type) {
781 case IPL_TYPE_CCW: 848 case IPL_TYPE_CCW:
782 rc = sysfs_create_group(&ipl_subsys.kset.kobj, 849 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
783 &ipl_ccw_attr_group); 850 &ipl_ccw_attr_group);
784 break; 851 break;
785 case IPL_TYPE_FCP: 852 case IPL_TYPE_FCP:
853 case IPL_TYPE_FCP_DUMP:
786 rc = ipl_register_fcp_files(); 854 rc = ipl_register_fcp_files();
787 break; 855 break;
788 case IPL_TYPE_NSS: 856 case IPL_TYPE_NSS:
@@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void)
852 /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ 920 /* FIXME: check for diag308_set_works when enabling diag ccw reipl */
853 if (!MACHINE_IS_VM) 921 if (!MACHINE_IS_VM)
854 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; 922 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
855 if (ipl_get_type() == IPL_TYPE_CCW) 923 if (ipl_info.type == IPL_TYPE_CCW)
856 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 924 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
857 reipl_capabilities |= IPL_TYPE_CCW; 925 reipl_capabilities |= IPL_TYPE_CCW;
858 return 0; 926 return 0;
@@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void)
862{ 930{
863 int rc; 931 int rc;
864 932
865 if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) 933 if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP))
866 return 0; 934 return 0;
867 if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) 935 if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP))
868 make_attrs_ro(reipl_fcp_attrs); 936 make_attrs_ro(reipl_fcp_attrs);
869 937
870 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); 938 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
@@ -875,7 +943,7 @@ static int __init reipl_fcp_init(void)
875 free_page((unsigned long)reipl_block_fcp); 943 free_page((unsigned long)reipl_block_fcp);
876 return rc; 944 return rc;
877 } 945 }
878 if (ipl_get_type() == IPL_TYPE_FCP) { 946 if (ipl_info.type == IPL_TYPE_FCP) {
879 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); 947 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
880 } else { 948 } else {
881 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; 949 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
@@ -909,7 +977,7 @@ static int __init reipl_init(void)
909 rc = reipl_nss_init(); 977 rc = reipl_nss_init();
910 if (rc) 978 if (rc)
911 return rc; 979 return rc;
912 rc = reipl_set_type(ipl_get_type()); 980 rc = reipl_set_type(ipl_info.type);
913 if (rc) 981 if (rc)
914 return rc; 982 return rc;
915 return 0; 983 return 0;
@@ -931,7 +999,7 @@ static int __init dump_ccw_init(void)
931 dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; 999 dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
932 dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; 1000 dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
933 dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; 1001 dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
934 dump_capabilities |= IPL_TYPE_CCW; 1002 dump_capabilities |= DUMP_TYPE_CCW;
935 return 0; 1003 return 0;
936} 1004}
937 1005
@@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void)
956 dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; 1024 dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
957 dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; 1025 dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
958 dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; 1026 dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
959 dump_capabilities |= IPL_TYPE_FCP; 1027 dump_capabilities |= DUMP_TYPE_FCP;
960 return 0; 1028 return 0;
961} 1029}
962 1030
@@ -995,7 +1063,7 @@ static int __init dump_init(void)
995 rc = dump_fcp_init(); 1063 rc = dump_fcp_init();
996 if (rc) 1064 if (rc)
997 return rc; 1065 return rc;
998 dump_set_type(IPL_TYPE_NONE); 1066 dump_set_type(DUMP_TYPE_NONE);
999 return 0; 1067 return 0;
1000} 1068}
1001 1069
@@ -1038,6 +1106,27 @@ static int __init s390_ipl_init(void)
1038 1106
1039__initcall(s390_ipl_init); 1107__initcall(s390_ipl_init);
1040 1108
1109void __init ipl_save_parameters(void)
1110{
1111 struct cio_iplinfo iplinfo;
1112 unsigned int *ipl_ptr;
1113 void *src, *dst;
1114
1115 if (cio_get_iplinfo(&iplinfo))
1116 return;
1117
1118 ipl_devno = iplinfo.devno;
1119 ipl_flags |= IPL_DEVNO_VALID;
1120 if (!iplinfo.is_qdio)
1121 return;
1122 ipl_flags |= IPL_PARMBLOCK_VALID;
1123 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
1124 src = (void *)(unsigned long)*ipl_ptr;
1125 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1126 memmove(dst, src, PAGE_SIZE);
1127 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
1128}
1129
1041static LIST_HEAD(rcall); 1130static LIST_HEAD(rcall);
1042static DEFINE_MUTEX(rcall_mutex); 1131static DEFINE_MUTEX(rcall_mutex);
1043 1132
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 39d1dd752529..59b4e796680a 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,7 @@
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/moduleloader.h> 33#include <linux/moduleloader.h>
34#include <linux/bug.h>
34 35
35#if 0 36#if 0
36#define DEBUGP printk 37#define DEBUGP printk
@@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr,
398 struct module *me) 399 struct module *me)
399{ 400{
400 vfree(me->arch.syminfo); 401 vfree(me->arch.syminfo);
401 return 0; 402 return module_bug_finalize(hdr, sechdrs, me);
402} 403}
403 404
404void module_arch_cleanup(struct module *mod) 405void module_arch_cleanup(struct module *mod)
405{ 406{
407 module_bug_cleanup(mod);
406} 408}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 5acfac654f9d..11d9b0197626 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
280 return 0; 280 return 0;
281} 281}
282 282
283asmlinkage long sys_fork(struct pt_regs regs) 283asmlinkage long sys_fork(void)
284{ 284{
285 return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL); 285 struct pt_regs *regs = task_pt_regs(current);
286 return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
286} 287}
287 288
288asmlinkage long sys_clone(struct pt_regs regs) 289asmlinkage long sys_clone(void)
289{ 290{
290 unsigned long clone_flags; 291 struct pt_regs *regs = task_pt_regs(current);
291 unsigned long newsp; 292 unsigned long clone_flags;
293 unsigned long newsp;
292 int __user *parent_tidptr, *child_tidptr; 294 int __user *parent_tidptr, *child_tidptr;
293 295
294 clone_flags = regs.gprs[3]; 296 clone_flags = regs->gprs[3];
295 newsp = regs.orig_gpr2; 297 newsp = regs->orig_gpr2;
296 parent_tidptr = (int __user *) regs.gprs[4]; 298 parent_tidptr = (int __user *) regs->gprs[4];
297 child_tidptr = (int __user *) regs.gprs[5]; 299 child_tidptr = (int __user *) regs->gprs[5];
298 if (!newsp) 300 if (!newsp)
299 newsp = regs.gprs[15]; 301 newsp = regs->gprs[15];
300 return do_fork(clone_flags, newsp, &regs, 0, 302 return do_fork(clone_flags, newsp, regs, 0,
301 parent_tidptr, child_tidptr); 303 parent_tidptr, child_tidptr);
302} 304}
303 305
@@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs)
311 * do not have enough call-clobbered registers to hold all 313 * do not have enough call-clobbered registers to hold all
312 * the information you need. 314 * the information you need.
313 */ 315 */
314asmlinkage long sys_vfork(struct pt_regs regs) 316asmlinkage long sys_vfork(void)
315{ 317{
318 struct pt_regs *regs = task_pt_regs(current);
316 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 319 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
317 regs.gprs[15], &regs, 0, NULL, NULL); 320 regs->gprs[15], regs, 0, NULL, NULL);
321}
322
323asmlinkage void execve_tail(void)
324{
325 task_lock(current);
326 current->ptrace &= ~PT_DTRACE;
327 task_unlock(current);
328 current->thread.fp_regs.fpc = 0;
329 if (MACHINE_HAS_IEEE)
330 asm volatile("sfpc %0,%0" : : "d" (0));
318} 331}
319 332
320/* 333/*
321 * sys_execve() executes a new program. 334 * sys_execve() executes a new program.
322 */ 335 */
323asmlinkage long sys_execve(struct pt_regs regs) 336asmlinkage long sys_execve(void)
324{ 337{
325 int error; 338 struct pt_regs *regs = task_pt_regs(current);
326 char * filename; 339 char *filename;
327 340 unsigned long result;
328 filename = getname((char __user *) regs.orig_gpr2); 341 int rc;
329 error = PTR_ERR(filename); 342
330 if (IS_ERR(filename)) 343 filename = getname((char __user *) regs->orig_gpr2);
331 goto out; 344 if (IS_ERR(filename)) {
332 error = do_execve(filename, (char __user * __user *) regs.gprs[3], 345 result = PTR_ERR(filename);
333 (char __user * __user *) regs.gprs[4], &regs); 346 goto out;
334 if (error == 0) {
335 task_lock(current);
336 current->ptrace &= ~PT_DTRACE;
337 task_unlock(current);
338 current->thread.fp_regs.fpc = 0;
339 if (MACHINE_HAS_IEEE)
340 asm volatile("sfpc %0,%0" : : "d" (0));
341 } 347 }
342 putname(filename); 348 rc = do_execve(filename, (char __user * __user *) regs->gprs[3],
349 (char __user * __user *) regs->gprs[4], regs);
350 if (rc) {
351 result = rc;
352 goto out_putname;
353 }
354 execve_tail();
355 result = regs->gprs[2];
356out_putname:
357 putname(filename);
343out: 358out:
344 return error; 359 return result;
345} 360}
346 361
347
348/* 362/*
349 * fill in the FPU structure for a core dump. 363 * fill in the FPU structure for a core dump.
350 */ 364 */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 863c8d08c026..3dfd0985861c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -285,6 +285,26 @@ static void __init conmode_default(void)
285 } 285 }
286} 286}
287 287
288#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
289static void __init setup_zfcpdump(unsigned int console_devno)
290{
291 static char str[64];
292
293 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
294 return;
295 if (console_devno != -1)
296 sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
297 ipl_info.data.fcp.dev_id.devno, console_devno);
298 else
299 sprintf(str, "cio_ignore=all,!0.0.%04x",
300 ipl_info.data.fcp.dev_id.devno);
301 strcat(COMMAND_LINE, str);
302 console_loglevel = 2;
303}
304#else
305static inline void setup_zfcpdump(unsigned int console_devno) {}
306#endif /* CONFIG_ZFCPDUMP */
307
288#ifdef CONFIG_SMP 308#ifdef CONFIG_SMP
289void (*_machine_restart)(char *command) = machine_restart_smp; 309void (*_machine_restart)(char *command) = machine_restart_smp;
290void (*_machine_halt)(void) = machine_halt_smp; 310void (*_machine_halt)(void) = machine_halt_smp;
@@ -586,13 +606,20 @@ setup_resources(void)
586 } 606 }
587} 607}
588 608
609unsigned long real_memory_size;
610EXPORT_SYMBOL_GPL(real_memory_size);
611
589static void __init setup_memory_end(void) 612static void __init setup_memory_end(void)
590{ 613{
591 unsigned long real_size, memory_size; 614 unsigned long memory_size;
592 unsigned long max_mem, max_phys; 615 unsigned long max_mem, max_phys;
593 int i; 616 int i;
594 617
595 memory_size = real_size = 0; 618#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
619 if (ipl_info.type == IPL_TYPE_FCP_DUMP)
620 memory_end = ZFCPDUMP_HSA_SIZE;
621#endif
622 memory_size = 0;
596 max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; 623 max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
597 memory_end &= PAGE_MASK; 624 memory_end &= PAGE_MASK;
598 625
@@ -601,7 +628,8 @@ static void __init setup_memory_end(void)
601 for (i = 0; i < MEMORY_CHUNKS; i++) { 628 for (i = 0; i < MEMORY_CHUNKS; i++) {
602 struct mem_chunk *chunk = &memory_chunk[i]; 629 struct mem_chunk *chunk = &memory_chunk[i];
603 630
604 real_size = max(real_size, chunk->addr + chunk->size); 631 real_memory_size = max(real_memory_size,
632 chunk->addr + chunk->size);
605 if (chunk->addr >= max_mem) { 633 if (chunk->addr >= max_mem) {
606 memset(chunk, 0, sizeof(*chunk)); 634 memset(chunk, 0, sizeof(*chunk));
607 continue; 635 continue;
@@ -765,6 +793,7 @@ setup_arch(char **cmdline_p)
765 793
766 parse_early_param(); 794 parse_early_param();
767 795
796 setup_ipl_info();
768 setup_memory_end(); 797 setup_memory_end();
769 setup_addressing_mode(); 798 setup_addressing_mode();
770 setup_memory(); 799 setup_memory();
@@ -782,6 +811,9 @@ setup_arch(char **cmdline_p)
782 811
783 /* Setup default console */ 812 /* Setup default console */
784 conmode_default(); 813 conmode_default();
814
815 /* Setup zfcpdump support */
816 setup_zfcpdump(console_devno);
785} 817}
786 818
787void print_cpu_info(struct cpuinfo_S390 *cpuinfo) 819void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 554f9cf7499c..3c41907799a1 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
102} 102}
103 103
104asmlinkage long 104asmlinkage long
105sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 105sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
106 struct pt_regs *regs)
107{ 106{
107 struct pt_regs *regs = task_pt_regs(current);
108 return do_sigaltstack(uss, uoss, regs->gprs[15]); 108 return do_sigaltstack(uss, uoss, regs->gprs[15]);
109} 109}
110 110
@@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
163 return 0; 163 return 0;
164} 164}
165 165
166asmlinkage long sys_sigreturn(struct pt_regs *regs) 166asmlinkage long sys_sigreturn(void)
167{ 167{
168 struct pt_regs *regs = task_pt_regs(current);
168 sigframe __user *frame = (sigframe __user *)regs->gprs[15]; 169 sigframe __user *frame = (sigframe __user *)regs->gprs[15];
169 sigset_t set; 170 sigset_t set;
170 171
@@ -189,8 +190,9 @@ badframe:
189 return 0; 190 return 0;
190} 191}
191 192
192asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) 193asmlinkage long sys_rt_sigreturn(void)
193{ 194{
195 struct pt_regs *regs = task_pt_regs(current);
194 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; 196 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
195 sigset_t set; 197 sigset_t set;
196 198
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 97764f710bb7..3754e2031b39 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * arch/s390/kernel/smp.c 2 * arch/s390/kernel/smp.c
3 * 3 *
4 * Copyright (C) IBM Corp. 1999,2006 4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * Heiko Carstens (heiko.carstens@de.ibm.com) 7 * Heiko Carstens (heiko.carstens@de.ibm.com)
8 * 8 *
9 * based on other smp stuff by 9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar 11 * (c) 1998 Ingo Molnar
12 * 12 *
@@ -31,6 +31,7 @@
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/cpu.h> 32#include <linux/cpu.h>
33#include <linux/timex.h> 33#include <linux/timex.h>
34#include <linux/bootmem.h>
34#include <asm/ipl.h> 35#include <asm/ipl.h>
35#include <asm/setup.h> 36#include <asm/setup.h>
36#include <asm/sigp.h> 37#include <asm/sigp.h>
@@ -40,17 +41,19 @@
40#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
41#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
42#include <asm/timer.h> 43#include <asm/timer.h>
43 44#include <asm/lowcore.h>
44extern volatile int __cpu_logical_map[];
45 45
46/* 46/*
47 * An array with a pointer the lowcore of every CPU. 47 * An array with a pointer the lowcore of every CPU.
48 */ 48 */
49
50struct _lowcore *lowcore_ptr[NR_CPUS]; 49struct _lowcore *lowcore_ptr[NR_CPUS];
50EXPORT_SYMBOL(lowcore_ptr);
51 51
52cpumask_t cpu_online_map = CPU_MASK_NONE; 52cpumask_t cpu_online_map = CPU_MASK_NONE;
53EXPORT_SYMBOL(cpu_online_map);
54
53cpumask_t cpu_possible_map = CPU_MASK_NONE; 55cpumask_t cpu_possible_map = CPU_MASK_NONE;
56EXPORT_SYMBOL(cpu_possible_map);
54 57
55static struct task_struct *current_set[NR_CPUS]; 58static struct task_struct *current_set[NR_CPUS];
56 59
@@ -70,7 +73,7 @@ struct call_data_struct {
70 int wait; 73 int wait;
71}; 74};
72 75
73static struct call_data_struct * call_data; 76static struct call_data_struct *call_data;
74 77
75/* 78/*
76 * 'Call function' interrupt callback 79 * 'Call function' interrupt callback
@@ -150,8 +153,8 @@ out:
150 * 153 *
151 * Run a function on all other CPUs. 154 * Run a function on all other CPUs.
152 * 155 *
153 * You must not call this function with disabled interrupts or from a 156 * You must not call this function with disabled interrupts, from a
154 * hardware interrupt handler. You may call it from a bottom half. 157 * hardware interrupt handler or from a bottom half.
155 */ 158 */
156int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 159int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
157 int wait) 160 int wait)
@@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function);
177 * 180 *
178 * Run a function on one processor. 181 * Run a function on one processor.
179 * 182 *
180 * You must not call this function with disabled interrupts or from a 183 * You must not call this function with disabled interrupts, from a
181 * hardware interrupt handler. You may call it from a bottom half. 184 * hardware interrupt handler or from a bottom half.
182 */ 185 */
183int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, 186int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
184 int wait, int cpu) 187 int wait, int cpu)
185{ 188{
186 cpumask_t map = CPU_MASK_NONE; 189 cpumask_t map = CPU_MASK_NONE;
187 190
@@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on);
195 198
196static void do_send_stop(void) 199static void do_send_stop(void)
197{ 200{
198 int cpu, rc; 201 int cpu, rc;
199 202
200 /* stop all processors */ 203 /* stop all processors */
201 for_each_online_cpu(cpu) { 204 for_each_online_cpu(cpu) {
202 if (cpu == smp_processor_id()) 205 if (cpu == smp_processor_id())
203 continue; 206 continue;
@@ -209,9 +212,9 @@ static void do_send_stop(void)
209 212
210static void do_store_status(void) 213static void do_store_status(void)
211{ 214{
212 int cpu, rc; 215 int cpu, rc;
213 216
214 /* store status of all processors in their lowcores (real 0) */ 217 /* store status of all processors in their lowcores (real 0) */
215 for_each_online_cpu(cpu) { 218 for_each_online_cpu(cpu) {
216 if (cpu == smp_processor_id()) 219 if (cpu == smp_processor_id())
217 continue; 220 continue;
@@ -219,8 +222,8 @@ static void do_store_status(void)
219 rc = signal_processor_p( 222 rc = signal_processor_p(
220 (__u32)(unsigned long) lowcore_ptr[cpu], cpu, 223 (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
221 sigp_store_status_at_address); 224 sigp_store_status_at_address);
222 } while(rc == sigp_busy); 225 } while (rc == sigp_busy);
223 } 226 }
224} 227}
225 228
226static void do_wait_for_stop(void) 229static void do_wait_for_stop(void)
@@ -231,7 +234,7 @@ static void do_wait_for_stop(void)
231 for_each_online_cpu(cpu) { 234 for_each_online_cpu(cpu) {
232 if (cpu == smp_processor_id()) 235 if (cpu == smp_processor_id())
233 continue; 236 continue;
234 while(!smp_cpu_not_running(cpu)) 237 while (!smp_cpu_not_running(cpu))
235 cpu_relax(); 238 cpu_relax();
236 } 239 }
237} 240}
@@ -245,7 +248,7 @@ void smp_send_stop(void)
245 /* Disable all interrupts/machine checks */ 248 /* Disable all interrupts/machine checks */
246 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); 249 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
247 250
248 /* write magic number to zero page (absolute 0) */ 251 /* write magic number to zero page (absolute 0) */
249 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; 252 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
250 253
251 /* stop other processors. */ 254 /* stop other processors. */
@@ -261,8 +264,7 @@ void smp_send_stop(void)
261/* 264/*
262 * Reboot, halt and power_off routines for SMP. 265 * Reboot, halt and power_off routines for SMP.
263 */ 266 */
264 267void machine_restart_smp(char *__unused)
265void machine_restart_smp(char * __unused)
266{ 268{
267 smp_send_stop(); 269 smp_send_stop();
268 do_reipl(); 270 do_reipl();
@@ -293,17 +295,17 @@ void machine_power_off_smp(void)
293 295
294static void do_ext_call_interrupt(__u16 code) 296static void do_ext_call_interrupt(__u16 code)
295{ 297{
296 unsigned long bits; 298 unsigned long bits;
297 299
298 /* 300 /*
299 * handle bit signal external calls 301 * handle bit signal external calls
300 * 302 *
301 * For the ec_schedule signal we have to do nothing. All the work 303 * For the ec_schedule signal we have to do nothing. All the work
302 * is done automatically when we return from the interrupt. 304 * is done automatically when we return from the interrupt.
303 */ 305 */
304 bits = xchg(&S390_lowcore.ext_call_fast, 0); 306 bits = xchg(&S390_lowcore.ext_call_fast, 0);
305 307
306 if (test_bit(ec_call_function, &bits)) 308 if (test_bit(ec_call_function, &bits))
307 do_call_function(); 309 do_call_function();
308} 310}
309 311
@@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code)
313 */ 315 */
314static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 316static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
315{ 317{
316 /* 318 /*
317 * Set signaling bit in lowcore of target cpu and kick it 319 * Set signaling bit in lowcore of target cpu and kick it
318 */ 320 */
319 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 321 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
320 while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 322 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
321 udelay(10); 323 udelay(10);
322} 324}
323 325
@@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info)
332 334
333void smp_ptlb_all(void) 335void smp_ptlb_all(void)
334{ 336{
335 on_each_cpu(smp_ptlb_callback, NULL, 0, 1); 337 on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
336} 338}
337EXPORT_SYMBOL(smp_ptlb_all); 339EXPORT_SYMBOL(smp_ptlb_all);
338#endif /* ! CONFIG_64BIT */ 340#endif /* ! CONFIG_64BIT */
@@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all);
344 */ 346 */
345void smp_send_reschedule(int cpu) 347void smp_send_reschedule(int cpu)
346{ 348{
347 smp_ext_bitcall(cpu, ec_schedule); 349 smp_ext_bitcall(cpu, ec_schedule);
348} 350}
349 351
350/* 352/*
@@ -358,11 +360,12 @@ struct ec_creg_mask_parms {
358/* 360/*
359 * callback for setting/clearing control bits 361 * callback for setting/clearing control bits
360 */ 362 */
361static void smp_ctl_bit_callback(void *info) { 363static void smp_ctl_bit_callback(void *info)
364{
362 struct ec_creg_mask_parms *pp = info; 365 struct ec_creg_mask_parms *pp = info;
363 unsigned long cregs[16]; 366 unsigned long cregs[16];
364 int i; 367 int i;
365 368
366 __ctl_store(cregs, 0, 15); 369 __ctl_store(cregs, 0, 15);
367 for (i = 0; i <= 15; i++) 370 for (i = 0; i <= 15; i++)
368 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; 371 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
@@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
381 parms.orvals[cr] = 1 << bit; 384 parms.orvals[cr] = 1 << bit;
382 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 385 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
383} 386}
387EXPORT_SYMBOL(smp_ctl_set_bit);
384 388
385/* 389/*
386 * Clear a bit in a control register of all cpus 390 * Clear a bit in a control register of all cpus
@@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit)
394 parms.andvals[cr] = ~(1L << bit); 398 parms.andvals[cr] = ~(1L << bit);
395 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 399 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
396} 400}
401EXPORT_SYMBOL(smp_ctl_clear_bit);
402
403#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
404
405/*
406 * zfcpdump_prefix_array holds prefix registers for the following scenario:
407 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
408 * save its prefix registers, since they get lost, when switching from 31 bit
409 * to 64 bit.
410 */
411unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
412 __attribute__((__section__(".data")));
413
414static void __init smp_get_save_areas(void)
415{
416 unsigned int cpu, cpu_num, rc;
417 __u16 boot_cpu_addr;
418
419 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
420 return;
421 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
422 cpu_num = 1;
423 for (cpu = 0; cpu <= 65535; cpu++) {
424 if ((u16) cpu == boot_cpu_addr)
425 continue;
426 __cpu_logical_map[1] = (__u16) cpu;
427 if (signal_processor(1, sigp_sense) == sigp_not_operational)
428 continue;
429 if (cpu_num >= NR_CPUS) {
430 printk("WARNING: Registers for cpu %i are not "
431 "saved, since dump kernel was compiled with"
432 "NR_CPUS=%i!\n", cpu_num, NR_CPUS);
433 continue;
434 }
435 zfcpdump_save_areas[cpu_num] =
436 alloc_bootmem(sizeof(union save_area));
437 while (1) {
438 rc = signal_processor(1, sigp_stop_and_store_status);
439 if (rc != sigp_busy)
440 break;
441 cpu_relax();
442 }
443 memcpy(zfcpdump_save_areas[cpu_num],
444 (void *)(unsigned long) store_prefix() +
445 SAVE_AREA_BASE, SAVE_AREA_SIZE);
446#ifdef __s390x__
447 /* copy original prefix register */
448 zfcpdump_save_areas[cpu_num]->s390x.pref_reg =
449 zfcpdump_prefix_array[cpu_num];
450#endif
451 cpu_num++;
452 }
453}
454
455union save_area *zfcpdump_save_areas[NR_CPUS + 1];
456EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
457
458#else
459#define smp_get_save_areas() do { } while (0)
460#endif
397 461
398/* 462/*
399 * Lets check how many CPUs we have. 463 * Lets check how many CPUs we have.
400 */ 464 */
401 465
402static unsigned int 466static unsigned int __init smp_count_cpus(void)
403__init smp_count_cpus(void)
404{ 467{
405 unsigned int cpu, num_cpus; 468 unsigned int cpu, num_cpus;
406 __u16 boot_cpu_addr; 469 __u16 boot_cpu_addr;
@@ -416,31 +479,30 @@ __init smp_count_cpus(void)
416 if ((__u16) cpu == boot_cpu_addr) 479 if ((__u16) cpu == boot_cpu_addr)
417 continue; 480 continue;
418 __cpu_logical_map[1] = (__u16) cpu; 481 __cpu_logical_map[1] = (__u16) cpu;
419 if (signal_processor(1, sigp_sense) == 482 if (signal_processor(1, sigp_sense) == sigp_not_operational)
420 sigp_not_operational)
421 continue; 483 continue;
422 num_cpus++; 484 num_cpus++;
423 } 485 }
424 486
425 printk("Detected %d CPU's\n",(int) num_cpus); 487 printk("Detected %d CPU's\n", (int) num_cpus);
426 printk("Boot cpu address %2X\n", boot_cpu_addr); 488 printk("Boot cpu address %2X\n", boot_cpu_addr);
427 489
428 return num_cpus; 490 return num_cpus;
429} 491}
430 492
431/* 493/*
432 * Activate a secondary processor. 494 * Activate a secondary processor.
433 */ 495 */
434int __devinit start_secondary(void *cpuvoid) 496int __devinit start_secondary(void *cpuvoid)
435{ 497{
436 /* Setup the cpu */ 498 /* Setup the cpu */
437 cpu_init(); 499 cpu_init();
438 preempt_disable(); 500 preempt_disable();
439 /* Enable TOD clock interrupts on the secondary cpu. */ 501 /* Enable TOD clock interrupts on the secondary cpu. */
440 init_cpu_timer(); 502 init_cpu_timer();
441#ifdef CONFIG_VIRT_TIMER 503#ifdef CONFIG_VIRT_TIMER
442 /* Enable cpu timer interrupts on the secondary cpu. */ 504 /* Enable cpu timer interrupts on the secondary cpu. */
443 init_cpu_vtimer(); 505 init_cpu_vtimer();
444#endif 506#endif
445 /* Enable pfault pseudo page faults on this cpu. */ 507 /* Enable pfault pseudo page faults on this cpu. */
446 pfault_init(); 508 pfault_init();
@@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid)
449 cpu_set(smp_processor_id(), cpu_online_map); 511 cpu_set(smp_processor_id(), cpu_online_map);
450 /* Switch on interrupts */ 512 /* Switch on interrupts */
451 local_irq_enable(); 513 local_irq_enable();
452 /* Print info about this processor */ 514 /* Print info about this processor */
453 print_cpu_info(&S390_lowcore.cpu_data); 515 print_cpu_info(&S390_lowcore.cpu_data);
454 /* cpu_idle will call schedule for us */ 516 /* cpu_idle will call schedule for us */
455 cpu_idle(); 517 cpu_idle();
456 return 0; 518 return 0;
457} 519}
458 520
459static void __init smp_create_idle(unsigned int cpu) 521static void __init smp_create_idle(unsigned int cpu)
@@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
470 current_set[cpu] = p; 532 current_set[cpu] = p;
471} 533}
472 534
473/* Reserving and releasing of CPUs */ 535static int cpu_stopped(int cpu)
474
475static DEFINE_SPINLOCK(smp_reserve_lock);
476static int smp_cpu_reserved[NR_CPUS];
477
478int
479smp_get_cpu(cpumask_t cpu_mask)
480{
481 unsigned long flags;
482 int cpu;
483
484 spin_lock_irqsave(&smp_reserve_lock, flags);
485 /* Try to find an already reserved cpu. */
486 for_each_cpu_mask(cpu, cpu_mask) {
487 if (smp_cpu_reserved[cpu] != 0) {
488 smp_cpu_reserved[cpu]++;
489 /* Found one. */
490 goto out;
491 }
492 }
493 /* Reserve a new cpu from cpu_mask. */
494 for_each_cpu_mask(cpu, cpu_mask) {
495 if (cpu_online(cpu)) {
496 smp_cpu_reserved[cpu]++;
497 goto out;
498 }
499 }
500 cpu = -ENODEV;
501out:
502 spin_unlock_irqrestore(&smp_reserve_lock, flags);
503 return cpu;
504}
505
506void
507smp_put_cpu(int cpu)
508{
509 unsigned long flags;
510
511 spin_lock_irqsave(&smp_reserve_lock, flags);
512 smp_cpu_reserved[cpu]--;
513 spin_unlock_irqrestore(&smp_reserve_lock, flags);
514}
515
516static int
517cpu_stopped(int cpu)
518{ 536{
519 __u32 status; 537 __u32 status;
520 538
521 /* Check for stopped state */ 539 /* Check for stopped state */
522 if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { 540 if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
541 sigp_status_stored) {
523 if (status & 0x40) 542 if (status & 0x40)
524 return 1; 543 return 1;
525 } 544 }
@@ -528,14 +547,13 @@ cpu_stopped(int cpu)
528 547
529/* Upping and downing of CPUs */ 548/* Upping and downing of CPUs */
530 549
531int 550int __cpu_up(unsigned int cpu)
532__cpu_up(unsigned int cpu)
533{ 551{
534 struct task_struct *idle; 552 struct task_struct *idle;
535 struct _lowcore *cpu_lowcore; 553 struct _lowcore *cpu_lowcore;
536 struct stack_frame *sf; 554 struct stack_frame *sf;
537 sigp_ccode ccode; 555 sigp_ccode ccode;
538 int curr_cpu; 556 int curr_cpu;
539 557
540 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { 558 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
541 __cpu_logical_map[cpu] = (__u16) curr_cpu; 559 __cpu_logical_map[cpu] = (__u16) curr_cpu;
@@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu)
548 566
549 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), 567 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
550 cpu, sigp_set_prefix); 568 cpu, sigp_set_prefix);
551 if (ccode){ 569 if (ccode) {
552 printk("sigp_set_prefix failed for cpu %d " 570 printk("sigp_set_prefix failed for cpu %d "
553 "with condition code %d\n", 571 "with condition code %d\n",
554 (int) cpu, (int) ccode); 572 (int) cpu, (int) ccode);
@@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu)
556 } 574 }
557 575
558 idle = current_set[cpu]; 576 idle = current_set[cpu];
559 cpu_lowcore = lowcore_ptr[cpu]; 577 cpu_lowcore = lowcore_ptr[cpu];
560 cpu_lowcore->kernel_stack = (unsigned long) 578 cpu_lowcore->kernel_stack = (unsigned long)
561 task_stack_page(idle) + (THREAD_SIZE); 579 task_stack_page(idle) + THREAD_SIZE;
562 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 580 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
563 - sizeof(struct pt_regs) 581 - sizeof(struct pt_regs)
564 - sizeof(struct stack_frame)); 582 - sizeof(struct stack_frame));
@@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu)
570 " stam 0,15,0(%0)" 588 " stam 0,15,0(%0)"
571 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); 589 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
572 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 590 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
573 cpu_lowcore->current_task = (unsigned long) idle; 591 cpu_lowcore->current_task = (unsigned long) idle;
574 cpu_lowcore->cpu_data.cpu_nr = cpu; 592 cpu_lowcore->cpu_data.cpu_nr = cpu;
575 eieio(); 593 eieio();
576 594
577 while (signal_processor(cpu,sigp_restart) == sigp_busy) 595 while (signal_processor(cpu, sigp_restart) == sigp_busy)
578 udelay(10); 596 udelay(10);
579 597
580 while (!cpu_online(cpu)) 598 while (!cpu_online(cpu))
@@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void)
589{ 607{
590 unsigned int phy_cpus, pos_cpus, cpu; 608 unsigned int phy_cpus, pos_cpus, cpu;
591 609
610 smp_get_save_areas();
592 phy_cpus = smp_count_cpus(); 611 phy_cpus = smp_count_cpus();
593 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); 612 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
594 613
@@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
620} 639}
621early_param("possible_cpus", setup_possible_cpus); 640early_param("possible_cpus", setup_possible_cpus);
622 641
623int 642int __cpu_disable(void)
624__cpu_disable(void)
625{ 643{
626 unsigned long flags;
627 struct ec_creg_mask_parms cr_parms; 644 struct ec_creg_mask_parms cr_parms;
628 int cpu = smp_processor_id(); 645 int cpu = smp_processor_id();
629 646
630 spin_lock_irqsave(&smp_reserve_lock, flags);
631 if (smp_cpu_reserved[cpu] != 0) {
632 spin_unlock_irqrestore(&smp_reserve_lock, flags);
633 return -EBUSY;
634 }
635 cpu_clear(cpu, cpu_online_map); 647 cpu_clear(cpu, cpu_online_map);
636 648
637 /* Disable pfault pseudo page faults on this cpu. */ 649 /* Disable pfault pseudo page faults on this cpu. */
@@ -642,24 +654,23 @@ __cpu_disable(void)
642 654
643 /* disable all external interrupts */ 655 /* disable all external interrupts */
644 cr_parms.orvals[0] = 0; 656 cr_parms.orvals[0] = 0;
645 cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | 657 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
646 1<<11 | 1<<10 | 1<< 6 | 1<< 4); 658 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
647 /* disable all I/O interrupts */ 659 /* disable all I/O interrupts */
648 cr_parms.orvals[6] = 0; 660 cr_parms.orvals[6] = 0;
649 cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | 661 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
650 1<<27 | 1<<26 | 1<<25 | 1<<24); 662 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
651 /* disable most machine checks */ 663 /* disable most machine checks */
652 cr_parms.orvals[14] = 0; 664 cr_parms.orvals[14] = 0;
653 cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); 665 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
666 1 << 25 | 1 << 24);
654 667
655 smp_ctl_bit_callback(&cr_parms); 668 smp_ctl_bit_callback(&cr_parms);
656 669
657 spin_unlock_irqrestore(&smp_reserve_lock, flags);
658 return 0; 670 return 0;
659} 671}
660 672
661void 673void __cpu_die(unsigned int cpu)
662__cpu_die(unsigned int cpu)
663{ 674{
664 /* Wait until target cpu is down */ 675 /* Wait until target cpu is down */
665 while (!smp_cpu_not_running(cpu)) 676 while (!smp_cpu_not_running(cpu))
@@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu)
667 printk("Processor %d spun down\n", cpu); 678 printk("Processor %d spun down\n", cpu);
668} 679}
669 680
670void 681void cpu_die(void)
671cpu_die(void)
672{ 682{
673 idle_task_exit(); 683 idle_task_exit();
674 signal_processor(smp_processor_id(), sigp_stop); 684 signal_processor(smp_processor_id(), sigp_stop);
675 BUG(); 685 BUG();
676 for(;;); 686 for (;;);
677} 687}
678 688
679#endif /* CONFIG_HOTPLUG_CPU */ 689#endif /* CONFIG_HOTPLUG_CPU */
@@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
686{ 696{
687 unsigned long stack; 697 unsigned long stack;
688 unsigned int cpu; 698 unsigned int cpu;
689 int i; 699 int i;
690 700
691 /* request the 0x1201 emergency signal external interrupt */ 701 /* request the 0x1201 emergency signal external interrupt */
692 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 702 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
693 panic("Couldn't request external interrupt 0x1201"); 703 panic("Couldn't request external interrupt 0x1201");
694 memset(lowcore_ptr,0,sizeof(lowcore_ptr)); 704 memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
695 /* 705 /*
696 * Initialize prefix pages and stacks for all possible cpus 706 * Initialize prefix pages and stacks for all possible cpus
697 */ 707 */
698 print_cpu_info(&S390_lowcore.cpu_data); 708 print_cpu_info(&S390_lowcore.cpu_data);
699 709
700 for_each_possible_cpu(i) { 710 for_each_possible_cpu(i) {
701 lowcore_ptr[i] = (struct _lowcore *) 711 lowcore_ptr[i] = (struct _lowcore *)
702 __get_free_pages(GFP_KERNEL|GFP_DMA, 712 __get_free_pages(GFP_KERNEL | GFP_DMA,
703 sizeof(void*) == 8 ? 1 : 0); 713 sizeof(void*) == 8 ? 1 : 0);
704 stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); 714 stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
705 if (lowcore_ptr[i] == NULL || stack == 0ULL) 715 if (!lowcore_ptr[i] || !stack)
706 panic("smp_boot_cpus failed to allocate memory\n"); 716 panic("smp_boot_cpus failed to allocate memory\n");
707 717
708 *(lowcore_ptr[i]) = S390_lowcore; 718 *(lowcore_ptr[i]) = S390_lowcore;
709 lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); 719 lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
710 stack = __get_free_pages(GFP_KERNEL,0); 720 stack = __get_free_pages(GFP_KERNEL, 0);
711 if (stack == 0ULL) 721 if (!stack)
712 panic("smp_boot_cpus failed to allocate memory\n"); 722 panic("smp_boot_cpus failed to allocate memory\n");
713 lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); 723 lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
714#ifndef CONFIG_64BIT 724#ifndef CONFIG_64BIT
715 if (MACHINE_HAS_IEEE) { 725 if (MACHINE_HAS_IEEE) {
716 lowcore_ptr[i]->extended_save_area_addr = 726 lowcore_ptr[i]->extended_save_area_addr =
717 (__u32) __get_free_pages(GFP_KERNEL,0); 727 (__u32) __get_free_pages(GFP_KERNEL, 0);
718 if (lowcore_ptr[i]->extended_save_area_addr == 0) 728 if (!lowcore_ptr[i]->extended_save_area_addr)
719 panic("smp_boot_cpus failed to " 729 panic("smp_boot_cpus failed to "
720 "allocate memory\n"); 730 "allocate memory\n");
721 } 731 }
@@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus)
754 */ 764 */
755int setup_profiling_timer(unsigned int multiplier) 765int setup_profiling_timer(unsigned int multiplier)
756{ 766{
757 return 0; 767 return 0;
758} 768}
759 769
760static DEFINE_PER_CPU(struct cpu, cpu_devices); 770static DEFINE_PER_CPU(struct cpu, cpu_devices);
761 771
772static ssize_t show_capability(struct sys_device *dev, char *buf)
773{
774 unsigned int capability;
775 int rc;
776
777 rc = get_cpu_capability(&capability);
778 if (rc)
779 return rc;
780 return sprintf(buf, "%u\n", capability);
781}
782static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
783
784static int __cpuinit smp_cpu_notify(struct notifier_block *self,
785 unsigned long action, void *hcpu)
786{
787 unsigned int cpu = (unsigned int)(long)hcpu;
788 struct cpu *c = &per_cpu(cpu_devices, cpu);
789 struct sys_device *s = &c->sysdev;
790
791 switch (action) {
792 case CPU_ONLINE:
793 if (sysdev_create_file(s, &attr_capability))
794 return NOTIFY_BAD;
795 break;
796 case CPU_DEAD:
797 sysdev_remove_file(s, &attr_capability);
798 break;
799 }
800 return NOTIFY_OK;
801}
802
803static struct notifier_block __cpuinitdata smp_cpu_nb = {
804 .notifier_call = smp_cpu_notify,
805};
806
762static int __init topology_init(void) 807static int __init topology_init(void)
763{ 808{
764 int cpu; 809 int cpu;
765 int ret; 810
811 register_cpu_notifier(&smp_cpu_nb);
766 812
767 for_each_possible_cpu(cpu) { 813 for_each_possible_cpu(cpu) {
768 struct cpu *c = &per_cpu(cpu_devices, cpu); 814 struct cpu *c = &per_cpu(cpu_devices, cpu);
815 struct sys_device *s = &c->sysdev;
769 816
770 c->hotpluggable = 1; 817 c->hotpluggable = 1;
771 ret = register_cpu(c, cpu); 818 register_cpu(c, cpu);
772 if (ret) 819 if (!cpu_online(cpu))
773 printk(KERN_WARNING "topology_init: register_cpu %d " 820 continue;
774 "failed (%d)\n", cpu, ret); 821 s = &c->sysdev;
822 sysdev_create_file(s, &attr_capability);
775 } 823 }
776 return 0; 824 return 0;
777} 825}
778
779subsys_initcall(topology_init); 826subsys_initcall(topology_init);
780
781EXPORT_SYMBOL(cpu_online_map);
782EXPORT_SYMBOL(cpu_possible_map);
783EXPORT_SYMBOL(lowcore_ptr);
784EXPORT_SYMBOL(smp_ctl_set_bit);
785EXPORT_SYMBOL(smp_ctl_clear_bit);
786EXPORT_SYMBOL(smp_get_cpu);
787EXPORT_SYMBOL(smp_put_cpu);
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 584ed95f3380..3a77c22cda78 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
266 return -EFAULT; 266 return -EFAULT;
267 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); 267 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
268} 268}
269
270/*
271 * Do a system call from kernel instead of calling sys_execve so we
272 * end up with proper pt_regs.
273 */
274int kernel_execve(const char *filename, char *const argv[], char *const envp[])
275{
276 register const char *__arg1 asm("2") = filename;
277 register char *const*__arg2 asm("3") = argv;
278 register char *const*__arg3 asm("4") = envp;
279 register long __svcres asm("2");
280 asm volatile(
281 "svc %b1"
282 : "=d" (__svcres)
283 : "i" (__NR_execve),
284 "0" (__arg1),
285 "d" (__arg2),
286 "d" (__arg3) : "memory");
287 return __svcres;
288}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index c774f1069e10..cd8d321cd0c2 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -10,7 +10,7 @@
10 10
11NI_SYSCALL /* 0 */ 11NI_SYSCALL /* 0 */
12SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) 12SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
13SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue) 13SYSCALL(sys_fork,sys_fork,sys_fork)
14SYSCALL(sys_read,sys_read,sys32_read_wrapper) 14SYSCALL(sys_read,sys_read,sys32_read_wrapper)
15SYSCALL(sys_write,sys_write,sys32_write_wrapper) 15SYSCALL(sys_write,sys_write,sys32_write_wrapper)
16SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ 16SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */
@@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
19SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) 19SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
20SYSCALL(sys_link,sys_link,sys32_link_wrapper) 20SYSCALL(sys_link,sys_link,sys32_link_wrapper)
21SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ 21SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
22SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue) 22SYSCALL(sys_execve,sys_execve,sys32_execve)
23SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) 23SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
24SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ 24SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
25SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) 25SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
@@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) 127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) 128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) 129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
130SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue) 130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
131SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ 131SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */
132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) 132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) 133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
134NI_SYSCALL /* modify_ldt for i386 */ 134NI_SYSCALL /* modify_ldt for i386 */
@@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ 181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ 182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) 183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
184SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) 184SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
185SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) 185SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
186SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ 186SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
187SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) 187SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
@@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall
194SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) 194SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) 195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ 196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
197SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue) 197SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack)
198SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) 198SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
199NI_SYSCALL /* streams1 */ 199NI_SYSCALL /* streams1 */
200NI_SYSCALL /* streams2 */ 200NI_SYSCALL /* streams2 */
201SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */ 201SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
202SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper) 202SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
203SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper) 203SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
204SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper) 204SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e1ad464b6f20..711dae8da7ad 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code)
280} 280}
281 281
282static void etr_reset(void); 282static void etr_reset(void);
283static void etr_init(void);
284static void etr_ext_handler(__u16); 283static void etr_ext_handler(__u16);
285 284
286/* 285/*
@@ -355,7 +354,6 @@ void __init time_init(void)
355#ifdef CONFIG_VIRT_TIMER 354#ifdef CONFIG_VIRT_TIMER
356 vtime_init(); 355 vtime_init();
357#endif 356#endif
358 etr_init();
359} 357}
360 358
361/* 359/*
@@ -426,11 +424,11 @@ static struct etr_aib etr_port1;
426static int etr_port1_uptodate; 424static int etr_port1_uptodate;
427static unsigned long etr_events; 425static unsigned long etr_events;
428static struct timer_list etr_timer; 426static struct timer_list etr_timer;
429static struct tasklet_struct etr_tasklet;
430static DEFINE_PER_CPU(atomic_t, etr_sync_word); 427static DEFINE_PER_CPU(atomic_t, etr_sync_word);
431 428
432static void etr_timeout(unsigned long dummy); 429static void etr_timeout(unsigned long dummy);
433static void etr_tasklet_fn(unsigned long dummy); 430static void etr_work_fn(struct work_struct *work);
431static DECLARE_WORK(etr_work, etr_work_fn);
434 432
435/* 433/*
436 * The etr get_clock function. It will write the current clock value 434 * The etr get_clock function. It will write the current clock value
@@ -507,29 +505,31 @@ static void etr_reset(void)
507 } 505 }
508} 506}
509 507
510static void etr_init(void) 508static int __init etr_init(void)
511{ 509{
512 struct etr_aib aib; 510 struct etr_aib aib;
513 511
514 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 512 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
515 return; 513 return 0;
516 /* Check if this machine has the steai instruction. */ 514 /* Check if this machine has the steai instruction. */
517 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) 515 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
518 set_bit(ETR_FLAG_STEAI, &etr_flags); 516 set_bit(ETR_FLAG_STEAI, &etr_flags);
519 setup_timer(&etr_timer, etr_timeout, 0UL); 517 setup_timer(&etr_timer, etr_timeout, 0UL);
520 tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
521 if (!etr_port0_online && !etr_port1_online) 518 if (!etr_port0_online && !etr_port1_online)
522 set_bit(ETR_FLAG_EACCES, &etr_flags); 519 set_bit(ETR_FLAG_EACCES, &etr_flags);
523 if (etr_port0_online) { 520 if (etr_port0_online) {
524 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 521 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
525 tasklet_hi_schedule(&etr_tasklet); 522 schedule_work(&etr_work);
526 } 523 }
527 if (etr_port1_online) { 524 if (etr_port1_online) {
528 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); 525 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
529 tasklet_hi_schedule(&etr_tasklet); 526 schedule_work(&etr_work);
530 } 527 }
528 return 0;
531} 529}
532 530
531arch_initcall(etr_init);
532
533/* 533/*
534 * Two sorts of ETR machine checks. The architecture reads: 534 * Two sorts of ETR machine checks. The architecture reads:
535 * "When a machine-check niterruption occurs and if a switch-to-local or 535 * "When a machine-check niterruption occurs and if a switch-to-local or
@@ -549,7 +549,7 @@ void etr_switch_to_local(void)
549 return; 549 return;
550 etr_disable_sync_clock(NULL); 550 etr_disable_sync_clock(NULL);
551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
552 tasklet_hi_schedule(&etr_tasklet); 552 schedule_work(&etr_work);
553} 553}
554 554
555/* 555/*
@@ -564,7 +564,7 @@ void etr_sync_check(void)
564 return; 564 return;
565 etr_disable_sync_clock(NULL); 565 etr_disable_sync_clock(NULL);
566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
567 tasklet_hi_schedule(&etr_tasklet); 567 schedule_work(&etr_work);
568} 568}
569 569
570/* 570/*
@@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code)
591 * Both ports are not up-to-date now. 591 * Both ports are not up-to-date now.
592 */ 592 */
593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events); 593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
594 tasklet_hi_schedule(&etr_tasklet); 594 schedule_work(&etr_work);
595} 595}
596 596
597static void etr_timeout(unsigned long dummy) 597static void etr_timeout(unsigned long dummy)
598{ 598{
599 set_bit(ETR_EVENT_UPDATE, &etr_events); 599 set_bit(ETR_EVENT_UPDATE, &etr_events);
600 tasklet_hi_schedule(&etr_tasklet); 600 schedule_work(&etr_work);
601} 601}
602 602
603/* 603/*
@@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
927 if (!eacr.e0 && !eacr.e1) 927 if (!eacr.e0 && !eacr.e1)
928 return eacr; 928 return eacr;
929 929
930 /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ 930 /* Update port0 or port1 with aib stored in etr_work_fn. */
931 if (aib->esw.q == 0) { 931 if (aib->esw.q == 0) {
932 /* Information for port 0 stored. */ 932 /* Information for port 0 stored. */
933 if (eacr.p0 && !etr_port0_uptodate) { 933 if (eacr.p0 && !etr_port0_uptodate) {
@@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
1007 * particular this is the only function that calls etr_update_eacr(), 1007 * particular this is the only function that calls etr_update_eacr(),
1008 * it "controls" the etr control register. 1008 * it "controls" the etr control register.
1009 */ 1009 */
1010static void etr_tasklet_fn(unsigned long dummy) 1010static void etr_work_fn(struct work_struct *work)
1011{ 1011{
1012 unsigned long long now; 1012 unsigned long long now;
1013 struct etr_eacr eacr; 1013 struct etr_eacr eacr;
@@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
1220 return count; /* Nothing to do. */ 1220 return count; /* Nothing to do. */
1221 etr_port0_online = value; 1221 etr_port0_online = value;
1222 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 1222 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1223 tasklet_hi_schedule(&etr_tasklet); 1223 schedule_work(&etr_work);
1224 } else { 1224 } else {
1225 if (etr_port1_online == value) 1225 if (etr_port1_online == value)
1226 return count; /* Nothing to do. */ 1226 return count; /* Nothing to do. */
1227 etr_port1_online = value; 1227 etr_port1_online = value;
1228 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); 1228 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1229 tasklet_hi_schedule(&etr_tasklet); 1229 schedule_work(&etr_work);
1230 } 1230 }
1231 return count; 1231 return count;
1232} 1232}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f0e5a320e2ec..49dec830373a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -30,7 +30,7 @@
30#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33 33#include <linux/bug.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/uaccess.h> 35#include <asm/uaccess.h>
36#include <asm/io.h> 36#include <asm/io.h>
@@ -188,18 +188,31 @@ void dump_stack(void)
188 188
189EXPORT_SYMBOL(dump_stack); 189EXPORT_SYMBOL(dump_stack);
190 190
191static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
192{
193 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
194}
195
191void show_registers(struct pt_regs *regs) 196void show_registers(struct pt_regs *regs)
192{ 197{
193 mm_segment_t old_fs;
194 char *mode; 198 char *mode;
195 int i;
196 199
197 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 200 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
198 printk("%s PSW : %p %p", 201 printk("%s PSW : %p %p",
199 mode, (void *) regs->psw.mask, 202 mode, (void *) regs->psw.mask,
200 (void *) regs->psw.addr); 203 (void *) regs->psw.addr);
201 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 204 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
202 printk("%s GPRS: " FOURLONG, mode, 205 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
206 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
207 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
208 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
209 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
210 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
211 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
212#ifdef CONFIG_64BIT
213 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
214#endif
215 printk("\n%s GPRS: " FOURLONG, mode,
203 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 216 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
204 printk(" " FOURLONG, 217 printk(" " FOURLONG,
205 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 218 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs)
208 printk(" " FOURLONG, 221 printk(" " FOURLONG,
209 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 222 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
210 223
211#if 0 224 show_code(regs);
212 /* FIXME: this isn't needed any more but it changes the ksymoops
213 * input. To remove or not to remove ... */
214 save_access_regs(regs->acrs);
215 printk("%s ACRS: %08x %08x %08x %08x\n", mode,
216 regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
217 printk(" %08x %08x %08x %08x\n",
218 regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
219 printk(" %08x %08x %08x %08x\n",
220 regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
221 printk(" %08x %08x %08x %08x\n",
222 regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
223#endif
224
225 /*
226 * Print the first 20 byte of the instruction stream at the
227 * time of the fault.
228 */
229 old_fs = get_fs();
230 if (regs->psw.mask & PSW_MASK_PSTATE)
231 set_fs(USER_DS);
232 else
233 set_fs(KERNEL_DS);
234 printk("%s Code: ", mode);
235 for (i = 0; i < 20; i++) {
236 unsigned char c;
237 if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
238 printk(" Bad PSW.");
239 break;
240 }
241 printk("%02x ", c);
242 }
243 set_fs(old_fs);
244
245 printk("\n");
246} 225}
247 226
248/* This is called from fs/proc/array.c */ 227/* This is called from fs/proc/array.c */
@@ -318,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
318#endif 297#endif
319} 298}
320 299
300int is_valid_bugaddr(unsigned long addr)
301{
302 return 1;
303}
304
321static void __kprobes inline do_trap(long interruption_code, int signr, 305static void __kprobes inline do_trap(long interruption_code, int signr,
322 char *str, struct pt_regs *regs, 306 char *str, struct pt_regs *regs,
323 siginfo_t *info) 307 siginfo_t *info)
@@ -344,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
344 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 328 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
345 if (fixup) 329 if (fixup)
346 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 330 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
347 else 331 else {
348 die(str, regs, interruption_code); 332 enum bug_trap_type btt;
333
334 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN);
335 if (btt == BUG_TRAP_TYPE_WARN)
336 return;
337 die(str, regs, interruption_code);
338 }
349 } 339 }
350} 340}
351 341
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index c30716ae130c..418f6426a949 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
45 __ex_table : { *(__ex_table) } 45 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .; 46 __stop___ex_table = .;
47 47
48 BUG_TABLE
49
48 .data : { /* Data */ 50 .data : { /* Data */
49 *(.data) 51 *(.data)
50 CONSTRUCTORS 52 CONSTRUCTORS
@@ -77,6 +79,12 @@ SECTIONS
77 *(.init.text) 79 *(.init.text)
78 _einittext = .; 80 _einittext = .;
79 } 81 }
82 /*
83 * .exit.text is discarded at runtime, not link time,
84 * to deal with references from __bug_table
85 */
86 .exit.text : { *(.exit.text) }
87
80 .init.data : { *(.init.data) } 88 .init.data : { *(.init.data) }
81 . = ALIGN(256); 89 . = ALIGN(256);
82 __setup_start = .; 90 __setup_start = .;
@@ -116,7 +124,7 @@ SECTIONS
116 124
117 /* Sections to be discarded */ 125 /* Sections to be discarded */
118 /DISCARD/ : { 126 /DISCARD/ : {
119 *(.exit.text) *(.exit.data) *(.exitcall.exit) 127 *(.exit.data) *(.exitcall.exit)
120 } 128 }
121 129
122 /* Stabs debugging sections. */ 130 /* Stabs debugging sections. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9d5b02801b46..1e1a6ee2cac1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires)
128 S390_lowcore.last_update_timer = expires; 128 S390_lowcore.last_update_timer = expires;
129 129
130 /* store expire time for this CPU timer */ 130 /* store expire time for this CPU timer */
131 per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; 131 __get_cpu_var(virt_cpu_timer).to_expire = expires;
132} 132}
133#else 133#else
134static inline void set_vtimer(__u64 expires) 134static inline void set_vtimer(__u64 expires)
@@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires)
137 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 137 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
138 138
139 /* store expire time for this CPU timer */ 139 /* store expire time for this CPU timer */
140 per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; 140 __get_cpu_var(virt_cpu_timer).to_expire = expires;
141} 141}
142#endif 142#endif
143 143
@@ -145,7 +145,7 @@ static void start_cpu_timer(void)
145{ 145{
146 struct vtimer_queue *vt_list; 146 struct vtimer_queue *vt_list;
147 147
148 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 148 vt_list = &__get_cpu_var(virt_cpu_timer);
149 149
150 /* CPU timer interrupt is pending, don't reprogramm it */ 150 /* CPU timer interrupt is pending, don't reprogramm it */
151 if (vt_list->idle & 1LL<<63) 151 if (vt_list->idle & 1LL<<63)
@@ -159,7 +159,7 @@ static void stop_cpu_timer(void)
159{ 159{
160 struct vtimer_queue *vt_list; 160 struct vtimer_queue *vt_list;
161 161
162 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 162 vt_list = &__get_cpu_var(virt_cpu_timer);
163 163
164 /* nothing to do */ 164 /* nothing to do */
165 if (list_empty(&vt_list->list)) { 165 if (list_empty(&vt_list->list)) {
@@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list)
219 if (list_empty(cb_list)) 219 if (list_empty(cb_list))
220 return; 220 return;
221 221
222 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 222 vt_list = &__get_cpu_var(virt_cpu_timer);
223 223
224 list_for_each_entry_safe(event, tmp, cb_list, entry) { 224 list_for_each_entry_safe(event, tmp, cb_list, entry) {
225 fn = event->function; 225 fn = event->function;
@@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list)
244 */ 244 */
245static void do_cpu_timer_interrupt(__u16 error_code) 245static void do_cpu_timer_interrupt(__u16 error_code)
246{ 246{
247 int cpu;
248 __u64 next, delta; 247 __u64 next, delta;
249 struct vtimer_queue *vt_list; 248 struct vtimer_queue *vt_list;
250 struct vtimer_list *event, *tmp; 249 struct vtimer_list *event, *tmp;
@@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
253 struct list_head cb_list; 252 struct list_head cb_list;
254 253
255 INIT_LIST_HEAD(&cb_list); 254 INIT_LIST_HEAD(&cb_list);
256 cpu = smp_processor_id(); 255 vt_list = &__get_cpu_var(virt_cpu_timer);
257 vt_list = &per_cpu(virt_cpu_timer, cpu);
258 256
259 /* walk timer list, fire all expired events */ 257 /* walk timer list, fire all expired events */
260 spin_lock(&vt_list->lock); 258 spin_lock(&vt_list->lock);
@@ -534,7 +532,7 @@ void init_cpu_vtimer(void)
534 /* enable cpu timer interrupts */ 532 /* enable cpu timer interrupts */
535 __ctl_set_bit(0,10); 533 __ctl_set_bit(0,10);
536 534
537 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 535 vt_list = &__get_cpu_var(virt_cpu_timer);
538 INIT_LIST_HEAD(&vt_list->list); 536 INIT_LIST_HEAD(&vt_list->list);
539 spin_lock_init(&vt_list->lock); 537 spin_lock_init(&vt_list->lock);
540 vt_list->to_expire = 0; 538 vt_list->to_expire = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7462aebd3eb6..2b76a879a7b5 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -26,9 +26,9 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h> 27#include <linux/hardirq.h>
28#include <linux/kprobes.h> 28#include <linux/kprobes.h>
29#include <linux/uaccess.h>
29 30
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/uaccess.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/kdebug.h> 33#include <asm/kdebug.h>
34#include <asm/s390_ext.h> 34#include <asm/s390_ext.h>
@@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb)
63 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb); 63 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
64} 64}
65 65
66static inline int notify_page_fault(enum die_val val, const char *str, 66static int __kprobes __notify_page_fault(struct pt_regs *regs, long err)
67 struct pt_regs *regs, long err, int trap, int sig)
68{ 67{
69 struct die_args args = { 68 struct die_args args = { .str = "page fault",
70 .regs = regs, 69 .trapnr = 14,
71 .str = str, 70 .signr = SIGSEGV };
72 .err = err, 71 args.regs = regs;
73 .trapnr = trap, 72 args.err = err;
74 .signr = sig 73 return atomic_notifier_call_chain(&notify_page_fault_chain,
75 }; 74 DIE_PAGE_FAULT, &args);
76 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args); 75}
76
77static inline int notify_page_fault(struct pt_regs *regs, long err)
78{
79 if (unlikely(kprobe_running()))
80 return __notify_page_fault(regs, err);
81 return NOTIFY_DONE;
77} 82}
78#else 83#else
79static inline int notify_page_fault(enum die_val val, const char *str, 84static inline int notify_page_fault(struct pt_regs *regs, long err)
80 struct pt_regs *regs, long err, int trap, int sig)
81{ 85{
82 return NOTIFY_DONE; 86 return NOTIFY_DONE;
83} 87}
@@ -170,74 +174,127 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
170 force_sig_info(SIGSEGV, &si, current); 174 force_sig_info(SIGSEGV, &si, current);
171} 175}
172 176
177static void do_no_context(struct pt_regs *regs, unsigned long error_code,
178 unsigned long address)
179{
180 const struct exception_table_entry *fixup;
181
182 /* Are we prepared to handle this kernel fault? */
183 fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
184 if (fixup) {
185 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
186 return;
187 }
188
189 /*
190 * Oops. The kernel tried to access some bad page. We'll have to
191 * terminate things with extreme prejudice.
192 */
193 if (check_space(current) == 0)
194 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
195 " at virtual kernel address %p\n", (void *)address);
196 else
197 printk(KERN_ALERT "Unable to handle kernel paging request"
198 " at virtual user address %p\n", (void *)address);
199
200 die("Oops", regs, error_code);
201 do_exit(SIGKILL);
202}
203
204static void do_low_address(struct pt_regs *regs, unsigned long error_code)
205{
206 /* Low-address protection hit in kernel mode means
207 NULL pointer write access in kernel mode. */
208 if (regs->psw.mask & PSW_MASK_PSTATE) {
209 /* Low-address protection hit in user mode 'cannot happen'. */
210 die ("Low-address protection", regs, error_code);
211 do_exit(SIGKILL);
212 }
213
214 do_no_context(regs, error_code, 0);
215}
216
217/*
218 * We ran out of memory, or some other thing happened to us that made
219 * us unable to handle the page fault gracefully.
220 */
221static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
222 unsigned long address)
223{
224 struct task_struct *tsk = current;
225 struct mm_struct *mm = tsk->mm;
226
227 up_read(&mm->mmap_sem);
228 if (is_init(tsk)) {
229 yield();
230 down_read(&mm->mmap_sem);
231 return 1;
232 }
233 printk("VM: killing process %s\n", tsk->comm);
234 if (regs->psw.mask & PSW_MASK_PSTATE)
235 do_exit(SIGKILL);
236 do_no_context(regs, error_code, address);
237 return 0;
238}
239
240static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
241 unsigned long address)
242{
243 struct task_struct *tsk = current;
244 struct mm_struct *mm = tsk->mm;
245
246 up_read(&mm->mmap_sem);
247 /*
248 * Send a sigbus, regardless of whether we were in kernel
249 * or user mode.
250 */
251 tsk->thread.prot_addr = address;
252 tsk->thread.trap_no = error_code;
253 force_sig(SIGBUS, tsk);
254
255 /* Kernel mode? Handle exceptions or die */
256 if (!(regs->psw.mask & PSW_MASK_PSTATE))
257 do_no_context(regs, error_code, address);
258}
259
173#ifdef CONFIG_S390_EXEC_PROTECT 260#ifdef CONFIG_S390_EXEC_PROTECT
174extern long sys_sigreturn(struct pt_regs *regs); 261extern long sys_sigreturn(struct pt_regs *regs);
175extern long sys_rt_sigreturn(struct pt_regs *regs); 262extern long sys_rt_sigreturn(struct pt_regs *regs);
176extern long sys32_sigreturn(struct pt_regs *regs); 263extern long sys32_sigreturn(struct pt_regs *regs);
177extern long sys32_rt_sigreturn(struct pt_regs *regs); 264extern long sys32_rt_sigreturn(struct pt_regs *regs);
178 265
179static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, 266static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
180 int rt) 267 unsigned long address, unsigned long error_code)
181{ 268{
269 u16 instruction;
270 int rc, compat;
271
272 pagefault_disable();
273 rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
274 pagefault_enable();
275 if (rc)
276 return -EFAULT;
277
182 up_read(&mm->mmap_sem); 278 up_read(&mm->mmap_sem);
183 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 279 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
184#ifdef CONFIG_COMPAT 280#ifdef CONFIG_COMPAT
185 if (test_tsk_thread_flag(current, TIF_31BIT)) { 281 compat = test_tsk_thread_flag(current, TIF_31BIT);
186 if (rt) 282 if (compat && instruction == 0x0a77)
187 sys32_rt_sigreturn(regs); 283 sys32_sigreturn(regs);
188 else 284 else if (compat && instruction == 0x0aad)
189 sys32_sigreturn(regs); 285 sys32_rt_sigreturn(regs);
190 return;
191 }
192#endif /* CONFIG_COMPAT */
193 if (rt)
194 sys_rt_sigreturn(regs);
195 else 286 else
287#endif
288 if (instruction == 0x0a77)
196 sys_sigreturn(regs); 289 sys_sigreturn(regs);
197 return; 290 else if (instruction == 0x0aad)
198} 291 sys_rt_sigreturn(regs);
199
200static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
201 unsigned long address, unsigned long error_code)
202{
203 pgd_t *pgd;
204 pmd_t *pmd;
205 pte_t *pte;
206 u16 *instruction;
207 unsigned long pfn, uaddr = regs->psw.addr;
208
209 spin_lock(&mm->page_table_lock);
210 pgd = pgd_offset(mm, uaddr);
211 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
212 goto out_fault;
213 pmd = pmd_offset(pgd, uaddr);
214 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
215 goto out_fault;
216 pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr);
217 if (!pte || !pte_present(*pte))
218 goto out_fault;
219 pfn = pte_pfn(*pte);
220 if (!pfn_valid(pfn))
221 goto out_fault;
222 spin_unlock(&mm->page_table_lock);
223
224 instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
225 if (*instruction == 0x0a77)
226 do_sigreturn(mm, regs, 0);
227 else if (*instruction == 0x0aad)
228 do_sigreturn(mm, regs, 1);
229 else { 292 else {
230 printk("- XXX - do_exception: task = %s, primary, NO EXEC "
231 "-> SIGSEGV\n", current->comm);
232 up_read(&mm->mmap_sem);
233 current->thread.prot_addr = address; 293 current->thread.prot_addr = address;
234 current->thread.trap_no = error_code; 294 current->thread.trap_no = error_code;
235 do_sigsegv(regs, error_code, SEGV_MAPERR, address); 295 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
236 } 296 }
237 return 0; 297 return 0;
238out_fault:
239 spin_unlock(&mm->page_table_lock);
240 return -EFAULT;
241} 298}
242#endif /* CONFIG_S390_EXEC_PROTECT */ 299#endif /* CONFIG_S390_EXEC_PROTECT */
243 300
@@ -253,49 +310,23 @@ out_fault:
253 * 3b Region third trans. -> Not present (nullification) 310 * 3b Region third trans. -> Not present (nullification)
254 */ 311 */
255static inline void 312static inline void
256do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) 313do_exception(struct pt_regs *regs, unsigned long error_code, int write)
257{ 314{
258 struct task_struct *tsk; 315 struct task_struct *tsk;
259 struct mm_struct *mm; 316 struct mm_struct *mm;
260 struct vm_area_struct * vma; 317 struct vm_area_struct *vma;
261 unsigned long address; 318 unsigned long address;
262 const struct exception_table_entry *fixup;
263 int si_code;
264 int space; 319 int space;
320 int si_code;
265 321
266 tsk = current; 322 if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
267 mm = tsk->mm;
268
269 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
270 SIGSEGV) == NOTIFY_STOP)
271 return; 323 return;
272 324
273 /* 325 tsk = current;
274 * Check for low-address protection. This needs to be treated 326 mm = tsk->mm;
275 * as a special case because the translation exception code
276 * field is not guaranteed to contain valid data in this case.
277 */
278 if (is_protection && !(S390_lowcore.trans_exc_code & 4)) {
279
280 /* Low-address protection hit in kernel mode means
281 NULL pointer write access in kernel mode. */
282 if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
283 address = 0;
284 space = 0;
285 goto no_context;
286 }
287
288 /* Low-address protection hit in user mode 'cannot happen'. */
289 die ("Low-address protection", regs, error_code);
290 do_exit(SIGKILL);
291 }
292 327
293 /* 328 /* get the failing address and the affected space */
294 * get the failing address 329 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
295 * more specific the segment and page table portion of
296 * the address
297 */
298 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
299 space = check_space(tsk); 330 space = check_space(tsk);
300 331
301 /* 332 /*
@@ -313,7 +344,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
313 */ 344 */
314 local_irq_enable(); 345 local_irq_enable();
315 346
316 down_read(&mm->mmap_sem); 347 down_read(&mm->mmap_sem);
317 348
318 si_code = SEGV_MAPERR; 349 si_code = SEGV_MAPERR;
319 vma = find_vma(mm, address); 350 vma = find_vma(mm, address);
@@ -330,19 +361,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
330 return; 361 return;
331#endif 362#endif
332 363
333 if (vma->vm_start <= address) 364 if (vma->vm_start <= address)
334 goto good_area; 365 goto good_area;
335 if (!(vma->vm_flags & VM_GROWSDOWN)) 366 if (!(vma->vm_flags & VM_GROWSDOWN))
336 goto bad_area; 367 goto bad_area;
337 if (expand_stack(vma, address)) 368 if (expand_stack(vma, address))
338 goto bad_area; 369 goto bad_area;
339/* 370/*
340 * Ok, we have a good vm_area for this memory access, so 371 * Ok, we have a good vm_area for this memory access, so
341 * we can handle it.. 372 * we can handle it..
342 */ 373 */
343good_area: 374good_area:
344 si_code = SEGV_ACCERR; 375 si_code = SEGV_ACCERR;
345 if (!is_protection) { 376 if (!write) {
346 /* page not present, check vm flags */ 377 /* page not present, check vm flags */
347 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 378 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
348 goto bad_area; 379 goto bad_area;
@@ -357,7 +388,7 @@ survive:
357 * make sure we exit gracefully rather than endlessly redo 388 * make sure we exit gracefully rather than endlessly redo
358 * the fault. 389 * the fault.
359 */ 390 */
360 switch (handle_mm_fault(mm, vma, address, is_protection)) { 391 switch (handle_mm_fault(mm, vma, address, write)) {
361 case VM_FAULT_MINOR: 392 case VM_FAULT_MINOR:
362 tsk->min_flt++; 393 tsk->min_flt++;
363 break; 394 break;
@@ -365,9 +396,12 @@ survive:
365 tsk->maj_flt++; 396 tsk->maj_flt++;
366 break; 397 break;
367 case VM_FAULT_SIGBUS: 398 case VM_FAULT_SIGBUS:
368 goto do_sigbus; 399 do_sigbus(regs, error_code, address);
400 return;
369 case VM_FAULT_OOM: 401 case VM_FAULT_OOM:
370 goto out_of_memory; 402 if (do_out_of_memory(regs, error_code, address))
403 goto survive;
404 return;
371 default: 405 default:
372 BUG(); 406 BUG();
373 } 407 }
@@ -385,75 +419,34 @@ survive:
385 * Fix it, but check if it's kernel or user first.. 419 * Fix it, but check if it's kernel or user first..
386 */ 420 */
387bad_area: 421bad_area:
388 up_read(&mm->mmap_sem); 422 up_read(&mm->mmap_sem);
389 423
390 /* User mode accesses just cause a SIGSEGV */ 424 /* User mode accesses just cause a SIGSEGV */
391 if (regs->psw.mask & PSW_MASK_PSTATE) { 425 if (regs->psw.mask & PSW_MASK_PSTATE) {
392 tsk->thread.prot_addr = address; 426 tsk->thread.prot_addr = address;
393 tsk->thread.trap_no = error_code; 427 tsk->thread.trap_no = error_code;
394 do_sigsegv(regs, error_code, si_code, address); 428 do_sigsegv(regs, error_code, si_code, address);
395 return; 429 return;
396 } 430 }
397 431
398no_context: 432no_context:
399 /* Are we prepared to handle this kernel fault? */ 433 do_no_context(regs, error_code, address);
400 fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
401 if (fixup) {
402 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
403 return;
404 }
405
406/*
407 * Oops. The kernel tried to access some bad page. We'll have to
408 * terminate things with extreme prejudice.
409 */
410 if (space == 0)
411 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
412 " at virtual kernel address %p\n", (void *)address);
413 else
414 printk(KERN_ALERT "Unable to handle kernel paging request"
415 " at virtual user address %p\n", (void *)address);
416
417 die("Oops", regs, error_code);
418 do_exit(SIGKILL);
419
420
421/*
422 * We ran out of memory, or some other thing happened to us that made
423 * us unable to handle the page fault gracefully.
424*/
425out_of_memory:
426 up_read(&mm->mmap_sem);
427 if (is_init(tsk)) {
428 yield();
429 down_read(&mm->mmap_sem);
430 goto survive;
431 }
432 printk("VM: killing process %s\n", tsk->comm);
433 if (regs->psw.mask & PSW_MASK_PSTATE)
434 do_exit(SIGKILL);
435 goto no_context;
436
437do_sigbus:
438 up_read(&mm->mmap_sem);
439
440 /*
441 * Send a sigbus, regardless of whether we were in kernel
442 * or user mode.
443 */
444 tsk->thread.prot_addr = address;
445 tsk->thread.trap_no = error_code;
446 force_sig(SIGBUS, tsk);
447
448 /* Kernel mode? Handle exceptions or die */
449 if (!(regs->psw.mask & PSW_MASK_PSTATE))
450 goto no_context;
451} 434}
452 435
453void __kprobes do_protection_exception(struct pt_regs *regs, 436void __kprobes do_protection_exception(struct pt_regs *regs,
454 unsigned long error_code) 437 unsigned long error_code)
455{ 438{
439 /* Protection exception is supressing, decrement psw address. */
456 regs->psw.addr -= (error_code >> 16); 440 regs->psw.addr -= (error_code >> 16);
441 /*
442 * Check for low-address protection. This needs to be treated
443 * as a special case because the translation exception code
444 * field is not guaranteed to contain valid data in this case.
445 */
446 if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
447 do_low_address(regs, error_code);
448 return;
449 }
457 do_exception(regs, 4, 1); 450 do_exception(regs, 4, 1);
458} 451}
459 452
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb5dc62f0d9c..e71929db8b06 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device)
398 398
399 if (device->state == device->target) 399 if (device->state == device->target)
400 wake_up(&dasd_init_waitq); 400 wake_up(&dasd_init_waitq);
401
402 /* let user-space know that the device status changed */
403 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
401} 404}
402 405
403/* 406/*
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index ed70852cc915..6a89cefe99bb 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/debug.h> 20#include <asm/debug.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/ipl.h>
22 23
23/* This is ugly... */ 24/* This is ugly... */
24#define PRINTK_HEADER "dasd_devmap:" 25#define PRINTK_HEADER "dasd_devmap:"
@@ -133,6 +134,8 @@ dasd_call_setup(char *str)
133__setup ("dasd=", dasd_call_setup); 134__setup ("dasd=", dasd_call_setup);
134#endif /* #ifndef MODULE */ 135#endif /* #ifndef MODULE */
135 136
137#define DASD_IPLDEV "ipldev"
138
136/* 139/*
137 * Read a device busid/devno from a string. 140 * Read a device busid/devno from a string.
138 */ 141 */
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 144{
142 int val, old_style; 145 int val, old_style;
143 146
147 /* Interpret ipldev busid */
148 if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
149 if (ipl_info.type != IPL_TYPE_CCW) {
150 MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
151 "device");
152 return -EINVAL;
153 }
154 *id0 = 0;
155 *id1 = ipl_info.data.ccw.dev_id.ssid;
156 *devno = ipl_info.data.ccw.dev_id.devno;
157 *str += strlen(DASD_IPLDEV);
158
159 return 0;
160 }
144 /* check for leading '0x' */ 161 /* check for leading '0x' */
145 old_style = 0; 162 old_style = 0;
146 if ((*str)[0] == '0' && (*str)[1] == 'x') { 163 if ((*str)[0] == '0' && (*str)[1] == 'x') {
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
829static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); 846static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
830 847
831static ssize_t 848static ssize_t
849dasd_device_status_show(struct device *dev, struct device_attribute *attr,
850 char *buf)
851{
852 struct dasd_device *device;
853 ssize_t len;
854
855 device = dasd_device_from_cdev(to_ccwdev(dev));
856 if (!IS_ERR(device)) {
857 switch (device->state) {
858 case DASD_STATE_NEW:
859 len = snprintf(buf, PAGE_SIZE, "new\n");
860 break;
861 case DASD_STATE_KNOWN:
862 len = snprintf(buf, PAGE_SIZE, "detected\n");
863 break;
864 case DASD_STATE_BASIC:
865 len = snprintf(buf, PAGE_SIZE, "basic\n");
866 break;
867 case DASD_STATE_UNFMT:
868 len = snprintf(buf, PAGE_SIZE, "unformatted\n");
869 break;
870 case DASD_STATE_READY:
871 len = snprintf(buf, PAGE_SIZE, "ready\n");
872 break;
873 case DASD_STATE_ONLINE:
874 len = snprintf(buf, PAGE_SIZE, "online\n");
875 break;
876 default:
877 len = snprintf(buf, PAGE_SIZE, "no stat\n");
878 break;
879 }
880 dasd_put_device(device);
881 } else
882 len = snprintf(buf, PAGE_SIZE, "unknown\n");
883 return len;
884}
885
886static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
887
888static ssize_t
832dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) 889dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
833{ 890{
834 struct dasd_devmap *devmap; 891 struct dasd_devmap *devmap;
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
939static struct attribute * dasd_attrs[] = { 996static struct attribute * dasd_attrs[] = {
940 &dev_attr_readonly.attr, 997 &dev_attr_readonly.attr,
941 &dev_attr_discipline.attr, 998 &dev_attr_discipline.attr,
999 &dev_attr_status.attr,
942 &dev_attr_alias.attr, 1000 &dev_attr_alias.attr,
943 &dev_attr_vendor.attr, 1001 &dev_attr_vendor.attr,
944 &dev_attr_uid.attr, 1002 &dev_attr_uid.attr,
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 293e667b50f2..c210784bdf46 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o 6 sclp_info.o sclp_config.o sclp_chp.o
7 7
8obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o 29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
30obj-$(CONFIG_MONREADER) += monreader.o 30obj-$(CONFIG_MONREADER) += monreader.o
31obj-$(CONFIG_MONWRITER) += monwriter.o 31obj-$(CONFIG_MONWRITER) += monwriter.o
32
33zcore_mod-objs := sclp_sdias.o zcore.o
34obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9a328f14a641..6000bdee4082 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -813,12 +813,6 @@ con3215_unblank(void)
813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
814} 814}
815 815
816static int __init
817con3215_consetup(struct console *co, char *options)
818{
819 return 0;
820}
821
822/* 816/*
823 * The console structure for the 3215 console 817 * The console structure for the 3215 console
824 */ 818 */
@@ -827,7 +821,6 @@ static struct console con3215 = {
827 .write = con3215_write, 821 .write = con3215_write,
828 .device = con3215_device, 822 .device = con3215_device,
829 .unblank = con3215_unblank, 823 .unblank = con3215_unblank,
830 .setup = con3215_consetup,
831 .flags = CON_PRINTBUFFER, 824 .flags = CON_PRINTBUFFER,
832}; 825};
833 826
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 8e7f2d7633d6..fd3479119eb4 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -555,12 +555,6 @@ con3270_unblank(void)
555 spin_unlock_irqrestore(&cp->view.lock, flags); 555 spin_unlock_irqrestore(&cp->view.lock, flags);
556} 556}
557 557
558static int __init
559con3270_consetup(struct console *co, char *options)
560{
561 return 0;
562}
563
564/* 558/*
565 * The console structure for the 3270 console 559 * The console structure for the 3270 console
566 */ 560 */
@@ -569,7 +563,6 @@ static struct console con3270 = {
569 .write = con3270_write, 563 .write = con3270_write,
570 .device = con3270_device, 564 .device = con3270_device,
571 .unblank = con3270_unblank, 565 .unblank = con3270_unblank,
572 .setup = con3270_consetup,
573 .flags = CON_PRINTBUFFER, 566 .flags = CON_PRINTBUFFER,
574}; 567};
575 568
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f171de3b0b11..fa62e6944057 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -15,6 +15,7 @@
15#include <linux/timer.h> 15#include <linux/timer.h>
16#include <linux/reboot.h> 16#include <linux/reboot.h>
17#include <linux/jiffies.h> 17#include <linux/jiffies.h>
18#include <linux/init.h>
18#include <asm/types.h> 19#include <asm/types.h>
19#include <asm/s390_ext.h> 20#include <asm/s390_ext.h>
20 21
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
510} 511}
511 512
512static struct sclp_register sclp_state_change_event = { 513static struct sclp_register sclp_state_change_event = {
513 .receive_mask = EvTyp_StateChange_Mask, 514 .receive_mask = EVTYP_STATECHANGE_MASK,
514 .receiver_fn = sclp_state_change_cb 515 .receiver_fn = sclp_state_change_cb
515}; 516};
516 517
@@ -930,3 +931,10 @@ sclp_init(void)
930 sclp_init_mask(1); 931 sclp_init_mask(1);
931 return 0; 932 return 0;
932} 933}
934
935static __init int sclp_initcall(void)
936{
937 return sclp_init();
938}
939
940arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7d29ab45a6ed..87ac4a3ad49d 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,33 +19,37 @@
19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) 19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
20#define MAX_CONSOLE_PAGES 4 20#define MAX_CONSOLE_PAGES 4
21 21
22#define EvTyp_OpCmd 0x01 22#define EVTYP_OPCMD 0x01
23#define EvTyp_Msg 0x02 23#define EVTYP_MSG 0x02
24#define EvTyp_StateChange 0x08 24#define EVTYP_STATECHANGE 0x08
25#define EvTyp_PMsgCmd 0x09 25#define EVTYP_PMSGCMD 0x09
26#define EvTyp_CntlProgOpCmd 0x20 26#define EVTYP_CNTLPROGOPCMD 0x20
27#define EvTyp_CntlProgIdent 0x0B 27#define EVTYP_CNTLPROGIDENT 0x0B
28#define EvTyp_SigQuiesce 0x1D 28#define EVTYP_SIGQUIESCE 0x1D
29#define EvTyp_VT220Msg 0x1A 29#define EVTYP_VT220MSG 0x1A
30 30#define EVTYP_CONFMGMDATA 0x04
31#define EvTyp_OpCmd_Mask 0x80000000 31#define EVTYP_SDIAS 0x1C
32#define EvTyp_Msg_Mask 0x40000000 32
33#define EvTyp_StateChange_Mask 0x01000000 33#define EVTYP_OPCMD_MASK 0x80000000
34#define EvTyp_PMsgCmd_Mask 0x00800000 34#define EVTYP_MSG_MASK 0x40000000
35#define EvTyp_CtlProgOpCmd_Mask 0x00000001 35#define EVTYP_STATECHANGE_MASK 0x01000000
36#define EvTyp_CtlProgIdent_Mask 0x00200000 36#define EVTYP_PMSGCMD_MASK 0x00800000
37#define EvTyp_SigQuiesce_Mask 0x00000008 37#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
38#define EvTyp_VT220Msg_Mask 0x00000040 38#define EVTYP_CTLPROGIDENT_MASK 0x00200000
39 39#define EVTYP_SIGQUIESCE_MASK 0x00000008
40#define GnrlMsgFlgs_DOM 0x8000 40#define EVTYP_VT220MSG_MASK 0x00000040
41#define GnrlMsgFlgs_SndAlrm 0x4000 41#define EVTYP_CONFMGMDATA_MASK 0x10000000
42#define GnrlMsgFlgs_HoldMsg 0x2000 42#define EVTYP_SDIAS_MASK 0x00000010
43 43
44#define LnTpFlgs_CntlText 0x8000 44#define GNRLMSGFLGS_DOM 0x8000
45#define LnTpFlgs_LabelText 0x4000 45#define GNRLMSGFLGS_SNDALRM 0x4000
46#define LnTpFlgs_DataText 0x2000 46#define GNRLMSGFLGS_HOLDMSG 0x2000
47#define LnTpFlgs_EndText 0x1000 47
48#define LnTpFlgs_PromptText 0x0800 48#define LNTPFLGS_CNTLTEXT 0x8000
49#define LNTPFLGS_LABELTEXT 0x4000
50#define LNTPFLGS_DATATEXT 0x2000
51#define LNTPFLGS_ENDTEXT 0x1000
52#define LNTPFLGS_PROMPTTEXT 0x0800
49 53
50typedef unsigned int sclp_cmdw_t; 54typedef unsigned int sclp_cmdw_t;
51 55
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t;
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 60#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
57 61
58#define GDS_ID_MDSMU 0x1310 62#define GDS_ID_MDSMU 0x1310
59#define GDS_ID_MDSRouteInfo 0x1311 63#define GDS_ID_MDSROUTEINFO 0x1311
60#define GDS_ID_AgUnWrkCorr 0x1549 64#define GDS_ID_AGUNWRKCORR 0x1549
61#define GDS_ID_SNACondReport 0x1532 65#define GDS_ID_SNACONDREPORT 0x1532
62#define GDS_ID_CPMSU 0x1212 66#define GDS_ID_CPMSU 0x1212
63#define GDS_ID_RoutTargInstr 0x154D 67#define GDS_ID_ROUTTARGINSTR 0x154D
64#define GDS_ID_OpReq 0x8070 68#define GDS_ID_OPREQ 0x8070
65#define GDS_ID_TextCmd 0x1320 69#define GDS_ID_TEXTCMD 0x1320
66 70
67#define GDS_KEY_SelfDefTextMsg 0x31 71#define GDS_KEY_SELFDEFTEXTMSG 0x31
68 72
69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 73typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
70 74
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
new file mode 100644
index 000000000000..a66b914519b5
--- /dev/null
+++ b/drivers/s390/char/sclp_chp.c
@@ -0,0 +1,196 @@
1/*
2 * drivers/s390/char/sclp_chp.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/gfp.h>
10#include <linux/errno.h>
11#include <linux/completion.h>
12#include <asm/sclp.h>
13#include <asm/chpid.h>
14
15#include "sclp.h"
16
17#define TAG "sclp_chp: "
18
19#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001
20#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001
21#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001
22
23static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
24{
25 return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
26}
27
28static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
29{
30 return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
31}
32
33static void chp_callback(struct sclp_req *req, void *data)
34{
35 struct completion *completion = data;
36
37 complete(completion);
38}
39
40struct chp_cfg_sccb {
41 struct sccb_header header;
42 u8 ccm;
43 u8 reserved[6];
44 u8 cssid;
45} __attribute__((packed));
46
47struct chp_cfg_data {
48 struct chp_cfg_sccb sccb;
49 struct sclp_req req;
50 struct completion completion;
51} __attribute__((packed));
52
53static int do_configure(sclp_cmdw_t cmd)
54{
55 struct chp_cfg_data *data;
56 int rc;
57
58 /* Prepare sccb. */
59 data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
60 if (!data)
61 return -ENOMEM;
62 data->sccb.header.length = sizeof(struct chp_cfg_sccb);
63 data->req.command = cmd;
64 data->req.sccb = &(data->sccb);
65 data->req.status = SCLP_REQ_FILLED;
66 data->req.callback = chp_callback;
67 data->req.callback_data = &(data->completion);
68 init_completion(&data->completion);
69
70 /* Perform sclp request. */
71 rc = sclp_add_request(&(data->req));
72 if (rc)
73 goto out;
74 wait_for_completion(&data->completion);
75
76 /* Check response .*/
77 if (data->req.status != SCLP_REQ_DONE) {
78 printk(KERN_WARNING TAG "configure channel-path request failed "
79 "(status=0x%02x)\n", data->req.status);
80 rc = -EIO;
81 goto out;
82 }
83 switch (data->sccb.header.response_code) {
84 case 0x0020:
85 case 0x0120:
86 case 0x0440:
87 case 0x0450:
88 break;
89 default:
90 printk(KERN_WARNING TAG "configure channel-path failed "
91 "(cmd=0x%08x, response=0x%04x)\n", cmd,
92 data->sccb.header.response_code);
93 rc = -EIO;
94 break;
95 }
96out:
97 free_page((unsigned long) data);
98
99 return rc;
100}
101
102/**
103 * sclp_chp_configure - perform configure channel-path sclp command
104 * @chpid: channel-path ID
105 *
106 * Perform configure channel-path command sclp command for specified chpid.
107 * Return 0 after command successfully finished, non-zero otherwise.
108 */
109int sclp_chp_configure(struct chp_id chpid)
110{
111 return do_configure(get_configure_cmdw(chpid));
112}
113
114/**
115 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
116 * @chpid: channel-path ID
117 *
118 * Perform deconfigure channel-path command sclp command for specified chpid
119 * and wait for completion. On success return 0. Return non-zero otherwise.
120 */
121int sclp_chp_deconfigure(struct chp_id chpid)
122{
123 return do_configure(get_deconfigure_cmdw(chpid));
124}
125
126struct chp_info_sccb {
127 struct sccb_header header;
128 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
129 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
130 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
131 u8 ccm;
132 u8 reserved[6];
133 u8 cssid;
134} __attribute__((packed));
135
136struct chp_info_data {
137 struct chp_info_sccb sccb;
138 struct sclp_req req;
139 struct completion completion;
140} __attribute__((packed));
141
142/**
143 * sclp_chp_read_info - perform read channel-path information sclp command
144 * @info: resulting channel-path information data
145 *
146 * Perform read channel-path information sclp command and wait for completion.
147 * On success, store channel-path information in @info and return 0. Return
148 * non-zero otherwise.
149 */
150int sclp_chp_read_info(struct sclp_chp_info *info)
151{
152 struct chp_info_data *data;
153 int rc;
154
155 /* Prepare sccb. */
156 data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
157 if (!data)
158 return -ENOMEM;
159 data->sccb.header.length = sizeof(struct chp_info_sccb);
160 data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
161 data->req.sccb = &(data->sccb);
162 data->req.status = SCLP_REQ_FILLED;
163 data->req.callback = chp_callback;
164 data->req.callback_data = &(data->completion);
165 init_completion(&data->completion);
166
167 /* Perform sclp request. */
168 rc = sclp_add_request(&(data->req));
169 if (rc)
170 goto out;
171 wait_for_completion(&data->completion);
172
173 /* Check response .*/
174 if (data->req.status != SCLP_REQ_DONE) {
175 printk(KERN_WARNING TAG "read channel-path info request failed "
176 "(status=0x%02x)\n", data->req.status);
177 rc = -EIO;
178 goto out;
179 }
180 if (data->sccb.header.response_code != 0x0010) {
181 printk(KERN_WARNING TAG "read channel-path info failed "
182 "(response=0x%04x)\n", data->sccb.header.response_code);
183 rc = -EIO;
184 goto out;
185 }
186 memcpy(info->recognized, data->sccb.recognized,
187 SCLP_CHP_INFO_MASK_SIZE);
188 memcpy(info->standby, data->sccb.standby,
189 SCLP_CHP_INFO_MASK_SIZE);
190 memcpy(info->configured, data->sccb.configured,
191 SCLP_CHP_INFO_MASK_SIZE);
192out:
193 free_page((unsigned long) data);
194
195 return rc;
196}
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644
index 000000000000..5322e5e54a98
--- /dev/null
+++ b/drivers/s390/char/sclp_config.c
@@ -0,0 +1,75 @@
1/*
2 * drivers/s390/char/sclp_config.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/cpu.h>
11#include <linux/sysdev.h>
12#include <linux/workqueue.h>
13#include "sclp.h"
14
15#define TAG "sclp_config: "
16
17struct conf_mgm_data {
18 u8 reserved;
19 u8 ev_qualifier;
20} __attribute__((packed));
21
22#define EV_QUAL_CAP_CHANGE 3
23
24static struct work_struct sclp_cpu_capability_work;
25
26static void sclp_cpu_capability_notify(struct work_struct *work)
27{
28 int cpu;
29 struct sys_device *sysdev;
30
31 printk(KERN_WARNING TAG "cpu capability changed.\n");
32 lock_cpu_hotplug();
33 for_each_online_cpu(cpu) {
34 sysdev = get_cpu_sysdev(cpu);
35 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
36 }
37 unlock_cpu_hotplug();
38}
39
40static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
41{
42 struct conf_mgm_data *cdata;
43
44 cdata = (struct conf_mgm_data *)(evbuf + 1);
45 if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
46 schedule_work(&sclp_cpu_capability_work);
47}
48
49static struct sclp_register sclp_conf_register =
50{
51 .receive_mask = EVTYP_CONFMGMDATA_MASK,
52 .receiver_fn = sclp_conf_receiver_fn,
53};
54
55static int __init sclp_conf_init(void)
56{
57 int rc;
58
59 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
60
61 rc = sclp_register(&sclp_conf_register);
62 if (rc) {
63 printk(KERN_ERR TAG "failed to register (%d).\n", rc);
64 return rc;
65 }
66
67 if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
68 printk(KERN_WARNING TAG "no configuration management.\n");
69 sclp_unregister(&sclp_conf_register);
70 rc = -ENOSYS;
71 }
72 return rc;
73}
74
75__initcall(sclp_conf_init);
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 65aa2c85737f..29fe2a5ec2fe 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -46,7 +46,7 @@ struct cpi_sccb {
46/* Event type structure for write message and write priority message */ 46/* Event type structure for write message and write priority message */
47static struct sclp_register sclp_cpi_event = 47static struct sclp_register sclp_cpi_event =
48{ 48{
49 .send_mask = EvTyp_CtlProgIdent_Mask 49 .send_mask = EVTYP_CTLPROGIDENT_MASK
50}; 50};
51 51
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
@@ -201,7 +201,7 @@ cpi_module_init(void)
201 "console.\n"); 201 "console.\n");
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { 204 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
205 printk(KERN_WARNING "cpi: no control program identification " 205 printk(KERN_WARNING "cpi: no control program identification "
206 "support\n"); 206 "support\n");
207 sclp_unregister(&sclp_cpi_event); 207 sclp_unregister(&sclp_cpi_event);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index baa8fe669ed2..45ff25e787cb 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf)
43} 43}
44 44
45static struct sclp_register sclp_quiesce_event = { 45static struct sclp_register sclp_quiesce_event = {
46 .receive_mask = EvTyp_SigQuiesce_Mask, 46 .receive_mask = EVTYP_SIGQUIESCE_MASK,
47 .receiver_fn = sclp_quiesce_handler 47 .receiver_fn = sclp_quiesce_handler
48}; 48};
49 49
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 2486783ea58e..bbd5b8b66f42 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -30,7 +30,7 @@
30 30
31/* Event type structure for write message and write priority message */ 31/* Event type structure for write message and write priority message */
32static struct sclp_register sclp_rw_event = { 32static struct sclp_register sclp_rw_event = {
33 .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask 33 .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
34}; 34};
35 35
36/* 36/*
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
64 memset(sccb, 0, sizeof(struct write_sccb)); 64 memset(sccb, 0, sizeof(struct write_sccb));
65 sccb->header.length = sizeof(struct write_sccb); 65 sccb->header.length = sizeof(struct write_sccb);
66 sccb->msg_buf.header.length = sizeof(struct msg_buf); 66 sccb->msg_buf.header.length = sizeof(struct msg_buf);
67 sccb->msg_buf.header.type = EvTyp_Msg; 67 sccb->msg_buf.header.type = EVTYP_MSG;
68 sccb->msg_buf.mdb.header.length = sizeof(struct mdb); 68 sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
69 sccb->msg_buf.mdb.header.type = 1; 69 sccb->msg_buf.mdb.header.type = 1;
70 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ 70 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
114 memset(mto, 0, sizeof(struct mto)); 114 memset(mto, 0, sizeof(struct mto));
115 mto->length = sizeof(struct mto); 115 mto->length = sizeof(struct mto);
116 mto->type = 4; /* message text object */ 116 mto->type = 4; /* message text object */
117 mto->line_type_flags = LnTpFlgs_EndText; /* end text */ 117 mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
118 118
119 /* set pointer to first byte after struct mto. */ 119 /* set pointer to first byte after struct mto. */
120 buffer->current_line = (char *) (mto + 1); 120 buffer->current_line = (char *) (mto + 1);
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
215 case '\a': /* bell, one for several times */ 215 case '\a': /* bell, one for several times */
216 /* set SCLP sound alarm bit in General Object */ 216 /* set SCLP sound alarm bit in General Object */
217 buffer->sccb->msg_buf.mdb.go.general_msg_flags |= 217 buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
218 GnrlMsgFlgs_SndAlrm; 218 GNRLMSGFLGS_SNDALRM;
219 break; 219 break;
220 case '\t': /* horizontal tabulator */ 220 case '\t': /* horizontal tabulator */
221 /* check if new mto needs to be created */ 221 /* check if new mto needs to be created */
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
452 return -EIO; 452 return -EIO;
453 453
454 sccb = buffer->sccb; 454 sccb = buffer->sccb;
455 if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) 455 if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
456 /* Use normal write message */ 456 /* Use normal write message */
457 sccb->msg_buf.header.type = EvTyp_Msg; 457 sccb->msg_buf.header.type = EVTYP_MSG;
458 else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) 458 else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
459 /* Use write priority message */ 459 /* Use write priority message */
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 000000000000..52283daddaef
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,255 @@
1/*
2 * Sclp "store data in absolut storage"
3 *
4 * Copyright IBM Corp. 2003,2007
5 * Author(s): Michael Holzheu
6 */
7
8#include <linux/sched.h>
9#include <asm/sclp.h>
10#include <asm/debug.h>
11#include <asm/ipl.h>
12#include "sclp.h"
13#include "sclp_rw.h"
14
15#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
16#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x )
17
18#define SDIAS_RETRIES 300
19#define SDIAS_SLEEP_TICKS 50
20
21#define EQ_STORE_DATA 0x0
22#define EQ_SIZE 0x1
23#define DI_FCP_DUMP 0x0
24#define ASA_SIZE_32 0x0
25#define ASA_SIZE_64 0x1
26#define EVSTATE_ALL_STORED 0x0
27#define EVSTATE_NO_DATA 0x3
28#define EVSTATE_PART_STORED 0x10
29
30static struct debug_info *sdias_dbf;
31
32static struct sclp_register sclp_sdias_register = {
33 .send_mask = EVTYP_SDIAS_MASK,
34};
35
36struct sdias_evbuf {
37 struct evbuf_header hdr;
38 u8 event_qual;
39 u8 data_id;
40 u64 reserved2;
41 u32 event_id;
42 u16 reserved3;
43 u8 asa_size;
44 u8 event_status;
45 u32 reserved4;
46 u32 blk_cnt;
47 u64 asa;
48 u32 reserved5;
49 u32 fbn;
50 u32 reserved6;
51 u32 lbn;
52 u16 reserved7;
53 u16 dbs;
54} __attribute__((packed));
55
56struct sdias_sccb {
57 struct sccb_header hdr;
58 struct sdias_evbuf evbuf;
59} __attribute__((packed));
60
61static struct sdias_sccb sccb __attribute__((aligned(4096)));
62
63static int sclp_req_done;
64static wait_queue_head_t sdias_wq;
65static DEFINE_MUTEX(sdias_mutex);
66
67static void sdias_callback(struct sclp_req *request, void *data)
68{
69 struct sdias_sccb *sccb;
70
71 sccb = (struct sdias_sccb *) request->sccb;
72 sclp_req_done = 1;
73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
74 TRACE("callback done\n");
75}
76
77static int sdias_sclp_send(struct sclp_req *req)
78{
79 int retries;
80 int rc;
81
82 for (retries = SDIAS_RETRIES; retries; retries--) {
83 sclp_req_done = 0;
84 TRACE("add request\n");
85 rc = sclp_add_request(req);
86 if (rc) {
87 /* not initiated, wait some time and retry */
88 set_current_state(TASK_INTERRUPTIBLE);
89 TRACE("add request failed: rc = %i\n",rc);
90 schedule_timeout(SDIAS_SLEEP_TICKS);
91 continue;
92 }
93 /* initiated, wait for completion of service call */
94 wait_event(sdias_wq, (sclp_req_done == 1));
95 if (req->status == SCLP_REQ_FAILED) {
96 TRACE("sclp request failed\n");
97 rc = -EIO;
98 continue;
99 }
100 TRACE("request done\n");
101 break;
102 }
103 return rc;
104}
105
106/*
107 * Get number of blocks (4K) available in the HSA
108 */
109int sclp_sdias_blk_count(void)
110{
111 struct sclp_req request;
112 int rc;
113
114 mutex_lock(&sdias_mutex);
115
116 memset(&sccb, 0, sizeof(sccb));
117 memset(&request, 0, sizeof(request));
118
119 sccb.hdr.length = sizeof(sccb);
120 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
121 sccb.evbuf.hdr.type = EVTYP_SDIAS;
122 sccb.evbuf.event_qual = EQ_SIZE;
123 sccb.evbuf.data_id = DI_FCP_DUMP;
124 sccb.evbuf.event_id = 4712;
125 sccb.evbuf.dbs = 1;
126
127 request.sccb = &sccb;
128 request.command = SCLP_CMDW_WRITE_EVENT_DATA;
129 request.status = SCLP_REQ_FILLED;
130 request.callback = sdias_callback;
131
132 rc = sdias_sclp_send(&request);
133 if (rc) {
134 ERROR_MSG("sclp_send failed for get_nr_blocks\n");
135 goto out;
136 }
137 if (sccb.hdr.response_code != 0x0020) {
138 TRACE("send failed: %x\n", sccb.hdr.response_code);
139 rc = -EIO;
140 goto out;
141 }
142
143 switch (sccb.evbuf.event_status) {
144 case 0:
145 rc = sccb.evbuf.blk_cnt;
146 break;
147 default:
148 ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
149 rc = -EIO;
150 goto out;
151 }
152 TRACE("%i blocks\n", rc);
153out:
154 mutex_unlock(&sdias_mutex);
155 return rc;
156}
157
158/*
159 * Copy from HSA to absolute storage (not reentrant):
160 *
161 * @dest : Address of buffer where data should be copied
162 * @start_blk: Start Block (beginning with 1)
163 * @nr_blks : Number of 4K blocks to copy
164 *
165 * Return Value: 0 : Requested 'number' of blocks of data copied
166 * <0: ERROR - negative event status
167 */
168int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
169{
170 struct sclp_req request;
171 int rc;
172
173 mutex_lock(&sdias_mutex);
174
175 memset(&sccb, 0, sizeof(sccb));
176 memset(&request, 0, sizeof(request));
177
178 sccb.hdr.length = sizeof(sccb);
179 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
180 sccb.evbuf.hdr.type = EVTYP_SDIAS;
181 sccb.evbuf.hdr.flags = 0;
182 sccb.evbuf.event_qual = EQ_STORE_DATA;
183 sccb.evbuf.data_id = DI_FCP_DUMP;
184 sccb.evbuf.event_id = 4712;
185#ifdef __s390x__
186 sccb.evbuf.asa_size = ASA_SIZE_64;
187#else
188 sccb.evbuf.asa_size = ASA_SIZE_32;
189#endif
190 sccb.evbuf.event_status = 0;
191 sccb.evbuf.blk_cnt = nr_blks;
192 sccb.evbuf.asa = (unsigned long)dest;
193 sccb.evbuf.fbn = start_blk;
194 sccb.evbuf.lbn = 0;
195 sccb.evbuf.dbs = 1;
196
197 request.sccb = &sccb;
198 request.command = SCLP_CMDW_WRITE_EVENT_DATA;
199 request.status = SCLP_REQ_FILLED;
200 request.callback = sdias_callback;
201
202 rc = sdias_sclp_send(&request);
203 if (rc) {
204 ERROR_MSG("sclp_send failed: %x\n", rc);
205 goto out;
206 }
207 if (sccb.hdr.response_code != 0x0020) {
208 TRACE("copy failed: %x\n", sccb.hdr.response_code);
209 rc = -EIO;
210 goto out;
211 }
212
213 switch (sccb.evbuf.event_status) {
214 case EVSTATE_ALL_STORED:
215 TRACE("all stored\n");
216 case EVSTATE_PART_STORED:
217 TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
218 break;
219 case EVSTATE_NO_DATA:
220 TRACE("no data\n");
221 default:
222 ERROR_MSG("Error from SCLP while copying hsa. "
223 "Event status = %x\n",
224 sccb.evbuf.event_status);
225 rc = -EIO;
226 }
227out:
228 mutex_unlock(&sdias_mutex);
229 return rc;
230}
231
232int __init sdias_init(void)
233{
234 int rc;
235
236 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
237 return 0;
238 sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
239 debug_register_view(sdias_dbf, &debug_sprintf_view);
240 debug_set_level(sdias_dbf, 6);
241 rc = sclp_register(&sclp_sdias_register);
242 if (rc) {
243 ERROR_MSG("sclp register failed\n");
244 return rc;
245 }
246 init_waitqueue_head(&sdias_wq);
247 TRACE("init done\n");
248 return 0;
249}
250
251void __exit sdias_exit(void)
252{
253 debug_unregister(sdias_dbf);
254 sclp_unregister(&sclp_sdias_register);
255}
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 076816b9d528..e3b3d390b4a3 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start,
648 subvec = start; 648 subvec = start;
649 while (subvec < end) { 649 while (subvec < end) {
650 subvec = find_gds_subvector(subvec, end, 650 subvec = find_gds_subvector(subvec, end,
651 GDS_KEY_SelfDefTextMsg); 651 GDS_KEY_SELFDEFTEXTMSG);
652 if (!subvec) 652 if (!subvec)
653 break; 653 break;
654 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), 654 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
664 664
665 vec = start; 665 vec = start;
666 while (vec < end) { 666 while (vec < end) {
667 vec = find_gds_vector(vec, end, GDS_ID_TextCmd); 667 vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
668 if (!vec) 668 if (!vec)
669 break; 669 break;
670 sclp_eval_textcmd((struct gds_subvector *)(vec + 1), 670 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg)
703 703
704static struct sclp_register sclp_input_event = 704static struct sclp_register sclp_input_event =
705{ 705{
706 .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, 706 .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
707 .state_change_fn = sclp_tty_state_change, 707 .state_change_fn = sclp_tty_state_change,
708 .receiver_fn = sclp_tty_receiver 708 .receiver_fn = sclp_tty_receiver
709}; 709};
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f77dc33b5f8d..726334757bbf 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void);
99 99
100/* Registration structure for our interest in SCLP event buffers */ 100/* Registration structure for our interest in SCLP event buffers */
101static struct sclp_register sclp_vt220_register = { 101static struct sclp_register sclp_vt220_register = {
102 .send_mask = EvTyp_VT220Msg_Mask, 102 .send_mask = EVTYP_VT220MSG_MASK,
103 .receive_mask = EvTyp_VT220Msg_Mask, 103 .receive_mask = EVTYP_VT220MSG_MASK,
104 .state_change_fn = NULL, 104 .state_change_fn = NULL,
105 .receiver_fn = sclp_vt220_receiver_fn 105 .receiver_fn = sclp_vt220_receiver_fn
106}; 106};
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
202static int 202static int
203__sclp_vt220_emit(struct sclp_vt220_request *request) 203__sclp_vt220_emit(struct sclp_vt220_request *request)
204{ 204{
205 if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) { 205 if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
206 request->sclp_req.status = SCLP_REQ_FAILED; 206 request->sclp_req.status = SCLP_REQ_FAILED;
207 return -EIO; 207 return -EIO;
208 } 208 }
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page)
284 sccb->header.length = sizeof(struct sclp_vt220_sccb); 284 sccb->header.length = sizeof(struct sclp_vt220_sccb);
285 sccb->header.function_code = SCLP_NORMAL_WRITE; 285 sccb->header.function_code = SCLP_NORMAL_WRITE;
286 sccb->header.response_code = 0x0000; 286 sccb->header.response_code = 0x0000;
287 sccb->evbuf.type = EvTyp_VT220Msg; 287 sccb->evbuf.type = EVTYP_VT220MSG;
288 sccb->evbuf.length = sizeof(struct evbuf_header); 288 sccb->evbuf.length = sizeof(struct evbuf_header);
289 289
290 return request; 290 return request;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b87d3b019936..a5a00e9ae4d0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
125 .recording_name = "EREP", 125 .recording_name = "EREP",
126 .minor_num = 0, 126 .minor_num = 0,
127 .buffer_free = 1, 127 .buffer_free = 1,
128 .priv_lock = SPIN_LOCK_UNLOCKED, 128 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
129 .autorecording = 1, 129 .autorecording = 1,
130 .autopurge = 1, 130 .autopurge = 1,
131 }, 131 },
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
134 .recording_name = "ACCOUNT", 134 .recording_name = "ACCOUNT",
135 .minor_num = 1, 135 .minor_num = 1,
136 .buffer_free = 1, 136 .buffer_free = 1,
137 .priv_lock = SPIN_LOCK_UNLOCKED, 137 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
138 .autorecording = 1, 138 .autorecording = 1,
139 .autopurge = 1, 139 .autopurge = 1,
140 }, 140 },
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
143 .recording_name = "SYMPTOM", 143 .recording_name = "SYMPTOM",
144 .minor_num = 2, 144 .minor_num = 2,
145 .buffer_free = 1, 145 .buffer_free = 1,
146 .priv_lock = SPIN_LOCK_UNLOCKED, 146 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
147 .autorecording = 1, 147 .autorecording = 1,
148 .autopurge = 1, 148 .autopurge = 1,
149 } 149 }
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
385 385
386 struct vmlogrdr_priv_t * logptr = filp->private_data; 386 struct vmlogrdr_priv_t * logptr = filp->private_data;
387 387
388 iucv_path_sever(logptr->path, NULL);
389 kfree(logptr->path);
390 logptr->path = NULL;
388 if (logptr->autorecording) { 391 if (logptr->autorecording) {
389 ret = vmlogrdr_recording(logptr,0,logptr->autopurge); 392 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
390 if (ret) 393 if (ret)
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644
index 000000000000..89d439316a53
--- /dev/null
+++ b/drivers/s390/char/zcore.c
@@ -0,0 +1,651 @@
1/*
2 * zcore module to export memory content and register sets for creating system
3 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
4 * dump format as s390 standalone dumps.
5 *
6 * For more information please refer to Documentation/s390/zfcpdump.txt
7 *
8 * Copyright IBM Corp. 2003,2007
9 * Author(s): Michael Holzheu
10 */
11
12#include <linux/init.h>
13#include <linux/miscdevice.h>
14#include <linux/utsname.h>
15#include <linux/debugfs.h>
16#include <asm/ipl.h>
17#include <asm/sclp.h>
18#include <asm/setup.h>
19#include <asm/sigp.h>
20#include <asm/uaccess.h>
21#include <asm/debug.h>
22#include <asm/processor.h>
23#include <asm/irqflags.h>
24
25#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
26#define MSG(x...) printk( KERN_ALERT x )
27#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
28
29#define TO_USER 0
30#define TO_KERNEL 1
31
32enum arch_id {
33 ARCH_S390 = 0,
34 ARCH_S390X = 1,
35};
36
37/* dump system info */
38
39struct sys_info {
40 enum arch_id arch;
41 unsigned long sa_base;
42 u32 sa_size;
43 int cpu_map[NR_CPUS];
44 unsigned long mem_size;
45 union save_area lc_mask;
46};
47
48static struct sys_info sys_info;
49static struct debug_info *zcore_dbf;
50static int hsa_available;
51static struct dentry *zcore_dir;
52static struct dentry *zcore_file;
53
54/*
55 * Copy memory from HSA to kernel or user memory (not reentrant):
56 *
57 * @dest: Kernel or user buffer where memory should be copied to
58 * @src: Start address within HSA where data should be copied
59 * @count: Size of buffer, which should be copied
60 * @mode: Either TO_KERNEL or TO_USER
61 */
62static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
63{
64 int offs, blk_num;
65 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
66
67 if (count == 0)
68 return 0;
69
70 /* copy first block */
71 offs = 0;
72 if ((src % PAGE_SIZE) != 0) {
73 blk_num = src / PAGE_SIZE + 2;
74 if (sclp_sdias_copy(buf, blk_num, 1)) {
75 TRACE("sclp_sdias_copy() failed\n");
76 return -EIO;
77 }
78 offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
79 if (mode == TO_USER) {
80 if (copy_to_user((__force __user void*) dest,
81 buf + (src % PAGE_SIZE), offs))
82 return -EFAULT;
83 } else
84 memcpy(dest, buf + (src % PAGE_SIZE), offs);
85 }
86 if (offs == count)
87 goto out;
88
89 /* copy middle */
90 for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
91 blk_num = (src + offs) / PAGE_SIZE + 2;
92 if (sclp_sdias_copy(buf, blk_num, 1)) {
93 TRACE("sclp_sdias_copy() failed\n");
94 return -EIO;
95 }
96 if (mode == TO_USER) {
97 if (copy_to_user((__force __user void*) dest + offs,
98 buf, PAGE_SIZE))
99 return -EFAULT;
100 } else
101 memcpy(dest + offs, buf, PAGE_SIZE);
102 }
103 if (offs == count)
104 goto out;
105
106 /* copy last block */
107 blk_num = (src + offs) / PAGE_SIZE + 2;
108 if (sclp_sdias_copy(buf, blk_num, 1)) {
109 TRACE("sclp_sdias_copy() failed\n");
110 return -EIO;
111 }
112 if (mode == TO_USER) {
113 if (copy_to_user((__force __user void*) dest + offs, buf,
114 PAGE_SIZE))
115 return -EFAULT;
116 } else
117 memcpy(dest + offs, buf, count - offs);
118out:
119 return 0;
120}
121
122static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
123{
124 return memcpy_hsa((void __force *) dest, src, count, TO_USER);
125}
126
127static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
128{
129 return memcpy_hsa(dest, src, count, TO_KERNEL);
130}
131
132static int memcpy_real(void *dest, unsigned long src, size_t count)
133{
134 unsigned long flags;
135 int rc = -EFAULT;
136 register unsigned long _dest asm("2") = (unsigned long) dest;
137 register unsigned long _len1 asm("3") = (unsigned long) count;
138 register unsigned long _src asm("4") = src;
139 register unsigned long _len2 asm("5") = (unsigned long) count;
140
141 if (count == 0)
142 return 0;
143 flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
144 asm volatile (
145 "0: mvcle %1,%2,0x0\n"
146 "1: jo 0b\n"
147 " lhi %0,0x0\n"
148 "2:\n"
149 EX_TABLE(1b,2b)
150 : "+d" (rc)
151 : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
152 : "cc", "memory");
153 __raw_local_irq_ssm(flags);
154
155 return rc;
156}
157
158static int memcpy_real_user(__user void *dest, unsigned long src, size_t count)
159{
160 static char buf[4096];
161 int offs = 0, size;
162
163 while (offs < count) {
164 size = min(sizeof(buf), count - offs);
165 if (memcpy_real(buf, src + offs, size))
166 return -EFAULT;
167 if (copy_to_user(dest + offs, buf, size))
168 return -EFAULT;
169 offs += size;
170 }
171 return 0;
172}
173
174#ifdef __s390x__
175/*
176 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
177 */
178static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
179 int cpu)
180{
181 int i;
182
183 for (i = 0; i < 16; i++) {
184 out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
185 out->s390.acc_regs[i] = in->s390x.acc_regs[i];
186 out->s390.ctrl_regs[i] =
187 in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
188 }
189 /* locore for 31 bit has only space for fpregs 0,2,4,6 */
190 out->s390.fp_regs[0] = in->s390x.fp_regs[0];
191 out->s390.fp_regs[1] = in->s390x.fp_regs[2];
192 out->s390.fp_regs[2] = in->s390x.fp_regs[4];
193 out->s390.fp_regs[3] = in->s390x.fp_regs[6];
194 memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
195 out->s390.psw[1] |= 0x8; /* set bit 12 */
196 memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
197 out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
198 out->s390.pref_reg = in->s390x.pref_reg;
199 out->s390.timer = in->s390x.timer;
200 out->s390.clk_cmp = in->s390x.clk_cmp;
201}
202
203static void __init s390x_to_s390_save_areas(void)
204{
205 int i = 1;
206 static union save_area tmp;
207
208 while (zfcpdump_save_areas[i]) {
209 s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
210 memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
211 i++;
212 }
213}
214
215#endif /* __s390x__ */
216
217static int __init init_cpu_info(enum arch_id arch)
218{
219 union save_area *sa;
220
221 /* get info for boot cpu from lowcore, stored in the HSA */
222
223 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
224 if (!sa) {
225 ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
226 return -ENOMEM;
227 }
228 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
229 ERROR_MSG("could not copy from HSA\n");
230 kfree(sa);
231 return -EIO;
232 }
233 zfcpdump_save_areas[0] = sa;
234
235#ifdef __s390x__
236 /* convert s390x regs to s390, if we are dumping an s390 Linux */
237
238 if (arch == ARCH_S390)
239 s390x_to_s390_save_areas();
240#endif
241
242 return 0;
243}
244
245static DEFINE_MUTEX(zcore_mutex);
246
247#define DUMP_VERSION 0x3
248#define DUMP_MAGIC 0xa8190173618f23fdULL
249#define DUMP_ARCH_S390X 2
250#define DUMP_ARCH_S390 1
251#define HEADER_SIZE 4096
252
253/* dump header dumped according to s390 crash dump format */
254
255struct zcore_header {
256 u64 magic;
257 u32 version;
258 u32 header_size;
259 u32 dump_level;
260 u32 page_size;
261 u64 mem_size;
262 u64 mem_start;
263 u64 mem_end;
264 u32 num_pages;
265 u32 pad1;
266 u64 tod;
267 cpuid_t cpu_id;
268 u32 arch_id;
269 u32 build_arch;
270 char pad2[4016];
271} __attribute__((packed,__aligned__(16)));
272
273static struct zcore_header zcore_header = {
274 .magic = DUMP_MAGIC,
275 .version = DUMP_VERSION,
276 .header_size = 4096,
277 .dump_level = 0,
278 .page_size = PAGE_SIZE,
279 .mem_start = 0,
280#ifdef __s390x__
281 .build_arch = DUMP_ARCH_S390X,
282#else
283 .build_arch = DUMP_ARCH_S390,
284#endif
285};
286
287/*
288 * Copy lowcore info to buffer. Use map in order to copy only register parts.
289 *
290 * @buf: User buffer
291 * @sa: Pointer to save area
292 * @sa_off: Offset in save area to copy
293 * @len: Number of bytes to copy
294 */
295static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
296{
297 int i;
298 char *lc_mask = (char*)&sys_info.lc_mask;
299
300 for (i = 0; i < len; i++) {
301 if (!lc_mask[i + sa_off])
302 continue;
303 if (copy_to_user(buf + i, sa + sa_off + i, 1))
304 return -EFAULT;
305 }
306 return 0;
307}
308
309/*
310 * Copy lowcores info to memory, if necessary
311 *
312 * @buf: User buffer
313 * @addr: Start address of buffer in dump memory
314 * @count: Size of buffer
315 */
316static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
317{
318 unsigned long end;
319 int i = 0;
320
321 if (count == 0)
322 return 0;
323
324 end = start + count;
325 while (zfcpdump_save_areas[i]) {
326 unsigned long cp_start, cp_end; /* copy range */
327 unsigned long sa_start, sa_end; /* save area range */
328 unsigned long prefix;
329 unsigned long sa_off, len, buf_off;
330
331 if (sys_info.arch == ARCH_S390)
332 prefix = zfcpdump_save_areas[i]->s390.pref_reg;
333 else
334 prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
335
336 sa_start = prefix + sys_info.sa_base;
337 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
338
339 if ((end < sa_start) || (start > sa_end))
340 goto next;
341 cp_start = max(start, sa_start);
342 cp_end = min(end, sa_end);
343
344 buf_off = cp_start - start;
345 sa_off = cp_start - sa_start;
346 len = cp_end - cp_start;
347
348 TRACE("copy_lc for: %lx\n", start);
349 if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
350 return -EFAULT;
351next:
352 i++;
353 }
354 return 0;
355}
356
357/*
358 * Read routine for zcore character device
359 * First 4K are dump header
360 * Next 32MB are HSA Memory
361 * Rest is read from absolute Memory
362 */
363static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
364 loff_t *ppos)
365{
366 unsigned long mem_start; /* Start address in memory */
367 size_t mem_offs; /* Offset in dump memory */
368 size_t hdr_count; /* Size of header part of output buffer */
369 size_t size;
370 int rc;
371
372 mutex_lock(&zcore_mutex);
373
374 if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
375 rc = -EINVAL;
376 goto fail;
377 }
378
379 count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
380
381 /* Copy dump header */
382 if (*ppos < HEADER_SIZE) {
383 size = min(count, (size_t) (HEADER_SIZE - *ppos));
384 if (copy_to_user(buf, &zcore_header + *ppos, size)) {
385 rc = -EFAULT;
386 goto fail;
387 }
388 hdr_count = size;
389 mem_start = 0;
390 } else {
391 hdr_count = 0;
392 mem_start = *ppos - HEADER_SIZE;
393 }
394
395 mem_offs = 0;
396
397 /* Copy from HSA data */
398 if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
399 size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
400 - mem_start));
401 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
402 if (rc)
403 goto fail;
404
405 mem_offs += size;
406 }
407
408 /* Copy from real mem */
409 size = count - mem_offs - hdr_count;
410 rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
411 size);
412 if (rc)
413 goto fail;
414
415 /*
416 * Since s390 dump analysis tools like lcrash or crash
417 * expect register sets in the prefix pages of the cpus,
418 * we copy them into the read buffer, if necessary.
419 * buf + hdr_count: Start of memory part of output buffer
420 * mem_start: Start memory address to copy from
421 * count - hdr_count: Size of memory area to copy
422 */
423 if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
424 rc = -EFAULT;
425 goto fail;
426 }
427 *ppos += count;
428fail:
429 mutex_unlock(&zcore_mutex);
430 return (rc < 0) ? rc : count;
431}
432
433static int zcore_open(struct inode *inode, struct file *filp)
434{
435 if (!hsa_available)
436 return -ENODATA;
437 else
438 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
439}
440
441static int zcore_release(struct inode *inode, struct file *filep)
442{
443 diag308(DIAG308_REL_HSA, NULL);
444 hsa_available = 0;
445 return 0;
446}
447
448static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
449{
450 loff_t rc;
451
452 mutex_lock(&zcore_mutex);
453 switch (orig) {
454 case 0:
455 file->f_pos = offset;
456 rc = file->f_pos;
457 break;
458 case 1:
459 file->f_pos += offset;
460 rc = file->f_pos;
461 break;
462 default:
463 rc = -EINVAL;
464 }
465 mutex_unlock(&zcore_mutex);
466 return rc;
467}
468
469static struct file_operations zcore_fops = {
470 .owner = THIS_MODULE,
471 .llseek = zcore_lseek,
472 .read = zcore_read,
473 .open = zcore_open,
474 .release = zcore_release,
475};
476
477
478static void __init set_s390_lc_mask(union save_area *map)
479{
480 memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
481 memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
482 memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
483 memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
484 memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
485 memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
486 memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
487 memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
488 memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
489}
490
491static void __init set_s390x_lc_mask(union save_area *map)
492{
493 memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
494 memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
495 memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
496 memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
497 memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
498 memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
499 memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
500 memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
501 memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
502 memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
503}
504
505/*
506 * Initialize dump globals for a given architecture
507 */
508static int __init sys_info_init(enum arch_id arch)
509{
510 switch (arch) {
511 case ARCH_S390X:
512 MSG("DETECTED 'S390X (64 bit) OS'\n");
513 sys_info.sa_base = SAVE_AREA_BASE_S390X;
514 sys_info.sa_size = sizeof(struct save_area_s390x);
515 set_s390x_lc_mask(&sys_info.lc_mask);
516 break;
517 case ARCH_S390:
518 MSG("DETECTED 'S390 (32 bit) OS'\n");
519 sys_info.sa_base = SAVE_AREA_BASE_S390;
520 sys_info.sa_size = sizeof(struct save_area_s390);
521 set_s390_lc_mask(&sys_info.lc_mask);
522 break;
523 default:
524 ERROR_MSG("unknown architecture 0x%x.\n",arch);
525 return -EINVAL;
526 }
527 sys_info.arch = arch;
528 if (init_cpu_info(arch)) {
529 ERROR_MSG("get cpu info failed\n");
530 return -ENOMEM;
531 }
532 sys_info.mem_size = real_memory_size;
533
534 return 0;
535}
536
537static int __init check_sdias(void)
538{
539 int rc, act_hsa_size;
540
541 rc = sclp_sdias_blk_count();
542 if (rc < 0) {
543 ERROR_MSG("Could not determine HSA size\n");
544 return rc;
545 }
546 act_hsa_size = (rc - 1) * PAGE_SIZE;
547 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
548 ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
549 return -EINVAL;
550 }
551 return 0;
552}
553
554static void __init zcore_header_init(int arch, struct zcore_header *hdr)
555{
556 if (arch == ARCH_S390X)
557 hdr->arch_id = DUMP_ARCH_S390X;
558 else
559 hdr->arch_id = DUMP_ARCH_S390;
560 hdr->mem_size = sys_info.mem_size;
561 hdr->mem_end = sys_info.mem_size;
562 hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
563 hdr->tod = get_clock();
564 get_cpu_id(&hdr->cpu_id);
565}
566
567extern int sdias_init(void);
568
569static int __init zcore_init(void)
570{
571 unsigned char arch;
572 int rc;
573
574 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
575 return -ENODATA;
576
577 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
578 debug_register_view(zcore_dbf, &debug_sprintf_view);
579 debug_set_level(zcore_dbf, 6);
580
581 TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
582 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
583 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
584
585 rc = sdias_init();
586 if (rc)
587 goto fail;
588
589 rc = check_sdias();
590 if (rc) {
591 ERROR_MSG("Dump initialization failed\n");
592 goto fail;
593 }
594
595 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
596 if (rc) {
597 ERROR_MSG("sdial memcpy for arch id failed\n");
598 goto fail;
599 }
600
601#ifndef __s390x__
602 if (arch == ARCH_S390X) {
603 ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
604 rc = -EINVAL;
605 goto fail;
606 }
607#endif
608
609 rc = sys_info_init(arch);
610 if (rc) {
611 ERROR_MSG("arch init failed\n");
612 goto fail;
613 }
614
615 zcore_header_init(arch, &zcore_header);
616
617 zcore_dir = debugfs_create_dir("zcore" , NULL);
618 if (!zcore_dir) {
619 rc = -ENOMEM;
620 goto fail;
621 }
622 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
623 &zcore_fops);
624 if (!zcore_file) {
625 debugfs_remove(zcore_dir);
626 rc = -ENOMEM;
627 goto fail;
628 }
629 hsa_available = 1;
630 return 0;
631
632fail:
633 diag308(DIAG308_REL_HSA, NULL);
634 return rc;
635}
636
637extern void sdias_exit(void);
638
639static void __exit zcore_exit(void)
640{
641 debug_unregister(zcore_dbf);
642 sdias_exit();
643 diag308(DIAG308_REL_HSA, NULL);
644}
645
646MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
647MODULE_DESCRIPTION("zcore module for zfcpdump support");
648MODULE_LICENSE("GPL");
649
650subsys_initcall(zcore_init);
651module_exit(zcore_exit);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index c490c2a1c2fc..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 common i/o drivers 2# Makefile for the S/390 common i/o drivers
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
6ccw_device-objs += device.o device_fsm.o device_ops.o 6ccw_device-objs += device.o device_fsm.o device_ops.o
7ccw_device-objs += device_id.o device_pgid.o device_status.o 7ccw_device-objs += device_id.o device_pgid.o device_status.o
8obj-y += ccw_device.o cmf.o 8obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5aeb68e732b0..e5ccda63e883 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
75{ 75{
76 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 76 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
77 77
78 mutex_lock(&gdev->reg_mutex);
78 __ccwgroup_remove_symlinks(gdev); 79 __ccwgroup_remove_symlinks(gdev);
79 device_unregister(dev); 80 device_unregister(dev);
81 mutex_unlock(&gdev->reg_mutex);
80} 82}
81 83
82static ssize_t 84static ssize_t
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
173 return -ENOMEM; 175 return -ENOMEM;
174 176
175 atomic_set(&gdev->onoff, 0); 177 atomic_set(&gdev->onoff, 0);
176 178 mutex_init(&gdev->reg_mutex);
179 mutex_lock(&gdev->reg_mutex);
177 for (i = 0; i < argc; i++) { 180 for (i = 0; i < argc; i++) {
178 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 181 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
179 182
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
183 || gdev->cdev[i]->id.driver_info != 186 || gdev->cdev[i]->id.driver_info !=
184 gdev->cdev[0]->id.driver_info) { 187 gdev->cdev[0]->id.driver_info) {
185 rc = -EINVAL; 188 rc = -EINVAL;
186 goto free_dev; 189 goto error;
187 } 190 }
188 /* Don't allow a device to belong to more than one group. */ 191 /* Don't allow a device to belong to more than one group. */
189 if (gdev->cdev[i]->dev.driver_data) { 192 if (gdev->cdev[i]->dev.driver_data) {
190 rc = -EINVAL; 193 rc = -EINVAL;
191 goto free_dev; 194 goto error;
192 } 195 }
193 gdev->cdev[i]->dev.driver_data = gdev; 196 gdev->cdev[i]->dev.driver_data = gdev;
194 } 197 }
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
203 gdev->cdev[0]->dev.bus_id); 206 gdev->cdev[0]->dev.bus_id);
204 207
205 rc = device_register(&gdev->dev); 208 rc = device_register(&gdev->dev);
206
207 if (rc) 209 if (rc)
208 goto free_dev; 210 goto error;
209 get_device(&gdev->dev); 211 get_device(&gdev->dev);
210 rc = device_create_file(&gdev->dev, &dev_attr_ungroup); 212 rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
211 213
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root,
216 218
217 rc = __ccwgroup_create_symlinks(gdev); 219 rc = __ccwgroup_create_symlinks(gdev);
218 if (!rc) { 220 if (!rc) {
221 mutex_unlock(&gdev->reg_mutex);
219 put_device(&gdev->dev); 222 put_device(&gdev->dev);
220 return 0; 223 return 0;
221 } 224 }
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root,
224error: 227error:
225 for (i = 0; i < argc; i++) 228 for (i = 0; i < argc; i++)
226 if (gdev->cdev[i]) { 229 if (gdev->cdev[i]) {
227 put_device(&gdev->cdev[i]->dev);
228 gdev->cdev[i]->dev.driver_data = NULL;
229 }
230 put_device(&gdev->dev);
231 return rc;
232free_dev:
233 for (i = 0; i < argc; i++)
234 if (gdev->cdev[i]) {
235 if (gdev->cdev[i]->dev.driver_data == gdev) 230 if (gdev->cdev[i]->dev.driver_data == gdev)
236 gdev->cdev[i]->dev.driver_data = NULL; 231 gdev->cdev[i]->dev.driver_data = NULL;
237 put_device(&gdev->cdev[i]->dev); 232 put_device(&gdev->cdev[i]->dev);
238 } 233 }
239 kfree(gdev); 234 mutex_unlock(&gdev->reg_mutex);
235 put_device(&gdev->dev);
240 return rc; 236 return rc;
241} 237}
242 238
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
422 get_driver(&cdriver->driver); 418 get_driver(&cdriver->driver);
423 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, 419 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
424 __ccwgroup_match_all))) { 420 __ccwgroup_match_all))) {
425 __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); 421 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
422
423 mutex_lock(&gdev->reg_mutex);
424 __ccwgroup_remove_symlinks(gdev);
426 device_unregister(dev); 425 device_unregister(dev);
426 mutex_unlock(&gdev->reg_mutex);
427 put_device(dev); 427 put_device(dev);
428 } 428 }
429 put_driver(&cdriver->driver); 429 put_driver(&cdriver->driver);
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
444 if (cdev->dev.driver_data) { 444 if (cdev->dev.driver_data) {
445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
446 if (get_device(&gdev->dev)) { 446 if (get_device(&gdev->dev)) {
447 mutex_lock(&gdev->reg_mutex);
447 if (device_is_registered(&gdev->dev)) 448 if (device_is_registered(&gdev->dev))
448 return gdev; 449 return gdev;
450 mutex_unlock(&gdev->reg_mutex);
449 put_device(&gdev->dev); 451 put_device(&gdev->dev);
450 } 452 }
451 return NULL; 453 return NULL;
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
465 if (gdev) { 467 if (gdev) {
466 __ccwgroup_remove_symlinks(gdev); 468 __ccwgroup_remove_symlinks(gdev);
467 device_unregister(&gdev->dev); 469 device_unregister(&gdev->dev);
470 mutex_unlock(&gdev->reg_mutex);
468 put_device(&gdev->dev); 471 put_device(&gdev->dev);
469 } 472 }
470} 473}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 000000000000..ac289e6eadfe
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,683 @@
1/*
2 * drivers/s390/cio/chp.c
3 *
4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
6 * Arnd Bergmann (arndb@de.ibm.com)
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 */
9
10#include <linux/bug.h>
11#include <linux/workqueue.h>
12#include <linux/spinlock.h>
13#include <linux/init.h>
14#include <linux/jiffies.h>
15#include <linux/wait.h>
16#include <linux/mutex.h>
17#include <asm/errno.h>
18#include <asm/chpid.h>
19#include <asm/sclp.h>
20
21#include "cio.h"
22#include "css.h"
23#include "ioasm.h"
24#include "cio_debug.h"
25#include "chp.h"
26
27#define to_channelpath(device) container_of(device, struct channel_path, dev)
28#define CHP_INFO_UPDATE_INTERVAL 1*HZ
29
30enum cfg_task_t {
31 cfg_none,
32 cfg_configure,
33 cfg_deconfigure
34};
35
36/* Map for pending configure tasks. */
37static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
38static DEFINE_MUTEX(cfg_lock);
39static int cfg_busy;
40
41/* Map for channel-path status. */
42static struct sclp_chp_info chp_info;
43static DEFINE_MUTEX(info_lock);
44
45/* Time after which channel-path status may be outdated. */
46static unsigned long chp_info_expires;
47
48/* Workqueue to perform pending configure tasks. */
49static struct workqueue_struct *chp_wq;
50static struct work_struct cfg_work;
51
52/* Wait queue for configure completion events. */
53static wait_queue_head_t cfg_wait_queue;
54
55/* Return channel_path struct for given chpid. */
56static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
57{
58 return css[chpid.cssid]->chps[chpid.id];
59}
60
61/* Set vary state for given chpid. */
62static void set_chp_logically_online(struct chp_id chpid, int onoff)
63{
64 chpid_to_chp(chpid)->state = onoff;
65}
66
67/* On succes return 0 if channel-path is varied offline, 1 if it is varied
68 * online. Return -ENODEV if channel-path is not registered. */
69int chp_get_status(struct chp_id chpid)
70{
71 return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
72}
73
74/**
75 * chp_get_sch_opm - return opm for subchannel
76 * @sch: subchannel
77 *
78 * Calculate and return the operational path mask (opm) based on the chpids
79 * used by the subchannel and the status of the associated channel-paths.
80 */
81u8 chp_get_sch_opm(struct subchannel *sch)
82{
83 struct chp_id chpid;
84 int opm;
85 int i;
86
87 opm = 0;
88 chp_id_init(&chpid);
89 for (i=0; i < 8; i++) {
90 opm <<= 1;
91 chpid.id = sch->schib.pmcw.chpid[i];
92 if (chp_get_status(chpid) != 0)
93 opm |= 1;
94 }
95 return opm;
96}
97
98/**
99 * chp_is_registered - check if a channel-path is registered
100 * @chpid: channel-path ID
101 *
102 * Return non-zero if a channel-path with the given chpid is registered,
103 * zero otherwise.
104 */
105int chp_is_registered(struct chp_id chpid)
106{
107 return chpid_to_chp(chpid) != NULL;
108}
109
110/*
111 * Function: s390_vary_chpid
112 * Varies the specified chpid online or offline
113 */
114static int s390_vary_chpid(struct chp_id chpid, int on)
115{
116 char dbf_text[15];
117 int status;
118
119 sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
120 chpid.id);
121 CIO_TRACE_EVENT( 2, dbf_text);
122
123 status = chp_get_status(chpid);
124 if (status < 0) {
125 printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
126 chpid.cssid, chpid.id);
127 return -EINVAL;
128 }
129
130 if (!on && !status) {
131 printk(KERN_ERR "chpid %x.%02x is already offline\n",
132 chpid.cssid, chpid.id);
133 return -EINVAL;
134 }
135
136 set_chp_logically_online(chpid, on);
137 chsc_chp_vary(chpid, on);
138 return 0;
139}
140
141/*
142 * Channel measurement related functions
143 */
144static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
145 loff_t off, size_t count)
146{
147 struct channel_path *chp;
148 unsigned int size;
149
150 chp = to_channelpath(container_of(kobj, struct device, kobj));
151 if (!chp->cmg_chars)
152 return 0;
153
154 size = sizeof(struct cmg_chars);
155
156 if (off > size)
157 return 0;
158 if (off + count > size)
159 count = size - off;
160 memcpy(buf, chp->cmg_chars + off, count);
161 return count;
162}
163
164static struct bin_attribute chp_measurement_chars_attr = {
165 .attr = {
166 .name = "measurement_chars",
167 .mode = S_IRUSR,
168 .owner = THIS_MODULE,
169 },
170 .size = sizeof(struct cmg_chars),
171 .read = chp_measurement_chars_read,
172};
173
174static void chp_measurement_copy_block(struct cmg_entry *buf,
175 struct channel_subsystem *css,
176 struct chp_id chpid)
177{
178 void *area;
179 struct cmg_entry *entry, reference_buf;
180 int idx;
181
182 if (chpid.id < 128) {
183 area = css->cub_addr1;
184 idx = chpid.id;
185 } else {
186 area = css->cub_addr2;
187 idx = chpid.id - 128;
188 }
189 entry = area + (idx * sizeof(struct cmg_entry));
190 do {
191 memcpy(buf, entry, sizeof(*entry));
192 memcpy(&reference_buf, entry, sizeof(*entry));
193 } while (reference_buf.values[0] != buf->values[0]);
194}
195
196static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
197 loff_t off, size_t count)
198{
199 struct channel_path *chp;
200 struct channel_subsystem *css;
201 unsigned int size;
202
203 chp = to_channelpath(container_of(kobj, struct device, kobj));
204 css = to_css(chp->dev.parent);
205
206 size = sizeof(struct cmg_entry);
207
208 /* Only allow single reads. */
209 if (off || count < size)
210 return 0;
211 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
212 count = size;
213 return count;
214}
215
216static struct bin_attribute chp_measurement_attr = {
217 .attr = {
218 .name = "measurement",
219 .mode = S_IRUSR,
220 .owner = THIS_MODULE,
221 },
222 .size = sizeof(struct cmg_entry),
223 .read = chp_measurement_read,
224};
225
226void chp_remove_cmg_attr(struct channel_path *chp)
227{
228 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
229 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
230}
231
232int chp_add_cmg_attr(struct channel_path *chp)
233{
234 int ret;
235
236 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
237 if (ret)
238 return ret;
239 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
240 if (ret)
241 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
242 return ret;
243}
244
245/*
246 * Files for the channel path entries.
247 */
248static ssize_t chp_status_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct channel_path *chp = container_of(dev, struct channel_path, dev);
252
253 if (!chp)
254 return 0;
255 return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
256 sprintf(buf, "offline\n"));
257}
258
259static ssize_t chp_status_write(struct device *dev,
260 struct device_attribute *attr,
261 const char *buf, size_t count)
262{
263 struct channel_path *cp = container_of(dev, struct channel_path, dev);
264 char cmd[10];
265 int num_args;
266 int error;
267
268 num_args = sscanf(buf, "%5s", cmd);
269 if (!num_args)
270 return count;
271
272 if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
273 error = s390_vary_chpid(cp->chpid, 1);
274 else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
275 error = s390_vary_chpid(cp->chpid, 0);
276 else
277 error = -EINVAL;
278
279 return error < 0 ? error : count;
280
281}
282
283static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
284
285static ssize_t chp_configure_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 struct channel_path *cp;
289 int status;
290
291 cp = container_of(dev, struct channel_path, dev);
292 status = chp_info_get_status(cp->chpid);
293 if (status < 0)
294 return status;
295
296 return snprintf(buf, PAGE_SIZE, "%d\n", status);
297}
298
299static int cfg_wait_idle(void);
300
301static ssize_t chp_configure_write(struct device *dev,
302 struct device_attribute *attr,
303 const char *buf, size_t count)
304{
305 struct channel_path *cp;
306 int val;
307 char delim;
308
309 if (sscanf(buf, "%d %c", &val, &delim) != 1)
310 return -EINVAL;
311 if (val != 0 && val != 1)
312 return -EINVAL;
313 cp = container_of(dev, struct channel_path, dev);
314 chp_cfg_schedule(cp->chpid, val);
315 cfg_wait_idle();
316
317 return count;
318}
319
320static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
321
322static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
323 char *buf)
324{
325 struct channel_path *chp = container_of(dev, struct channel_path, dev);
326
327 if (!chp)
328 return 0;
329 return sprintf(buf, "%x\n", chp->desc.desc);
330}
331
332static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
333
334static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
335 char *buf)
336{
337 struct channel_path *chp = to_channelpath(dev);
338
339 if (!chp)
340 return 0;
341 if (chp->cmg == -1) /* channel measurements not available */
342 return sprintf(buf, "unknown\n");
343 return sprintf(buf, "%x\n", chp->cmg);
344}
345
346static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
347
348static ssize_t chp_shared_show(struct device *dev,
349 struct device_attribute *attr, char *buf)
350{
351 struct channel_path *chp = to_channelpath(dev);
352
353 if (!chp)
354 return 0;
355 if (chp->shared == -1) /* channel measurements not available */
356 return sprintf(buf, "unknown\n");
357 return sprintf(buf, "%x\n", chp->shared);
358}
359
360static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
361
/* Default sysfs attributes published for every channel-path device. */
static struct attribute * chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_configure.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

/* Attribute group created on the channel-path kobject in chp_new(). */
static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};
374
375static void chp_release(struct device *dev)
376{
377 struct channel_path *cp;
378
379 cp = container_of(dev, struct channel_path, dev);
380 kfree(cp);
381}
382
383/**
384 * chp_new - register a new channel-path
385 * @chpid - channel-path ID
386 *
387 * Create and register data structure representing new channel-path. Return
388 * zero on success, non-zero otherwise.
389 */
390int chp_new(struct chp_id chpid)
391{
392 struct channel_path *chp;
393 int ret;
394
395 if (chp_is_registered(chpid))
396 return 0;
397 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
398 if (!chp)
399 return -ENOMEM;
400
401 /* fill in status, etc. */
402 chp->chpid = chpid;
403 chp->state = 1;
404 chp->dev.parent = &css[chpid.cssid]->device;
405 chp->dev.release = chp_release;
406 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
407 chpid.id);
408
409 /* Obtain channel path description and fill it in. */
410 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
411 if (ret)
412 goto out_free;
413 if ((chp->desc.flags & 0x80) == 0) {
414 ret = -ENODEV;
415 goto out_free;
416 }
417 /* Get channel-measurement characteristics. */
418 if (css_characteristics_avail && css_chsc_characteristics.scmc
419 && css_chsc_characteristics.secm) {
420 ret = chsc_get_channel_measurement_chars(chp);
421 if (ret)
422 goto out_free;
423 } else {
424 static int msg_done;
425
426 if (!msg_done) {
427 printk(KERN_WARNING "cio: Channel measurements not "
428 "available, continuing.\n");
429 msg_done = 1;
430 }
431 chp->cmg = -1;
432 }
433
434 /* make it known to the system */
435 ret = device_register(&chp->dev);
436 if (ret) {
437 printk(KERN_WARNING "%s: could not register %x.%02x\n",
438 __func__, chpid.cssid, chpid.id);
439 goto out_free;
440 }
441 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
442 if (ret) {
443 device_unregister(&chp->dev);
444 goto out_free;
445 }
446 mutex_lock(&css[chpid.cssid]->mutex);
447 if (css[chpid.cssid]->cm_enabled) {
448 ret = chp_add_cmg_attr(chp);
449 if (ret) {
450 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
451 device_unregister(&chp->dev);
452 mutex_unlock(&css[chpid.cssid]->mutex);
453 goto out_free;
454 }
455 }
456 css[chpid.cssid]->chps[chpid.id] = chp;
457 mutex_unlock(&css[chpid.cssid]->mutex);
458 return ret;
459out_free:
460 kfree(chp);
461 return ret;
462}
463
464/**
465 * chp_get_chp_desc - return newly allocated channel-path description
466 * @chpid: channel-path ID
467 *
468 * On success return a newly allocated copy of the channel-path description
469 * data associated with the given channel-path ID. Return %NULL on error.
470 */
471void *chp_get_chp_desc(struct chp_id chpid)
472{
473 struct channel_path *chp;
474 struct channel_path_desc *desc;
475
476 chp = chpid_to_chp(chpid);
477 if (!chp)
478 return NULL;
479 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
480 if (!desc)
481 return NULL;
482 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
483 return desc;
484}
485
486/**
487 * chp_process_crw - process channel-path status change
488 * @id: channel-path ID number
489 * @status: non-zero if channel-path has become available, zero otherwise
490 *
491 * Handle channel-report-words indicating that the status of a channel-path
492 * has changed.
493 */
494void chp_process_crw(int id, int status)
495{
496 struct chp_id chpid;
497
498 chp_id_init(&chpid);
499 chpid.id = id;
500 if (status) {
501 if (!chp_is_registered(chpid))
502 chp_new(chpid);
503 chsc_chp_online(chpid);
504 } else
505 chsc_chp_offline(chpid);
506}
507
508static inline int info_bit_num(struct chp_id id)
509{
510 return id.id + id.cssid * (__MAX_CHPID + 1);
511}
512
/* Force chp_info refresh on next call to info_update().  (The original
 * comment referenced info_validate(), which does not exist here.) */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	/* An expiry time in the past makes the cached data stale. */
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}
520
521/* Ensure that chp_info is up-to-date. */
522static int info_update(void)
523{
524 int rc;
525
526 mutex_lock(&info_lock);
527 rc = 0;
528 if (time_after(jiffies, chp_info_expires)) {
529 /* Data is too old, update. */
530 rc = sclp_chp_read_info(&chp_info);
531 chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
532 }
533 mutex_unlock(&info_lock);
534
535 return rc;
536}
537
538/**
539 * chp_info_get_status - retrieve configure status of a channel-path
540 * @chpid: channel-path ID
541 *
542 * On success, return 0 for standby, 1 for configured, 2 for reserved,
543 * 3 for not recognized. Return negative error code on error.
544 */
545int chp_info_get_status(struct chp_id chpid)
546{
547 int rc;
548 int bit;
549
550 rc = info_update();
551 if (rc)
552 return rc;
553
554 bit = info_bit_num(chpid);
555 mutex_lock(&info_lock);
556 if (!chp_test_bit(chp_info.recognized, bit))
557 rc = CHP_STATUS_NOT_RECOGNIZED;
558 else if (chp_test_bit(chp_info.configured, bit))
559 rc = CHP_STATUS_CONFIGURED;
560 else if (chp_test_bit(chp_info.standby, bit))
561 rc = CHP_STATUS_STANDBY;
562 else
563 rc = CHP_STATUS_RESERVED;
564 mutex_unlock(&info_lock);
565
566 return rc;
567}
568
/* Return configure task for chpid.  All callers visible in this file hold
 * cfg_lock while calling. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
	return chp_cfg_task[chpid.cssid][chpid.id];
}

/* Set configure task for chpid.  All callers visible in this file hold
 * cfg_lock while calling. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
580
/* Perform one configure/deconfigure request. Reschedule work function until
 * last request. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;

	/* Pick the first chpid with a pending task and consume it. */
	mutex_lock(&cfg_lock);
	t = cfg_none;
	chp_id_for_each(&chpid) {
		t = cfg_get_task(chpid);
		if (t != cfg_none) {
			cfg_set_task(chpid, cfg_none);
			break;
		}
	}
	mutex_unlock(&cfg_lock);

	/* Process the request without holding cfg_lock. */
	switch (t) {
	case cfg_configure:
		sclp_chp_configure(chpid);
		info_expire();
		chsc_chp_online(chpid);
		break;
	case cfg_deconfigure:
		sclp_chp_deconfigure(chpid);
		info_expire();
		chsc_chp_offline(chpid);
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		mutex_lock(&cfg_lock);
		cfg_busy = 0;
		mutex_unlock(&cfg_lock);
		/* Release anyone blocked in cfg_wait_idle(). */
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
	/* More tasks may still be pending - run the work function again. */
	queue_work(chp_wq, &cfg_work);
}
621
/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request.
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	mutex_lock(&cfg_lock);
	/* Record the task; cfg_func() picks it up asynchronously. */
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	cfg_busy = 1;
	mutex_unlock(&cfg_lock);
	queue_work(chp_wq, &cfg_work);
}
639
/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	mutex_lock(&cfg_lock);
	/* Only clear the task if a deconfigure is still queued but unrun. */
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	mutex_unlock(&cfg_lock);
}
655
656static int cfg_wait_idle(void)
657{
658 if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
659 return -ERESTARTSYS;
660 return 0;
661}
662
/* Subsystem init: set up the configure workqueue and register every
 * channel-path already recognized by the machine. */
static int __init chp_init(void)
{
	struct chp_id chpid;

	chp_wq = create_singlethread_workqueue("cio_chp");
	if (!chp_wq)
		return -ENOMEM;
	INIT_WORK(&cfg_work, cfg_func);
	init_waitqueue_head(&cfg_wait_queue);
	/* A failed info query is treated as non-fatal: init still succeeds
	 * and paths can be registered later via chp_process_crw(). */
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
			chp_new(chpid);
	}

	return 0;
}

subsys_initcall(chp_init);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 000000000000..65286563c592
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/cio/chp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_CHP_H
9#define S390_CHP_H S390_CHP_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/chpid.h>
14#include "chsc.h"
15
16#define CHP_STATUS_STANDBY 0
17#define CHP_STATUS_CONFIGURED 1
18#define CHP_STATUS_RESERVED 2
19#define CHP_STATUS_NOT_RECOGNIZED 3
20
21static inline int chp_test_bit(u8 *bitmap, int num)
22{
23 int byte = num >> 3;
24 int mask = 128 >> (num & 7);
25
26 return (bitmap[byte] & mask) ? 1 : 0;
27}
28
29
30struct channel_path {
31 struct chp_id chpid;
32 int state;
33 struct channel_path_desc desc;
34 /* Channel-measurement related stuff: */
35 int cmg;
36 int shared;
37 void *cmg_chars;
38 struct device dev;
39};
40
41int chp_get_status(struct chp_id chpid);
42u8 chp_get_sch_opm(struct subchannel *sch);
43int chp_is_registered(struct chp_id chpid);
44void *chp_get_chp_desc(struct chp_id chpid);
45void chp_process_crw(int id, int available);
46void chp_remove_cmg_attr(struct channel_path *chp);
47int chp_add_cmg_attr(struct channel_path *chp);
48int chp_new(struct chp_id chpid);
49void chp_cfg_schedule(struct chp_id chpid, int configure);
50void chp_cfg_cancel_deconfigure(struct chp_id chpid);
51int chp_info_get_status(struct chp_id chpid);
52
53#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e3817..ea92ac4d6577 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
15#include <linux/device.h> 15#include <linux/device.h>
16 16
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "css.h" 20#include "css.h"
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "ioasm.h" 23#include "ioasm.h"
24#include "chp.h"
23#include "chsc.h" 25#include "chsc.h"
24 26
25static void *sei_page; 27static void *sei_page;
26 28
27static int new_channel_path(int chpid); 29struct chsc_ssd_area {
28 30 struct chsc_header request;
29static inline void 31 u16 :10;
30set_chp_logically_online(int chp, int onoff) 32 u16 ssid:2;
31{ 33 u16 :4;
32 css[0]->chps[chp]->state = onoff; 34 u16 f_sch; /* first subchannel */
33} 35 u16 :16;
34 36 u16 l_sch; /* last subchannel */
35static int 37 u32 :32;
36get_chp_status(int chp) 38 struct chsc_header response;
37{ 39 u32 :32;
38 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); 40 u8 sch_valid : 1;
39} 41 u8 dev_valid : 1;
40 42 u8 st : 3; /* subchannel type */
41void 43 u8 zeroes : 3;
42chsc_validate_chpids(struct subchannel *sch) 44 u8 unit_addr; /* unit address */
43{ 45 u16 devno; /* device number */
44 int mask, chp; 46 u8 path_mask;
45 47 u8 fla_valid_mask;
46 for (chp = 0; chp <= 7; chp++) { 48 u16 sch; /* subchannel */
47 mask = 0x80 >> chp; 49 u8 chpid[8]; /* chpids 0-7 */
48 if (!get_chp_status(sch->schib.pmcw.chpid[chp])) 50 u16 fla[8]; /* full link addresses 0-7 */
49 /* disable using this path */ 51} __attribute__ ((packed));
50 sch->opm &= ~mask;
51 }
52}
53
54void
55chpid_is_actually_online(int chp)
56{
57 int state;
58
59 state = get_chp_status(chp);
60 if (state < 0) {
61 need_rescan = 1;
62 queue_work(slow_path_wq, &slow_path_work);
63 } else
64 WARN_ON(!state);
65}
66 52
67/* FIXME: this is _always_ called for every subchannel. shouldn't we 53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
68 * process more than one at a time? */
69static int
70chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
71{ 54{
72 int ccode, j; 55 unsigned long page;
73 56 struct chsc_ssd_area *ssd_area;
74 struct { 57 int ccode;
75 struct chsc_header request; 58 int ret;
76 u16 reserved1a:10; 59 int i;
77 u16 ssid:2; 60 int mask;
78 u16 reserved1b:4;
79 u16 f_sch; /* first subchannel */
80 u16 reserved2;
81 u16 l_sch; /* last subchannel */
82 u32 reserved3;
83 struct chsc_header response;
84 u32 reserved4;
85 u8 sch_valid : 1;
86 u8 dev_valid : 1;
87 u8 st : 3; /* subchannel type */
88 u8 zeroes : 3;
89 u8 unit_addr; /* unit address */
90 u16 devno; /* device number */
91 u8 path_mask;
92 u8 fla_valid_mask;
93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */
96 } __attribute__ ((packed)) *ssd_area;
97
98 ssd_area = page;
99 61
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63 if (!page)
64 return -ENOMEM;
65 ssd_area = (struct chsc_ssd_area *) page;
100 ssd_area->request.length = 0x0010; 66 ssd_area->request.length = 0x0010;
101 ssd_area->request.code = 0x0004; 67 ssd_area->request.code = 0x0004;
102 68 ssd_area->ssid = schid.ssid;
103 ssd_area->ssid = sch->schid.ssid; 69 ssd_area->f_sch = schid.sch_no;
104 ssd_area->f_sch = sch->schid.sch_no; 70 ssd_area->l_sch = schid.sch_no;
105 ssd_area->l_sch = sch->schid.sch_no;
106 71
107 ccode = chsc(ssd_area); 72 ccode = chsc(ssd_area);
73 /* Check response. */
108 if (ccode > 0) { 74 if (ccode > 0) {
109 pr_debug("chsc returned with ccode = %d\n", ccode); 75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
110 return (ccode == 3) ? -ENODEV : -EBUSY; 76 goto out_free;
111 } 77 }
112 78 if (ssd_area->response.code != 0x0001) {
113 switch (ssd_area->response.code) { 79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
114 case 0x0001: /* everything ok */ 80 schid.ssid, schid.sch_no,
115 break;
116 case 0x0002:
117 CIO_CRW_EVENT(2, "Invalid command!\n");
118 return -EINVAL;
119 case 0x0003:
120 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
121 return -EINVAL;
122 case 0x0004:
123 CIO_CRW_EVENT(2, "Model does not provide ssd\n");
124 return -EOPNOTSUPP;
125 default:
126 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
127 ssd_area->response.code); 81 ssd_area->response.code);
128 return -EIO; 82 ret = -EIO;
83 goto out_free;
129 } 84 }
130 85 if (!ssd_area->sch_valid) {
131 /* 86 ret = -ENODEV;
132 * ssd_area->st stores the type of the detected 87 goto out_free;
133 * subchannel, with the following definitions:
134 *
135 * 0: I/O subchannel: All fields have meaning
136 * 1: CHSC subchannel: Only sch_val, st and sch
137 * have meaning
138 * 2: Message subchannel: All fields except unit_addr
139 * have meaning
140 * 3: ADM subchannel: Only sch_val, st and sch
141 * have meaning
142 *
143 * Other types are currently undefined.
144 */
145 if (ssd_area->st > 3) { /* uhm, that looks strange... */
146 CIO_CRW_EVENT(0, "Strange subchannel type %d"
147 " for sch 0.%x.%04x\n", ssd_area->st,
148 sch->schid.ssid, sch->schid.sch_no);
149 /*
150 * There may have been a new subchannel type defined in the
151 * time since this code was written; since we don't know which
152 * fields have meaning and what to do with it we just jump out
153 */
154 return 0;
155 } else {
156 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
157 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
158 sch->schid.ssid, sch->schid.sch_no,
159 type[ssd_area->st]);
160
161 sch->ssd_info.valid = 1;
162 sch->ssd_info.type = ssd_area->st;
163 } 88 }
164 89 /* Copy data */
165 if (ssd_area->st == 0 || ssd_area->st == 2) { 90 ret = 0;
166 for (j = 0; j < 8; j++) { 91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
167 if (!((0x80 >> j) & ssd_area->path_mask & 92 if ((ssd_area->st != 0) && (ssd_area->st != 2))
168 ssd_area->fla_valid_mask)) 93 goto out_free;
169 continue; 94 ssd->path_mask = ssd_area->path_mask;
170 sch->ssd_info.chpid[j] = ssd_area->chpid[j]; 95 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
171 sch->ssd_info.fla[j] = ssd_area->fla[j]; 96 for (i = 0; i < 8; i++) {
97 mask = 0x80 >> i;
98 if (ssd_area->path_mask & mask) {
99 chp_id_init(&ssd->chpid[i]);
100 ssd->chpid[i].id = ssd_area->chpid[i];
172 } 101 }
102 if (ssd_area->fla_valid_mask & mask)
103 ssd->fla[i] = ssd_area->fla[i];
173 } 104 }
174 return 0; 105out_free:
106 free_page(page);
107 return ret;
175} 108}
176 109
177int 110static int check_for_io_on_path(struct subchannel *sch, int mask)
178css_get_ssd_info(struct subchannel *sch)
179{ 111{
180 int ret; 112 int cc;
181 void *page;
182 113
183 page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 114 cc = stsch(sch->schid, &sch->schib);
184 if (!page) 115 if (cc)
185 return -ENOMEM; 116 return 0;
186 spin_lock_irq(sch->lock); 117 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
187 ret = chsc_get_sch_desc_irq(sch, page); 118 return 1;
188 if (ret) { 119 return 0;
189 static int cio_chsc_err_msg; 120}
190 121
191 if (!cio_chsc_err_msg) { 122static void terminate_internal_io(struct subchannel *sch)
192 printk(KERN_ERR 123{
193 "chsc_get_sch_descriptions:" 124 if (cio_clear(sch)) {
194 " Error %d while doing chsc; " 125 /* Recheck device in case clear failed. */
195 "processing some machine checks may " 126 sch->lpm = 0;
196 "not work\n", ret); 127 if (device_trigger_verify(sch) != 0)
197 cio_chsc_err_msg = 1; 128 css_schedule_eval(sch->schid);
198 } 129 return;
199 }
200 spin_unlock_irq(sch->lock);
201 free_page((unsigned long)page);
202 if (!ret) {
203 int j, chpid, mask;
204 /* Allocate channel path structures, if needed. */
205 for (j = 0; j < 8; j++) {
206 mask = 0x80 >> j;
207 chpid = sch->ssd_info.chpid[j];
208 if ((sch->schib.pmcw.pim & mask) &&
209 (get_chp_status(chpid) < 0))
210 new_channel_path(chpid);
211 }
212 } 130 }
213 return ret; 131 /* Request retry of internal operation. */
132 device_set_intretry(sch);
133 /* Call handler. */
134 if (sch->driver && sch->driver->termination)
135 sch->driver->termination(&sch->dev);
214} 136}
215 137
216static int 138static int
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
219 int j; 141 int j;
220 int mask; 142 int mask;
221 struct subchannel *sch; 143 struct subchannel *sch;
222 struct channel_path *chpid; 144 struct chp_id *chpid;
223 struct schib schib; 145 struct schib schib;
224 146
225 sch = to_subchannel(dev); 147 sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
243 if (sch->schib.pmcw.pim == 0x80) 165 if (sch->schib.pmcw.pim == 0x80)
244 goto out_unreg; 166 goto out_unreg;
245 167
246 if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && 168 if (check_for_io_on_path(sch, mask)) {
247 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && 169 if (device_is_online(sch))
248 (sch->schib.pmcw.lpum == mask)) { 170 device_kill_io(sch);
249 int cc; 171 else {
250 172 terminate_internal_io(sch);
251 cc = cio_clear(sch); 173 /* Re-start path verification. */
252 if (cc == -ENODEV) 174 if (sch->driver && sch->driver->verify)
175 sch->driver->verify(&sch->dev);
176 }
177 } else {
178 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify)
180 sch->driver->verify(&sch->dev);
181 else if (sch->lpm == mask)
253 goto out_unreg; 182 goto out_unreg;
254 /* Request retry of internal operation. */
255 device_set_intretry(sch);
256 /* Call handler. */
257 if (sch->driver && sch->driver->termination)
258 sch->driver->termination(&sch->dev);
259 goto out_unlock;
260 } 183 }
261 184
262 /* trigger path verification. */
263 if (sch->driver && sch->driver->verify)
264 sch->driver->verify(&sch->dev);
265 else if (sch->lpm == mask)
266 goto out_unreg;
267out_unlock:
268 spin_unlock_irq(sch->lock); 185 spin_unlock_irq(sch->lock);
269 return 0; 186 return 0;
187
270out_unreg: 188out_unreg:
271 spin_unlock_irq(sch->lock);
272 sch->lpm = 0; 189 sch->lpm = 0;
273 if (css_enqueue_subchannel_slow(sch->schid)) { 190 spin_unlock_irq(sch->lock);
274 css_clear_subchannel_slow_list(); 191 css_schedule_eval(sch->schid);
275 need_rescan = 1;
276 }
277 return 0; 192 return 0;
278} 193}
279 194
280static void 195void chsc_chp_offline(struct chp_id chpid)
281s390_set_chpid_offline( __u8 chpid)
282{ 196{
283 char dbf_txt[15]; 197 char dbf_txt[15];
284 struct device *dev;
285 198
286 sprintf(dbf_txt, "chpr%x", chpid); 199 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
287 CIO_TRACE_EVENT(2, dbf_txt); 200 CIO_TRACE_EVENT(2, dbf_txt);
288 201
289 if (get_chp_status(chpid) <= 0) 202 if (chp_get_status(chpid) <= 0)
290 return; 203 return;
291 dev = get_device(&css[0]->chps[chpid]->dev); 204 bus_for_each_dev(&css_bus_type, NULL, &chpid,
292 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
293 s390_subchannel_remove_chpid); 205 s390_subchannel_remove_chpid);
294
295 if (need_rescan || css_slow_subchannels_exist())
296 queue_work(slow_path_wq, &slow_path_work);
297 put_device(dev);
298}
299
300struct res_acc_data {
301 struct channel_path *chp;
302 u32 fla_mask;
303 u16 fla;
304};
305
306static int
307s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
308{
309 int found;
310 int chp;
311 int ccode;
312
313 found = 0;
314 for (chp = 0; chp <= 7; chp++)
315 /*
316 * check if chpid is in information updated by ssd
317 */
318 if (sch->ssd_info.valid &&
319 sch->ssd_info.chpid[chp] == res_data->chp->id &&
320 (sch->ssd_info.fla[chp] & res_data->fla_mask)
321 == res_data->fla) {
322 found = 1;
323 break;
324 }
325
326 if (found == 0)
327 return 0;
328
329 /*
330 * Do a stsch to update our subchannel structure with the
331 * new path information and eventually check for logically
332 * offline chpids.
333 */
334 ccode = stsch(sch->schid, &sch->schib);
335 if (ccode > 0)
336 return 0;
337
338 return 0x80 >> chp;
339} 206}
340 207
341static int 208static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 209s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 210{
344 struct schib schib; 211 struct schib schib;
345 int ret;
346 /* 212 /*
347 * We don't know the device yet, but since a path 213 * We don't know the device yet, but since a path
348 * may be available now to the device we'll have 214 * may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
353 */ 219 */
354 if (stsch_err(schid, &schib)) 220 if (stsch_err(schid, &schib))
355 /* We're through */ 221 /* We're through */
356 return need_rescan ? -EAGAIN : -ENXIO; 222 return -ENXIO;
357 223
358 /* Put it on the slow path. */ 224 /* Put it on the slow path. */
359 ret = css_enqueue_subchannel_slow(schid); 225 css_schedule_eval(schid);
360 if (ret) { 226 return 0;
361 css_clear_subchannel_slow_list(); 227}
362 need_rescan = 1; 228
363 return -EAGAIN; 229struct res_acc_data {
230 struct chp_id chpid;
231 u32 fla_mask;
232 u16 fla;
233};
234
235static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
236 struct res_acc_data *data)
237{
238 int i;
239 int mask;
240
241 for (i = 0; i < 8; i++) {
242 mask = 0x80 >> i;
243 if (!(ssd->path_mask & mask))
244 continue;
245 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
246 continue;
247 if ((ssd->fla_valid_mask & mask) &&
248 ((ssd->fla[i] & data->fla_mask) != data->fla))
249 continue;
250 return mask;
364 } 251 }
365 return 0; 252 return 0;
366} 253}
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
379 return s390_process_res_acc_new_sch(schid); 266 return s390_process_res_acc_new_sch(schid);
380 267
381 spin_lock_irq(sch->lock); 268 spin_lock_irq(sch->lock);
382 269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
383 chp_mask = s390_process_res_acc_sch(res_data, sch); 270 if (chp_mask == 0)
384 271 goto out;
385 if (chp_mask == 0) { 272 if (stsch(sch->schid, &sch->schib))
386 spin_unlock_irq(sch->lock); 273 goto out;
387 put_device(&sch->dev);
388 return 0;
389 }
390 old_lpm = sch->lpm; 274 old_lpm = sch->lpm;
391 sch->lpm = ((sch->schib.pmcw.pim & 275 sch->lpm = ((sch->schib.pmcw.pim &
392 sch->schib.pmcw.pam & 276 sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
396 device_trigger_reprobe(sch); 280 device_trigger_reprobe(sch);
397 else if (sch->driver && sch->driver->verify) 281 else if (sch->driver && sch->driver->verify)
398 sch->driver->verify(&sch->dev); 282 sch->driver->verify(&sch->dev);
399 283out:
400 spin_unlock_irq(sch->lock); 284 spin_unlock_irq(sch->lock);
401 put_device(&sch->dev); 285 put_device(&sch->dev);
402 return 0; 286 return 0;
403} 287}
404 288
405 289static void s390_process_res_acc (struct res_acc_data *res_data)
406static int
407s390_process_res_acc (struct res_acc_data *res_data)
408{ 290{
409 int rc;
410 char dbf_txt[15]; 291 char dbf_txt[15];
411 292
412 sprintf(dbf_txt, "accpr%x", res_data->chp->id); 293 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
294 res_data->chpid.id);
413 CIO_TRACE_EVENT( 2, dbf_txt); 295 CIO_TRACE_EVENT( 2, dbf_txt);
414 if (res_data->fla != 0) { 296 if (res_data->fla != 0) {
415 sprintf(dbf_txt, "fla%x", res_data->fla); 297 sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
423 * The more information we have (info), the less scanning 305 * The more information we have (info), the less scanning
424 * will we have to do. 306 * will we have to do.
425 */ 307 */
426 rc = for_each_subchannel(__s390_process_res_acc, res_data); 308 for_each_subchannel(__s390_process_res_acc, res_data);
427 if (css_slow_subchannels_exist())
428 rc = -EAGAIN;
429 else if (rc != -EAGAIN)
430 rc = 0;
431 return rc;
432} 309}
433 310
434static int 311static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
480 /* ccdf has to be big enough for a link-incident record */ 357 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed)); 358} __attribute__ ((packed));
482 359
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) 360static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{ 361{
485 int chpid; 362 struct chp_id chpid;
363 int id;
486 364
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", 365 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid); 366 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4) 367 if (sei_area->rs != 4)
490 return 0; 368 return;
491 chpid = __get_chpid_from_lir(sei_area->ccdf); 369 id = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0) 370 if (id < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); 371 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else 372 else {
495 s390_set_chpid_offline(chpid); 373 chp_id_init(&chpid);
496 374 chpid.id = id;
497 return 0; 375 chsc_chp_offline(chpid);
376 }
498} 377}
499 378
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 379static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
501{ 380{
502 struct res_acc_data res_data; 381 struct res_acc_data res_data;
503 struct device *dev; 382 struct chp_id chpid;
504 int status; 383 int status;
505 int rc;
506 384
507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " 385 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); 386 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
509 if (sei_area->rs != 4) 387 if (sei_area->rs != 4)
510 return 0; 388 return;
389 chp_id_init(&chpid);
390 chpid.id = sei_area->rsid;
511 /* allocate a new channel path structure, if needed */ 391 /* allocate a new channel path structure, if needed */
512 status = get_chp_status(sei_area->rsid); 392 status = chp_get_status(chpid);
513 if (status < 0) 393 if (status < 0)
514 new_channel_path(sei_area->rsid); 394 chp_new(chpid);
515 else if (!status) 395 else if (!status)
516 return 0; 396 return;
517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
518 memset(&res_data, 0, sizeof(struct res_acc_data)); 397 memset(&res_data, 0, sizeof(struct res_acc_data));
519 res_data.chp = to_channelpath(dev); 398 res_data.chpid = chpid;
520 if ((sei_area->vf & 0xc0) != 0) { 399 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla; 400 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0) 401 if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
526 /* link address */ 405 /* link address */
527 res_data.fla_mask = 0xff00; 406 res_data.fla_mask = 0xff00;
528 } 407 }
529 rc = s390_process_res_acc(&res_data); 408 s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533} 409}
534 410
535static int chsc_process_sei(struct chsc_sei_area *sei_area) 411struct chp_config_data {
412 u8 map[32];
413 u8 op;
414 u8 pc;
415};
416
417static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
536{ 418{
537 int rc; 419 struct chp_config_data *data;
420 struct chp_id chpid;
421 int num;
422
423 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
424 if (sei_area->rs != 0)
425 return;
426 data = (struct chp_config_data *) &(sei_area->ccdf);
427 chp_id_init(&chpid);
428 for (num = 0; num <= __MAX_CHPID; num++) {
429 if (!chp_test_bit(data->map, num))
430 continue;
431 chpid.id = num;
432 printk(KERN_WARNING "cio: processing configure event %d for "
433 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
434 switch (data->op) {
435 case 0:
436 chp_cfg_schedule(chpid, 1);
437 break;
438 case 1:
439 chp_cfg_schedule(chpid, 0);
440 break;
441 case 2:
442 chp_cfg_cancel_deconfigure(chpid);
443 break;
444 }
445 }
446}
538 447
448static void chsc_process_sei(struct chsc_sei_area *sei_area)
449{
539 /* Check if we might have lost some information. */ 450 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40) 451 if (sei_area->flags & 0x40) {
541 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 452 CIO_CRW_EVENT(2, "chsc: event overflow\n");
453 css_schedule_eval_all();
454 }
542 /* which kind of information was stored? */ 455 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) { 456 switch (sei_area->cc) {
545 case 1: /* link incident*/ 457 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area); 458 chsc_process_sei_link_incident(sei_area);
547 break; 459 break;
548 case 2: /* i/o resource accessibiliy */ 460 case 2: /* i/o resource accessibiliy */
549 rc = chsc_process_sei_res_acc(sei_area); 461 chsc_process_sei_res_acc(sei_area);
462 break;
463 case 8: /* channel-path-configuration notification */
464 chsc_process_sei_chp_config(sei_area);
550 break; 465 break;
551 default: /* other stuff */ 466 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 467 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc); 468 sei_area->cc);
554 break; 469 break;
555 } 470 }
556
557 return rc;
558} 471}
559 472
560int chsc_process_crw(void) 473void chsc_process_crw(void)
561{ 474{
562 struct chsc_sei_area *sei_area; 475 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
565 476
566 if (!sei_page) 477 if (!sei_page)
567 return 0; 478 return;
568 /* Access to sei_page is serialized through machine check handler 479 /* Access to sei_page is serialized through machine check handler
569 * thread, so no need for locking. */ 480 * thread, so no need for locking. */
570 sei_area = sei_page; 481 sei_area = sei_page;
571 482
572 CIO_TRACE_EVENT( 2, "prcss"); 483 CIO_TRACE_EVENT( 2, "prcss");
573 ret = 0;
574 do { 484 do {
575 memset(sei_area, 0, sizeof(*sei_area)); 485 memset(sei_area, 0, sizeof(*sei_area));
576 sei_area->request.length = 0x0010; 486 sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
580 490
581 if (sei_area->response.code == 0x0001) { 491 if (sei_area->response.code == 0x0001) {
582 CIO_CRW_EVENT(4, "chsc: sei successful\n"); 492 CIO_CRW_EVENT(4, "chsc: sei successful\n");
583 rc = chsc_process_sei(sei_area); 493 chsc_process_sei(sei_area);
584 if (rc)
585 ret = rc;
586 } else { 494 } else {
587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 495 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
588 sei_area->response.code); 496 sei_area->response.code);
589 ret = 0;
590 break; 497 break;
591 } 498 }
592 } while (sei_area->flags & 0x80); 499 } while (sei_area->flags & 0x80);
593
594 return ret;
595} 500}
596 501
597static int 502static int
598__chp_add_new_sch(struct subchannel_id schid) 503__chp_add_new_sch(struct subchannel_id schid)
599{ 504{
600 struct schib schib; 505 struct schib schib;
601 int ret;
602 506
603 if (stsch_err(schid, &schib)) 507 if (stsch_err(schid, &schib))
604 /* We're through */ 508 /* We're through */
605 return need_rescan ? -EAGAIN : -ENXIO; 509 return -ENXIO;
606 510
607 /* Put it on the slow path. */ 511 /* Put it on the slow path. */
608 ret = css_enqueue_subchannel_slow(schid); 512 css_schedule_eval(schid);
609 if (ret) {
610 css_clear_subchannel_slow_list();
611 need_rescan = 1;
612 return -EAGAIN;
613 }
614 return 0; 513 return 0;
615} 514}
616 515
@@ -619,10 +518,10 @@ static int
619__chp_add(struct subchannel_id schid, void *data) 518__chp_add(struct subchannel_id schid, void *data)
620{ 519{
621 int i, mask; 520 int i, mask;
622 struct channel_path *chp; 521 struct chp_id *chpid;
623 struct subchannel *sch; 522 struct subchannel *sch;
624 523
625 chp = data; 524 chpid = data;
626 sch = get_subchannel_by_schid(schid); 525 sch = get_subchannel_by_schid(schid);
627 if (!sch) 526 if (!sch)
628 /* Check if the subchannel is now available. */ 527 /* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
631 for (i=0; i<8; i++) { 530 for (i=0; i<8; i++) {
632 mask = 0x80 >> i; 531 mask = 0x80 >> i;
633 if ((sch->schib.pmcw.pim & mask) && 532 if ((sch->schib.pmcw.pim & mask) &&
634 (sch->schib.pmcw.chpid[i] == chp->id)) { 533 (sch->schib.pmcw.chpid[i] == chpid->id)) {
635 if (stsch(sch->schid, &sch->schib) != 0) { 534 if (stsch(sch->schid, &sch->schib) != 0) {
636 /* Endgame. */ 535 /* Endgame. */
637 spin_unlock_irq(sch->lock); 536 spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
657 return 0; 556 return 0;
658} 557}
659 558
660static int 559void chsc_chp_online(struct chp_id chpid)
661chp_add(int chpid)
662{ 560{
663 int rc;
664 char dbf_txt[15]; 561 char dbf_txt[15];
665 struct device *dev;
666 562
667 if (!get_chp_status(chpid)) 563 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
668 return 0; /* no need to do the rest */
669
670 sprintf(dbf_txt, "cadd%x", chpid);
671 CIO_TRACE_EVENT(2, dbf_txt); 564 CIO_TRACE_EVENT(2, dbf_txt);
672 565
673 dev = get_device(&css[0]->chps[chpid]->dev); 566 if (chp_get_status(chpid) != 0)
674 rc = for_each_subchannel(__chp_add, to_channelpath(dev)); 567 for_each_subchannel(__chp_add, &chpid);
675 if (css_slow_subchannels_exist())
676 rc = -EAGAIN;
677 if (rc != -EAGAIN)
678 rc = 0;
679 put_device(dev);
680 return rc;
681} 568}
682 569
683/* 570static void __s390_subchannel_vary_chpid(struct subchannel *sch,
684 * Handling of crw machine checks with channel path source. 571 struct chp_id chpid, int on)
685 */
686int
687chp_process_crw(int chpid, int on)
688{
689 if (on == 0) {
690 /* Path has gone. We use the link incident routine.*/
691 s390_set_chpid_offline(chpid);
692 return 0; /* De-register is async anyway. */
693 }
694 /*
695 * Path has come. Allocate a new channel path structure,
696 * if needed.
697 */
698 if (get_chp_status(chpid) < 0)
699 new_channel_path(chpid);
700 /* Avoid the extra overhead in process_rec_acc. */
701 return chp_add(chpid);
702}
703
704static int check_for_io_on_path(struct subchannel *sch, int index)
705{
706 int cc;
707
708 cc = stsch(sch->schid, &sch->schib);
709 if (cc)
710 return 0;
711 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
712 return 1;
713 return 0;
714}
715
716static void terminate_internal_io(struct subchannel *sch)
717{
718 if (cio_clear(sch)) {
719 /* Recheck device in case clear failed. */
720 sch->lpm = 0;
721 if (device_trigger_verify(sch) != 0) {
722 if(css_enqueue_subchannel_slow(sch->schid)) {
723 css_clear_subchannel_slow_list();
724 need_rescan = 1;
725 }
726 }
727 return;
728 }
729 /* Request retry of internal operation. */
730 device_set_intretry(sch);
731 /* Call handler. */
732 if (sch->driver && sch->driver->termination)
733 sch->driver->termination(&sch->dev);
734}
735
736static void
737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
738{ 572{
739 int chp, old_lpm; 573 int chp, old_lpm;
574 int mask;
740 unsigned long flags; 575 unsigned long flags;
741 576
742 if (!sch->ssd_info.valid)
743 return;
744
745 spin_lock_irqsave(sch->lock, flags); 577 spin_lock_irqsave(sch->lock, flags);
746 old_lpm = sch->lpm; 578 old_lpm = sch->lpm;
747 for (chp = 0; chp < 8; chp++) { 579 for (chp = 0; chp < 8; chp++) {
748 if (sch->ssd_info.chpid[chp] != chpid) 580 mask = 0x80 >> chp;
581 if (!(sch->ssd_info.path_mask & mask))
582 continue;
583 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
749 continue; 584 continue;
750 585
751 if (on) { 586 if (on) {
752 sch->opm |= (0x80 >> chp); 587 sch->opm |= mask;
753 sch->lpm |= (0x80 >> chp); 588 sch->lpm |= mask;
754 if (!old_lpm) 589 if (!old_lpm)
755 device_trigger_reprobe(sch); 590 device_trigger_reprobe(sch);
756 else if (sch->driver && sch->driver->verify) 591 else if (sch->driver && sch->driver->verify)
757 sch->driver->verify(&sch->dev); 592 sch->driver->verify(&sch->dev);
758 break; 593 break;
759 } 594 }
760 sch->opm &= ~(0x80 >> chp); 595 sch->opm &= ~mask;
761 sch->lpm &= ~(0x80 >> chp); 596 sch->lpm &= ~mask;
762 if (check_for_io_on_path(sch, chp)) { 597 if (check_for_io_on_path(sch, mask)) {
763 if (device_is_online(sch)) 598 if (device_is_online(sch))
764 /* Path verification is done after killing. */ 599 /* Path verification is done after killing. */
765 device_kill_io(sch); 600 device_kill_io(sch);
766 else 601 else {
767 /* Kill and retry internal I/O. */ 602 /* Kill and retry internal I/O. */
768 terminate_internal_io(sch); 603 terminate_internal_io(sch);
769 } else if (!sch->lpm) { 604 /* Re-start path verification. */
770 if (device_trigger_verify(sch) != 0) { 605 if (sch->driver && sch->driver->verify)
771 if (css_enqueue_subchannel_slow(sch->schid)) { 606 sch->driver->verify(&sch->dev);
772 css_clear_subchannel_slow_list();
773 need_rescan = 1;
774 }
775 } 607 }
608 } else if (!sch->lpm) {
609 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid);
776 } else if (sch->driver && sch->driver->verify) 611 } else if (sch->driver && sch->driver->verify)
777 sch->driver->verify(&sch->dev); 612 sch->driver->verify(&sch->dev);
778 break; 613 break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
780 spin_unlock_irqrestore(sch->lock, flags); 615 spin_unlock_irqrestore(sch->lock, flags);
781} 616}
782 617
783static int 618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
784s390_subchannel_vary_chpid_off(struct device *dev, void *data)
785{ 619{
786 struct subchannel *sch; 620 struct subchannel *sch;
787 __u8 *chpid; 621 struct chp_id *chpid;
788 622
789 sch = to_subchannel(dev); 623 sch = to_subchannel(dev);
790 chpid = data; 624 chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
793 return 0; 627 return 0;
794} 628}
795 629
796static int 630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
797s390_subchannel_vary_chpid_on(struct device *dev, void *data)
798{ 631{
799 struct subchannel *sch; 632 struct subchannel *sch;
800 __u8 *chpid; 633 struct chp_id *chpid;
801 634
802 sch = to_subchannel(dev); 635 sch = to_subchannel(dev);
803 chpid = data; 636 chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
821 /* We're through */ 654 /* We're through */
822 return -ENXIO; 655 return -ENXIO;
823 /* Put it on the slow path. */ 656 /* Put it on the slow path. */
824 if (css_enqueue_subchannel_slow(schid)) { 657 css_schedule_eval(schid);
825 css_clear_subchannel_slow_list();
826 need_rescan = 1;
827 return -EAGAIN;
828 }
829 return 0; 658 return 0;
830} 659}
831 660
832/* 661/**
833 * Function: s390_vary_chpid 662 * chsc_chp_vary - propagate channel-path vary operation to subchannels
834 * Varies the specified chpid online or offline 663 * @chpid: channl-path ID
664 * @on: non-zero for vary online, zero for vary offline
835 */ 665 */
836static int 666int chsc_chp_vary(struct chp_id chpid, int on)
837s390_vary_chpid( __u8 chpid, int on)
838{ 667{
839 char dbf_text[15];
840 int status;
841
842 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
843 CIO_TRACE_EVENT( 2, dbf_text);
844
845 status = get_chp_status(chpid);
846 if (status < 0) {
847 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
848 return -EINVAL;
849 }
850
851 if (!on && !status) {
852 printk(KERN_ERR "chpid %x is already offline\n", chpid);
853 return -EINVAL;
854 }
855
856 set_chp_logically_online(chpid, on);
857
858 /* 668 /*
859 * Redo PathVerification on the devices the chpid connects to 669 * Redo PathVerification on the devices the chpid connects to
860 */ 670 */
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
865 if (on) 675 if (on)
866 /* Scan for new devices on varied on path. */ 676 /* Scan for new devices on varied on path. */
867 for_each_subchannel(__s390_vary_chpid_on, NULL); 677 for_each_subchannel(__s390_vary_chpid_on, NULL);
868 if (need_rescan || css_slow_subchannels_exist())
869 queue_work(slow_path_wq, &slow_path_work);
870 return 0; 678 return 0;
871} 679}
872 680
873/*
874 * Channel measurement related functions
875 */
876static ssize_t
877chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
878 size_t count)
879{
880 struct channel_path *chp;
881 unsigned int size;
882
883 chp = to_channelpath(container_of(kobj, struct device, kobj));
884 if (!chp->cmg_chars)
885 return 0;
886
887 size = sizeof(struct cmg_chars);
888
889 if (off > size)
890 return 0;
891 if (off + count > size)
892 count = size - off;
893 memcpy(buf, chp->cmg_chars + off, count);
894 return count;
895}
896
897static struct bin_attribute chp_measurement_chars_attr = {
898 .attr = {
899 .name = "measurement_chars",
900 .mode = S_IRUSR,
901 .owner = THIS_MODULE,
902 },
903 .size = sizeof(struct cmg_chars),
904 .read = chp_measurement_chars_read,
905};
906
907static void
908chp_measurement_copy_block(struct cmg_entry *buf,
909 struct channel_subsystem *css, int chpid)
910{
911 void *area;
912 struct cmg_entry *entry, reference_buf;
913 int idx;
914
915 if (chpid < 128) {
916 area = css->cub_addr1;
917 idx = chpid;
918 } else {
919 area = css->cub_addr2;
920 idx = chpid - 128;
921 }
922 entry = area + (idx * sizeof(struct cmg_entry));
923 do {
924 memcpy(buf, entry, sizeof(*entry));
925 memcpy(&reference_buf, entry, sizeof(*entry));
926 } while (reference_buf.values[0] != buf->values[0]);
927}
928
929static ssize_t
930chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
931{
932 struct channel_path *chp;
933 struct channel_subsystem *css;
934 unsigned int size;
935
936 chp = to_channelpath(container_of(kobj, struct device, kobj));
937 css = to_css(chp->dev.parent);
938
939 size = sizeof(struct cmg_entry);
940
941 /* Only allow single reads. */
942 if (off || count < size)
943 return 0;
944 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
945 count = size;
946 return count;
947}
948
949static struct bin_attribute chp_measurement_attr = {
950 .attr = {
951 .name = "measurement",
952 .mode = S_IRUSR,
953 .owner = THIS_MODULE,
954 },
955 .size = sizeof(struct cmg_entry),
956 .read = chp_measurement_read,
957};
958
959static void
960chsc_remove_chp_cmg_attr(struct channel_path *chp)
961{
962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
964}
965
966static int
967chsc_add_chp_cmg_attr(struct channel_path *chp)
968{
969 int ret;
970
971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
972 if (ret)
973 return ret;
974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
975 if (ret)
976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
977 return ret;
978}
979
980static void 681static void
981chsc_remove_cmg_attr(struct channel_subsystem *css) 682chsc_remove_cmg_attr(struct channel_subsystem *css)
982{ 683{
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
985 for (i = 0; i <= __MAX_CHPID; i++) { 686 for (i = 0; i <= __MAX_CHPID; i++) {
986 if (!css->chps[i]) 687 if (!css->chps[i])
987 continue; 688 continue;
988 chsc_remove_chp_cmg_attr(css->chps[i]); 689 chp_remove_cmg_attr(css->chps[i]);
989 } 690 }
990} 691}
991 692
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
998 for (i = 0; i <= __MAX_CHPID; i++) { 699 for (i = 0; i <= __MAX_CHPID; i++) {
999 if (!css->chps[i]) 700 if (!css->chps[i])
1000 continue; 701 continue;
1001 ret = chsc_add_chp_cmg_attr(css->chps[i]); 702 ret = chp_add_cmg_attr(css->chps[i]);
1002 if (ret) 703 if (ret)
1003 goto cleanup; 704 goto cleanup;
1004 } 705 }
@@ -1007,12 +708,11 @@ cleanup:
1007 for (--i; i >= 0; i--) { 708 for (--i; i >= 0; i--) {
1008 if (!css->chps[i]) 709 if (!css->chps[i])
1009 continue; 710 continue;
1010 chsc_remove_chp_cmg_attr(css->chps[i]); 711 chp_remove_cmg_attr(css->chps[i]);
1011 } 712 }
1012 return ret; 713 return ret;
1013} 714}
1014 715
1015
1016static int 716static int
1017__chsc_do_secm(struct channel_subsystem *css, int enable, void *page) 717__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1018{ 718{
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
1118 } else 818 } else
1119 chsc_remove_cmg_attr(css); 819 chsc_remove_cmg_attr(css);
1120 } 820 }
1121 if (enable && !css->cm_enabled) { 821 if (!css->cm_enabled) {
1122 free_page((unsigned long)css->cub_addr1); 822 free_page((unsigned long)css->cub_addr1);
1123 free_page((unsigned long)css->cub_addr2); 823 free_page((unsigned long)css->cub_addr2);
1124 } 824 }
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
1127 return ret; 827 return ret;
1128} 828}
1129 829
1130/* 830int chsc_determine_channel_path_description(struct chp_id chpid,
1131 * Files for the channel path entries. 831 struct channel_path_desc *desc)
1132 */
1133static ssize_t
1134chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
1135{
1136 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1137
1138 if (!chp)
1139 return 0;
1140 return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
1141 sprintf(buf, "offline\n"));
1142}
1143
1144static ssize_t
1145chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1146{
1147 struct channel_path *cp = container_of(dev, struct channel_path, dev);
1148 char cmd[10];
1149 int num_args;
1150 int error;
1151
1152 num_args = sscanf(buf, "%5s", cmd);
1153 if (!num_args)
1154 return count;
1155
1156 if (!strnicmp(cmd, "on", 2))
1157 error = s390_vary_chpid(cp->id, 1);
1158 else if (!strnicmp(cmd, "off", 3))
1159 error = s390_vary_chpid(cp->id, 0);
1160 else
1161 error = -EINVAL;
1162
1163 return error < 0 ? error : count;
1164
1165}
1166
1167static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
1168
1169static ssize_t
1170chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1171{
1172 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1173
1174 if (!chp)
1175 return 0;
1176 return sprintf(buf, "%x\n", chp->desc.desc);
1177}
1178
1179static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
1180
1181static ssize_t
1182chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
1183{
1184 struct channel_path *chp = to_channelpath(dev);
1185
1186 if (!chp)
1187 return 0;
1188 if (chp->cmg == -1) /* channel measurements not available */
1189 return sprintf(buf, "unknown\n");
1190 return sprintf(buf, "%x\n", chp->cmg);
1191}
1192
1193static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
1194
1195static ssize_t
1196chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
1197{
1198 struct channel_path *chp = to_channelpath(dev);
1199
1200 if (!chp)
1201 return 0;
1202 if (chp->shared == -1) /* channel measurements not available */
1203 return sprintf(buf, "unknown\n");
1204 return sprintf(buf, "%x\n", chp->shared);
1205}
1206
1207static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
1208
1209static struct attribute * chp_attrs[] = {
1210 &dev_attr_status.attr,
1211 &dev_attr_type.attr,
1212 &dev_attr_cmg.attr,
1213 &dev_attr_shared.attr,
1214 NULL,
1215};
1216
1217static struct attribute_group chp_attr_group = {
1218 .attrs = chp_attrs,
1219};
1220
1221static void
1222chp_release(struct device *dev)
1223{
1224 struct channel_path *cp;
1225
1226 cp = container_of(dev, struct channel_path, dev);
1227 kfree(cp);
1228}
1229
1230static int
1231chsc_determine_channel_path_description(int chpid,
1232 struct channel_path_desc *desc)
1233{ 832{
1234 int ccode, ret; 833 int ccode, ret;
1235 834
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
1252 scpd_area->request.length = 0x0010; 851 scpd_area->request.length = 0x0010;
1253 scpd_area->request.code = 0x0002; 852 scpd_area->request.code = 0x0002;
1254 853
1255 scpd_area->first_chpid = chpid; 854 scpd_area->first_chpid = chpid.id;
1256 scpd_area->last_chpid = chpid; 855 scpd_area->last_chpid = chpid.id;
1257 856
1258 ccode = chsc(scpd_area); 857 ccode = chsc(scpd_area);
1259 if (ccode > 0) { 858 if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
1316 } 915 }
1317} 916}
1318 917
1319static int 918int chsc_get_channel_measurement_chars(struct channel_path *chp)
1320chsc_get_channel_measurement_chars(struct channel_path *chp)
1321{ 919{
1322 int ccode, ret; 920 int ccode, ret;
1323 921
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1349 scmc_area->request.length = 0x0010; 947 scmc_area->request.length = 0x0010;
1350 scmc_area->request.code = 0x0022; 948 scmc_area->request.code = 0x0022;
1351 949
1352 scmc_area->first_chpid = chp->id; 950 scmc_area->first_chpid = chp->chpid.id;
1353 scmc_area->last_chpid = chp->id; 951 scmc_area->last_chpid = chp->chpid.id;
1354 952
1355 ccode = chsc(scmc_area); 953 ccode = chsc(scmc_area);
1356 if (ccode > 0) { 954 if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
1392 return ret; 990 return ret;
1393} 991}
1394 992
1395/*
1396 * Entries for chpids on the system bus.
1397 * This replaces /proc/chpids.
1398 */
1399static int
1400new_channel_path(int chpid)
1401{
1402 struct channel_path *chp;
1403 int ret;
1404
1405 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
1406 if (!chp)
1407 return -ENOMEM;
1408
1409 /* fill in status, etc. */
1410 chp->id = chpid;
1411 chp->state = 1;
1412 chp->dev.parent = &css[0]->device;
1413 chp->dev.release = chp_release;
1414 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
1415
1416 /* Obtain channel path description and fill it in. */
1417 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
1418 if (ret)
1419 goto out_free;
1420 /* Get channel-measurement characteristics. */
1421 if (css_characteristics_avail && css_chsc_characteristics.scmc
1422 && css_chsc_characteristics.secm) {
1423 ret = chsc_get_channel_measurement_chars(chp);
1424 if (ret)
1425 goto out_free;
1426 } else {
1427 static int msg_done;
1428
1429 if (!msg_done) {
1430 printk(KERN_WARNING "cio: Channel measurements not "
1431 "available, continuing.\n");
1432 msg_done = 1;
1433 }
1434 chp->cmg = -1;
1435 }
1436
1437 /* make it known to the system */
1438 ret = device_register(&chp->dev);
1439 if (ret) {
1440 printk(KERN_WARNING "%s: could not register %02x\n",
1441 __func__, chpid);
1442 goto out_free;
1443 }
1444 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
1445 if (ret) {
1446 device_unregister(&chp->dev);
1447 goto out_free;
1448 }
1449 mutex_lock(&css[0]->mutex);
1450 if (css[0]->cm_enabled) {
1451 ret = chsc_add_chp_cmg_attr(chp);
1452 if (ret) {
1453 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
1454 device_unregister(&chp->dev);
1455 mutex_unlock(&css[0]->mutex);
1456 goto out_free;
1457 }
1458 }
1459 css[0]->chps[chpid] = chp;
1460 mutex_unlock(&css[0]->mutex);
1461 return ret;
1462out_free:
1463 kfree(chp);
1464 return ret;
1465}
1466
1467void *
1468chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1469{
1470 struct channel_path *chp;
1471 struct channel_path_desc *desc;
1472
1473 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1474 if (!chp)
1475 return NULL;
1476 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
1477 if (!desc)
1478 return NULL;
1479 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
1480 return desc;
1481}
1482
1483static int __init 993static int __init
1484chsc_alloc_sei_area(void) 994chsc_alloc_sei_area(void)
1485{ 995{
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0fb2b024208f..2ad81d11cf7b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,9 +1,10 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define CHSC_SEI_ACC_CHPID 1 4#include <linux/types.h>
5#define CHSC_SEI_ACC_LINKADDR 2 5#include <linux/device.h>
6#define CHSC_SEI_ACC_FULLLINKADDR 3 6#include <asm/chpid.h>
7#include "schid.h"
7 8
8#define CHSC_SDA_OC_MSS 0x2 9#define CHSC_SDA_OC_MSS 0x2
9 10
@@ -33,23 +34,9 @@ struct channel_path_desc {
33 u8 chpp; 34 u8 chpp;
34} __attribute__ ((packed)); 35} __attribute__ ((packed));
35 36
36struct channel_path { 37struct channel_path;
37 int id;
38 int state;
39 struct channel_path_desc desc;
40 /* Channel-measurement related stuff: */
41 int cmg;
42 int shared;
43 void *cmg_chars;
44 struct device dev;
45};
46 38
47extern void s390_process_css( void ); 39extern void chsc_process_crw(void);
48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
53 40
54struct css_general_char { 41struct css_general_char {
55 u64 : 41; 42 u64 : 41;
@@ -82,15 +69,26 @@ struct css_chsc_char {
82extern struct css_general_char css_general_characteristics; 69extern struct css_general_char css_general_characteristics;
83extern struct css_chsc_char css_chsc_characteristics; 70extern struct css_chsc_char css_chsc_characteristics;
84 71
72struct chsc_ssd_info {
73 u8 path_mask;
74 u8 fla_valid_mask;
75 struct chp_id chpid[8];
76 u16 fla[8];
77};
78extern int chsc_get_ssd_info(struct subchannel_id schid,
79 struct chsc_ssd_info *ssd);
85extern int chsc_determine_css_characteristics(void); 80extern int chsc_determine_css_characteristics(void);
86extern int css_characteristics_avail; 81extern int css_characteristics_avail;
87 82
88extern void *chsc_get_chp_desc(struct subchannel*, int);
89
90extern int chsc_enable_facility(int); 83extern int chsc_enable_facility(int);
91struct channel_subsystem; 84struct channel_subsystem;
92extern int chsc_secm(struct channel_subsystem *, int); 85extern int chsc_secm(struct channel_subsystem *, int);
93 86
94#define to_channelpath(device) container_of(device, struct channel_path, dev) 87int chsc_chp_vary(struct chp_id chpid, int on);
88int chsc_determine_channel_path_description(struct chp_id chpid,
89 struct channel_path_desc *desc);
90void chsc_chp_online(struct chp_id chpid);
91void chsc_chp_offline(struct chp_id chpid);
92int chsc_get_channel_measurement_chars(struct channel_path *chp);
95 93
96#endif 94#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 9cb129ab5be5..ea1defba5693 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -22,6 +22,7 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23#include <asm/reset.h> 23#include <asm/reset.h>
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h>
25#include "airq.h" 26#include "airq.h"
26#include "cio.h" 27#include "cio.h"
27#include "css.h" 28#include "css.h"
@@ -29,6 +30,7 @@
29#include "ioasm.h" 30#include "ioasm.h"
30#include "blacklist.h" 31#include "blacklist.h"
31#include "cio_debug.h" 32#include "cio_debug.h"
33#include "chp.h"
32#include "../s390mach.h" 34#include "../s390mach.h"
33 35
34debug_info_t *cio_debug_msg_id; 36debug_info_t *cio_debug_msg_id;
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
592 err = -ENODEV; 594 err = -ENODEV;
593 goto out; 595 goto out;
594 } 596 }
595 sch->opm = 0xff; 597 if (cio_is_console(sch->schid))
596 if (!cio_is_console(sch->schid)) 598 sch->opm = 0xff;
597 chsc_validate_chpids(sch); 599 else
600 sch->opm = chp_get_sch_opm(sch);
598 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
599 602
600 CIO_DEBUG(KERN_INFO, 0, 603 CIO_DEBUG(KERN_INFO, 0,
@@ -954,6 +957,7 @@ static void css_reset(void)
954{ 957{
955 int i, ret; 958 int i, ret;
956 unsigned long long timeout; 959 unsigned long long timeout;
960 struct chp_id chpid;
957 961
958 /* Reset subchannels. */ 962 /* Reset subchannels. */
959 for_each_subchannel(__shutdown_subchannel_easy, NULL); 963 for_each_subchannel(__shutdown_subchannel_easy, NULL);
@@ -963,8 +967,10 @@ static void css_reset(void)
963 __ctl_set_bit(14, 28); 967 __ctl_set_bit(14, 28);
964 /* Temporarily reenable machine checks. */ 968 /* Temporarily reenable machine checks. */
965 local_mcck_enable(); 969 local_mcck_enable();
970 chp_id_init(&chpid);
966 for (i = 0; i <= __MAX_CHPID; i++) { 971 for (i = 0; i <= __MAX_CHPID; i++) {
967 ret = rchp(i); 972 chpid.id = i;
973 ret = rchp(chpid);
968 if ((ret == 0) || (ret == 2)) 974 if ((ret == 0) || (ret == 2))
969 /* 975 /*
970 * rchp either succeeded, or another rchp is already 976 * rchp either succeeded, or another rchp is already
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
1048 do_reipl_asm(*((__u32*)&schid)); 1054 do_reipl_asm(*((__u32*)&schid));
1049} 1055}
1050 1056
1051static struct schib __initdata ipl_schib; 1057int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1052
1053/*
1054 * ipl_save_parameters gets called very early. It is not allowed to access
1055 * anything in the bss section at all. The bss section is not cleared yet,
1056 * but may contain some ipl parameters written by the firmware.
1057 * These parameters (if present) are copied to 0x2000.
1058 * To avoid corruption of the ipl parameters, all variables used by this
1059 * function must reside on the stack or in the data section.
1060 */
1061void ipl_save_parameters(void)
1062{ 1058{
1063 struct subchannel_id schid; 1059 struct subchannel_id schid;
1064 unsigned int *ipl_ptr; 1060 struct schib schib;
1065 void *src, *dst;
1066 1061
1067 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1062 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
1068 if (!schid.one) 1063 if (!schid.one)
1069 return; 1064 return -ENODEV;
1070 if (stsch(schid, &ipl_schib)) 1065 if (stsch(schid, &schib))
1071 return; 1066 return -ENODEV;
1072 if (!ipl_schib.pmcw.dnv) 1067 if (!schib.pmcw.dnv)
1073 return; 1068 return -ENODEV;
1074 ipl_devno = ipl_schib.pmcw.dev; 1069 iplinfo->devno = schib.pmcw.dev;
1075 ipl_flags |= IPL_DEVNO_VALID; 1070 iplinfo->is_qdio = schib.pmcw.qf;
1076 if (!ipl_schib.pmcw.qf) 1071 return 0;
1077 return;
1078 ipl_flags |= IPL_PARMBLOCK_VALID;
1079 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
1080 src = (void *)(unsigned long)*ipl_ptr;
1081 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1082 memmove(dst, src, PAGE_SIZE);
1083 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
1084} 1072}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 35154a210357..7446c39951a7 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,18 +1,11 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5#include <linux/mutex.h> 4#include <linux/mutex.h>
6 5#include <linux/device.h>
7/* 6#include <asm/chpid.h>
8 * where we put the ssd info 7#include "chsc.h"
9 */ 8#include "schid.h"
10struct ssd_info {
11 __u8 valid:1;
12 __u8 type:7; /* subchannel type */
13 __u8 chpid[8]; /* chpids */
14 __u16 fla[8]; /* full link addresses */
15} __attribute__ ((packed));
16 9
17/* 10/*
18 * path management control word 11 * path management control word
@@ -108,7 +101,7 @@ struct subchannel {
108 struct schib schib; /* subchannel information block */ 101 struct schib schib; /* subchannel information block */
109 struct orb orb; /* operation request block */ 102 struct orb orb; /* operation request block */
110 struct ccw1 sense_ccw; /* static ccw for sense command */ 103 struct ccw1 sense_ccw; /* static ccw for sense command */
111 struct ssd_info ssd_info; /* subchannel description */ 104 struct chsc_ssd_info ssd_info; /* subchannel description */
112 struct device dev; /* entry in device tree */ 105 struct device dev; /* entry in device tree */
113 struct css_driver *driver; 106 struct css_driver *driver;
114} __attribute__ ((aligned(8))); 107} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 90b22faabbf7..28abd697be1a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -476,7 +476,7 @@ struct cmb_area {
476}; 476};
477 477
478static struct cmb_area cmb_area = { 478static struct cmb_area cmb_area = {
479 .lock = SPIN_LOCK_UNLOCKED, 479 .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
480 .list = LIST_HEAD_INIT(cmb_area.list), 480 .list = LIST_HEAD_INIT(cmb_area.list),
481 .num_channels = 1024, 481 .num_channels = 1024,
482}; 482};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..27c6d9e55b23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,9 @@
20#include "ioasm.h" 20#include "ioasm.h"
21#include "chsc.h" 21#include "chsc.h"
22#include "device.h" 22#include "device.h"
23#include "idset.h"
24#include "chp.h"
23 25
24int need_rescan = 0;
25int css_init_done = 0; 26int css_init_done = 0;
26static int need_reprobe = 0; 27static int need_reprobe = 0;
27static int max_ssid = 0; 28static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
125 mutex_unlock(&sch->reg_mutex); 126 mutex_unlock(&sch->reg_mutex);
126} 127}
127 128
128static int 129static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
129css_register_subchannel(struct subchannel *sch) 130{
131 int i;
132 int mask;
133
134 memset(ssd, 0, sizeof(struct chsc_ssd_info));
135 ssd->path_mask = pmcw->pim;
136 for (i = 0; i < 8; i++) {
137 mask = 0x80 >> i;
138 if (pmcw->pim & mask) {
139 chp_id_init(&ssd->chpid[i]);
140 ssd->chpid[i].id = pmcw->chpid[i];
141 }
142 }
143}
144
145static void ssd_register_chpids(struct chsc_ssd_info *ssd)
146{
147 int i;
148 int mask;
149
150 for (i = 0; i < 8; i++) {
151 mask = 0x80 >> i;
152 if (ssd->path_mask & mask)
153 if (!chp_is_registered(ssd->chpid[i]))
154 chp_new(ssd->chpid[i]);
155 }
156}
157
158void css_update_ssd_info(struct subchannel *sch)
159{
160 int ret;
161
162 if (cio_is_console(sch->schid)) {
163 /* Console is initialized too early for functions requiring
164 * memory allocation. */
165 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
166 } else {
167 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
168 if (ret)
169 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
170 ssd_register_chpids(&sch->ssd_info);
171 }
172}
173
174static int css_register_subchannel(struct subchannel *sch)
130{ 175{
131 int ret; 176 int ret;
132 177
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
135 sch->dev.bus = &css_bus_type; 180 sch->dev.bus = &css_bus_type;
136 sch->dev.release = &css_subchannel_release; 181 sch->dev.release = &css_subchannel_release;
137 sch->dev.groups = subch_attr_groups; 182 sch->dev.groups = subch_attr_groups;
138 183 css_update_ssd_info(sch);
139 css_get_ssd_info(sch);
140
141 /* make it known to the system */ 184 /* make it known to the system */
142 ret = css_sch_device_register(sch); 185 ret = css_sch_device_register(sch);
143 if (ret) { 186 if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
306 return css_probe_device(schid); 349 return css_probe_device(schid);
307} 350}
308 351
309static int css_evaluate_subchannel(struct subchannel_id schid, int slow) 352static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
310{ 353{
311 struct subchannel *sch; 354 struct subchannel *sch;
312 int ret; 355 int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
317 put_device(&sch->dev); 360 put_device(&sch->dev);
318 } else 361 } else
319 ret = css_evaluate_new_subchannel(schid, slow); 362 ret = css_evaluate_new_subchannel(schid, slow);
320 363 if (ret == -EAGAIN)
321 return ret; 364 css_schedule_eval(schid);
322} 365}
323 366
324static int 367static struct idset *slow_subchannel_set;
325css_rescan_devices(struct subchannel_id schid, void *data) 368static spinlock_t slow_subchannel_lock;
369
370static int __init slow_subchannel_init(void)
326{ 371{
327 return css_evaluate_subchannel(schid, 1); 372 spin_lock_init(&slow_subchannel_lock);
373 slow_subchannel_set = idset_sch_new();
374 if (!slow_subchannel_set) {
375 printk(KERN_WARNING "cio: could not allocate slow subchannel "
376 "set\n");
377 return -ENOMEM;
378 }
379 return 0;
328} 380}
329 381
330struct slow_subchannel { 382subsys_initcall(slow_subchannel_init);
331 struct list_head slow_list;
332 struct subchannel_id schid;
333};
334
335static LIST_HEAD(slow_subchannels_head);
336static DEFINE_SPINLOCK(slow_subchannel_lock);
337 383
338static void 384static void css_slow_path_func(struct work_struct *unused)
339css_trigger_slow_path(struct work_struct *unused)
340{ 385{
341 CIO_TRACE_EVENT(4, "slowpath"); 386 struct subchannel_id schid;
342
343 if (need_rescan) {
344 need_rescan = 0;
345 for_each_subchannel(css_rescan_devices, NULL);
346 return;
347 }
348 387
388 CIO_TRACE_EVENT(4, "slowpath");
349 spin_lock_irq(&slow_subchannel_lock); 389 spin_lock_irq(&slow_subchannel_lock);
350 while (!list_empty(&slow_subchannels_head)) { 390 init_subchannel_id(&schid);
351 struct slow_subchannel *slow_sch = 391 while (idset_sch_get_first(slow_subchannel_set, &schid)) {
352 list_entry(slow_subchannels_head.next, 392 idset_sch_del(slow_subchannel_set, schid);
353 struct slow_subchannel, slow_list);
354
355 list_del_init(slow_subchannels_head.next);
356 spin_unlock_irq(&slow_subchannel_lock); 393 spin_unlock_irq(&slow_subchannel_lock);
357 css_evaluate_subchannel(slow_sch->schid, 1); 394 css_evaluate_subchannel(schid, 1);
358 spin_lock_irq(&slow_subchannel_lock); 395 spin_lock_irq(&slow_subchannel_lock);
359 kfree(slow_sch);
360 } 396 }
361 spin_unlock_irq(&slow_subchannel_lock); 397 spin_unlock_irq(&slow_subchannel_lock);
362} 398}
363 399
364DECLARE_WORK(slow_path_work, css_trigger_slow_path); 400static DECLARE_WORK(slow_path_work, css_slow_path_func);
365struct workqueue_struct *slow_path_wq; 401struct workqueue_struct *slow_path_wq;
366 402
403void css_schedule_eval(struct subchannel_id schid)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&slow_subchannel_lock, flags);
408 idset_sch_add(slow_subchannel_set, schid);
409 queue_work(slow_path_wq, &slow_path_work);
410 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
411}
412
413void css_schedule_eval_all(void)
414{
415 unsigned long flags;
416
417 spin_lock_irqsave(&slow_subchannel_lock, flags);
418 idset_fill(slow_subchannel_set);
419 queue_work(slow_path_wq, &slow_path_work);
420 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
421}
422
367/* Reprobe subchannel if unregistered. */ 423/* Reprobe subchannel if unregistered. */
368static int reprobe_subchannel(struct subchannel_id schid, void *data) 424static int reprobe_subchannel(struct subchannel_id schid, void *data)
369{ 425{
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void)
426EXPORT_SYMBOL_GPL(css_schedule_reprobe); 482EXPORT_SYMBOL_GPL(css_schedule_reprobe);
427 483
428/* 484/*
429 * Rescan for new devices. FIXME: This is slow.
430 * This function is called when we have lost CRWs due to overflows and we have
431 * to do subchannel housekeeping.
432 */
433void
434css_reiterate_subchannels(void)
435{
436 css_clear_subchannel_slow_list();
437 need_rescan = 1;
438}
439
440/*
441 * Called from the machine check handler for subchannel report words. 485 * Called from the machine check handler for subchannel report words.
442 */ 486 */
443int 487void css_process_crw(int rsid1, int rsid2)
444css_process_crw(int rsid1, int rsid2)
445{ 488{
446 int ret;
447 struct subchannel_id mchk_schid; 489 struct subchannel_id mchk_schid;
448 490
449 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 491 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
450 rsid1, rsid2); 492 rsid1, rsid2);
451
452 if (need_rescan)
453 /* We need to iterate all subchannels anyway. */
454 return -EAGAIN;
455
456 init_subchannel_id(&mchk_schid); 493 init_subchannel_id(&mchk_schid);
457 mchk_schid.sch_no = rsid1; 494 mchk_schid.sch_no = rsid1;
458 if (rsid2 != 0) 495 if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
463 * use stsch() to find out if the subchannel in question has come 500 * use stsch() to find out if the subchannel in question has come
464 * or gone. 501 * or gone.
465 */ 502 */
466 ret = css_evaluate_subchannel(mchk_schid, 0); 503 css_evaluate_subchannel(mchk_schid, 0);
467 if (ret == -EAGAIN) {
468 if (css_enqueue_subchannel_slow(mchk_schid)) {
469 css_clear_subchannel_slow_list();
470 need_rescan = 1;
471 }
472 }
473 return ret;
474} 504}
475 505
476static int __init 506static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
745 775
746subsys_initcall(init_channel_subsystem); 776subsys_initcall(init_channel_subsystem);
747 777
748int
749css_enqueue_subchannel_slow(struct subchannel_id schid)
750{
751 struct slow_subchannel *new_slow_sch;
752 unsigned long flags;
753
754 new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
755 if (!new_slow_sch)
756 return -ENOMEM;
757 new_slow_sch->schid = schid;
758 spin_lock_irqsave(&slow_subchannel_lock, flags);
759 list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
760 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
761 return 0;
762}
763
764void
765css_clear_subchannel_slow_list(void)
766{
767 unsigned long flags;
768
769 spin_lock_irqsave(&slow_subchannel_lock, flags);
770 while (!list_empty(&slow_subchannels_head)) {
771 struct slow_subchannel *slow_sch =
772 list_entry(slow_subchannels_head.next,
773 struct slow_subchannel, slow_list);
774
775 list_del_init(slow_subchannels_head.next);
776 kfree(slow_sch);
777 }
778 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
779}
780
781
782
783int
784css_slow_subchannels_exist(void)
785{
786 return (!list_empty(&slow_subchannels_head));
787}
788
789MODULE_LICENSE("GPL"); 778MODULE_LICENSE("GPL");
790EXPORT_SYMBOL(css_bus_type); 779EXPORT_SYMBOL(css_bus_type);
791EXPORT_SYMBOL_GPL(css_characteristics_avail); 780EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ca2bab932a8a..71fcfdc42800 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -4,8 +4,11 @@
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/wait.h> 5#include <linux/wait.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/device.h>
8#include <linux/types.h>
7 9
8#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h>
9 12
10#include "schid.h" 13#include "schid.h"
11 14
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 146extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 147extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 148extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int); 149extern void css_process_crw(int, int);
147extern void css_reiterate_subchannels(void); 150extern void css_reiterate_subchannels(void);
151void css_update_ssd_info(struct subchannel *sch);
148 152
149#define __MAX_SUBCHANNEL 65535 153#define __MAX_SUBCHANNEL 65535
150#define __MAX_SSID 3 154#define __MAX_SSID 3
151#define __MAX_CHPID 255
152#define __MAX_CSSID 0
153 155
154struct channel_subsystem { 156struct channel_subsystem {
155 u8 cssid; 157 u8 cssid;
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch);
185void device_kill_pending_timer(struct subchannel *); 187void device_kill_pending_timer(struct subchannel *);
186 188
187/* Helper functions to build lists for the slow path. */ 189/* Helper functions to build lists for the slow path. */
188extern int css_enqueue_subchannel_slow(struct subchannel_id schid); 190void css_schedule_eval(struct subchannel_id schid);
189void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 191void css_schedule_eval_all(void);
190void css_clear_subchannel_slow_list(void);
191int css_slow_subchannels_exist(void);
192extern int need_rescan;
193 192
194int sch_is_pseudo_sch(struct subchannel *); 193int sch_is_pseudo_sch(struct subchannel *);
195 194
196extern struct workqueue_struct *slow_path_wq; 195extern struct workqueue_struct *slow_path_wq;
197extern struct work_struct slow_path_work;
198 196
199int subchannel_add_files (struct device *); 197int subchannel_add_files (struct device *);
200extern struct attribute_group *subch_attr_groups[]; 198extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e322111fb369..03355902c582 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
56/* Store modalias string delimited by prefix/suffix string into buffer with 56/* Store modalias string delimited by prefix/suffix string into buffer with
57 * specified size. Return length of resulting string (excluding trailing '\0') 57 * specified size. Return length of resulting string (excluding trailing '\0')
58 * even if string doesn't fit buffer (snprintf semantics). */ 58 * even if string doesn't fit buffer (snprintf semantics). */
59static int snprint_alias(char *buf, size_t size, const char *prefix, 59static int snprint_alias(char *buf, size_t size,
60 struct ccw_device_id *id, const char *suffix) 60 struct ccw_device_id *id, const char *suffix)
61{ 61{
62 int len; 62 int len;
63 63
64 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, 64 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
65 id->cu_model);
66 if (len > size) 65 if (len > size)
67 return len; 66 return len;
68 buf += len; 67 buf += len;
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp,
85 struct ccw_device *cdev = to_ccwdev(dev); 84 struct ccw_device *cdev = to_ccwdev(dev);
86 struct ccw_device_id *id = &(cdev->id); 85 struct ccw_device_id *id = &(cdev->id);
87 int i = 0; 86 int i = 0;
88 int len; 87 int len = 0;
88 int ret;
89 char modalias_buf[30];
89 90
90 /* CU_TYPE= */ 91 /* CU_TYPE= */
91 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; 92 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
92 if (len > buffer_size || i >= num_envp) 93 "CU_TYPE=%04X", id->cu_type);
93 return -ENOMEM; 94 if (ret)
94 envp[i++] = buffer; 95 return ret;
95 buffer += len;
96 buffer_size -= len;
97 96
98 /* CU_MODEL= */ 97 /* CU_MODEL= */
99 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; 98 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
100 if (len > buffer_size || i >= num_envp) 99 "CU_MODEL=%02X", id->cu_model);
101 return -ENOMEM; 100 if (ret)
102 envp[i++] = buffer; 101 return ret;
103 buffer += len;
104 buffer_size -= len;
105 102
106 /* The next two can be zero, that's ok for us */ 103 /* The next two can be zero, that's ok for us */
107 /* DEV_TYPE= */ 104 /* DEV_TYPE= */
108 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; 105 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
109 if (len > buffer_size || i >= num_envp) 106 "DEV_TYPE=%04X", id->dev_type);
110 return -ENOMEM; 107 if (ret)
111 envp[i++] = buffer; 108 return ret;
112 buffer += len;
113 buffer_size -= len;
114 109
115 /* DEV_MODEL= */ 110 /* DEV_MODEL= */
116 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", 111 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
117 (unsigned char) id->dev_model) + 1; 112 "DEV_MODEL=%02X", id->dev_model);
118 if (len > buffer_size || i >= num_envp) 113 if (ret)
119 return -ENOMEM; 114 return ret;
120 envp[i++] = buffer;
121 buffer += len;
122 buffer_size -= len;
123 115
124 /* MODALIAS= */ 116 /* MODALIAS= */
125 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; 117 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
126 if (len > buffer_size || i >= num_envp) 118 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
127 return -ENOMEM; 119 "MODALIAS=%s", modalias_buf);
128 envp[i++] = buffer; 120 return ret;
129 buffer += len;
130 buffer_size -= len;
131
132 envp[i] = NULL;
133
134 return 0;
135} 121}
136 122
137struct bus_type ccw_bus_type; 123struct bus_type ccw_bus_type;
@@ -230,12 +216,18 @@ static ssize_t
230chpids_show (struct device * dev, struct device_attribute *attr, char * buf) 216chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
231{ 217{
232 struct subchannel *sch = to_subchannel(dev); 218 struct subchannel *sch = to_subchannel(dev);
233 struct ssd_info *ssd = &sch->ssd_info; 219 struct chsc_ssd_info *ssd = &sch->ssd_info;
234 ssize_t ret = 0; 220 ssize_t ret = 0;
235 int chp; 221 int chp;
222 int mask;
236 223
237 for (chp = 0; chp < 8; chp++) 224 for (chp = 0; chp < 8; chp++) {
238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); 225 mask = 0x80 >> chp;
226 if (ssd->path_mask & mask)
227 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
228 else
229 ret += sprintf(buf + ret, "00 ");
230 }
239 ret += sprintf (buf+ret, "\n"); 231 ret += sprintf (buf+ret, "\n");
240 return min((ssize_t)PAGE_SIZE, ret); 232 return min((ssize_t)PAGE_SIZE, ret);
241} 233}
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
280 struct ccw_device_id *id = &(cdev->id); 272 struct ccw_device_id *id = &(cdev->id);
281 int len; 273 int len;
282 274
283 len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; 275 len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
284 276
285 return len > PAGE_SIZE ? PAGE_SIZE : len; 277 return len > PAGE_SIZE ? PAGE_SIZE : len;
286} 278}
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
298 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); 290 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
299} 291}
300 292
301static void ccw_device_unregister(struct work_struct *work) 293static void ccw_device_unregister(struct ccw_device *cdev)
302{ 294{
303 struct ccw_device_private *priv;
304 struct ccw_device *cdev;
305
306 priv = container_of(work, struct ccw_device_private, kick_work);
307 cdev = priv->cdev;
308 if (test_and_clear_bit(1, &cdev->private->registered)) 295 if (test_and_clear_bit(1, &cdev->private->registered))
309 device_unregister(&cdev->dev); 296 device_del(&cdev->dev);
310 put_device(&cdev->dev);
311} 297}
312 298
313static void 299static void
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
324 spin_lock_irqsave(cdev->ccwlock, flags); 310 spin_lock_irqsave(cdev->ccwlock, flags);
325 cdev->private->state = DEV_STATE_NOT_OPER; 311 cdev->private->state = DEV_STATE_NOT_OPER;
326 spin_unlock_irqrestore(cdev->ccwlock, flags); 312 spin_unlock_irqrestore(cdev->ccwlock, flags);
327 if (get_device(&cdev->dev)) { 313 ccw_device_unregister(cdev);
328 PREPARE_WORK(&cdev->private->kick_work, 314 put_device(&cdev->dev);
329 ccw_device_unregister);
330 queue_work(ccw_device_work, &cdev->private->kick_work);
331 }
332 return ; 315 return ;
333 } 316 }
334 sch = to_subchannel(cdev->dev.parent); 317 sch = to_subchannel(cdev->dev.parent);
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev)
413 return (ret == 0) ? -ENODEV : ret; 396 return (ret == 0) ? -ENODEV : ret;
414} 397}
415 398
416static ssize_t 399static void online_store_handle_offline(struct ccw_device *cdev)
417online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 400{
401 if (cdev->private->state == DEV_STATE_DISCONNECTED)
402 ccw_device_remove_disconnected(cdev);
403 else if (cdev->drv && cdev->drv->set_offline)
404 ccw_device_set_offline(cdev);
405}
406
407static int online_store_recog_and_online(struct ccw_device *cdev)
408{
409 int ret;
410
411 /* Do device recognition, if needed. */
412 if (cdev->id.cu_type == 0) {
413 ret = ccw_device_recognition(cdev);
414 if (ret) {
415 printk(KERN_WARNING"Couldn't start recognition "
416 "for device %s (ret=%d)\n",
417 cdev->dev.bus_id, ret);
418 return ret;
419 }
420 wait_event(cdev->private->wait_q,
421 cdev->private->flags.recog_done);
422 }
423 if (cdev->drv && cdev->drv->set_online)
424 ccw_device_set_online(cdev);
425 return 0;
426}
427static void online_store_handle_online(struct ccw_device *cdev, int force)
428{
429 int ret;
430
431 ret = online_store_recog_and_online(cdev);
432 if (ret)
433 return;
434 if (force && cdev->private->state == DEV_STATE_BOXED) {
435 ret = ccw_device_stlck(cdev);
436 if (ret) {
437 printk(KERN_WARNING"ccw_device_stlck for device %s "
438 "returned %d!\n", cdev->dev.bus_id, ret);
439 return;
440 }
441 if (cdev->id.cu_type == 0)
442 cdev->private->state = DEV_STATE_NOT_OPER;
443 online_store_recog_and_online(cdev);
444 }
445
446}
447
448static ssize_t online_store (struct device *dev, struct device_attribute *attr,
449 const char *buf, size_t count)
418{ 450{
419 struct ccw_device *cdev = to_ccwdev(dev); 451 struct ccw_device *cdev = to_ccwdev(dev);
420 int i, force, ret; 452 int i, force;
421 char *tmp; 453 char *tmp;
422 454
423 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 455 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
434 force = 0; 466 force = 0;
435 i = simple_strtoul(buf, &tmp, 16); 467 i = simple_strtoul(buf, &tmp, 16);
436 } 468 }
437 if (i == 1) { 469
438 /* Do device recognition, if needed. */ 470 switch (i) {
439 if (cdev->id.cu_type == 0) { 471 case 0:
440 ret = ccw_device_recognition(cdev); 472 online_store_handle_offline(cdev);
441 if (ret) { 473 break;
442 printk(KERN_WARNING"Couldn't start recognition " 474 case 1:
443 "for device %s (ret=%d)\n", 475 online_store_handle_online(cdev, force);
444 cdev->dev.bus_id, ret); 476 break;
445 goto out; 477 default:
446 } 478 count = -EINVAL;
447 wait_event(cdev->private->wait_q,
448 cdev->private->flags.recog_done);
449 }
450 if (cdev->drv && cdev->drv->set_online)
451 ccw_device_set_online(cdev);
452 } else if (i == 0) {
453 if (cdev->private->state == DEV_STATE_DISCONNECTED)
454 ccw_device_remove_disconnected(cdev);
455 else if (cdev->drv && cdev->drv->set_offline)
456 ccw_device_set_offline(cdev);
457 }
458 if (force && cdev->private->state == DEV_STATE_BOXED) {
459 ret = ccw_device_stlck(cdev);
460 if (ret) {
461 printk(KERN_WARNING"ccw_device_stlck for device %s "
462 "returned %d!\n", cdev->dev.bus_id, ret);
463 goto out;
464 }
465 /* Do device recognition, if needed. */
466 if (cdev->id.cu_type == 0) {
467 cdev->private->state = DEV_STATE_NOT_OPER;
468 ret = ccw_device_recognition(cdev);
469 if (ret) {
470 printk(KERN_WARNING"Couldn't start recognition "
471 "for device %s (ret=%d)\n",
472 cdev->dev.bus_id, ret);
473 goto out;
474 }
475 wait_event(cdev->private->wait_q,
476 cdev->private->flags.recog_done);
477 }
478 if (cdev->drv && cdev->drv->set_online)
479 ccw_device_set_online(cdev);
480 } 479 }
481 out:
482 if (cdev->drv) 480 if (cdev->drv)
483 module_put(cdev->drv->owner); 481 module_put(cdev->drv->owner);
484 atomic_set(&cdev->private->onoff, 0); 482 atomic_set(&cdev->private->onoff, 0);
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = {
548 .attrs = ccwdev_attrs, 546 .attrs = ccwdev_attrs,
549}; 547};
550 548
551static int 549struct attribute_group *ccwdev_attr_groups[] = {
552device_add_files (struct device *dev) 550 &ccwdev_attr_group,
553{ 551 NULL,
554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 552};
555}
556
557static void
558device_remove_files(struct device *dev)
559{
560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
561}
562 553
563/* this is a simple abstraction for device_register that sets the 554/* this is a simple abstraction for device_register that sets the
564 * correct bus type and adds the bus specific files */ 555 * correct bus type and adds the bus specific files */
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev)
573 return ret; 564 return ret;
574 565
575 set_bit(1, &cdev->private->registered); 566 set_bit(1, &cdev->private->registered);
576 if ((ret = device_add_files(dev))) {
577 if (test_and_clear_bit(1, &cdev->private->registered))
578 device_del(dev);
579 }
580 return ret; 567 return ret;
581} 568}
582 569
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work)
648 return; 635 return;
649 } 636 }
650 set_bit(1, &cdev->private->registered); 637 set_bit(1, &cdev->private->registered);
651 if (device_add_files(&cdev->dev)) {
652 if (test_and_clear_bit(1, &cdev->private->registered))
653 device_unregister(&cdev->dev);
654 }
655} 638}
656 639
657void ccw_device_do_unreg_rereg(struct work_struct *work) 640void ccw_device_do_unreg_rereg(struct work_struct *work)
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work)
664 cdev = priv->cdev; 647 cdev = priv->cdev;
665 sch = to_subchannel(cdev->dev.parent); 648 sch = to_subchannel(cdev->dev.parent);
666 649
667 device_remove_files(&cdev->dev); 650 ccw_device_unregister(cdev);
668 if (test_and_clear_bit(1, &cdev->private->registered))
669 device_del(&cdev->dev);
670 PREPARE_WORK(&cdev->private->kick_work, 651 PREPARE_WORK(&cdev->private->kick_work,
671 ccw_device_add_changed); 652 ccw_device_add_changed);
672 queue_work(ccw_device_work, &cdev->private->kick_work); 653 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
705 cdev->dev.parent = &sch->dev; 686 cdev->dev.parent = &sch->dev;
706 cdev->dev.release = ccw_device_release; 687 cdev->dev.release = ccw_device_release;
707 INIT_LIST_HEAD(&cdev->private->kick_work.entry); 688 INIT_LIST_HEAD(&cdev->private->kick_work.entry);
689 cdev->dev.groups = ccwdev_attr_groups;
708 /* Do first half of device_register. */ 690 /* Do first half of device_register. */
709 device_initialize(&cdev->dev); 691 device_initialize(&cdev->dev);
710 if (!get_device(&sch->dev)) { 692 if (!get_device(&sch->dev)) {
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
736static void sch_attach_device(struct subchannel *sch, 718static void sch_attach_device(struct subchannel *sch,
737 struct ccw_device *cdev) 719 struct ccw_device *cdev)
738{ 720{
721 css_update_ssd_info(sch);
739 spin_lock_irq(sch->lock); 722 spin_lock_irq(sch->lock);
740 sch->dev.driver_data = cdev; 723 sch->dev.driver_data = cdev;
741 cdev->private->schid = sch->schid; 724 cdev->private->schid = sch->schid;
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work)
871 priv = container_of(work, struct ccw_device_private, kick_work); 854 priv = container_of(work, struct ccw_device_private, kick_work);
872 cdev = priv->cdev; 855 cdev = priv->cdev;
873 sch = to_subchannel(cdev->dev.parent); 856 sch = to_subchannel(cdev->dev.parent);
874 857 css_update_ssd_info(sch);
875 /* 858 /*
876 * io_subchannel_register() will also be called after device 859 * io_subchannel_register() will also be called after device
877 * recognition has been done for a boxed device (which will already 860 * recognition has been done for a boxed device (which will already
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch)
1133 sch->dev.driver_data = NULL; 1116 sch->dev.driver_data = NULL;
1134 cdev->private->state = DEV_STATE_NOT_OPER; 1117 cdev->private->state = DEV_STATE_NOT_OPER;
1135 spin_unlock_irqrestore(cdev->ccwlock, flags); 1118 spin_unlock_irqrestore(cdev->ccwlock, flags);
1136 /* 1119 ccw_device_unregister(cdev);
1137 * Put unregistration on workqueue to avoid livelocks on the css bus 1120 put_device(&cdev->dev);
1138 * semaphore.
1139 */
1140 if (get_device(&cdev->dev)) {
1141 PREPARE_WORK(&cdev->private->kick_work,
1142 ccw_device_unregister);
1143 queue_work(ccw_device_work, &cdev->private->kick_work);
1144 }
1145 return 0; 1121 return 0;
1146} 1122}
1147 1123
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 089a3ddd6265..898ec3b2bebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -22,6 +23,7 @@
22#include "device.h" 23#include "device.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "ioasm.h" 25#include "ioasm.h"
26#include "chp.h"
25 27
26int 28int
27device_is_online(struct subchannel *sch) 29device_is_online(struct subchannel *sch)
@@ -210,14 +212,18 @@ static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 212__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 213{
212 int mask, i; 214 int mask, i;
215 struct chp_id chpid;
213 216
217 chp_id_init(&chpid);
214 for (i = 0; i<8; i++) { 218 for (i = 0; i<8; i++) {
215 mask = 0x80 >> i; 219 mask = 0x80 >> i;
216 if (!(sch->lpm & mask)) 220 if (!(sch->lpm & mask))
217 continue; 221 continue;
218 if (old_lpm & mask) 222 if (old_lpm & mask)
219 continue; 223 continue;
220 chpid_is_actually_online(sch->schib.pmcw.chpid[i]); 224 chpid.id = sch->schib.pmcw.chpid[i];
225 if (!chp_is_registered(chpid))
226 css_schedule_eval_all();
221 } 227 }
222} 228}
223 229
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7c7775aae38a..16f59fcb66b1 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -16,12 +16,14 @@
16 16
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h>
19 20
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "css.h" 23#include "css.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "device.h" 25#include "device.h"
26#include "chp.h"
25 27
26int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) 28int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
27{ 29{
@@ -606,9 +608,12 @@ void *
606ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 608ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
607{ 609{
608 struct subchannel *sch; 610 struct subchannel *sch;
611 struct chp_id chpid;
609 612
610 sch = to_subchannel(cdev->dev.parent); 613 sch = to_subchannel(cdev->dev.parent);
611 return chsc_get_chp_desc(sch, chp_no); 614 chp_id_init(&chpid);
615 chpid.id = sch->schib.pmcw.chpid[chp_no];
616 return chp_get_chp_desc(chpid);
612} 617}
613 618
614// FIXME: these have to go: 619// FIXME: these have to go:
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000000..16ea828e99f7
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
1/*
2 * drivers/s390/cio/idset.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/slab.h>
9#include <asm/bitops.h>
10#include "idset.h"
11#include "css.h"
12
13struct idset {
14 int num_ssid;
15 int num_id;
16 unsigned long bitmap[0];
17};
18
19static inline unsigned long bitmap_size(int num_ssid, int num_id)
20{
21 return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
22}
23
24static struct idset *idset_new(int num_ssid, int num_id)
25{
26 struct idset *set;
27
28 set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
29 GFP_KERNEL);
30 if (set) {
31 set->num_ssid = num_ssid;
32 set->num_id = num_id;
33 }
34 return set;
35}
36
37void idset_free(struct idset *set)
38{
39 kfree(set);
40}
41
42void idset_clear(struct idset *set)
43{
44 memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
45}
46
47void idset_fill(struct idset *set)
48{
49 memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
50}
51
52static inline void idset_add(struct idset *set, int ssid, int id)
53{
54 set_bit(ssid * set->num_id + id, set->bitmap);
55}
56
57static inline void idset_del(struct idset *set, int ssid, int id)
58{
59 clear_bit(ssid * set->num_id + id, set->bitmap);
60}
61
62static inline int idset_contains(struct idset *set, int ssid, int id)
63{
64 return test_bit(ssid * set->num_id + id, set->bitmap);
65}
66
67static inline int idset_get_first(struct idset *set, int *ssid, int *id)
68{
69 int bitnum;
70
71 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
72 if (bitnum >= set->num_ssid * set->num_id)
73 return 0;
74 *ssid = bitnum / set->num_id;
75 *id = bitnum % set->num_id;
76 return 1;
77}
78
79struct idset *idset_sch_new(void)
80{
81 return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
82}
83
84void idset_sch_add(struct idset *set, struct subchannel_id schid)
85{
86 idset_add(set, schid.ssid, schid.sch_no);
87}
88
89void idset_sch_del(struct idset *set, struct subchannel_id schid)
90{
91 idset_del(set, schid.ssid, schid.sch_no);
92}
93
94int idset_sch_contains(struct idset *set, struct subchannel_id schid)
95{
96 return idset_contains(set, schid.ssid, schid.sch_no);
97}
98
99int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
100{
101 int ssid = 0;
102 int id = 0;
103 int rc;
104
105 rc = idset_get_first(set, &ssid, &id);
106 if (rc) {
107 init_subchannel_id(schid);
108 schid->ssid = ssid;
109 schid->sch_no = id;
110 }
111 return rc;
112}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000000..144466ab8c15
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
1/*
2 * drivers/s390/cio/idset.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H
10
11#include "schid.h"
12
13struct idset;
14
15void idset_free(struct idset *set);
16void idset_clear(struct idset *set);
17void idset_fill(struct idset *set);
18
19struct idset *idset_sch_new(void);
20void idset_sch_add(struct idset *set, struct subchannel_id id);
21void idset_sch_del(struct idset *set, struct subchannel_id id);
22int idset_sch_contains(struct idset *set, struct subchannel_id id);
23int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
24
25#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index ad6d82940069..7153dd959082 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,6 +1,7 @@
1#ifndef S390_CIO_IOASM_H 1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h>
4#include "schid.h" 5#include "schid.h"
5 6
6/* 7/*
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area)
189 return cc; 190 return cc;
190} 191}
191 192
192static inline int rchp(int chpid) 193static inline int rchp(struct chp_id chpid)
193{ 194{
194 register unsigned int reg1 asm ("1") = chpid; 195 register struct chp_id reg1 asm ("1") = chpid;
195 int ccode; 196 int ccode;
196 197
197 asm volatile( 198 asm volatile(
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 0d6d5fcc128b..570a960bfb5b 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1638,21 +1638,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1638 struct channel *ch; 1638 struct channel *ch;
1639 1639
1640 DBF_TEXT(trace, 2, __FUNCTION__); 1640 DBF_TEXT(trace, 2, __FUNCTION__);
1641 if ((ch = 1641 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1642 (struct channel *) kmalloc(sizeof (struct channel), 1642 if (!ch) {
1643 GFP_KERNEL)) == NULL) {
1644 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1643 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1645 return -1; 1644 return -1;
1646 } 1645 }
1647 memset(ch, 0, sizeof (struct channel)); 1646 /* assure all flags and counters are reset */
1648 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), 1647 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1649 GFP_KERNEL | GFP_DMA)) == NULL) { 1648 if (!ch->ccw) {
1650 kfree(ch); 1649 kfree(ch);
1651 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1650 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1652 return -1; 1651 return -1;
1653 } 1652 }
1654 1653
1655 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
1656 1654
1657 /** 1655 /**
1658 * "static" ccws are used in the following way: 1656 * "static" ccws are used in the following way:
@@ -1692,15 +1690,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1692 return -1; 1690 return -1;
1693 } 1691 }
1694 fsm_newstate(ch->fsm, CH_STATE_IDLE); 1692 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1695 if ((ch->irb = kmalloc(sizeof (struct irb), 1693 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1696 GFP_KERNEL)) == NULL) { 1694 if (!ch->irb) {
1697 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1695 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1698 kfree_fsm(ch->fsm); 1696 kfree_fsm(ch->fsm);
1699 kfree(ch->ccw); 1697 kfree(ch->ccw);
1700 kfree(ch); 1698 kfree(ch);
1701 return -1; 1699 return -1;
1702 } 1700 }
1703 memset(ch->irb, 0, sizeof (struct irb));
1704 while (*c && less_than((*c)->id, ch->id)) 1701 while (*c && less_than((*c)->id, ch->id))
1705 c = &(*c)->next; 1702 c = &(*c)->next;
1706 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { 1703 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
@@ -2745,14 +2742,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
2745 if (!get_device(&cgdev->dev)) 2742 if (!get_device(&cgdev->dev))
2746 return -ENODEV; 2743 return -ENODEV;
2747 2744
2748 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); 2745 priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2749 if (!priv) { 2746 if (!priv) {
2750 ctc_pr_err("%s: Out of memory\n", __func__); 2747 ctc_pr_err("%s: Out of memory\n", __func__);
2751 put_device(&cgdev->dev); 2748 put_device(&cgdev->dev);
2752 return -ENOMEM; 2749 return -ENOMEM;
2753 } 2750 }
2754 2751
2755 memset(priv, 0, sizeof (struct ctc_priv));
2756 rc = ctc_add_files(&cgdev->dev); 2752 rc = ctc_add_files(&cgdev->dev);
2757 if (rc) { 2753 if (rc) {
2758 kfree(priv); 2754 kfree(priv);
@@ -2793,10 +2789,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
2793 DBF_TEXT(setup, 3, __FUNCTION__); 2789 DBF_TEXT(setup, 3, __FUNCTION__);
2794 2790
2795 if (alloc_device) { 2791 if (alloc_device) {
2796 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); 2792 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
2797 if (!dev) 2793 if (!dev)
2798 return NULL; 2794 return NULL;
2799 memset(dev, 0, sizeof (struct net_device));
2800 } 2795 }
2801 2796
2802 dev->priv = privptr; 2797 dev->priv = privptr;
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 806bb1a921eb..644a06eba828 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -21,6 +21,7 @@
21#include "cio/cio.h" 21#include "cio/cio.h"
22#include "cio/chsc.h" 22#include "cio/chsc.h"
23#include "cio/css.h" 23#include "cio/css.h"
24#include "cio/chp.h"
24#include "s390mach.h" 25#include "s390mach.h"
25 26
26static struct semaphore m_sem; 27static struct semaphore m_sem;
@@ -44,14 +45,13 @@ static int
44s390_collect_crw_info(void *param) 45s390_collect_crw_info(void *param)
45{ 46{
46 struct crw crw[2]; 47 struct crw crw[2];
47 int ccode, ret, slow; 48 int ccode;
48 struct semaphore *sem; 49 struct semaphore *sem;
49 unsigned int chain; 50 unsigned int chain;
50 51
51 sem = (struct semaphore *)param; 52 sem = (struct semaphore *)param;
52repeat: 53repeat:
53 down_interruptible(sem); 54 down_interruptible(sem);
54 slow = 0;
55 chain = 0; 55 chain = 0;
56 while (1) { 56 while (1) {
57 if (unlikely(chain > 1)) { 57 if (unlikely(chain > 1)) {
@@ -84,9 +84,8 @@ repeat:
84 /* Check for overflows. */ 84 /* Check for overflows. */
85 if (crw[chain].oflw) { 85 if (crw[chain].oflw) {
86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
87 css_reiterate_subchannels(); 87 css_schedule_eval_all();
88 chain = 0; 88 chain = 0;
89 slow = 1;
90 continue; 89 continue;
91 } 90 }
92 switch (crw[chain].rsc) { 91 switch (crw[chain].rsc) {
@@ -94,10 +93,7 @@ repeat:
94 if (crw[0].chn && !chain) 93 if (crw[0].chn && !chain)
95 break; 94 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid); 95 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 ret = css_process_crw (crw[0].rsid, 96 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 chain ? crw[1].rsid : 0);
99 if (ret == -EAGAIN)
100 slow = 1;
101 break; 97 break;
102 case CRW_RSC_MONITOR: 98 case CRW_RSC_MONITOR:
103 pr_debug("source is monitoring facility\n"); 99 pr_debug("source is monitoring facility\n");
@@ -116,28 +112,23 @@ repeat:
116 } 112 }
117 switch (crw[0].erc) { 113 switch (crw[0].erc) {
118 case CRW_ERC_IPARM: /* Path has come. */ 114 case CRW_ERC_IPARM: /* Path has come. */
119 ret = chp_process_crw(crw[0].rsid, 1); 115 chp_process_crw(crw[0].rsid, 1);
120 break; 116 break;
121 case CRW_ERC_PERRI: /* Path has gone. */ 117 case CRW_ERC_PERRI: /* Path has gone. */
122 case CRW_ERC_PERRN: 118 case CRW_ERC_PERRN:
123 ret = chp_process_crw(crw[0].rsid, 0); 119 chp_process_crw(crw[0].rsid, 0);
124 break; 120 break;
125 default: 121 default:
126 pr_debug("Don't know how to handle erc=%x\n", 122 pr_debug("Don't know how to handle erc=%x\n",
127 crw[0].erc); 123 crw[0].erc);
128 ret = 0;
129 } 124 }
130 if (ret == -EAGAIN)
131 slow = 1;
132 break; 125 break;
133 case CRW_RSC_CONFIG: 126 case CRW_RSC_CONFIG:
134 pr_debug("source is configuration-alert facility\n"); 127 pr_debug("source is configuration-alert facility\n");
135 break; 128 break;
136 case CRW_RSC_CSS: 129 case CRW_RSC_CSS:
137 pr_debug("source is channel subsystem\n"); 130 pr_debug("source is channel subsystem\n");
138 ret = chsc_process_crw(); 131 chsc_process_crw();
139 if (ret == -EAGAIN)
140 slow = 1;
141 break; 132 break;
142 default: 133 default:
143 pr_debug("unknown source\n"); 134 pr_debug("unknown source\n");
@@ -146,8 +137,6 @@ repeat:
146 /* chain is always 0 or 1 here. */ 137 /* chain is always 0 or 1 here. */
147 chain = crw[chain].chn ? chain + 1 : 0; 138 chain = crw[chain].chn ? chain + 1 : 0;
148 } 139 }
149 if (slow)
150 queue_work(slow_path_wq, &slow_path_work);
151 goto repeat; 140 goto repeat;
152 return 0; 141 return 0;
153} 142}
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 090743d2f914..19343f9675c3 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void)
357 357
358__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
359 359
360int get_cpu_capability(unsigned int *capability)
361{
362 struct sysinfo_1_2_2 *info;
363 int rc;
364
365 info = (void *) get_zeroed_page(GFP_KERNEL);
366 if (!info)
367 return -ENOMEM;
368 rc = stsi(info, 1, 2, 2);
369 if (rc == -ENOSYS)
370 goto out;
371 rc = 0;
372 *capability = info->capability;
373out:
374 free_page((unsigned long) info);
375 return rc;
376}
377
360/* 378/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 379 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */ 380 */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6d7e279b1490..dc8f99ee305f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
139#define pte_same(A,B) (pte_val(A) == pte_val(B)) 139#define pte_same(A,B) (pte_val(A) == pte_val(B))
140#endif 140#endif
141 141
142#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 142#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
143#define page_test_and_clear_dirty(page) (0) 143#define page_test_dirty(page) (0)
144#endif
145
146#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
147#define page_clear_dirty(page) do { } while (0)
148#endif
149
150#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
144#define pte_maybe_dirty(pte) pte_dirty(pte) 151#define pte_maybe_dirty(pte) pte_dirty(pte)
145#else 152#else
146#define pte_maybe_dirty(pte) (1) 153#define pte_maybe_dirty(pte) (1)
diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h
index 876898363944..838684dc6d35 100644
--- a/include/asm-s390/bug.h
+++ b/include/asm-s390/bug.h
@@ -1,27 +1,70 @@
1#ifndef _S390_BUG_H 1#ifndef _ASM_S390_BUG_H
2#define _S390_BUG_H 2#define _ASM_S390_BUG_H
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5 5
6#ifdef CONFIG_BUG 6#ifdef CONFIG_BUG
7 7
8static inline __attribute__((noreturn)) void __do_illegal_op(void) 8#ifdef CONFIG_64BIT
9{ 9#define S390_LONG ".quad"
10#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
11 __builtin_trap();
12#else 10#else
13 asm volatile(".long 0"); 11#define S390_LONG ".long"
14#endif 12#endif
15}
16 13
17#define BUG() do { \ 14#ifdef CONFIG_DEBUG_BUGVERBOSE
18 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 15
19 __do_illegal_op(); \ 16#define __EMIT_BUG(x) do { \
17 asm volatile( \
18 "0: j 0b+2\n" \
19 "1:\n" \
20 ".section .rodata.str,\"aMS\",@progbits,1\n" \
21 "2: .asciz \""__FILE__"\"\n" \
22 ".previous\n" \
23 ".section __bug_table,\"a\"\n" \
24 "3:\t" S390_LONG "\t1b,2b\n" \
25 " .short %0,%1\n" \
26 " .org 3b+%2\n" \
27 ".previous\n" \
28 : : "i" (__LINE__), \
29 "i" (x), \
30 "i" (sizeof(struct bug_entry))); \
20} while (0) 31} while (0)
21 32
33#else /* CONFIG_DEBUG_BUGVERBOSE */
34
35#define __EMIT_BUG(x) do { \
36 asm volatile( \
37 "0: j 0b+2\n" \
38 "1:\n" \
39 ".section __bug_table,\"a\"\n" \
40 "2:\t" S390_LONG "\t1b\n" \
41 " .short %0\n" \
42 " .org 2b+%1\n" \
43 ".previous\n" \
44 : : "i" (x), \
45 "i" (sizeof(struct bug_entry))); \
46} while (0)
47
48#endif /* CONFIG_DEBUG_BUGVERBOSE */
49
50#define BUG() __EMIT_BUG(0)
51
52#define WARN_ON(x) ({ \
53 typeof(x) __ret_warn_on = (x); \
54 if (__builtin_constant_p(__ret_warn_on)) { \
55 if (__ret_warn_on) \
56 __EMIT_BUG(BUGFLAG_WARNING); \
57 } else { \
58 if (unlikely(__ret_warn_on)) \
59 __EMIT_BUG(BUGFLAG_WARNING); \
60 } \
61 unlikely(__ret_warn_on); \
62})
63
22#define HAVE_ARCH_BUG 64#define HAVE_ARCH_BUG
23#endif 65#define HAVE_ARCH_WARN_ON
66#endif /* CONFIG_BUG */
24 67
25#include <asm-generic/bug.h> 68#include <asm-generic/bug.h>
26 69
27#endif 70#endif /* _ASM_S390_BUG_H */
diff --git a/include/asm-s390/ccwgroup.h b/include/asm-s390/ccwgroup.h
index d2f9c0d53a97..925b3ddfa141 100644
--- a/include/asm-s390/ccwgroup.h
+++ b/include/asm-s390/ccwgroup.h
@@ -11,6 +11,7 @@ struct ccwgroup_device {
11 CCWGROUP_ONLINE, 11 CCWGROUP_ONLINE,
12 } state; 12 } state;
13 atomic_t onoff; 13 atomic_t onoff;
14 struct mutex reg_mutex;
14 unsigned int count; /* number of attached slave devices */ 15 unsigned int count; /* number of attached slave devices */
15 struct device dev; /* master device */ 16 struct device dev; /* master device */
16 struct ccw_device *cdev[0]; /* variable number, allocate as needed */ 17 struct ccw_device *cdev[0]; /* variable number, allocate as needed */
diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h
new file mode 100644
index 000000000000..b203336fd892
--- /dev/null
+++ b/include/asm-s390/chpid.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/cio/chpid.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_CHPID_H
9#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
10
11#include <linux/string.h>
12#include <asm/types.h>
13#include <asm/cio.h>
14
15#define __MAX_CHPID 255
16
17struct chp_id {
18 u8 reserved1;
19 u8 cssid;
20 u8 reserved2;
21 u8 id;
22} __attribute__((packed));
23
24static inline void chp_id_init(struct chp_id *chpid)
25{
26 memset(chpid, 0, sizeof(struct chp_id));
27}
28
29static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
30{
31 return (a->id == b->id) && (a->cssid == b->cssid);
32}
33
34static inline void chp_id_next(struct chp_id *chpid)
35{
36 if (chpid->id < __MAX_CHPID)
37 chpid->id++;
38 else {
39 chpid->id = 0;
40 chpid->cssid++;
41 }
42}
43
44static inline int chp_id_is_valid(struct chp_id *chpid)
45{
46 return (chpid->cssid <= __MAX_CSSID);
47}
48
49
50#define chp_id_for_each(c) \
51 for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))
52
53#endif /* _ASM_S390_CHPID_H */
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index d92785030980..f738d2827582 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -13,6 +13,7 @@
13#ifdef __KERNEL__ 13#ifdef __KERNEL__
14 14
15#define LPM_ANYPATH 0xff 15#define LPM_ANYPATH 0xff
16#define __MAX_CSSID 0
16 17
17/* 18/*
18 * subchannel status word 19 * subchannel status word
@@ -292,6 +293,13 @@ extern void css_schedule_reprobe(void);
292 293
293extern void reipl_ccw_dev(struct ccw_dev_id *id); 294extern void reipl_ccw_dev(struct ccw_dev_id *id);
294 295
296struct cio_iplinfo {
297 u16 devno;
298 int is_qdio;
299};
300
301extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
302
295#endif 303#endif
296 304
297#endif 305#endif
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 0eb64083480a..bdcd448d43fb 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -8,6 +8,8 @@
8#define _ASM_S390_IPL_H 8#define _ASM_S390_IPL_H
9 9
10#include <asm/types.h> 10#include <asm/types.h>
11#include <asm/cio.h>
12#include <asm/setup.h>
11 13
12#define IPL_PARMBLOCK_ORIGIN 0x2000 14#define IPL_PARMBLOCK_ORIGIN 0x2000
13 15
@@ -74,12 +76,12 @@ struct ipl_parameter_block {
74} __attribute__((packed)); 76} __attribute__((packed));
75 77
76/* 78/*
77 * IPL validity flags and parameters as detected in head.S 79 * IPL validity flags
78 */ 80 */
79extern u32 ipl_flags; 81extern u32 ipl_flags;
80extern u16 ipl_devno;
81 82
82extern u32 dump_prefix_page; 83extern u32 dump_prefix_page;
84
83extern void do_reipl(void); 85extern void do_reipl(void);
84extern void ipl_save_parameters(void); 86extern void ipl_save_parameters(void);
85 87
@@ -89,6 +91,35 @@ enum {
89 IPL_NSS_VALID = 4, 91 IPL_NSS_VALID = 4,
90}; 92};
91 93
94enum ipl_type {
95 IPL_TYPE_UNKNOWN = 1,
96 IPL_TYPE_CCW = 2,
97 IPL_TYPE_FCP = 4,
98 IPL_TYPE_FCP_DUMP = 8,
99 IPL_TYPE_NSS = 16,
100};
101
102struct ipl_info
103{
104 enum ipl_type type;
105 union {
106 struct {
107 struct ccw_dev_id dev_id;
108 } ccw;
109 struct {
110 struct ccw_dev_id dev_id;
111 u64 wwpn;
112 u64 lun;
113 } fcp;
114 struct {
115 char name[NSS_NAME_SIZE + 1];
116 } nss;
117 } data;
118};
119
120extern struct ipl_info ipl_info;
121extern void setup_ipl_info(void);
122
92/* 123/*
93 * DIAG 308 support 124 * DIAG 308 support
94 */ 125 */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 4a31d0a7ee83..ffc9788a21a7 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -147,6 +147,52 @@ void pgm_check_handler(void);
147void mcck_int_handler(void); 147void mcck_int_handler(void);
148void io_int_handler(void); 148void io_int_handler(void);
149 149
150struct save_area_s390 {
151 u32 ext_save;
152 u64 timer;
153 u64 clk_cmp;
154 u8 pad1[24];
155 u8 psw[8];
156 u32 pref_reg;
157 u8 pad2[20];
158 u32 acc_regs[16];
159 u64 fp_regs[4];
160 u32 gp_regs[16];
161 u32 ctrl_regs[16];
162} __attribute__((packed));
163
164struct save_area_s390x {
165 u64 fp_regs[16];
166 u64 gp_regs[16];
167 u8 psw[16];
168 u8 pad1[8];
169 u32 pref_reg;
170 u32 fp_ctrl_reg;
171 u8 pad2[4];
172 u32 tod_reg;
173 u64 timer;
174 u64 clk_cmp;
175 u8 pad3[8];
176 u32 acc_regs[16];
177 u64 ctrl_regs[16];
178} __attribute__((packed));
179
180union save_area {
181 struct save_area_s390 s390;
182 struct save_area_s390x s390x;
183};
184
185#define SAVE_AREA_BASE_S390 0xd4
186#define SAVE_AREA_BASE_S390X 0x1200
187
188#ifndef __s390x__
189#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
190#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
191#else
192#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
193#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
194#endif
195
150struct _lowcore 196struct _lowcore
151{ 197{
152#ifndef __s390x__ 198#ifndef __s390x__
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 13c16546eff5..8fe8d42e64c3 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma,
753 * should therefore only be called if it is not mapped in any 753 * should therefore only be called if it is not mapped in any
754 * address space. 754 * address space.
755 */ 755 */
756static inline int page_test_and_clear_dirty(struct page *page) 756static inline int page_test_dirty(struct page *page)
757{ 757{
758 unsigned long physpage = page_to_phys(page); 758 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
759 int skey = page_get_storage_key(physpage); 759}
760 760
761 if (skey & _PAGE_CHANGED) 761static inline void page_clear_dirty(struct page *page)
762 page_set_storage_key(physpage, skey & ~_PAGE_CHANGED); 762{
763 return skey & _PAGE_CHANGED; 763 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
764} 764}
765 765
766/* 766/*
@@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
953#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 953#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
954#define __HAVE_ARCH_PTEP_SET_WRPROTECT 954#define __HAVE_ARCH_PTEP_SET_WRPROTECT
955#define __HAVE_ARCH_PTE_SAME 955#define __HAVE_ARCH_PTE_SAME
956#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 956#define __HAVE_ARCH_PAGE_TEST_DIRTY
957#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
957#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 958#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
958#include <asm-generic/pgtable.h> 959#include <asm-generic/pgtable.h>
959 960
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 33b80ced4bc1..e0fcea8c64c3 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -57,6 +57,7 @@ struct cpuinfo_S390
57 57
58extern void s390_adjust_jiffies(void); 58extern void s390_adjust_jiffies(void);
59extern void print_cpu_info(struct cpuinfo_S390 *); 59extern void print_cpu_info(struct cpuinfo_S390 *);
60extern int get_cpu_capability(unsigned int *);
60 61
61/* Lazy FPU handling on uni-processor */ 62/* Lazy FPU handling on uni-processor */
62extern struct task_struct *last_task_used_math; 63extern struct task_struct *last_task_used_math;
@@ -196,6 +197,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
196extern char *task_show_regs(struct task_struct *task, char *buffer); 197extern char *task_show_regs(struct task_struct *task, char *buffer);
197 198
198extern void show_registers(struct pt_regs *regs); 199extern void show_registers(struct pt_regs *regs);
200extern void show_code(struct pt_regs *regs);
199extern void show_trace(struct task_struct *task, unsigned long *sp); 201extern void show_trace(struct task_struct *task, unsigned long *sp);
200 202
201unsigned long get_wchan(struct task_struct *p); 203unsigned long get_wchan(struct task_struct *p);
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index 468b97018405..21ed64773210 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -9,6 +9,7 @@
9#define _ASM_S390_SCLP_H 9#define _ASM_S390_SCLP_H
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/chpid.h>
12 13
13struct sccb_header { 14struct sccb_header {
14 u16 length; 15 u16 length;
@@ -33,7 +34,20 @@ struct sclp_readinfo_sccb {
33 u8 _reserved3[4096 - 112]; /* 112-4095 */ 34 u8 _reserved3[4096 - 112]; /* 112-4095 */
34} __attribute__((packed, aligned(4096))); 35} __attribute__((packed, aligned(4096)));
35 36
37#define SCLP_CHP_INFO_MASK_SIZE 32
38
39struct sclp_chp_info {
40 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
41 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
42 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
43};
44
36extern struct sclp_readinfo_sccb s390_readinfo_sccb; 45extern struct sclp_readinfo_sccb s390_readinfo_sccb;
37extern void sclp_readinfo_early(void); 46extern void sclp_readinfo_early(void);
47extern int sclp_sdias_blk_count(void);
48extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
49extern int sclp_chp_configure(struct chp_id chpid);
50extern int sclp_chp_deconfigure(struct chp_id chpid);
51extern int sclp_chp_read_info(struct sclp_chp_info *info);
38 52
39#endif /* _ASM_S390_SCLP_H */ 53#endif /* _ASM_S390_SCLP_H */
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 44c7aee2bd34..a76a6b8fd887 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -40,6 +40,7 @@ struct mem_chunk {
40}; 40};
41 41
42extern struct mem_chunk memory_chunk[]; 42extern struct mem_chunk memory_chunk[];
43extern unsigned long real_memory_size;
43 44
44#ifdef CONFIG_S390_SWITCH_AMODE 45#ifdef CONFIG_S390_SWITCH_AMODE
45extern unsigned int switch_amode; 46extern unsigned int switch_amode;
@@ -77,6 +78,7 @@ extern unsigned long machine_flags;
77#endif /* __s390x__ */ 78#endif /* __s390x__ */
78 79
79#define MACHINE_HAS_SCLP (!MACHINE_IS_P390) 80#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
81#define ZFCPDUMP_HSA_SIZE (32UL<<20)
80 82
81/* 83/*
82 * Console mode. Override with conmode= 84 * Console mode. Override with conmode=
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index b957e4cda464..0a28e6d6ef40 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
54 54
55#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) 55#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
56 56
57extern int smp_get_cpu(cpumask_t cpu_map);
58extern void smp_put_cpu(int cpu);
59
60static inline __u16 hard_smp_processor_id(void) 57static inline __u16 hard_smp_processor_id(void)
61{ 58{
62 __u16 cpu_address; 59 __u16 cpu_address;
@@ -114,9 +111,8 @@ static inline void smp_send_stop(void)
114} 111}
115 112
116#define smp_cpu_not_running(cpu) 1 113#define smp_cpu_not_running(cpu) 1
117#define smp_get_cpu(cpu) ({ 0; })
118#define smp_put_cpu(cpu) ({ 0; })
119#define smp_setup_cpu_possible_map() do { } while (0) 114#define smp_setup_cpu_possible_map() do { } while (0)
120#endif 115#endif
121 116
117extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
122#endif 118#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9cd0d0eaf523..96326594e55d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -133,7 +133,7 @@
133static inline void SetPageUptodate(struct page *page) 133static inline void SetPageUptodate(struct page *page)
134{ 134{
135 if (!test_and_set_bit(PG_uptodate, &page->flags)) 135 if (!test_and_set_bit(PG_uptodate, &page->flags))
136 page_test_and_clear_dirty(page); 136 page_clear_dirty(page);
137} 137}
138#else 138#else
139#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) 139#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
diff --git a/mm/rmap.c b/mm/rmap.c
index b82146e6dfc9..59da5b734c80 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,8 +498,10 @@ int page_mkclean(struct page *page)
498 struct address_space *mapping = page_mapping(page); 498 struct address_space *mapping = page_mapping(page);
499 if (mapping) 499 if (mapping)
500 ret = page_mkclean_file(mapping, page); 500 ret = page_mkclean_file(mapping, page);
501 if (page_test_and_clear_dirty(page)) 501 if (page_test_dirty(page)) {
502 page_clear_dirty(page);
502 ret = 1; 503 ret = 1;
504 }
503 } 505 }
504 506
505 return ret; 507 return ret;
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
605 * Leaving it set also helps swapoff to reinstate ptes 607 * Leaving it set also helps swapoff to reinstate ptes
606 * faster for those pages still in swapcache. 608 * faster for those pages still in swapcache.
607 */ 609 */
608 if (page_test_and_clear_dirty(page)) 610 if (page_test_dirty(page)) {
611 page_clear_dirty(page);
609 set_page_dirty(page); 612 set_page_dirty(page);
613 }
610 __dec_zone_page_state(page, 614 __dec_zone_page_state(page,
611 PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); 615 PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
612 } 616 }