aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/s390/crypto/crypto-API.txt83
-rw-r--r--Documentation/s390/zfcpdump.txt87
-rw-r--r--arch/avr32/Kconfig13
-rw-r--r--arch/avr32/Makefile1
-rw-r--r--arch/avr32/boards/atngw100/Makefile1
-rw-r--r--arch/avr32/boards/atngw100/flash.c95
-rw-r--r--arch/avr32/boards/atngw100/setup.c124
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c4
-rw-r--r--arch/avr32/boards/atstk1000/setup.c30
-rw-r--r--arch/avr32/configs/atngw100_defconfig1085
-rw-r--r--arch/avr32/kernel/cpu.c64
-rw-r--r--arch/avr32/kernel/entry-avr32b.S124
-rw-r--r--arch/avr32/kernel/module.c11
-rw-r--r--arch/avr32/kernel/process.c193
-rw-r--r--arch/avr32/kernel/setup.c484
-rw-r--r--arch/avr32/kernel/time.c150
-rw-r--r--arch/avr32/kernel/traps.c421
-rw-r--r--arch/avr32/kernel/vmlinux.lds.c9
-rw-r--r--arch/avr32/mach-at32ap/Kconfig31
-rw-r--r--arch/avr32/mach-at32ap/Makefile1
-rw-r--r--arch/avr32/mach-at32ap/at32ap7000.c70
-rw-r--r--arch/avr32/mach-at32ap/hmatrix.h182
-rw-r--r--arch/avr32/mach-at32ap/hsmc.c23
-rw-r--r--arch/avr32/mach-at32ap/time-tc.c218
-rw-r--r--arch/avr32/mm/fault.c116
-rw-r--r--arch/avr32/mm/init.c238
-rw-r--r--arch/s390/Kconfig13
-rw-r--r--arch/s390/Makefile5
-rw-r--r--arch/s390/appldata/appldata_base.c38
-rw-r--r--arch/s390/crypto/sha1_s390.c129
-rw-r--r--arch/s390/crypto/sha256_s390.c38
-rw-r--r--arch/s390/defconfig3
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/compat_linux.c60
-rw-r--r--arch/s390/kernel/compat_signal.c14
-rw-r--r--arch/s390/kernel/dis.c1278
-rw-r--r--arch/s390/kernel/early.c7
-rw-r--r--arch/s390/kernel/entry.S87
-rw-r--r--arch/s390/kernel/entry64.S100
-rw-r--r--arch/s390/kernel/head64.S72
-rw-r--r--arch/s390/kernel/ipl.c253
-rw-r--r--arch/s390/kernel/module.c4
-rw-r--r--arch/s390/kernel/process.c82
-rw-r--r--arch/s390/kernel/setup.c38
-rw-r--r--arch/s390/kernel/signal.c10
-rw-r--r--arch/s390/kernel/smp.c369
-rw-r--r--arch/s390/kernel/sys_s390.c20
-rw-r--r--arch/s390/kernel/syscalls.S14
-rw-r--r--arch/s390/kernel/time.c34
-rw-r--r--arch/s390/kernel/traps.c72
-rw-r--r--arch/s390/kernel/vmlinux.lds.S10
-rw-r--r--arch/s390/kernel/vtime.c16
-rw-r--r--arch/s390/mm/fault.c331
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_devmap.c58
-rw-r--r--drivers/s390/char/Makefile5
-rw-r--r--drivers/s390/char/con3215.c7
-rw-r--r--drivers/s390/char/con3270.c7
-rw-r--r--drivers/s390/char/sclp.c10
-rw-r--r--drivers/s390/char/sclp.h72
-rw-r--r--drivers/s390/char/sclp_chp.c196
-rw-r--r--drivers/s390/char/sclp_config.c75
-rw-r--r--drivers/s390/char/sclp_cpi.c4
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/sclp_rw.c16
-rw-r--r--drivers/s390/char/sclp_sdias.c255
-rw-r--r--drivers/s390/char/sclp_tty.c6
-rw-r--r--drivers/s390/char/sclp_vt220.c8
-rw-r--r--drivers/s390/char/vmlogrdr.c9
-rw-r--r--drivers/s390/char/zcore.c651
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/ccwgroup.c33
-rw-r--r--drivers/s390/cio/chp.c683
-rw-r--r--drivers/s390/cio/chp.h53
-rw-r--r--drivers/s390/cio/chsc.c1024
-rw-r--r--drivers/s390/cio/chsc.h42
-rw-r--r--drivers/s390/cio/cio.c52
-rw-r--r--drivers/s390/cio/cio.h17
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/cio/css.c201
-rw-r--r--drivers/s390/cio/css.h16
-rw-r--r--drivers/s390/cio/device.c246
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c7
-rw-r--r--drivers/s390/cio/idset.c112
-rw-r--r--drivers/s390/cio/idset.h25
-rw-r--r--drivers/s390/cio/ioasm.h5
-rw-r--r--drivers/s390/net/ctcmain.c23
-rw-r--r--drivers/s390/s390mach.c25
-rw-r--r--drivers/s390/sysinfo.c18
-rw-r--r--include/asm-avr32/arch-at32ap/io.h39
-rw-r--r--include/asm-avr32/arch-at32ap/smc.h22
-rw-r--r--include/asm-avr32/arch-at32ap/time.h112
-rw-r--r--include/asm-avr32/atomic.h2
-rw-r--r--include/asm-avr32/bug.h50
-rw-r--r--include/asm-avr32/io.h326
-rw-r--r--include/asm-avr32/processor.h15
-rw-r--r--include/asm-avr32/setup.h13
-rw-r--r--include/asm-avr32/sysreg.h543
-rw-r--r--include/asm-avr32/system.h13
-rw-r--r--include/asm-avr32/thread_info.h2
-rw-r--r--include/asm-avr32/uaccess.h13
-rw-r--r--include/asm-generic/pgtable.h11
-rw-r--r--include/asm-s390/bug.h69
-rw-r--r--include/asm-s390/ccwgroup.h1
-rw-r--r--include/asm-s390/chpid.h53
-rw-r--r--include/asm-s390/cio.h8
-rw-r--r--include/asm-s390/ipl.h35
-rw-r--r--include/asm-s390/lowcore.h46
-rw-r--r--include/asm-s390/pgtable.h15
-rw-r--r--include/asm-s390/processor.h2
-rw-r--r--include/asm-s390/sclp.h14
-rw-r--r--include/asm-s390/setup.h2
-rw-r--r--include/asm-s390/smp.h6
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/rmap.c8
117 files changed, 8801 insertions, 3560 deletions
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
deleted file mode 100644
index 71ae6ca9f2c2..000000000000
--- a/Documentation/s390/crypto/crypto-API.txt
+++ /dev/null
@@ -1,83 +0,0 @@
1crypto-API support for z990 Message Security Assist (MSA) instructions
2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3
4AUTHOR: Thomas Spatzier (tspat@de.ibm.com)
5
6
71. Introduction crypto-API
8~~~~~~~~~~~~~~~~~~~~~~~~~~
9See Documentation/crypto/api-intro.txt for an introduction/description of the
10kernel crypto API.
11According to api-intro.txt support for z990 crypto instructions has been added
12in the algorithm api layer of the crypto API. Several files containing z990
13optimized implementations of crypto algorithms are placed in the
14arch/s390/crypto directory.
15
16
172. Probing for availability of MSA
18~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19It should be possible to use Kernels with the z990 crypto implementations both
20on machines with MSA available and on those without MSA (pre z990 or z990
21without MSA). Therefore a simple probing mechanism has been implemented:
22In the init function of each crypto module the availability of MSA and of the
23respective crypto algorithm in particular will be tested. If the algorithm is
24available the module will load and register its algorithm with the crypto API.
25
26If the respective crypto algorithm is not available, the init function will
27return -ENOSYS. In that case a fallback to the standard software implementation
28of the crypto algorithm must be taken ( -> the standard crypto modules are
29also built when compiling the kernel).
30
31
323. Ensuring z990 crypto module preference
33~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
34If z990 crypto instructions are available the optimized modules should be
35preferred instead of standard modules.
36
373.1. compiled-in modules
38~~~~~~~~~~~~~~~~~~~~~~~~
39For compiled-in modules it has to be ensured that the z990 modules are linked
40before the standard crypto modules. Then, on system startup the init functions
41of z990 crypto modules will be called first and query for availability of z990
42crypto instructions. If instruction is available, the z990 module will register
43its crypto algorithm implementation -> the load of the standard module will fail
44since the algorithm is already registered.
45If z990 crypto instruction is not available the load of the z990 module will
46fail -> the standard module will load and register its algorithm.
47
483.2. dynamic modules
49~~~~~~~~~~~~~~~~~~~~
50A system administrator has to take care of giving preference to z990 crypto
51modules. If MSA is available appropriate lines have to be added to
52/etc/modprobe.conf.
53
54Example: z990 crypto instruction for SHA1 algorithm is available
55
56 add the following line to /etc/modprobe.conf (assuming the
57 z990 crypto modules for SHA1 is called sha1_z990):
58
59 alias sha1 sha1_z990
60
61 -> when the sha1 algorithm is requested through the crypto API
62 (which has a module autoloader) the z990 module will be loaded.
63
64TBD: a userspace module probing mechanism
65 something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
66 -> try module sha1_z990, if it fails to load standard module sha1
67 the 'probe' statement is currently not supported in modprobe.conf
68
69
704. Currently implemented z990 crypto algorithms
71~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
72The following crypto algorithms with z990 MSA support are currently implemented.
73The name of each algorithm under which it is registered in crypto API and the
74name of the respective module is given in square brackets.
75
76- SHA1 Digest Algorithm [sha1 -> sha1_z990]
77- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
78- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
79- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
80
81In order to load, for example, the sha1_z990 module when the sha1 algorithm is
82requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
83
diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt
new file mode 100644
index 000000000000..cf45d27c4608
--- /dev/null
+++ b/Documentation/s390/zfcpdump.txt
@@ -0,0 +1,87 @@
1s390 SCSI dump tool (zfcpdump)
2
3System z machines (z900 or higher) provide hardware support for creating system
4dumps on SCSI disks. The dump process is initiated by booting a dump tool, which
5has to create a dump of the current (probably crashed) Linux image. In order to
6not overwrite memory of the crashed Linux with data of the dump tool, the
7hardware saves some memory plus the register sets of the boot cpu before the
8dump tool is loaded. There exists an SCLP hardware interface to obtain the saved
9memory afterwards. Currently 32 MB are saved.
10
11This zfcpdump implementation consists of a Linux dump kernel together with
12a userspace dump tool, which are loaded together into the saved memory region
13below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in
14the s390-tools package) to make the device bootable. The operator of a Linux
15system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump
16resides on.
17
18The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
19which exports memory and registers of the crashed Linux in an s390
20standalone dump format. It can be used in the same way as e.g. /dev/mem. The
21dump format defines a 4K header followed by plain uncompressed memory. The
22register sets are stored in the prefix pages of the respective cpus. To build a
23dump enabled kernel with the zcore driver, the kernel config option
24CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of
25memory, which has been saved by hardware is read by the driver via the SCLP
26hardware interface. The second part is just copied from the non overwritten real
27memory.
28
29The userspace application of zfcpdump can reside e.g. in an intitramfs or an
30initrd. It reads from zcore/mem and writes the system dump to a file on a
31SCSI disk.
32
33To build a zfcpdump kernel use the following settings in your kernel
34configuration:
35 * CONFIG_ZFCPDUMP=y
36 * Enable ZFCP driver
37 * Enable SCSI driver
38 * Enable ext2 and ext3 filesystems
39 * Disable as many features as possible to keep the kernel small.
40 E.g. network support is not needed at all.
41
42To use the zfcpdump userspace application in an initramfs you have to do the
43following:
44
45 * Copy the zfcpdump executable somewhere into your Linux tree.
46 E.g. to "arch/s390/boot/zfcpdump. If you do not want to include
47 shared libraries, compile the tool with the "-static" gcc option.
48 * If you want to include e2fsck, add it to your source tree, too. The zfcpdump
49 application attempts to start /sbin/e2fsck from the ramdisk.
50 * Use an initramfs config file like the following:
51
52 dir /dev 755 0 0
53 nod /dev/console 644 0 0 c 5 1
54 nod /dev/null 644 0 0 c 1 3
55 nod /dev/sda1 644 0 0 b 8 1
56 nod /dev/sda2 644 0 0 b 8 2
57 nod /dev/sda3 644 0 0 b 8 3
58 nod /dev/sda4 644 0 0 b 8 4
59 nod /dev/sda5 644 0 0 b 8 5
60 nod /dev/sda6 644 0 0 b 8 6
61 nod /dev/sda7 644 0 0 b 8 7
62 nod /dev/sda8 644 0 0 b 8 8
63 nod /dev/sda9 644 0 0 b 8 9
64 nod /dev/sda10 644 0 0 b 8 10
65 nod /dev/sda11 644 0 0 b 8 11
66 nod /dev/sda12 644 0 0 b 8 12
67 nod /dev/sda13 644 0 0 b 8 13
68 nod /dev/sda14 644 0 0 b 8 14
69 nod /dev/sda15 644 0 0 b 8 15
70 file /init arch/s390/boot/zfcpdump 755 0 0
71 file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0
72 dir /proc 755 0 0
73 dir /sys 755 0 0
74 dir /mnt 755 0 0
75 dir /sbin 755 0 0
76
77 * Issue "make image" to build the zfcpdump image with initramfs.
78
79In a Linux distribution the zfcpdump enabled kernel image must be copied to
80/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the
81dump kernel when preparing a SCSI dump disk.
82
83If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd".
84
85For more information on how to use zfcpdump refer to the s390 'Using the Dump
86Tools book', which is available from
87http://www.ibm.com/developerworks/linux/linux390.
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index ce4013aee59b..3ec76586877e 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -57,9 +57,6 @@ config ARCH_HAS_ILOG2_U64
57 bool 57 bool
58 default n 58 default n
59 59
60config GENERIC_BUST_SPINLOCK
61 bool
62
63config GENERIC_HWEIGHT 60config GENERIC_HWEIGHT
64 bool 61 bool
65 default y 62 default y
@@ -68,6 +65,11 @@ config GENERIC_CALIBRATE_DELAY
68 bool 65 bool
69 default y 66 default y
70 67
68config GENERIC_BUG
69 bool
70 default y
71 depends on BUG
72
71source "init/Kconfig" 73source "init/Kconfig"
72 74
73menu "System Type and features" 75menu "System Type and features"
@@ -106,6 +108,9 @@ choice
106config BOARD_ATSTK1000 108config BOARD_ATSTK1000
107 bool "ATSTK1000 evaluation board" 109 bool "ATSTK1000 evaluation board"
108 select BOARD_ATSTK1002 if CPU_AT32AP7000 110 select BOARD_ATSTK1002 if CPU_AT32AP7000
111
112config BOARD_ATNGW100
113 bool "ATNGW100 Network Gateway"
109endchoice 114endchoice
110 115
111choice 116choice
@@ -116,6 +121,8 @@ config LOADER_U_BOOT
116 bool "U-Boot (or similar) bootloader" 121 bool "U-Boot (or similar) bootloader"
117endchoice 122endchoice
118 123
124source "arch/avr32/mach-at32ap/Kconfig"
125
119config LOAD_ADDRESS 126config LOAD_ADDRESS
120 hex 127 hex
121 default 0x10000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y 128 default 0x10000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 7b842e98efed..6115fc1f0cfa 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -27,6 +27,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
27head-y += arch/avr32/kernel/head.o 27head-y += arch/avr32/kernel/head.o
28core-$(CONFIG_PLATFORM_AT32AP) += arch/avr32/mach-at32ap/ 28core-$(CONFIG_PLATFORM_AT32AP) += arch/avr32/mach-at32ap/
29core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/ 29core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
30core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/
30core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/ 31core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/
31core-y += arch/avr32/kernel/ 32core-y += arch/avr32/kernel/
32core-y += arch/avr32/mm/ 33core-y += arch/avr32/mm/
diff --git a/arch/avr32/boards/atngw100/Makefile b/arch/avr32/boards/atngw100/Makefile
new file mode 100644
index 000000000000..c740aa116755
--- /dev/null
+++ b/arch/avr32/boards/atngw100/Makefile
@@ -0,0 +1 @@
obj-y += setup.o flash.o
diff --git a/arch/avr32/boards/atngw100/flash.c b/arch/avr32/boards/atngw100/flash.c
new file mode 100644
index 000000000000..f9b32a8eab9b
--- /dev/null
+++ b/arch/avr32/boards/atngw100/flash.c
@@ -0,0 +1,95 @@
1/*
2 * ATNGW100 board-specific flash initialization
3 *
4 * Copyright (C) 2005-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/partitions.h>
14#include <linux/mtd/physmap.h>
15
16#include <asm/arch/smc.h>
17
18static struct smc_config flash_config __initdata = {
19 .ncs_read_setup = 0,
20 .nrd_setup = 40,
21 .ncs_write_setup = 0,
22 .nwe_setup = 10,
23
24 .ncs_read_pulse = 80,
25 .nrd_pulse = 40,
26 .ncs_write_pulse = 65,
27 .nwe_pulse = 55,
28
29 .read_cycle = 120,
30 .write_cycle = 120,
31
32 .bus_width = 2,
33 .nrd_controlled = 1,
34 .nwe_controlled = 1,
35 .byte_write = 1,
36};
37
38static struct mtd_partition flash_parts[] = {
39 {
40 .name = "u-boot",
41 .offset = 0x00000000,
42 .size = 0x00020000, /* 128 KiB */
43 .mask_flags = MTD_WRITEABLE,
44 },
45 {
46 .name = "root",
47 .offset = 0x00020000,
48 .size = 0x007d0000,
49 },
50 {
51 .name = "env",
52 .offset = 0x007f0000,
53 .size = 0x00010000,
54 .mask_flags = MTD_WRITEABLE,
55 },
56};
57
58static struct physmap_flash_data flash_data = {
59 .width = 2,
60 .nr_parts = ARRAY_SIZE(flash_parts),
61 .parts = flash_parts,
62};
63
64static struct resource flash_resource = {
65 .start = 0x00000000,
66 .end = 0x007fffff,
67 .flags = IORESOURCE_MEM,
68};
69
70static struct platform_device flash_device = {
71 .name = "physmap-flash",
72 .id = 0,
73 .resource = &flash_resource,
74 .num_resources = 1,
75 .dev = {
76 .platform_data = &flash_data,
77 },
78};
79
80/* This needs to be called after the SMC has been initialized */
81static int __init atngw100_flash_init(void)
82{
83 int ret;
84
85 ret = smc_set_configuration(0, &flash_config);
86 if (ret < 0) {
87 printk(KERN_ERR "atngw100: failed to set NOR flash timing\n");
88 return ret;
89 }
90
91 platform_device_register(&flash_device);
92
93 return 0;
94}
95device_initcall(atngw100_flash_init);
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
new file mode 100644
index 000000000000..9bc37d4f6687
--- /dev/null
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -0,0 +1,124 @@
1/*
2 * Board-specific setup code for the ATNGW100 Network Gateway
3 *
4 * Copyright (C) 2005-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/clk.h>
11#include <linux/etherdevice.h>
12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <linux/platform_device.h>
15#include <linux/types.h>
16#include <linux/spi/spi.h>
17
18#include <asm/io.h>
19#include <asm/setup.h>
20
21#include <asm/arch/at32ap7000.h>
22#include <asm/arch/board.h>
23#include <asm/arch/init.h>
24
25/* Initialized by bootloader-specific startup code. */
26struct tag *bootloader_tags __initdata;
27
28struct eth_addr {
29 u8 addr[6];
30};
31static struct eth_addr __initdata hw_addr[2];
32static struct eth_platform_data __initdata eth_data[2];
33
34static struct spi_board_info spi0_board_info[] __initdata = {
35 {
36 .modalias = "mtd_dataflash",
37 .max_speed_hz = 10000000,
38 .chip_select = 0,
39 },
40};
41
42/*
43 * The next two functions should go away as the boot loader is
44 * supposed to initialize the macb address registers with a valid
45 * ethernet address. But we need to keep it around for a while until
46 * we can be reasonably sure the boot loader does this.
47 *
48 * The phy_id is ignored as the driver will probe for it.
49 */
50static int __init parse_tag_ethernet(struct tag *tag)
51{
52 int i;
53
54 i = tag->u.ethernet.mac_index;
55 if (i < ARRAY_SIZE(hw_addr))
56 memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
57 sizeof(hw_addr[i].addr));
58
59 return 0;
60}
61__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
62
63static void __init set_hw_addr(struct platform_device *pdev)
64{
65 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
66 const u8 *addr;
67 void __iomem *regs;
68 struct clk *pclk;
69
70 if (!res)
71 return;
72 if (pdev->id >= ARRAY_SIZE(hw_addr))
73 return;
74
75 addr = hw_addr[pdev->id].addr;
76 if (!is_valid_ether_addr(addr))
77 return;
78
79 /*
80 * Since this is board-specific code, we'll cheat and use the
81 * physical address directly as we happen to know that it's
82 * the same as the virtual address.
83 */
84 regs = (void __iomem __force *)res->start;
85 pclk = clk_get(&pdev->dev, "pclk");
86 if (!pclk)
87 return;
88
89 clk_enable(pclk);
90 __raw_writel((addr[3] << 24) | (addr[2] << 16)
91 | (addr[1] << 8) | addr[0], regs + 0x98);
92 __raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
93 clk_disable(pclk);
94 clk_put(pclk);
95}
96
97struct platform_device *at32_usart_map[1];
98unsigned int at32_nr_usarts = 1;
99
100void __init setup_board(void)
101{
102 at32_map_usart(1, 0); /* USART 1: /dev/ttyS0, DB9 */
103 at32_setup_serial_console(0);
104}
105
106static int __init atngw100_init(void)
107{
108 /*
109 * ATNGW100 uses 16-bit SDRAM interface, so we don't need to
110 * reserve any pins for it.
111 */
112
113 at32_add_system_devices();
114
115 at32_add_device_usart(0);
116
117 set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
118 set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
119
120 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
121
122 return 0;
123}
124postcore_initcall(atngw100_init);
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 5974768a59e5..abe6ca203fa7 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -33,7 +33,7 @@ struct eth_addr {
33static struct eth_addr __initdata hw_addr[2]; 33static struct eth_addr __initdata hw_addr[2];
34 34
35static struct eth_platform_data __initdata eth_data[2]; 35static struct eth_platform_data __initdata eth_data[2];
36extern struct lcdc_platform_data atstk1000_fb0_data; 36static struct lcdc_platform_data atstk1000_fb0_data;
37 37
38static struct spi_board_info spi0_board_info[] __initdata = { 38static struct spi_board_info spi0_board_info[] __initdata = {
39 { 39 {
@@ -148,6 +148,8 @@ static int __init atstk1002_init(void)
148 set_hw_addr(at32_add_device_eth(0, &eth_data[0])); 148 set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
149 149
150 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); 150 at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
151 atstk1000_fb0_data.fbmem_start = fbmem_start;
152 atstk1000_fb0_data.fbmem_size = fbmem_size;
151 at32_add_device_lcdc(0, &atstk1000_fb0_data); 153 at32_add_device_lcdc(0, &atstk1000_fb0_data);
152 154
153 return 0; 155 return 0;
diff --git a/arch/avr32/boards/atstk1000/setup.c b/arch/avr32/boards/atstk1000/setup.c
index 272c011802a7..2bc4b88d7edb 100644
--- a/arch/avr32/boards/atstk1000/setup.c
+++ b/arch/avr32/boards/atstk1000/setup.c
@@ -18,33 +18,3 @@
18 18
19/* Initialized by bootloader-specific startup code. */ 19/* Initialized by bootloader-specific startup code. */
20struct tag *bootloader_tags __initdata; 20struct tag *bootloader_tags __initdata;
21
22struct lcdc_platform_data __initdata atstk1000_fb0_data;
23
24void __init board_setup_fbmem(unsigned long fbmem_start,
25 unsigned long fbmem_size)
26{
27 if (!fbmem_size)
28 return;
29
30 if (!fbmem_start) {
31 void *fbmem;
32
33 fbmem = alloc_bootmem_low_pages(fbmem_size);
34 fbmem_start = __pa(fbmem);
35 } else {
36 pg_data_t *pgdat;
37
38 for_each_online_pgdat(pgdat) {
39 if (fbmem_start >= pgdat->bdata->node_boot_start
40 && fbmem_start <= pgdat->bdata->node_low_pfn)
41 reserve_bootmem_node(pgdat, fbmem_start,
42 fbmem_size);
43 }
44 }
45
46 printk("%luKiB framebuffer memory at address 0x%08lx\n",
47 fbmem_size >> 10, fbmem_start);
48 atstk1000_fb0_data.fbmem_start = fbmem_start;
49 atstk1000_fb0_data.fbmem_size = fbmem_size;
50}
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
new file mode 100644
index 000000000000..c254ffcfa458
--- /dev/null
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -0,0 +1,1085 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.21-rc6
4# Thu Apr 12 16:35:07 2007
5#
6CONFIG_AVR32=y
7CONFIG_GENERIC_GPIO=y
8CONFIG_GENERIC_HARDIRQS=y
9CONFIG_HARDIRQS_SW_RESEND=y
10CONFIG_GENERIC_IRQ_PROBE=y
11CONFIG_RWSEM_GENERIC_SPINLOCK=y
12CONFIG_GENERIC_TIME=y
13# CONFIG_ARCH_HAS_ILOG2_U32 is not set
14# CONFIG_ARCH_HAS_ILOG2_U64 is not set
15CONFIG_GENERIC_HWEIGHT=y
16CONFIG_GENERIC_CALIBRATE_DELAY=y
17CONFIG_GENERIC_BUG=y
18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
19
20#
21# Code maturity level options
22#
23CONFIG_EXPERIMENTAL=y
24CONFIG_BROKEN_ON_SMP=y
25CONFIG_INIT_ENV_ARG_LIMIT=32
26
27#
28# General setup
29#
30CONFIG_LOCALVERSION=""
31# CONFIG_LOCALVERSION_AUTO is not set
32CONFIG_SWAP=y
33CONFIG_SYSVIPC=y
34# CONFIG_IPC_NS is not set
35CONFIG_SYSVIPC_SYSCTL=y
36CONFIG_POSIX_MQUEUE=y
37CONFIG_BSD_PROCESS_ACCT=y
38CONFIG_BSD_PROCESS_ACCT_V3=y
39# CONFIG_TASKSTATS is not set
40# CONFIG_UTS_NS is not set
41# CONFIG_AUDIT is not set
42# CONFIG_IKCONFIG is not set
43CONFIG_SYSFS_DEPRECATED=y
44# CONFIG_RELAY is not set
45CONFIG_BLK_DEV_INITRD=y
46CONFIG_INITRAMFS_SOURCE=""
47CONFIG_CC_OPTIMIZE_FOR_SIZE=y
48CONFIG_SYSCTL=y
49CONFIG_EMBEDDED=y
50# CONFIG_SYSCTL_SYSCALL is not set
51CONFIG_KALLSYMS=y
52# CONFIG_KALLSYMS_ALL is not set
53# CONFIG_KALLSYMS_EXTRA_PASS is not set
54CONFIG_HOTPLUG=y
55CONFIG_PRINTK=y
56CONFIG_BUG=y
57CONFIG_ELF_CORE=y
58# CONFIG_BASE_FULL is not set
59CONFIG_FUTEX=y
60CONFIG_EPOLL=y
61CONFIG_SHMEM=y
62CONFIG_SLAB=y
63CONFIG_VM_EVENT_COUNTERS=y
64CONFIG_RT_MUTEXES=y
65# CONFIG_TINY_SHMEM is not set
66CONFIG_BASE_SMALL=1
67# CONFIG_SLOB is not set
68
69#
70# Loadable module support
71#
72CONFIG_MODULES=y
73CONFIG_MODULE_UNLOAD=y
74CONFIG_MODULE_FORCE_UNLOAD=y
75# CONFIG_MODVERSIONS is not set
76# CONFIG_MODULE_SRCVERSION_ALL is not set
77CONFIG_KMOD=y
78
79#
80# Block layer
81#
82CONFIG_BLOCK=y
83# CONFIG_LBD is not set
84# CONFIG_BLK_DEV_IO_TRACE is not set
85# CONFIG_LSF is not set
86
87#
88# IO Schedulers
89#
90CONFIG_IOSCHED_NOOP=y
91# CONFIG_IOSCHED_AS is not set
92# CONFIG_IOSCHED_DEADLINE is not set
93CONFIG_IOSCHED_CFQ=y
94# CONFIG_DEFAULT_AS is not set
95# CONFIG_DEFAULT_DEADLINE is not set
96CONFIG_DEFAULT_CFQ=y
97# CONFIG_DEFAULT_NOOP is not set
98CONFIG_DEFAULT_IOSCHED="cfq"
99
100#
101# System Type and features
102#
103CONFIG_SUBARCH_AVR32B=y
104CONFIG_MMU=y
105CONFIG_PERFORMANCE_COUNTERS=y
106CONFIG_PLATFORM_AT32AP=y
107CONFIG_CPU_AT32AP7000=y
108# CONFIG_BOARD_ATSTK1000 is not set
109CONFIG_BOARD_ATNGW100=y
110CONFIG_LOADER_U_BOOT=y
111
112#
113# Atmel AVR32 AP options
114#
115# CONFIG_AP7000_32_BIT_SMC is not set
116CONFIG_AP7000_16_BIT_SMC=y
117# CONFIG_AP7000_8_BIT_SMC is not set
118CONFIG_LOAD_ADDRESS=0x10000000
119CONFIG_ENTRY_ADDRESS=0x90000000
120CONFIG_PHYS_OFFSET=0x10000000
121CONFIG_PREEMPT_NONE=y
122# CONFIG_PREEMPT_VOLUNTARY is not set
123# CONFIG_PREEMPT is not set
124# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
125# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
126# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
127CONFIG_ARCH_FLATMEM_ENABLE=y
128# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
129# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
130CONFIG_SELECT_MEMORY_MODEL=y
131CONFIG_FLATMEM_MANUAL=y
132# CONFIG_DISCONTIGMEM_MANUAL is not set
133# CONFIG_SPARSEMEM_MANUAL is not set
134CONFIG_FLATMEM=y
135CONFIG_FLAT_NODE_MEM_MAP=y
136# CONFIG_SPARSEMEM_STATIC is not set
137CONFIG_SPLIT_PTLOCK_CPUS=4
138# CONFIG_RESOURCES_64BIT is not set
139CONFIG_ZONE_DMA_FLAG=0
140# CONFIG_OWNERSHIP_TRACE is not set
141# CONFIG_HZ_100 is not set
142CONFIG_HZ_250=y
143# CONFIG_HZ_300 is not set
144# CONFIG_HZ_1000 is not set
145CONFIG_HZ=250
146CONFIG_CMDLINE=""
147
148#
149# Bus options
150#
151
152#
153# PCCARD (PCMCIA/CardBus) support
154#
155# CONFIG_PCCARD is not set
156
157#
158# Executable file formats
159#
160CONFIG_BINFMT_ELF=y
161# CONFIG_BINFMT_MISC is not set
162
163#
164# Networking
165#
166CONFIG_NET=y
167
168#
169# Networking options
170#
171# CONFIG_NETDEBUG is not set
172CONFIG_PACKET=y
173CONFIG_PACKET_MMAP=y
174CONFIG_UNIX=y
175CONFIG_XFRM=y
176CONFIG_XFRM_USER=y
177# CONFIG_XFRM_SUB_POLICY is not set
178# CONFIG_XFRM_MIGRATE is not set
179CONFIG_NET_KEY=y
180# CONFIG_NET_KEY_MIGRATE is not set
181CONFIG_INET=y
182CONFIG_IP_MULTICAST=y
183CONFIG_IP_ADVANCED_ROUTER=y
184CONFIG_ASK_IP_FIB_HASH=y
185# CONFIG_IP_FIB_TRIE is not set
186CONFIG_IP_FIB_HASH=y
187# CONFIG_IP_MULTIPLE_TABLES is not set
188# CONFIG_IP_ROUTE_MULTIPATH is not set
189# CONFIG_IP_ROUTE_VERBOSE is not set
190CONFIG_IP_PNP=y
191CONFIG_IP_PNP_DHCP=y
192# CONFIG_IP_PNP_BOOTP is not set
193# CONFIG_IP_PNP_RARP is not set
194# CONFIG_NET_IPIP is not set
195# CONFIG_NET_IPGRE is not set
196CONFIG_IP_MROUTE=y
197CONFIG_IP_PIMSM_V1=y
198# CONFIG_IP_PIMSM_V2 is not set
199# CONFIG_ARPD is not set
200CONFIG_SYN_COOKIES=y
201CONFIG_INET_AH=y
202CONFIG_INET_ESP=y
203CONFIG_INET_IPCOMP=y
204CONFIG_INET_XFRM_TUNNEL=y
205CONFIG_INET_TUNNEL=y
206CONFIG_INET_XFRM_MODE_TRANSPORT=y
207CONFIG_INET_XFRM_MODE_TUNNEL=y
208CONFIG_INET_XFRM_MODE_BEET=y
209CONFIG_INET_DIAG=y
210CONFIG_INET_TCP_DIAG=y
211# CONFIG_TCP_CONG_ADVANCED is not set
212CONFIG_TCP_CONG_CUBIC=y
213CONFIG_DEFAULT_TCP_CONG="cubic"
214# CONFIG_TCP_MD5SIG is not set
215
216#
217# IP: Virtual Server Configuration
218#
219# CONFIG_IP_VS is not set
220CONFIG_IPV6=y
221# CONFIG_IPV6_PRIVACY is not set
222# CONFIG_IPV6_ROUTER_PREF is not set
223CONFIG_INET6_AH=y
224CONFIG_INET6_ESP=y
225CONFIG_INET6_IPCOMP=y
226# CONFIG_IPV6_MIP6 is not set
227CONFIG_INET6_XFRM_TUNNEL=y
228CONFIG_INET6_TUNNEL=y
229CONFIG_INET6_XFRM_MODE_TRANSPORT=y
230CONFIG_INET6_XFRM_MODE_TUNNEL=y
231CONFIG_INET6_XFRM_MODE_BEET=y
232# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
233CONFIG_IPV6_SIT=y
234# CONFIG_IPV6_TUNNEL is not set
235# CONFIG_IPV6_MULTIPLE_TABLES is not set
236# CONFIG_NETWORK_SECMARK is not set
237CONFIG_NETFILTER=y
238# CONFIG_NETFILTER_DEBUG is not set
239
240#
241# Core Netfilter Configuration
242#
243# CONFIG_NETFILTER_NETLINK is not set
244CONFIG_NF_CONNTRACK_ENABLED=m
245CONFIG_NF_CONNTRACK_SUPPORT=y
246# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
247CONFIG_NF_CONNTRACK=m
248CONFIG_NF_CT_ACCT=y
249CONFIG_NF_CONNTRACK_MARK=y
250# CONFIG_NF_CONNTRACK_EVENTS is not set
251CONFIG_NF_CT_PROTO_GRE=m
252# CONFIG_NF_CT_PROTO_SCTP is not set
253CONFIG_NF_CONNTRACK_AMANDA=m
254CONFIG_NF_CONNTRACK_FTP=m
255CONFIG_NF_CONNTRACK_H323=m
256CONFIG_NF_CONNTRACK_IRC=m
257CONFIG_NF_CONNTRACK_NETBIOS_NS=m
258CONFIG_NF_CONNTRACK_PPTP=m
259CONFIG_NF_CONNTRACK_SANE=m
260CONFIG_NF_CONNTRACK_SIP=m
261CONFIG_NF_CONNTRACK_TFTP=m
262CONFIG_NETFILTER_XTABLES=y
263CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
264# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
265# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
266CONFIG_NETFILTER_XT_TARGET_MARK=m
267CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
268CONFIG_NETFILTER_XT_TARGET_NFLOG=m
269# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
270CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
271CONFIG_NETFILTER_XT_MATCH_COMMENT=m
272CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
273CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
274CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
275# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
276# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
277CONFIG_NETFILTER_XT_MATCH_ESP=m
278CONFIG_NETFILTER_XT_MATCH_HELPER=m
279CONFIG_NETFILTER_XT_MATCH_LENGTH=m
280CONFIG_NETFILTER_XT_MATCH_LIMIT=m
281CONFIG_NETFILTER_XT_MATCH_MAC=m
282CONFIG_NETFILTER_XT_MATCH_MARK=m
283CONFIG_NETFILTER_XT_MATCH_POLICY=m
284CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
285CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
286CONFIG_NETFILTER_XT_MATCH_QUOTA=m
287CONFIG_NETFILTER_XT_MATCH_REALM=m
288# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
289CONFIG_NETFILTER_XT_MATCH_STATE=m
290CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
291CONFIG_NETFILTER_XT_MATCH_STRING=m
292CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
293CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
294
295#
296# IP: Netfilter Configuration
297#
298CONFIG_NF_CONNTRACK_IPV4=m
299CONFIG_NF_CONNTRACK_PROC_COMPAT=y
300# CONFIG_IP_NF_QUEUE is not set
301CONFIG_IP_NF_IPTABLES=m
302CONFIG_IP_NF_MATCH_IPRANGE=m
303CONFIG_IP_NF_MATCH_TOS=m
304CONFIG_IP_NF_MATCH_RECENT=m
305CONFIG_IP_NF_MATCH_ECN=m
306CONFIG_IP_NF_MATCH_AH=m
307CONFIG_IP_NF_MATCH_TTL=m
308CONFIG_IP_NF_MATCH_OWNER=m
309CONFIG_IP_NF_MATCH_ADDRTYPE=m
310CONFIG_IP_NF_FILTER=m
311CONFIG_IP_NF_TARGET_REJECT=m
312CONFIG_IP_NF_TARGET_LOG=m
313# CONFIG_IP_NF_TARGET_ULOG is not set
314CONFIG_NF_NAT=m
315CONFIG_NF_NAT_NEEDED=y
316CONFIG_IP_NF_TARGET_MASQUERADE=m
317CONFIG_IP_NF_TARGET_REDIRECT=m
318CONFIG_IP_NF_TARGET_NETMAP=m
319CONFIG_IP_NF_TARGET_SAME=m
320CONFIG_NF_NAT_SNMP_BASIC=m
321CONFIG_NF_NAT_PROTO_GRE=m
322CONFIG_NF_NAT_FTP=m
323CONFIG_NF_NAT_IRC=m
324CONFIG_NF_NAT_TFTP=m
325CONFIG_NF_NAT_AMANDA=m
326CONFIG_NF_NAT_PPTP=m
327CONFIG_NF_NAT_H323=m
328CONFIG_NF_NAT_SIP=m
329CONFIG_IP_NF_MANGLE=m
330CONFIG_IP_NF_TARGET_TOS=m
331CONFIG_IP_NF_TARGET_ECN=m
332CONFIG_IP_NF_TARGET_TTL=m
333CONFIG_IP_NF_TARGET_CLUSTERIP=m
334CONFIG_IP_NF_RAW=m
335CONFIG_IP_NF_ARPTABLES=m
336CONFIG_IP_NF_ARPFILTER=m
337CONFIG_IP_NF_ARP_MANGLE=m
338
339#
340# IPv6: Netfilter Configuration (EXPERIMENTAL)
341#
342CONFIG_NF_CONNTRACK_IPV6=m
343CONFIG_IP6_NF_QUEUE=m
344CONFIG_IP6_NF_IPTABLES=m
345CONFIG_IP6_NF_MATCH_RT=m
346CONFIG_IP6_NF_MATCH_OPTS=m
347CONFIG_IP6_NF_MATCH_FRAG=m
348CONFIG_IP6_NF_MATCH_HL=m
349CONFIG_IP6_NF_MATCH_OWNER=m
350CONFIG_IP6_NF_MATCH_IPV6HEADER=m
351CONFIG_IP6_NF_MATCH_AH=m
352CONFIG_IP6_NF_MATCH_MH=m
353CONFIG_IP6_NF_MATCH_EUI64=m
354CONFIG_IP6_NF_FILTER=m
355CONFIG_IP6_NF_TARGET_LOG=m
356CONFIG_IP6_NF_TARGET_REJECT=m
357CONFIG_IP6_NF_MANGLE=m
358CONFIG_IP6_NF_TARGET_HL=m
359CONFIG_IP6_NF_RAW=m
360
361#
362# DCCP Configuration (EXPERIMENTAL)
363#
364# CONFIG_IP_DCCP is not set
365
366#
367# SCTP Configuration (EXPERIMENTAL)
368#
369# CONFIG_IP_SCTP is not set
370
371#
372# TIPC Configuration (EXPERIMENTAL)
373#
374# CONFIG_TIPC is not set
375# CONFIG_ATM is not set
376# CONFIG_BRIDGE is not set
377CONFIG_VLAN_8021Q=m
378# CONFIG_DECNET is not set
379# CONFIG_LLC2 is not set
380# CONFIG_IPX is not set
381# CONFIG_ATALK is not set
382# CONFIG_X25 is not set
383# CONFIG_LAPB is not set
384# CONFIG_ECONET is not set
385# CONFIG_WAN_ROUTER is not set
386
387#
388# QoS and/or fair queueing
389#
390# CONFIG_NET_SCHED is not set
391CONFIG_NET_CLS_ROUTE=y
392
393#
394# Network testing
395#
396# CONFIG_NET_PKTGEN is not set
397# CONFIG_HAMRADIO is not set
398# CONFIG_IRDA is not set
399# CONFIG_BT is not set
400# CONFIG_IEEE80211 is not set
401
402#
403# Device Drivers
404#
405
406#
407# Generic Driver Options
408#
409CONFIG_STANDALONE=y
410# CONFIG_PREVENT_FIRMWARE_BUILD is not set
411# CONFIG_FW_LOADER is not set
412# CONFIG_DEBUG_DRIVER is not set
413# CONFIG_DEBUG_DEVRES is not set
414# CONFIG_SYS_HYPERVISOR is not set
415
416#
417# Connector - unified userspace <-> kernelspace linker
418#
419# CONFIG_CONNECTOR is not set
420
421#
422# Memory Technology Devices (MTD)
423#
424CONFIG_MTD=y
425# CONFIG_MTD_DEBUG is not set
426# CONFIG_MTD_CONCAT is not set
427CONFIG_MTD_PARTITIONS=y
428# CONFIG_MTD_REDBOOT_PARTS is not set
429CONFIG_MTD_CMDLINE_PARTS=y
430
431#
432# User Modules And Translation Layers
433#
434CONFIG_MTD_CHAR=y
435CONFIG_MTD_BLKDEVS=y
436CONFIG_MTD_BLOCK=y
437# CONFIG_FTL is not set
438# CONFIG_NFTL is not set
439# CONFIG_INFTL is not set
440# CONFIG_RFD_FTL is not set
441# CONFIG_SSFDC is not set
442
443#
444# RAM/ROM/Flash chip drivers
445#
446CONFIG_MTD_CFI=y
447# CONFIG_MTD_JEDECPROBE is not set
448CONFIG_MTD_GEN_PROBE=y
449# CONFIG_MTD_CFI_ADV_OPTIONS is not set
450CONFIG_MTD_MAP_BANK_WIDTH_1=y
451CONFIG_MTD_MAP_BANK_WIDTH_2=y
452CONFIG_MTD_MAP_BANK_WIDTH_4=y
453# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
454# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
455# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
456CONFIG_MTD_CFI_I1=y
457CONFIG_MTD_CFI_I2=y
458# CONFIG_MTD_CFI_I4 is not set
459# CONFIG_MTD_CFI_I8 is not set
460# CONFIG_MTD_CFI_INTELEXT is not set
461CONFIG_MTD_CFI_AMDSTD=y
462# CONFIG_MTD_CFI_STAA is not set
463CONFIG_MTD_CFI_UTIL=y
464# CONFIG_MTD_RAM is not set
465# CONFIG_MTD_ROM is not set
466# CONFIG_MTD_ABSENT is not set
467# CONFIG_MTD_OBSOLETE_CHIPS is not set
468
469#
470# Mapping drivers for chip access
471#
472# CONFIG_MTD_COMPLEX_MAPPINGS is not set
473CONFIG_MTD_PHYSMAP=y
474CONFIG_MTD_PHYSMAP_START=0x80000000
475CONFIG_MTD_PHYSMAP_LEN=0x0
476CONFIG_MTD_PHYSMAP_BANKWIDTH=2
477# CONFIG_MTD_PLATRAM is not set
478
479#
480# Self-contained MTD device drivers
481#
482CONFIG_MTD_DATAFLASH=y
483# CONFIG_MTD_M25P80 is not set
484# CONFIG_MTD_SLRAM is not set
485# CONFIG_MTD_PHRAM is not set
486# CONFIG_MTD_MTDRAM is not set
487# CONFIG_MTD_BLOCK2MTD is not set
488
489#
490# Disk-On-Chip Device Drivers
491#
492# CONFIG_MTD_DOC2000 is not set
493# CONFIG_MTD_DOC2001 is not set
494# CONFIG_MTD_DOC2001PLUS is not set
495
496#
497# NAND Flash Device Drivers
498#
499# CONFIG_MTD_NAND is not set
500
501#
502# OneNAND Flash Device Drivers
503#
504# CONFIG_MTD_ONENAND is not set
505
506#
507# Parallel port support
508#
509# CONFIG_PARPORT is not set
510
511#
512# Plug and Play support
513#
514# CONFIG_PNPACPI is not set
515
516#
517# Block devices
518#
519# CONFIG_BLK_DEV_COW_COMMON is not set
520CONFIG_BLK_DEV_LOOP=m
521# CONFIG_BLK_DEV_CRYPTOLOOP is not set
522CONFIG_BLK_DEV_NBD=m
523CONFIG_BLK_DEV_RAM=m
524CONFIG_BLK_DEV_RAM_COUNT=16
525CONFIG_BLK_DEV_RAM_SIZE=4096
526CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
527# CONFIG_CDROM_PKTCDVD is not set
528# CONFIG_ATA_OVER_ETH is not set
529
530#
531# Misc devices
532#
533
534#
535# ATA/ATAPI/MFM/RLL support
536#
537# CONFIG_IDE is not set
538
539#
540# SCSI device support
541#
542# CONFIG_RAID_ATTRS is not set
543# CONFIG_SCSI is not set
544# CONFIG_SCSI_NETLINK is not set
545
546#
547# Serial ATA (prod) and Parallel ATA (experimental) drivers
548#
549# CONFIG_ATA is not set
550
551#
552# Multi-device support (RAID and LVM)
553#
554# CONFIG_MD is not set
555
556#
557# Fusion MPT device support
558#
559# CONFIG_FUSION is not set
560
561#
562# IEEE 1394 (FireWire) support
563#
564
565#
566# I2O device support
567#
568
569#
570# Network device support
571#
572CONFIG_NETDEVICES=y
573# CONFIG_DUMMY is not set
574# CONFIG_BONDING is not set
575# CONFIG_EQUALIZER is not set
576CONFIG_TUN=m
577
578#
579# PHY device support
580#
581# CONFIG_PHYLIB is not set
582
583#
584# Ethernet (10 or 100Mbit)
585#
586CONFIG_NET_ETHERNET=y
587CONFIG_MII=y
588CONFIG_MACB=y
589
590#
591# Ethernet (1000 Mbit)
592#
593
594#
595# Ethernet (10000 Mbit)
596#
597
598#
599# Token Ring devices
600#
601
602#
603# Wireless LAN (non-hamradio)
604#
605# CONFIG_NET_RADIO is not set
606
607#
608# Wan interfaces
609#
610# CONFIG_WAN is not set
611CONFIG_PPP=m
612# CONFIG_PPP_MULTILINK is not set
613CONFIG_PPP_FILTER=y
614CONFIG_PPP_ASYNC=m
615# CONFIG_PPP_SYNC_TTY is not set
616CONFIG_PPP_DEFLATE=m
617CONFIG_PPP_BSDCOMP=m
618CONFIG_PPP_MPPE=m
619CONFIG_PPPOE=m
620# CONFIG_SLIP is not set
621CONFIG_SLHC=m
622# CONFIG_SHAPER is not set
623# CONFIG_NETCONSOLE is not set
624# CONFIG_NETPOLL is not set
625# CONFIG_NET_POLL_CONTROLLER is not set
626
627#
628# ISDN subsystem
629#
630# CONFIG_ISDN is not set
631
632#
633# Telephony Support
634#
635# CONFIG_PHONE is not set
636
637#
638# Input device support
639#
640# CONFIG_INPUT is not set
641
642#
643# Hardware I/O ports
644#
645# CONFIG_SERIO is not set
646# CONFIG_GAMEPORT is not set
647
648#
649# Character devices
650#
651# CONFIG_VT is not set
652# CONFIG_SERIAL_NONSTANDARD is not set
653
654#
655# Serial drivers
656#
657# CONFIG_SERIAL_8250 is not set
658
659#
660# Non-8250 serial port support
661#
662CONFIG_SERIAL_ATMEL=y
663CONFIG_SERIAL_ATMEL_CONSOLE=y
664# CONFIG_SERIAL_ATMEL_TTYAT is not set
665CONFIG_SERIAL_CORE=y
666CONFIG_SERIAL_CORE_CONSOLE=y
667CONFIG_UNIX98_PTYS=y
668# CONFIG_LEGACY_PTYS is not set
669
670#
671# IPMI
672#
673# CONFIG_IPMI_HANDLER is not set
674
675#
676# Watchdog Cards
677#
678# CONFIG_WATCHDOG is not set
679# CONFIG_HW_RANDOM is not set
680# CONFIG_RTC is not set
681# CONFIG_GEN_RTC is not set
682# CONFIG_DTLK is not set
683# CONFIG_R3964 is not set
684# CONFIG_RAW_DRIVER is not set
685
686#
687# TPM devices
688#
689# CONFIG_TCG_TPM is not set
690
691#
692# I2C support
693#
694# CONFIG_I2C is not set
695
696#
697# SPI support
698#
699CONFIG_SPI=y
700# CONFIG_SPI_DEBUG is not set
701CONFIG_SPI_MASTER=y
702
703#
704# SPI Master Controller Drivers
705#
706CONFIG_SPI_ATMEL=y
707# CONFIG_SPI_BITBANG is not set
708
709#
710# SPI Protocol Masters
711#
712# CONFIG_SPI_AT25 is not set
713
714#
715# Dallas's 1-wire bus
716#
717# CONFIG_W1 is not set
718
719#
720# Hardware Monitoring support
721#
722# CONFIG_HWMON is not set
723# CONFIG_HWMON_VID is not set
724
725#
726# Multifunction device drivers
727#
728# CONFIG_MFD_SM501 is not set
729
730#
731# Multimedia devices
732#
733# CONFIG_VIDEO_DEV is not set
734
735#
736# Digital Video Broadcasting Devices
737#
738# CONFIG_DVB is not set
739
740#
741# Graphics support
742#
743# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
744# CONFIG_FB is not set
745
746#
747# Sound
748#
749# CONFIG_SOUND is not set
750
751#
752# USB support
753#
754# CONFIG_USB_ARCH_HAS_HCD is not set
755# CONFIG_USB_ARCH_HAS_OHCI is not set
756# CONFIG_USB_ARCH_HAS_EHCI is not set
757
758#
759# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
760#
761
762#
763# USB Gadget Support
764#
765# CONFIG_USB_GADGET is not set
766
767#
768# MMC/SD Card support
769#
770# CONFIG_MMC is not set
771
772#
773# LED devices
774#
775# CONFIG_NEW_LEDS is not set
776
777#
778# LED drivers
779#
780
781#
782# LED Triggers
783#
784
785#
786# InfiniBand support
787#
788
789#
790# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
791#
792
793#
794# Real Time Clock
795#
796# CONFIG_RTC_CLASS is not set
797
798#
799# DMA Engine support
800#
801# CONFIG_DMA_ENGINE is not set
802
803#
804# DMA Clients
805#
806
807#
808# DMA Devices
809#
810
811#
812# Auxiliary Display support
813#
814
815#
816# Virtualization
817#
818
819#
820# File systems
821#
822CONFIG_EXT2_FS=y
823# CONFIG_EXT2_FS_XATTR is not set
824# CONFIG_EXT2_FS_XIP is not set
825CONFIG_EXT3_FS=y
826# CONFIG_EXT3_FS_XATTR is not set
827# CONFIG_EXT4DEV_FS is not set
828CONFIG_JBD=y
829# CONFIG_JBD_DEBUG is not set
830# CONFIG_REISERFS_FS is not set
831# CONFIG_JFS_FS is not set
832# CONFIG_FS_POSIX_ACL is not set
833# CONFIG_XFS_FS is not set
834# CONFIG_GFS2_FS is not set
835# CONFIG_OCFS2_FS is not set
836# CONFIG_MINIX_FS is not set
837# CONFIG_ROMFS_FS is not set
838# CONFIG_INOTIFY is not set
839# CONFIG_QUOTA is not set
840# CONFIG_DNOTIFY is not set
841# CONFIG_AUTOFS_FS is not set
842# CONFIG_AUTOFS4_FS is not set
843CONFIG_FUSE_FS=m
844
845#
846# CD-ROM/DVD Filesystems
847#
848# CONFIG_ISO9660_FS is not set
849# CONFIG_UDF_FS is not set
850
851#
852# DOS/FAT/NT Filesystems
853#
854CONFIG_FAT_FS=m
855CONFIG_MSDOS_FS=m
856CONFIG_VFAT_FS=m
857CONFIG_FAT_DEFAULT_CODEPAGE=850
858CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
859# CONFIG_NTFS_FS is not set
860
861#
862# Pseudo filesystems
863#
864CONFIG_PROC_FS=y
865# CONFIG_PROC_KCORE is not set
866CONFIG_PROC_SYSCTL=y
867CONFIG_SYSFS=y
868CONFIG_TMPFS=y
869# CONFIG_TMPFS_POSIX_ACL is not set
870# CONFIG_HUGETLB_PAGE is not set
871CONFIG_RAMFS=y
872CONFIG_CONFIGFS_FS=y
873
874#
875# Miscellaneous filesystems
876#
877# CONFIG_ADFS_FS is not set
878# CONFIG_AFFS_FS is not set
879# CONFIG_HFS_FS is not set
880# CONFIG_HFSPLUS_FS is not set
881# CONFIG_BEFS_FS is not set
882# CONFIG_BFS_FS is not set
883# CONFIG_EFS_FS is not set
884CONFIG_JFFS2_FS=y
885CONFIG_JFFS2_FS_DEBUG=0
886CONFIG_JFFS2_FS_WRITEBUFFER=y
887# CONFIG_JFFS2_SUMMARY is not set
888# CONFIG_JFFS2_FS_XATTR is not set
889# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
890CONFIG_JFFS2_ZLIB=y
891CONFIG_JFFS2_RTIME=y
892# CONFIG_JFFS2_RUBIN is not set
893# CONFIG_CRAMFS is not set
894# CONFIG_VXFS_FS is not set
895# CONFIG_HPFS_FS is not set
896# CONFIG_QNX4FS_FS is not set
897# CONFIG_SYSV_FS is not set
898# CONFIG_UFS_FS is not set
899
900#
901# Network File Systems
902#
903CONFIG_NFS_FS=y
904CONFIG_NFS_V3=y
905# CONFIG_NFS_V3_ACL is not set
906# CONFIG_NFS_V4 is not set
907# CONFIG_NFS_DIRECTIO is not set
908# CONFIG_NFSD is not set
909CONFIG_ROOT_NFS=y
910CONFIG_LOCKD=y
911CONFIG_LOCKD_V4=y
912CONFIG_NFS_COMMON=y
913CONFIG_SUNRPC=y
914# CONFIG_RPCSEC_GSS_KRB5 is not set
915# CONFIG_RPCSEC_GSS_SPKM3 is not set
916CONFIG_SMB_FS=m
917# CONFIG_SMB_NLS_DEFAULT is not set
918CONFIG_CIFS=m
919# CONFIG_CIFS_STATS is not set
920# CONFIG_CIFS_WEAK_PW_HASH is not set
921# CONFIG_CIFS_XATTR is not set
922# CONFIG_CIFS_DEBUG2 is not set
923# CONFIG_CIFS_EXPERIMENTAL is not set
924# CONFIG_NCP_FS is not set
925# CONFIG_CODA_FS is not set
926# CONFIG_AFS_FS is not set
927# CONFIG_9P_FS is not set
928
929#
930# Partition Types
931#
932# CONFIG_PARTITION_ADVANCED is not set
933CONFIG_MSDOS_PARTITION=y
934
935#
936# Native Language Support
937#
938CONFIG_NLS=y
939CONFIG_NLS_DEFAULT="iso8859-1"
940# CONFIG_NLS_CODEPAGE_437 is not set
941# CONFIG_NLS_CODEPAGE_737 is not set
942# CONFIG_NLS_CODEPAGE_775 is not set
943CONFIG_NLS_CODEPAGE_850=y
944# CONFIG_NLS_CODEPAGE_852 is not set
945# CONFIG_NLS_CODEPAGE_855 is not set
946# CONFIG_NLS_CODEPAGE_857 is not set
947# CONFIG_NLS_CODEPAGE_860 is not set
948# CONFIG_NLS_CODEPAGE_861 is not set
949# CONFIG_NLS_CODEPAGE_862 is not set
950# CONFIG_NLS_CODEPAGE_863 is not set
951# CONFIG_NLS_CODEPAGE_864 is not set
952# CONFIG_NLS_CODEPAGE_865 is not set
953# CONFIG_NLS_CODEPAGE_866 is not set
954# CONFIG_NLS_CODEPAGE_869 is not set
955# CONFIG_NLS_CODEPAGE_936 is not set
956# CONFIG_NLS_CODEPAGE_950 is not set
957# CONFIG_NLS_CODEPAGE_932 is not set
958# CONFIG_NLS_CODEPAGE_949 is not set
959# CONFIG_NLS_CODEPAGE_874 is not set
960# CONFIG_NLS_ISO8859_8 is not set
961# CONFIG_NLS_CODEPAGE_1250 is not set
962# CONFIG_NLS_CODEPAGE_1251 is not set
963# CONFIG_NLS_ASCII is not set
964CONFIG_NLS_ISO8859_1=y
965# CONFIG_NLS_ISO8859_2 is not set
966# CONFIG_NLS_ISO8859_3 is not set
967# CONFIG_NLS_ISO8859_4 is not set
968# CONFIG_NLS_ISO8859_5 is not set
969# CONFIG_NLS_ISO8859_6 is not set
970# CONFIG_NLS_ISO8859_7 is not set
971# CONFIG_NLS_ISO8859_9 is not set
972# CONFIG_NLS_ISO8859_13 is not set
973# CONFIG_NLS_ISO8859_14 is not set
974# CONFIG_NLS_ISO8859_15 is not set
975# CONFIG_NLS_KOI8_R is not set
976# CONFIG_NLS_KOI8_U is not set
977CONFIG_NLS_UTF8=y
978
979#
980# Distributed Lock Manager
981#
982# CONFIG_DLM is not set
983
984#
985# Kernel hacking
986#
987CONFIG_TRACE_IRQFLAGS_SUPPORT=y
988# CONFIG_PRINTK_TIME is not set
989CONFIG_ENABLE_MUST_CHECK=y
990CONFIG_MAGIC_SYSRQ=y
991# CONFIG_UNUSED_SYMBOLS is not set
992# CONFIG_DEBUG_FS is not set
993# CONFIG_HEADERS_CHECK is not set
994CONFIG_DEBUG_KERNEL=y
995# CONFIG_DEBUG_SHIRQ is not set
996CONFIG_LOG_BUF_SHIFT=14
997CONFIG_DETECT_SOFTLOCKUP=y
998# CONFIG_SCHEDSTATS is not set
999# CONFIG_TIMER_STATS is not set
1000# CONFIG_DEBUG_SLAB is not set
1001# CONFIG_DEBUG_RT_MUTEXES is not set
1002# CONFIG_RT_MUTEX_TESTER is not set
1003# CONFIG_DEBUG_SPINLOCK is not set
1004# CONFIG_DEBUG_MUTEXES is not set
1005# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1006# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1007# CONFIG_DEBUG_KOBJECT is not set
1008CONFIG_DEBUG_BUGVERBOSE=y
1009# CONFIG_DEBUG_INFO is not set
1010# CONFIG_DEBUG_VM is not set
1011# CONFIG_DEBUG_LIST is not set
1012CONFIG_FRAME_POINTER=y
1013# CONFIG_FORCED_INLINING is not set
1014# CONFIG_RCU_TORTURE_TEST is not set
1015# CONFIG_FAULT_INJECTION is not set
1016# CONFIG_KPROBES is not set
1017
1018#
1019# Security options
1020#
1021# CONFIG_KEYS is not set
1022# CONFIG_SECURITY is not set
1023
1024#
1025# Cryptographic options
1026#
1027CONFIG_CRYPTO=y
1028CONFIG_CRYPTO_ALGAPI=y
1029CONFIG_CRYPTO_BLKCIPHER=y
1030CONFIG_CRYPTO_HASH=y
1031CONFIG_CRYPTO_MANAGER=y
1032CONFIG_CRYPTO_HMAC=y
1033# CONFIG_CRYPTO_XCBC is not set
1034# CONFIG_CRYPTO_NULL is not set
1035# CONFIG_CRYPTO_MD4 is not set
1036CONFIG_CRYPTO_MD5=y
1037CONFIG_CRYPTO_SHA1=y
1038# CONFIG_CRYPTO_SHA256 is not set
1039# CONFIG_CRYPTO_SHA512 is not set
1040# CONFIG_CRYPTO_WP512 is not set
1041# CONFIG_CRYPTO_TGR192 is not set
1042# CONFIG_CRYPTO_GF128MUL is not set
1043CONFIG_CRYPTO_ECB=m
1044CONFIG_CRYPTO_CBC=y
1045CONFIG_CRYPTO_PCBC=m
1046# CONFIG_CRYPTO_LRW is not set
1047CONFIG_CRYPTO_DES=y
1048# CONFIG_CRYPTO_FCRYPT is not set
1049# CONFIG_CRYPTO_BLOWFISH is not set
1050# CONFIG_CRYPTO_TWOFISH is not set
1051# CONFIG_CRYPTO_SERPENT is not set
1052# CONFIG_CRYPTO_AES is not set
1053# CONFIG_CRYPTO_CAST5 is not set
1054# CONFIG_CRYPTO_CAST6 is not set
1055# CONFIG_CRYPTO_TEA is not set
1056CONFIG_CRYPTO_ARC4=m
1057# CONFIG_CRYPTO_KHAZAD is not set
1058# CONFIG_CRYPTO_ANUBIS is not set
1059CONFIG_CRYPTO_DEFLATE=y
1060# CONFIG_CRYPTO_MICHAEL_MIC is not set
1061# CONFIG_CRYPTO_CRC32C is not set
1062# CONFIG_CRYPTO_CAMELLIA is not set
1063# CONFIG_CRYPTO_TEST is not set
1064
1065#
1066# Hardware crypto devices
1067#
1068
1069#
1070# Library routines
1071#
1072CONFIG_BITREVERSE=y
1073CONFIG_CRC_CCITT=m
1074# CONFIG_CRC16 is not set
1075CONFIG_CRC32=y
1076# CONFIG_LIBCRC32C is not set
1077CONFIG_ZLIB_INFLATE=y
1078CONFIG_ZLIB_DEFLATE=y
1079CONFIG_TEXTSEARCH=y
1080CONFIG_TEXTSEARCH_KMP=m
1081CONFIG_TEXTSEARCH_BM=m
1082CONFIG_TEXTSEARCH_FSM=m
1083CONFIG_PLIST=y
1084CONFIG_HAS_IOMEM=y
1085CONFIG_HAS_IOPORT=y
diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c
index 2e72fd2699df..2714cf6452b5 100644
--- a/arch/avr32/kernel/cpu.c
+++ b/arch/avr32/kernel/cpu.c
@@ -209,16 +209,17 @@ static const char *mmu_types[] = {
209void __init setup_processor(void) 209void __init setup_processor(void)
210{ 210{
211 unsigned long config0, config1; 211 unsigned long config0, config1;
212 unsigned long features;
212 unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type; 213 unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type;
213 unsigned tmp; 214 unsigned tmp;
214 215
215 config0 = sysreg_read(CONFIG0); /* 0x0000013e; */ 216 config0 = sysreg_read(CONFIG0);
216 config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */ 217 config1 = sysreg_read(CONFIG1);
217 cpu_id = config0 >> 24; 218 cpu_id = SYSREG_BFEXT(PROCESSORID, config0);
218 cpu_rev = (config0 >> 16) & 0xff; 219 cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0);
219 arch_id = (config0 >> 13) & 0x07; 220 arch_id = SYSREG_BFEXT(AT, config0);
220 arch_rev = (config0 >> 10) & 0x07; 221 arch_rev = SYSREG_BFEXT(AR, config0);
221 mmu_type = (config0 >> 7) & 0x03; 222 mmu_type = SYSREG_BFEXT(MMUT, config0);
222 223
223 boot_cpu_data.arch_type = arch_id; 224 boot_cpu_data.arch_type = arch_id;
224 boot_cpu_data.cpu_type = cpu_id; 225 boot_cpu_data.cpu_type = cpu_id;
@@ -226,16 +227,16 @@ void __init setup_processor(void)
226 boot_cpu_data.cpu_revision = cpu_rev; 227 boot_cpu_data.cpu_revision = cpu_rev;
227 boot_cpu_data.tlb_config = mmu_type; 228 boot_cpu_data.tlb_config = mmu_type;
228 229
229 tmp = (config1 >> 13) & 0x07; 230 tmp = SYSREG_BFEXT(ILSZ, config1);
230 if (tmp) { 231 if (tmp) {
231 boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07); 232 boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1);
232 boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f); 233 boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1);
233 boot_cpu_data.icache.linesz = 1 << (tmp + 1); 234 boot_cpu_data.icache.linesz = 1 << (tmp + 1);
234 } 235 }
235 tmp = (config1 >> 3) & 0x07; 236 tmp = SYSREG_BFEXT(DLSZ, config1);
236 if (tmp) { 237 if (tmp) {
237 boot_cpu_data.dcache.ways = 1 << (config1 & 0x07); 238 boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1);
238 boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f); 239 boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1);
239 boot_cpu_data.dcache.linesz = 1 << (tmp + 1); 240 boot_cpu_data.dcache.linesz = 1 << (tmp + 1);
240 } 241 }
241 242
@@ -250,16 +251,39 @@ void __init setup_processor(void)
250 cpu_names[cpu_id], cpu_id, cpu_rev, 251 cpu_names[cpu_id], cpu_id, cpu_rev,
251 arch_names[arch_id], arch_rev); 252 arch_names[arch_id], arch_rev);
252 printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]); 253 printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]);
254
253 printk ("CPU: features:"); 255 printk ("CPU: features:");
254 if (config0 & (1 << 6)) 256 features = 0;
255 printk(" fpu"); 257 if (config0 & SYSREG_BIT(CONFIG0_R)) {
256 if (config0 & (1 << 5)) 258 features |= AVR32_FEATURE_RMW;
257 printk(" java"); 259 printk(" rmw");
258 if (config0 & (1 << 4)) 260 }
259 printk(" perfctr"); 261 if (config0 & SYSREG_BIT(CONFIG0_D)) {
260 if (config0 & (1 << 3)) 262 features |= AVR32_FEATURE_DSP;
263 printk(" dsp");
264 }
265 if (config0 & SYSREG_BIT(CONFIG0_S)) {
266 features |= AVR32_FEATURE_SIMD;
267 printk(" simd");
268 }
269 if (config0 & SYSREG_BIT(CONFIG0_O)) {
270 features |= AVR32_FEATURE_OCD;
261 printk(" ocd"); 271 printk(" ocd");
272 }
273 if (config0 & SYSREG_BIT(CONFIG0_P)) {
274 features |= AVR32_FEATURE_PCTR;
275 printk(" perfctr");
276 }
277 if (config0 & SYSREG_BIT(CONFIG0_J)) {
278 features |= AVR32_FEATURE_JAVA;
279 printk(" java");
280 }
281 if (config0 & SYSREG_BIT(CONFIG0_F)) {
282 features |= AVR32_FEATURE_FPU;
283 printk(" fpu");
284 }
262 printk("\n"); 285 printk("\n");
286 boot_cpu_data.features = features;
263} 287}
264 288
265#ifdef CONFIG_PROC_FS 289#ifdef CONFIG_PROC_FS
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index eeb66792bc37..42657f1703b2 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -100,55 +100,49 @@ dtlb_miss_write:
100 100
101 .global tlb_miss_common 101 .global tlb_miss_common
102tlb_miss_common: 102tlb_miss_common:
103 mfsr r0, SYSREG_PTBR 103 mfsr r0, SYSREG_TLBEAR
104 mfsr r1, SYSREG_TLBEAR 104 mfsr r1, SYSREG_PTBR
105 105
106 /* Is it the vmalloc space? */ 106 /* Is it the vmalloc space? */
107 bld r1, 31 107 bld r0, 31
108 brcs handle_vmalloc_miss 108 brcs handle_vmalloc_miss
109 109
110 /* First level lookup */ 110 /* First level lookup */
111pgtbl_lookup: 111pgtbl_lookup:
112 lsr r2, r1, PGDIR_SHIFT 112 lsr r2, r0, PGDIR_SHIFT
113 ld.w r0, r0[r2 << 2] 113 ld.w r3, r1[r2 << 2]
114 bld r0, _PAGE_BIT_PRESENT 114 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
115 bld r3, _PAGE_BIT_PRESENT
115 brcc page_table_not_present 116 brcc page_table_not_present
116 117
117 /* TODO: Check access rights on page table if necessary */
118
119 /* Translate to virtual address in P1. */ 118 /* Translate to virtual address in P1. */
120 andl r0, 0xf000 119 andl r3, 0xf000
121 sbr r0, 31 120 sbr r3, 31
122 121
123 /* Second level lookup */ 122 /* Second level lookup */
124 lsl r1, (32 - PGDIR_SHIFT) 123 ld.w r2, r3[r1 << 2]
125 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT 124 mfsr r0, SYSREG_TLBARLO
126 add r2, r0, r1 << 2 125 bld r2, _PAGE_BIT_PRESENT
127 ld.w r1, r2[0]
128 bld r1, _PAGE_BIT_PRESENT
129 brcc page_not_present 126 brcc page_not_present
130 127
131 /* Mark the page as accessed */ 128 /* Mark the page as accessed */
132 sbr r1, _PAGE_BIT_ACCESSED 129 sbr r2, _PAGE_BIT_ACCESSED
133 st.w r2[0], r1 130 st.w r3[r1 << 2], r2
134 131
135 /* Drop software flags */ 132 /* Drop software flags */
136 andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff 133 andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
137 mtsr SYSREG_TLBELO, r1 134 mtsr SYSREG_TLBELO, r2
138 135
139 /* Figure out which entry we want to replace */ 136 /* Figure out which entry we want to replace */
140 mfsr r0, SYSREG_TLBARLO 137 mfsr r1, SYSREG_MMUCR
141 clz r2, r0 138 clz r2, r0
142 brcc 1f 139 brcc 1f
143 mov r1, -1 /* All entries have been accessed, */ 140 mov r3, -1 /* All entries have been accessed, */
144 mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */ 141 mov r2, 0 /* so start at 0 */
145 mov r2, 0 /* and start at 0 */ 142 mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
1461: mfsr r1, SYSREG_MMUCR
147 lsl r2, 14
148 andl r1, 0x3fff, COH
149 or r1, r2
150 mtsr SYSREG_MMUCR, r1
151 143
1441: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
145 mtsr SYSREG_MMUCR, r1
152 tlbw 146 tlbw
153 147
154 tlbmiss_restore 148 tlbmiss_restore
@@ -156,8 +150,8 @@ pgtbl_lookup:
156 150
157handle_vmalloc_miss: 151handle_vmalloc_miss:
158 /* Simply do the lookup in init's page table */ 152 /* Simply do the lookup in init's page table */
159 mov r0, lo(swapper_pg_dir) 153 mov r1, lo(swapper_pg_dir)
160 orh r0, hi(swapper_pg_dir) 154 orh r1, hi(swapper_pg_dir)
161 rjmp pgtbl_lookup 155 rjmp pgtbl_lookup
162 156
163 157
@@ -340,12 +334,34 @@ do_bus_error_read:
340do_nmi_ll: 334do_nmi_ll:
341 sub sp, 4 335 sub sp, 4
342 stmts --sp, r0-lr 336 stmts --sp, r0-lr
343 /* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */ 337 mfsr r9, SYSREG_RSR_NMI
344 rcall save_full_context_ex 338 mfsr r8, SYSREG_RAR_NMI
339 bfextu r0, r9, MODE_SHIFT, 3
340 brne 2f
341
3421: pushm r8, r9 /* PC and SR */
345 mfsr r12, SYSREG_ECR 343 mfsr r12, SYSREG_ECR
346 mov r11, sp 344 mov r11, sp
347 rcall do_nmi 345 rcall do_nmi
348 rjmp bad_return 346 popm r8-r9
347 mtsr SYSREG_RAR_NMI, r8
348 tst r0, r0
349 mtsr SYSREG_RSR_NMI, r9
350 brne 3f
351
352 ldmts sp++, r0-lr
353 sub sp, -4 /* skip r12_orig */
354 rete
355
3562: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
357 stdsp sp[4], r10 /* replace saved SP */
358 rjmp 1b
359
3603: popm lr
361 sub sp, -4 /* skip sp */
362 popm r0-r12
363 sub sp, -4 /* skip r12_orig */
364 rete
349 365
350handle_address_fault: 366handle_address_fault:
351 sub sp, 4 367 sub sp, 4
@@ -630,9 +646,12 @@ irq_level\level:
630 rcall do_IRQ 646 rcall do_IRQ
631 647
632 lddsp r4, sp[REG_SR] 648 lddsp r4, sp[REG_SR]
633 andh r4, (MODE_MASK >> 16), COH 649 bfextu r4, r4, SYSREG_M0_OFFSET, 3
650 cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
651 breq 2f
652 cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
634#ifdef CONFIG_PREEMPT 653#ifdef CONFIG_PREEMPT
635 brne 2f 654 brne 3f
636#else 655#else
637 brne 1f 656 brne 1f
638#endif 657#endif
@@ -649,9 +668,18 @@ irq_level\level:
649 sub sp, -4 /* ignore r12_orig */ 668 sub sp, -4 /* ignore r12_orig */
650 rete 669 rete
651 670
6712: get_thread_info r0
672 ld.w r1, r0[TI_flags]
673 bld r1, TIF_CPU_GOING_TO_SLEEP
652#ifdef CONFIG_PREEMPT 674#ifdef CONFIG_PREEMPT
6532: 675 brcc 3f
654 get_thread_info r0 676#else
677 brcc 1b
678#endif
679 sub r1, pc, . - cpu_idle_skip_sleep
680 stdsp sp[REG_PC], r1
681#ifdef CONFIG_PREEMPT
6823: get_thread_info r0
655 ld.w r2, r0[TI_preempt_count] 683 ld.w r2, r0[TI_preempt_count]
656 cp.w r2, 0 684 cp.w r2, 0
657 brne 1b 685 brne 1b
@@ -662,12 +690,32 @@ irq_level\level:
662 bld r4, SYSREG_GM_OFFSET 690 bld r4, SYSREG_GM_OFFSET
663 brcs 1b 691 brcs 1b
664 rcall preempt_schedule_irq 692 rcall preempt_schedule_irq
665 rjmp 1b
666#endif 693#endif
694 rjmp 1b
667 .endm 695 .endm
668 696
669 .section .irq.text,"ax",@progbits 697 .section .irq.text,"ax",@progbits
670 698
699.global cpu_idle_sleep
700cpu_idle_sleep:
701 mask_interrupts
702 get_thread_info r8
703 ld.w r9, r8[TI_flags]
704 bld r9, TIF_NEED_RESCHED
705 brcs cpu_idle_enable_int_and_exit
706 sbr r9, TIF_CPU_GOING_TO_SLEEP
707 st.w r8[TI_flags], r9
708 unmask_interrupts
709 sleep 0
710cpu_idle_skip_sleep:
711 mask_interrupts
712 ld.w r9, r8[TI_flags]
713 cbr r9, TIF_CPU_GOING_TO_SLEEP
714 st.w r8[TI_flags], r9
715cpu_idle_enable_int_and_exit:
716 unmask_interrupts
717 retal r12
718
671 .global irq_level0 719 .global irq_level0
672 .global irq_level1 720 .global irq_level1
673 .global irq_level2 721 .global irq_level2
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index b599eae64576..1167fe9cf6c4 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -12,10 +12,11 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/moduleloader.h> 15#include <linux/bug.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/elf.h> 16#include <linux/elf.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/moduleloader.h>
19#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
20 21
21void *module_alloc(unsigned long size) 22void *module_alloc(unsigned long size)
@@ -315,10 +316,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
315 vfree(module->arch.syminfo); 316 vfree(module->arch.syminfo);
316 module->arch.syminfo = NULL; 317 module->arch.syminfo = NULL;
317 318
318 return 0; 319 return module_bug_finalize(hdr, sechdrs, module);
319} 320}
320 321
321void module_arch_cleanup(struct module *module) 322void module_arch_cleanup(struct module *module)
322{ 323{
323 324 module_bug_cleanup(module);
324} 325}
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 0b4325946a41..4e4181ed1c6d 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -11,6 +11,7 @@
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/ptrace.h> 12#include <linux/ptrace.h>
13#include <linux/reboot.h> 13#include <linux/reboot.h>
14#include <linux/uaccess.h>
14#include <linux/unistd.h> 15#include <linux/unistd.h>
15 16
16#include <asm/sysreg.h> 17#include <asm/sysreg.h>
@@ -19,6 +20,8 @@
19void (*pm_power_off)(void) = NULL; 20void (*pm_power_off)(void) = NULL;
20EXPORT_SYMBOL(pm_power_off); 21EXPORT_SYMBOL(pm_power_off);
21 22
23extern void cpu_idle_sleep(void);
24
22/* 25/*
23 * This file handles the architecture-dependent parts of process handling.. 26 * This file handles the architecture-dependent parts of process handling..
24 */ 27 */
@@ -27,9 +30,8 @@ void cpu_idle(void)
27{ 30{
28 /* endless idle loop with no priority at all */ 31 /* endless idle loop with no priority at all */
29 while (1) { 32 while (1) {
30 /* TODO: Enter sleep mode */
31 while (!need_resched()) 33 while (!need_resched())
32 cpu_relax(); 34 cpu_idle_sleep();
33 preempt_enable_no_resched(); 35 preempt_enable_no_resched();
34 schedule(); 36 schedule();
35 preempt_disable(); 37 preempt_disable();
@@ -114,39 +116,178 @@ void release_thread(struct task_struct *dead_task)
114 /* do nothing */ 116 /* do nothing */
115} 117}
116 118
119static void dump_mem(const char *str, const char *log_lvl,
120 unsigned long bottom, unsigned long top)
121{
122 unsigned long p;
123 int i;
124
125 printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);
126
127 for (p = bottom & ~31; p < top; ) {
128 printk("%s%04lx: ", log_lvl, p & 0xffff);
129
130 for (i = 0; i < 8; i++, p += 4) {
131 unsigned int val;
132
133 if (p < bottom || p >= top)
134 printk(" ");
135 else {
136 if (__get_user(val, (unsigned int __user *)p)) {
137 printk("\n");
138 goto out;
139 }
140 printk("%08x ", val);
141 }
142 }
143 printk("\n");
144 }
145
146out:
147 return;
148}
149
150static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
151{
152 return (p > (unsigned long)tinfo)
153 && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
154}
155
156#ifdef CONFIG_FRAME_POINTER
157static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
158 struct pt_regs *regs, const char *log_lvl)
159{
160 unsigned long lr, fp;
161 struct thread_info *tinfo;
162
163 if (regs)
164 fp = regs->r7;
165 else if (tsk == current)
166 asm("mov %0, r7" : "=r"(fp));
167 else
168 fp = tsk->thread.cpu_context.r7;
169
170 /*
171 * Walk the stack as long as the frame pointer (a) is within
172 * the kernel stack of the task, and (b) it doesn't move
173 * downwards.
174 */
175 tinfo = task_thread_info(tsk);
176 printk("%sCall trace:\n", log_lvl);
177 while (valid_stack_ptr(tinfo, fp)) {
178 unsigned long new_fp;
179
180 lr = *(unsigned long *)fp;
181#ifdef CONFIG_KALLSYMS
182 printk("%s [<%08lx>] ", log_lvl, lr);
183#else
184 printk(" [<%08lx>] ", lr);
185#endif
186 print_symbol("%s\n", lr);
187
188 new_fp = *(unsigned long *)(fp + 4);
189 if (new_fp <= fp)
190 break;
191 fp = new_fp;
192 }
193 printk("\n");
194}
195#else
196static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
197 struct pt_regs *regs, const char *log_lvl)
198{
199 unsigned long addr;
200
201 printk("%sCall trace:\n", log_lvl);
202
203 while (!kstack_end(sp)) {
204 addr = *sp++;
205 if (kernel_text_address(addr)) {
206#ifdef CONFIG_KALLSYMS
207 printk("%s [<%08lx>] ", log_lvl, addr);
208#else
209 printk(" [<%08lx>] ", addr);
210#endif
211 print_symbol("%s\n", addr);
212 }
213 }
214 printk("\n");
215}
216#endif
217
218void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
219 struct pt_regs *regs, const char *log_lvl)
220{
221 struct thread_info *tinfo;
222
223 if (sp == 0) {
224 if (tsk)
225 sp = tsk->thread.cpu_context.ksp;
226 else
227 sp = (unsigned long)&tinfo;
228 }
229 if (!tsk)
230 tsk = current;
231
232 tinfo = task_thread_info(tsk);
233
234 if (valid_stack_ptr(tinfo, sp)) {
235 dump_mem("Stack: ", log_lvl, sp,
236 THREAD_SIZE + (unsigned long)tinfo);
237 show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
238 }
239}
240
241void show_stack(struct task_struct *tsk, unsigned long *stack)
242{
243 show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
244}
245
246void dump_stack(void)
247{
248 unsigned long stack;
249
250 show_trace_log_lvl(current, &stack, NULL, "");
251}
252EXPORT_SYMBOL(dump_stack);
253
117static const char *cpu_modes[] = { 254static const char *cpu_modes[] = {
118 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1", 255 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
119 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI" 256 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
120}; 257};
121 258
122void show_regs(struct pt_regs *regs) 259void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
123{ 260{
124 unsigned long sp = regs->sp; 261 unsigned long sp = regs->sp;
125 unsigned long lr = regs->lr; 262 unsigned long lr = regs->lr;
126 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT; 263 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
127 264
128 if (!user_mode(regs)) 265 if (!user_mode(regs)) {
129 sp = (unsigned long)regs + FRAME_SIZE_FULL; 266 sp = (unsigned long)regs + FRAME_SIZE_FULL;
130 267
131 print_symbol("PC is at %s\n", instruction_pointer(regs)); 268 printk("%s", log_lvl);
132 print_symbol("LR is at %s\n", lr); 269 print_symbol("PC is at %s\n", instruction_pointer(regs));
133 printk("pc : [<%08lx>] lr : [<%08lx>] %s\n" 270 printk("%s", log_lvl);
134 "sp : %08lx r12: %08lx r11: %08lx\n", 271 print_symbol("LR is at %s\n", lr);
135 instruction_pointer(regs), 272 }
136 lr, print_tainted(), sp, regs->r12, regs->r11); 273
137 printk("r10: %08lx r9 : %08lx r8 : %08lx\n", 274 printk("%spc : [<%08lx>] lr : [<%08lx>] %s\n"
138 regs->r10, regs->r9, regs->r8); 275 "%ssp : %08lx r12: %08lx r11: %08lx\n",
139 printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", 276 log_lvl, instruction_pointer(regs), lr, print_tainted(),
140 regs->r7, regs->r6, regs->r5, regs->r4); 277 log_lvl, sp, regs->r12, regs->r11);
141 printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", 278 printk("%sr10: %08lx r9 : %08lx r8 : %08lx\n",
142 regs->r3, regs->r2, regs->r1, regs->r0); 279 log_lvl, regs->r10, regs->r9, regs->r8);
143 printk("Flags: %c%c%c%c%c\n", 280 printk("%sr7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
281 log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
282 printk("%sr3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
283 log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
284 printk("%sFlags: %c%c%c%c%c\n", log_lvl,
144 regs->sr & SR_Q ? 'Q' : 'q', 285 regs->sr & SR_Q ? 'Q' : 'q',
145 regs->sr & SR_V ? 'V' : 'v', 286 regs->sr & SR_V ? 'V' : 'v',
146 regs->sr & SR_N ? 'N' : 'n', 287 regs->sr & SR_N ? 'N' : 'n',
147 regs->sr & SR_Z ? 'Z' : 'z', 288 regs->sr & SR_Z ? 'Z' : 'z',
148 regs->sr & SR_C ? 'C' : 'c'); 289 regs->sr & SR_C ? 'C' : 'c');
149 printk("Mode bits: %c%c%c%c%c%c%c%c%c\n", 290 printk("%sMode bits: %c%c%c%c%c%c%c%c%c\n", log_lvl,
150 regs->sr & SR_H ? 'H' : 'h', 291 regs->sr & SR_H ? 'H' : 'h',
151 regs->sr & SR_R ? 'R' : 'r', 292 regs->sr & SR_R ? 'R' : 'r',
152 regs->sr & SR_J ? 'J' : 'j', 293 regs->sr & SR_J ? 'J' : 'j',
@@ -156,9 +297,21 @@ void show_regs(struct pt_regs *regs)
156 regs->sr & SR_I1M ? '1' : '.', 297 regs->sr & SR_I1M ? '1' : '.',
157 regs->sr & SR_I0M ? '0' : '.', 298 regs->sr & SR_I0M ? '0' : '.',
158 regs->sr & SR_GM ? 'G' : 'g'); 299 regs->sr & SR_GM ? 'G' : 'g');
159 printk("CPU Mode: %s\n", cpu_modes[mode]); 300 printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
301 printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
302 log_lvl, current->comm, current->pid, current,
303 task_thread_info(current));
304}
305
306void show_regs(struct pt_regs *regs)
307{
308 unsigned long sp = regs->sp;
309
310 if (!user_mode(regs))
311 sp = (unsigned long)regs + FRAME_SIZE_FULL;
160 312
161 show_trace(NULL, (unsigned long *)sp, regs); 313 show_regs_log_lvl(regs, "");
314 show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
162} 315}
163EXPORT_SYMBOL(show_regs); 316EXPORT_SYMBOL(show_regs);
164 317
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index a1a7c3c3f522..b279d66acf5f 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -8,12 +8,14 @@
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/initrd.h>
11#include <linux/sched.h> 12#include <linux/sched.h>
12#include <linux/console.h> 13#include <linux/console.h>
13#include <linux/ioport.h> 14#include <linux/ioport.h>
14#include <linux/bootmem.h> 15#include <linux/bootmem.h>
15#include <linux/fs.h> 16#include <linux/fs.h>
16#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/pfn.h>
17#include <linux/root_dev.h> 19#include <linux/root_dev.h>
18#include <linux/cpu.h> 20#include <linux/cpu.h>
19#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -30,13 +32,6 @@
30extern int root_mountflags; 32extern int root_mountflags;
31 33
32/* 34/*
33 * Bootloader-provided information about physical memory
34 */
35struct tag_mem_range *mem_phys;
36struct tag_mem_range *mem_reserved;
37struct tag_mem_range *mem_ramdisk;
38
39/*
40 * Initialize loops_per_jiffy as 5000000 (500MIPS). 35 * Initialize loops_per_jiffy as 5000000 (500MIPS).
41 * Better make it too large than too small... 36 * Better make it too large than too small...
42 */ 37 */
@@ -48,48 +43,193 @@ EXPORT_SYMBOL(boot_cpu_data);
48static char __initdata command_line[COMMAND_LINE_SIZE]; 43static char __initdata command_line[COMMAND_LINE_SIZE];
49 44
50/* 45/*
51 * Should be more than enough, but if you have a _really_ complex 46 * Standard memory resources
52 * setup, you might need to increase the size of this...
53 */ 47 */
54static struct tag_mem_range __initdata mem_range_cache[32]; 48static struct resource __initdata kernel_data = {
55static unsigned mem_range_next_free; 49 .name = "Kernel data",
50 .start = 0,
51 .end = 0,
52 .flags = IORESOURCE_MEM,
53};
54static struct resource __initdata kernel_code = {
55 .name = "Kernel code",
56 .start = 0,
57 .end = 0,
58 .flags = IORESOURCE_MEM,
59 .sibling = &kernel_data,
60};
56 61
57/* 62/*
58 * Standard memory resources 63 * Available system RAM and reserved regions as singly linked
64 * lists. These lists are traversed using the sibling pointer in
65 * struct resource and are kept sorted at all times.
59 */ 66 */
60static struct resource mem_res[] = { 67static struct resource *__initdata system_ram;
61 { 68static struct resource *__initdata reserved = &kernel_code;
62 .name = "Kernel code", 69
63 .start = 0, 70/*
64 .end = 0, 71 * We need to allocate these before the bootmem allocator is up and
65 .flags = IORESOURCE_MEM 72 * running, so we need this "cache". 32 entries are probably enough
66 }, 73 * for all but the most insanely complex systems.
67 { 74 */
68 .name = "Kernel data", 75static struct resource __initdata res_cache[32];
69 .start = 0, 76static unsigned int __initdata res_cache_next_free;
70 .end = 0, 77
71 .flags = IORESOURCE_MEM, 78static void __init resource_init(void)
72 }, 79{
73}; 80 struct resource *mem, *res;
81 struct resource *new;
82
83 kernel_code.start = __pa(init_mm.start_code);
84
85 for (mem = system_ram; mem; mem = mem->sibling) {
86 new = alloc_bootmem_low(sizeof(struct resource));
87 memcpy(new, mem, sizeof(struct resource));
88
89 new->sibling = NULL;
90 if (request_resource(&iomem_resource, new))
91 printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
92 mem->start, mem->end);
93 }
94
95 for (res = reserved; res; res = res->sibling) {
96 new = alloc_bootmem_low(sizeof(struct resource));
97 memcpy(new, res, sizeof(struct resource));
98
99 new->sibling = NULL;
100 if (insert_resource(&iomem_resource, new))
101 printk(KERN_WARNING
102 "Bad reserved resource %s (%08x-%08x)\n",
103 res->name, res->start, res->end);
104 }
105}
106
107static void __init
108add_physical_memory(resource_size_t start, resource_size_t end)
109{
110 struct resource *new, *next, **pprev;
111
112 for (pprev = &system_ram, next = system_ram; next;
113 pprev = &next->sibling, next = next->sibling) {
114 if (end < next->start)
115 break;
116 if (start <= next->end) {
117 printk(KERN_WARNING
118 "Warning: Physical memory map is broken\n");
119 printk(KERN_WARNING
120 "Warning: %08x-%08x overlaps %08x-%08x\n",
121 start, end, next->start, next->end);
122 return;
123 }
124 }
125
126 if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
127 printk(KERN_WARNING
128 "Warning: Failed to add physical memory %08x-%08x\n",
129 start, end);
130 return;
131 }
132
133 new = &res_cache[res_cache_next_free++];
134 new->start = start;
135 new->end = end;
136 new->name = "System RAM";
137 new->flags = IORESOURCE_MEM;
138
139 *pprev = new;
140}
141
142static int __init
143add_reserved_region(resource_size_t start, resource_size_t end,
144 const char *name)
145{
146 struct resource *new, *next, **pprev;
147
148 if (end < start)
149 return -EINVAL;
150
151 if (res_cache_next_free >= ARRAY_SIZE(res_cache))
152 return -ENOMEM;
153
154 for (pprev = &reserved, next = reserved; next;
155 pprev = &next->sibling, next = next->sibling) {
156 if (end < next->start)
157 break;
158 if (start <= next->end)
159 return -EBUSY;
160 }
161
162 new = &res_cache[res_cache_next_free++];
163 new->start = start;
164 new->end = end;
165 new->name = name;
166 new->flags = IORESOURCE_MEM;
167
168 *pprev = new;
169
170 return 0;
171}
172
173static unsigned long __init
174find_free_region(const struct resource *mem, resource_size_t size,
175 resource_size_t align)
176{
177 struct resource *res;
178 unsigned long target;
179
180 target = ALIGN(mem->start, align);
181 for (res = reserved; res; res = res->sibling) {
182 if ((target + size) <= res->start)
183 break;
184 if (target <= res->end)
185 target = ALIGN(res->end + 1, align);
186 }
187
188 if ((target + size) > (mem->end + 1))
189 return mem->end + 1;
190
191 return target;
192}
193
194static int __init
195alloc_reserved_region(resource_size_t *start, resource_size_t size,
196 resource_size_t align, const char *name)
197{
198 struct resource *mem;
199 resource_size_t target;
200 int ret;
201
202 for (mem = system_ram; mem; mem = mem->sibling) {
203 target = find_free_region(mem, size, align);
204 if (target <= mem->end) {
205 ret = add_reserved_region(target, target + size - 1,
206 name);
207 if (!ret)
208 *start = target;
209 return ret;
210 }
211 }
74 212
75#define kernel_code mem_res[0] 213 return -ENOMEM;
76#define kernel_data mem_res[1] 214}
77 215
78/* 216/*
79 * Early framebuffer allocation. Works as follows: 217 * Early framebuffer allocation. Works as follows:
80 * - If fbmem_size is zero, nothing will be allocated or reserved. 218 * - If fbmem_size is zero, nothing will be allocated or reserved.
81 * - If fbmem_start is zero when setup_bootmem() is called, 219 * - If fbmem_start is zero when setup_bootmem() is called,
82 * fbmem_size bytes will be allocated from the bootmem allocator. 220 * a block of fbmem_size bytes will be reserved before bootmem
221 * initialization. It will be aligned to the largest page size
222 * that fbmem_size is a multiple of.
83 * - If fbmem_start is nonzero, an area of size fbmem_size will be 223 * - If fbmem_start is nonzero, an area of size fbmem_size will be
84 * reserved at the physical address fbmem_start if necessary. If 224 * reserved at the physical address fbmem_start if possible. If
85 * the area isn't in a memory region known to the kernel, it will 225 * it collides with other reserved memory, a different block of
86 * be left alone. 226 * same size will be allocated, just as if fbmem_start was zero.
87 * 227 *
88 * Board-specific code may use these variables to set up platform data 228 * Board-specific code may use these variables to set up platform data
89 * for the framebuffer driver if fbmem_size is nonzero. 229 * for the framebuffer driver if fbmem_size is nonzero.
90 */ 230 */
91static unsigned long __initdata fbmem_start; 231resource_size_t __initdata fbmem_start;
92static unsigned long __initdata fbmem_size; 232resource_size_t __initdata fbmem_size;
93 233
94/* 234/*
95 * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for 235 * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
@@ -103,48 +243,42 @@ static unsigned long __initdata fbmem_size;
103 */ 243 */
104static int __init early_parse_fbmem(char *p) 244static int __init early_parse_fbmem(char *p)
105{ 245{
246 int ret;
247 unsigned long align;
248
106 fbmem_size = memparse(p, &p); 249 fbmem_size = memparse(p, &p);
107 if (*p == '@') 250 if (*p == '@') {
108 fbmem_start = memparse(p, &p); 251 fbmem_start = memparse(p, &p);
109 return 0; 252 ret = add_reserved_region(fbmem_start,
110} 253 fbmem_start + fbmem_size - 1,
111early_param("fbmem", early_parse_fbmem); 254 "Framebuffer");
112 255 if (ret) {
113static inline void __init resource_init(void) 256 printk(KERN_WARNING
114{ 257 "Failed to reserve framebuffer memory\n");
115 struct tag_mem_range *region; 258 fbmem_start = 0;
116 259 }
117 kernel_code.start = __pa(init_mm.start_code); 260 }
118 kernel_code.end = __pa(init_mm.end_code - 1);
119 kernel_data.start = __pa(init_mm.end_code);
120 kernel_data.end = __pa(init_mm.brk - 1);
121
122 for (region = mem_phys; region; region = region->next) {
123 struct resource *res;
124 unsigned long phys_start, phys_end;
125
126 if (region->size == 0)
127 continue;
128
129 phys_start = region->addr;
130 phys_end = phys_start + region->size - 1;
131
132 res = alloc_bootmem_low(sizeof(*res));
133 res->name = "System RAM";
134 res->start = phys_start;
135 res->end = phys_end;
136 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
137
138 request_resource (&iomem_resource, res);
139 261
140 if (kernel_code.start >= res->start && 262 if (!fbmem_start) {
141 kernel_code.end <= res->end) 263 if ((fbmem_size & 0x000fffffUL) == 0)
142 request_resource (res, &kernel_code); 264 align = 0x100000; /* 1 MiB */
143 if (kernel_data.start >= res->start && 265 else if ((fbmem_size & 0x0000ffffUL) == 0)
144 kernel_data.end <= res->end) 266 align = 0x10000; /* 64 KiB */
145 request_resource (res, &kernel_data); 267 else
268 align = 0x1000; /* 4 KiB */
269
270 ret = alloc_reserved_region(&fbmem_start, fbmem_size,
271 align, "Framebuffer");
272 if (ret) {
273 printk(KERN_WARNING
274 "Failed to allocate framebuffer memory\n");
275 fbmem_size = 0;
276 }
146 } 277 }
278
279 return 0;
147} 280}
281early_param("fbmem", early_parse_fbmem);
148 282
149static int __init parse_tag_core(struct tag *tag) 283static int __init parse_tag_core(struct tag *tag)
150{ 284{
@@ -157,11 +291,9 @@ static int __init parse_tag_core(struct tag *tag)
157} 291}
158__tagtable(ATAG_CORE, parse_tag_core); 292__tagtable(ATAG_CORE, parse_tag_core);
159 293
160static int __init parse_tag_mem_range(struct tag *tag, 294static int __init parse_tag_mem(struct tag *tag)
161 struct tag_mem_range **root)
162{ 295{
163 struct tag_mem_range *cur, **pprev; 296 unsigned long start, end;
164 struct tag_mem_range *new;
165 297
166 /* 298 /*
167 * Ignore zero-sized entries. If we're running standalone, the 299 * Ignore zero-sized entries. If we're running standalone, the
@@ -171,34 +303,53 @@ static int __init parse_tag_mem_range(struct tag *tag,
171 if (tag->u.mem_range.size == 0) 303 if (tag->u.mem_range.size == 0)
172 return 0; 304 return 0;
173 305
174 /* 306 start = tag->u.mem_range.addr;
175 * Copy the data so the bootmem init code doesn't need to care 307 end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;
176 * about it. 308
177 */ 309 add_physical_memory(start, end);
178 if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache)) 310 return 0;
179 panic("Physical memory map too complex!\n"); 311}
312__tagtable(ATAG_MEM, parse_tag_mem);
313
314static int __init parse_tag_rdimg(struct tag *tag)
315{
316#ifdef CONFIG_INITRD
317 struct tag_mem_range *mem = &tag->u.mem_range;
318 int ret;
180 319
181 new = &mem_range_cache[mem_range_next_free++]; 320 if (initrd_start) {
182 *new = tag->u.mem_range; 321 printk(KERN_WARNING
322 "Warning: Only the first initrd image will be used\n");
323 return 0;
324 }
183 325
184 pprev = root; 326 ret = add_reserved_region(mem->start, mem->start + mem->size - 1,
185 cur = *root; 327 "initrd");
186 while (cur) { 328 if (ret) {
187 pprev = &cur->next; 329 printk(KERN_WARNING
188 cur = cur->next; 330 "Warning: Failed to reserve initrd memory\n");
331 return ret;
189 } 332 }
190 333
191 *pprev = new; 334 initrd_start = (unsigned long)__va(mem->addr);
192 new->next = NULL; 335 initrd_end = initrd_start + mem->size;
336#else
337 printk(KERN_WARNING "RAM disk image present, but "
338 "no initrd support in kernel, ignoring\n");
339#endif
193 340
194 return 0; 341 return 0;
195} 342}
343__tagtable(ATAG_RDIMG, parse_tag_rdimg);
196 344
197static int __init parse_tag_mem(struct tag *tag) 345static int __init parse_tag_rsvd_mem(struct tag *tag)
198{ 346{
199 return parse_tag_mem_range(tag, &mem_phys); 347 struct tag_mem_range *mem = &tag->u.mem_range;
348
349 return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
350 "Reserved");
200} 351}
201__tagtable(ATAG_MEM, parse_tag_mem); 352__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
202 353
203static int __init parse_tag_cmdline(struct tag *tag) 354static int __init parse_tag_cmdline(struct tag *tag)
204{ 355{
@@ -207,12 +358,6 @@ static int __init parse_tag_cmdline(struct tag *tag)
207} 358}
208__tagtable(ATAG_CMDLINE, parse_tag_cmdline); 359__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
209 360
210static int __init parse_tag_rdimg(struct tag *tag)
211{
212 return parse_tag_mem_range(tag, &mem_ramdisk);
213}
214__tagtable(ATAG_RDIMG, parse_tag_rdimg);
215
216static int __init parse_tag_clock(struct tag *tag) 361static int __init parse_tag_clock(struct tag *tag)
217{ 362{
218 /* 363 /*
@@ -223,12 +368,6 @@ static int __init parse_tag_clock(struct tag *tag)
223} 368}
224__tagtable(ATAG_CLOCK, parse_tag_clock); 369__tagtable(ATAG_CLOCK, parse_tag_clock);
225 370
226static int __init parse_tag_rsvd_mem(struct tag *tag)
227{
228 return parse_tag_mem_range(tag, &mem_reserved);
229}
230__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
231
232/* 371/*
233 * Scan the tag table for this tag, and call its parse function. The 372 * Scan the tag table for this tag, and call its parse function. The
234 * tag table is built by the linker from all the __tagtable 373 * tag table is built by the linker from all the __tagtable
@@ -260,10 +399,137 @@ static void __init parse_tags(struct tag *t)
260 t->hdr.tag); 399 t->hdr.tag);
261} 400}
262 401
402/*
403 * Find a free memory region large enough for storing the
404 * bootmem bitmap.
405 */
406static unsigned long __init
407find_bootmap_pfn(const struct resource *mem)
408{
409 unsigned long bootmap_pages, bootmap_len;
410 unsigned long node_pages = PFN_UP(mem->end - mem->start + 1);
411 unsigned long bootmap_start;
412
413 bootmap_pages = bootmem_bootmap_pages(node_pages);
414 bootmap_len = bootmap_pages << PAGE_SHIFT;
415
416 /*
417 * Find a large enough region without reserved pages for
418 * storing the bootmem bitmap. We can take advantage of the
419 * fact that all lists have been sorted.
420 *
421 * We have to check that we don't collide with any reserved
422 * regions, which includes the kernel image and any RAMDISK
423 * images.
424 */
425 bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);
426
427 return bootmap_start >> PAGE_SHIFT;
428}
429
430#define MAX_LOWMEM HIGHMEM_START
431#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
432
433static void __init setup_bootmem(void)
434{
435 unsigned bootmap_size;
436 unsigned long first_pfn, bootmap_pfn, pages;
437 unsigned long max_pfn, max_low_pfn;
438 unsigned node = 0;
439 struct resource *res;
440
441 printk(KERN_INFO "Physical memory:\n");
442 for (res = system_ram; res; res = res->sibling)
443 printk(" %08x-%08x\n", res->start, res->end);
444 printk(KERN_INFO "Reserved memory:\n");
445 for (res = reserved; res; res = res->sibling)
446 printk(" %08x-%08x: %s\n",
447 res->start, res->end, res->name);
448
449 nodes_clear(node_online_map);
450
451 if (system_ram->sibling)
452 printk(KERN_WARNING "Only using first memory bank\n");
453
454 for (res = system_ram; res; res = NULL) {
455 first_pfn = PFN_UP(res->start);
456 max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
457 bootmap_pfn = find_bootmap_pfn(res);
458 if (bootmap_pfn > max_pfn)
459 panic("No space for bootmem bitmap!\n");
460
461 if (max_low_pfn > MAX_LOWMEM_PFN) {
462 max_low_pfn = MAX_LOWMEM_PFN;
463#ifndef CONFIG_HIGHMEM
464 /*
465 * Lowmem is memory that can be addressed
466 * directly through P1/P2
467 */
468 printk(KERN_WARNING
469 "Node %u: Only %ld MiB of memory will be used.\n",
470 node, MAX_LOWMEM >> 20);
471 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
472#else
473#error HIGHMEM is not supported by AVR32 yet
474#endif
475 }
476
477 /* Initialize the boot-time allocator with low memory only. */
478 bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
479 first_pfn, max_low_pfn);
480
481 /*
482 * Register fully available RAM pages with the bootmem
483 * allocator.
484 */
485 pages = max_low_pfn - first_pfn;
486 free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
487 PFN_PHYS(pages));
488
489 /* Reserve space for the bootmem bitmap... */
490 reserve_bootmem_node(NODE_DATA(node),
491 PFN_PHYS(bootmap_pfn),
492 bootmap_size);
493
494 /* ...and any other reserved regions. */
495 for (res = reserved; res; res = res->sibling) {
496 if (res->start > PFN_PHYS(max_pfn))
497 break;
498
499 /*
500 * resource_init will complain about partial
501 * overlaps, so we'll just ignore such
502 * resources for now.
503 */
504 if (res->start >= PFN_PHYS(first_pfn)
505 && res->end < PFN_PHYS(max_pfn))
506 reserve_bootmem_node(
507 NODE_DATA(node), res->start,
508 res->end - res->start + 1);
509 }
510
511 node_set_online(node);
512 }
513}
514
263void __init setup_arch (char **cmdline_p) 515void __init setup_arch (char **cmdline_p)
264{ 516{
265 struct clk *cpu_clk; 517 struct clk *cpu_clk;
266 518
519 init_mm.start_code = (unsigned long)_text;
520 init_mm.end_code = (unsigned long)_etext;
521 init_mm.end_data = (unsigned long)_edata;
522 init_mm.brk = (unsigned long)_end;
523
524 /*
525 * Include .init section to make allocations easier. It will
526 * be removed before the resource is actually requested.
527 */
528 kernel_code.start = __pa(__init_begin);
529 kernel_code.end = __pa(init_mm.end_code - 1);
530 kernel_data.start = __pa(init_mm.end_code);
531 kernel_data.end = __pa(init_mm.brk - 1);
532
267 parse_tags(bootloader_tags); 533 parse_tags(bootloader_tags);
268 534
269 setup_processor(); 535 setup_processor();
@@ -289,24 +555,16 @@ void __init setup_arch (char **cmdline_p)
289 ((cpu_hz + 500) / 1000) % 1000); 555 ((cpu_hz + 500) / 1000) % 1000);
290 } 556 }
291 557
292 init_mm.start_code = (unsigned long) &_text;
293 init_mm.end_code = (unsigned long) &_etext;
294 init_mm.end_data = (unsigned long) &_edata;
295 init_mm.brk = (unsigned long) &_end;
296
297 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 558 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
298 *cmdline_p = command_line; 559 *cmdline_p = command_line;
299 parse_early_param(); 560 parse_early_param();
300 561
301 setup_bootmem(); 562 setup_bootmem();
302 563
303 board_setup_fbmem(fbmem_start, fbmem_size);
304
305#ifdef CONFIG_VT 564#ifdef CONFIG_VT
306 conswitchp = &dummy_con; 565 conswitchp = &dummy_con;
307#endif 566#endif
308 567
309 paging_init(); 568 paging_init();
310
311 resource_init(); 569 resource_init();
312} 570}
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index c10833f2ee0c..7014a3571ec0 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2004-2006 Atmel Corporation 2 * Copyright (C) 2004-2007 Atmel Corporation
3 * 3 *
4 * Based on MIPS implementation arch/mips/kernel/time.c 4 * Based on MIPS implementation arch/mips/kernel/time.c
5 * Copyright 2001 MontaVista Software Inc. 5 * Copyright 2001 MontaVista Software Inc.
@@ -20,18 +20,25 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/profile.h> 21#include <linux/profile.h>
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <linux/err.h>
23 24
24#include <asm/div64.h> 25#include <asm/div64.h>
25#include <asm/sysreg.h> 26#include <asm/sysreg.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/sections.h> 28#include <asm/sections.h>
28 29
29static cycle_t read_cycle_count(void) 30/* how many counter cycles in a jiffy? */
31static u32 cycles_per_jiffy;
32
33/* the count value for the next timer interrupt */
34static u32 expirelo;
35
36cycle_t __weak read_cycle_count(void)
30{ 37{
31 return (cycle_t)sysreg_read(COUNT); 38 return (cycle_t)sysreg_read(COUNT);
32} 39}
33 40
34static struct clocksource clocksource_avr32 = { 41struct clocksource __weak clocksource_avr32 = {
35 .name = "avr32", 42 .name = "avr32",
36 .rating = 350, 43 .rating = 350,
37 .read = read_cycle_count, 44 .read = read_cycle_count,
@@ -40,12 +47,20 @@ static struct clocksource clocksource_avr32 = {
40 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 47 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
41}; 48};
42 49
50irqreturn_t __weak timer_interrupt(int irq, void *dev_id);
51
52struct irqaction timer_irqaction = {
53 .handler = timer_interrupt,
54 .flags = IRQF_DISABLED,
55 .name = "timer",
56};
57
43/* 58/*
44 * By default we provide the null RTC ops 59 * By default we provide the null RTC ops
45 */ 60 */
46static unsigned long null_rtc_get_time(void) 61static unsigned long null_rtc_get_time(void)
47{ 62{
48 return mktime(2004, 1, 1, 0, 0, 0); 63 return mktime(2007, 1, 1, 0, 0, 0);
49} 64}
50 65
51static int null_rtc_set_time(unsigned long sec) 66static int null_rtc_set_time(unsigned long sec)
@@ -56,23 +71,14 @@ static int null_rtc_set_time(unsigned long sec)
56static unsigned long (*rtc_get_time)(void) = null_rtc_get_time; 71static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
57static int (*rtc_set_time)(unsigned long) = null_rtc_set_time; 72static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
58 73
59/* how many counter cycles in a jiffy? */
60static unsigned long cycles_per_jiffy;
61
62/* cycle counter value at the previous timer interrupt */
63static unsigned int timerhi, timerlo;
64
65/* the count value for the next timer interrupt */
66static unsigned int expirelo;
67
68static void avr32_timer_ack(void) 74static void avr32_timer_ack(void)
69{ 75{
70 unsigned int count; 76 u32 count;
71 77
72 /* Ack this timer interrupt and set the next one */ 78 /* Ack this timer interrupt and set the next one */
73 expirelo += cycles_per_jiffy; 79 expirelo += cycles_per_jiffy;
80 /* setting COMPARE to 0 stops the COUNT-COMPARE */
74 if (expirelo == 0) { 81 if (expirelo == 0) {
75 printk(KERN_DEBUG "expirelo == 0\n");
76 sysreg_write(COMPARE, expirelo + 1); 82 sysreg_write(COMPARE, expirelo + 1);
77 } else { 83 } else {
78 sysreg_write(COMPARE, expirelo); 84 sysreg_write(COMPARE, expirelo);
@@ -86,27 +92,56 @@ static void avr32_timer_ack(void)
86 } 92 }
87} 93}
88 94
89static unsigned int avr32_hpt_read(void) 95int __weak avr32_hpt_init(void)
90{ 96{
91 return sysreg_read(COUNT); 97 int ret;
98 unsigned long mult, shift, count_hz;
99
100 count_hz = clk_get_rate(boot_cpu_data.clk);
101 shift = clocksource_avr32.shift;
102 mult = clocksource_hz2mult(count_hz, shift);
103 clocksource_avr32.mult = mult;
104
105 {
106 u64 tmp;
107
108 tmp = TICK_NSEC;
109 tmp <<= shift;
110 tmp += mult / 2;
111 do_div(tmp, mult);
112
113 cycles_per_jiffy = tmp;
114 }
115
116 ret = setup_irq(0, &timer_irqaction);
117 if (ret) {
118 pr_debug("timer: could not request IRQ 0: %d\n", ret);
119 return -ENODEV;
120 }
121
122 printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, "
123 "%lu.%03lu MHz\n",
124 ((count_hz + 500) / 1000) / 1000,
125 ((count_hz + 500) / 1000) % 1000);
126
127 return 0;
92} 128}
93 129
94/* 130/*
95 * Taken from MIPS c0_hpt_timer_init(). 131 * Taken from MIPS c0_hpt_timer_init().
96 * 132 *
97 * Why is it so complicated, and what is "count"? My assumption is 133 * The reason COUNT is written twice is probably to make sure we don't get any
98 * that `count' specifies the "reference cycle", i.e. the cycle since 134 * timer interrupts while we are messing with the counter.
99 * reset that should mean "zero". The reason COUNT is written twice is
100 * probably to make sure we don't get any timer interrupts while we
101 * are messing with the counter.
102 */ 135 */
103static void avr32_hpt_init(unsigned int count) 136int __weak avr32_hpt_start(void)
104{ 137{
105 count = sysreg_read(COUNT) - count; 138 u32 count = sysreg_read(COUNT);
106 expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy; 139 expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
107 sysreg_write(COUNT, expirelo - cycles_per_jiffy); 140 sysreg_write(COUNT, expirelo - cycles_per_jiffy);
108 sysreg_write(COMPARE, expirelo); 141 sysreg_write(COMPARE, expirelo);
109 sysreg_write(COUNT, count); 142 sysreg_write(COUNT, count);
143
144 return 0;
110} 145}
111 146
112/* 147/*
@@ -115,26 +150,18 @@ static void avr32_hpt_init(unsigned int count)
115 * 150 *
116 * In UP mode, it is invoked from the (global) timer_interrupt. 151 * In UP mode, it is invoked from the (global) timer_interrupt.
117 */ 152 */
118static void local_timer_interrupt(int irq, void *dev_id) 153void local_timer_interrupt(int irq, void *dev_id)
119{ 154{
120 if (current->pid) 155 if (current->pid)
121 profile_tick(CPU_PROFILING); 156 profile_tick(CPU_PROFILING);
122 update_process_times(user_mode(get_irq_regs())); 157 update_process_times(user_mode(get_irq_regs()));
123} 158}
124 159
125static irqreturn_t 160irqreturn_t __weak timer_interrupt(int irq, void *dev_id)
126timer_interrupt(int irq, void *dev_id)
127{ 161{
128 unsigned int count;
129
130 /* ack timer interrupt and try to set next interrupt */ 162 /* ack timer interrupt and try to set next interrupt */
131 count = avr32_hpt_read();
132 avr32_timer_ack(); 163 avr32_timer_ack();
133 164
134 /* Update timerhi/timerlo for intra-jiffy calibration */
135 timerhi += count < timerlo; /* Wrap around */
136 timerlo = count;
137
138 /* 165 /*
139 * Call the generic timer interrupt handler 166 * Call the generic timer interrupt handler
140 */ 167 */
@@ -153,60 +180,37 @@ timer_interrupt(int irq, void *dev_id)
153 return IRQ_HANDLED; 180 return IRQ_HANDLED;
154} 181}
155 182
156static struct irqaction timer_irqaction = {
157 .handler = timer_interrupt,
158 .flags = IRQF_DISABLED,
159 .name = "timer",
160};
161
162void __init time_init(void) 183void __init time_init(void)
163{ 184{
164 unsigned long mult, shift, count_hz;
165 int ret; 185 int ret;
166 186
187 /*
188 * Make sure we don't get any COMPARE interrupts before we can
189 * handle them.
190 */
191 sysreg_write(COMPARE, 0);
192
167 xtime.tv_sec = rtc_get_time(); 193 xtime.tv_sec = rtc_get_time();
168 xtime.tv_nsec = 0; 194 xtime.tv_nsec = 0;
169 195
170 set_normalized_timespec(&wall_to_monotonic, 196 set_normalized_timespec(&wall_to_monotonic,
171 -xtime.tv_sec, -xtime.tv_nsec); 197 -xtime.tv_sec, -xtime.tv_nsec);
172 198
173 printk("Before time_init: count=%08lx, compare=%08lx\n", 199 ret = avr32_hpt_init();
174 (unsigned long)sysreg_read(COUNT), 200 if (ret) {
175 (unsigned long)sysreg_read(COMPARE)); 201 pr_debug("timer: failed setup: %d\n", ret);
176 202 return;
177 count_hz = clk_get_rate(boot_cpu_data.clk);
178 shift = clocksource_avr32.shift;
179 mult = clocksource_hz2mult(count_hz, shift);
180 clocksource_avr32.mult = mult;
181
182 printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
183
184 {
185 u64 tmp;
186
187 tmp = TICK_NSEC;
188 tmp <<= shift;
189 tmp += mult / 2;
190 do_div(tmp, mult);
191
192 cycles_per_jiffy = tmp;
193 } 203 }
194 204
195 /* This sets up the high precision timer for the first interrupt. */
196 avr32_hpt_init(avr32_hpt_read());
197
198 printk("After time_init: count=%08lx, compare=%08lx\n",
199 (unsigned long)sysreg_read(COUNT),
200 (unsigned long)sysreg_read(COMPARE));
201
202 ret = clocksource_register(&clocksource_avr32); 205 ret = clocksource_register(&clocksource_avr32);
203 if (ret) 206 if (ret)
204 printk(KERN_ERR 207 pr_debug("timer: could not register clocksource: %d\n", ret);
205 "timer: could not register clocksource: %d\n", ret);
206 208
207 ret = setup_irq(0, &timer_irqaction); 209 ret = avr32_hpt_start();
208 if (ret) 210 if (ret) {
209 printk("timer: could not request IRQ 0: %d\n", ret); 211 pr_debug("timer: failed starting: %d\n", ret);
212 return;
213 }
210} 214}
211 215
212static struct sysdev_class timer_class = { 216static struct sysdev_class timer_class = {
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index adc01a12d154..4f0382d8483f 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -5,158 +5,25 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8#undef DEBUG 8
9#include <linux/sched.h> 9#include <linux/bug.h>
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
12#include <linux/module.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/sched.h>
15#include <linux/uaccess.h>
14 16
15#include <asm/traps.h>
16#include <asm/sysreg.h>
17#include <asm/addrspace.h> 17#include <asm/addrspace.h>
18#include <asm/ocd.h>
19#include <asm/mmu_context.h> 18#include <asm/mmu_context.h>
20#include <asm/uaccess.h> 19#include <asm/ocd.h>
21 20#include <asm/sysreg.h>
22static void dump_mem(const char *str, unsigned long bottom, unsigned long top) 21#include <asm/traps.h>
23{
24 unsigned long p;
25 int i;
26
27 printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
28
29 for (p = bottom & ~31; p < top; ) {
30 printk("%04lx: ", p & 0xffff);
31
32 for (i = 0; i < 8; i++, p += 4) {
33 unsigned int val;
34
35 if (p < bottom || p >= top)
36 printk(" ");
37 else {
38 if (__get_user(val, (unsigned int __user *)p)) {
39 printk("\n");
40 goto out;
41 }
42 printk("%08x ", val);
43 }
44 }
45 printk("\n");
46 }
47
48out:
49 return;
50}
51
52static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
53{
54 return (p > (unsigned long)tinfo)
55 && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
56}
57
58#ifdef CONFIG_FRAME_POINTER
59static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
60 struct pt_regs *regs)
61{
62 unsigned long lr, fp;
63 struct thread_info *tinfo;
64
65 tinfo = (struct thread_info *)
66 ((unsigned long)sp & ~(THREAD_SIZE - 1));
67
68 if (regs)
69 fp = regs->r7;
70 else if (tsk == current)
71 asm("mov %0, r7" : "=r"(fp));
72 else
73 fp = tsk->thread.cpu_context.r7;
74
75 /*
76 * Walk the stack as long as the frame pointer (a) is within
77 * the kernel stack of the task, and (b) it doesn't move
78 * downwards.
79 */
80 while (valid_stack_ptr(tinfo, fp)) {
81 unsigned long new_fp;
82
83 lr = *(unsigned long *)fp;
84 printk(" [<%08lx>] ", lr);
85 print_symbol("%s\n", lr);
86
87 new_fp = *(unsigned long *)(fp + 4);
88 if (new_fp <= fp)
89 break;
90 fp = new_fp;
91 }
92 printk("\n");
93}
94#else
95static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
96 struct pt_regs *regs)
97{
98 unsigned long addr;
99
100 while (!kstack_end(sp)) {
101 addr = *sp++;
102 if (kernel_text_address(addr)) {
103 printk(" [<%08lx>] ", addr);
104 print_symbol("%s\n", addr);
105 }
106 }
107}
108#endif
109
110void show_trace(struct task_struct *tsk, unsigned long *sp,
111 struct pt_regs *regs)
112{
113 if (regs &&
114 (((regs->sr & MODE_MASK) == MODE_EXCEPTION) ||
115 ((regs->sr & MODE_MASK) == MODE_USER)))
116 return;
117
118 printk ("Call trace:");
119#ifdef CONFIG_KALLSYMS
120 printk("\n");
121#endif
122
123 __show_trace(tsk, sp, regs);
124 printk("\n");
125}
126
127void show_stack(struct task_struct *tsk, unsigned long *sp)
128{
129 unsigned long stack;
130
131 if (!tsk)
132 tsk = current;
133 if (sp == 0) {
134 if (tsk == current) {
135 register unsigned long *real_sp __asm__("sp");
136 sp = real_sp;
137 } else {
138 sp = (unsigned long *)tsk->thread.cpu_context.ksp;
139 }
140 }
141
142 stack = (unsigned long)sp;
143 dump_mem("Stack: ", stack,
144 THREAD_SIZE + (unsigned long)tsk->thread_info);
145 show_trace(tsk, sp, NULL);
146}
147
148void dump_stack(void)
149{
150 show_stack(NULL, NULL);
151}
152EXPORT_SYMBOL(dump_stack);
153 22
154ATOMIC_NOTIFIER_HEAD(avr32_die_chain); 23ATOMIC_NOTIFIER_HEAD(avr32_die_chain);
155 24
156int register_die_notifier(struct notifier_block *nb) 25int register_die_notifier(struct notifier_block *nb)
157{ 26{
158 pr_debug("register_die_notifier: %p\n", nb);
159
160 return atomic_notifier_chain_register(&avr32_die_chain, nb); 27 return atomic_notifier_chain_register(&avr32_die_chain, nb);
161} 28}
162EXPORT_SYMBOL(register_die_notifier); 29EXPORT_SYMBOL(register_die_notifier);
@@ -169,93 +36,103 @@ EXPORT_SYMBOL(unregister_die_notifier);
169 36
170static DEFINE_SPINLOCK(die_lock); 37static DEFINE_SPINLOCK(die_lock);
171 38
172void __die(const char *str, struct pt_regs *regs, unsigned long err, 39void NORET_TYPE die(const char *str, struct pt_regs *regs, long err)
173 const char *file, const char *func, unsigned long line)
174{ 40{
175 struct task_struct *tsk = current;
176 static int die_counter; 41 static int die_counter;
177 42
178 console_verbose(); 43 console_verbose();
179 spin_lock_irq(&die_lock); 44 spin_lock_irq(&die_lock);
180 bust_spinlocks(1); 45 bust_spinlocks(1);
181 46
182 printk(KERN_ALERT "%s", str); 47 printk(KERN_ALERT "Oops: %s, sig: %ld [#%d]\n" KERN_EMERG,
183 if (file && func) 48 str, err, ++die_counter);
184 printk(" in %s:%s, line %ld", file, func, line); 49#ifdef CONFIG_PREEMPT
185 printk("[#%d]:\n", ++die_counter); 50 printk("PREEMPT ");
186 print_modules(); 51#endif
187 show_regs(regs); 52#ifdef CONFIG_FRAME_POINTER
188 printk("Process %s (pid: %d, stack limit = 0x%p)\n", 53 printk("FRAME_POINTER ");
189 tsk->comm, tsk->pid, tsk->thread_info + 1); 54#endif
190 55 if (current_cpu_data.features & AVR32_FEATURE_OCD) {
191 if (!user_mode(regs) || in_interrupt()) { 56 unsigned long did = __mfdr(DBGREG_DID);
192 dump_mem("Stack: ", regs->sp, 57 printk("chip: 0x%03lx:0x%04lx rev %lu\n",
193 THREAD_SIZE + (unsigned long)tsk->thread_info); 58 (did >> 1) & 0x7ff,
59 (did >> 12) & 0x7fff,
60 (did >> 28) & 0xf);
61 } else {
62 printk("cpu: arch %u r%u / core %u r%u\n",
63 current_cpu_data.arch_type,
64 current_cpu_data.arch_revision,
65 current_cpu_data.cpu_type,
66 current_cpu_data.cpu_revision);
194 } 67 }
195 68
69 print_modules();
70 show_regs_log_lvl(regs, KERN_EMERG);
71 show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
196 bust_spinlocks(0); 72 bust_spinlocks(0);
197 spin_unlock_irq(&die_lock); 73 spin_unlock_irq(&die_lock);
198 do_exit(SIGSEGV); 74
75 if (in_interrupt())
76 panic("Fatal exception in interrupt");
77
78 if (panic_on_oops)
79 panic("Fatal exception");
80
81 do_exit(err);
199} 82}
200 83
201void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long err, 84void _exception(long signr, struct pt_regs *regs, int code,
202 const char *file, const char *func, unsigned long line) 85 unsigned long addr)
203{ 86{
87 siginfo_t info;
88
204 if (!user_mode(regs)) 89 if (!user_mode(regs))
205 __die(str, regs, err, file, func, line); 90 die("Unhandled exception in kernel mode", regs, signr);
206} 91
92 memset(&info, 0, sizeof(info));
93 info.si_signo = signr;
94 info.si_code = code;
95 info.si_addr = (void __user *)addr;
96 force_sig_info(signr, &info, current);
207 97
208asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
209{
210#ifdef CONFIG_SUBARCH_AVR32B
211 /* 98 /*
212 * The exception entry always saves RSR_EX. For NMI, this is 99 * Init gets no signals that it doesn't have a handler for.
213 * wrong; it should be RSR_NMI 100 * That's all very well, but if it has caused a synchronous
101 * exception and we ignore the resulting signal, it will just
102 * generate the same exception over and over again and we get
103 * nowhere. Better to kill it and let the kernel panic.
214 */ 104 */
215 regs->sr = sysreg_read(RSR_NMI); 105 if (is_init(current)) {
216#endif 106 __sighandler_t handler;
107
108 spin_lock_irq(&current->sighand->siglock);
109 handler = current->sighand->action[signr-1].sa.sa_handler;
110 spin_unlock_irq(&current->sighand->siglock);
111 if (handler == SIG_DFL) {
112 /* init has generated a synchronous exception
113 and it doesn't have a handler for the signal */
114 printk(KERN_CRIT "init has generated signal %ld "
115 "but has no handler for it\n", signr);
116 do_exit(signr);
117 }
118 }
119}
217 120
218 printk("NMI taken!!!!\n"); 121asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
219 die("NMI", regs, ecr); 122{
220 BUG(); 123 printk(KERN_ALERT "Got Non-Maskable Interrupt, dumping regs\n");
124 show_regs_log_lvl(regs, KERN_ALERT);
125 show_stack_log_lvl(current, regs->sp, regs, KERN_ALERT);
221} 126}
222 127
223asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs) 128asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs)
224{ 129{
225 printk("Unable to handle critical exception %lu at pc = %08lx!\n", 130 die("Critical exception", regs, SIGKILL);
226 ecr, regs->pc);
227 die("Oops", regs, ecr);
228 BUG();
229} 131}
230 132
231asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs) 133asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs)
232{ 134{
233 siginfo_t info; 135 _exception(SIGBUS, regs, BUS_ADRALN, regs->pc);
234
235 die_if_kernel("Oops: Address exception in kernel mode", regs, ecr);
236
237#ifdef DEBUG
238 if (ecr == ECR_ADDR_ALIGN_X)
239 pr_debug("Instruction Address Exception at pc = %08lx\n",
240 regs->pc);
241 else if (ecr == ECR_ADDR_ALIGN_R)
242 pr_debug("Data Address Exception (Read) at pc = %08lx\n",
243 regs->pc);
244 else if (ecr == ECR_ADDR_ALIGN_W)
245 pr_debug("Data Address Exception (Write) at pc = %08lx\n",
246 regs->pc);
247 else
248 BUG();
249
250 show_regs(regs);
251#endif
252
253 info.si_signo = SIGBUS;
254 info.si_errno = 0;
255 info.si_code = BUS_ADRALN;
256 info.si_addr = (void __user *)regs->pc;
257
258 force_sig_info(SIGBUS, &info, current);
259} 136}
260 137
261/* This way of handling undefined instructions is stolen from ARM */ 138/* This way of handling undefined instructions is stolen from ARM */
@@ -280,7 +157,8 @@ static int do_cop_absent(u32 insn)
280{ 157{
281 int cop_nr; 158 int cop_nr;
282 u32 cpucr; 159 u32 cpucr;
283 if ( (insn & 0xfdf00000) == 0xf1900000 ) 160
161 if ((insn & 0xfdf00000) == 0xf1900000)
284 /* LDC0 */ 162 /* LDC0 */
285 cop_nr = 0; 163 cop_nr = 0;
286 else 164 else
@@ -292,136 +170,91 @@ static int do_cop_absent(u32 insn)
292 sysreg_write(CPUCR, cpucr); 170 sysreg_write(CPUCR, cpucr);
293 171
294 cpucr = sysreg_read(CPUCR); 172 cpucr = sysreg_read(CPUCR);
295 if ( !(cpucr & (1 << (24 + cop_nr))) ){ 173 if (!(cpucr & (1 << (24 + cop_nr))))
296 printk("Coprocessor #%i not found!\n", cop_nr); 174 return -ENODEV;
297 return -1;
298 }
299 175
300 return 0; 176 return 0;
301} 177}
302 178
303#ifdef CONFIG_BUG 179int is_valid_bugaddr(unsigned long pc)
304#ifdef CONFIG_DEBUG_BUGVERBOSE
305static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
306{
307 char *file;
308 u16 line;
309 char c;
310
311 if (__get_user(line, (u16 __user *)(regs->pc + 2)))
312 return;
313 if (__get_user(file, (char * __user *)(regs->pc + 4))
314 || (unsigned long)file < PAGE_OFFSET
315 || __get_user(c, file))
316 file = "<bad filename>";
317
318 printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
319}
320#else
321static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
322{ 180{
181 unsigned short opcode;
182
183 if (pc < PAGE_OFFSET)
184 return 0;
185 if (probe_kernel_address((u16 *)pc, opcode))
186 return 0;
323 187
188 return opcode == AVR32_BUG_OPCODE;
324} 189}
325#endif
326#endif
327 190
328asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs) 191asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs)
329{ 192{
330 u32 insn; 193 u32 insn;
331 struct undef_hook *hook; 194 struct undef_hook *hook;
332 siginfo_t info;
333 void __user *pc; 195 void __user *pc;
196 long code;
334 197
335 if (!user_mode(regs)) 198 if (!user_mode(regs) && (ecr == ECR_ILLEGAL_OPCODE)) {
336 goto kernel_trap; 199 enum bug_trap_type type;
200
201 type = report_bug(regs->pc);
202 switch (type) {
203 case BUG_TRAP_TYPE_NONE:
204 break;
205 case BUG_TRAP_TYPE_WARN:
206 regs->pc += 2;
207 return;
208 case BUG_TRAP_TYPE_BUG:
209 die("Kernel BUG", regs, SIGKILL);
210 }
211 }
337 212
338 local_irq_enable(); 213 local_irq_enable();
339 214
340 pc = (void __user *)instruction_pointer(regs); 215 if (user_mode(regs)) {
341 if (__get_user(insn, (u32 __user *)pc)) 216 pc = (void __user *)instruction_pointer(regs);
342 goto invalid_area; 217 if (get_user(insn, (u32 __user *)pc))
218 goto invalid_area;
343 219
344 if (ecr == ECR_COPROC_ABSENT) { 220 if (ecr == ECR_COPROC_ABSENT && !do_cop_absent(insn))
345 if (do_cop_absent(insn) == 0)
346 return; 221 return;
347 }
348 222
349 spin_lock_irq(&undef_lock); 223 spin_lock_irq(&undef_lock);
350 list_for_each_entry(hook, &undef_hook, node) { 224 list_for_each_entry(hook, &undef_hook, node) {
351 if ((insn & hook->insn_mask) == hook->insn_val) { 225 if ((insn & hook->insn_mask) == hook->insn_val) {
352 if (hook->fn(regs, insn) == 0) { 226 if (hook->fn(regs, insn) == 0) {
353 spin_unlock_irq(&undef_lock); 227 spin_unlock_irq(&undef_lock);
354 return; 228 return;
229 }
355 } 230 }
356 } 231 }
232 spin_unlock_irq(&undef_lock);
357 } 233 }
358 spin_unlock_irq(&undef_lock);
359
360invalid_area:
361 234
362#ifdef DEBUG
363 printk("Illegal instruction at pc = %08lx\n", regs->pc);
364 if (regs->pc < TASK_SIZE) {
365 unsigned long ptbr, pgd, pte, *p;
366
367 ptbr = sysreg_read(PTBR);
368 p = (unsigned long *)ptbr;
369 pgd = p[regs->pc >> 22];
370 p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000);
371 pte = p[(regs->pc >> 12) & 0x3ff];
372 printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte);
373 }
374#endif
375
376 info.si_signo = SIGILL;
377 info.si_errno = 0;
378 info.si_addr = (void __user *)regs->pc;
379 switch (ecr) { 235 switch (ecr) {
380 case ECR_ILLEGAL_OPCODE:
381 case ECR_UNIMPL_INSTRUCTION:
382 info.si_code = ILL_ILLOPC;
383 break;
384 case ECR_PRIVILEGE_VIOLATION: 236 case ECR_PRIVILEGE_VIOLATION:
385 info.si_code = ILL_PRVOPC; 237 code = ILL_PRVOPC;
386 break; 238 break;
387 case ECR_COPROC_ABSENT: 239 case ECR_COPROC_ABSENT:
388 info.si_code = ILL_COPROC; 240 code = ILL_COPROC;
389 break; 241 break;
390 default: 242 default:
391 BUG(); 243 code = ILL_ILLOPC;
244 break;
392 } 245 }
393 246
394 force_sig_info(SIGILL, &info, current); 247 _exception(SIGILL, regs, code, regs->pc);
395 return; 248 return;
396 249
397kernel_trap: 250invalid_area:
398#ifdef CONFIG_BUG 251 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->pc);
399 if (__kernel_text_address(instruction_pointer(regs))) {
400 insn = *(u16 *)instruction_pointer(regs);
401 if (insn == AVR32_BUG_OPCODE) {
402 do_bug_verbose(regs, insn);
403 die("Kernel BUG", regs, 0);
404 return;
405 }
406 }
407#endif
408
409 die("Oops: Illegal instruction in kernel code", regs, ecr);
410} 252}
411 253
412asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs) 254asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs)
413{ 255{
414 siginfo_t info; 256 /* We have no FPU yet */
415 257 _exception(SIGILL, regs, ILL_COPROC, regs->pc);
416 printk("Floating-point exception at pc = %08lx\n", regs->pc);
417
418 /* We have no FPU... */
419 info.si_signo = SIGILL;
420 info.si_errno = 0;
421 info.si_addr = (void __user *)regs->pc;
422 info.si_code = ILL_COPROC;
423
424 force_sig_info(SIGILL, &info, current);
425} 258}
426 259
427 260
diff --git a/arch/avr32/kernel/vmlinux.lds.c b/arch/avr32/kernel/vmlinux.lds.c
index ef13b7c78935..7ad20cfb48a8 100644
--- a/arch/avr32/kernel/vmlinux.lds.c
+++ b/arch/avr32/kernel/vmlinux.lds.c
@@ -26,6 +26,12 @@ SECTIONS
26 _sinittext = .; 26 _sinittext = .;
27 *(.text.reset) 27 *(.text.reset)
28 *(.init.text) 28 *(.init.text)
29 /*
30 * .exit.text is discarded at runtime, not
31 * link time, to deal with references from
32 * __bug_table
33 */
34 *(.exit.text)
29 _einittext = .; 35 _einittext = .;
30 . = ALIGN(4); 36 . = ALIGN(4);
31 __tagtable_begin = .; 37 __tagtable_begin = .;
@@ -86,6 +92,8 @@ SECTIONS
86 __stop___ex_table = .; 92 __stop___ex_table = .;
87 } 93 }
88 94
95 BUG_TABLE
96
89 RODATA 97 RODATA
90 98
91 . = ALIGN(8192); 99 . = ALIGN(8192);
@@ -126,7 +134,6 @@ SECTIONS
126 * thrown away, as cleanup code is never called unless it's a module. 134 * thrown away, as cleanup code is never called unless it's a module.
127 */ 135 */
128 /DISCARD/ : { 136 /DISCARD/ : {
129 *(.exit.text)
130 *(.exit.data) 137 *(.exit.data)
131 *(.exitcall.exit) 138 *(.exitcall.exit)
132 } 139 }
diff --git a/arch/avr32/mach-at32ap/Kconfig b/arch/avr32/mach-at32ap/Kconfig
new file mode 100644
index 000000000000..eb307838457b
--- /dev/null
+++ b/arch/avr32/mach-at32ap/Kconfig
@@ -0,0 +1,31 @@
1if PLATFORM_AT32AP
2
3menu "Atmel AVR32 AP options"
4
5choice
6 prompt "AT32AP7000 static memory bus width"
7 depends on CPU_AT32AP7000
8 default AP7000_16_BIT_SMC
9 help
10 Define the width of the AP7000 external static memory interface.
11 This is used to determine how to mangle the address and/or data
12 when doing little-endian port access.
13
14 The current code can only support a single external memory bus
15 width for all chip selects, excluding the flash (which is using
16 raw access and is thus not affected by any of this.)
17
18config AP7000_32_BIT_SMC
19 bool "32 bit"
20
21config AP7000_16_BIT_SMC
22 bool "16 bit"
23
24config AP7000_8_BIT_SMC
25 bool "8 bit"
26
27endchoice
28
29endmenu
30
31endif # PLATFORM_AT32AP
diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile
index b21bea9af8b1..f1d395724ac6 100644
--- a/arch/avr32/mach-at32ap/Makefile
+++ b/arch/avr32/mach-at32ap/Makefile
@@ -1,2 +1,3 @@
1obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o 1obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o
2obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o 2obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o
3obj-$(CONFIG_CPU_AT32AP7000) += time-tc.o
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index 472703f90c22..56db45b99a0f 100644
--- a/arch/avr32/mach-at32ap/at32ap7000.c
+++ b/arch/avr32/mach-at32ap/at32ap7000.c
@@ -18,6 +18,7 @@
18#include <asm/arch/sm.h> 18#include <asm/arch/sm.h>
19 19
20#include "clock.h" 20#include "clock.h"
21#include "hmatrix.h"
21#include "pio.h" 22#include "pio.h"
22#include "sm.h" 23#include "sm.h"
23 24
@@ -416,7 +417,15 @@ struct platform_device at32_sm_device = {
416 .resource = sm_resource, 417 .resource = sm_resource,
417 .num_resources = ARRAY_SIZE(sm_resource), 418 .num_resources = ARRAY_SIZE(sm_resource),
418}; 419};
419DEV_CLK(pclk, at32_sm, pbb, 0); 420static struct clk at32_sm_pclk = {
421 .name = "pclk",
422 .dev = &at32_sm_device.dev,
423 .parent = &pbb_clk,
424 .mode = pbb_clk_mode,
425 .get_rate = pbb_clk_get_rate,
426 .users = 1,
427 .index = 0,
428};
420 429
421static struct resource intc0_resource[] = { 430static struct resource intc0_resource[] = {
422 PBMEM(0xfff00400), 431 PBMEM(0xfff00400),
@@ -442,6 +451,7 @@ static struct clk hramc_clk = {
442 .mode = hsb_clk_mode, 451 .mode = hsb_clk_mode,
443 .get_rate = hsb_clk_get_rate, 452 .get_rate = hsb_clk_get_rate,
444 .users = 1, 453 .users = 1,
454 .index = 3,
445}; 455};
446 456
447static struct resource smc0_resource[] = { 457static struct resource smc0_resource[] = {
@@ -467,6 +477,57 @@ static struct clk pico_clk = {
467}; 477};
468 478
469/* -------------------------------------------------------------------- 479/* --------------------------------------------------------------------
480 * HMATRIX
481 * -------------------------------------------------------------------- */
482
483static struct clk hmatrix_clk = {
484 .name = "hmatrix_clk",
485 .parent = &pbb_clk,
486 .mode = pbb_clk_mode,
487 .get_rate = pbb_clk_get_rate,
488 .index = 2,
489 .users = 1,
490};
491#define HMATRIX_BASE ((void __iomem *)0xfff00800)
492
493#define hmatrix_readl(reg) \
494 __raw_readl((HMATRIX_BASE) + HMATRIX_##reg)
495#define hmatrix_writel(reg,value) \
496 __raw_writel((value), (HMATRIX_BASE) + HMATRIX_##reg)
497
498/*
499 * Set bits in the HMATRIX Special Function Register (SFR) used by the
500 * External Bus Interface (EBI). This can be used to enable special
501 * features like CompactFlash support, NAND Flash support, etc. on
502 * certain chipselects.
503 */
504static inline void set_ebi_sfr_bits(u32 mask)
505{
506 u32 sfr;
507
508 clk_enable(&hmatrix_clk);
509 sfr = hmatrix_readl(SFR4);
510 sfr |= mask;
511 hmatrix_writel(SFR4, sfr);
512 clk_disable(&hmatrix_clk);
513}
514
515/* --------------------------------------------------------------------
516 * System Timer/Counter (TC)
517 * -------------------------------------------------------------------- */
518static struct resource at32_systc0_resource[] = {
519 PBMEM(0xfff00c00),
520 IRQ(22),
521};
522struct platform_device at32_systc0_device = {
523 .name = "systc",
524 .id = 0,
525 .resource = at32_systc0_resource,
526 .num_resources = ARRAY_SIZE(at32_systc0_resource),
527};
528DEV_CLK(pclk, at32_systc0, pbb, 3);
529
530/* --------------------------------------------------------------------
470 * PIO 531 * PIO
471 * -------------------------------------------------------------------- */ 532 * -------------------------------------------------------------------- */
472 533
@@ -514,6 +575,8 @@ void __init at32_add_system_devices(void)
514 platform_device_register(&smc0_device); 575 platform_device_register(&smc0_device);
515 platform_device_register(&pdc_device); 576 platform_device_register(&pdc_device);
516 577
578 platform_device_register(&at32_systc0_device);
579
517 platform_device_register(&pio0_device); 580 platform_device_register(&pio0_device);
518 platform_device_register(&pio1_device); 581 platform_device_register(&pio1_device);
519 platform_device_register(&pio2_device); 582 platform_device_register(&pio2_device);
@@ -950,6 +1013,7 @@ struct clk *at32_clock_list[] = {
950 &pbb_clk, 1013 &pbb_clk,
951 &at32_sm_pclk, 1014 &at32_sm_pclk,
952 &at32_intc0_pclk, 1015 &at32_intc0_pclk,
1016 &hmatrix_clk,
953 &ebi_clk, 1017 &ebi_clk,
954 &hramc_clk, 1018 &hramc_clk,
955 &smc0_pclk, 1019 &smc0_pclk,
@@ -962,6 +1026,7 @@ struct clk *at32_clock_list[] = {
962 &pio2_mck, 1026 &pio2_mck,
963 &pio3_mck, 1027 &pio3_mck,
964 &pio4_mck, 1028 &pio4_mck,
1029 &at32_systc0_pclk,
965 &atmel_usart0_usart, 1030 &atmel_usart0_usart,
966 &atmel_usart1_usart, 1031 &atmel_usart1_usart,
967 &atmel_usart2_usart, 1032 &atmel_usart2_usart,
@@ -1024,6 +1089,9 @@ void __init at32_clock_init(void)
1024 for (i = 0; i < ARRAY_SIZE(at32_clock_list); i++) { 1089 for (i = 0; i < ARRAY_SIZE(at32_clock_list); i++) {
1025 struct clk *clk = at32_clock_list[i]; 1090 struct clk *clk = at32_clock_list[i];
1026 1091
1092 if (clk->users == 0)
1093 continue;
1094
1027 if (clk->mode == &cpu_clk_mode) 1095 if (clk->mode == &cpu_clk_mode)
1028 cpu_mask |= 1 << clk->index; 1096 cpu_mask |= 1 << clk->index;
1029 else if (clk->mode == &hsb_clk_mode) 1097 else if (clk->mode == &hsb_clk_mode)
diff --git a/arch/avr32/mach-at32ap/hmatrix.h b/arch/avr32/mach-at32ap/hmatrix.h
new file mode 100644
index 000000000000..d10bfb60d68d
--- /dev/null
+++ b/arch/avr32/mach-at32ap/hmatrix.h
@@ -0,0 +1,182 @@
1/*
2 * Register definitions for High-Speed Bus Matrix
3 */
4#ifndef __HMATRIX_H
5#define __HMATRIX_H
6
7/* HMATRIX register offsets */
8#define HMATRIX_MCFG0 0x0000
9#define HMATRIX_MCFG1 0x0004
10#define HMATRIX_MCFG2 0x0008
11#define HMATRIX_MCFG3 0x000c
12#define HMATRIX_MCFG4 0x0010
13#define HMATRIX_MCFG5 0x0014
14#define HMATRIX_MCFG6 0x0018
15#define HMATRIX_MCFG7 0x001c
16#define HMATRIX_MCFG8 0x0020
17#define HMATRIX_MCFG9 0x0024
18#define HMATRIX_MCFG10 0x0028
19#define HMATRIX_MCFG11 0x002c
20#define HMATRIX_MCFG12 0x0030
21#define HMATRIX_MCFG13 0x0034
22#define HMATRIX_MCFG14 0x0038
23#define HMATRIX_MCFG15 0x003c
24#define HMATRIX_SCFG0 0x0040
25#define HMATRIX_SCFG1 0x0044
26#define HMATRIX_SCFG2 0x0048
27#define HMATRIX_SCFG3 0x004c
28#define HMATRIX_SCFG4 0x0050
29#define HMATRIX_SCFG5 0x0054
30#define HMATRIX_SCFG6 0x0058
31#define HMATRIX_SCFG7 0x005c
32#define HMATRIX_SCFG8 0x0060
33#define HMATRIX_SCFG9 0x0064
34#define HMATRIX_SCFG10 0x0068
35#define HMATRIX_SCFG11 0x006c
36#define HMATRIX_SCFG12 0x0070
37#define HMATRIX_SCFG13 0x0074
38#define HMATRIX_SCFG14 0x0078
39#define HMATRIX_SCFG15 0x007c
40#define HMATRIX_PRAS0 0x0080
41#define HMATRIX_PRBS0 0x0084
42#define HMATRIX_PRAS1 0x0088
43#define HMATRIX_PRBS1 0x008c
44#define HMATRIX_PRAS2 0x0090
45#define HMATRIX_PRBS2 0x0094
46#define HMATRIX_PRAS3 0x0098
47#define HMATRIX_PRBS3 0x009c
48#define HMATRIX_PRAS4 0x00a0
49#define HMATRIX_PRBS4 0x00a4
50#define HMATRIX_PRAS5 0x00a8
51#define HMATRIX_PRBS5 0x00ac
52#define HMATRIX_PRAS6 0x00b0
53#define HMATRIX_PRBS6 0x00b4
54#define HMATRIX_PRAS7 0x00b8
55#define HMATRIX_PRBS7 0x00bc
56#define HMATRIX_PRAS8 0x00c0
57#define HMATRIX_PRBS8 0x00c4
58#define HMATRIX_PRAS9 0x00c8
59#define HMATRIX_PRBS9 0x00cc
60#define HMATRIX_PRAS10 0x00d0
61#define HMATRIX_PRBS10 0x00d4
62#define HMATRIX_PRAS11 0x00d8
63#define HMATRIX_PRBS11 0x00dc
64#define HMATRIX_PRAS12 0x00e0
65#define HMATRIX_PRBS12 0x00e4
66#define HMATRIX_PRAS13 0x00e8
67#define HMATRIX_PRBS13 0x00ec
68#define HMATRIX_PRAS14 0x00f0
69#define HMATRIX_PRBS14 0x00f4
70#define HMATRIX_PRAS15 0x00f8
71#define HMATRIX_PRBS15 0x00fc
72#define HMATRIX_MRCR 0x0100
73#define HMATRIX_SFR0 0x0110
74#define HMATRIX_SFR1 0x0114
75#define HMATRIX_SFR2 0x0118
76#define HMATRIX_SFR3 0x011c
77#define HMATRIX_SFR4 0x0120
78#define HMATRIX_SFR5 0x0124
79#define HMATRIX_SFR6 0x0128
80#define HMATRIX_SFR7 0x012c
81#define HMATRIX_SFR8 0x0130
82#define HMATRIX_SFR9 0x0134
83#define HMATRIX_SFR10 0x0138
84#define HMATRIX_SFR11 0x013c
85#define HMATRIX_SFR12 0x0140
86#define HMATRIX_SFR13 0x0144
87#define HMATRIX_SFR14 0x0148
88#define HMATRIX_SFR15 0x014c
89
90/* Bitfields in MCFGx */
91#define HMATRIX_ULBT_OFFSET 0
92#define HMATRIX_ULBT_SIZE 3
93
94/* Bitfields in SCFGx */
95#define HMATRIX_SLOT_CYCLE_OFFSET 0
96#define HMATRIX_SLOT_CYCLE_SIZE 8
97#define HMATRIX_DEFMSTR_TYPE_OFFSET 16
98#define HMATRIX_DEFMSTR_TYPE_SIZE 2
99#define HMATRIX_FIXED_DEFMSTR_OFFSET 18
100#define HMATRIX_FIXED_DEFMSTR_SIZE 4
101#define HMATRIX_ARBT_OFFSET 24
102#define HMATRIX_ARBT_SIZE 2
103
104/* Bitfields in PRASx */
105#define HMATRIX_M0PR_OFFSET 0
106#define HMATRIX_M0PR_SIZE 4
107#define HMATRIX_M1PR_OFFSET 4
108#define HMATRIX_M1PR_SIZE 4
109#define HMATRIX_M2PR_OFFSET 8
110#define HMATRIX_M2PR_SIZE 4
111#define HMATRIX_M3PR_OFFSET 12
112#define HMATRIX_M3PR_SIZE 4
113#define HMATRIX_M4PR_OFFSET 16
114#define HMATRIX_M4PR_SIZE 4
115#define HMATRIX_M5PR_OFFSET 20
116#define HMATRIX_M5PR_SIZE 4
117#define HMATRIX_M6PR_OFFSET 24
118#define HMATRIX_M6PR_SIZE 4
119#define HMATRIX_M7PR_OFFSET 28
120#define HMATRIX_M7PR_SIZE 4
121
122/* Bitfields in PRBSx */
123#define HMATRIX_M8PR_OFFSET 0
124#define HMATRIX_M8PR_SIZE 4
125#define HMATRIX_M9PR_OFFSET 4
126#define HMATRIX_M9PR_SIZE 4
127#define HMATRIX_M10PR_OFFSET 8
128#define HMATRIX_M10PR_SIZE 4
129#define HMATRIX_M11PR_OFFSET 12
130#define HMATRIX_M11PR_SIZE 4
131#define HMATRIX_M12PR_OFFSET 16
132#define HMATRIX_M12PR_SIZE 4
133#define HMATRIX_M13PR_OFFSET 20
134#define HMATRIX_M13PR_SIZE 4
135#define HMATRIX_M14PR_OFFSET 24
136#define HMATRIX_M14PR_SIZE 4
137#define HMATRIX_M15PR_OFFSET 28
138#define HMATRIX_M15PR_SIZE 4
139
140/* Bitfields in SFR4 */
141#define HMATRIX_CS1A_OFFSET 1
142#define HMATRIX_CS1A_SIZE 1
143#define HMATRIX_CS3A_OFFSET 3
144#define HMATRIX_CS3A_SIZE 1
145#define HMATRIX_CS4A_OFFSET 4
146#define HMATRIX_CS4A_SIZE 1
147#define HMATRIX_CS5A_OFFSET 5
148#define HMATRIX_CS5A_SIZE 1
149#define HMATRIX_DBPUC_OFFSET 8
150#define HMATRIX_DBPUC_SIZE 1
151
152/* Constants for ULBT */
153#define HMATRIX_ULBT_INFINITE 0
154#define HMATRIX_ULBT_SINGLE 1
155#define HMATRIX_ULBT_FOUR_BEAT 2
156#define HMATRIX_ULBT_EIGHT_BEAT 3
157#define HMATRIX_ULBT_SIXTEEN_BEAT 4
158
159/* Constants for DEFMSTR_TYPE */
160#define HMATRIX_DEFMSTR_TYPE_NO_DEFAULT 0
161#define HMATRIX_DEFMSTR_TYPE_LAST_DEFAULT 1
162#define HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT 2
163
164/* Constants for ARBT */
165#define HMATRIX_ARBT_ROUND_ROBIN 0
166#define HMATRIX_ARBT_FIXED_PRIORITY 1
167
168/* Bit manipulation macros */
169#define HMATRIX_BIT(name) \
170 (1 << HMATRIX_##name##_OFFSET)
171#define HMATRIX_BF(name,value) \
172 (((value) & ((1 << HMATRIX_##name##_SIZE) - 1)) \
173 << HMATRIX_##name##_OFFSET)
174#define HMATRIX_BFEXT(name,value) \
175 (((value) >> HMATRIX_##name##_OFFSET) \
176 & ((1 << HMATRIX_##name##_SIZE) - 1))
177#define HMATRIX_BFINS(name,value,old) \
178 (((old) & ~(((1 << HMATRIX_##name##_SIZE) - 1) \
179 << HMATRIX_##name##_OFFSET)) \
180 | HMATRIX_BF(name,value))
181
182#endif /* __HMATRIX_H */
diff --git a/arch/avr32/mach-at32ap/hsmc.c b/arch/avr32/mach-at32ap/hsmc.c
index 7691721928a7..5e22a750632b 100644
--- a/arch/avr32/mach-at32ap/hsmc.c
+++ b/arch/avr32/mach-at32ap/hsmc.c
@@ -75,12 +75,35 @@ int smc_set_configuration(int cs, const struct smc_config *config)
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
77 77
78 switch (config->nwait_mode) {
79 case 0:
80 mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_DISABLED);
81 break;
82 case 1:
83 mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_RESERVED);
84 break;
85 case 2:
86 mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_FROZEN);
87 break;
88 case 3:
89 mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_READY);
90 break;
91 default:
92 return -EINVAL;
93 }
94
95 if (config->tdf_cycles) {
96 mode |= HSMC_BF(TDF_CYCLES, config->tdf_cycles);
97 }
98
78 if (config->nrd_controlled) 99 if (config->nrd_controlled)
79 mode |= HSMC_BIT(READ_MODE); 100 mode |= HSMC_BIT(READ_MODE);
80 if (config->nwe_controlled) 101 if (config->nwe_controlled)
81 mode |= HSMC_BIT(WRITE_MODE); 102 mode |= HSMC_BIT(WRITE_MODE);
82 if (config->byte_write) 103 if (config->byte_write)
83 mode |= HSMC_BIT(BAT); 104 mode |= HSMC_BIT(BAT);
105 if (config->tdf_mode)
106 mode |= HSMC_BIT(TDF_MODE);
84 107
85 pr_debug("smc cs%d: setup/%08x pulse/%08x cycle/%08x mode/%08x\n", 108 pr_debug("smc cs%d: setup/%08x pulse/%08x cycle/%08x mode/%08x\n",
86 cs, setup, pulse, cycle, mode); 109 cs, setup, pulse, cycle, mode);
diff --git a/arch/avr32/mach-at32ap/time-tc.c b/arch/avr32/mach-at32ap/time-tc.c
new file mode 100644
index 000000000000..e3070bdd4bb9
--- /dev/null
+++ b/arch/avr32/mach-at32ap/time-tc.c
@@ -0,0 +1,218 @@
1/*
2 * Copyright (C) 2004-2007 Atmel Corporation
3 *
4 * Based on MIPS implementation arch/mips/kernel/time.c
5 * Copyright 2001 MontaVista Software Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/clocksource.h>
14#include <linux/time.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/kernel_stat.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/profile.h>
22#include <linux/sysdev.h>
23#include <linux/err.h>
24
25#include <asm/div64.h>
26#include <asm/sysreg.h>
27#include <asm/io.h>
28#include <asm/sections.h>
29
30#include <asm/arch/time.h>
31
32/* how many counter cycles in a jiffy? */
33static u32 cycles_per_jiffy;
34
35/* the count value for the next timer interrupt */
36static u32 expirelo;
37
38/* the I/O registers of the TC module */
39static void __iomem *ioregs;
40
41cycle_t read_cycle_count(void)
42{
43 return (cycle_t)timer_read(ioregs, 0, CV);
44}
45
/*
 * Register TC channel 0 as a clocksource.  mask/shift of 16 match the
 * 16-bit hardware counter; rating 342 outranks the jiffies fallback.
 * NOTE(review): rating value looks arbitrary — confirm against other
 * platform clocksources before changing.
 */
 46struct clocksource clocksource_avr32 = {
 47	.name		= "avr32",
 48	.rating		= 342,
 49	.read		= read_cycle_count,
 50	.mask		= CLOCKSOURCE_MASK(16),
 51	.shift		= 16,
 52	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 53};
54
55static void avr32_timer_ack(void)
56{
57 u16 count = expirelo;
58
59 /* Ack this timer interrupt and set the next one, use a u16
60 * variable so it will wrap around correctly */
61 count += cycles_per_jiffy;
62 expirelo = count;
63 timer_write(ioregs, 0, RC, expirelo);
64
65 /* Check to see if we have missed any timer interrupts */
66 count = timer_read(ioregs, 0, CV);
67 if ((count - expirelo) < 0x7fff) {
68 expirelo = count + cycles_per_jiffy;
69 timer_write(ioregs, 0, RC, expirelo);
70 }
71}
72
73u32 avr32_hpt_read(void)
74{
75 return timer_read(ioregs, 0, CV);
76}
77
78static int avr32_timer_calc_div_and_set_jiffies(struct clk *pclk)
79{
80 unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2;
81 unsigned int divs[] = { 4, 8, 16, 32 };
82 int divs_size = sizeof(divs) / sizeof(*divs);
83 int i = 0;
84 unsigned long count_hz;
85 unsigned long shift;
86 unsigned long mult;
87 int clock_div = -1;
88 u64 tmp;
89
90 shift = clocksource_avr32.shift;
91
92 do {
93 count_hz = clk_get_rate(pclk) / divs[i];
94 mult = clocksource_hz2mult(count_hz, shift);
95 clocksource_avr32.mult = mult;
96
97 tmp = TICK_NSEC;
98 tmp <<= shift;
99 tmp += mult / 2;
100 do_div(tmp, mult);
101
102 cycles_per_jiffy = tmp;
103 } while (cycles_per_jiffy > cycles_max && ++i < divs_size);
104
105 clock_div = i + 1;
106
107 if (clock_div > divs_size) {
108 pr_debug("timer: could not calculate clock divider\n");
109 return -EFAULT;
110 }
111
112 /* Set the clock divider */
113 timer_write(ioregs, 0, CMR, TIMER_BF(CMR_TCCLKS, clock_div));
114
115 return 0;
116}
117
118int avr32_hpt_init(unsigned int count)
119{
120 struct resource *regs;
121 struct clk *pclk;
122 int irq = -1;
123 int ret = 0;
124
125 ret = -ENXIO;
126
127 irq = platform_get_irq(&at32_systc0_device, 0);
128 if (irq < 0) {
129 pr_debug("timer: could not get irq\n");
130 goto out_error;
131 }
132
133 pclk = clk_get(&at32_systc0_device.dev, "pclk");
134 if (IS_ERR(pclk)) {
135 pr_debug("timer: could not get clk: %ld\n", PTR_ERR(pclk));
136 goto out_error;
137 }
138 clk_enable(pclk);
139
140 regs = platform_get_resource(&at32_systc0_device, IORESOURCE_MEM, 0);
141 if (!regs) {
142 pr_debug("timer: could not get resource\n");
143 goto out_error_clk;
144 }
145
146 ioregs = ioremap(regs->start, regs->end - regs->start + 1);
147 if (!ioregs) {
148 pr_debug("timer: could not get ioregs\n");
149 goto out_error_clk;
150 }
151
152 ret = avr32_timer_calc_div_and_set_jiffies(pclk);
153 if (ret)
154 goto out_error_io;
155
156 ret = setup_irq(irq, &timer_irqaction);
157 if (ret) {
158 pr_debug("timer: could not request irq %d: %d\n",
159 irq, ret);
160 goto out_error_io;
161 }
162
163 expirelo = (timer_read(ioregs, 0, CV) / cycles_per_jiffy + 1)
164 * cycles_per_jiffy;
165
166 /* Enable clock and interrupts on RC compare */
167 timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_CLKEN));
168 timer_write(ioregs, 0, IER, TIMER_BIT(IER_CPCS));
169 /* Set cycles to first interrupt */
170 timer_write(ioregs, 0, RC, expirelo);
171
172 printk(KERN_INFO "timer: AT32AP system timer/counter at 0x%p irq %d\n",
173 ioregs, irq);
174
175 return 0;
176
177out_error_io:
178 iounmap(ioregs);
179out_error_clk:
180 clk_put(pclk);
181out_error:
182 return ret;
183}
184
185int avr32_hpt_start(void)
186{
187 timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_SWTRG));
188 return 0;
189}
190
191irqreturn_t timer_interrupt(int irq, void *dev_id)
192{
193 unsigned int sr = timer_read(ioregs, 0, SR);
194
195 if (sr & TIMER_BIT(SR_CPCS)) {
196 /* ack timer interrupt and try to set next interrupt */
197 avr32_timer_ack();
198
199 /*
200 * Call the generic timer interrupt handler
201 */
202 write_seqlock(&xtime_lock);
203 do_timer(1);
204 write_sequnlock(&xtime_lock);
205
206 /*
207 * In UP mode, we call local_timer_interrupt() to do profiling
208 * and process accounting.
209 *
210 * SMP is not supported yet.
211 */
212 local_timer_interrupt(irq, dev_id);
213
214 return IRQ_HANDLED;
215 }
216
217 return IRQ_NONE;
218}
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 678557260a35..146ebdbdc302 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -16,26 +16,8 @@
16#include <asm/kdebug.h> 16#include <asm/kdebug.h>
17#include <asm/mmu_context.h> 17#include <asm/mmu_context.h>
18#include <asm/sysreg.h> 18#include <asm/sysreg.h>
19#include <asm/uaccess.h>
20#include <asm/tlb.h> 19#include <asm/tlb.h>
21 20#include <asm/uaccess.h>
22#ifdef DEBUG
23static void dump_code(unsigned long pc)
24{
25 char *p = (char *)pc;
26 char val;
27 int i;
28
29
30 printk(KERN_DEBUG "Code:");
31 for (i = 0; i < 16; i++) {
32 if (__get_user(val, p + i))
33 break;
34 printk(" %02x", val);
35 }
36 printk("\n");
37}
38#endif
39 21
40#ifdef CONFIG_KPROBES 22#ifdef CONFIG_KPROBES
41ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); 23ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
@@ -68,17 +50,19 @@ static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
68} 50}
69#endif 51#endif
70 52
53int exception_trace = 1;
54
71/* 55/*
72 * This routine handles page faults. It determines the address and the 56 * This routine handles page faults. It determines the address and the
73 * problem, and then passes it off to one of the appropriate routines. 57 * problem, and then passes it off to one of the appropriate routines.
74 * 58 *
75 * ecr is the Exception Cause Register. Possible values are: 59 * ecr is the Exception Cause Register. Possible values are:
76 * 5: Page not found (instruction access)
77 * 6: Protection fault (instruction access) 60 * 6: Protection fault (instruction access)
78 * 12: Page not found (read access) 61 * 15: Protection fault (read access)
79 * 13: Page not found (write access) 62 * 16: Protection fault (write access)
80 * 14: Protection fault (read access) 63 * 20: Page not found (instruction access)
81 * 15: Protection fault (write access) 64 * 24: Page not found (read access)
65 * 28: Page not found (write access)
82 */ 66 */
83asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) 67asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
84{ 68{
@@ -88,7 +72,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
88 const struct exception_table_entry *fixup; 72 const struct exception_table_entry *fixup;
89 unsigned long address; 73 unsigned long address;
90 unsigned long page; 74 unsigned long page;
91 int writeaccess = 0; 75 int writeaccess;
76 long signr;
77 int code;
92 78
93 if (notify_page_fault(DIE_PAGE_FAULT, regs, 79 if (notify_page_fault(DIE_PAGE_FAULT, regs,
94 ecr, SIGSEGV) == NOTIFY_STOP) 80 ecr, SIGSEGV) == NOTIFY_STOP)
@@ -99,6 +85,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
99 tsk = current; 85 tsk = current;
100 mm = tsk->mm; 86 mm = tsk->mm;
101 87
88 signr = SIGSEGV;
89 code = SEGV_MAPERR;
90
102 /* 91 /*
103 * If we're in an interrupt or have no user context, we must 92 * If we're in an interrupt or have no user context, we must
104 * not take the fault... 93 * not take the fault...
@@ -125,7 +114,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
125 * can handle it... 114 * can handle it...
126 */ 115 */
127good_area: 116good_area:
128 //pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags); 117 code = SEGV_ACCERR;
118 writeaccess = 0;
119
129 switch (ecr) { 120 switch (ecr) {
130 case ECR_PROTECTION_X: 121 case ECR_PROTECTION_X:
131 case ECR_TLB_MISS_X: 122 case ECR_TLB_MISS_X:
@@ -176,46 +167,24 @@ survive:
176 * map. Fix it, but check if it's kernel or user first... 167 * map. Fix it, but check if it's kernel or user first...
177 */ 168 */
178bad_area: 169bad_area:
179 pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
180 tsk->comm, tsk->pid, address, ecr);
181
182 up_read(&mm->mmap_sem); 170 up_read(&mm->mmap_sem);
183 171
184 if (user_mode(regs)) { 172 if (user_mode(regs)) {
185 /* Hmm...we have to pass address and ecr somehow... */ 173 if (exception_trace)
186 /* tsk->thread.address = address; 174 printk("%s%s[%d]: segfault at %08lx pc %08lx "
187 tsk->thread.error_code = ecr; */ 175 "sp %08lx ecr %lu\n",
188#ifdef DEBUG 176 is_init(tsk) ? KERN_EMERG : KERN_INFO,
189 show_regs(regs); 177 tsk->comm, tsk->pid, address, regs->pc,
190 dump_code(regs->pc); 178 regs->sp, ecr);
191 179 _exception(SIGSEGV, regs, code, address);
192 page = sysreg_read(PTBR);
193 printk("ptbr = %08lx", page);
194 if (page) {
195 page = ((unsigned long *)page)[address >> 22];
196 printk(" pgd = %08lx", page);
197 if (page & _PAGE_PRESENT) {
198 page &= PAGE_MASK;
199 address &= 0x003ff000;
200 page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
201 printk(" pte = %08lx\n", page);
202 }
203 }
204#endif
205 pr_debug("Sending SIGSEGV to PID %d...\n",
206 tsk->pid);
207 force_sig(SIGSEGV, tsk);
208 return; 180 return;
209 } 181 }
210 182
211no_context: 183no_context:
212 pr_debug("No context\n");
213
214 /* Are we prepared to handle this kernel fault? */ 184 /* Are we prepared to handle this kernel fault? */
215 fixup = search_exception_tables(regs->pc); 185 fixup = search_exception_tables(regs->pc);
216 if (fixup) { 186 if (fixup) {
217 regs->pc = fixup->fixup; 187 regs->pc = fixup->fixup;
218 pr_debug("Found fixup at %08lx\n", fixup->fixup);
219 return; 188 return;
220 } 189 }
221 190
@@ -230,7 +199,6 @@ no_context:
230 printk(KERN_ALERT 199 printk(KERN_ALERT
231 "Unable to handle kernel paging request"); 200 "Unable to handle kernel paging request");
232 printk(" at virtual address %08lx\n", address); 201 printk(" at virtual address %08lx\n", address);
233 printk(KERN_ALERT "pc = %08lx\n", regs->pc);
234 202
235 page = sysreg_read(PTBR); 203 page = sysreg_read(PTBR);
236 printk(KERN_ALERT "ptbr = %08lx", page); 204 printk(KERN_ALERT "ptbr = %08lx", page);
@@ -241,20 +209,20 @@ no_context:
241 page &= PAGE_MASK; 209 page &= PAGE_MASK;
242 address &= 0x003ff000; 210 address &= 0x003ff000;
243 page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; 211 page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
244 printk(" pte = %08lx\n", page); 212 printk(" pte = %08lx", page);
245 } 213 }
246 } 214 }
247 die("\nOops", regs, ecr); 215 printk("\n");
248 do_exit(SIGKILL); 216 die("Kernel access of bad area", regs, signr);
217 return;
249 218
250 /* 219 /*
251 * We ran out of memory, or some other thing happened to us 220 * We ran out of memory, or some other thing happened to us
252 * that made us unable to handle the page fault gracefully. 221 * that made us unable to handle the page fault gracefully.
253 */ 222 */
254out_of_memory: 223out_of_memory:
255 printk("Out of memory\n");
256 up_read(&mm->mmap_sem); 224 up_read(&mm->mmap_sem);
257 if (current->pid == 1) { 225 if (is_init(current)) {
258 yield(); 226 yield();
259 down_read(&mm->mmap_sem); 227 down_read(&mm->mmap_sem);
260 goto survive; 228 goto survive;
@@ -267,21 +235,20 @@ out_of_memory:
267do_sigbus: 235do_sigbus:
268 up_read(&mm->mmap_sem); 236 up_read(&mm->mmap_sem);
269 237
270 /*
271 * Send a sigbus, regardless of whether we were in kernel or
272 * user mode.
273 */
274 /* address, error_code, trap_no, ... */
275#ifdef DEBUG
276 show_regs(regs);
277 dump_code(regs->pc);
278#endif
279 pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
280 force_sig(SIGBUS, tsk);
281
282 /* Kernel mode? Handle exceptions or die */ 238 /* Kernel mode? Handle exceptions or die */
239 signr = SIGBUS;
240 code = BUS_ADRERR;
283 if (!user_mode(regs)) 241 if (!user_mode(regs))
284 goto no_context; 242 goto no_context;
243
244 if (exception_trace)
245 printk("%s%s[%d]: bus error at %08lx pc %08lx "
246 "sp %08lx ecr %lu\n",
247 is_init(tsk) ? KERN_EMERG : KERN_INFO,
248 tsk->comm, tsk->pid, address, regs->pc,
249 regs->sp, ecr);
250
251 _exception(SIGBUS, regs, BUS_ADRERR, address);
285} 252}
286 253
287asmlinkage void do_bus_error(unsigned long addr, int write_access, 254asmlinkage void do_bus_error(unsigned long addr, int write_access,
@@ -292,8 +259,7 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
292 addr, write_access ? "write" : "read"); 259 addr, write_access ? "write" : "read");
293 printk(KERN_INFO "DTLB dump:\n"); 260 printk(KERN_INFO "DTLB dump:\n");
294 dump_dtlb(); 261 dump_dtlb();
295 die("Bus Error", regs, write_access); 262 die("Bus Error", regs, SIGKILL);
296 do_exit(SIGKILL);
297} 263}
298 264
299/* 265/*
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 70da6894acc1..82cf70854b90 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -10,11 +10,9 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/swap.h> 11#include <linux/swap.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/initrd.h>
14#include <linux/mmzone.h> 13#include <linux/mmzone.h>
15#include <linux/bootmem.h> 14#include <linux/bootmem.h>
16#include <linux/pagemap.h> 15#include <linux/pagemap.h>
17#include <linux/pfn.h>
18#include <linux/nodemask.h> 16#include <linux/nodemask.h>
19 17
20#include <asm/page.h> 18#include <asm/page.h>
@@ -78,242 +76,6 @@ void show_mem(void)
78 printk ("%d pages swap cached\n", cached); 76 printk ("%d pages swap cached\n", cached);
79} 77}
80 78
81static void __init print_memory_map(const char *what,
82 struct tag_mem_range *mem)
83{
84 printk ("%s:\n", what);
85 for (; mem; mem = mem->next) {
86 printk (" %08lx - %08lx\n",
87 (unsigned long)mem->addr,
88 (unsigned long)(mem->addr + mem->size));
89 }
90}
91
92#define MAX_LOWMEM HIGHMEM_START
93#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
94
95/*
96 * Sort a list of memory regions in-place by ascending address.
97 *
98 * We're using bubble sort because we only have singly linked lists
99 * with few elements.
100 */
101static void __init sort_mem_list(struct tag_mem_range **pmem)
102{
103 int done;
104 struct tag_mem_range **a, **b;
105
106 if (!*pmem)
107 return;
108
109 do {
110 done = 1;
111 a = pmem, b = &(*pmem)->next;
112 while (*b) {
113 if ((*a)->addr > (*b)->addr) {
114 struct tag_mem_range *tmp;
115 tmp = (*b)->next;
116 (*b)->next = *a;
117 *a = *b;
118 *b = tmp;
119 done = 0;
120 }
121 a = &(*a)->next;
122 b = &(*a)->next;
123 }
124 } while (!done);
125}
126
127/*
128 * Find a free memory region large enough for storing the
129 * bootmem bitmap.
130 */
131static unsigned long __init
132find_bootmap_pfn(const struct tag_mem_range *mem)
133{
134 unsigned long bootmap_pages, bootmap_len;
135 unsigned long node_pages = PFN_UP(mem->size);
136 unsigned long bootmap_addr = mem->addr;
137 struct tag_mem_range *reserved = mem_reserved;
138 struct tag_mem_range *ramdisk = mem_ramdisk;
139 unsigned long kern_start = virt_to_phys(_stext);
140 unsigned long kern_end = virt_to_phys(_end);
141
142 bootmap_pages = bootmem_bootmap_pages(node_pages);
143 bootmap_len = bootmap_pages << PAGE_SHIFT;
144
145 /*
146 * Find a large enough region without reserved pages for
147 * storing the bootmem bitmap. We can take advantage of the
148 * fact that all lists have been sorted.
149 *
150 * We have to check explicitly reserved regions as well as the
151 * kernel image and any RAMDISK images...
152 *
153 * Oh, and we have to make sure we don't overwrite the taglist
154 * since we're going to use it until the bootmem allocator is
155 * fully up and running.
156 */
157 while (1) {
158 if ((bootmap_addr < kern_end) &&
159 ((bootmap_addr + bootmap_len) > kern_start))
160 bootmap_addr = kern_end;
161
162 while (reserved &&
163 (bootmap_addr >= (reserved->addr + reserved->size)))
164 reserved = reserved->next;
165
166 if (reserved &&
167 ((bootmap_addr + bootmap_len) >= reserved->addr)) {
168 bootmap_addr = reserved->addr + reserved->size;
169 continue;
170 }
171
172 while (ramdisk &&
173 (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
174 ramdisk = ramdisk->next;
175
176 if (!ramdisk ||
177 ((bootmap_addr + bootmap_len) < ramdisk->addr))
178 break;
179
180 bootmap_addr = ramdisk->addr + ramdisk->size;
181 }
182
183 if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
184 return ~0UL;
185
186 return PFN_UP(bootmap_addr);
187}
188
189void __init setup_bootmem(void)
190{
191 unsigned bootmap_size;
192 unsigned long first_pfn, bootmap_pfn, pages;
193 unsigned long max_pfn, max_low_pfn;
194 unsigned long kern_start = virt_to_phys(_stext);
195 unsigned long kern_end = virt_to_phys(_end);
196 unsigned node = 0;
197 struct tag_mem_range *bank, *res;
198
199 sort_mem_list(&mem_phys);
200 sort_mem_list(&mem_reserved);
201
202 print_memory_map("Physical memory", mem_phys);
203 print_memory_map("Reserved memory", mem_reserved);
204
205 nodes_clear(node_online_map);
206
207 if (mem_ramdisk) {
208#ifdef CONFIG_BLK_DEV_INITRD
209 initrd_start = (unsigned long)__va(mem_ramdisk->addr);
210 initrd_end = initrd_start + mem_ramdisk->size;
211
212 print_memory_map("RAMDISK images", mem_ramdisk);
213 if (mem_ramdisk->next)
214 printk(KERN_WARNING
215 "Warning: Only the first RAMDISK image "
216 "will be used\n");
217 sort_mem_list(&mem_ramdisk);
218#else
219 printk(KERN_WARNING "RAM disk image present, but "
220 "no initrd support in kernel!\n");
221#endif
222 }
223
224 if (mem_phys->next)
225 printk(KERN_WARNING "Only using first memory bank\n");
226
227 for (bank = mem_phys; bank; bank = NULL) {
228 first_pfn = PFN_UP(bank->addr);
229 max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
230 bootmap_pfn = find_bootmap_pfn(bank);
231 if (bootmap_pfn > max_pfn)
232 panic("No space for bootmem bitmap!\n");
233
234 if (max_low_pfn > MAX_LOWMEM_PFN) {
235 max_low_pfn = MAX_LOWMEM_PFN;
236#ifndef CONFIG_HIGHMEM
237 /*
238 * Lowmem is memory that can be addressed
239 * directly through P1/P2
240 */
241 printk(KERN_WARNING
242 "Node %u: Only %ld MiB of memory will be used.\n",
243 node, MAX_LOWMEM >> 20);
244 printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
245#else
246#error HIGHMEM is not supported by AVR32 yet
247#endif
248 }
249
250 /* Initialize the boot-time allocator with low memory only. */
251 bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
252 first_pfn, max_low_pfn);
253
254 printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
255 node, NODE_DATA(node)->bdata,
256 NODE_DATA(node)->bdata->node_bootmem_map);
257
258 /*
259 * Register fully available RAM pages with the bootmem
260 * allocator.
261 */
262 pages = max_low_pfn - first_pfn;
263 free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
264 PFN_PHYS(pages));
265
266 /*
267 * Reserve space for the kernel image (if present in
268 * this node)...
269 */
270 if ((kern_start >= PFN_PHYS(first_pfn)) &&
271 (kern_start < PFN_PHYS(max_pfn))) {
272 printk("Node %u: Kernel image %08lx - %08lx\n",
273 node, kern_start, kern_end);
274 reserve_bootmem_node(NODE_DATA(node), kern_start,
275 kern_end - kern_start);
276 }
277
278 /* ...the bootmem bitmap... */
279 reserve_bootmem_node(NODE_DATA(node),
280 PFN_PHYS(bootmap_pfn),
281 bootmap_size);
282
283 /* ...any RAMDISK images... */
284 for (res = mem_ramdisk; res; res = res->next) {
285 if (res->addr > PFN_PHYS(max_pfn))
286 break;
287
288 if (res->addr >= PFN_PHYS(first_pfn)) {
289 printk("Node %u: RAMDISK %08lx - %08lx\n",
290 node,
291 (unsigned long)res->addr,
292 (unsigned long)(res->addr + res->size));
293 reserve_bootmem_node(NODE_DATA(node),
294 res->addr, res->size);
295 }
296 }
297
298 /* ...and any other reserved regions. */
299 for (res = mem_reserved; res; res = res->next) {
300 if (res->addr > PFN_PHYS(max_pfn))
301 break;
302
303 if (res->addr >= PFN_PHYS(first_pfn)) {
304 printk("Node %u: Reserved %08lx - %08lx\n",
305 node,
306 (unsigned long)res->addr,
307 (unsigned long)(res->addr + res->size));
308 reserve_bootmem_node(NODE_DATA(node),
309 res->addr, res->size);
310 }
311 }
312
313 node_set_online(node);
314 }
315}
316
317/* 79/*
318 * paging_init() sets up the page tables 80 * paging_init() sets up the page tables
319 * 81 *
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0f293aa7b0fa..e6ec418093e5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -41,6 +41,11 @@ config GENERIC_HWEIGHT
41config GENERIC_TIME 41config GENERIC_TIME
42 def_bool y 42 def_bool y
43 43
44config GENERIC_BUG
45 bool
46 depends on BUG
47 default y
48
44config NO_IOMEM 49config NO_IOMEM
45 def_bool y 50 def_bool y
46 51
@@ -514,6 +519,14 @@ config KEXEC
514 current kernel, and to start another kernel. It is like a reboot 519 current kernel, and to start another kernel. It is like a reboot
515 but is independent of hardware/microcode support. 520 but is independent of hardware/microcode support.
516 521
522config ZFCPDUMP
523 tristate "zfcpdump support"
524 select SMP
525 default n
526 help
527	  Select this option if you want to build a zfcpdump enabled kernel.
528 Refer to "Documentation/s390/zfcpdump.txt" for more details on this.
529
517endmenu 530endmenu
518 531
519source "net/Kconfig" 532source "net/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index b1e558496469..68441e0e74b6 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -67,8 +67,10 @@ endif
67 67
68ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) 68ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
69cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) 69cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
70ifneq ($(call cc-option-yn,-mstack-size=8192),y)
70cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) 71cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
71endif 72endif
73endif
72 74
73ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) 75ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
74cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack 76cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
@@ -103,6 +105,9 @@ install: vmlinux
103image: vmlinux 105image: vmlinux
104 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 106 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
105 107
108zfcpdump:
109 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
110
106archclean: 111archclean:
107 $(Q)$(MAKE) $(clean)=$(boot) 112 $(Q)$(MAKE) $(clean)=$(boot)
108 113
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 0c3cf4b16ae4..ee89b33145d5 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
668EXPORT_SYMBOL_GPL(appldata_unregister_ops); 668EXPORT_SYMBOL_GPL(appldata_unregister_ops);
669EXPORT_SYMBOL_GPL(appldata_diag); 669EXPORT_SYMBOL_GPL(appldata_diag);
670 670
671#ifdef MODULE
672/*
673 * Kernel symbols needed by appldata_mem and appldata_os modules.
674 * However, if this file is compiled as a module (for testing only), these
675 * symbols are not exported. In this case, we define them locally and export
676 * those.
677 */
678void si_swapinfo(struct sysinfo *val)
679{
680 val->freeswap = -1ul;
681 val->totalswap = -1ul;
682}
683
684unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
685 -1 - FIXED_1/200};
686int nr_threads = -1;
687
688void get_full_page_state(struct page_state *ps)
689{
690 memset(ps, -1, sizeof(struct page_state));
691}
692
693unsigned long nr_running(void)
694{
695 return -1;
696}
697
698unsigned long nr_iowait(void)
699{
700 return -1;
701}
702
703/*unsigned long nr_context_switches(void)
704{
705 return -1;
706}*/
707#endif /* MODULE */
708EXPORT_SYMBOL_GPL(si_swapinfo); 671EXPORT_SYMBOL_GPL(si_swapinfo);
709EXPORT_SYMBOL_GPL(nr_threads); 672EXPORT_SYMBOL_GPL(nr_threads);
710EXPORT_SYMBOL_GPL(nr_running); 673EXPORT_SYMBOL_GPL(nr_running);
711EXPORT_SYMBOL_GPL(nr_iowait); 674EXPORT_SYMBOL_GPL(nr_iowait);
712//EXPORT_SYMBOL_GPL(nr_context_switches);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 969639f31977..af4460ec381f 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -25,99 +25,100 @@
25 */ 25 */
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/mm.h>
29#include <linux/crypto.h> 28#include <linux/crypto.h>
30#include <asm/scatterlist.h> 29
31#include <asm/byteorder.h>
32#include "crypt_s390.h" 30#include "crypt_s390.h"
33 31
34#define SHA1_DIGEST_SIZE 20 32#define SHA1_DIGEST_SIZE 20
35#define SHA1_BLOCK_SIZE 64 33#define SHA1_BLOCK_SIZE 64
36 34
37struct crypt_s390_sha1_ctx { 35struct s390_sha1_ctx {
38 u64 count; 36 u64 count; /* message length */
39 u32 state[5]; 37 u32 state[5];
40 u32 buf_len; 38 u8 buf[2 * SHA1_BLOCK_SIZE];
41 u8 buffer[2 * SHA1_BLOCK_SIZE];
42}; 39};
43 40
44static void sha1_init(struct crypto_tfm *tfm) 41static void sha1_init(struct crypto_tfm *tfm)
45{ 42{
46 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); 43 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
47 44
48 ctx->state[0] = 0x67452301; 45 sctx->state[0] = 0x67452301;
49 ctx->state[1] = 0xEFCDAB89; 46 sctx->state[1] = 0xEFCDAB89;
50 ctx->state[2] = 0x98BADCFE; 47 sctx->state[2] = 0x98BADCFE;
51 ctx->state[3] = 0x10325476; 48 sctx->state[3] = 0x10325476;
52 ctx->state[4] = 0xC3D2E1F0; 49 sctx->state[4] = 0xC3D2E1F0;
53 50 sctx->count = 0;
54 ctx->count = 0;
55 ctx->buf_len = 0;
56} 51}
57 52
58static void sha1_update(struct crypto_tfm *tfm, const u8 *data, 53static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
59 unsigned int len) 54 unsigned int len)
60{ 55{
61 struct crypt_s390_sha1_ctx *sctx; 56 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
62 long imd_len; 57 unsigned int index;
63 58 int ret;
64 sctx = crypto_tfm_ctx(tfm); 59
65 sctx->count += len * 8; /* message bit length */ 60 /* how much is already in the buffer? */
66 61 index = sctx->count & 0x3f;
67 /* anything in buffer yet? -> must be completed */ 62
68 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { 63 sctx->count += len;
69 /* complete full block and hash */ 64
70 memcpy(sctx->buffer + sctx->buf_len, data, 65 if (index + len < SHA1_BLOCK_SIZE)
71 SHA1_BLOCK_SIZE - sctx->buf_len); 66 goto store;
72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 67
73 SHA1_BLOCK_SIZE); 68 /* process one stored block */
74 data += SHA1_BLOCK_SIZE - sctx->buf_len; 69 if (index) {
75 len -= SHA1_BLOCK_SIZE - sctx->buf_len; 70 memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index);
76 sctx->buf_len = 0; 71 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf,
72 SHA1_BLOCK_SIZE);
73 BUG_ON(ret != SHA1_BLOCK_SIZE);
74 data += SHA1_BLOCK_SIZE - index;
75 len -= SHA1_BLOCK_SIZE - index;
77 } 76 }
78 77
79 /* rest of data contains full blocks? */ 78 /* process as many blocks as possible */
80 imd_len = len & ~0x3ful; 79 if (len >= SHA1_BLOCK_SIZE) {
81 if (imd_len) { 80 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data,
82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 81 len & ~(SHA1_BLOCK_SIZE - 1));
83 data += imd_len; 82 BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1)));
84 len -= imd_len; 83 data += ret;
84 len -= ret;
85 } 85 }
86 /* anything left? store in buffer */
87 if (len) {
88 memcpy(sctx->buffer + sctx->buf_len , data, len);
89 sctx->buf_len += len;
90 }
91}
92 86
87store:
88 /* anything left? */
89 if (len)
90 memcpy(sctx->buf + index , data, len);
91}
93 92
94static void pad_message(struct crypt_s390_sha1_ctx* sctx) 93/* Add padding and return the message digest. */
94static void sha1_final(struct crypto_tfm *tfm, u8 *out)
95{ 95{
96 int index; 96 struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
97 u64 bits;
98 unsigned int index, end;
99 int ret;
100
101 /* must perform manual padding */
102 index = sctx->count & 0x3f;
103 end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE);
97 104
98 index = sctx->buf_len;
99 sctx->buf_len = (sctx->buf_len < 56) ?
100 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
101 /* start pad with 1 */ 105 /* start pad with 1 */
102 sctx->buffer[index] = 0x80; 106 sctx->buf[index] = 0x80;
107
103 /* pad with zeros */ 108 /* pad with zeros */
104 index++; 109 index++;
105 memset(sctx->buffer + index, 0x00, sctx->buf_len - index); 110 memset(sctx->buf + index, 0x00, end - index - 8);
106 /* append length */
107 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
108 sizeof sctx->count);
109}
110 111
111/* Add padding and return the message digest. */ 112 /* append message length */
112static void sha1_final(struct crypto_tfm *tfm, u8 *out) 113 bits = sctx->count * 8;
113{ 114 memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
114 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 115
116 ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end);
117 BUG_ON(ret != end);
115 118
116 /* must perform manual padding */
117 pad_message(sctx);
118 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
119 /* copy digest to out */ 119 /* copy digest to out */
120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
121
121 /* wipe context */ 122 /* wipe context */
122 memset(sctx, 0, sizeof *sctx); 123 memset(sctx, 0, sizeof *sctx);
123} 124}
@@ -128,7 +129,7 @@ static struct crypto_alg alg = {
128 .cra_priority = CRYPT_S390_PRIORITY, 129 .cra_priority = CRYPT_S390_PRIORITY,
129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 130 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
130 .cra_blocksize = SHA1_BLOCK_SIZE, 131 .cra_blocksize = SHA1_BLOCK_SIZE,
131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), 132 .cra_ctxsize = sizeof(struct s390_sha1_ctx),
132 .cra_module = THIS_MODULE, 133 .cra_module = THIS_MODULE,
133 .cra_list = LIST_HEAD_INIT(alg.cra_list), 134 .cra_list = LIST_HEAD_INIT(alg.cra_list),
134 .cra_u = { .digest = { 135 .cra_u = { .digest = {
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 78436c696d37..2ced3330bce0 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -26,7 +26,7 @@
26#define SHA256_BLOCK_SIZE 64 26#define SHA256_BLOCK_SIZE 64
27 27
28struct s390_sha256_ctx { 28struct s390_sha256_ctx {
29 u64 count; 29 u64 count; /* message length */
30 u32 state[8]; 30 u32 state[8];
31 u8 buf[2 * SHA256_BLOCK_SIZE]; 31 u8 buf[2 * SHA256_BLOCK_SIZE];
32}; 32};
@@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data,
54 int ret; 54 int ret;
55 55
56 /* how much is already in the buffer? */ 56 /* how much is already in the buffer? */
57 index = sctx->count / 8 & 0x3f; 57 index = sctx->count & 0x3f;
58 58
59 /* update message bit length */ 59 sctx->count += len;
60 sctx->count += len * 8;
61 60
62 if ((index + len) < SHA256_BLOCK_SIZE) 61 if ((index + len) < SHA256_BLOCK_SIZE)
63 goto store; 62 goto store;
@@ -87,12 +86,17 @@ store:
87 memcpy(sctx->buf + index , data, len); 86 memcpy(sctx->buf + index , data, len);
88} 87}
89 88
90static void pad_message(struct s390_sha256_ctx* sctx) 89/* Add padding and return the message digest */
90static void sha256_final(struct crypto_tfm *tfm, u8 *out)
91{ 91{
92 int index, end; 92 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
93 u64 bits;
94 unsigned int index, end;
95 int ret;
93 96
94 index = sctx->count / 8 & 0x3f; 97 /* must perform manual padding */
95 end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE; 98 index = sctx->count & 0x3f;
99 end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE);
96 100
97 /* start pad with 1 */ 101 /* start pad with 1 */
98 sctx->buf[index] = 0x80; 102 sctx->buf[index] = 0x80;
@@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx)
102 memset(sctx->buf + index, 0x00, end - index - 8); 106 memset(sctx->buf + index, 0x00, end - index - 8);
103 107
104 /* append message length */ 108 /* append message length */
105 memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count); 109 bits = sctx->count * 8;
106 110 memcpy(sctx->buf + end - 8, &bits, sizeof(bits));
107 sctx->count = end * 8;
108}
109
110/* Add padding and return the message digest */
111static void sha256_final(struct crypto_tfm *tfm, u8 *out)
112{
113 struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm);
114
115 /* must perform manual padding */
116 pad_message(sctx);
117 111
118 crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, 112 ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end);
119 sctx->count / 8); 113 BUG_ON(ret != end);
120 114
121 /* copy digest to out */ 115 /* copy digest to out */
122 memcpy(out, sctx->state, SHA256_DIGEST_SIZE); 116 memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 741d2bbb2b37..0e4da8a7d826 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
12# CONFIG_ARCH_HAS_ILOG2_U64 is not set 12# CONFIG_ARCH_HAS_ILOG2_U64 is not set
13CONFIG_GENERIC_HWEIGHT=y 13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_TIME=y 14CONFIG_GENERIC_TIME=y
15CONFIG_GENERIC_BUG=y
15CONFIG_NO_IOMEM=y 16CONFIG_NO_IOMEM=y
16CONFIG_S390=y 17CONFIG_S390=y
17CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -166,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y
166CONFIG_NO_IDLE_HZ_INIT=y 167CONFIG_NO_IDLE_HZ_INIT=y
167CONFIG_S390_HYPFS_FS=y 168CONFIG_S390_HYPFS_FS=y
168CONFIG_KEXEC=y 169CONFIG_KEXEC=y
170# CONFIG_ZFCPDUMP is not set
169 171
170# 172#
171# Networking 173# Networking
@@ -705,6 +707,7 @@ CONFIG_DEBUG_MUTEXES=y
705CONFIG_DEBUG_SPINLOCK_SLEEP=y 707CONFIG_DEBUG_SPINLOCK_SLEEP=y
706# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 708# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
707# CONFIG_DEBUG_KOBJECT is not set 709# CONFIG_DEBUG_KOBJECT is not set
710CONFIG_DEBUG_BUGVERBOSE=y
708# CONFIG_DEBUG_INFO is not set 711# CONFIG_DEBUG_INFO is not set
709# CONFIG_DEBUG_VM is not set 712# CONFIG_DEBUG_VM is not set
710# CONFIG_DEBUG_LIST is not set 713# CONFIG_DEBUG_LIST is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5492d25d7d69..3195d375bd51 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional
6 6
7obj-y := bitmap.o traps.o time.o process.o base.o early.o \ 7obj-y := bitmap.o traps.o time.o process.o base.o early.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o irq.o ipl.o 9 semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 664c669b1856..5236fdb17fcb 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
495 * sys32_execve() executes a new program after the asm stub has set 495 * sys32_execve() executes a new program after the asm stub has set
496 * things up for us. This should basically do what I want it to. 496 * things up for us. This should basically do what I want it to.
497 */ 497 */
498asmlinkage long 498asmlinkage long sys32_execve(void)
499sys32_execve(struct pt_regs regs)
500{ 499{
501 int error; 500 struct pt_regs *regs = task_pt_regs(current);
502 char * filename; 501 char *filename;
502 unsigned long result;
503 int rc;
503 504
504 filename = getname(compat_ptr(regs.orig_gpr2)); 505 filename = getname(compat_ptr(regs->orig_gpr2));
505 error = PTR_ERR(filename); 506 if (IS_ERR(filename)) {
506 if (IS_ERR(filename)) 507 result = PTR_ERR(filename);
507 goto out; 508 goto out;
508 error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
509 compat_ptr(regs.gprs[4]), &regs);
510 if (error == 0)
511 {
512 task_lock(current);
513 current->ptrace &= ~PT_DTRACE;
514 task_unlock(current);
515 current->thread.fp_regs.fpc=0;
516 asm volatile("sfpc %0,0" : : "d" (0));
517 } 509 }
510 rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]),
511 compat_ptr(regs->gprs[4]), regs);
512 if (rc) {
513 result = rc;
514 goto out_putname;
515 }
516 task_lock(current);
517 current->ptrace &= ~PT_DTRACE;
518 task_unlock(current);
519 current->thread.fp_regs.fpc=0;
520 asm volatile("sfpc %0,0" : : "d" (0));
521 result = regs->gprs[2];
522out_putname:
518 putname(filename); 523 putname(filename);
519out: 524out:
520 return error; 525 return result;
521} 526}
522 527
523 528
@@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
918 return sys_write(fd, buf, count); 923 return sys_write(fd, buf, count);
919} 924}
920 925
921asmlinkage long sys32_clone(struct pt_regs regs) 926asmlinkage long sys32_clone(void)
922{ 927{
923 unsigned long clone_flags; 928 struct pt_regs *regs = task_pt_regs(current);
924 unsigned long newsp; 929 unsigned long clone_flags;
930 unsigned long newsp;
925 int __user *parent_tidptr, *child_tidptr; 931 int __user *parent_tidptr, *child_tidptr;
926 932
927 clone_flags = regs.gprs[3] & 0xffffffffUL; 933 clone_flags = regs->gprs[3] & 0xffffffffUL;
928 newsp = regs.orig_gpr2 & 0x7fffffffUL; 934 newsp = regs->orig_gpr2 & 0x7fffffffUL;
929 parent_tidptr = compat_ptr(regs.gprs[4]); 935 parent_tidptr = compat_ptr(regs->gprs[4]);
930 child_tidptr = compat_ptr(regs.gprs[5]); 936 child_tidptr = compat_ptr(regs->gprs[5]);
931 if (!newsp) 937 if (!newsp)
932 newsp = regs.gprs[15]; 938 newsp = regs->gprs[15];
933 return do_fork(clone_flags, newsp, &regs, 0, 939 return do_fork(clone_flags, newsp, regs, 0,
934 parent_tidptr, child_tidptr); 940 parent_tidptr, child_tidptr);
935} 941}
936 942
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 887a9881d0d0..80a54a0149ab 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
255} 255}
256 256
257asmlinkage long 257asmlinkage long
258sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, 258sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss)
259 struct pt_regs *regs)
260{ 259{
260 struct pt_regs *regs = task_pt_regs(current);
261 stack_t kss, koss; 261 stack_t kss, koss;
262 unsigned long ss_sp; 262 unsigned long ss_sp;
263 int ret, err = 0; 263 int ret, err = 0;
@@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
344 return 0; 344 return 0;
345} 345}
346 346
347asmlinkage long sys32_sigreturn(struct pt_regs *regs) 347asmlinkage long sys32_sigreturn(void)
348{ 348{
349 struct pt_regs *regs = task_pt_regs(current);
349 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; 350 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
350 sigset_t set; 351 sigset_t set;
351 352
@@ -370,8 +371,9 @@ badframe:
370 return 0; 371 return 0;
371} 372}
372 373
373asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) 374asmlinkage long sys32_rt_sigreturn(void)
374{ 375{
376 struct pt_regs *regs = task_pt_regs(current);
375 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; 377 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
376 sigset_t set; 378 sigset_t set;
377 stack_t st; 379 stack_t st;
@@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
407 return regs->gprs[2]; 409 return regs->gprs[2];
408 410
409badframe: 411badframe:
410 force_sig(SIGSEGV, current); 412 force_sig(SIGSEGV, current);
411 return 0; 413 return 0;
412} 414}
413 415
414/* 416/*
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
new file mode 100644
index 000000000000..dabaf98943d0
--- /dev/null
+++ b/arch/s390/kernel/dis.c
@@ -0,0 +1,1278 @@
1/*
2 * arch/s390/kernel/dis.c
3 *
4 * Disassemble s390 instructions.
5 *
6 * Copyright IBM Corp. 2007
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
8 */
9
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/string.h>
13#include <linux/errno.h>
14#include <linux/ptrace.h>
15#include <linux/timer.h>
16#include <linux/mm.h>
17#include <linux/smp.h>
18#include <linux/smp_lock.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/module.h>
23#include <linux/kallsyms.h>
24#include <linux/reboot.h>
25#include <linux/kprobes.h>
26
27#include <asm/system.h>
28#include <asm/uaccess.h>
29#include <asm/io.h>
30#include <asm/atomic.h>
31#include <asm/mathemu.h>
32#include <asm/cpcmd.h>
33#include <asm/s390_ext.h>
34#include <asm/lowcore.h>
35#include <asm/debug.h>
36#include <asm/kdebug.h>
37
38#ifndef CONFIG_64BIT
39#define ONELONG "%08lx: "
40#else /* CONFIG_64BIT */
41#define ONELONG "%016lx: "
42#endif /* CONFIG_64BIT */
43
44#define OPERAND_GPR 0x1 /* Operand printed as %rx */
45#define OPERAND_FPR 0x2 /* Operand printed as %fx */
46#define OPERAND_AR 0x4 /* Operand printed as %ax */
47#define OPERAND_CR 0x8 /* Operand printed as %cx */
48#define OPERAND_DISP 0x10 /* Operand printed as displacement */
49#define OPERAND_BASE 0x20 /* Operand printed as base register */
50#define OPERAND_INDEX 0x40 /* Operand printed as index register */
51#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
52#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
53#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
54
55enum {
56 UNUSED, /* Indicates the end of the operand list */
57 R_8, /* GPR starting at position 8 */
58 R_12, /* GPR starting at position 12 */
59 R_16, /* GPR starting at position 16 */
60 R_20, /* GPR starting at position 20 */
61 R_24, /* GPR starting at position 24 */
62 R_28, /* GPR starting at position 28 */
63 R_32, /* GPR starting at position 32 */
64 F_8, /* FPR starting at position 8 */
65 F_12, /* FPR starting at position 12 */
66 F_16, /* FPR starting at position 16 */
 67	F_20,	/* FPR starting at position 20 */
68 F_24, /* FPR starting at position 24 */
69 F_28, /* FPR starting at position 28 */
70 F_32, /* FPR starting at position 32 */
71 A_8, /* Access reg. starting at position 8 */
72 A_12, /* Access reg. starting at position 12 */
73 A_24, /* Access reg. starting at position 24 */
74 A_28, /* Access reg. starting at position 28 */
75 C_8, /* Control reg. starting at position 8 */
76 C_12, /* Control reg. starting at position 12 */
77 B_16, /* Base register starting at position 16 */
78 B_32, /* Base register starting at position 32 */
79 X_12, /* Index register starting at position 12 */
80 D_20, /* Displacement starting at position 20 */
81 D_36, /* Displacement starting at position 36 */
82 D20_20, /* 20 bit displacement starting at 20 */
83 L4_8, /* 4 bit length starting at position 8 */
84 L4_12, /* 4 bit length starting at position 12 */
85 L8_8, /* 8 bit length starting at position 8 */
86 U4_8, /* 4 bit unsigned value starting at 8 */
87 U4_12, /* 4 bit unsigned value starting at 12 */
88 U4_16, /* 4 bit unsigned value starting at 16 */
89 U4_20, /* 4 bit unsigned value starting at 20 */
90 U8_8, /* 8 bit unsigned value starting at 8 */
91 U8_16, /* 8 bit unsigned value starting at 16 */
92 I16_16, /* 16 bit signed value starting at 16 */
93 U16_16, /* 16 bit unsigned value starting at 16 */
94 J16_16, /* PC relative jump offset at 16 */
95 J32_16, /* PC relative long offset at 16 */
96 I32_16, /* 32 bit signed value starting at 16 */
97 U32_16, /* 32 bit unsigned value starting at 16 */
98 M_16, /* 4 bit optional mask starting at 16 */
99 RO_28, /* optional GPR starting at position 28 */
100};
101
102/*
103 * Enumeration of the different instruction formats.
104 * For details consult the principles of operation.
105 */
106enum {
107 INSTR_INVALID,
108 INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU,
109 INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
110 INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
111 INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR,
112 INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR,
113 INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF,
114 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
115 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP,
116 INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD,
117 INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD,
118 INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD,
119 INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD,
120 INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD,
121 INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD,
122 INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
123 INSTR_S_00, INSTR_S_RD,
124};
125
126struct operand {
127 int bits; /* The number of bits in the operand. */
128 int shift; /* The number of bits to shift. */
129 int flags; /* One bit syntax flags. */
130};
131
132struct insn {
133 const char name[5];
134 unsigned char opfrag;
135 unsigned char format;
136};
137
138static const struct operand operands[] =
139{
140 [UNUSED] = { 0, 0, 0 },
141 [R_8] = { 4, 8, OPERAND_GPR },
142 [R_12] = { 4, 12, OPERAND_GPR },
143 [R_16] = { 4, 16, OPERAND_GPR },
144 [R_20] = { 4, 20, OPERAND_GPR },
145 [R_24] = { 4, 24, OPERAND_GPR },
146 [R_28] = { 4, 28, OPERAND_GPR },
147 [R_32] = { 4, 32, OPERAND_GPR },
148 [F_8] = { 4, 8, OPERAND_FPR },
149 [F_12] = { 4, 12, OPERAND_FPR },
150 [F_16] = { 4, 16, OPERAND_FPR },
150	[F_20]	 = { 4, 20, OPERAND_FPR },
152 [F_24] = { 4, 24, OPERAND_FPR },
153 [F_28] = { 4, 28, OPERAND_FPR },
154 [F_32] = { 4, 32, OPERAND_FPR },
155 [A_8] = { 4, 8, OPERAND_AR },
156 [A_12] = { 4, 12, OPERAND_AR },
157 [A_24] = { 4, 24, OPERAND_AR },
158 [A_28] = { 4, 28, OPERAND_AR },
159 [C_8] = { 4, 8, OPERAND_CR },
160 [C_12] = { 4, 12, OPERAND_CR },
161 [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
162 [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
163 [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
164 [D_20] = { 12, 20, OPERAND_DISP },
165 [D_36] = { 12, 36, OPERAND_DISP },
166 [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
167 [L4_8] = { 4, 8, OPERAND_LENGTH },
168 [L4_12] = { 4, 12, OPERAND_LENGTH },
169 [L8_8] = { 8, 8, OPERAND_LENGTH },
170 [U4_8] = { 4, 8, 0 },
171 [U4_12] = { 4, 12, 0 },
172 [U4_16] = { 4, 16, 0 },
173 [U4_20] = { 4, 20, 0 },
174 [U8_8] = { 8, 8, 0 },
175 [U8_16] = { 8, 16, 0 },
176 [I16_16] = { 16, 16, OPERAND_SIGNED },
177 [U16_16] = { 16, 16, 0 },
178 [J16_16] = { 16, 16, OPERAND_PCREL },
179 [J32_16] = { 32, 16, OPERAND_PCREL },
180 [I32_16] = { 32, 16, OPERAND_SIGNED },
181 [U32_16] = { 32, 16, 0 },
182 [M_16] = { 4, 16, 0 },
183 [RO_28] = { 4, 28, OPERAND_GPR }
184};
185
186static const unsigned char formats[][7] = {
187 [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */
188 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */
189 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */
190 [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */
191 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */
192 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */
193 [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */
194 [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */
195 [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */
196 [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */
197 [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */
198 [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */
199 [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */
200 [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */
201 [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */
202 [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */
203 [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */
204 [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */
205 [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */
206 [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */
207 [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */
208 [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */
209 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */
210 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */
211 [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */
212 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */
213 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */
214 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. sske */
215 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */
216 [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */
217 [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */
218 [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */
219 [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */
220 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */
221 [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */
222 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */
223 [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */
224 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */
225 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */
226 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
227 /* e.g. icmh */
228 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */
229 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */
230 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */
231 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */
232 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */
233 [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */
234 [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */
235 [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */
236 [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */
237 [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
238 /* e.g. madb */
239 [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */
240 [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */
241 [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */
242 [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */
243 [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */
244 [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */
245 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */
246 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */
247 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
248 /* e.g. mvc */
249 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
250 /* e.g. srp */
251 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
252 /* e.g. pack */
253 [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
254 /* e.g. mvck */
255 [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
256 /* e.g. plo */
257 [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
258 /* e.g. lmd */
259 [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */
260 [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */
261 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 },
262 /* e.g. mvcos */
263};
264
265static struct insn opcode[] = {
266#ifdef CONFIG_64BIT
267 { "lmd", 0xef, INSTR_SS_RRRDRD3 },
268#endif
269 { "spm", 0x04, INSTR_RR_R0 },
270 { "balr", 0x05, INSTR_RR_RR },
271 { "bctr", 0x06, INSTR_RR_RR },
272 { "bcr", 0x07, INSTR_RR_UR },
273 { "svc", 0x0a, INSTR_RR_U0 },
274 { "bsm", 0x0b, INSTR_RR_RR },
275 { "bassm", 0x0c, INSTR_RR_RR },
276 { "basr", 0x0d, INSTR_RR_RR },
277 { "mvcl", 0x0e, INSTR_RR_RR },
278 { "clcl", 0x0f, INSTR_RR_RR },
279 { "lpr", 0x10, INSTR_RR_RR },
280 { "lnr", 0x11, INSTR_RR_RR },
281 { "ltr", 0x12, INSTR_RR_RR },
282 { "lcr", 0x13, INSTR_RR_RR },
283 { "nr", 0x14, INSTR_RR_RR },
284 { "clr", 0x15, INSTR_RR_RR },
285 { "or", 0x16, INSTR_RR_RR },
286 { "xr", 0x17, INSTR_RR_RR },
287 { "lr", 0x18, INSTR_RR_RR },
288 { "cr", 0x19, INSTR_RR_RR },
289 { "ar", 0x1a, INSTR_RR_RR },
290 { "sr", 0x1b, INSTR_RR_RR },
291 { "mr", 0x1c, INSTR_RR_RR },
292 { "dr", 0x1d, INSTR_RR_RR },
293 { "alr", 0x1e, INSTR_RR_RR },
294 { "slr", 0x1f, INSTR_RR_RR },
295 { "lpdr", 0x20, INSTR_RR_FF },
296 { "lndr", 0x21, INSTR_RR_FF },
297 { "ltdr", 0x22, INSTR_RR_FF },
298 { "lcdr", 0x23, INSTR_RR_FF },
299 { "hdr", 0x24, INSTR_RR_FF },
300 { "ldxr", 0x25, INSTR_RR_FF },
301 { "lrdr", 0x25, INSTR_RR_FF },
302 { "mxr", 0x26, INSTR_RR_FF },
303 { "mxdr", 0x27, INSTR_RR_FF },
304 { "ldr", 0x28, INSTR_RR_FF },
305 { "cdr", 0x29, INSTR_RR_FF },
306 { "adr", 0x2a, INSTR_RR_FF },
307 { "sdr", 0x2b, INSTR_RR_FF },
308 { "mdr", 0x2c, INSTR_RR_FF },
309 { "ddr", 0x2d, INSTR_RR_FF },
310 { "awr", 0x2e, INSTR_RR_FF },
311 { "swr", 0x2f, INSTR_RR_FF },
312 { "lper", 0x30, INSTR_RR_FF },
313 { "lner", 0x31, INSTR_RR_FF },
314 { "lter", 0x32, INSTR_RR_FF },
315 { "lcer", 0x33, INSTR_RR_FF },
316 { "her", 0x34, INSTR_RR_FF },
317 { "ledr", 0x35, INSTR_RR_FF },
318 { "lrer", 0x35, INSTR_RR_FF },
319 { "axr", 0x36, INSTR_RR_FF },
320 { "sxr", 0x37, INSTR_RR_FF },
321 { "ler", 0x38, INSTR_RR_FF },
322 { "cer", 0x39, INSTR_RR_FF },
323 { "aer", 0x3a, INSTR_RR_FF },
324 { "ser", 0x3b, INSTR_RR_FF },
325 { "mder", 0x3c, INSTR_RR_FF },
326 { "mer", 0x3c, INSTR_RR_FF },
327 { "der", 0x3d, INSTR_RR_FF },
328 { "aur", 0x3e, INSTR_RR_FF },
329 { "sur", 0x3f, INSTR_RR_FF },
330 { "sth", 0x40, INSTR_RX_RRRD },
331 { "la", 0x41, INSTR_RX_RRRD },
332 { "stc", 0x42, INSTR_RX_RRRD },
333 { "ic", 0x43, INSTR_RX_RRRD },
334 { "ex", 0x44, INSTR_RX_RRRD },
335 { "bal", 0x45, INSTR_RX_RRRD },
336 { "bct", 0x46, INSTR_RX_RRRD },
337 { "bc", 0x47, INSTR_RX_URRD },
338 { "lh", 0x48, INSTR_RX_RRRD },
339 { "ch", 0x49, INSTR_RX_RRRD },
340 { "ah", 0x4a, INSTR_RX_RRRD },
341 { "sh", 0x4b, INSTR_RX_RRRD },
342 { "mh", 0x4c, INSTR_RX_RRRD },
343 { "bas", 0x4d, INSTR_RX_RRRD },
344 { "cvd", 0x4e, INSTR_RX_RRRD },
345 { "cvb", 0x4f, INSTR_RX_RRRD },
346 { "st", 0x50, INSTR_RX_RRRD },
347 { "lae", 0x51, INSTR_RX_RRRD },
348 { "n", 0x54, INSTR_RX_RRRD },
349 { "cl", 0x55, INSTR_RX_RRRD },
350 { "o", 0x56, INSTR_RX_RRRD },
351 { "x", 0x57, INSTR_RX_RRRD },
352 { "l", 0x58, INSTR_RX_RRRD },
353 { "c", 0x59, INSTR_RX_RRRD },
354 { "a", 0x5a, INSTR_RX_RRRD },
355 { "s", 0x5b, INSTR_RX_RRRD },
356 { "m", 0x5c, INSTR_RX_RRRD },
357 { "d", 0x5d, INSTR_RX_RRRD },
358 { "al", 0x5e, INSTR_RX_RRRD },
359 { "sl", 0x5f, INSTR_RX_RRRD },
360 { "std", 0x60, INSTR_RX_FRRD },
361 { "mxd", 0x67, INSTR_RX_FRRD },
362 { "ld", 0x68, INSTR_RX_FRRD },
363 { "cd", 0x69, INSTR_RX_FRRD },
364 { "ad", 0x6a, INSTR_RX_FRRD },
365 { "sd", 0x6b, INSTR_RX_FRRD },
366 { "md", 0x6c, INSTR_RX_FRRD },
367 { "dd", 0x6d, INSTR_RX_FRRD },
368 { "aw", 0x6e, INSTR_RX_FRRD },
369 { "sw", 0x6f, INSTR_RX_FRRD },
370 { "ste", 0x70, INSTR_RX_FRRD },
371 { "ms", 0x71, INSTR_RX_RRRD },
372 { "le", 0x78, INSTR_RX_FRRD },
373 { "ce", 0x79, INSTR_RX_FRRD },
374 { "ae", 0x7a, INSTR_RX_FRRD },
375 { "se", 0x7b, INSTR_RX_FRRD },
376 { "mde", 0x7c, INSTR_RX_FRRD },
377 { "me", 0x7c, INSTR_RX_FRRD },
378 { "de", 0x7d, INSTR_RX_FRRD },
379 { "au", 0x7e, INSTR_RX_FRRD },
380 { "su", 0x7f, INSTR_RX_FRRD },
381 { "ssm", 0x80, INSTR_S_RD },
382 { "lpsw", 0x82, INSTR_S_RD },
383 { "diag", 0x83, INSTR_RS_RRRD },
384 { "brxh", 0x84, INSTR_RSI_RRP },
385 { "brxle", 0x85, INSTR_RSI_RRP },
386 { "bxh", 0x86, INSTR_RS_RRRD },
387 { "bxle", 0x87, INSTR_RS_RRRD },
388 { "srl", 0x88, INSTR_RS_R0RD },
389 { "sll", 0x89, INSTR_RS_R0RD },
390 { "sra", 0x8a, INSTR_RS_R0RD },
391 { "sla", 0x8b, INSTR_RS_R0RD },
392 { "srdl", 0x8c, INSTR_RS_R0RD },
393 { "sldl", 0x8d, INSTR_RS_R0RD },
394 { "srda", 0x8e, INSTR_RS_R0RD },
395 { "slda", 0x8f, INSTR_RS_R0RD },
396 { "stm", 0x90, INSTR_RS_RRRD },
397 { "tm", 0x91, INSTR_SI_URD },
398 { "mvi", 0x92, INSTR_SI_URD },
399 { "ts", 0x93, INSTR_S_RD },
400 { "ni", 0x94, INSTR_SI_URD },
401 { "cli", 0x95, INSTR_SI_URD },
402 { "oi", 0x96, INSTR_SI_URD },
403 { "xi", 0x97, INSTR_SI_URD },
404 { "lm", 0x98, INSTR_RS_RRRD },
405 { "trace", 0x99, INSTR_RS_RRRD },
406 { "lam", 0x9a, INSTR_RS_AARD },
407 { "stam", 0x9b, INSTR_RS_AARD },
408 { "mvcle", 0xa8, INSTR_RS_RRRD },
409 { "clcle", 0xa9, INSTR_RS_RRRD },
410 { "stnsm", 0xac, INSTR_SI_URD },
411 { "stosm", 0xad, INSTR_SI_URD },
412 { "sigp", 0xae, INSTR_RS_RRRD },
413 { "mc", 0xaf, INSTR_SI_URD },
414 { "lra", 0xb1, INSTR_RX_RRRD },
415 { "stctl", 0xb6, INSTR_RS_CCRD },
416 { "lctl", 0xb7, INSTR_RS_CCRD },
417 { "cs", 0xba, INSTR_RS_RRRD },
418 { "cds", 0xbb, INSTR_RS_RRRD },
419 { "clm", 0xbd, INSTR_RS_RURD },
420 { "stcm", 0xbe, INSTR_RS_RURD },
421 { "icm", 0xbf, INSTR_RS_RURD },
422 { "mvn", 0xd1, INSTR_SS_L0RDRD },
423 { "mvc", 0xd2, INSTR_SS_L0RDRD },
424 { "mvz", 0xd3, INSTR_SS_L0RDRD },
425 { "nc", 0xd4, INSTR_SS_L0RDRD },
426 { "clc", 0xd5, INSTR_SS_L0RDRD },
427 { "oc", 0xd6, INSTR_SS_L0RDRD },
428 { "xc", 0xd7, INSTR_SS_L0RDRD },
429 { "mvck", 0xd9, INSTR_SS_RRRDRD },
430 { "mvcp", 0xda, INSTR_SS_RRRDRD },
431 { "mvcs", 0xdb, INSTR_SS_RRRDRD },
432 { "tr", 0xdc, INSTR_SS_L0RDRD },
433 { "trt", 0xdd, INSTR_SS_L0RDRD },
434 { "ed", 0xde, INSTR_SS_L0RDRD },
435 { "edmk", 0xdf, INSTR_SS_L0RDRD },
436 { "pku", 0xe1, INSTR_SS_L0RDRD },
437 { "unpku", 0xe2, INSTR_SS_L0RDRD },
438 { "mvcin", 0xe8, INSTR_SS_L0RDRD },
439 { "pka", 0xe9, INSTR_SS_L0RDRD },
440 { "unpka", 0xea, INSTR_SS_L0RDRD },
441 { "plo", 0xee, INSTR_SS_RRRDRD2 },
442 { "srp", 0xf0, INSTR_SS_LIRDRD },
443 { "mvo", 0xf1, INSTR_SS_LLRDRD },
444 { "pack", 0xf2, INSTR_SS_LLRDRD },
445 { "unpk", 0xf3, INSTR_SS_LLRDRD },
446 { "zap", 0xf8, INSTR_SS_LLRDRD },
447 { "cp", 0xf9, INSTR_SS_LLRDRD },
448 { "ap", 0xfa, INSTR_SS_LLRDRD },
449 { "sp", 0xfb, INSTR_SS_LLRDRD },
450 { "mp", 0xfc, INSTR_SS_LLRDRD },
451 { "dp", 0xfd, INSTR_SS_LLRDRD },
452 { "", 0, INSTR_INVALID }
453};
454
455static struct insn opcode_01[] = {
456#ifdef CONFIG_64BIT
457 { "sam64", 0x0e, INSTR_E },
458#endif
459 { "pr", 0x01, INSTR_E },
460 { "upt", 0x02, INSTR_E },
461 { "sckpf", 0x07, INSTR_E },
462 { "tam", 0x0b, INSTR_E },
463 { "sam24", 0x0c, INSTR_E },
464 { "sam31", 0x0d, INSTR_E },
465 { "trap2", 0xff, INSTR_E },
466 { "", 0, INSTR_INVALID }
467};
468
469static struct insn opcode_a5[] = {
470#ifdef CONFIG_64BIT
471 { "iihh", 0x00, INSTR_RI_RU },
472 { "iihl", 0x01, INSTR_RI_RU },
473 { "iilh", 0x02, INSTR_RI_RU },
474 { "iill", 0x03, INSTR_RI_RU },
475 { "nihh", 0x04, INSTR_RI_RU },
476 { "nihl", 0x05, INSTR_RI_RU },
477 { "nilh", 0x06, INSTR_RI_RU },
478 { "nill", 0x07, INSTR_RI_RU },
479 { "oihh", 0x08, INSTR_RI_RU },
480 { "oihl", 0x09, INSTR_RI_RU },
481 { "oilh", 0x0a, INSTR_RI_RU },
482 { "oill", 0x0b, INSTR_RI_RU },
483 { "llihh", 0x0c, INSTR_RI_RU },
484 { "llihl", 0x0d, INSTR_RI_RU },
485 { "llilh", 0x0e, INSTR_RI_RU },
486 { "llill", 0x0f, INSTR_RI_RU },
487#endif
488 { "", 0, INSTR_INVALID }
489};
490
491static struct insn opcode_a7[] = {
492#ifdef CONFIG_64BIT
493 { "tmhh", 0x02, INSTR_RI_RU },
494 { "tmhl", 0x03, INSTR_RI_RU },
495 { "brctg", 0x07, INSTR_RI_RP },
496 { "lghi", 0x09, INSTR_RI_RI },
497 { "aghi", 0x0b, INSTR_RI_RI },
498 { "mghi", 0x0d, INSTR_RI_RI },
499 { "cghi", 0x0f, INSTR_RI_RI },
500#endif
501 { "tmlh", 0x00, INSTR_RI_RU },
502 { "tmll", 0x01, INSTR_RI_RU },
503 { "brc", 0x04, INSTR_RI_UP },
504 { "bras", 0x05, INSTR_RI_RP },
505 { "brct", 0x06, INSTR_RI_RP },
506 { "lhi", 0x08, INSTR_RI_RI },
507 { "ahi", 0x0a, INSTR_RI_RI },
508 { "mhi", 0x0c, INSTR_RI_RI },
509 { "chi", 0x0e, INSTR_RI_RI },
510 { "", 0, INSTR_INVALID }
511};
512
513static struct insn opcode_b2[] = {
514#ifdef CONFIG_64BIT
515 { "sske", 0x2b, INSTR_RRF_M0RR },
516 { "stckf", 0x7c, INSTR_S_RD },
517 { "cu21", 0xa6, INSTR_RRF_M0RR },
518 { "cuutf", 0xa6, INSTR_RRF_M0RR },
519 { "cu12", 0xa7, INSTR_RRF_M0RR },
520 { "cutfu", 0xa7, INSTR_RRF_M0RR },
521 { "stfle", 0xb0, INSTR_S_RD },
522 { "lpswe", 0xb2, INSTR_S_RD },
523#endif
524 { "stidp", 0x02, INSTR_S_RD },
525 { "sck", 0x04, INSTR_S_RD },
526 { "stck", 0x05, INSTR_S_RD },
527 { "sckc", 0x06, INSTR_S_RD },
528 { "stckc", 0x07, INSTR_S_RD },
529 { "spt", 0x08, INSTR_S_RD },
530 { "stpt", 0x09, INSTR_S_RD },
531 { "spka", 0x0a, INSTR_S_RD },
532 { "ipk", 0x0b, INSTR_S_00 },
533 { "ptlb", 0x0d, INSTR_S_00 },
534 { "spx", 0x10, INSTR_S_RD },
535 { "stpx", 0x11, INSTR_S_RD },
536 { "stap", 0x12, INSTR_S_RD },
537 { "sie", 0x14, INSTR_S_RD },
538 { "pc", 0x18, INSTR_S_RD },
539 { "sac", 0x19, INSTR_S_RD },
540 { "cfc", 0x1a, INSTR_S_RD },
541 { "ipte", 0x21, INSTR_RRE_RR },
542 { "ipm", 0x22, INSTR_RRE_R0 },
543 { "ivsk", 0x23, INSTR_RRE_RR },
544 { "iac", 0x24, INSTR_RRE_R0 },
545 { "ssar", 0x25, INSTR_RRE_R0 },
546 { "epar", 0x26, INSTR_RRE_R0 },
547 { "esar", 0x27, INSTR_RRE_R0 },
548 { "pt", 0x28, INSTR_RRE_RR },
549 { "iske", 0x29, INSTR_RRE_RR },
550 { "rrbe", 0x2a, INSTR_RRE_RR },
551 { "sske", 0x2b, INSTR_RRE_RR },
552 { "tb", 0x2c, INSTR_RRE_0R },
553 { "dxr", 0x2d, INSTR_RRE_F0 },
554 { "pgin", 0x2e, INSTR_RRE_RR },
555 { "pgout", 0x2f, INSTR_RRE_RR },
556 { "csch", 0x30, INSTR_S_00 },
557 { "hsch", 0x31, INSTR_S_00 },
558 { "msch", 0x32, INSTR_S_RD },
559 { "ssch", 0x33, INSTR_S_RD },
560 { "stsch", 0x34, INSTR_S_RD },
561 { "tsch", 0x35, INSTR_S_RD },
562 { "tpi", 0x36, INSTR_S_RD },
563 { "sal", 0x37, INSTR_S_00 },
564 { "rsch", 0x38, INSTR_S_00 },
565 { "stcrw", 0x39, INSTR_S_RD },
566 { "stcps", 0x3a, INSTR_S_RD },
567 { "rchp", 0x3b, INSTR_S_00 },
568 { "schm", 0x3c, INSTR_S_00 },
569 { "bakr", 0x40, INSTR_RRE_RR },
570 { "cksm", 0x41, INSTR_RRE_RR },
571 { "sqdr", 0x44, INSTR_RRE_F0 },
572 { "sqer", 0x45, INSTR_RRE_F0 },
573 { "stura", 0x46, INSTR_RRE_RR },
574 { "msta", 0x47, INSTR_RRE_R0 },
575 { "palb", 0x48, INSTR_RRE_00 },
576 { "ereg", 0x49, INSTR_RRE_RR },
577 { "esta", 0x4a, INSTR_RRE_RR },
578 { "lura", 0x4b, INSTR_RRE_RR },
579 { "tar", 0x4c, INSTR_RRE_AR },
580 { "cpya", INSTR_RRE_AA },
581 { "sar", 0x4e, INSTR_RRE_AR },
582 { "ear", 0x4f, INSTR_RRE_RA },
583 { "csp", 0x50, INSTR_RRE_RR },
584 { "msr", 0x52, INSTR_RRE_RR },
585 { "mvpg", 0x54, INSTR_RRE_RR },
586 { "mvst", 0x55, INSTR_RRE_RR },
587 { "cuse", 0x57, INSTR_RRE_RR },
588 { "bsg", 0x58, INSTR_RRE_RR },
589 { "bsa", 0x5a, INSTR_RRE_RR },
590 { "clst", 0x5d, INSTR_RRE_RR },
591 { "srst", 0x5e, INSTR_RRE_RR },
592 { "cmpsc", 0x63, INSTR_RRE_RR },
593 { "cmpsc", 0x63, INSTR_RRE_RR },
594 { "siga", 0x74, INSTR_S_RD },
595 { "xsch", 0x76, INSTR_S_00 },
596 { "rp", 0x77, INSTR_S_RD },
597 { "stcke", 0x78, INSTR_S_RD },
598 { "sacf", 0x79, INSTR_S_RD },
599 { "stsi", 0x7d, INSTR_S_RD },
600 { "srnm", 0x99, INSTR_S_RD },
601 { "stfpc", 0x9c, INSTR_S_RD },
602 { "lfpc", 0x9d, INSTR_S_RD },
603 { "tre", 0xa5, INSTR_RRE_RR },
604 { "cuutf", 0xa6, INSTR_RRE_RR },
605 { "cutfu", 0xa7, INSTR_RRE_RR },
606 { "stfl", 0xb1, INSTR_S_RD },
607 { "trap4", 0xff, INSTR_S_RD },
608 { "", 0, INSTR_INVALID }
609};
610
/* Extended opcodes for primary opcode 0xb3 (floating-point RRE/RRF forms). */
 611static struct insn opcode_b3[] = {
 612#ifdef CONFIG_64BIT
 613 { "maylr", 0x38, INSTR_RRF_F0FF },
 614 { "mylr", 0x39, INSTR_RRF_F0FF },
 615 { "mayr", 0x3a, INSTR_RRF_F0FF },
 616 { "myr", 0x3b, INSTR_RRF_F0FF },
 617 { "mayhr", 0x3c, INSTR_RRF_F0FF },
 618 { "myhr", 0x3d, INSTR_RRF_F0FF },
 619 { "cegbr", 0xa4, INSTR_RRE_RR },
 620 { "cdgbr", 0xa5, INSTR_RRE_RR },
 621 { "cxgbr", 0xa6, INSTR_RRE_RR },
 622 { "cgebr", 0xa8, INSTR_RRF_U0RF },
 623 { "cgdbr", 0xa9, INSTR_RRF_U0RF },
 624 { "cgxbr", 0xaa, INSTR_RRF_U0RF },
 625 { "cfer", 0xb8, INSTR_RRF_U0RF },
 626 { "cfdr", 0xb9, INSTR_RRF_U0RF },
 627 { "cfxr", 0xba, INSTR_RRF_U0RF },
 628 { "cegr", 0xc4, INSTR_RRE_RR },
 629 { "cdgr", 0xc5, INSTR_RRE_RR },
 630 { "cxgr", 0xc6, INSTR_RRE_RR },
 631 { "cger", 0xc8, INSTR_RRF_U0RF },
 632 { "cgdr", 0xc9, INSTR_RRF_U0RF },
 633 { "cgxr", 0xca, INSTR_RRF_U0RF },
 634#endif
 635 { "lpebr", 0x00, INSTR_RRE_FF },
 636 { "lnebr", 0x01, INSTR_RRE_FF },
 637 { "ltebr", 0x02, INSTR_RRE_FF },
 638 { "lcebr", 0x03, INSTR_RRE_FF },
 639 { "ldebr", 0x04, INSTR_RRE_FF },
 640 { "lxdbr", 0x05, INSTR_RRE_FF },
 641 { "lxebr", 0x06, INSTR_RRE_FF },
 642 { "mxdbr", 0x07, INSTR_RRE_FF },
 643 { "kebr", 0x08, INSTR_RRE_FF },
 644 { "cebr", 0x09, INSTR_RRE_FF },
 645 { "aebr", 0x0a, INSTR_RRE_FF },
 646 { "sebr", 0x0b, INSTR_RRE_FF },
 647 { "mdebr", 0x0c, INSTR_RRE_FF },
 648 { "debr", 0x0d, INSTR_RRE_FF },
 649 { "maebr", 0x0e, INSTR_RRF_F0FF },
 650 { "msebr", 0x0f, INSTR_RRF_F0FF },
 651 { "lpdbr", 0x10, INSTR_RRE_FF },
 652 { "lndbr", 0x11, INSTR_RRE_FF },
 653 { "ltdbr", 0x12, INSTR_RRE_FF },
 654 { "lcdbr", 0x13, INSTR_RRE_FF },
 655 { "sqebr", 0x14, INSTR_RRE_FF },
 656 { "sqdbr", 0x15, INSTR_RRE_FF },
 657 { "sqxbr", 0x16, INSTR_RRE_FF },
 658 { "meebr", 0x17, INSTR_RRE_FF },
 659 { "kdbr", 0x18, INSTR_RRE_FF },
 660 { "cdbr", 0x19, INSTR_RRE_FF },
 661 { "adbr", 0x1a, INSTR_RRE_FF },
 662 { "sdbr", 0x1b, INSTR_RRE_FF },
 663 { "mdbr", 0x1c, INSTR_RRE_FF },
 664 { "ddbr", 0x1d, INSTR_RRE_FF },
 665 { "madbr", 0x1e, INSTR_RRF_F0FF },
 666 { "msdbr", 0x1f, INSTR_RRF_F0FF },
 667 { "lder", 0x24, INSTR_RRE_FF },
 668 { "lxdr", 0x25, INSTR_RRE_FF },
 669 { "lxer", 0x26, INSTR_RRE_FF },
 670 { "maer", 0x2e, INSTR_RRF_F0FF },
 671 { "mser", 0x2f, INSTR_RRF_F0FF },
 672 { "sqxr", 0x36, INSTR_RRE_FF },
 673 { "meer", 0x37, INSTR_RRE_FF },
 674 { "madr", 0x3e, INSTR_RRF_F0FF },
 675 { "msdr", 0x3f, INSTR_RRF_F0FF },
 676 { "lpxbr", 0x40, INSTR_RRE_FF },
 677 { "lnxbr", 0x41, INSTR_RRE_FF },
 678 { "ltxbr", 0x42, INSTR_RRE_FF },
 679 { "lcxbr", 0x43, INSTR_RRE_FF },
 680 { "ledbr", 0x44, INSTR_RRE_FF },
 681 { "ldxbr", 0x45, INSTR_RRE_FF },
 682 { "lexbr", 0x46, INSTR_RRE_FF },
 683 { "fixbr", 0x47, INSTR_RRF_U0FF },
 684 { "kxbr", 0x48, INSTR_RRE_FF },
 685 { "cxbr", 0x49, INSTR_RRE_FF },
 686 { "axbr", 0x4a, INSTR_RRE_FF },
 687 { "sxbr", 0x4b, INSTR_RRE_FF },
 688 { "mxbr", 0x4c, INSTR_RRE_FF },
 689 { "dxbr", 0x4d, INSTR_RRE_FF },
 690 { "tbedr", 0x50, INSTR_RRF_U0FF },
 691 { "tbdr", 0x51, INSTR_RRF_U0FF },
 692 { "diebr", 0x53, INSTR_RRF_FUFF },
 693 { "fiebr", 0x57, INSTR_RRF_U0FF },
 694 { "thder", 0x58, INSTR_RRE_RR },
 695 { "thdr", 0x59, INSTR_RRE_RR },
 696 { "didbr", 0x5b, INSTR_RRF_FUFF },
 697 { "fidbr", 0x5f, INSTR_RRF_U0FF },
 698 { "lpxr", 0x60, INSTR_RRE_FF },
 699 { "lnxr", 0x61, INSTR_RRE_FF },
 700 { "ltxr", 0x62, INSTR_RRE_FF },
 701 { "lcxr", 0x63, INSTR_RRE_FF },
 702 { "lxr", 0x65, INSTR_RRE_RR },
 703 { "lexr", 0x66, INSTR_RRE_FF },
 704 { "fixr", 0x67, INSTR_RRF_U0FF },
 705 { "cxr", 0x69, INSTR_RRE_FF },
 706 { "lzer", 0x74, INSTR_RRE_R0 },
 707 { "lzdr", 0x75, INSTR_RRE_R0 },
 708 { "lzxr", 0x76, INSTR_RRE_R0 },
 709 { "fier", 0x77, INSTR_RRF_U0FF },
 710 { "fidr", 0x7f, INSTR_RRF_U0FF },
 711 { "sfpc", 0x84, INSTR_RRE_RR_OPT },
 712 { "efpc", 0x8c, INSTR_RRE_RR_OPT },
 713 { "cefbr", 0x94, INSTR_RRE_RF },
 714 { "cdfbr", 0x95, INSTR_RRE_RF },
 715 { "cxfbr", 0x96, INSTR_RRE_RF },
 716 { "cfebr", 0x98, INSTR_RRF_U0RF },
 717 { "cfdbr", 0x99, INSTR_RRF_U0RF },
 718 { "cfxbr", 0x9a, INSTR_RRF_U0RF },
 719 { "cefr", 0xb4, INSTR_RRE_RF },
 720 { "cdfr", 0xb5, INSTR_RRE_RF },
 721 { "cxfr", 0xb6, INSTR_RRE_RF },
 722 { "", 0, INSTR_INVALID }
 723};
724
/* Extended opcodes for primary opcode 0xb9 (RRE/RRF general-register forms). */
 725static struct insn opcode_b9[] = {
 726#ifdef CONFIG_64BIT
 727 { "lpgr", 0x00, INSTR_RRE_RR },
 728 { "lngr", 0x01, INSTR_RRE_RR },
 729 { "ltgr", 0x02, INSTR_RRE_RR },
 730 { "lcgr", 0x03, INSTR_RRE_RR },
 731 { "lgr", 0x04, INSTR_RRE_RR },
 732 { "lurag", 0x05, INSTR_RRE_RR },
 733 { "lgbr", 0x06, INSTR_RRE_RR },
 734 { "lghr", 0x07, INSTR_RRE_RR },
 735 { "agr", 0x08, INSTR_RRE_RR },
 736 { "sgr", 0x09, INSTR_RRE_RR },
 737 { "algr", 0x0a, INSTR_RRE_RR },
 738 { "slgr", 0x0b, INSTR_RRE_RR },
 739 { "msgr", 0x0c, INSTR_RRE_RR },
 740 { "dsgr", 0x0d, INSTR_RRE_RR },
 741 { "eregg", 0x0e, INSTR_RRE_RR },
 742 { "lrvgr", 0x0f, INSTR_RRE_RR },
 743 { "lpgfr", 0x10, INSTR_RRE_RR },
 744 { "lngfr", 0x11, INSTR_RRE_RR },
 745 { "ltgfr", 0x12, INSTR_RRE_RR },
 746 { "lcgfr", 0x13, INSTR_RRE_RR },
 747 { "lgfr", 0x14, INSTR_RRE_RR },
 748 { "llgfr", 0x16, INSTR_RRE_RR },
 749 { "llgtr", 0x17, INSTR_RRE_RR },
 750 { "agfr", 0x18, INSTR_RRE_RR },
 751 { "sgfr", 0x19, INSTR_RRE_RR },
 752 { "algfr", 0x1a, INSTR_RRE_RR },
 753 { "slgfr", 0x1b, INSTR_RRE_RR },
 754 { "msgfr", 0x1c, INSTR_RRE_RR },
 755 { "dsgfr", 0x1d, INSTR_RRE_RR },
 756 { "cgr", 0x20, INSTR_RRE_RR },
 757 { "clgr", 0x21, INSTR_RRE_RR },
 758 { "sturg", 0x25, INSTR_RRE_RR },
 759 { "lbr", 0x26, INSTR_RRE_RR },
 760 { "lhr", 0x27, INSTR_RRE_RR },
 761 { "cgfr", 0x30, INSTR_RRE_RR },
 762 { "clgfr", 0x31, INSTR_RRE_RR },
 763 { "bctgr", 0x46, INSTR_RRE_RR },
 764 { "ngr", 0x80, INSTR_RRE_RR },
 765 { "ogr", 0x81, INSTR_RRE_RR },
 766 { "xgr", 0x82, INSTR_RRE_RR },
 767 { "flogr", 0x83, INSTR_RRE_RR },
 768 { "llgcr", 0x84, INSTR_RRE_RR },
 769 { "llghr", 0x85, INSTR_RRE_RR },
 770 { "mlgr", 0x86, INSTR_RRE_RR },
 771 { "dlgr", 0x87, INSTR_RRE_RR },
 772 { "alcgr", 0x88, INSTR_RRE_RR },
 773 { "slbgr", 0x89, INSTR_RRE_RR },
 774 { "cspg", 0x8a, INSTR_RRE_RR },
 775 { "idte", 0x8e, INSTR_RRF_R0RR },
 776 { "llcr", 0x94, INSTR_RRE_RR },
 777 { "llhr", 0x95, INSTR_RRE_RR },
 778 { "esea", 0x9d, INSTR_RRE_R0 },
 779 { "lptea", 0xaa, INSTR_RRF_RURR },
 780 { "cu14", 0xb0, INSTR_RRF_M0RR },
 781 { "cu24", 0xb1, INSTR_RRF_M0RR },
 782 { "cu41", 0xb2, INSTR_RRF_M0RR },
 783 { "cu42", 0xb3, INSTR_RRF_M0RR },
 784#endif
 785 { "kmac", 0x1e, INSTR_RRE_RR },
 786 { "lrvr", 0x1f, INSTR_RRE_RR },
 787 { "km", 0x2e, INSTR_RRE_RR },
 788 { "kmc", 0x2f, INSTR_RRE_RR },
 789 { "kimd", 0x3e, INSTR_RRE_RR },
 790 { "klmd", 0x3f, INSTR_RRE_RR },
 791 { "epsw", 0x8d, INSTR_RRE_RR },
 792 { "trtt", 0x90, INSTR_RRE_RR },
 793 { "trtt", 0x90, INSTR_RRF_M0RR }, /* NOTE(review): same opfrag as the RRE entry above; find_insn() returns the first match, so this RRF variant appears unreachable -- confirm intent */
 794 { "trto", 0x91, INSTR_RRE_RR },
 795 { "trto", 0x91, INSTR_RRF_M0RR },
 796 { "trot", 0x92, INSTR_RRE_RR },
 797 { "trot", 0x92, INSTR_RRF_M0RR },
 798 { "troo", 0x93, INSTR_RRE_RR },
 799 { "troo", 0x93, INSTR_RRF_M0RR },
 800 { "mlr", 0x96, INSTR_RRE_RR },
 801 { "dlr", 0x97, INSTR_RRE_RR },
 802 { "alcr", 0x98, INSTR_RRE_RR },
 803 { "slbr", 0x99, INSTR_RRE_RR },
 804 { "", 0, INSTR_INVALID }
 805};
806
/* Extended opcodes for primary opcode 0xc0 (RIL 32-bit-immediate forms). */
 807static struct insn opcode_c0[] = {
 808#ifdef CONFIG_64BIT
 809 { "lgfi", 0x01, INSTR_RIL_RI },
 810 { "xihf", 0x06, INSTR_RIL_RU },
 811 { "xilf", 0x07, INSTR_RIL_RU },
 812 { "iihf", 0x08, INSTR_RIL_RU },
 813 { "iilf", 0x09, INSTR_RIL_RU },
 814 { "nihf", 0x0a, INSTR_RIL_RU },
 815 { "nilf", 0x0b, INSTR_RIL_RU },
 816 { "oihf", 0x0c, INSTR_RIL_RU },
 817 { "oilf", 0x0d, INSTR_RIL_RU },
 818 { "llihf", 0x0e, INSTR_RIL_RU },
 819 { "llilf", 0x0f, INSTR_RIL_RU },
 820#endif
 821 { "larl", 0x00, INSTR_RIL_RP },
 822 { "brcl", 0x04, INSTR_RIL_UP },
 823 { "brasl", 0x05, INSTR_RIL_RP },
 824 { "", 0, INSTR_INVALID }
 825};
826
/* Extended opcodes for primary opcode 0xc2 (64-bit RIL immediate arithmetic). */
 827static struct insn opcode_c2[] = {
 828#ifdef CONFIG_64BIT
 829 { "slgfi", 0x04, INSTR_RIL_RU },
 830 { "slfi", 0x05, INSTR_RIL_RU },
 831 { "agfi", 0x08, INSTR_RIL_RI },
 832 { "afi", 0x09, INSTR_RIL_RI },
 833 { "algfi", 0x0a, INSTR_RIL_RU },
 834 { "alfi", 0x0b, INSTR_RIL_RU },
 835 { "cgfi", 0x0c, INSTR_RIL_RI },
 836 { "cfi", 0x0d, INSTR_RIL_RI },
 837 { "clgfi", 0x0e, INSTR_RIL_RU },
 838 { "clfi", 0x0f, INSTR_RIL_RU },
 839#endif
 840 { "", 0, INSTR_INVALID }
 841};
842
/* Extended opcodes for primary opcode 0xc8 (currently only mvcos, 64-bit). */
 843static struct insn opcode_c8[] = {
 844#ifdef CONFIG_64BIT
 845 { "mvcos", 0x00, INSTR_SSF_RRDRD },
 846#endif
 847 { "", 0, INSTR_INVALID }
 848};
849
/* Extended opcodes for primary opcode 0xe3 (RXY long-displacement forms);
 * the opcode fragment for this table comes from byte 5 of the instruction. */
 850static struct insn opcode_e3[] = {
 851#ifdef CONFIG_64BIT
 852 { "ltg", 0x02, INSTR_RXY_RRRD },
 853 { "lrag", 0x03, INSTR_RXY_RRRD },
 854 { "lg", 0x04, INSTR_RXY_RRRD },
 855 { "cvby", 0x06, INSTR_RXY_RRRD },
 856 { "ag", 0x08, INSTR_RXY_RRRD },
 857 { "sg", 0x09, INSTR_RXY_RRRD },
 858 { "alg", 0x0a, INSTR_RXY_RRRD },
 859 { "slg", 0x0b, INSTR_RXY_RRRD },
 860 { "msg", 0x0c, INSTR_RXY_RRRD },
 861 { "dsg", 0x0d, INSTR_RXY_RRRD },
 862 { "cvbg", 0x0e, INSTR_RXY_RRRD },
 863 { "lrvg", 0x0f, INSTR_RXY_RRRD },
 864 { "lt", 0x12, INSTR_RXY_RRRD },
 865 { "lray", 0x13, INSTR_RXY_RRRD },
 866 { "lgf", 0x14, INSTR_RXY_RRRD },
 867 { "lgh", 0x15, INSTR_RXY_RRRD },
 868 { "llgf", 0x16, INSTR_RXY_RRRD },
 869 { "llgt", 0x17, INSTR_RXY_RRRD },
 870 { "agf", 0x18, INSTR_RXY_RRRD },
 871 { "sgf", 0x19, INSTR_RXY_RRRD },
 872 { "algf", 0x1a, INSTR_RXY_RRRD },
 873 { "slgf", 0x1b, INSTR_RXY_RRRD },
 874 { "msgf", 0x1c, INSTR_RXY_RRRD },
 875 { "dsgf", 0x1d, INSTR_RXY_RRRD },
 876 { "cg", 0x20, INSTR_RXY_RRRD },
 877 { "clg", 0x21, INSTR_RXY_RRRD },
 878 { "stg", 0x24, INSTR_RXY_RRRD },
 879 { "cvdy", 0x26, INSTR_RXY_RRRD },
 880 { "cvdg", 0x2e, INSTR_RXY_RRRD },
 881 { "strvg", 0x2f, INSTR_RXY_RRRD },
 882 { "cgf", 0x30, INSTR_RXY_RRRD },
 883 { "clgf", 0x31, INSTR_RXY_RRRD },
 884 { "strvh", 0x3f, INSTR_RXY_RRRD },
 885 { "bctg", 0x46, INSTR_RXY_RRRD },
 886 { "sty", 0x50, INSTR_RXY_RRRD },
 887 { "msy", 0x51, INSTR_RXY_RRRD },
 888 { "ny", 0x54, INSTR_RXY_RRRD },
 889 { "cly", 0x55, INSTR_RXY_RRRD },
 890 { "oy", 0x56, INSTR_RXY_RRRD },
 891 { "xy", 0x57, INSTR_RXY_RRRD },
 892 { "ly", 0x58, INSTR_RXY_RRRD },
 893 { "cy", 0x59, INSTR_RXY_RRRD },
 894 { "ay", 0x5a, INSTR_RXY_RRRD },
 895 { "sy", 0x5b, INSTR_RXY_RRRD },
 896 { "aly", 0x5e, INSTR_RXY_RRRD },
 897 { "sly", 0x5f, INSTR_RXY_RRRD },
 898 { "sthy", 0x70, INSTR_RXY_RRRD },
 899 { "lay", 0x71, INSTR_RXY_RRRD },
 900 { "stcy", 0x72, INSTR_RXY_RRRD },
 901 { "icy", 0x73, INSTR_RXY_RRRD },
 902 { "lb", 0x76, INSTR_RXY_RRRD },
 903 { "lgb", 0x77, INSTR_RXY_RRRD },
 904 { "lhy", 0x78, INSTR_RXY_RRRD },
 905 { "chy", 0x79, INSTR_RXY_RRRD },
 906 { "ahy", 0x7a, INSTR_RXY_RRRD },
 907 { "shy", 0x7b, INSTR_RXY_RRRD },
 908 { "ng", 0x80, INSTR_RXY_RRRD },
 909 { "og", 0x81, INSTR_RXY_RRRD },
 910 { "xg", 0x82, INSTR_RXY_RRRD },
 911 { "mlg", 0x86, INSTR_RXY_RRRD },
 912 { "dlg", 0x87, INSTR_RXY_RRRD },
 913 { "alcg", 0x88, INSTR_RXY_RRRD },
 914 { "slbg", 0x89, INSTR_RXY_RRRD },
 915 { "stpq", 0x8e, INSTR_RXY_RRRD },
 916 { "lpq", 0x8f, INSTR_RXY_RRRD },
 917 { "llgc", 0x90, INSTR_RXY_RRRD },
 918 { "llgh", 0x91, INSTR_RXY_RRRD },
 919 { "llc", 0x94, INSTR_RXY_RRRD },
 920 { "llh", 0x95, INSTR_RXY_RRRD },
 921#endif
 922 { "lrv", 0x1e, INSTR_RXY_RRRD },
 923 { "lrvh", 0x1f, INSTR_RXY_RRRD },
 924 { "strv", 0x3e, INSTR_RXY_RRRD },
 925 { "ml", 0x96, INSTR_RXY_RRRD },
 926 { "dl", 0x97, INSTR_RXY_RRRD },
 927 { "alc", 0x98, INSTR_RXY_RRRD },
 928 { "slb", 0x99, INSTR_RXY_RRRD },
 929 { "", 0, INSTR_INVALID }
 930};
931
/* Extended opcodes for primary opcode 0xe5 (SSE storage-to-storage forms). */
 932static struct insn opcode_e5[] = {
 933#ifdef CONFIG_64BIT
 934 { "strag", 0x02, INSTR_SSE_RDRD },
 935#endif
 936 { "lasp", 0x00, INSTR_SSE_RDRD },
 937 { "tprot", 0x01, INSTR_SSE_RDRD },
 938 { "mvcsk", 0x0e, INSTR_SSE_RDRD },
 939 { "mvcdk", 0x0f, INSTR_SSE_RDRD },
 940 { "", 0, INSTR_INVALID }
 941};
942
/* Extended opcodes for primary opcode 0xeb (RSY/SIY long-displacement forms);
 * the opcode fragment for this table comes from byte 5 of the instruction. */
 943static struct insn opcode_eb[] = {
 944#ifdef CONFIG_64BIT
 945 { "lmg", 0x04, INSTR_RSY_RRRD },
 946 { "srag", 0x0a, INSTR_RSY_RRRD },
 947 { "slag", 0x0b, INSTR_RSY_RRRD },
 948 { "srlg", 0x0c, INSTR_RSY_RRRD },
 949 { "sllg", 0x0d, INSTR_RSY_RRRD },
 950 { "tracg", 0x0f, INSTR_RSY_RRRD },
 951 { "csy", 0x14, INSTR_RSY_RRRD },
 952 { "rllg", 0x1c, INSTR_RSY_RRRD },
 953 { "clmh", 0x20, INSTR_RSY_RURD },
 954 { "clmy", 0x21, INSTR_RSY_RURD },
 955 { "stmg", 0x24, INSTR_RSY_RRRD },
 956 { "stctg", 0x25, INSTR_RSY_CCRD },
 957 { "stmh", 0x26, INSTR_RSY_RRRD },
 958 { "stcmh", 0x2c, INSTR_RSY_RURD },
 959 { "stcmy", 0x2d, INSTR_RSY_RURD },
 960 { "lctlg", 0x2f, INSTR_RSY_CCRD },
 961 { "csg", 0x30, INSTR_RSY_RRRD },
 962 { "cdsy", 0x31, INSTR_RSY_RRRD },
 963 { "cdsg", 0x3e, INSTR_RSY_RRRD },
 964 { "bxhg", 0x44, INSTR_RSY_RRRD },
 965 { "bxleg", 0x45, INSTR_RSY_RRRD },
 966 { "tmy", 0x51, INSTR_SIY_URD },
 967 { "mviy", 0x52, INSTR_SIY_URD },
 968 { "niy", 0x54, INSTR_SIY_URD },
 969 { "cliy", 0x55, INSTR_SIY_URD },
 970 { "oiy", 0x56, INSTR_SIY_URD },
 971 { "xiy", 0x57, INSTR_SIY_URD },
 972 { "icmh", 0x80, INSTR_RSE_RURD },
 973 { "icmh", 0x80, INSTR_RSY_RURD }, /* NOTE(review): same opfrag as the RSE entry above; find_insn() takes the first match, so this entry appears unreachable -- confirm intent */
 974 { "icmy", 0x81, INSTR_RSY_RURD },
 975 { "clclu", 0x8f, INSTR_RSY_RRRD },
 976 { "stmy", 0x90, INSTR_RSY_RRRD },
 977 { "lmh", 0x96, INSTR_RSY_RRRD },
 978 { "lmy", 0x98, INSTR_RSY_RRRD },
 979 { "lamy", 0x9a, INSTR_RSY_AARD },
 980 { "stamy", 0x9b, INSTR_RSY_AARD },
 981#endif
 982 { "rll", 0x1d, INSTR_RSY_RRRD },
 983 { "mvclu", 0x8e, INSTR_RSY_RRRD },
 984 { "tp", 0xc0, INSTR_RSL_R0RD },
 985 { "", 0, INSTR_INVALID }
 986};
987
/* Extended opcodes for primary opcode 0xec (RIE relative-branch forms). */
 988static struct insn opcode_ec[] = {
 989#ifdef CONFIG_64BIT
 990 { "brxhg", 0x44, INSTR_RIE_RRP },
 991 { "brxlg", 0x45, INSTR_RIE_RRP },
 992#endif
 993 { "", 0, INSTR_INVALID }
 994};
995
/* Extended opcodes for primary opcode 0xed (RXE/RXF/RXY floating-point forms);
 * the opcode fragment for this table comes from byte 5 of the instruction. */
 996static struct insn opcode_ed[] = {
 997#ifdef CONFIG_64BIT
 998 { "mayl", 0x38, INSTR_RXF_FRRDF },
 999 { "myl", 0x39, INSTR_RXF_FRRDF },
 1000 { "may", 0x3a, INSTR_RXF_FRRDF },
 1001 { "my", 0x3b, INSTR_RXF_FRRDF },
 1002 { "mayh", 0x3c, INSTR_RXF_FRRDF },
 1003 { "myh", 0x3d, INSTR_RXF_FRRDF },
 1004 { "ley", 0x64, INSTR_RXY_FRRD },
 1005 { "ldy", 0x65, INSTR_RXY_FRRD },
 1006 { "stey", 0x66, INSTR_RXY_FRRD },
 1007 { "stdy", 0x67, INSTR_RXY_FRRD },
 1008#endif
 1009 { "ldeb", 0x04, INSTR_RXE_FRRD },
 1010 { "lxdb", 0x05, INSTR_RXE_FRRD },
 1011 { "lxeb", 0x06, INSTR_RXE_FRRD },
 1012 { "mxdb", 0x07, INSTR_RXE_FRRD },
 1013 { "keb", 0x08, INSTR_RXE_FRRD },
 1014 { "ceb", 0x09, INSTR_RXE_FRRD },
 1015 { "aeb", 0x0a, INSTR_RXE_FRRD },
 1016 { "seb", 0x0b, INSTR_RXE_FRRD },
 1017 { "mdeb", 0x0c, INSTR_RXE_FRRD },
 1018 { "deb", 0x0d, INSTR_RXE_FRRD },
 1019 { "maeb", 0x0e, INSTR_RXF_FRRDF },
 1020 { "mseb", 0x0f, INSTR_RXF_FRRDF },
 1021 { "tceb", 0x10, INSTR_RXE_FRRD },
 1022 { "tcdb", 0x11, INSTR_RXE_FRRD },
 1023 { "tcxb", 0x12, INSTR_RXE_FRRD },
 1024 { "sqeb", 0x14, INSTR_RXE_FRRD },
 1025 { "sqdb", 0x15, INSTR_RXE_FRRD },
 1026 { "meeb", 0x17, INSTR_RXE_FRRD },
 1027 { "kdb", 0x18, INSTR_RXE_FRRD },
 1028 { "cdb", 0x19, INSTR_RXE_FRRD },
 1029 { "adb", 0x1a, INSTR_RXE_FRRD },
 1030 { "sdb", 0x1b, INSTR_RXE_FRRD },
 1031 { "mdb", 0x1c, INSTR_RXE_FRRD },
 1032 { "ddb", 0x1d, INSTR_RXE_FRRD },
 1033 { "madb", 0x1e, INSTR_RXF_FRRDF },
 1034 { "msdb", 0x1f, INSTR_RXF_FRRDF },
 1035 { "lde", 0x24, INSTR_RXE_FRRD },
 1036 { "lxd", 0x25, INSTR_RXE_FRRD },
 1037 { "lxe", 0x26, INSTR_RXE_FRRD },
 1038 { "mae", 0x2e, INSTR_RXF_FRRDF },
 1039 { "mse", 0x2f, INSTR_RXF_FRRDF },
 1040 { "sqe", 0x34, INSTR_RXE_FRRD },
 1041 { "mee", 0x37, INSTR_RXE_FRRD },
 1042 { "mad", 0x3e, INSTR_RXF_FRRDF },
 1043 { "msd", 0x3f, INSTR_RXF_FRRDF },
 1044 { "", 0, INSTR_INVALID }
 1045};
1046
 1047/* Extracts an operand value from an instruction. */
 1048static unsigned int extract_operand(unsigned char *code,
 1049 const struct operand *operand)
 1050{
 1051 unsigned int val;
 1052 int bits;
 1053
 1054 /* Extract fragments of the operand byte for byte. */
 1055 code += operand->shift / 8;
 1056 bits = (operand->shift & 7) + operand->bits;
 1057 val = 0;
 1058 do {
 1059 val <<= 8;
 1060 val |= (unsigned int) *code++;
 1061 bits -= 8;
 1062 } while (bits > 0);
 1063 val >>= -bits; /* bits is <= 0 after the loop, so the shift count is non-negative */
 1064 val &= ((1U << (operand->bits - 1)) << 1) - 1; /* mask built in two shifts to avoid shift-by-32 UB when bits == 32 */
 1065
 1066 /* Check for special long displacement case. */
 1067 if (operand->bits == 20 && operand->shift == 20)
 1068 val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
 1069
 1070 /* Sign extend value if the operand is signed or pc relative. */
 1071 if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
 1072 (val & (1U << (operand->bits - 1))))
 1073 val |= (-1U << (operand->bits - 1)) << 1; /* again split to keep the shift count below the type width */
 1074
 1075 /* Double value if the operand is pc relative. */
 1076 if (operand->flags & OPERAND_PCREL)
 1077 val <<= 1;
 1078
 1079 /* Length x in an instructions has real length x + 1. */
 1080 if (operand->flags & OPERAND_LENGTH)
 1081 val++;
 1082 return val;
 1083}
1084
/*
 * insn_length - instruction length implied by the first opcode byte.
 *
 * s390 encodes the length in the two top bits of the first byte:
 * 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes.
 */
static inline int insn_length(unsigned char code)
{
	switch (code >> 6) {
	case 0:
		return 2;
	case 1:
	case 2:
		return 4;
	default:
		return 6;
	}
}
1089
/*
 * Look up the opcode table entry for the instruction at 'code'.
 *
 * The first byte selects the extended-opcode table; the opcode fragment
 * that distinguishes entries within a table is byte 1 by default, or byte 5
 * for the 6-byte formats (0xe3/0xeb/0xec/0xed).  The tables are scanned
 * linearly and the first entry whose masked fragment matches wins.
 * Returns NULL if the instruction cannot be decoded.
 */
 1090static struct insn *find_insn(unsigned char *code)
 1091{
 1092 unsigned char opfrag = code[1];
 1093 unsigned char opmask;
 1094 struct insn *table;
 1095
 1096 switch (code[0]) {
 1097 case 0x01:
 1098 table = opcode_01;
 1099 break;
 1100 case 0xa5:
 1101 table = opcode_a5;
 1102 break;
 1103 case 0xa7:
 1104 table = opcode_a7;
 1105 break;
 1106 case 0xb2:
 1107 table = opcode_b2;
 1108 break;
 1109 case 0xb3:
 1110 table = opcode_b3;
 1111 break;
 1112 case 0xb9:
 1113 table = opcode_b9;
 1114 break;
 1115 case 0xc0:
 1116 table = opcode_c0;
 1117 break;
 1118 case 0xc2:
 1119 table = opcode_c2;
 1120 break;
 1121 case 0xc8:
 1122 table = opcode_c8;
 1123 break;
 1124 case 0xe3:
 1125 table = opcode_e3;
 1126 opfrag = code[5]; /* 6-byte format: fragment lives in the last byte */
 1127 break;
 1128 case 0xe5:
 1129 table = opcode_e5;
 1130 break;
 1131 case 0xeb:
 1132 table = opcode_eb;
 1133 opfrag = code[5];
 1134 break;
 1135 case 0xec:
 1136 table = opcode_ec;
 1137 opfrag = code[5];
 1138 break;
 1139 case 0xed:
 1140 table = opcode_ed;
 1141 opfrag = code[5];
 1142 break;
 1143 default:
 1144 table = opcode; /* single-byte primary opcodes */
 1145 opfrag = code[0];
 1146 break;
 1147 }
 1148 while (table->format != INSTR_INVALID) {
 1149 opmask = formats[table->format][0]; /* per-format mask selects the significant fragment bits */
 1150 if (table->opfrag == (opfrag & opmask))
 1151 return table;
 1152 table++;
 1153 }
 1154 return NULL;
 1155}
1156
1157static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1158{
1159 struct insn *insn;
1160 const unsigned char *ops;
1161 const struct operand *operand;
1162 unsigned int value;
1163 char separator;
1164 char *ptr;
1165
1166 ptr = buffer;
1167 insn = find_insn(code);
1168 if (insn) {
1169 ptr += sprintf(ptr, "%.5s\t", insn->name);
1170 /* Extract the operands. */
1171 separator = 0;
1172 for (ops = formats[insn->format] + 1; *ops != 0; ops++) {
1173 operand = operands + *ops;
1174 value = extract_operand(code, operand);
1175 if ((operand->flags & OPERAND_INDEX) && value == 0)
1176 continue;
1177 if ((operand->flags & OPERAND_BASE) &&
1178 value == 0 && separator == '(') {
1179 separator = ',';
1180 continue;
1181 }
1182 if (separator)
1183 ptr += sprintf(ptr, "%c", separator);
1184 if (operand->flags & OPERAND_GPR)
1185 ptr += sprintf(ptr, "%%r%i", value);
1186 else if (operand->flags & OPERAND_FPR)
1187 ptr += sprintf(ptr, "%%f%i", value);
1188 else if (operand->flags & OPERAND_AR)
1189 ptr += sprintf(ptr, "%%a%i", value);
1190 else if (operand->flags & OPERAND_CR)
1191 ptr += sprintf(ptr, "%%c%i", value);
1192 else if (operand->flags & OPERAND_PCREL)
1193 ptr += sprintf(ptr, "%lx", value + addr);
1194 else if (operand->flags & OPERAND_SIGNED)
1195 ptr += sprintf(ptr, "%i", value);
1196 else
1197 ptr += sprintf(ptr, "%u", value);
1198 if (operand->flags & OPERAND_DISP)
1199 separator = '(';
1200 else if (operand->flags & OPERAND_BASE) {
1201 ptr += sprintf(ptr, ")");
1202 separator = ',';
1203 } else
1204 separator = ',';
1205 }
1206 } else
1207 ptr += sprintf(ptr, "unknown");
1208 return (int) (ptr - buffer);
1209}
1210
1211void show_code(struct pt_regs *regs)
1212{
1213 char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
1214 unsigned char code[64];
1215 char buffer[64], *ptr;
1216 mm_segment_t old_fs;
1217 unsigned long addr;
1218 int start, end, opsize, hops, i;
1219
1220 /* Get a snapshot of the 64 bytes surrounding the fault address. */
1221 old_fs = get_fs();
1222 set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
1223 for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
1224 addr = regs->psw.addr - 34 + start;
1225 if (__copy_from_user(code + start - 2,
1226 (char __user *) addr, 2))
1227 break;
1228 }
1229 for (end = 32; end < 64; end += 2) {
1230 addr = regs->psw.addr + end - 32;
1231 if (__copy_from_user(code + end,
1232 (char __user *) addr, 2))
1233 break;
1234 }
1235 set_fs(old_fs);
1236 /* Code snapshot useable ? */
1237 if ((regs->psw.addr & 1) || start >= end) {
1238 printk("%s Code: Bad PSW.\n", mode);
1239 return;
1240 }
1241 /* Find a starting point for the disassembly. */
1242 while (start < 32) {
1243 hops = 0;
1244 for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) {
1245 if (!find_insn(code + start + i))
1246 break;
1247 i += insn_length(code[start + i]);
1248 }
1249 if (start + i == 32)
1250 /* Looks good, sequence ends at PSW. */
1251 break;
1252 start += 2;
1253 }
1254 /* Decode the instructions. */
1255 ptr = buffer;
1256 ptr += sprintf(ptr, "%s Code:", mode);
1257 hops = 0;
1258 while (start < end && hops < 8) {
1259 *ptr++ = (start == 32) ? '>' : ' ';
1260 addr = regs->psw.addr + start - 32;
1261 ptr += sprintf(ptr, ONELONG, addr);
1262 opsize = insn_length(code[start]);
1263 if (start + opsize >= end)
1264 break;
1265 for (i = 0; i < opsize; i++)
1266 ptr += sprintf(ptr, "%02x", code[start + i]);
1267 *ptr++ = '\t';
1268 if (i < 6)
1269 *ptr++ = '\t';
1270 ptr += print_insn(ptr, code + start, addr);
1271 start += opsize;
1272 printk(buffer);
1273 ptr = buffer;
1274 ptr += sprintf(ptr, "\n ");
1275 hops++;
1276 }
1277 printk("\n");
1278}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5e47936573f2..50538e545618 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize)
253 break; 253 break;
254#endif 254#endif
255 /* 255 /*
256 * Finish memory detection at the first hole, unless 256 * Finish memory detection at the first hole
257 * - we reached the hsa -> skip it. 257 * if storage size is unknown.
258 * - we know there must be more.
259 */ 258 */
260 if (cc == -1UL && !memsize && old_addr != ADDR2G) 259 if (cc == -1UL && !memsize)
261 break; 260 break;
262 if (memsize && addr >= memsize) 261 if (memsize && addr >= memsize)
263 break; 262 break;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dddc3de30401..c8a2212014e0 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -249,8 +249,6 @@ sysc_do_restart:
249 bnz BASED(sysc_tracesys) 249 bnz BASED(sysc_tracesys)
250 basr %r14,%r8 # call sys_xxxx 250 basr %r14,%r8 # call sys_xxxx
251 st %r2,SP_R2(%r15) # store return value (change R2 on stack) 251 st %r2,SP_R2(%r15) # store return value (change R2 on stack)
252 # ATTENTION: check sys_execve_glue before
253 # changing anything here !!
254 252
255sysc_return: 253sysc_return:
256 tm SP_PSW+1(%r15),0x01 # returning to user ? 254 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -381,50 +379,37 @@ ret_from_fork:
381 b BASED(sysc_return) 379 b BASED(sysc_return)
382 380
383# 381#
384# clone, fork, vfork, exec and sigreturn need glue, 382# kernel_execve function needs to deal with pt_regs that is not
385# because they all expect pt_regs as parameter, 383# at the usual place
386# but are called with different parameter.
387# return-address is set up above
388# 384#
389sys_clone_glue: 385 .globl kernel_execve
390 la %r2,SP_PTREGS(%r15) # load pt_regs 386kernel_execve:
391 l %r1,BASED(.Lclone) 387 stm %r12,%r15,48(%r15)
392 br %r1 # branch to sys_clone 388 lr %r14,%r15
393 389 l %r13,__LC_SVC_NEW_PSW+4
394sys_fork_glue: 390 s %r15,BASED(.Lc_spsize)
395 la %r2,SP_PTREGS(%r15) # load pt_regs 391 st %r14,__SF_BACKCHAIN(%r15)
396 l %r1,BASED(.Lfork) 392 la %r12,SP_PTREGS(%r15)
397 br %r1 # branch to sys_fork 393 xc 0(__PT_SIZE,%r12),0(%r12)
398 394 l %r1,BASED(.Ldo_execve)
399sys_vfork_glue: 395 lr %r5,%r12
400 la %r2,SP_PTREGS(%r15) # load pt_regs 396 basr %r14,%r1
401 l %r1,BASED(.Lvfork) 397 ltr %r2,%r2
402 br %r1 # branch to sys_vfork 398 be BASED(0f)
403 399 a %r15,BASED(.Lc_spsize)
404sys_execve_glue: 400 lm %r12,%r15,48(%r15)
405 la %r2,SP_PTREGS(%r15) # load pt_regs 401 br %r14
406 l %r1,BASED(.Lexecve) 402 # execve succeeded.
407 lr %r12,%r14 # save return address 4030: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
408 basr %r14,%r1 # call sys_execve 404 l %r15,__LC_KERNEL_STACK # load ksp
409 ltr %r2,%r2 # check if execve failed 405 s %r15,BASED(.Lc_spsize) # make room for registers & psw
410 bnz 0(%r12) # it did fail -> store result in gpr2 406 l %r9,__LC_THREAD_INFO
411 b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8 407 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
412 # in system_call/sysc_tracesys 408 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
413 409 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
414sys_sigreturn_glue: 410 l %r1,BASED(.Lexecve_tail)
415 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter 411 basr %r14,%r1
416 l %r1,BASED(.Lsigreturn) 412 b BASED(sysc_return)
417 br %r1 # branch to sys_sigreturn
418
419sys_rt_sigreturn_glue:
420 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
421 l %r1,BASED(.Lrt_sigreturn)
422 br %r1 # branch to sys_sigreturn
423
424sys_sigaltstack_glue:
425 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
426 l %r1,BASED(.Lsigaltstack)
427 br %r1 # branch to sys_sigreturn
428 413
429/* 414/*
430 * Program check handler routine 415 * Program check handler routine
@@ -1031,19 +1016,11 @@ cleanup_io_leave_insn:
1031.Ldo_extint: .long do_extint 1016.Ldo_extint: .long do_extint
1032.Ldo_signal: .long do_signal 1017.Ldo_signal: .long do_signal
1033.Lhandle_per: .long do_single_step 1018.Lhandle_per: .long do_single_step
1019.Ldo_execve: .long do_execve
1020.Lexecve_tail: .long execve_tail
1034.Ljump_table: .long pgm_check_table 1021.Ljump_table: .long pgm_check_table
1035.Lschedule: .long schedule 1022.Lschedule: .long schedule
1036.Lclone: .long sys_clone
1037.Lexecve: .long sys_execve
1038.Lfork: .long sys_fork
1039.Lrt_sigreturn: .long sys_rt_sigreturn
1040.Lrt_sigsuspend:
1041 .long sys_rt_sigsuspend
1042.Lsigreturn: .long sys_sigreturn
1043.Lsigsuspend: .long sys_sigsuspend
1044.Lsigaltstack: .long sys_sigaltstack
1045.Ltrace: .long syscall_trace 1023.Ltrace: .long syscall_trace
1046.Lvfork: .long sys_vfork
1047.Lschedtail: .long schedule_tail 1024.Lschedtail: .long schedule_tail
1048.Lsysc_table: .long sys_call_table 1025.Lsysc_table: .long sys_call_table
1049#ifdef CONFIG_TRACE_IRQFLAGS 1026#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 0f758c329a5d..93745fd8f555 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -244,8 +244,6 @@ sysc_noemu:
244 jnz sysc_tracesys 244 jnz sysc_tracesys
245 basr %r14,%r8 # call sys_xxxx 245 basr %r14,%r8 # call sys_xxxx
246 stg %r2,SP_R2(%r15) # store return value (change R2 on stack) 246 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
247 # ATTENTION: check sys_execve_glue before
248 # changing anything here !!
249 247
250sysc_return: 248sysc_return:
251 tm SP_PSW+1(%r15),0x01 # returning to user ? 249 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -371,77 +369,35 @@ ret_from_fork:
371 j sysc_return 369 j sysc_return
372 370
373# 371#
374# clone, fork, vfork, exec and sigreturn need glue, 372# kernel_execve function needs to deal with pt_regs that is not
375# because they all expect pt_regs as parameter, 373# at the usual place
376# but are called with different parameter.
377# return-address is set up above
378# 374#
379sys_clone_glue: 375 .globl kernel_execve
380 la %r2,SP_PTREGS(%r15) # load pt_regs 376kernel_execve:
381 jg sys_clone # branch to sys_clone 377 stmg %r12,%r15,96(%r15)
382 378 lgr %r14,%r15
383#ifdef CONFIG_COMPAT 379 aghi %r15,-SP_SIZE
384sys32_clone_glue: 380 stg %r14,__SF_BACKCHAIN(%r15)
385 la %r2,SP_PTREGS(%r15) # load pt_regs 381 la %r12,SP_PTREGS(%r15)
386 jg sys32_clone # branch to sys32_clone 382 xc 0(__PT_SIZE,%r12),0(%r12)
387#endif 383 lgr %r5,%r12
388 384 brasl %r14,do_execve
389sys_fork_glue: 385 ltgfr %r2,%r2
390 la %r2,SP_PTREGS(%r15) # load pt_regs 386 je 0f
391 jg sys_fork # branch to sys_fork 387 aghi %r15,SP_SIZE
392 388 lmg %r12,%r15,96(%r15)
393sys_vfork_glue: 389 br %r14
394 la %r2,SP_PTREGS(%r15) # load pt_regs 390 # execve succeeded.
395 jg sys_vfork # branch to sys_vfork 3910: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
396 392 lg %r15,__LC_KERNEL_STACK # load ksp
397sys_execve_glue: 393 aghi %r15,-SP_SIZE # make room for registers & psw
398 la %r2,SP_PTREGS(%r15) # load pt_regs 394 lg %r13,__LC_SVC_NEW_PSW+8
399 lgr %r12,%r14 # save return address 395 lg %r9,__LC_THREAD_INFO
400 brasl %r14,sys_execve # call sys_execve 396 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
401 ltgr %r2,%r2 # check if execve failed 397 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
402 bnz 0(%r12) # it did fail -> store result in gpr2 398 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
403 b 6(%r12) # SKIP STG 2,SP_R2(15) in 399 brasl %r14,execve_tail
404 # system_call/sysc_tracesys 400 j sysc_return
405#ifdef CONFIG_COMPAT
406sys32_execve_glue:
407 la %r2,SP_PTREGS(%r15) # load pt_regs
408 lgr %r12,%r14 # save return address
409 brasl %r14,sys32_execve # call sys32_execve
410 ltgr %r2,%r2 # check if execve failed
411 bnz 0(%r12) # it did fail -> store result in gpr2
412 b 6(%r12) # SKIP STG 2,SP_R2(15) in
413 # system_call/sysc_tracesys
414#endif
415
416sys_sigreturn_glue:
417 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
418 jg sys_sigreturn # branch to sys_sigreturn
419
420#ifdef CONFIG_COMPAT
421sys32_sigreturn_glue:
422 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
423 jg sys32_sigreturn # branch to sys32_sigreturn
424#endif
425
426sys_rt_sigreturn_glue:
427 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
428 jg sys_rt_sigreturn # branch to sys_sigreturn
429
430#ifdef CONFIG_COMPAT
431sys32_rt_sigreturn_glue:
432 la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
433 jg sys32_rt_sigreturn # branch to sys32_sigreturn
434#endif
435
436sys_sigaltstack_glue:
437 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
438 jg sys_sigaltstack # branch to sys_sigreturn
439
440#ifdef CONFIG_COMPAT
441sys32_sigaltstack_glue:
442 la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
443 jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
444#endif
445 401
446/* 402/*
447 * Program check handler routine 403 * Program check handler routine
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 37010709fe68..a87b1976d409 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -39,7 +39,69 @@ startup_continue:
39 basr %r13,0 # get base 39 basr %r13,0 # get base
40.LPG1: sll %r13,1 # remove high order bit 40.LPG1: sll %r13,1 # remove high order bit
41 srl %r13,1 41 srl %r13,1
42 lhi %r1,1 # mode 1 = esame 42
43#ifdef CONFIG_ZFCPDUMP
44
45 # check if we have been ipled using zfcp dump:
46
47 tm 0xb9,0x01 # test if subchannel is enabled
48 jno .nodump # subchannel disabled
49 l %r1,0xb8
50 la %r5,.Lipl_schib-.LPG1(%r13)
51 stsch 0(%r5) # get schib of subchannel
52 jne .nodump # schib not available
53 tm 5(%r5),0x01 # devno valid?
54 jno .nodump
55 tm 4(%r5),0x80 # qdio capable device?
56 jno .nodump
57 l %r2,20(%r0) # address of ipl parameter block
58 lhi %r3,0
59 ic %r3,0x148(%r2) # get opt field
60 chi %r3,0x20 # load with dump?
61 jne .nodump
62
63 # store all prefix registers in case of load with dump:
64
65 la %r7,0 # base register for 0 page
66 la %r8,0 # first cpu
67 l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array
68 ahi %r11,4 # skip boot cpu
69 lr %r12,%r11
70 ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array
71 stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr
721:
73 cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ?
74 je 4f # if yes get next cpu
752:
76 lr %r9,%r7
77 sigp %r9,%r8,0x9 # stop & store status of cpu
78 brc 8,3f # accepted
79 brc 4,4f # status stored: next cpu
80 brc 2,2b # busy: try again
81 brc 1,4f # not op: next cpu
823:
83 mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array
84 ahi %r11,4 # next element in prefix array
85 clr %r11,%r12
86 je 5f # no more space in prefix array
874:
88 ahi %r8,1 # next cpu (r8 += 1)
89 cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
90 jl 1b # jump if not last cpu
915:
92 lhi %r1,2 # mode 2 = esame (dump)
93 j 6f
94 .align 4
95.Lipl_schib:
96 .rept 13
97 .long 0
98 .endr
99.nodump:
100 lhi %r1,1 # mode 1 = esame (normal ipl)
1016:
102#else
103 lhi %r1,1 # mode 1 = esame (normal ipl)
104#endif /* CONFIG_ZFCPDUMP */
43 mvi __LC_AR_MODE_ID,1 # set esame flag 105 mvi __LC_AR_MODE_ID,1 # set esame flag
44 slr %r0,%r0 # set cpuid to zero 106 slr %r0,%r0 # set cpuid to zero
45 sigp %r1,%r0,0x12 # switch to esame mode 107 sigp %r1,%r0,0x12 # switch to esame mode
@@ -149,6 +211,14 @@ startup_continue:
149.L4malign:.quad 0xffffffffffc00000 211.L4malign:.quad 0xffffffffffc00000
150.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 212.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
151.Lnop: .long 0x07000700 213.Lnop: .long 0x07000700
214#ifdef CONFIG_ZFCPDUMP
215.Lcurrent_cpu:
216 .long 0x0
217.Llast_cpu:
218 .long 0x0000ffff
219.Lpref_arr_ptr:
220 .long zfcpdump_prefix_array
221#endif /* CONFIG_ZFCPDUMP */
152.Lparmaddr: 222.Lparmaddr:
153 .quad PARMAREA 223 .quad PARMAREA
154 .align 64 224 .align 64
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f731185bf2bd..06833ac2b115 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -29,36 +29,21 @@
29#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) 29#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
30#define SCCB_FLAG (s390_readinfo_sccb.flags) 30#define SCCB_FLAG (s390_readinfo_sccb.flags)
31 31
32enum ipl_type { 32#define IPL_UNKNOWN_STR "unknown"
33 IPL_TYPE_NONE = 1, 33#define IPL_CCW_STR "ccw"
34 IPL_TYPE_UNKNOWN = 2, 34#define IPL_FCP_STR "fcp"
35 IPL_TYPE_CCW = 4, 35#define IPL_FCP_DUMP_STR "fcp_dump"
36 IPL_TYPE_FCP = 8, 36#define IPL_NSS_STR "nss"
37 IPL_TYPE_NSS = 16,
38};
39
40#define IPL_NONE_STR "none"
41#define IPL_UNKNOWN_STR "unknown"
42#define IPL_CCW_STR "ccw"
43#define IPL_FCP_STR "fcp"
44#define IPL_NSS_STR "nss"
45
46/*
47 * Must be in data section since the bss section
48 * is not cleared when these are accessed.
49 */
50u16 ipl_devno __attribute__((__section__(".data"))) = 0;
51u32 ipl_flags __attribute__((__section__(".data"))) = 0;
52 37
53static char *ipl_type_str(enum ipl_type type) 38static char *ipl_type_str(enum ipl_type type)
54{ 39{
55 switch (type) { 40 switch (type) {
56 case IPL_TYPE_NONE:
57 return IPL_NONE_STR;
58 case IPL_TYPE_CCW: 41 case IPL_TYPE_CCW:
59 return IPL_CCW_STR; 42 return IPL_CCW_STR;
60 case IPL_TYPE_FCP: 43 case IPL_TYPE_FCP:
61 return IPL_FCP_STR; 44 return IPL_FCP_STR;
45 case IPL_TYPE_FCP_DUMP:
46 return IPL_FCP_DUMP_STR;
62 case IPL_TYPE_NSS: 47 case IPL_TYPE_NSS:
63 return IPL_NSS_STR; 48 return IPL_NSS_STR;
64 case IPL_TYPE_UNKNOWN: 49 case IPL_TYPE_UNKNOWN:
@@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type)
67 } 52 }
68} 53}
69 54
55enum dump_type {
56 DUMP_TYPE_NONE = 1,
57 DUMP_TYPE_CCW = 2,
58 DUMP_TYPE_FCP = 4,
59};
60
61#define DUMP_NONE_STR "none"
62#define DUMP_CCW_STR "ccw"
63#define DUMP_FCP_STR "fcp"
64
65static char *dump_type_str(enum dump_type type)
66{
67 switch (type) {
68 case DUMP_TYPE_NONE:
69 return DUMP_NONE_STR;
70 case DUMP_TYPE_CCW:
71 return DUMP_CCW_STR;
72 case DUMP_TYPE_FCP:
73 return DUMP_FCP_STR;
74 default:
75 return NULL;
76 }
77}
78
79/*
80 * Must be in data section since the bss section
81 * is not cleared when these are accessed.
82 */
83static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
84u32 ipl_flags __attribute__((__section__(".data"))) = 0;
85
70enum ipl_method { 86enum ipl_method {
71 IPL_METHOD_NONE, 87 REIPL_METHOD_CCW_CIO,
72 IPL_METHOD_CCW_CIO, 88 REIPL_METHOD_CCW_DIAG,
73 IPL_METHOD_CCW_DIAG, 89 REIPL_METHOD_CCW_VM,
74 IPL_METHOD_CCW_VM, 90 REIPL_METHOD_FCP_RO_DIAG,
75 IPL_METHOD_FCP_RO_DIAG, 91 REIPL_METHOD_FCP_RW_DIAG,
76 IPL_METHOD_FCP_RW_DIAG, 92 REIPL_METHOD_FCP_RO_VM,
77 IPL_METHOD_FCP_RO_VM, 93 REIPL_METHOD_FCP_DUMP,
78 IPL_METHOD_NSS, 94 REIPL_METHOD_NSS,
95 REIPL_METHOD_DEFAULT,
96};
97
98enum dump_method {
99 DUMP_METHOD_NONE,
100 DUMP_METHOD_CCW_CIO,
101 DUMP_METHOD_CCW_DIAG,
102 DUMP_METHOD_CCW_VM,
103 DUMP_METHOD_FCP_DIAG,
79}; 104};
80 105
81enum shutdown_action { 106enum shutdown_action {
@@ -107,15 +132,15 @@ static int diag308_set_works = 0;
107static int reipl_capabilities = IPL_TYPE_UNKNOWN; 132static int reipl_capabilities = IPL_TYPE_UNKNOWN;
108 133
109static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 134static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
110static enum ipl_method reipl_method = IPL_METHOD_NONE; 135static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
111static struct ipl_parameter_block *reipl_block_fcp; 136static struct ipl_parameter_block *reipl_block_fcp;
112static struct ipl_parameter_block *reipl_block_ccw; 137static struct ipl_parameter_block *reipl_block_ccw;
113 138
114static char reipl_nss_name[NSS_NAME_SIZE + 1]; 139static char reipl_nss_name[NSS_NAME_SIZE + 1];
115 140
116static int dump_capabilities = IPL_TYPE_NONE; 141static int dump_capabilities = DUMP_TYPE_NONE;
117static enum ipl_type dump_type = IPL_TYPE_NONE; 142static enum dump_type dump_type = DUMP_TYPE_NONE;
118static enum ipl_method dump_method = IPL_METHOD_NONE; 143static enum dump_method dump_method = DUMP_METHOD_NONE;
119static struct ipl_parameter_block *dump_block_fcp; 144static struct ipl_parameter_block *dump_block_fcp;
120static struct ipl_parameter_block *dump_block_ccw; 145static struct ipl_parameter_block *dump_block_ccw;
121 146
@@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr)
134 : "d" (subcode) : "cc", "memory"); 159 : "d" (subcode) : "cc", "memory");
135 return _rc; 160 return _rc;
136} 161}
162EXPORT_SYMBOL_GPL(diag308);
137 163
138/* SYSFS */ 164/* SYSFS */
139 165
@@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs)
197 * ipl section 223 * ipl section
198 */ 224 */
199 225
200static enum ipl_type ipl_get_type(void) 226static __init enum ipl_type get_ipl_type(void)
201{ 227{
202 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 228 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
203 229
@@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void)
211 return IPL_TYPE_UNKNOWN; 237 return IPL_TYPE_UNKNOWN;
212 if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) 238 if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
213 return IPL_TYPE_UNKNOWN; 239 return IPL_TYPE_UNKNOWN;
240 if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
241 return IPL_TYPE_FCP_DUMP;
214 return IPL_TYPE_FCP; 242 return IPL_TYPE_FCP;
215} 243}
216 244
245void __init setup_ipl_info(void)
246{
247 ipl_info.type = get_ipl_type();
248 switch (ipl_info.type) {
249 case IPL_TYPE_CCW:
250 ipl_info.data.ccw.dev_id.devno = ipl_devno;
251 ipl_info.data.ccw.dev_id.ssid = 0;
252 break;
253 case IPL_TYPE_FCP:
254 case IPL_TYPE_FCP_DUMP:
255 ipl_info.data.fcp.dev_id.devno =
256 IPL_PARMBLOCK_START->ipl_info.fcp.devno;
257 ipl_info.data.fcp.dev_id.ssid = 0;
258 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
259 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
260 break;
261 case IPL_TYPE_NSS:
262 strncpy(ipl_info.data.nss.name, kernel_nss_name,
263 sizeof(ipl_info.data.nss.name));
264 break;
265 case IPL_TYPE_UNKNOWN:
266 default:
267 /* We have no info to copy */
268 break;
269 }
270}
271
272struct ipl_info ipl_info;
273EXPORT_SYMBOL_GPL(ipl_info);
274
217static ssize_t ipl_type_show(struct subsystem *subsys, char *page) 275static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
218{ 276{
219 return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); 277 return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
220} 278}
221 279
222static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 280static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
@@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
225{ 283{
226 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 284 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
227 285
228 switch (ipl_get_type()) { 286 switch (ipl_info.type) {
229 case IPL_TYPE_CCW: 287 case IPL_TYPE_CCW:
230 return sprintf(page, "0.0.%04x\n", ipl_devno); 288 return sprintf(page, "0.0.%04x\n", ipl_devno);
231 case IPL_TYPE_FCP: 289 case IPL_TYPE_FCP:
290 case IPL_TYPE_FCP_DUMP:
232 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); 291 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
233 default: 292 default:
234 return 0; 293 return 0;
@@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type)
485 switch(type) { 544 switch(type) {
486 case IPL_TYPE_CCW: 545 case IPL_TYPE_CCW:
487 if (MACHINE_IS_VM) 546 if (MACHINE_IS_VM)
488 reipl_method = IPL_METHOD_CCW_VM; 547 reipl_method = REIPL_METHOD_CCW_VM;
489 else 548 else
490 reipl_method = IPL_METHOD_CCW_CIO; 549 reipl_method = REIPL_METHOD_CCW_CIO;
491 break; 550 break;
492 case IPL_TYPE_FCP: 551 case IPL_TYPE_FCP:
493 if (diag308_set_works) 552 if (diag308_set_works)
494 reipl_method = IPL_METHOD_FCP_RW_DIAG; 553 reipl_method = REIPL_METHOD_FCP_RW_DIAG;
495 else if (MACHINE_IS_VM) 554 else if (MACHINE_IS_VM)
496 reipl_method = IPL_METHOD_FCP_RO_VM; 555 reipl_method = REIPL_METHOD_FCP_RO_VM;
497 else 556 else
498 reipl_method = IPL_METHOD_FCP_RO_DIAG; 557 reipl_method = REIPL_METHOD_FCP_RO_DIAG;
558 break;
559 case IPL_TYPE_FCP_DUMP:
560 reipl_method = REIPL_METHOD_FCP_DUMP;
499 break; 561 break;
500 case IPL_TYPE_NSS: 562 case IPL_TYPE_NSS:
501 reipl_method = IPL_METHOD_NSS; 563 reipl_method = REIPL_METHOD_NSS;
564 break;
565 case IPL_TYPE_UNKNOWN:
566 reipl_method = REIPL_METHOD_DEFAULT;
502 break; 567 break;
503 default: 568 default:
504 reipl_method = IPL_METHOD_NONE; 569 BUG();
505 } 570 }
506 reipl_type = type; 571 reipl_type = type;
507 return 0; 572 return 0;
@@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = {
579 644
580/* dump type */ 645/* dump type */
581 646
582static int dump_set_type(enum ipl_type type) 647static int dump_set_type(enum dump_type type)
583{ 648{
584 if (!(dump_capabilities & type)) 649 if (!(dump_capabilities & type))
585 return -EINVAL; 650 return -EINVAL;
586 switch(type) { 651 switch(type) {
587 case IPL_TYPE_CCW: 652 case DUMP_TYPE_CCW:
588 if (MACHINE_IS_VM) 653 if (MACHINE_IS_VM)
589 dump_method = IPL_METHOD_CCW_VM; 654 dump_method = DUMP_METHOD_CCW_VM;
590 else 655 else
591 dump_method = IPL_METHOD_CCW_CIO; 656 dump_method = DUMP_METHOD_CCW_CIO;
592 break; 657 break;
593 case IPL_TYPE_FCP: 658 case DUMP_TYPE_FCP:
594 dump_method = IPL_METHOD_FCP_RW_DIAG; 659 dump_method = DUMP_METHOD_FCP_DIAG;
595 break; 660 break;
596 default: 661 default:
597 dump_method = IPL_METHOD_NONE; 662 dump_method = DUMP_METHOD_NONE;
598 } 663 }
599 dump_type = type; 664 dump_type = type;
600 return 0; 665 return 0;
@@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type)
602 667
603static ssize_t dump_type_show(struct subsystem *subsys, char *page) 668static ssize_t dump_type_show(struct subsystem *subsys, char *page)
604{ 669{
605 return sprintf(page, "%s\n", ipl_type_str(dump_type)); 670 return sprintf(page, "%s\n", dump_type_str(dump_type));
606} 671}
607 672
608static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, 673static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
@@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
610{ 675{
611 int rc = -EINVAL; 676 int rc = -EINVAL;
612 677
613 if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) 678 if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
614 rc = dump_set_type(IPL_TYPE_NONE); 679 rc = dump_set_type(DUMP_TYPE_NONE);
615 else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) 680 else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
616 rc = dump_set_type(IPL_TYPE_CCW); 681 rc = dump_set_type(DUMP_TYPE_CCW);
617 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) 682 else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
618 rc = dump_set_type(IPL_TYPE_FCP); 683 rc = dump_set_type(DUMP_TYPE_FCP);
619 return (rc != 0) ? rc : len; 684 return (rc != 0) ? rc : len;
620} 685}
621 686
@@ -664,14 +729,14 @@ void do_reipl(void)
664 char loadparm[LOADPARM_LEN + 1]; 729 char loadparm[LOADPARM_LEN + 1];
665 730
666 switch (reipl_method) { 731 switch (reipl_method) {
667 case IPL_METHOD_CCW_CIO: 732 case REIPL_METHOD_CCW_CIO:
668 devid.devno = reipl_block_ccw->ipl_info.ccw.devno; 733 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
669 if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) 734 if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
670 diag308(DIAG308_IPL, NULL); 735 diag308(DIAG308_IPL, NULL);
671 devid.ssid = 0; 736 devid.ssid = 0;
672 reipl_ccw_dev(&devid); 737 reipl_ccw_dev(&devid);
673 break; 738 break;
674 case IPL_METHOD_CCW_VM: 739 case REIPL_METHOD_CCW_VM:
675 reipl_get_ascii_loadparm(loadparm); 740 reipl_get_ascii_loadparm(loadparm);
676 if (strlen(loadparm) == 0) 741 if (strlen(loadparm) == 0)
677 sprintf(buf, "IPL %X", 742 sprintf(buf, "IPL %X",
@@ -681,30 +746,32 @@ void do_reipl(void)
681 reipl_block_ccw->ipl_info.ccw.devno, loadparm); 746 reipl_block_ccw->ipl_info.ccw.devno, loadparm);
682 __cpcmd(buf, NULL, 0, NULL); 747 __cpcmd(buf, NULL, 0, NULL);
683 break; 748 break;
684 case IPL_METHOD_CCW_DIAG: 749 case REIPL_METHOD_CCW_DIAG:
685 diag308(DIAG308_SET, reipl_block_ccw); 750 diag308(DIAG308_SET, reipl_block_ccw);
686 diag308(DIAG308_IPL, NULL); 751 diag308(DIAG308_IPL, NULL);
687 break; 752 break;
688 case IPL_METHOD_FCP_RW_DIAG: 753 case REIPL_METHOD_FCP_RW_DIAG:
689 diag308(DIAG308_SET, reipl_block_fcp); 754 diag308(DIAG308_SET, reipl_block_fcp);
690 diag308(DIAG308_IPL, NULL); 755 diag308(DIAG308_IPL, NULL);
691 break; 756 break;
692 case IPL_METHOD_FCP_RO_DIAG: 757 case REIPL_METHOD_FCP_RO_DIAG:
693 diag308(DIAG308_IPL, NULL); 758 diag308(DIAG308_IPL, NULL);
694 break; 759 break;
695 case IPL_METHOD_FCP_RO_VM: 760 case REIPL_METHOD_FCP_RO_VM:
696 __cpcmd("IPL", NULL, 0, NULL); 761 __cpcmd("IPL", NULL, 0, NULL);
697 break; 762 break;
698 case IPL_METHOD_NSS: 763 case REIPL_METHOD_NSS:
699 sprintf(buf, "IPL %s", reipl_nss_name); 764 sprintf(buf, "IPL %s", reipl_nss_name);
700 __cpcmd(buf, NULL, 0, NULL); 765 __cpcmd(buf, NULL, 0, NULL);
701 break; 766 break;
702 case IPL_METHOD_NONE: 767 case REIPL_METHOD_DEFAULT:
703 default:
704 if (MACHINE_IS_VM) 768 if (MACHINE_IS_VM)
705 __cpcmd("IPL", NULL, 0, NULL); 769 __cpcmd("IPL", NULL, 0, NULL);
706 diag308(DIAG308_IPL, NULL); 770 diag308(DIAG308_IPL, NULL);
707 break; 771 break;
772 case REIPL_METHOD_FCP_DUMP:
773 default:
774 break;
708 } 775 }
709 signal_processor(smp_processor_id(), sigp_stop_and_store_status); 776 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
710} 777}
@@ -715,28 +782,28 @@ static void do_dump(void)
715 static char buf[100]; 782 static char buf[100];
716 783
717 switch (dump_method) { 784 switch (dump_method) {
718 case IPL_METHOD_CCW_CIO: 785 case DUMP_METHOD_CCW_CIO:
719 smp_send_stop(); 786 smp_send_stop();
720 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 787 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
721 devid.ssid = 0; 788 devid.ssid = 0;
722 reipl_ccw_dev(&devid); 789 reipl_ccw_dev(&devid);
723 break; 790 break;
724 case IPL_METHOD_CCW_VM: 791 case DUMP_METHOD_CCW_VM:
725 smp_send_stop(); 792 smp_send_stop();
726 sprintf(buf, "STORE STATUS"); 793 sprintf(buf, "STORE STATUS");
727 __cpcmd(buf, NULL, 0, NULL); 794 __cpcmd(buf, NULL, 0, NULL);
728 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); 795 sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
729 __cpcmd(buf, NULL, 0, NULL); 796 __cpcmd(buf, NULL, 0, NULL);
730 break; 797 break;
731 case IPL_METHOD_CCW_DIAG: 798 case DUMP_METHOD_CCW_DIAG:
732 diag308(DIAG308_SET, dump_block_ccw); 799 diag308(DIAG308_SET, dump_block_ccw);
733 diag308(DIAG308_DUMP, NULL); 800 diag308(DIAG308_DUMP, NULL);
734 break; 801 break;
735 case IPL_METHOD_FCP_RW_DIAG: 802 case DUMP_METHOD_FCP_DIAG:
736 diag308(DIAG308_SET, dump_block_fcp); 803 diag308(DIAG308_SET, dump_block_fcp);
737 diag308(DIAG308_DUMP, NULL); 804 diag308(DIAG308_DUMP, NULL);
738 break; 805 break;
739 case IPL_METHOD_NONE: 806 case DUMP_METHOD_NONE:
740 default: 807 default:
741 return; 808 return;
742 } 809 }
@@ -777,12 +844,13 @@ static int __init ipl_init(void)
777 rc = firmware_register(&ipl_subsys); 844 rc = firmware_register(&ipl_subsys);
778 if (rc) 845 if (rc)
779 return rc; 846 return rc;
780 switch (ipl_get_type()) { 847 switch (ipl_info.type) {
781 case IPL_TYPE_CCW: 848 case IPL_TYPE_CCW:
782 rc = sysfs_create_group(&ipl_subsys.kset.kobj, 849 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
783 &ipl_ccw_attr_group); 850 &ipl_ccw_attr_group);
784 break; 851 break;
785 case IPL_TYPE_FCP: 852 case IPL_TYPE_FCP:
853 case IPL_TYPE_FCP_DUMP:
786 rc = ipl_register_fcp_files(); 854 rc = ipl_register_fcp_files();
787 break; 855 break;
788 case IPL_TYPE_NSS: 856 case IPL_TYPE_NSS:
@@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void)
852 /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ 920 /* FIXME: check for diag308_set_works when enabling diag ccw reipl */
853 if (!MACHINE_IS_VM) 921 if (!MACHINE_IS_VM)
854 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; 922 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
855 if (ipl_get_type() == IPL_TYPE_CCW) 923 if (ipl_info.type == IPL_TYPE_CCW)
856 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 924 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
857 reipl_capabilities |= IPL_TYPE_CCW; 925 reipl_capabilities |= IPL_TYPE_CCW;
858 return 0; 926 return 0;
@@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void)
862{ 930{
863 int rc; 931 int rc;
864 932
865 if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) 933 if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP))
866 return 0; 934 return 0;
867 if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) 935 if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP))
868 make_attrs_ro(reipl_fcp_attrs); 936 make_attrs_ro(reipl_fcp_attrs);
869 937
870 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); 938 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
@@ -875,7 +943,7 @@ static int __init reipl_fcp_init(void)
875 free_page((unsigned long)reipl_block_fcp); 943 free_page((unsigned long)reipl_block_fcp);
876 return rc; 944 return rc;
877 } 945 }
878 if (ipl_get_type() == IPL_TYPE_FCP) { 946 if (ipl_info.type == IPL_TYPE_FCP) {
879 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); 947 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
880 } else { 948 } else {
881 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; 949 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
@@ -909,7 +977,7 @@ static int __init reipl_init(void)
909 rc = reipl_nss_init(); 977 rc = reipl_nss_init();
910 if (rc) 978 if (rc)
911 return rc; 979 return rc;
912 rc = reipl_set_type(ipl_get_type()); 980 rc = reipl_set_type(ipl_info.type);
913 if (rc) 981 if (rc)
914 return rc; 982 return rc;
915 return 0; 983 return 0;
@@ -931,7 +999,7 @@ static int __init dump_ccw_init(void)
931 dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; 999 dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
932 dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; 1000 dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
933 dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; 1001 dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
934 dump_capabilities |= IPL_TYPE_CCW; 1002 dump_capabilities |= DUMP_TYPE_CCW;
935 return 0; 1003 return 0;
936} 1004}
937 1005
@@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void)
956 dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; 1024 dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
957 dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; 1025 dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
958 dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; 1026 dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
959 dump_capabilities |= IPL_TYPE_FCP; 1027 dump_capabilities |= DUMP_TYPE_FCP;
960 return 0; 1028 return 0;
961} 1029}
962 1030
@@ -995,7 +1063,7 @@ static int __init dump_init(void)
995 rc = dump_fcp_init(); 1063 rc = dump_fcp_init();
996 if (rc) 1064 if (rc)
997 return rc; 1065 return rc;
998 dump_set_type(IPL_TYPE_NONE); 1066 dump_set_type(DUMP_TYPE_NONE);
999 return 0; 1067 return 0;
1000} 1068}
1001 1069
@@ -1038,6 +1106,27 @@ static int __init s390_ipl_init(void)
1038 1106
1039__initcall(s390_ipl_init); 1107__initcall(s390_ipl_init);
1040 1108
1109void __init ipl_save_parameters(void)
1110{
1111 struct cio_iplinfo iplinfo;
1112 unsigned int *ipl_ptr;
1113 void *src, *dst;
1114
1115 if (cio_get_iplinfo(&iplinfo))
1116 return;
1117
1118 ipl_devno = iplinfo.devno;
1119 ipl_flags |= IPL_DEVNO_VALID;
1120 if (!iplinfo.is_qdio)
1121 return;
1122 ipl_flags |= IPL_PARMBLOCK_VALID;
1123 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
1124 src = (void *)(unsigned long)*ipl_ptr;
1125 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1126 memmove(dst, src, PAGE_SIZE);
1127 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
1128}
1129
1041static LIST_HEAD(rcall); 1130static LIST_HEAD(rcall);
1042static DEFINE_MUTEX(rcall_mutex); 1131static DEFINE_MUTEX(rcall_mutex);
1043 1132
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 39d1dd752529..59b4e796680a 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,7 @@
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/moduleloader.h> 33#include <linux/moduleloader.h>
34#include <linux/bug.h>
34 35
35#if 0 36#if 0
36#define DEBUGP printk 37#define DEBUGP printk
@@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr,
398 struct module *me) 399 struct module *me)
399{ 400{
400 vfree(me->arch.syminfo); 401 vfree(me->arch.syminfo);
401 return 0; 402 return module_bug_finalize(hdr, sechdrs, me);
402} 403}
403 404
404void module_arch_cleanup(struct module *mod) 405void module_arch_cleanup(struct module *mod)
405{ 406{
407 module_bug_cleanup(mod);
406} 408}
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 5acfac654f9d..11d9b0197626 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
280 return 0; 280 return 0;
281} 281}
282 282
283asmlinkage long sys_fork(struct pt_regs regs) 283asmlinkage long sys_fork(void)
284{ 284{
285 return do_fork(SIGCHLD, regs.gprs[15], &regs, 0, NULL, NULL); 285 struct pt_regs *regs = task_pt_regs(current);
286 return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
286} 287}
287 288
288asmlinkage long sys_clone(struct pt_regs regs) 289asmlinkage long sys_clone(void)
289{ 290{
290 unsigned long clone_flags; 291 struct pt_regs *regs = task_pt_regs(current);
291 unsigned long newsp; 292 unsigned long clone_flags;
293 unsigned long newsp;
292 int __user *parent_tidptr, *child_tidptr; 294 int __user *parent_tidptr, *child_tidptr;
293 295
294 clone_flags = regs.gprs[3]; 296 clone_flags = regs->gprs[3];
295 newsp = regs.orig_gpr2; 297 newsp = regs->orig_gpr2;
296 parent_tidptr = (int __user *) regs.gprs[4]; 298 parent_tidptr = (int __user *) regs->gprs[4];
297 child_tidptr = (int __user *) regs.gprs[5]; 299 child_tidptr = (int __user *) regs->gprs[5];
298 if (!newsp) 300 if (!newsp)
299 newsp = regs.gprs[15]; 301 newsp = regs->gprs[15];
300 return do_fork(clone_flags, newsp, &regs, 0, 302 return do_fork(clone_flags, newsp, regs, 0,
301 parent_tidptr, child_tidptr); 303 parent_tidptr, child_tidptr);
302} 304}
303 305
@@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs)
311 * do not have enough call-clobbered registers to hold all 313 * do not have enough call-clobbered registers to hold all
312 * the information you need. 314 * the information you need.
313 */ 315 */
314asmlinkage long sys_vfork(struct pt_regs regs) 316asmlinkage long sys_vfork(void)
315{ 317{
318 struct pt_regs *regs = task_pt_regs(current);
316 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 319 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
317 regs.gprs[15], &regs, 0, NULL, NULL); 320 regs->gprs[15], regs, 0, NULL, NULL);
321}
322
323asmlinkage void execve_tail(void)
324{
325 task_lock(current);
326 current->ptrace &= ~PT_DTRACE;
327 task_unlock(current);
328 current->thread.fp_regs.fpc = 0;
329 if (MACHINE_HAS_IEEE)
330 asm volatile("sfpc %0,%0" : : "d" (0));
318} 331}
319 332
320/* 333/*
321 * sys_execve() executes a new program. 334 * sys_execve() executes a new program.
322 */ 335 */
323asmlinkage long sys_execve(struct pt_regs regs) 336asmlinkage long sys_execve(void)
324{ 337{
325 int error; 338 struct pt_regs *regs = task_pt_regs(current);
326 char * filename; 339 char *filename;
327 340 unsigned long result;
328 filename = getname((char __user *) regs.orig_gpr2); 341 int rc;
329 error = PTR_ERR(filename); 342
330 if (IS_ERR(filename)) 343 filename = getname((char __user *) regs->orig_gpr2);
331 goto out; 344 if (IS_ERR(filename)) {
332 error = do_execve(filename, (char __user * __user *) regs.gprs[3], 345 result = PTR_ERR(filename);
333 (char __user * __user *) regs.gprs[4], &regs); 346 goto out;
334 if (error == 0) {
335 task_lock(current);
336 current->ptrace &= ~PT_DTRACE;
337 task_unlock(current);
338 current->thread.fp_regs.fpc = 0;
339 if (MACHINE_HAS_IEEE)
340 asm volatile("sfpc %0,%0" : : "d" (0));
341 } 347 }
342 putname(filename); 348 rc = do_execve(filename, (char __user * __user *) regs->gprs[3],
349 (char __user * __user *) regs->gprs[4], regs);
350 if (rc) {
351 result = rc;
352 goto out_putname;
353 }
354 execve_tail();
355 result = regs->gprs[2];
356out_putname:
357 putname(filename);
343out: 358out:
344 return error; 359 return result;
345} 360}
346 361
347
348/* 362/*
349 * fill in the FPU structure for a core dump. 363 * fill in the FPU structure for a core dump.
350 */ 364 */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 863c8d08c026..3dfd0985861c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -285,6 +285,26 @@ static void __init conmode_default(void)
285 } 285 }
286} 286}
287 287
288#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
289static void __init setup_zfcpdump(unsigned int console_devno)
290{
291 static char str[64];
292
293 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
294 return;
295 if (console_devno != -1)
296 sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x",
297 ipl_info.data.fcp.dev_id.devno, console_devno);
298 else
299 sprintf(str, "cio_ignore=all,!0.0.%04x",
300 ipl_info.data.fcp.dev_id.devno);
301 strcat(COMMAND_LINE, str);
302 console_loglevel = 2;
303}
304#else
305static inline void setup_zfcpdump(unsigned int console_devno) {}
306#endif /* CONFIG_ZFCPDUMP */
307
288#ifdef CONFIG_SMP 308#ifdef CONFIG_SMP
289void (*_machine_restart)(char *command) = machine_restart_smp; 309void (*_machine_restart)(char *command) = machine_restart_smp;
290void (*_machine_halt)(void) = machine_halt_smp; 310void (*_machine_halt)(void) = machine_halt_smp;
@@ -586,13 +606,20 @@ setup_resources(void)
586 } 606 }
587} 607}
588 608
609unsigned long real_memory_size;
610EXPORT_SYMBOL_GPL(real_memory_size);
611
589static void __init setup_memory_end(void) 612static void __init setup_memory_end(void)
590{ 613{
591 unsigned long real_size, memory_size; 614 unsigned long memory_size;
592 unsigned long max_mem, max_phys; 615 unsigned long max_mem, max_phys;
593 int i; 616 int i;
594 617
595 memory_size = real_size = 0; 618#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
619 if (ipl_info.type == IPL_TYPE_FCP_DUMP)
620 memory_end = ZFCPDUMP_HSA_SIZE;
621#endif
622 memory_size = 0;
596 max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; 623 max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
597 memory_end &= PAGE_MASK; 624 memory_end &= PAGE_MASK;
598 625
@@ -601,7 +628,8 @@ static void __init setup_memory_end(void)
601 for (i = 0; i < MEMORY_CHUNKS; i++) { 628 for (i = 0; i < MEMORY_CHUNKS; i++) {
602 struct mem_chunk *chunk = &memory_chunk[i]; 629 struct mem_chunk *chunk = &memory_chunk[i];
603 630
604 real_size = max(real_size, chunk->addr + chunk->size); 631 real_memory_size = max(real_memory_size,
632 chunk->addr + chunk->size);
605 if (chunk->addr >= max_mem) { 633 if (chunk->addr >= max_mem) {
606 memset(chunk, 0, sizeof(*chunk)); 634 memset(chunk, 0, sizeof(*chunk));
607 continue; 635 continue;
@@ -765,6 +793,7 @@ setup_arch(char **cmdline_p)
765 793
766 parse_early_param(); 794 parse_early_param();
767 795
796 setup_ipl_info();
768 setup_memory_end(); 797 setup_memory_end();
769 setup_addressing_mode(); 798 setup_addressing_mode();
770 setup_memory(); 799 setup_memory();
@@ -782,6 +811,9 @@ setup_arch(char **cmdline_p)
782 811
783 /* Setup default console */ 812 /* Setup default console */
784 conmode_default(); 813 conmode_default();
814
815 /* Setup zfcpdump support */
816 setup_zfcpdump(console_devno);
785} 817}
786 818
787void print_cpu_info(struct cpuinfo_S390 *cpuinfo) 819void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 554f9cf7499c..3c41907799a1 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
102} 102}
103 103
104asmlinkage long 104asmlinkage long
105sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 105sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
106 struct pt_regs *regs)
107{ 106{
107 struct pt_regs *regs = task_pt_regs(current);
108 return do_sigaltstack(uss, uoss, regs->gprs[15]); 108 return do_sigaltstack(uss, uoss, regs->gprs[15]);
109} 109}
110 110
@@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
163 return 0; 163 return 0;
164} 164}
165 165
166asmlinkage long sys_sigreturn(struct pt_regs *regs) 166asmlinkage long sys_sigreturn(void)
167{ 167{
168 struct pt_regs *regs = task_pt_regs(current);
168 sigframe __user *frame = (sigframe __user *)regs->gprs[15]; 169 sigframe __user *frame = (sigframe __user *)regs->gprs[15];
169 sigset_t set; 170 sigset_t set;
170 171
@@ -189,8 +190,9 @@ badframe:
189 return 0; 190 return 0;
190} 191}
191 192
192asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) 193asmlinkage long sys_rt_sigreturn(void)
193{ 194{
195 struct pt_regs *regs = task_pt_regs(current);
194 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; 196 rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
195 sigset_t set; 197 sigset_t set;
196 198
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 97764f710bb7..3754e2031b39 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * arch/s390/kernel/smp.c 2 * arch/s390/kernel/smp.c
3 * 3 *
4 * Copyright (C) IBM Corp. 1999,2006 4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * Heiko Carstens (heiko.carstens@de.ibm.com) 7 * Heiko Carstens (heiko.carstens@de.ibm.com)
8 * 8 *
9 * based on other smp stuff by 9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar 11 * (c) 1998 Ingo Molnar
12 * 12 *
@@ -31,6 +31,7 @@
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/cpu.h> 32#include <linux/cpu.h>
33#include <linux/timex.h> 33#include <linux/timex.h>
34#include <linux/bootmem.h>
34#include <asm/ipl.h> 35#include <asm/ipl.h>
35#include <asm/setup.h> 36#include <asm/setup.h>
36#include <asm/sigp.h> 37#include <asm/sigp.h>
@@ -40,17 +41,19 @@
40#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
41#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
42#include <asm/timer.h> 43#include <asm/timer.h>
43 44#include <asm/lowcore.h>
44extern volatile int __cpu_logical_map[];
45 45
46/* 46/*
47 * An array with a pointer the lowcore of every CPU. 47 * An array with a pointer the lowcore of every CPU.
48 */ 48 */
49
50struct _lowcore *lowcore_ptr[NR_CPUS]; 49struct _lowcore *lowcore_ptr[NR_CPUS];
50EXPORT_SYMBOL(lowcore_ptr);
51 51
52cpumask_t cpu_online_map = CPU_MASK_NONE; 52cpumask_t cpu_online_map = CPU_MASK_NONE;
53EXPORT_SYMBOL(cpu_online_map);
54
53cpumask_t cpu_possible_map = CPU_MASK_NONE; 55cpumask_t cpu_possible_map = CPU_MASK_NONE;
56EXPORT_SYMBOL(cpu_possible_map);
54 57
55static struct task_struct *current_set[NR_CPUS]; 58static struct task_struct *current_set[NR_CPUS];
56 59
@@ -70,7 +73,7 @@ struct call_data_struct {
70 int wait; 73 int wait;
71}; 74};
72 75
73static struct call_data_struct * call_data; 76static struct call_data_struct *call_data;
74 77
75/* 78/*
76 * 'Call function' interrupt callback 79 * 'Call function' interrupt callback
@@ -150,8 +153,8 @@ out:
150 * 153 *
151 * Run a function on all other CPUs. 154 * Run a function on all other CPUs.
152 * 155 *
153 * You must not call this function with disabled interrupts or from a 156 * You must not call this function with disabled interrupts, from a
154 * hardware interrupt handler. You may call it from a bottom half. 157 * hardware interrupt handler or from a bottom half.
155 */ 158 */
156int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 159int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
157 int wait) 160 int wait)
@@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function);
177 * 180 *
178 * Run a function on one processor. 181 * Run a function on one processor.
179 * 182 *
180 * You must not call this function with disabled interrupts or from a 183 * You must not call this function with disabled interrupts, from a
181 * hardware interrupt handler. You may call it from a bottom half. 184 * hardware interrupt handler or from a bottom half.
182 */ 185 */
183int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, 186int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
184 int wait, int cpu) 187 int wait, int cpu)
185{ 188{
186 cpumask_t map = CPU_MASK_NONE; 189 cpumask_t map = CPU_MASK_NONE;
187 190
@@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on);
195 198
196static void do_send_stop(void) 199static void do_send_stop(void)
197{ 200{
198 int cpu, rc; 201 int cpu, rc;
199 202
200 /* stop all processors */ 203 /* stop all processors */
201 for_each_online_cpu(cpu) { 204 for_each_online_cpu(cpu) {
202 if (cpu == smp_processor_id()) 205 if (cpu == smp_processor_id())
203 continue; 206 continue;
@@ -209,9 +212,9 @@ static void do_send_stop(void)
209 212
210static void do_store_status(void) 213static void do_store_status(void)
211{ 214{
212 int cpu, rc; 215 int cpu, rc;
213 216
214 /* store status of all processors in their lowcores (real 0) */ 217 /* store status of all processors in their lowcores (real 0) */
215 for_each_online_cpu(cpu) { 218 for_each_online_cpu(cpu) {
216 if (cpu == smp_processor_id()) 219 if (cpu == smp_processor_id())
217 continue; 220 continue;
@@ -219,8 +222,8 @@ static void do_store_status(void)
219 rc = signal_processor_p( 222 rc = signal_processor_p(
220 (__u32)(unsigned long) lowcore_ptr[cpu], cpu, 223 (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
221 sigp_store_status_at_address); 224 sigp_store_status_at_address);
222 } while(rc == sigp_busy); 225 } while (rc == sigp_busy);
223 } 226 }
224} 227}
225 228
226static void do_wait_for_stop(void) 229static void do_wait_for_stop(void)
@@ -231,7 +234,7 @@ static void do_wait_for_stop(void)
231 for_each_online_cpu(cpu) { 234 for_each_online_cpu(cpu) {
232 if (cpu == smp_processor_id()) 235 if (cpu == smp_processor_id())
233 continue; 236 continue;
234 while(!smp_cpu_not_running(cpu)) 237 while (!smp_cpu_not_running(cpu))
235 cpu_relax(); 238 cpu_relax();
236 } 239 }
237} 240}
@@ -245,7 +248,7 @@ void smp_send_stop(void)
245 /* Disable all interrupts/machine checks */ 248 /* Disable all interrupts/machine checks */
246 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); 249 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
247 250
248 /* write magic number to zero page (absolute 0) */ 251 /* write magic number to zero page (absolute 0) */
249 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; 252 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
250 253
251 /* stop other processors. */ 254 /* stop other processors. */
@@ -261,8 +264,7 @@ void smp_send_stop(void)
261/* 264/*
262 * Reboot, halt and power_off routines for SMP. 265 * Reboot, halt and power_off routines for SMP.
263 */ 266 */
264 267void machine_restart_smp(char *__unused)
265void machine_restart_smp(char * __unused)
266{ 268{
267 smp_send_stop(); 269 smp_send_stop();
268 do_reipl(); 270 do_reipl();
@@ -293,17 +295,17 @@ void machine_power_off_smp(void)
293 295
294static void do_ext_call_interrupt(__u16 code) 296static void do_ext_call_interrupt(__u16 code)
295{ 297{
296 unsigned long bits; 298 unsigned long bits;
297 299
298 /* 300 /*
299 * handle bit signal external calls 301 * handle bit signal external calls
300 * 302 *
301 * For the ec_schedule signal we have to do nothing. All the work 303 * For the ec_schedule signal we have to do nothing. All the work
302 * is done automatically when we return from the interrupt. 304 * is done automatically when we return from the interrupt.
303 */ 305 */
304 bits = xchg(&S390_lowcore.ext_call_fast, 0); 306 bits = xchg(&S390_lowcore.ext_call_fast, 0);
305 307
306 if (test_bit(ec_call_function, &bits)) 308 if (test_bit(ec_call_function, &bits))
307 do_call_function(); 309 do_call_function();
308} 310}
309 311
@@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code)
313 */ 315 */
314static void smp_ext_bitcall(int cpu, ec_bit_sig sig) 316static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
315{ 317{
316 /* 318 /*
317 * Set signaling bit in lowcore of target cpu and kick it 319 * Set signaling bit in lowcore of target cpu and kick it
318 */ 320 */
319 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 321 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
320 while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) 322 while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
321 udelay(10); 323 udelay(10);
322} 324}
323 325
@@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info)
332 334
333void smp_ptlb_all(void) 335void smp_ptlb_all(void)
334{ 336{
335 on_each_cpu(smp_ptlb_callback, NULL, 0, 1); 337 on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
336} 338}
337EXPORT_SYMBOL(smp_ptlb_all); 339EXPORT_SYMBOL(smp_ptlb_all);
338#endif /* ! CONFIG_64BIT */ 340#endif /* ! CONFIG_64BIT */
@@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all);
344 */ 346 */
345void smp_send_reschedule(int cpu) 347void smp_send_reschedule(int cpu)
346{ 348{
347 smp_ext_bitcall(cpu, ec_schedule); 349 smp_ext_bitcall(cpu, ec_schedule);
348} 350}
349 351
350/* 352/*
@@ -358,11 +360,12 @@ struct ec_creg_mask_parms {
358/* 360/*
359 * callback for setting/clearing control bits 361 * callback for setting/clearing control bits
360 */ 362 */
361static void smp_ctl_bit_callback(void *info) { 363static void smp_ctl_bit_callback(void *info)
364{
362 struct ec_creg_mask_parms *pp = info; 365 struct ec_creg_mask_parms *pp = info;
363 unsigned long cregs[16]; 366 unsigned long cregs[16];
364 int i; 367 int i;
365 368
366 __ctl_store(cregs, 0, 15); 369 __ctl_store(cregs, 0, 15);
367 for (i = 0; i <= 15; i++) 370 for (i = 0; i <= 15; i++)
368 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; 371 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
@@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit)
381 parms.orvals[cr] = 1 << bit; 384 parms.orvals[cr] = 1 << bit;
382 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 385 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
383} 386}
387EXPORT_SYMBOL(smp_ctl_set_bit);
384 388
385/* 389/*
386 * Clear a bit in a control register of all cpus 390 * Clear a bit in a control register of all cpus
@@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit)
394 parms.andvals[cr] = ~(1L << bit); 398 parms.andvals[cr] = ~(1L << bit);
395 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 399 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
396} 400}
401EXPORT_SYMBOL(smp_ctl_clear_bit);
402
403#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
404
405/*
406 * zfcpdump_prefix_array holds prefix registers for the following scenario:
407 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
408 * save its prefix registers, since they get lost, when switching from 31 bit
409 * to 64 bit.
410 */
411unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
412 __attribute__((__section__(".data")));
413
414static void __init smp_get_save_areas(void)
415{
416 unsigned int cpu, cpu_num, rc;
417 __u16 boot_cpu_addr;
418
419 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
420 return;
421 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
422 cpu_num = 1;
423 for (cpu = 0; cpu <= 65535; cpu++) {
424 if ((u16) cpu == boot_cpu_addr)
425 continue;
426 __cpu_logical_map[1] = (__u16) cpu;
427 if (signal_processor(1, sigp_sense) == sigp_not_operational)
428 continue;
429 if (cpu_num >= NR_CPUS) {
430 printk("WARNING: Registers for cpu %i are not "
431 "saved, since dump kernel was compiled with"
432 "NR_CPUS=%i!\n", cpu_num, NR_CPUS);
433 continue;
434 }
435 zfcpdump_save_areas[cpu_num] =
436 alloc_bootmem(sizeof(union save_area));
437 while (1) {
438 rc = signal_processor(1, sigp_stop_and_store_status);
439 if (rc != sigp_busy)
440 break;
441 cpu_relax();
442 }
443 memcpy(zfcpdump_save_areas[cpu_num],
444 (void *)(unsigned long) store_prefix() +
445 SAVE_AREA_BASE, SAVE_AREA_SIZE);
446#ifdef __s390x__
447 /* copy original prefix register */
448 zfcpdump_save_areas[cpu_num]->s390x.pref_reg =
449 zfcpdump_prefix_array[cpu_num];
450#endif
451 cpu_num++;
452 }
453}
454
455union save_area *zfcpdump_save_areas[NR_CPUS + 1];
456EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
457
458#else
459#define smp_get_save_areas() do { } while (0)
460#endif
397 461
398/* 462/*
399 * Lets check how many CPUs we have. 463 * Lets check how many CPUs we have.
400 */ 464 */
401 465
402static unsigned int 466static unsigned int __init smp_count_cpus(void)
403__init smp_count_cpus(void)
404{ 467{
405 unsigned int cpu, num_cpus; 468 unsigned int cpu, num_cpus;
406 __u16 boot_cpu_addr; 469 __u16 boot_cpu_addr;
@@ -416,31 +479,30 @@ __init smp_count_cpus(void)
416 if ((__u16) cpu == boot_cpu_addr) 479 if ((__u16) cpu == boot_cpu_addr)
417 continue; 480 continue;
418 __cpu_logical_map[1] = (__u16) cpu; 481 __cpu_logical_map[1] = (__u16) cpu;
419 if (signal_processor(1, sigp_sense) == 482 if (signal_processor(1, sigp_sense) == sigp_not_operational)
420 sigp_not_operational)
421 continue; 483 continue;
422 num_cpus++; 484 num_cpus++;
423 } 485 }
424 486
425 printk("Detected %d CPU's\n",(int) num_cpus); 487 printk("Detected %d CPU's\n", (int) num_cpus);
426 printk("Boot cpu address %2X\n", boot_cpu_addr); 488 printk("Boot cpu address %2X\n", boot_cpu_addr);
427 489
428 return num_cpus; 490 return num_cpus;
429} 491}
430 492
431/* 493/*
432 * Activate a secondary processor. 494 * Activate a secondary processor.
433 */ 495 */
434int __devinit start_secondary(void *cpuvoid) 496int __devinit start_secondary(void *cpuvoid)
435{ 497{
436 /* Setup the cpu */ 498 /* Setup the cpu */
437 cpu_init(); 499 cpu_init();
438 preempt_disable(); 500 preempt_disable();
439 /* Enable TOD clock interrupts on the secondary cpu. */ 501 /* Enable TOD clock interrupts on the secondary cpu. */
440 init_cpu_timer(); 502 init_cpu_timer();
441#ifdef CONFIG_VIRT_TIMER 503#ifdef CONFIG_VIRT_TIMER
442 /* Enable cpu timer interrupts on the secondary cpu. */ 504 /* Enable cpu timer interrupts on the secondary cpu. */
443 init_cpu_vtimer(); 505 init_cpu_vtimer();
444#endif 506#endif
445 /* Enable pfault pseudo page faults on this cpu. */ 507 /* Enable pfault pseudo page faults on this cpu. */
446 pfault_init(); 508 pfault_init();
@@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid)
449 cpu_set(smp_processor_id(), cpu_online_map); 511 cpu_set(smp_processor_id(), cpu_online_map);
450 /* Switch on interrupts */ 512 /* Switch on interrupts */
451 local_irq_enable(); 513 local_irq_enable();
452 /* Print info about this processor */ 514 /* Print info about this processor */
453 print_cpu_info(&S390_lowcore.cpu_data); 515 print_cpu_info(&S390_lowcore.cpu_data);
454 /* cpu_idle will call schedule for us */ 516 /* cpu_idle will call schedule for us */
455 cpu_idle(); 517 cpu_idle();
456 return 0; 518 return 0;
457} 519}
458 520
459static void __init smp_create_idle(unsigned int cpu) 521static void __init smp_create_idle(unsigned int cpu)
@@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu)
470 current_set[cpu] = p; 532 current_set[cpu] = p;
471} 533}
472 534
473/* Reserving and releasing of CPUs */ 535static int cpu_stopped(int cpu)
474
475static DEFINE_SPINLOCK(smp_reserve_lock);
476static int smp_cpu_reserved[NR_CPUS];
477
478int
479smp_get_cpu(cpumask_t cpu_mask)
480{
481 unsigned long flags;
482 int cpu;
483
484 spin_lock_irqsave(&smp_reserve_lock, flags);
485 /* Try to find an already reserved cpu. */
486 for_each_cpu_mask(cpu, cpu_mask) {
487 if (smp_cpu_reserved[cpu] != 0) {
488 smp_cpu_reserved[cpu]++;
489 /* Found one. */
490 goto out;
491 }
492 }
493 /* Reserve a new cpu from cpu_mask. */
494 for_each_cpu_mask(cpu, cpu_mask) {
495 if (cpu_online(cpu)) {
496 smp_cpu_reserved[cpu]++;
497 goto out;
498 }
499 }
500 cpu = -ENODEV;
501out:
502 spin_unlock_irqrestore(&smp_reserve_lock, flags);
503 return cpu;
504}
505
506void
507smp_put_cpu(int cpu)
508{
509 unsigned long flags;
510
511 spin_lock_irqsave(&smp_reserve_lock, flags);
512 smp_cpu_reserved[cpu]--;
513 spin_unlock_irqrestore(&smp_reserve_lock, flags);
514}
515
516static int
517cpu_stopped(int cpu)
518{ 536{
519 __u32 status; 537 __u32 status;
520 538
521 /* Check for stopped state */ 539 /* Check for stopped state */
522 if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { 540 if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
541 sigp_status_stored) {
523 if (status & 0x40) 542 if (status & 0x40)
524 return 1; 543 return 1;
525 } 544 }
@@ -528,14 +547,13 @@ cpu_stopped(int cpu)
528 547
529/* Upping and downing of CPUs */ 548/* Upping and downing of CPUs */
530 549
531int 550int __cpu_up(unsigned int cpu)
532__cpu_up(unsigned int cpu)
533{ 551{
534 struct task_struct *idle; 552 struct task_struct *idle;
535 struct _lowcore *cpu_lowcore; 553 struct _lowcore *cpu_lowcore;
536 struct stack_frame *sf; 554 struct stack_frame *sf;
537 sigp_ccode ccode; 555 sigp_ccode ccode;
538 int curr_cpu; 556 int curr_cpu;
539 557
540 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { 558 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
541 __cpu_logical_map[cpu] = (__u16) curr_cpu; 559 __cpu_logical_map[cpu] = (__u16) curr_cpu;
@@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu)
548 566
549 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), 567 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
550 cpu, sigp_set_prefix); 568 cpu, sigp_set_prefix);
551 if (ccode){ 569 if (ccode) {
552 printk("sigp_set_prefix failed for cpu %d " 570 printk("sigp_set_prefix failed for cpu %d "
553 "with condition code %d\n", 571 "with condition code %d\n",
554 (int) cpu, (int) ccode); 572 (int) cpu, (int) ccode);
@@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu)
556 } 574 }
557 575
558 idle = current_set[cpu]; 576 idle = current_set[cpu];
559 cpu_lowcore = lowcore_ptr[cpu]; 577 cpu_lowcore = lowcore_ptr[cpu];
560 cpu_lowcore->kernel_stack = (unsigned long) 578 cpu_lowcore->kernel_stack = (unsigned long)
561 task_stack_page(idle) + (THREAD_SIZE); 579 task_stack_page(idle) + THREAD_SIZE;
562 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 580 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
563 - sizeof(struct pt_regs) 581 - sizeof(struct pt_regs)
564 - sizeof(struct stack_frame)); 582 - sizeof(struct stack_frame));
@@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu)
570 " stam 0,15,0(%0)" 588 " stam 0,15,0(%0)"
571 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); 589 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
572 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 590 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
573 cpu_lowcore->current_task = (unsigned long) idle; 591 cpu_lowcore->current_task = (unsigned long) idle;
574 cpu_lowcore->cpu_data.cpu_nr = cpu; 592 cpu_lowcore->cpu_data.cpu_nr = cpu;
575 eieio(); 593 eieio();
576 594
577 while (signal_processor(cpu,sigp_restart) == sigp_busy) 595 while (signal_processor(cpu, sigp_restart) == sigp_busy)
578 udelay(10); 596 udelay(10);
579 597
580 while (!cpu_online(cpu)) 598 while (!cpu_online(cpu))
@@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void)
589{ 607{
590 unsigned int phy_cpus, pos_cpus, cpu; 608 unsigned int phy_cpus, pos_cpus, cpu;
591 609
610 smp_get_save_areas();
592 phy_cpus = smp_count_cpus(); 611 phy_cpus = smp_count_cpus();
593 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); 612 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
594 613
@@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s)
620} 639}
621early_param("possible_cpus", setup_possible_cpus); 640early_param("possible_cpus", setup_possible_cpus);
622 641
623int 642int __cpu_disable(void)
624__cpu_disable(void)
625{ 643{
626 unsigned long flags;
627 struct ec_creg_mask_parms cr_parms; 644 struct ec_creg_mask_parms cr_parms;
628 int cpu = smp_processor_id(); 645 int cpu = smp_processor_id();
629 646
630 spin_lock_irqsave(&smp_reserve_lock, flags);
631 if (smp_cpu_reserved[cpu] != 0) {
632 spin_unlock_irqrestore(&smp_reserve_lock, flags);
633 return -EBUSY;
634 }
635 cpu_clear(cpu, cpu_online_map); 647 cpu_clear(cpu, cpu_online_map);
636 648
637 /* Disable pfault pseudo page faults on this cpu. */ 649 /* Disable pfault pseudo page faults on this cpu. */
@@ -642,24 +654,23 @@ __cpu_disable(void)
642 654
643 /* disable all external interrupts */ 655 /* disable all external interrupts */
644 cr_parms.orvals[0] = 0; 656 cr_parms.orvals[0] = 0;
645 cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | 657 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
646 1<<11 | 1<<10 | 1<< 6 | 1<< 4); 658 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
647 /* disable all I/O interrupts */ 659 /* disable all I/O interrupts */
648 cr_parms.orvals[6] = 0; 660 cr_parms.orvals[6] = 0;
649 cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | 661 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
650 1<<27 | 1<<26 | 1<<25 | 1<<24); 662 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
651 /* disable most machine checks */ 663 /* disable most machine checks */
652 cr_parms.orvals[14] = 0; 664 cr_parms.orvals[14] = 0;
653 cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); 665 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
666 1 << 25 | 1 << 24);
654 667
655 smp_ctl_bit_callback(&cr_parms); 668 smp_ctl_bit_callback(&cr_parms);
656 669
657 spin_unlock_irqrestore(&smp_reserve_lock, flags);
658 return 0; 670 return 0;
659} 671}
660 672
661void 673void __cpu_die(unsigned int cpu)
662__cpu_die(unsigned int cpu)
663{ 674{
664 /* Wait until target cpu is down */ 675 /* Wait until target cpu is down */
665 while (!smp_cpu_not_running(cpu)) 676 while (!smp_cpu_not_running(cpu))
@@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu)
667 printk("Processor %d spun down\n", cpu); 678 printk("Processor %d spun down\n", cpu);
668} 679}
669 680
670void 681void cpu_die(void)
671cpu_die(void)
672{ 682{
673 idle_task_exit(); 683 idle_task_exit();
674 signal_processor(smp_processor_id(), sigp_stop); 684 signal_processor(smp_processor_id(), sigp_stop);
675 BUG(); 685 BUG();
676 for(;;); 686 for (;;);
677} 687}
678 688
679#endif /* CONFIG_HOTPLUG_CPU */ 689#endif /* CONFIG_HOTPLUG_CPU */
@@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
686{ 696{
687 unsigned long stack; 697 unsigned long stack;
688 unsigned int cpu; 698 unsigned int cpu;
689 int i; 699 int i;
690 700
691 /* request the 0x1201 emergency signal external interrupt */ 701 /* request the 0x1201 emergency signal external interrupt */
692 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 702 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
693 panic("Couldn't request external interrupt 0x1201"); 703 panic("Couldn't request external interrupt 0x1201");
694 memset(lowcore_ptr,0,sizeof(lowcore_ptr)); 704 memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
695 /* 705 /*
696 * Initialize prefix pages and stacks for all possible cpus 706 * Initialize prefix pages and stacks for all possible cpus
697 */ 707 */
698 print_cpu_info(&S390_lowcore.cpu_data); 708 print_cpu_info(&S390_lowcore.cpu_data);
699 709
700 for_each_possible_cpu(i) { 710 for_each_possible_cpu(i) {
701 lowcore_ptr[i] = (struct _lowcore *) 711 lowcore_ptr[i] = (struct _lowcore *)
702 __get_free_pages(GFP_KERNEL|GFP_DMA, 712 __get_free_pages(GFP_KERNEL | GFP_DMA,
703 sizeof(void*) == 8 ? 1 : 0); 713 sizeof(void*) == 8 ? 1 : 0);
704 stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); 714 stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
705 if (lowcore_ptr[i] == NULL || stack == 0ULL) 715 if (!lowcore_ptr[i] || !stack)
706 panic("smp_boot_cpus failed to allocate memory\n"); 716 panic("smp_boot_cpus failed to allocate memory\n");
707 717
708 *(lowcore_ptr[i]) = S390_lowcore; 718 *(lowcore_ptr[i]) = S390_lowcore;
709 lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); 719 lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
710 stack = __get_free_pages(GFP_KERNEL,0); 720 stack = __get_free_pages(GFP_KERNEL, 0);
711 if (stack == 0ULL) 721 if (!stack)
712 panic("smp_boot_cpus failed to allocate memory\n"); 722 panic("smp_boot_cpus failed to allocate memory\n");
713 lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); 723 lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
714#ifndef CONFIG_64BIT 724#ifndef CONFIG_64BIT
715 if (MACHINE_HAS_IEEE) { 725 if (MACHINE_HAS_IEEE) {
716 lowcore_ptr[i]->extended_save_area_addr = 726 lowcore_ptr[i]->extended_save_area_addr =
717 (__u32) __get_free_pages(GFP_KERNEL,0); 727 (__u32) __get_free_pages(GFP_KERNEL, 0);
718 if (lowcore_ptr[i]->extended_save_area_addr == 0) 728 if (!lowcore_ptr[i]->extended_save_area_addr)
719 panic("smp_boot_cpus failed to " 729 panic("smp_boot_cpus failed to "
720 "allocate memory\n"); 730 "allocate memory\n");
721 } 731 }
@@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus)
754 */ 764 */
755int setup_profiling_timer(unsigned int multiplier) 765int setup_profiling_timer(unsigned int multiplier)
756{ 766{
757 return 0; 767 return 0;
758} 768}
759 769
760static DEFINE_PER_CPU(struct cpu, cpu_devices); 770static DEFINE_PER_CPU(struct cpu, cpu_devices);
761 771
772static ssize_t show_capability(struct sys_device *dev, char *buf)
773{
774 unsigned int capability;
775 int rc;
776
777 rc = get_cpu_capability(&capability);
778 if (rc)
779 return rc;
780 return sprintf(buf, "%u\n", capability);
781}
782static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
783
784static int __cpuinit smp_cpu_notify(struct notifier_block *self,
785 unsigned long action, void *hcpu)
786{
787 unsigned int cpu = (unsigned int)(long)hcpu;
788 struct cpu *c = &per_cpu(cpu_devices, cpu);
789 struct sys_device *s = &c->sysdev;
790
791 switch (action) {
792 case CPU_ONLINE:
793 if (sysdev_create_file(s, &attr_capability))
794 return NOTIFY_BAD;
795 break;
796 case CPU_DEAD:
797 sysdev_remove_file(s, &attr_capability);
798 break;
799 }
800 return NOTIFY_OK;
801}
802
803static struct notifier_block __cpuinitdata smp_cpu_nb = {
804 .notifier_call = smp_cpu_notify,
805};
806
762static int __init topology_init(void) 807static int __init topology_init(void)
763{ 808{
764 int cpu; 809 int cpu;
765 int ret; 810
811 register_cpu_notifier(&smp_cpu_nb);
766 812
767 for_each_possible_cpu(cpu) { 813 for_each_possible_cpu(cpu) {
768 struct cpu *c = &per_cpu(cpu_devices, cpu); 814 struct cpu *c = &per_cpu(cpu_devices, cpu);
815 struct sys_device *s = &c->sysdev;
769 816
770 c->hotpluggable = 1; 817 c->hotpluggable = 1;
771 ret = register_cpu(c, cpu); 818 register_cpu(c, cpu);
772 if (ret) 819 if (!cpu_online(cpu))
773 printk(KERN_WARNING "topology_init: register_cpu %d " 820 continue;
774 "failed (%d)\n", cpu, ret); 821 s = &c->sysdev;
822 sysdev_create_file(s, &attr_capability);
775 } 823 }
776 return 0; 824 return 0;
777} 825}
778
779subsys_initcall(topology_init); 826subsys_initcall(topology_init);
780
781EXPORT_SYMBOL(cpu_online_map);
782EXPORT_SYMBOL(cpu_possible_map);
783EXPORT_SYMBOL(lowcore_ptr);
784EXPORT_SYMBOL(smp_ctl_set_bit);
785EXPORT_SYMBOL(smp_ctl_clear_bit);
786EXPORT_SYMBOL(smp_get_cpu);
787EXPORT_SYMBOL(smp_put_cpu);
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 584ed95f3380..3a77c22cda78 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
266 return -EFAULT; 266 return -EFAULT;
267 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); 267 return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
268} 268}
269
270/*
271 * Do a system call from kernel instead of calling sys_execve so we
272 * end up with proper pt_regs.
273 */
274int kernel_execve(const char *filename, char *const argv[], char *const envp[])
275{
276 register const char *__arg1 asm("2") = filename;
277 register char *const*__arg2 asm("3") = argv;
278 register char *const*__arg3 asm("4") = envp;
279 register long __svcres asm("2");
280 asm volatile(
281 "svc %b1"
282 : "=d" (__svcres)
283 : "i" (__NR_execve),
284 "0" (__arg1),
285 "d" (__arg2),
286 "d" (__arg3) : "memory");
287 return __svcres;
288}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index c774f1069e10..cd8d321cd0c2 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -10,7 +10,7 @@
10 10
11NI_SYSCALL /* 0 */ 11NI_SYSCALL /* 0 */
12SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) 12SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
13SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue) 13SYSCALL(sys_fork,sys_fork,sys_fork)
14SYSCALL(sys_read,sys_read,sys32_read_wrapper) 14SYSCALL(sys_read,sys_read,sys32_read_wrapper)
15SYSCALL(sys_write,sys_write,sys32_write_wrapper) 15SYSCALL(sys_write,sys_write,sys32_write_wrapper)
16SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ 16SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */
@@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
19SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) 19SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
20SYSCALL(sys_link,sys_link,sys32_link_wrapper) 20SYSCALL(sys_link,sys_link,sys32_link_wrapper)
21SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ 21SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
22SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue) 22SYSCALL(sys_execve,sys_execve,sys32_execve)
23SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) 23SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
24SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ 24SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
25SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) 25SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
@@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) 127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) 128SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) 129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
130SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue) 130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
131SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ 131SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */
132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) 132SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) 133SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
134NI_SYSCALL /* modify_ldt for i386 */ 134NI_SYSCALL /* modify_ldt for i386 */
@@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ 181SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ 182SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) 183SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
184SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) 184SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
185SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) 185SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
186SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ 186SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
187SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) 187SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
@@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall
194SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) 194SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) 195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ 196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
197SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue) 197SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack)
198SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) 198SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
199NI_SYSCALL /* streams1 */ 199NI_SYSCALL /* streams1 */
200NI_SYSCALL /* streams2 */ 200NI_SYSCALL /* streams2 */
201SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */ 201SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
202SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper) 202SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
203SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper) 203SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
204SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper) 204SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e1ad464b6f20..711dae8da7ad 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code)
280} 280}
281 281
282static void etr_reset(void); 282static void etr_reset(void);
283static void etr_init(void);
284static void etr_ext_handler(__u16); 283static void etr_ext_handler(__u16);
285 284
286/* 285/*
@@ -355,7 +354,6 @@ void __init time_init(void)
355#ifdef CONFIG_VIRT_TIMER 354#ifdef CONFIG_VIRT_TIMER
356 vtime_init(); 355 vtime_init();
357#endif 356#endif
358 etr_init();
359} 357}
360 358
361/* 359/*
@@ -426,11 +424,11 @@ static struct etr_aib etr_port1;
426static int etr_port1_uptodate; 424static int etr_port1_uptodate;
427static unsigned long etr_events; 425static unsigned long etr_events;
428static struct timer_list etr_timer; 426static struct timer_list etr_timer;
429static struct tasklet_struct etr_tasklet;
430static DEFINE_PER_CPU(atomic_t, etr_sync_word); 427static DEFINE_PER_CPU(atomic_t, etr_sync_word);
431 428
432static void etr_timeout(unsigned long dummy); 429static void etr_timeout(unsigned long dummy);
433static void etr_tasklet_fn(unsigned long dummy); 430static void etr_work_fn(struct work_struct *work);
431static DECLARE_WORK(etr_work, etr_work_fn);
434 432
435/* 433/*
436 * The etr get_clock function. It will write the current clock value 434 * The etr get_clock function. It will write the current clock value
@@ -507,29 +505,31 @@ static void etr_reset(void)
507 } 505 }
508} 506}
509 507
510static void etr_init(void) 508static int __init etr_init(void)
511{ 509{
512 struct etr_aib aib; 510 struct etr_aib aib;
513 511
514 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 512 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
515 return; 513 return 0;
516 /* Check if this machine has the steai instruction. */ 514 /* Check if this machine has the steai instruction. */
517 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) 515 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
518 set_bit(ETR_FLAG_STEAI, &etr_flags); 516 set_bit(ETR_FLAG_STEAI, &etr_flags);
519 setup_timer(&etr_timer, etr_timeout, 0UL); 517 setup_timer(&etr_timer, etr_timeout, 0UL);
520 tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
521 if (!etr_port0_online && !etr_port1_online) 518 if (!etr_port0_online && !etr_port1_online)
522 set_bit(ETR_FLAG_EACCES, &etr_flags); 519 set_bit(ETR_FLAG_EACCES, &etr_flags);
523 if (etr_port0_online) { 520 if (etr_port0_online) {
524 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 521 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
525 tasklet_hi_schedule(&etr_tasklet); 522 schedule_work(&etr_work);
526 } 523 }
527 if (etr_port1_online) { 524 if (etr_port1_online) {
528 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); 525 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
529 tasklet_hi_schedule(&etr_tasklet); 526 schedule_work(&etr_work);
530 } 527 }
528 return 0;
531} 529}
532 530
531arch_initcall(etr_init);
532
533/* 533/*
534 * Two sorts of ETR machine checks. The architecture reads: 534 * Two sorts of ETR machine checks. The architecture reads:
535 * "When a machine-check niterruption occurs and if a switch-to-local or 535 * "When a machine-check niterruption occurs and if a switch-to-local or
@@ -549,7 +549,7 @@ void etr_switch_to_local(void)
549 return; 549 return;
550 etr_disable_sync_clock(NULL); 550 etr_disable_sync_clock(NULL);
551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
552 tasklet_hi_schedule(&etr_tasklet); 552 schedule_work(&etr_work);
553} 553}
554 554
555/* 555/*
@@ -564,7 +564,7 @@ void etr_sync_check(void)
564 return; 564 return;
565 etr_disable_sync_clock(NULL); 565 etr_disable_sync_clock(NULL);
566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
567 tasklet_hi_schedule(&etr_tasklet); 567 schedule_work(&etr_work);
568} 568}
569 569
570/* 570/*
@@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code)
591 * Both ports are not up-to-date now. 591 * Both ports are not up-to-date now.
592 */ 592 */
593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events); 593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
594 tasklet_hi_schedule(&etr_tasklet); 594 schedule_work(&etr_work);
595} 595}
596 596
597static void etr_timeout(unsigned long dummy) 597static void etr_timeout(unsigned long dummy)
598{ 598{
599 set_bit(ETR_EVENT_UPDATE, &etr_events); 599 set_bit(ETR_EVENT_UPDATE, &etr_events);
600 tasklet_hi_schedule(&etr_tasklet); 600 schedule_work(&etr_work);
601} 601}
602 602
603/* 603/*
@@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
927 if (!eacr.e0 && !eacr.e1) 927 if (!eacr.e0 && !eacr.e1)
928 return eacr; 928 return eacr;
929 929
930 /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ 930 /* Update port0 or port1 with aib stored in etr_work_fn. */
931 if (aib->esw.q == 0) { 931 if (aib->esw.q == 0) {
932 /* Information for port 0 stored. */ 932 /* Information for port 0 stored. */
933 if (eacr.p0 && !etr_port0_uptodate) { 933 if (eacr.p0 && !etr_port0_uptodate) {
@@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
1007 * particular this is the only function that calls etr_update_eacr(), 1007 * particular this is the only function that calls etr_update_eacr(),
1008 * it "controls" the etr control register. 1008 * it "controls" the etr control register.
1009 */ 1009 */
1010static void etr_tasklet_fn(unsigned long dummy) 1010static void etr_work_fn(struct work_struct *work)
1011{ 1011{
1012 unsigned long long now; 1012 unsigned long long now;
1013 struct etr_eacr eacr; 1013 struct etr_eacr eacr;
@@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
1220 return count; /* Nothing to do. */ 1220 return count; /* Nothing to do. */
1221 etr_port0_online = value; 1221 etr_port0_online = value;
1222 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 1222 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1223 tasklet_hi_schedule(&etr_tasklet); 1223 schedule_work(&etr_work);
1224 } else { 1224 } else {
1225 if (etr_port1_online == value) 1225 if (etr_port1_online == value)
1226 return count; /* Nothing to do. */ 1226 return count; /* Nothing to do. */
1227 etr_port1_online = value; 1227 etr_port1_online = value;
1228 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); 1228 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1229 tasklet_hi_schedule(&etr_tasklet); 1229 schedule_work(&etr_work);
1230 } 1230 }
1231 return count; 1231 return count;
1232} 1232}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f0e5a320e2ec..49dec830373a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -30,7 +30,7 @@
30#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33 33#include <linux/bug.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/uaccess.h> 35#include <asm/uaccess.h>
36#include <asm/io.h> 36#include <asm/io.h>
@@ -188,18 +188,31 @@ void dump_stack(void)
188 188
189EXPORT_SYMBOL(dump_stack); 189EXPORT_SYMBOL(dump_stack);
190 190
191static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
192{
193 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
194}
195
191void show_registers(struct pt_regs *regs) 196void show_registers(struct pt_regs *regs)
192{ 197{
193 mm_segment_t old_fs;
194 char *mode; 198 char *mode;
195 int i;
196 199
197 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 200 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
198 printk("%s PSW : %p %p", 201 printk("%s PSW : %p %p",
199 mode, (void *) regs->psw.mask, 202 mode, (void *) regs->psw.mask,
200 (void *) regs->psw.addr); 203 (void *) regs->psw.addr);
201 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 204 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
202 printk("%s GPRS: " FOURLONG, mode, 205 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
206 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
207 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
208 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
209 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
210 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
211 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
212#ifdef CONFIG_64BIT
213 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
214#endif
215 printk("\n%s GPRS: " FOURLONG, mode,
203 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 216 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
204 printk(" " FOURLONG, 217 printk(" " FOURLONG,
205 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 218 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs)
208 printk(" " FOURLONG, 221 printk(" " FOURLONG,
209 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 222 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
210 223
211#if 0 224 show_code(regs);
212 /* FIXME: this isn't needed any more but it changes the ksymoops
213 * input. To remove or not to remove ... */
214 save_access_regs(regs->acrs);
215 printk("%s ACRS: %08x %08x %08x %08x\n", mode,
216 regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
217 printk(" %08x %08x %08x %08x\n",
218 regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
219 printk(" %08x %08x %08x %08x\n",
220 regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
221 printk(" %08x %08x %08x %08x\n",
222 regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
223#endif
224
225 /*
226 * Print the first 20 byte of the instruction stream at the
227 * time of the fault.
228 */
229 old_fs = get_fs();
230 if (regs->psw.mask & PSW_MASK_PSTATE)
231 set_fs(USER_DS);
232 else
233 set_fs(KERNEL_DS);
234 printk("%s Code: ", mode);
235 for (i = 0; i < 20; i++) {
236 unsigned char c;
237 if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
238 printk(" Bad PSW.");
239 break;
240 }
241 printk("%02x ", c);
242 }
243 set_fs(old_fs);
244
245 printk("\n");
246} 225}
247 226
248/* This is called from fs/proc/array.c */ 227/* This is called from fs/proc/array.c */
@@ -318,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
318#endif 297#endif
319} 298}
320 299
300int is_valid_bugaddr(unsigned long addr)
301{
302 return 1;
303}
304
321static void __kprobes inline do_trap(long interruption_code, int signr, 305static void __kprobes inline do_trap(long interruption_code, int signr,
322 char *str, struct pt_regs *regs, 306 char *str, struct pt_regs *regs,
323 siginfo_t *info) 307 siginfo_t *info)
@@ -344,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
344 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 328 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
345 if (fixup) 329 if (fixup)
346 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 330 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
347 else 331 else {
348 die(str, regs, interruption_code); 332 enum bug_trap_type btt;
333
334 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN);
335 if (btt == BUG_TRAP_TYPE_WARN)
336 return;
337 die(str, regs, interruption_code);
338 }
349 } 339 }
350} 340}
351 341
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index c30716ae130c..418f6426a949 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
45 __ex_table : { *(__ex_table) } 45 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .; 46 __stop___ex_table = .;
47 47
48 BUG_TABLE
49
48 .data : { /* Data */ 50 .data : { /* Data */
49 *(.data) 51 *(.data)
50 CONSTRUCTORS 52 CONSTRUCTORS
@@ -77,6 +79,12 @@ SECTIONS
77 *(.init.text) 79 *(.init.text)
78 _einittext = .; 80 _einittext = .;
79 } 81 }
82 /*
83 * .exit.text is discarded at runtime, not link time,
84 * to deal with references from __bug_table
85 */
86 .exit.text : { *(.exit.text) }
87
80 .init.data : { *(.init.data) } 88 .init.data : { *(.init.data) }
81 . = ALIGN(256); 89 . = ALIGN(256);
82 __setup_start = .; 90 __setup_start = .;
@@ -116,7 +124,7 @@ SECTIONS
116 124
117 /* Sections to be discarded */ 125 /* Sections to be discarded */
118 /DISCARD/ : { 126 /DISCARD/ : {
119 *(.exit.text) *(.exit.data) *(.exitcall.exit) 127 *(.exit.data) *(.exitcall.exit)
120 } 128 }
121 129
122 /* Stabs debugging sections. */ 130 /* Stabs debugging sections. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9d5b02801b46..1e1a6ee2cac1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires)
128 S390_lowcore.last_update_timer = expires; 128 S390_lowcore.last_update_timer = expires;
129 129
130 /* store expire time for this CPU timer */ 130 /* store expire time for this CPU timer */
131 per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; 131 __get_cpu_var(virt_cpu_timer).to_expire = expires;
132} 132}
133#else 133#else
134static inline void set_vtimer(__u64 expires) 134static inline void set_vtimer(__u64 expires)
@@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires)
137 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 137 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
138 138
139 /* store expire time for this CPU timer */ 139 /* store expire time for this CPU timer */
140 per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; 140 __get_cpu_var(virt_cpu_timer).to_expire = expires;
141} 141}
142#endif 142#endif
143 143
@@ -145,7 +145,7 @@ static void start_cpu_timer(void)
145{ 145{
146 struct vtimer_queue *vt_list; 146 struct vtimer_queue *vt_list;
147 147
148 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 148 vt_list = &__get_cpu_var(virt_cpu_timer);
149 149
150 /* CPU timer interrupt is pending, don't reprogramm it */ 150 /* CPU timer interrupt is pending, don't reprogramm it */
151 if (vt_list->idle & 1LL<<63) 151 if (vt_list->idle & 1LL<<63)
@@ -159,7 +159,7 @@ static void stop_cpu_timer(void)
159{ 159{
160 struct vtimer_queue *vt_list; 160 struct vtimer_queue *vt_list;
161 161
162 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 162 vt_list = &__get_cpu_var(virt_cpu_timer);
163 163
164 /* nothing to do */ 164 /* nothing to do */
165 if (list_empty(&vt_list->list)) { 165 if (list_empty(&vt_list->list)) {
@@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list)
219 if (list_empty(cb_list)) 219 if (list_empty(cb_list))
220 return; 220 return;
221 221
222 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 222 vt_list = &__get_cpu_var(virt_cpu_timer);
223 223
224 list_for_each_entry_safe(event, tmp, cb_list, entry) { 224 list_for_each_entry_safe(event, tmp, cb_list, entry) {
225 fn = event->function; 225 fn = event->function;
@@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list)
244 */ 244 */
245static void do_cpu_timer_interrupt(__u16 error_code) 245static void do_cpu_timer_interrupt(__u16 error_code)
246{ 246{
247 int cpu;
248 __u64 next, delta; 247 __u64 next, delta;
249 struct vtimer_queue *vt_list; 248 struct vtimer_queue *vt_list;
250 struct vtimer_list *event, *tmp; 249 struct vtimer_list *event, *tmp;
@@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
253 struct list_head cb_list; 252 struct list_head cb_list;
254 253
255 INIT_LIST_HEAD(&cb_list); 254 INIT_LIST_HEAD(&cb_list);
256 cpu = smp_processor_id(); 255 vt_list = &__get_cpu_var(virt_cpu_timer);
257 vt_list = &per_cpu(virt_cpu_timer, cpu);
258 256
259 /* walk timer list, fire all expired events */ 257 /* walk timer list, fire all expired events */
260 spin_lock(&vt_list->lock); 258 spin_lock(&vt_list->lock);
@@ -534,7 +532,7 @@ void init_cpu_vtimer(void)
534 /* enable cpu timer interrupts */ 532 /* enable cpu timer interrupts */
535 __ctl_set_bit(0,10); 533 __ctl_set_bit(0,10);
536 534
537 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 535 vt_list = &__get_cpu_var(virt_cpu_timer);
538 INIT_LIST_HEAD(&vt_list->list); 536 INIT_LIST_HEAD(&vt_list->list);
539 spin_lock_init(&vt_list->lock); 537 spin_lock_init(&vt_list->lock);
540 vt_list->to_expire = 0; 538 vt_list->to_expire = 0;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7462aebd3eb6..2b76a879a7b5 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -26,9 +26,9 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h> 27#include <linux/hardirq.h>
28#include <linux/kprobes.h> 28#include <linux/kprobes.h>
29#include <linux/uaccess.h>
29 30
30#include <asm/system.h> 31#include <asm/system.h>
31#include <asm/uaccess.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/kdebug.h> 33#include <asm/kdebug.h>
34#include <asm/s390_ext.h> 34#include <asm/s390_ext.h>
@@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb)
63 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb); 63 return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
64} 64}
65 65
66static inline int notify_page_fault(enum die_val val, const char *str, 66static int __kprobes __notify_page_fault(struct pt_regs *regs, long err)
67 struct pt_regs *regs, long err, int trap, int sig)
68{ 67{
69 struct die_args args = { 68 struct die_args args = { .str = "page fault",
70 .regs = regs, 69 .trapnr = 14,
71 .str = str, 70 .signr = SIGSEGV };
72 .err = err, 71 args.regs = regs;
73 .trapnr = trap, 72 args.err = err;
74 .signr = sig 73 return atomic_notifier_call_chain(&notify_page_fault_chain,
75 }; 74 DIE_PAGE_FAULT, &args);
76 return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args); 75}
76
77static inline int notify_page_fault(struct pt_regs *regs, long err)
78{
79 if (unlikely(kprobe_running()))
80 return __notify_page_fault(regs, err);
81 return NOTIFY_DONE;
77} 82}
78#else 83#else
79static inline int notify_page_fault(enum die_val val, const char *str, 84static inline int notify_page_fault(struct pt_regs *regs, long err)
80 struct pt_regs *regs, long err, int trap, int sig)
81{ 85{
82 return NOTIFY_DONE; 86 return NOTIFY_DONE;
83} 87}
@@ -170,74 +174,127 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
170 force_sig_info(SIGSEGV, &si, current); 174 force_sig_info(SIGSEGV, &si, current);
171} 175}
172 176
177static void do_no_context(struct pt_regs *regs, unsigned long error_code,
178 unsigned long address)
179{
180 const struct exception_table_entry *fixup;
181
182 /* Are we prepared to handle this kernel fault? */
183 fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
184 if (fixup) {
185 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
186 return;
187 }
188
189 /*
190 * Oops. The kernel tried to access some bad page. We'll have to
191 * terminate things with extreme prejudice.
192 */
193 if (check_space(current) == 0)
194 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
195 " at virtual kernel address %p\n", (void *)address);
196 else
197 printk(KERN_ALERT "Unable to handle kernel paging request"
198 " at virtual user address %p\n", (void *)address);
199
200 die("Oops", regs, error_code);
201 do_exit(SIGKILL);
202}
203
204static void do_low_address(struct pt_regs *regs, unsigned long error_code)
205{
206 /* Low-address protection hit in kernel mode means
207 NULL pointer write access in kernel mode. */
208 if (regs->psw.mask & PSW_MASK_PSTATE) {
209 /* Low-address protection hit in user mode 'cannot happen'. */
210 die ("Low-address protection", regs, error_code);
211 do_exit(SIGKILL);
212 }
213
214 do_no_context(regs, error_code, 0);
215}
216
217/*
218 * We ran out of memory, or some other thing happened to us that made
219 * us unable to handle the page fault gracefully.
220 */
221static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
222 unsigned long address)
223{
224 struct task_struct *tsk = current;
225 struct mm_struct *mm = tsk->mm;
226
227 up_read(&mm->mmap_sem);
228 if (is_init(tsk)) {
229 yield();
230 down_read(&mm->mmap_sem);
231 return 1;
232 }
233 printk("VM: killing process %s\n", tsk->comm);
234 if (regs->psw.mask & PSW_MASK_PSTATE)
235 do_exit(SIGKILL);
236 do_no_context(regs, error_code, address);
237 return 0;
238}
239
240static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
241 unsigned long address)
242{
243 struct task_struct *tsk = current;
244 struct mm_struct *mm = tsk->mm;
245
246 up_read(&mm->mmap_sem);
247 /*
248 * Send a sigbus, regardless of whether we were in kernel
249 * or user mode.
250 */
251 tsk->thread.prot_addr = address;
252 tsk->thread.trap_no = error_code;
253 force_sig(SIGBUS, tsk);
254
255 /* Kernel mode? Handle exceptions or die */
256 if (!(regs->psw.mask & PSW_MASK_PSTATE))
257 do_no_context(regs, error_code, address);
258}
259
173#ifdef CONFIG_S390_EXEC_PROTECT 260#ifdef CONFIG_S390_EXEC_PROTECT
174extern long sys_sigreturn(struct pt_regs *regs); 261extern long sys_sigreturn(struct pt_regs *regs);
175extern long sys_rt_sigreturn(struct pt_regs *regs); 262extern long sys_rt_sigreturn(struct pt_regs *regs);
176extern long sys32_sigreturn(struct pt_regs *regs); 263extern long sys32_sigreturn(struct pt_regs *regs);
177extern long sys32_rt_sigreturn(struct pt_regs *regs); 264extern long sys32_rt_sigreturn(struct pt_regs *regs);
178 265
179static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, 266static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
180 int rt) 267 unsigned long address, unsigned long error_code)
181{ 268{
269 u16 instruction;
270 int rc, compat;
271
272 pagefault_disable();
273 rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
274 pagefault_enable();
275 if (rc)
276 return -EFAULT;
277
182 up_read(&mm->mmap_sem); 278 up_read(&mm->mmap_sem);
183 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 279 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
184#ifdef CONFIG_COMPAT 280#ifdef CONFIG_COMPAT
185 if (test_tsk_thread_flag(current, TIF_31BIT)) { 281 compat = test_tsk_thread_flag(current, TIF_31BIT);
186 if (rt) 282 if (compat && instruction == 0x0a77)
187 sys32_rt_sigreturn(regs); 283 sys32_sigreturn(regs);
188 else 284 else if (compat && instruction == 0x0aad)
189 sys32_sigreturn(regs); 285 sys32_rt_sigreturn(regs);
190 return;
191 }
192#endif /* CONFIG_COMPAT */
193 if (rt)
194 sys_rt_sigreturn(regs);
195 else 286 else
287#endif
288 if (instruction == 0x0a77)
196 sys_sigreturn(regs); 289 sys_sigreturn(regs);
197 return; 290 else if (instruction == 0x0aad)
198} 291 sys_rt_sigreturn(regs);
199
200static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
201 unsigned long address, unsigned long error_code)
202{
203 pgd_t *pgd;
204 pmd_t *pmd;
205 pte_t *pte;
206 u16 *instruction;
207 unsigned long pfn, uaddr = regs->psw.addr;
208
209 spin_lock(&mm->page_table_lock);
210 pgd = pgd_offset(mm, uaddr);
211 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
212 goto out_fault;
213 pmd = pmd_offset(pgd, uaddr);
214 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
215 goto out_fault;
216 pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr);
217 if (!pte || !pte_present(*pte))
218 goto out_fault;
219 pfn = pte_pfn(*pte);
220 if (!pfn_valid(pfn))
221 goto out_fault;
222 spin_unlock(&mm->page_table_lock);
223
224 instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
225 if (*instruction == 0x0a77)
226 do_sigreturn(mm, regs, 0);
227 else if (*instruction == 0x0aad)
228 do_sigreturn(mm, regs, 1);
229 else { 292 else {
230 printk("- XXX - do_exception: task = %s, primary, NO EXEC "
231 "-> SIGSEGV\n", current->comm);
232 up_read(&mm->mmap_sem);
233 current->thread.prot_addr = address; 293 current->thread.prot_addr = address;
234 current->thread.trap_no = error_code; 294 current->thread.trap_no = error_code;
235 do_sigsegv(regs, error_code, SEGV_MAPERR, address); 295 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
236 } 296 }
237 return 0; 297 return 0;
238out_fault:
239 spin_unlock(&mm->page_table_lock);
240 return -EFAULT;
241} 298}
242#endif /* CONFIG_S390_EXEC_PROTECT */ 299#endif /* CONFIG_S390_EXEC_PROTECT */
243 300
@@ -253,49 +310,23 @@ out_fault:
253 * 3b Region third trans. -> Not present (nullification) 310 * 3b Region third trans. -> Not present (nullification)
254 */ 311 */
255static inline void 312static inline void
256do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) 313do_exception(struct pt_regs *regs, unsigned long error_code, int write)
257{ 314{
258 struct task_struct *tsk; 315 struct task_struct *tsk;
259 struct mm_struct *mm; 316 struct mm_struct *mm;
260 struct vm_area_struct * vma; 317 struct vm_area_struct *vma;
261 unsigned long address; 318 unsigned long address;
262 const struct exception_table_entry *fixup;
263 int si_code;
264 int space; 319 int space;
320 int si_code;
265 321
266 tsk = current; 322 if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
267 mm = tsk->mm;
268
269 if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
270 SIGSEGV) == NOTIFY_STOP)
271 return; 323 return;
272 324
273 /* 325 tsk = current;
274 * Check for low-address protection. This needs to be treated 326 mm = tsk->mm;
275 * as a special case because the translation exception code
276 * field is not guaranteed to contain valid data in this case.
277 */
278 if (is_protection && !(S390_lowcore.trans_exc_code & 4)) {
279
280 /* Low-address protection hit in kernel mode means
281 NULL pointer write access in kernel mode. */
282 if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
283 address = 0;
284 space = 0;
285 goto no_context;
286 }
287
288 /* Low-address protection hit in user mode 'cannot happen'. */
289 die ("Low-address protection", regs, error_code);
290 do_exit(SIGKILL);
291 }
292 327
293 /* 328 /* get the failing address and the affected space */
294 * get the failing address 329 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
295 * more specific the segment and page table portion of
296 * the address
297 */
298 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
299 space = check_space(tsk); 330 space = check_space(tsk);
300 331
301 /* 332 /*
@@ -313,7 +344,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
313 */ 344 */
314 local_irq_enable(); 345 local_irq_enable();
315 346
316 down_read(&mm->mmap_sem); 347 down_read(&mm->mmap_sem);
317 348
318 si_code = SEGV_MAPERR; 349 si_code = SEGV_MAPERR;
319 vma = find_vma(mm, address); 350 vma = find_vma(mm, address);
@@ -330,19 +361,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
330 return; 361 return;
331#endif 362#endif
332 363
333 if (vma->vm_start <= address) 364 if (vma->vm_start <= address)
334 goto good_area; 365 goto good_area;
335 if (!(vma->vm_flags & VM_GROWSDOWN)) 366 if (!(vma->vm_flags & VM_GROWSDOWN))
336 goto bad_area; 367 goto bad_area;
337 if (expand_stack(vma, address)) 368 if (expand_stack(vma, address))
338 goto bad_area; 369 goto bad_area;
339/* 370/*
340 * Ok, we have a good vm_area for this memory access, so 371 * Ok, we have a good vm_area for this memory access, so
341 * we can handle it.. 372 * we can handle it..
342 */ 373 */
343good_area: 374good_area:
344 si_code = SEGV_ACCERR; 375 si_code = SEGV_ACCERR;
345 if (!is_protection) { 376 if (!write) {
346 /* page not present, check vm flags */ 377 /* page not present, check vm flags */
347 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) 378 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
348 goto bad_area; 379 goto bad_area;
@@ -357,7 +388,7 @@ survive:
357 * make sure we exit gracefully rather than endlessly redo 388 * make sure we exit gracefully rather than endlessly redo
358 * the fault. 389 * the fault.
359 */ 390 */
360 switch (handle_mm_fault(mm, vma, address, is_protection)) { 391 switch (handle_mm_fault(mm, vma, address, write)) {
361 case VM_FAULT_MINOR: 392 case VM_FAULT_MINOR:
362 tsk->min_flt++; 393 tsk->min_flt++;
363 break; 394 break;
@@ -365,9 +396,12 @@ survive:
365 tsk->maj_flt++; 396 tsk->maj_flt++;
366 break; 397 break;
367 case VM_FAULT_SIGBUS: 398 case VM_FAULT_SIGBUS:
368 goto do_sigbus; 399 do_sigbus(regs, error_code, address);
400 return;
369 case VM_FAULT_OOM: 401 case VM_FAULT_OOM:
370 goto out_of_memory; 402 if (do_out_of_memory(regs, error_code, address))
403 goto survive;
404 return;
371 default: 405 default:
372 BUG(); 406 BUG();
373 } 407 }
@@ -385,75 +419,34 @@ survive:
385 * Fix it, but check if it's kernel or user first.. 419 * Fix it, but check if it's kernel or user first..
386 */ 420 */
387bad_area: 421bad_area:
388 up_read(&mm->mmap_sem); 422 up_read(&mm->mmap_sem);
389 423
390 /* User mode accesses just cause a SIGSEGV */ 424 /* User mode accesses just cause a SIGSEGV */
391 if (regs->psw.mask & PSW_MASK_PSTATE) { 425 if (regs->psw.mask & PSW_MASK_PSTATE) {
392 tsk->thread.prot_addr = address; 426 tsk->thread.prot_addr = address;
393 tsk->thread.trap_no = error_code; 427 tsk->thread.trap_no = error_code;
394 do_sigsegv(regs, error_code, si_code, address); 428 do_sigsegv(regs, error_code, si_code, address);
395 return; 429 return;
396 } 430 }
397 431
398no_context: 432no_context:
399 /* Are we prepared to handle this kernel fault? */ 433 do_no_context(regs, error_code, address);
400 fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
401 if (fixup) {
402 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
403 return;
404 }
405
406/*
407 * Oops. The kernel tried to access some bad page. We'll have to
408 * terminate things with extreme prejudice.
409 */
410 if (space == 0)
411 printk(KERN_ALERT "Unable to handle kernel pointer dereference"
412 " at virtual kernel address %p\n", (void *)address);
413 else
414 printk(KERN_ALERT "Unable to handle kernel paging request"
415 " at virtual user address %p\n", (void *)address);
416
417 die("Oops", regs, error_code);
418 do_exit(SIGKILL);
419
420
421/*
422 * We ran out of memory, or some other thing happened to us that made
423 * us unable to handle the page fault gracefully.
424*/
425out_of_memory:
426 up_read(&mm->mmap_sem);
427 if (is_init(tsk)) {
428 yield();
429 down_read(&mm->mmap_sem);
430 goto survive;
431 }
432 printk("VM: killing process %s\n", tsk->comm);
433 if (regs->psw.mask & PSW_MASK_PSTATE)
434 do_exit(SIGKILL);
435 goto no_context;
436
437do_sigbus:
438 up_read(&mm->mmap_sem);
439
440 /*
441 * Send a sigbus, regardless of whether we were in kernel
442 * or user mode.
443 */
444 tsk->thread.prot_addr = address;
445 tsk->thread.trap_no = error_code;
446 force_sig(SIGBUS, tsk);
447
448 /* Kernel mode? Handle exceptions or die */
449 if (!(regs->psw.mask & PSW_MASK_PSTATE))
450 goto no_context;
451} 434}
452 435
453void __kprobes do_protection_exception(struct pt_regs *regs, 436void __kprobes do_protection_exception(struct pt_regs *regs,
454 unsigned long error_code) 437 unsigned long error_code)
455{ 438{
439 /* Protection exception is supressing, decrement psw address. */
456 regs->psw.addr -= (error_code >> 16); 440 regs->psw.addr -= (error_code >> 16);
441 /*
442 * Check for low-address protection. This needs to be treated
443 * as a special case because the translation exception code
444 * field is not guaranteed to contain valid data in this case.
445 */
446 if (unlikely(!(S390_lowcore.trans_exc_code & 4))) {
447 do_low_address(regs, error_code);
448 return;
449 }
457 do_exception(regs, 4, 1); 450 do_exception(regs, 4, 1);
458} 451}
459 452
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb5dc62f0d9c..e71929db8b06 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device)
398 398
399 if (device->state == device->target) 399 if (device->state == device->target)
400 wake_up(&dasd_init_waitq); 400 wake_up(&dasd_init_waitq);
401
402 /* let user-space know that the device status changed */
403 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
401} 404}
402 405
403/* 406/*
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index ed70852cc915..6a89cefe99bb 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/debug.h> 20#include <asm/debug.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/ipl.h>
22 23
23/* This is ugly... */ 24/* This is ugly... */
24#define PRINTK_HEADER "dasd_devmap:" 25#define PRINTK_HEADER "dasd_devmap:"
@@ -133,6 +134,8 @@ dasd_call_setup(char *str)
133__setup ("dasd=", dasd_call_setup); 134__setup ("dasd=", dasd_call_setup);
134#endif /* #ifndef MODULE */ 135#endif /* #ifndef MODULE */
135 136
137#define DASD_IPLDEV "ipldev"
138
136/* 139/*
137 * Read a device busid/devno from a string. 140 * Read a device busid/devno from a string.
138 */ 141 */
@@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 144{
142 int val, old_style; 145 int val, old_style;
143 146
147 /* Interpret ipldev busid */
148 if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
149 if (ipl_info.type != IPL_TYPE_CCW) {
150 MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw "
151 "device");
152 return -EINVAL;
153 }
154 *id0 = 0;
155 *id1 = ipl_info.data.ccw.dev_id.ssid;
156 *devno = ipl_info.data.ccw.dev_id.devno;
157 *str += strlen(DASD_IPLDEV);
158
159 return 0;
160 }
144 /* check for leading '0x' */ 161 /* check for leading '0x' */
145 old_style = 0; 162 old_style = 0;
146 if ((*str)[0] == '0' && (*str)[1] == 'x') { 163 if ((*str)[0] == '0' && (*str)[1] == 'x') {
@@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
829static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); 846static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
830 847
831static ssize_t 848static ssize_t
849dasd_device_status_show(struct device *dev, struct device_attribute *attr,
850 char *buf)
851{
852 struct dasd_device *device;
853 ssize_t len;
854
855 device = dasd_device_from_cdev(to_ccwdev(dev));
856 if (!IS_ERR(device)) {
857 switch (device->state) {
858 case DASD_STATE_NEW:
859 len = snprintf(buf, PAGE_SIZE, "new\n");
860 break;
861 case DASD_STATE_KNOWN:
862 len = snprintf(buf, PAGE_SIZE, "detected\n");
863 break;
864 case DASD_STATE_BASIC:
865 len = snprintf(buf, PAGE_SIZE, "basic\n");
866 break;
867 case DASD_STATE_UNFMT:
868 len = snprintf(buf, PAGE_SIZE, "unformatted\n");
869 break;
870 case DASD_STATE_READY:
871 len = snprintf(buf, PAGE_SIZE, "ready\n");
872 break;
873 case DASD_STATE_ONLINE:
874 len = snprintf(buf, PAGE_SIZE, "online\n");
875 break;
876 default:
877 len = snprintf(buf, PAGE_SIZE, "no stat\n");
878 break;
879 }
880 dasd_put_device(device);
881 } else
882 len = snprintf(buf, PAGE_SIZE, "unknown\n");
883 return len;
884}
885
886static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
887
888static ssize_t
832dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) 889dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
833{ 890{
834 struct dasd_devmap *devmap; 891 struct dasd_devmap *devmap;
@@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
939static struct attribute * dasd_attrs[] = { 996static struct attribute * dasd_attrs[] = {
940 &dev_attr_readonly.attr, 997 &dev_attr_readonly.attr,
941 &dev_attr_discipline.attr, 998 &dev_attr_discipline.attr,
999 &dev_attr_status.attr,
942 &dev_attr_alias.attr, 1000 &dev_attr_alias.attr,
943 &dev_attr_vendor.attr, 1001 &dev_attr_vendor.attr,
944 &dev_attr_uid.attr, 1002 &dev_attr_uid.attr,
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 293e667b50f2..c210784bdf46 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o 6 sclp_info.o sclp_config.o sclp_chp.o
7 7
8obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o 29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
30obj-$(CONFIG_MONREADER) += monreader.o 30obj-$(CONFIG_MONREADER) += monreader.o
31obj-$(CONFIG_MONWRITER) += monwriter.o 31obj-$(CONFIG_MONWRITER) += monwriter.o
32
33zcore_mod-objs := sclp_sdias.o zcore.o
34obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9a328f14a641..6000bdee4082 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -813,12 +813,6 @@ con3215_unblank(void)
813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 813 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
814} 814}
815 815
816static int __init
817con3215_consetup(struct console *co, char *options)
818{
819 return 0;
820}
821
822/* 816/*
823 * The console structure for the 3215 console 817 * The console structure for the 3215 console
824 */ 818 */
@@ -827,7 +821,6 @@ static struct console con3215 = {
827 .write = con3215_write, 821 .write = con3215_write,
828 .device = con3215_device, 822 .device = con3215_device,
829 .unblank = con3215_unblank, 823 .unblank = con3215_unblank,
830 .setup = con3215_consetup,
831 .flags = CON_PRINTBUFFER, 824 .flags = CON_PRINTBUFFER,
832}; 825};
833 826
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 8e7f2d7633d6..fd3479119eb4 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -555,12 +555,6 @@ con3270_unblank(void)
555 spin_unlock_irqrestore(&cp->view.lock, flags); 555 spin_unlock_irqrestore(&cp->view.lock, flags);
556} 556}
557 557
558static int __init
559con3270_consetup(struct console *co, char *options)
560{
561 return 0;
562}
563
564/* 558/*
565 * The console structure for the 3270 console 559 * The console structure for the 3270 console
566 */ 560 */
@@ -569,7 +563,6 @@ static struct console con3270 = {
569 .write = con3270_write, 563 .write = con3270_write,
570 .device = con3270_device, 564 .device = con3270_device,
571 .unblank = con3270_unblank, 565 .unblank = con3270_unblank,
572 .setup = con3270_consetup,
573 .flags = CON_PRINTBUFFER, 566 .flags = CON_PRINTBUFFER,
574}; 567};
575 568
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f171de3b0b11..fa62e6944057 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -15,6 +15,7 @@
15#include <linux/timer.h> 15#include <linux/timer.h>
16#include <linux/reboot.h> 16#include <linux/reboot.h>
17#include <linux/jiffies.h> 17#include <linux/jiffies.h>
18#include <linux/init.h>
18#include <asm/types.h> 19#include <asm/types.h>
19#include <asm/s390_ext.h> 20#include <asm/s390_ext.h>
20 21
@@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf)
510} 511}
511 512
512static struct sclp_register sclp_state_change_event = { 513static struct sclp_register sclp_state_change_event = {
513 .receive_mask = EvTyp_StateChange_Mask, 514 .receive_mask = EVTYP_STATECHANGE_MASK,
514 .receiver_fn = sclp_state_change_cb 515 .receiver_fn = sclp_state_change_cb
515}; 516};
516 517
@@ -930,3 +931,10 @@ sclp_init(void)
930 sclp_init_mask(1); 931 sclp_init_mask(1);
931 return 0; 932 return 0;
932} 933}
934
935static __init int sclp_initcall(void)
936{
937 return sclp_init();
938}
939
940arch_initcall(sclp_initcall);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7d29ab45a6ed..87ac4a3ad49d 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,33 +19,37 @@
19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) 19#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
20#define MAX_CONSOLE_PAGES 4 20#define MAX_CONSOLE_PAGES 4
21 21
22#define EvTyp_OpCmd 0x01 22#define EVTYP_OPCMD 0x01
23#define EvTyp_Msg 0x02 23#define EVTYP_MSG 0x02
24#define EvTyp_StateChange 0x08 24#define EVTYP_STATECHANGE 0x08
25#define EvTyp_PMsgCmd 0x09 25#define EVTYP_PMSGCMD 0x09
26#define EvTyp_CntlProgOpCmd 0x20 26#define EVTYP_CNTLPROGOPCMD 0x20
27#define EvTyp_CntlProgIdent 0x0B 27#define EVTYP_CNTLPROGIDENT 0x0B
28#define EvTyp_SigQuiesce 0x1D 28#define EVTYP_SIGQUIESCE 0x1D
29#define EvTyp_VT220Msg 0x1A 29#define EVTYP_VT220MSG 0x1A
30 30#define EVTYP_CONFMGMDATA 0x04
31#define EvTyp_OpCmd_Mask 0x80000000 31#define EVTYP_SDIAS 0x1C
32#define EvTyp_Msg_Mask 0x40000000 32
33#define EvTyp_StateChange_Mask 0x01000000 33#define EVTYP_OPCMD_MASK 0x80000000
34#define EvTyp_PMsgCmd_Mask 0x00800000 34#define EVTYP_MSG_MASK 0x40000000
35#define EvTyp_CtlProgOpCmd_Mask 0x00000001 35#define EVTYP_STATECHANGE_MASK 0x01000000
36#define EvTyp_CtlProgIdent_Mask 0x00200000 36#define EVTYP_PMSGCMD_MASK 0x00800000
37#define EvTyp_SigQuiesce_Mask 0x00000008 37#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
38#define EvTyp_VT220Msg_Mask 0x00000040 38#define EVTYP_CTLPROGIDENT_MASK 0x00200000
39 39#define EVTYP_SIGQUIESCE_MASK 0x00000008
40#define GnrlMsgFlgs_DOM 0x8000 40#define EVTYP_VT220MSG_MASK 0x00000040
41#define GnrlMsgFlgs_SndAlrm 0x4000 41#define EVTYP_CONFMGMDATA_MASK 0x10000000
42#define GnrlMsgFlgs_HoldMsg 0x2000 42#define EVTYP_SDIAS_MASK 0x00000010
43 43
44#define LnTpFlgs_CntlText 0x8000 44#define GNRLMSGFLGS_DOM 0x8000
45#define LnTpFlgs_LabelText 0x4000 45#define GNRLMSGFLGS_SNDALRM 0x4000
46#define LnTpFlgs_DataText 0x2000 46#define GNRLMSGFLGS_HOLDMSG 0x2000
47#define LnTpFlgs_EndText 0x1000 47
48#define LnTpFlgs_PromptText 0x0800 48#define LNTPFLGS_CNTLTEXT 0x8000
49#define LNTPFLGS_LABELTEXT 0x4000
50#define LNTPFLGS_DATATEXT 0x2000
51#define LNTPFLGS_ENDTEXT 0x1000
52#define LNTPFLGS_PROMPTTEXT 0x0800
49 53
50typedef unsigned int sclp_cmdw_t; 54typedef unsigned int sclp_cmdw_t;
51 55
@@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t;
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 60#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
57 61
58#define GDS_ID_MDSMU 0x1310 62#define GDS_ID_MDSMU 0x1310
59#define GDS_ID_MDSRouteInfo 0x1311 63#define GDS_ID_MDSROUTEINFO 0x1311
60#define GDS_ID_AgUnWrkCorr 0x1549 64#define GDS_ID_AGUNWRKCORR 0x1549
61#define GDS_ID_SNACondReport 0x1532 65#define GDS_ID_SNACONDREPORT 0x1532
62#define GDS_ID_CPMSU 0x1212 66#define GDS_ID_CPMSU 0x1212
63#define GDS_ID_RoutTargInstr 0x154D 67#define GDS_ID_ROUTTARGINSTR 0x154D
64#define GDS_ID_OpReq 0x8070 68#define GDS_ID_OPREQ 0x8070
65#define GDS_ID_TextCmd 0x1320 69#define GDS_ID_TEXTCMD 0x1320
66 70
67#define GDS_KEY_SelfDefTextMsg 0x31 71#define GDS_KEY_SELFDEFTEXTMSG 0x31
68 72
69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 73typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
70 74
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
new file mode 100644
index 000000000000..a66b914519b5
--- /dev/null
+++ b/drivers/s390/char/sclp_chp.c
@@ -0,0 +1,196 @@
1/*
2 * drivers/s390/char/sclp_chp.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/gfp.h>
10#include <linux/errno.h>
11#include <linux/completion.h>
12#include <asm/sclp.h>
13#include <asm/chpid.h>
14
15#include "sclp.h"
16
17#define TAG "sclp_chp: "
18
19#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001
20#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001
21#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001
22
23static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
24{
25 return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
26}
27
28static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
29{
30 return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
31}
32
33static void chp_callback(struct sclp_req *req, void *data)
34{
35 struct completion *completion = data;
36
37 complete(completion);
38}
39
40struct chp_cfg_sccb {
41 struct sccb_header header;
42 u8 ccm;
43 u8 reserved[6];
44 u8 cssid;
45} __attribute__((packed));
46
47struct chp_cfg_data {
48 struct chp_cfg_sccb sccb;
49 struct sclp_req req;
50 struct completion completion;
51} __attribute__((packed));
52
53static int do_configure(sclp_cmdw_t cmd)
54{
55 struct chp_cfg_data *data;
56 int rc;
57
58 /* Prepare sccb. */
59 data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
60 if (!data)
61 return -ENOMEM;
62 data->sccb.header.length = sizeof(struct chp_cfg_sccb);
63 data->req.command = cmd;
64 data->req.sccb = &(data->sccb);
65 data->req.status = SCLP_REQ_FILLED;
66 data->req.callback = chp_callback;
67 data->req.callback_data = &(data->completion);
68 init_completion(&data->completion);
69
70 /* Perform sclp request. */
71 rc = sclp_add_request(&(data->req));
72 if (rc)
73 goto out;
74 wait_for_completion(&data->completion);
75
76 /* Check response .*/
77 if (data->req.status != SCLP_REQ_DONE) {
78 printk(KERN_WARNING TAG "configure channel-path request failed "
79 "(status=0x%02x)\n", data->req.status);
80 rc = -EIO;
81 goto out;
82 }
83 switch (data->sccb.header.response_code) {
84 case 0x0020:
85 case 0x0120:
86 case 0x0440:
87 case 0x0450:
88 break;
89 default:
90 printk(KERN_WARNING TAG "configure channel-path failed "
91 "(cmd=0x%08x, response=0x%04x)\n", cmd,
92 data->sccb.header.response_code);
93 rc = -EIO;
94 break;
95 }
96out:
97 free_page((unsigned long) data);
98
99 return rc;
100}
101
102/**
103 * sclp_chp_configure - perform configure channel-path sclp command
104 * @chpid: channel-path ID
105 *
106 * Perform configure channel-path command sclp command for specified chpid.
107 * Return 0 after command successfully finished, non-zero otherwise.
108 */
109int sclp_chp_configure(struct chp_id chpid)
110{
111 return do_configure(get_configure_cmdw(chpid));
112}
113
114/**
115 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
116 * @chpid: channel-path ID
117 *
118 * Perform deconfigure channel-path command sclp command for specified chpid
119 * and wait for completion. On success return 0. Return non-zero otherwise.
120 */
121int sclp_chp_deconfigure(struct chp_id chpid)
122{
123 return do_configure(get_deconfigure_cmdw(chpid));
124}
125
126struct chp_info_sccb {
127 struct sccb_header header;
128 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
129 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
130 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
131 u8 ccm;
132 u8 reserved[6];
133 u8 cssid;
134} __attribute__((packed));
135
136struct chp_info_data {
137 struct chp_info_sccb sccb;
138 struct sclp_req req;
139 struct completion completion;
140} __attribute__((packed));
141
142/**
143 * sclp_chp_read_info - perform read channel-path information sclp command
144 * @info: resulting channel-path information data
145 *
146 * Perform read channel-path information sclp command and wait for completion.
147 * On success, store channel-path information in @info and return 0. Return
148 * non-zero otherwise.
149 */
150int sclp_chp_read_info(struct sclp_chp_info *info)
151{
152 struct chp_info_data *data;
153 int rc;
154
155 /* Prepare sccb. */
156 data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
157 if (!data)
158 return -ENOMEM;
159 data->sccb.header.length = sizeof(struct chp_info_sccb);
160 data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
161 data->req.sccb = &(data->sccb);
162 data->req.status = SCLP_REQ_FILLED;
163 data->req.callback = chp_callback;
164 data->req.callback_data = &(data->completion);
165 init_completion(&data->completion);
166
167 /* Perform sclp request. */
168 rc = sclp_add_request(&(data->req));
169 if (rc)
170 goto out;
171 wait_for_completion(&data->completion);
172
173 /* Check response .*/
174 if (data->req.status != SCLP_REQ_DONE) {
175 printk(KERN_WARNING TAG "read channel-path info request failed "
176 "(status=0x%02x)\n", data->req.status);
177 rc = -EIO;
178 goto out;
179 }
180 if (data->sccb.header.response_code != 0x0010) {
181 printk(KERN_WARNING TAG "read channel-path info failed "
182 "(response=0x%04x)\n", data->sccb.header.response_code);
183 rc = -EIO;
184 goto out;
185 }
186 memcpy(info->recognized, data->sccb.recognized,
187 SCLP_CHP_INFO_MASK_SIZE);
188 memcpy(info->standby, data->sccb.standby,
189 SCLP_CHP_INFO_MASK_SIZE);
190 memcpy(info->configured, data->sccb.configured,
191 SCLP_CHP_INFO_MASK_SIZE);
192out:
193 free_page((unsigned long) data);
194
195 return rc;
196}
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
new file mode 100644
index 000000000000..5322e5e54a98
--- /dev/null
+++ b/drivers/s390/char/sclp_config.c
@@ -0,0 +1,75 @@
1/*
2 * drivers/s390/char/sclp_config.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/cpu.h>
11#include <linux/sysdev.h>
12#include <linux/workqueue.h>
13#include "sclp.h"
14
15#define TAG "sclp_config: "
16
17struct conf_mgm_data {
18 u8 reserved;
19 u8 ev_qualifier;
20} __attribute__((packed));
21
22#define EV_QUAL_CAP_CHANGE 3
23
24static struct work_struct sclp_cpu_capability_work;
25
26static void sclp_cpu_capability_notify(struct work_struct *work)
27{
28 int cpu;
29 struct sys_device *sysdev;
30
31 printk(KERN_WARNING TAG "cpu capability changed.\n");
32 lock_cpu_hotplug();
33 for_each_online_cpu(cpu) {
34 sysdev = get_cpu_sysdev(cpu);
35 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
36 }
37 unlock_cpu_hotplug();
38}
39
40static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
41{
42 struct conf_mgm_data *cdata;
43
44 cdata = (struct conf_mgm_data *)(evbuf + 1);
45 if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
46 schedule_work(&sclp_cpu_capability_work);
47}
48
49static struct sclp_register sclp_conf_register =
50{
51 .receive_mask = EVTYP_CONFMGMDATA_MASK,
52 .receiver_fn = sclp_conf_receiver_fn,
53};
54
55static int __init sclp_conf_init(void)
56{
57 int rc;
58
59 INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
60
61 rc = sclp_register(&sclp_conf_register);
62 if (rc) {
63 printk(KERN_ERR TAG "failed to register (%d).\n", rc);
64 return rc;
65 }
66
67 if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
68 printk(KERN_WARNING TAG "no configuration management.\n");
69 sclp_unregister(&sclp_conf_register);
70 rc = -ENOSYS;
71 }
72 return rc;
73}
74
75__initcall(sclp_conf_init);
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 65aa2c85737f..29fe2a5ec2fe 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -46,7 +46,7 @@ struct cpi_sccb {
46/* Event type structure for write message and write priority message */ 46/* Event type structure for write message and write priority message */
47static struct sclp_register sclp_cpi_event = 47static struct sclp_register sclp_cpi_event =
48{ 48{
49 .send_mask = EvTyp_CtlProgIdent_Mask 49 .send_mask = EVTYP_CTLPROGIDENT_MASK
50}; 50};
51 51
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
@@ -201,7 +201,7 @@ cpi_module_init(void)
201 "console.\n"); 201 "console.\n");
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { 204 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
205 printk(KERN_WARNING "cpi: no control program identification " 205 printk(KERN_WARNING "cpi: no control program identification "
206 "support\n"); 206 "support\n");
207 sclp_unregister(&sclp_cpi_event); 207 sclp_unregister(&sclp_cpi_event);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index baa8fe669ed2..45ff25e787cb 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf)
43} 43}
44 44
45static struct sclp_register sclp_quiesce_event = { 45static struct sclp_register sclp_quiesce_event = {
46 .receive_mask = EvTyp_SigQuiesce_Mask, 46 .receive_mask = EVTYP_SIGQUIESCE_MASK,
47 .receiver_fn = sclp_quiesce_handler 47 .receiver_fn = sclp_quiesce_handler
48}; 48};
49 49
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 2486783ea58e..bbd5b8b66f42 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -30,7 +30,7 @@
30 30
31/* Event type structure for write message and write priority message */ 31/* Event type structure for write message and write priority message */
32static struct sclp_register sclp_rw_event = { 32static struct sclp_register sclp_rw_event = {
33 .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask 33 .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK
34}; 34};
35 35
36/* 36/*
@@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
64 memset(sccb, 0, sizeof(struct write_sccb)); 64 memset(sccb, 0, sizeof(struct write_sccb));
65 sccb->header.length = sizeof(struct write_sccb); 65 sccb->header.length = sizeof(struct write_sccb);
66 sccb->msg_buf.header.length = sizeof(struct msg_buf); 66 sccb->msg_buf.header.length = sizeof(struct msg_buf);
67 sccb->msg_buf.header.type = EvTyp_Msg; 67 sccb->msg_buf.header.type = EVTYP_MSG;
68 sccb->msg_buf.mdb.header.length = sizeof(struct mdb); 68 sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
69 sccb->msg_buf.mdb.header.type = 1; 69 sccb->msg_buf.mdb.header.type = 1;
70 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ 70 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
@@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
114 memset(mto, 0, sizeof(struct mto)); 114 memset(mto, 0, sizeof(struct mto));
115 mto->length = sizeof(struct mto); 115 mto->length = sizeof(struct mto);
116 mto->type = 4; /* message text object */ 116 mto->type = 4; /* message text object */
117 mto->line_type_flags = LnTpFlgs_EndText; /* end text */ 117 mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
118 118
119 /* set pointer to first byte after struct mto. */ 119 /* set pointer to first byte after struct mto. */
120 buffer->current_line = (char *) (mto + 1); 120 buffer->current_line = (char *) (mto + 1);
@@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
215 case '\a': /* bell, one for several times */ 215 case '\a': /* bell, one for several times */
216 /* set SCLP sound alarm bit in General Object */ 216 /* set SCLP sound alarm bit in General Object */
217 buffer->sccb->msg_buf.mdb.go.general_msg_flags |= 217 buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
218 GnrlMsgFlgs_SndAlrm; 218 GNRLMSGFLGS_SNDALRM;
219 break; 219 break;
220 case '\t': /* horizontal tabulator */ 220 case '\t': /* horizontal tabulator */
221 /* check if new mto needs to be created */ 221 /* check if new mto needs to be created */
@@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
452 return -EIO; 452 return -EIO;
453 453
454 sccb = buffer->sccb; 454 sccb = buffer->sccb;
455 if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) 455 if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
456 /* Use normal write message */ 456 /* Use normal write message */
457 sccb->msg_buf.header.type = EvTyp_Msg; 457 sccb->msg_buf.header.type = EVTYP_MSG;
458 else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) 458 else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
459 /* Use write priority message */ 459 /* Use write priority message */
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
new file mode 100644
index 000000000000..52283daddaef
--- /dev/null
+++ b/drivers/s390/char/sclp_sdias.c
@@ -0,0 +1,255 @@
1/*
2 * Sclp "store data in absolut storage"
3 *
4 * Copyright IBM Corp. 2003,2007
5 * Author(s): Michael Holzheu
6 */
7
8#include <linux/sched.h>
9#include <asm/sclp.h>
10#include <asm/debug.h>
11#include <asm/ipl.h>
12#include "sclp.h"
13#include "sclp_rw.h"
14
15#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
16#define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x )
17
18#define SDIAS_RETRIES 300
19#define SDIAS_SLEEP_TICKS 50
20
21#define EQ_STORE_DATA 0x0
22#define EQ_SIZE 0x1
23#define DI_FCP_DUMP 0x0
24#define ASA_SIZE_32 0x0
25#define ASA_SIZE_64 0x1
26#define EVSTATE_ALL_STORED 0x0
27#define EVSTATE_NO_DATA 0x3
28#define EVSTATE_PART_STORED 0x10
29
30static struct debug_info *sdias_dbf;
31
32static struct sclp_register sclp_sdias_register = {
33 .send_mask = EVTYP_SDIAS_MASK,
34};
35
36struct sdias_evbuf {
37 struct evbuf_header hdr;
38 u8 event_qual;
39 u8 data_id;
40 u64 reserved2;
41 u32 event_id;
42 u16 reserved3;
43 u8 asa_size;
44 u8 event_status;
45 u32 reserved4;
46 u32 blk_cnt;
47 u64 asa;
48 u32 reserved5;
49 u32 fbn;
50 u32 reserved6;
51 u32 lbn;
52 u16 reserved7;
53 u16 dbs;
54} __attribute__((packed));
55
56struct sdias_sccb {
57 struct sccb_header hdr;
58 struct sdias_evbuf evbuf;
59} __attribute__((packed));
60
61static struct sdias_sccb sccb __attribute__((aligned(4096)));
62
63static int sclp_req_done;
64static wait_queue_head_t sdias_wq;
65static DEFINE_MUTEX(sdias_mutex);
66
67static void sdias_callback(struct sclp_req *request, void *data)
68{
69 struct sdias_sccb *sccb;
70
71 sccb = (struct sdias_sccb *) request->sccb;
72 sclp_req_done = 1;
73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
74 TRACE("callback done\n");
75}
76
77static int sdias_sclp_send(struct sclp_req *req)
78{
79 int retries;
80 int rc;
81
82 for (retries = SDIAS_RETRIES; retries; retries--) {
83 sclp_req_done = 0;
84 TRACE("add request\n");
85 rc = sclp_add_request(req);
86 if (rc) {
87 /* not initiated, wait some time and retry */
88 set_current_state(TASK_INTERRUPTIBLE);
89 TRACE("add request failed: rc = %i\n",rc);
90 schedule_timeout(SDIAS_SLEEP_TICKS);
91 continue;
92 }
93 /* initiated, wait for completion of service call */
94 wait_event(sdias_wq, (sclp_req_done == 1));
95 if (req->status == SCLP_REQ_FAILED) {
96 TRACE("sclp request failed\n");
97 rc = -EIO;
98 continue;
99 }
100 TRACE("request done\n");
101 break;
102 }
103 return rc;
104}
105
106/*
107 * Get number of blocks (4K) available in the HSA
108 */
109int sclp_sdias_blk_count(void)
110{
111 struct sclp_req request;
112 int rc;
113
114 mutex_lock(&sdias_mutex);
115
116 memset(&sccb, 0, sizeof(sccb));
117 memset(&request, 0, sizeof(request));
118
119 sccb.hdr.length = sizeof(sccb);
120 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
121 sccb.evbuf.hdr.type = EVTYP_SDIAS;
122 sccb.evbuf.event_qual = EQ_SIZE;
123 sccb.evbuf.data_id = DI_FCP_DUMP;
124 sccb.evbuf.event_id = 4712;
125 sccb.evbuf.dbs = 1;
126
127 request.sccb = &sccb;
128 request.command = SCLP_CMDW_WRITE_EVENT_DATA;
129 request.status = SCLP_REQ_FILLED;
130 request.callback = sdias_callback;
131
132 rc = sdias_sclp_send(&request);
133 if (rc) {
134 ERROR_MSG("sclp_send failed for get_nr_blocks\n");
135 goto out;
136 }
137 if (sccb.hdr.response_code != 0x0020) {
138 TRACE("send failed: %x\n", sccb.hdr.response_code);
139 rc = -EIO;
140 goto out;
141 }
142
143 switch (sccb.evbuf.event_status) {
144 case 0:
145 rc = sccb.evbuf.blk_cnt;
146 break;
147 default:
148 ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status);
149 rc = -EIO;
150 goto out;
151 }
152 TRACE("%i blocks\n", rc);
153out:
154 mutex_unlock(&sdias_mutex);
155 return rc;
156}
157
158/*
159 * Copy from HSA to absolute storage (not reentrant):
160 *
161 * @dest : Address of buffer where data should be copied
162 * @start_blk: Start Block (beginning with 1)
163 * @nr_blks : Number of 4K blocks to copy
164 *
165 * Return Value: 0 : Requested 'number' of blocks of data copied
166 * <0: ERROR - negative event status
167 */
168int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
169{
170 struct sclp_req request;
171 int rc;
172
173 mutex_lock(&sdias_mutex);
174
175 memset(&sccb, 0, sizeof(sccb));
176 memset(&request, 0, sizeof(request));
177
178 sccb.hdr.length = sizeof(sccb);
179 sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
180 sccb.evbuf.hdr.type = EVTYP_SDIAS;
181 sccb.evbuf.hdr.flags = 0;
182 sccb.evbuf.event_qual = EQ_STORE_DATA;
183 sccb.evbuf.data_id = DI_FCP_DUMP;
184 sccb.evbuf.event_id = 4712;
185#ifdef __s390x__
186 sccb.evbuf.asa_size = ASA_SIZE_64;
187#else
188 sccb.evbuf.asa_size = ASA_SIZE_32;
189#endif
190 sccb.evbuf.event_status = 0;
191 sccb.evbuf.blk_cnt = nr_blks;
192 sccb.evbuf.asa = (unsigned long)dest;
193 sccb.evbuf.fbn = start_blk;
194 sccb.evbuf.lbn = 0;
195 sccb.evbuf.dbs = 1;
196
197 request.sccb = &sccb;
198 request.command = SCLP_CMDW_WRITE_EVENT_DATA;
199 request.status = SCLP_REQ_FILLED;
200 request.callback = sdias_callback;
201
202 rc = sdias_sclp_send(&request);
203 if (rc) {
204 ERROR_MSG("sclp_send failed: %x\n", rc);
205 goto out;
206 }
207 if (sccb.hdr.response_code != 0x0020) {
208 TRACE("copy failed: %x\n", sccb.hdr.response_code);
209 rc = -EIO;
210 goto out;
211 }
212
213 switch (sccb.evbuf.event_status) {
214 case EVSTATE_ALL_STORED:
215 TRACE("all stored\n");
216 case EVSTATE_PART_STORED:
217 TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
218 break;
219 case EVSTATE_NO_DATA:
220 TRACE("no data\n");
221 default:
222 ERROR_MSG("Error from SCLP while copying hsa. "
223 "Event status = %x\n",
224 sccb.evbuf.event_status);
225 rc = -EIO;
226 }
227out:
228 mutex_unlock(&sdias_mutex);
229 return rc;
230}
231
232int __init sdias_init(void)
233{
234 int rc;
235
236 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
237 return 0;
238 sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
239 debug_register_view(sdias_dbf, &debug_sprintf_view);
240 debug_set_level(sdias_dbf, 6);
241 rc = sclp_register(&sclp_sdias_register);
242 if (rc) {
243 ERROR_MSG("sclp register failed\n");
244 return rc;
245 }
246 init_waitqueue_head(&sdias_wq);
247 TRACE("init done\n");
248 return 0;
249}
250
251void __exit sdias_exit(void)
252{
253 debug_unregister(sdias_dbf);
254 sclp_unregister(&sclp_sdias_register);
255}
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 076816b9d528..e3b3d390b4a3 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start,
648 subvec = start; 648 subvec = start;
649 while (subvec < end) { 649 while (subvec < end) {
650 subvec = find_gds_subvector(subvec, end, 650 subvec = find_gds_subvector(subvec, end,
651 GDS_KEY_SelfDefTextMsg); 651 GDS_KEY_SELFDEFTEXTMSG);
652 if (!subvec) 652 if (!subvec)
653 break; 653 break;
654 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), 654 sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
@@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
664 664
665 vec = start; 665 vec = start;
666 while (vec < end) { 666 while (vec < end) {
667 vec = find_gds_vector(vec, end, GDS_ID_TextCmd); 667 vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD);
668 if (!vec) 668 if (!vec)
669 break; 669 break;
670 sclp_eval_textcmd((struct gds_subvector *)(vec + 1), 670 sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
@@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg)
703 703
704static struct sclp_register sclp_input_event = 704static struct sclp_register sclp_input_event =
705{ 705{
706 .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, 706 .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
707 .state_change_fn = sclp_tty_state_change, 707 .state_change_fn = sclp_tty_state_change,
708 .receiver_fn = sclp_tty_receiver 708 .receiver_fn = sclp_tty_receiver
709}; 709};
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f77dc33b5f8d..726334757bbf 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void);
99 99
100/* Registration structure for our interest in SCLP event buffers */ 100/* Registration structure for our interest in SCLP event buffers */
101static struct sclp_register sclp_vt220_register = { 101static struct sclp_register sclp_vt220_register = {
102 .send_mask = EvTyp_VT220Msg_Mask, 102 .send_mask = EVTYP_VT220MSG_MASK,
103 .receive_mask = EvTyp_VT220Msg_Mask, 103 .receive_mask = EVTYP_VT220MSG_MASK,
104 .state_change_fn = NULL, 104 .state_change_fn = NULL,
105 .receiver_fn = sclp_vt220_receiver_fn 105 .receiver_fn = sclp_vt220_receiver_fn
106}; 106};
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
202static int 202static int
203__sclp_vt220_emit(struct sclp_vt220_request *request) 203__sclp_vt220_emit(struct sclp_vt220_request *request)
204{ 204{
205 if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) { 205 if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
206 request->sclp_req.status = SCLP_REQ_FAILED; 206 request->sclp_req.status = SCLP_REQ_FAILED;
207 return -EIO; 207 return -EIO;
208 } 208 }
@@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page)
284 sccb->header.length = sizeof(struct sclp_vt220_sccb); 284 sccb->header.length = sizeof(struct sclp_vt220_sccb);
285 sccb->header.function_code = SCLP_NORMAL_WRITE; 285 sccb->header.function_code = SCLP_NORMAL_WRITE;
286 sccb->header.response_code = 0x0000; 286 sccb->header.response_code = 0x0000;
287 sccb->evbuf.type = EvTyp_VT220Msg; 287 sccb->evbuf.type = EVTYP_VT220MSG;
288 sccb->evbuf.length = sizeof(struct evbuf_header); 288 sccb->evbuf.length = sizeof(struct evbuf_header);
289 289
290 return request; 290 return request;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b87d3b019936..a5a00e9ae4d0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
125 .recording_name = "EREP", 125 .recording_name = "EREP",
126 .minor_num = 0, 126 .minor_num = 0,
127 .buffer_free = 1, 127 .buffer_free = 1,
128 .priv_lock = SPIN_LOCK_UNLOCKED, 128 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
129 .autorecording = 1, 129 .autorecording = 1,
130 .autopurge = 1, 130 .autopurge = 1,
131 }, 131 },
@@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
134 .recording_name = "ACCOUNT", 134 .recording_name = "ACCOUNT",
135 .minor_num = 1, 135 .minor_num = 1,
136 .buffer_free = 1, 136 .buffer_free = 1,
137 .priv_lock = SPIN_LOCK_UNLOCKED, 137 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
138 .autorecording = 1, 138 .autorecording = 1,
139 .autopurge = 1, 139 .autopurge = 1,
140 }, 140 },
@@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
143 .recording_name = "SYMPTOM", 143 .recording_name = "SYMPTOM",
144 .minor_num = 2, 144 .minor_num = 2,
145 .buffer_free = 1, 145 .buffer_free = 1,
146 .priv_lock = SPIN_LOCK_UNLOCKED, 146 .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
147 .autorecording = 1, 147 .autorecording = 1,
148 .autopurge = 1, 148 .autopurge = 1,
149 } 149 }
@@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
385 385
386 struct vmlogrdr_priv_t * logptr = filp->private_data; 386 struct vmlogrdr_priv_t * logptr = filp->private_data;
387 387
388 iucv_path_sever(logptr->path, NULL);
389 kfree(logptr->path);
390 logptr->path = NULL;
388 if (logptr->autorecording) { 391 if (logptr->autorecording) {
389 ret = vmlogrdr_recording(logptr,0,logptr->autopurge); 392 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
390 if (ret) 393 if (ret)
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
new file mode 100644
index 000000000000..89d439316a53
--- /dev/null
+++ b/drivers/s390/char/zcore.c
@@ -0,0 +1,651 @@
1/*
2 * zcore module to export memory content and register sets for creating system
3 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
4 * dump format as s390 standalone dumps.
5 *
6 * For more information please refer to Documentation/s390/zfcpdump.txt
7 *
8 * Copyright IBM Corp. 2003,2007
9 * Author(s): Michael Holzheu
10 */
11
12#include <linux/init.h>
13#include <linux/miscdevice.h>
14#include <linux/utsname.h>
15#include <linux/debugfs.h>
16#include <asm/ipl.h>
17#include <asm/sclp.h>
18#include <asm/setup.h>
19#include <asm/sigp.h>
20#include <asm/uaccess.h>
21#include <asm/debug.h>
22#include <asm/processor.h>
23#include <asm/irqflags.h>
24
25#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
26#define MSG(x...) printk( KERN_ALERT x )
27#define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x )
28
29#define TO_USER 0
30#define TO_KERNEL 1
31
/*
 * Architecture id of the dumped Linux instance, read from the lowcore
 * byte at __LC_AR_MODE_ID (see zcore_init()).
 */
enum arch_id {
	ARCH_S390 = 0,
	ARCH_S390X = 1,
};
36
37/* dump system info */
38
struct sys_info {
	enum arch_id arch;		/* arch of the dumped system */
	unsigned long sa_base;		/* lowcore offset of the register save area
					   (SAVE_AREA_BASE_S390/S390X) */
	u32 sa_size;			/* size of one register save area */
	int cpu_map[NR_CPUS];		/* not referenced in this file's visible code */
	unsigned long mem_size;		/* size of the dumped memory */
	union save_area lc_mask;	/* byte mask: non-zero bytes of the save area
					   are copied by copy_lc() */
};
47
48static struct sys_info sys_info;
49static struct debug_info *zcore_dbf;
50static int hsa_available;
51static struct dentry *zcore_dir;
52static struct dentry *zcore_file;
53
54/*
55 * Copy memory from HSA to kernel or user memory (not reentrant):
56 *
57 * @dest: Kernel or user buffer where memory should be copied to
58 * @src: Start address within HSA where data should be copied
59 * @count: Size of buffer, which should be copied
60 * @mode: Either TO_KERNEL or TO_USER
61 */
62static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
63{
64 int offs, blk_num;
65 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
66
67 if (count == 0)
68 return 0;
69
70 /* copy first block */
71 offs = 0;
72 if ((src % PAGE_SIZE) != 0) {
73 blk_num = src / PAGE_SIZE + 2;
74 if (sclp_sdias_copy(buf, blk_num, 1)) {
75 TRACE("sclp_sdias_copy() failed\n");
76 return -EIO;
77 }
78 offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
79 if (mode == TO_USER) {
80 if (copy_to_user((__force __user void*) dest,
81 buf + (src % PAGE_SIZE), offs))
82 return -EFAULT;
83 } else
84 memcpy(dest, buf + (src % PAGE_SIZE), offs);
85 }
86 if (offs == count)
87 goto out;
88
89 /* copy middle */
90 for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
91 blk_num = (src + offs) / PAGE_SIZE + 2;
92 if (sclp_sdias_copy(buf, blk_num, 1)) {
93 TRACE("sclp_sdias_copy() failed\n");
94 return -EIO;
95 }
96 if (mode == TO_USER) {
97 if (copy_to_user((__force __user void*) dest + offs,
98 buf, PAGE_SIZE))
99 return -EFAULT;
100 } else
101 memcpy(dest + offs, buf, PAGE_SIZE);
102 }
103 if (offs == count)
104 goto out;
105
106 /* copy last block */
107 blk_num = (src + offs) / PAGE_SIZE + 2;
108 if (sclp_sdias_copy(buf, blk_num, 1)) {
109 TRACE("sclp_sdias_copy() failed\n");
110 return -EIO;
111 }
112 if (mode == TO_USER) {
113 if (copy_to_user((__force __user void*) dest + offs, buf,
114 PAGE_SIZE))
115 return -EFAULT;
116 } else
117 memcpy(dest + offs, buf, count - offs);
118out:
119 return 0;
120}
121
/* Convenience wrapper: copy @count bytes from HSA offset @src to user space. */
static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}
126
/* Convenience wrapper: copy @count bytes from HSA offset @src to kernel memory. */
static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}
131
/*
 * Copy @count bytes from real (absolute) storage address @src to the kernel
 * buffer @dest using the MVCLE instruction while temporarily running with
 * DAT disabled (stnsm 0xf8 masks the PSW bits — "real mode" per the inline
 * comment below).
 *
 * Returns 0 on success, -EFAULT if MVCLE raised an exception (the EX_TABLE
 * entry resumes at label 2 with rc still holding its initial -EFAULT).
 */
static int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long flags;
	int rc = -EFAULT;	/* only cleared by the "lhi %0,0x0" on success */
	/* MVCLE needs even/odd register pairs: 2/3 = dest, 4/5 = src */
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = src;
	register unsigned long _len2 asm("5") = (unsigned long) count;

	if (count == 0)
		return 0;
	flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */
	asm volatile (
		"0: mvcle %1,%2,0x0\n"
		"1: jo 0b\n"		/* cc 3: not finished, retry */
		" lhi %0,0x0\n"		/* completed: rc = 0 */
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc)
		: "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2)
		: "cc", "memory");
	__raw_local_irq_ssm(flags);	/* restore original PSW mask */

	return rc;
}
157
158static int memcpy_real_user(__user void *dest, unsigned long src, size_t count)
159{
160 static char buf[4096];
161 int offs = 0, size;
162
163 while (offs < count) {
164 size = min(sizeof(buf), count - offs);
165 if (memcpy_real(buf, src + offs, size))
166 return -EFAULT;
167 if (copy_to_user(dest + offs, buf, size))
168 return -EFAULT;
169 offs += size;
170 }
171 return 0;
172}
173
174#ifdef __s390x__
175/*
176 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
177 */
/*
 * Convert s390x (64 bit) cpu info to s390 (31 bit) cpu info
 *
 * @out: 31-bit save area to fill
 * @in:  64-bit save area to convert from
 * @cpu: cpu number (currently unused in the body)
 *
 * 64-bit registers are truncated to their low 32 bits; the PSW is
 * repacked from the 16-byte z/Architecture format into the 8-byte
 * ESA/390 format.
 */
static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
				      int cpu)
{
	int i;

	for (i = 0; i < 16; i++) {
		/* keep only the low halves of the 64-bit GPRs/CRs */
		out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
		out->s390.acc_regs[i] = in->s390x.acc_regs[i];
		out->s390.ctrl_regs[i] =
			in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
	}
	/* locore for 31 bit has only space for fpregs 0,2,4,6 */
	out->s390.fp_regs[0] = in->s390x.fp_regs[0];
	out->s390.fp_regs[1] = in->s390x.fp_regs[2];
	out->s390.fp_regs[2] = in->s390x.fp_regs[4];
	out->s390.fp_regs[3] = in->s390x.fp_regs[6];
	/* first PSW word: copy mask bytes, then force ESA format bit */
	memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
	out->s390.psw[1] |= 0x8; /* set bit 12 */
	/* second PSW word: low half of the 64-bit instruction address */
	memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4);
	out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
	out->s390.pref_reg = in->s390x.pref_reg;
	out->s390.timer = in->s390x.timer;
	out->s390.clk_cmp = in->s390x.clk_cmp;
}
202
203static void __init s390x_to_s390_save_areas(void)
204{
205 int i = 1;
206 static union save_area tmp;
207
208 while (zfcpdump_save_areas[i]) {
209 s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
210 memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
211 i++;
212 }
213}
214
215#endif /* __s390x__ */
216
217static int __init init_cpu_info(enum arch_id arch)
218{
219 union save_area *sa;
220
221 /* get info for boot cpu from lowcore, stored in the HSA */
222
223 sa = kmalloc(sizeof(*sa), GFP_KERNEL);
224 if (!sa) {
225 ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
226 return -ENOMEM;
227 }
228 if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
229 ERROR_MSG("could not copy from HSA\n");
230 kfree(sa);
231 return -EIO;
232 }
233 zfcpdump_save_areas[0] = sa;
234
235#ifdef __s390x__
236 /* convert s390x regs to s390, if we are dumping an s390 Linux */
237
238 if (arch == ARCH_S390)
239 s390x_to_s390_save_areas();
240#endif
241
242 return 0;
243}
244
245static DEFINE_MUTEX(zcore_mutex);
246
247#define DUMP_VERSION 0x3
248#define DUMP_MAGIC 0xa8190173618f23fdULL
249#define DUMP_ARCH_S390X 2
250#define DUMP_ARCH_S390 1
251#define HEADER_SIZE 4096
252
253/* dump header dumped according to s390 crash dump format */
254
/*
 * On-disk dump header, s390 standalone-dump format.  Exactly one page
 * (4096 bytes, padded by pad2); layout is part of the dump format and
 * must not be reordered — packed and 16-byte aligned.
 */
struct zcore_header {
	u64 magic;		/* DUMP_MAGIC */
	u32 version;		/* DUMP_VERSION */
	u32 header_size;	/* 4096 */
	u32 dump_level;
	u32 page_size;
	u64 mem_size;		/* size of dumped memory */
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;		/* time-of-day clock at header creation */
	cpuid_t cpu_id;
	u32 arch_id;		/* DUMP_ARCH_S390(X): arch of dumped system */
	u32 build_arch;		/* arch this dumper was built for */
	char pad2[4016];	/* pad header to a full 4K page */
} __attribute__((packed,__aligned__(16)));
272
/*
 * Compile-time constant part of the dump header; the runtime fields
 * (mem_size, tod, cpu_id, arch_id, ...) are filled in by
 * zcore_header_init().
 */
static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef __s390x__
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};
286
287/*
288 * Copy lowcore info to buffer. Use map in order to copy only register parts.
289 *
290 * @buf: User buffer
291 * @sa: Pointer to save area
292 * @sa_off: Offset in save area to copy
293 * @len: Number of bytes to copy
294 */
295static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
296{
297 int i;
298 char *lc_mask = (char*)&sys_info.lc_mask;
299
300 for (i = 0; i < len; i++) {
301 if (!lc_mask[i + sa_off])
302 continue;
303 if (copy_to_user(buf + i, sa + sa_off + i, 1))
304 return -EFAULT;
305 }
306 return 0;
307}
308
309/*
310 * Copy lowcores info to memory, if necessary
311 *
312 * @buf: User buffer
313 * @addr: Start address of buffer in dump memory
314 * @count: Size of buffer
315 */
316static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
317{
318 unsigned long end;
319 int i = 0;
320
321 if (count == 0)
322 return 0;
323
324 end = start + count;
325 while (zfcpdump_save_areas[i]) {
326 unsigned long cp_start, cp_end; /* copy range */
327 unsigned long sa_start, sa_end; /* save area range */
328 unsigned long prefix;
329 unsigned long sa_off, len, buf_off;
330
331 if (sys_info.arch == ARCH_S390)
332 prefix = zfcpdump_save_areas[i]->s390.pref_reg;
333 else
334 prefix = zfcpdump_save_areas[i]->s390x.pref_reg;
335
336 sa_start = prefix + sys_info.sa_base;
337 sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
338
339 if ((end < sa_start) || (start > sa_end))
340 goto next;
341 cp_start = max(start, sa_start);
342 cp_end = min(end, sa_end);
343
344 buf_off = cp_start - start;
345 sa_off = cp_start - sa_start;
346 len = cp_end - cp_start;
347
348 TRACE("copy_lc for: %lx\n", start);
349 if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
350 return -EFAULT;
351next:
352 i++;
353 }
354 return 0;
355}
356
357/*
358 * Read routine for zcore character device
359 * First 4K are dump header
360 * Next 32MB are HSA Memory
361 * Rest is read from absolute Memory
362 */
363static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
364 loff_t *ppos)
365{
366 unsigned long mem_start; /* Start address in memory */
367 size_t mem_offs; /* Offset in dump memory */
368 size_t hdr_count; /* Size of header part of output buffer */
369 size_t size;
370 int rc;
371
372 mutex_lock(&zcore_mutex);
373
374 if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
375 rc = -EINVAL;
376 goto fail;
377 }
378
379 count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));
380
381 /* Copy dump header */
382 if (*ppos < HEADER_SIZE) {
383 size = min(count, (size_t) (HEADER_SIZE - *ppos));
384 if (copy_to_user(buf, &zcore_header + *ppos, size)) {
385 rc = -EFAULT;
386 goto fail;
387 }
388 hdr_count = size;
389 mem_start = 0;
390 } else {
391 hdr_count = 0;
392 mem_start = *ppos - HEADER_SIZE;
393 }
394
395 mem_offs = 0;
396
397 /* Copy from HSA data */
398 if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
399 size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
400 - mem_start));
401 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
402 if (rc)
403 goto fail;
404
405 mem_offs += size;
406 }
407
408 /* Copy from real mem */
409 size = count - mem_offs - hdr_count;
410 rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
411 size);
412 if (rc)
413 goto fail;
414
415 /*
416 * Since s390 dump analysis tools like lcrash or crash
417 * expect register sets in the prefix pages of the cpus,
418 * we copy them into the read buffer, if necessary.
419 * buf + hdr_count: Start of memory part of output buffer
420 * mem_start: Start memory address to copy from
421 * count - hdr_count: Size of memory area to copy
422 */
423 if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
424 rc = -EFAULT;
425 goto fail;
426 }
427 *ppos += count;
428fail:
429 mutex_unlock(&zcore_mutex);
430 return (rc < 0) ? rc : count;
431}
432
433static int zcore_open(struct inode *inode, struct file *filp)
434{
435 if (!hsa_available)
436 return -ENODATA;
437 else
438 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
439}
440
/*
 * Closing the dump file releases the HSA back to the machine (diag 308);
 * afterwards the dump can no longer be read (zcore_open() fails).
 */
static int zcore_release(struct inode *inode, struct file *filep)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
	return 0;
}
447
448static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
449{
450 loff_t rc;
451
452 mutex_lock(&zcore_mutex);
453 switch (orig) {
454 case 0:
455 file->f_pos = offset;
456 rc = file->f_pos;
457 break;
458 case 1:
459 file->f_pos += offset;
460 rc = file->f_pos;
461 break;
462 default:
463 rc = -EINVAL;
464 }
465 mutex_unlock(&zcore_mutex);
466 return rc;
467}
468
/* File operations for the "zcore/mem" debugfs file (the dump image). */
static struct file_operations zcore_fops = {
	.owner		= THIS_MODULE,
	.llseek		= zcore_lseek,
	.read		= zcore_read,
	.open		= zcore_open,
	.release	= zcore_release,
};
476
477
/*
 * Mark every register field of the 31-bit save area as relevant in the
 * copy mask; copy_lc() later copies only bytes whose mask byte is
 * non-zero.
 */
static void __init set_s390_lc_mask(union save_area *map)
{
	memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
	memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
	memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
	memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
	memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
	memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
	memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
	memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
	memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
}
490
/*
 * Mark every register field of the 64-bit save area as relevant in the
 * copy mask; copy_lc() later copies only bytes whose mask byte is
 * non-zero.
 */
static void __init set_s390x_lc_mask(union save_area *map)
{
	memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
	memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
	memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
	memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
	memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
	memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
	memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
	memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
	memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
	memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
}
504
505/*
506 * Initialize dump globals for a given architecture
507 */
508static int __init sys_info_init(enum arch_id arch)
509{
510 switch (arch) {
511 case ARCH_S390X:
512 MSG("DETECTED 'S390X (64 bit) OS'\n");
513 sys_info.sa_base = SAVE_AREA_BASE_S390X;
514 sys_info.sa_size = sizeof(struct save_area_s390x);
515 set_s390x_lc_mask(&sys_info.lc_mask);
516 break;
517 case ARCH_S390:
518 MSG("DETECTED 'S390 (32 bit) OS'\n");
519 sys_info.sa_base = SAVE_AREA_BASE_S390;
520 sys_info.sa_size = sizeof(struct save_area_s390);
521 set_s390_lc_mask(&sys_info.lc_mask);
522 break;
523 default:
524 ERROR_MSG("unknown architecture 0x%x.\n",arch);
525 return -EINVAL;
526 }
527 sys_info.arch = arch;
528 if (init_cpu_info(arch)) {
529 ERROR_MSG("get cpu info failed\n");
530 return -ENOMEM;
531 }
532 sys_info.mem_size = real_memory_size;
533
534 return 0;
535}
536
537static int __init check_sdias(void)
538{
539 int rc, act_hsa_size;
540
541 rc = sclp_sdias_blk_count();
542 if (rc < 0) {
543 ERROR_MSG("Could not determine HSA size\n");
544 return rc;
545 }
546 act_hsa_size = (rc - 1) * PAGE_SIZE;
547 if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
548 ERROR_MSG("HSA size too small: %i\n", act_hsa_size);
549 return -EINVAL;
550 }
551 return 0;
552}
553
554static void __init zcore_header_init(int arch, struct zcore_header *hdr)
555{
556 if (arch == ARCH_S390X)
557 hdr->arch_id = DUMP_ARCH_S390X;
558 else
559 hdr->arch_id = DUMP_ARCH_S390;
560 hdr->mem_size = sys_info.mem_size;
561 hdr->mem_end = sys_info.mem_size;
562 hdr->num_pages = sys_info.mem_size / PAGE_SIZE;
563 hdr->tod = get_clock();
564 get_cpu_id(&hdr->cpu_id);
565}
566
567extern int sdias_init(void);
568
569static int __init zcore_init(void)
570{
571 unsigned char arch;
572 int rc;
573
574 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
575 return -ENODATA;
576
577 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
578 debug_register_view(zcore_dbf, &debug_sprintf_view);
579 debug_set_level(zcore_dbf, 6);
580
581 TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
582 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
583 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
584
585 rc = sdias_init();
586 if (rc)
587 goto fail;
588
589 rc = check_sdias();
590 if (rc) {
591 ERROR_MSG("Dump initialization failed\n");
592 goto fail;
593 }
594
595 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
596 if (rc) {
597 ERROR_MSG("sdial memcpy for arch id failed\n");
598 goto fail;
599 }
600
601#ifndef __s390x__
602 if (arch == ARCH_S390X) {
603 ERROR_MSG("32 bit dumper can't dump 64 bit system!\n");
604 rc = -EINVAL;
605 goto fail;
606 }
607#endif
608
609 rc = sys_info_init(arch);
610 if (rc) {
611 ERROR_MSG("arch init failed\n");
612 goto fail;
613 }
614
615 zcore_header_init(arch, &zcore_header);
616
617 zcore_dir = debugfs_create_dir("zcore" , NULL);
618 if (!zcore_dir) {
619 rc = -ENOMEM;
620 goto fail;
621 }
622 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
623 &zcore_fops);
624 if (!zcore_file) {
625 debugfs_remove(zcore_dir);
626 rc = -ENOMEM;
627 goto fail;
628 }
629 hsa_available = 1;
630 return 0;
631
632fail:
633 diag308(DIAG308_REL_HSA, NULL);
634 return rc;
635}
636
637extern void sdias_exit(void);
638
/*
 * Module exit: free the debug area, tear down the SDIAS interface and
 * release the HSA.
 * NOTE(review): debug_unregister() runs before sdias_exit(); whether a
 * late SDIAS event could still trace into zcore_dbf here depends on
 * sclp_unregister() semantics — confirm.
 */
static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sdias_exit();
	diag308(DIAG308_REL_HSA, NULL);
}
645
646MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
647MODULE_DESCRIPTION("zcore module for zfcpdump support");
648MODULE_LICENSE("GPL");
649
650subsys_initcall(zcore_init);
651module_exit(zcore_exit);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index c490c2a1c2fc..cfaf77b320f5 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 common i/o drivers 2# Makefile for the S/390 common i/o drivers
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
6ccw_device-objs += device.o device_fsm.o device_ops.o 6ccw_device-objs += device.o device_fsm.o device_ops.o
7ccw_device-objs += device_id.o device_pgid.o device_status.o 7ccw_device-objs += device_id.o device_pgid.o device_status.o
8obj-y += ccw_device.o cmf.o 8obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5aeb68e732b0..e5ccda63e883 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
75{ 75{
76 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 76 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
77 77
78 mutex_lock(&gdev->reg_mutex);
78 __ccwgroup_remove_symlinks(gdev); 79 __ccwgroup_remove_symlinks(gdev);
79 device_unregister(dev); 80 device_unregister(dev);
81 mutex_unlock(&gdev->reg_mutex);
80} 82}
81 83
82static ssize_t 84static ssize_t
@@ -173,7 +175,8 @@ ccwgroup_create(struct device *root,
173 return -ENOMEM; 175 return -ENOMEM;
174 176
175 atomic_set(&gdev->onoff, 0); 177 atomic_set(&gdev->onoff, 0);
176 178 mutex_init(&gdev->reg_mutex);
179 mutex_lock(&gdev->reg_mutex);
177 for (i = 0; i < argc; i++) { 180 for (i = 0; i < argc; i++) {
178 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); 181 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
179 182
@@ -183,12 +186,12 @@ ccwgroup_create(struct device *root,
183 || gdev->cdev[i]->id.driver_info != 186 || gdev->cdev[i]->id.driver_info !=
184 gdev->cdev[0]->id.driver_info) { 187 gdev->cdev[0]->id.driver_info) {
185 rc = -EINVAL; 188 rc = -EINVAL;
186 goto free_dev; 189 goto error;
187 } 190 }
188 /* Don't allow a device to belong to more than one group. */ 191 /* Don't allow a device to belong to more than one group. */
189 if (gdev->cdev[i]->dev.driver_data) { 192 if (gdev->cdev[i]->dev.driver_data) {
190 rc = -EINVAL; 193 rc = -EINVAL;
191 goto free_dev; 194 goto error;
192 } 195 }
193 gdev->cdev[i]->dev.driver_data = gdev; 196 gdev->cdev[i]->dev.driver_data = gdev;
194 } 197 }
@@ -203,9 +206,8 @@ ccwgroup_create(struct device *root,
203 gdev->cdev[0]->dev.bus_id); 206 gdev->cdev[0]->dev.bus_id);
204 207
205 rc = device_register(&gdev->dev); 208 rc = device_register(&gdev->dev);
206
207 if (rc) 209 if (rc)
208 goto free_dev; 210 goto error;
209 get_device(&gdev->dev); 211 get_device(&gdev->dev);
210 rc = device_create_file(&gdev->dev, &dev_attr_ungroup); 212 rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
211 213
@@ -216,6 +218,7 @@ ccwgroup_create(struct device *root,
216 218
217 rc = __ccwgroup_create_symlinks(gdev); 219 rc = __ccwgroup_create_symlinks(gdev);
218 if (!rc) { 220 if (!rc) {
221 mutex_unlock(&gdev->reg_mutex);
219 put_device(&gdev->dev); 222 put_device(&gdev->dev);
220 return 0; 223 return 0;
221 } 224 }
@@ -224,19 +227,12 @@ ccwgroup_create(struct device *root,
224error: 227error:
225 for (i = 0; i < argc; i++) 228 for (i = 0; i < argc; i++)
226 if (gdev->cdev[i]) { 229 if (gdev->cdev[i]) {
227 put_device(&gdev->cdev[i]->dev);
228 gdev->cdev[i]->dev.driver_data = NULL;
229 }
230 put_device(&gdev->dev);
231 return rc;
232free_dev:
233 for (i = 0; i < argc; i++)
234 if (gdev->cdev[i]) {
235 if (gdev->cdev[i]->dev.driver_data == gdev) 230 if (gdev->cdev[i]->dev.driver_data == gdev)
236 gdev->cdev[i]->dev.driver_data = NULL; 231 gdev->cdev[i]->dev.driver_data = NULL;
237 put_device(&gdev->cdev[i]->dev); 232 put_device(&gdev->cdev[i]->dev);
238 } 233 }
239 kfree(gdev); 234 mutex_unlock(&gdev->reg_mutex);
235 put_device(&gdev->dev);
240 return rc; 236 return rc;
241} 237}
242 238
@@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver)
422 get_driver(&cdriver->driver); 418 get_driver(&cdriver->driver);
423 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, 419 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
424 __ccwgroup_match_all))) { 420 __ccwgroup_match_all))) {
425 __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); 421 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
422
423 mutex_lock(&gdev->reg_mutex);
424 __ccwgroup_remove_symlinks(gdev);
426 device_unregister(dev); 425 device_unregister(dev);
426 mutex_unlock(&gdev->reg_mutex);
427 put_device(dev); 427 put_device(dev);
428 } 428 }
429 put_driver(&cdriver->driver); 429 put_driver(&cdriver->driver);
@@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
444 if (cdev->dev.driver_data) { 444 if (cdev->dev.driver_data) {
445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 445 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
446 if (get_device(&gdev->dev)) { 446 if (get_device(&gdev->dev)) {
447 mutex_lock(&gdev->reg_mutex);
447 if (device_is_registered(&gdev->dev)) 448 if (device_is_registered(&gdev->dev))
448 return gdev; 449 return gdev;
450 mutex_unlock(&gdev->reg_mutex);
449 put_device(&gdev->dev); 451 put_device(&gdev->dev);
450 } 452 }
451 return NULL; 453 return NULL;
@@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev)
465 if (gdev) { 467 if (gdev) {
466 __ccwgroup_remove_symlinks(gdev); 468 __ccwgroup_remove_symlinks(gdev);
467 device_unregister(&gdev->dev); 469 device_unregister(&gdev->dev);
470 mutex_unlock(&gdev->reg_mutex);
468 put_device(&gdev->dev); 471 put_device(&gdev->dev);
469 } 472 }
470} 473}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
new file mode 100644
index 000000000000..ac289e6eadfe
--- /dev/null
+++ b/drivers/s390/cio/chp.c
@@ -0,0 +1,683 @@
1/*
2 * drivers/s390/cio/chp.c
3 *
4 * Copyright IBM Corp. 1999,2007
5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
6 * Arnd Bergmann (arndb@de.ibm.com)
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 */
9
10#include <linux/bug.h>
11#include <linux/workqueue.h>
12#include <linux/spinlock.h>
13#include <linux/init.h>
14#include <linux/jiffies.h>
15#include <linux/wait.h>
16#include <linux/mutex.h>
17#include <asm/errno.h>
18#include <asm/chpid.h>
19#include <asm/sclp.h>
20
21#include "cio.h"
22#include "css.h"
23#include "ioasm.h"
24#include "cio_debug.h"
25#include "chp.h"
26
27#define to_channelpath(device) container_of(device, struct channel_path, dev)
28#define CHP_INFO_UPDATE_INTERVAL 1*HZ
29
30enum cfg_task_t {
31 cfg_none,
32 cfg_configure,
33 cfg_deconfigure
34};
35
36/* Map for pending configure tasks. */
37static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
38static DEFINE_MUTEX(cfg_lock);
39static int cfg_busy;
40
41/* Map for channel-path status. */
42static struct sclp_chp_info chp_info;
43static DEFINE_MUTEX(info_lock);
44
45/* Time after which channel-path status may be outdated. */
46static unsigned long chp_info_expires;
47
48/* Workqueue to perform pending configure tasks. */
49static struct workqueue_struct *chp_wq;
50static struct work_struct cfg_work;
51
52/* Wait queue for configure completion events. */
53static wait_queue_head_t cfg_wait_queue;
54
55/* Return channel_path struct for given chpid. */
56static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
57{
58 return css[chpid.cssid]->chps[chpid.id];
59}
60
61/* Set vary state for given chpid. */
62static void set_chp_logically_online(struct chp_id chpid, int onoff)
63{
64 chpid_to_chp(chpid)->state = onoff;
65}
66
67/* On succes return 0 if channel-path is varied offline, 1 if it is varied
68 * online. Return -ENODEV if channel-path is not registered. */
69int chp_get_status(struct chp_id chpid)
70{
71 return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
72}
73
74/**
75 * chp_get_sch_opm - return opm for subchannel
76 * @sch: subchannel
77 *
78 * Calculate and return the operational path mask (opm) based on the chpids
79 * used by the subchannel and the status of the associated channel-paths.
80 */
81u8 chp_get_sch_opm(struct subchannel *sch)
82{
83 struct chp_id chpid;
84 int opm;
85 int i;
86
87 opm = 0;
88 chp_id_init(&chpid);
89 for (i=0; i < 8; i++) {
90 opm <<= 1;
91 chpid.id = sch->schib.pmcw.chpid[i];
92 if (chp_get_status(chpid) != 0)
93 opm |= 1;
94 }
95 return opm;
96}
97
98/**
99 * chp_is_registered - check if a channel-path is registered
100 * @chpid: channel-path ID
101 *
102 * Return non-zero if a channel-path with the given chpid is registered,
103 * zero otherwise.
104 */
105int chp_is_registered(struct chp_id chpid)
106{
107 return chpid_to_chp(chpid) != NULL;
108}
109
110/*
111 * Function: s390_vary_chpid
112 * Varies the specified chpid online or offline
113 */
114static int s390_vary_chpid(struct chp_id chpid, int on)
115{
116 char dbf_text[15];
117 int status;
118
119 sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
120 chpid.id);
121 CIO_TRACE_EVENT( 2, dbf_text);
122
123 status = chp_get_status(chpid);
124 if (status < 0) {
125 printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
126 chpid.cssid, chpid.id);
127 return -EINVAL;
128 }
129
130 if (!on && !status) {
131 printk(KERN_ERR "chpid %x.%02x is already offline\n",
132 chpid.cssid, chpid.id);
133 return -EINVAL;
134 }
135
136 set_chp_logically_online(chpid, on);
137 chsc_chp_vary(chpid, on);
138 return 0;
139}
140
141/*
142 * Channel measurement related functions
143 */
144static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
145 loff_t off, size_t count)
146{
147 struct channel_path *chp;
148 unsigned int size;
149
150 chp = to_channelpath(container_of(kobj, struct device, kobj));
151 if (!chp->cmg_chars)
152 return 0;
153
154 size = sizeof(struct cmg_chars);
155
156 if (off > size)
157 return 0;
158 if (off + count > size)
159 count = size - off;
160 memcpy(buf, chp->cmg_chars + off, count);
161 return count;
162}
163
164static struct bin_attribute chp_measurement_chars_attr = {
165 .attr = {
166 .name = "measurement_chars",
167 .mode = S_IRUSR,
168 .owner = THIS_MODULE,
169 },
170 .size = sizeof(struct cmg_chars),
171 .read = chp_measurement_chars_read,
172};
173
174static void chp_measurement_copy_block(struct cmg_entry *buf,
175 struct channel_subsystem *css,
176 struct chp_id chpid)
177{
178 void *area;
179 struct cmg_entry *entry, reference_buf;
180 int idx;
181
182 if (chpid.id < 128) {
183 area = css->cub_addr1;
184 idx = chpid.id;
185 } else {
186 area = css->cub_addr2;
187 idx = chpid.id - 128;
188 }
189 entry = area + (idx * sizeof(struct cmg_entry));
190 do {
191 memcpy(buf, entry, sizeof(*entry));
192 memcpy(&reference_buf, entry, sizeof(*entry));
193 } while (reference_buf.values[0] != buf->values[0]);
194}
195
196static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
197 loff_t off, size_t count)
198{
199 struct channel_path *chp;
200 struct channel_subsystem *css;
201 unsigned int size;
202
203 chp = to_channelpath(container_of(kobj, struct device, kobj));
204 css = to_css(chp->dev.parent);
205
206 size = sizeof(struct cmg_entry);
207
208 /* Only allow single reads. */
209 if (off || count < size)
210 return 0;
211 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
212 count = size;
213 return count;
214}
215
216static struct bin_attribute chp_measurement_attr = {
217 .attr = {
218 .name = "measurement",
219 .mode = S_IRUSR,
220 .owner = THIS_MODULE,
221 },
222 .size = sizeof(struct cmg_entry),
223 .read = chp_measurement_read,
224};
225
226void chp_remove_cmg_attr(struct channel_path *chp)
227{
228 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
229 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
230}
231
232int chp_add_cmg_attr(struct channel_path *chp)
233{
234 int ret;
235
236 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
237 if (ret)
238 return ret;
239 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
240 if (ret)
241 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
242 return ret;
243}
244
245/*
246 * Files for the channel path entries.
247 */
248static ssize_t chp_status_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct channel_path *chp = container_of(dev, struct channel_path, dev);
252
253 if (!chp)
254 return 0;
255 return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
256 sprintf(buf, "offline\n"));
257}
258
259static ssize_t chp_status_write(struct device *dev,
260 struct device_attribute *attr,
261 const char *buf, size_t count)
262{
263 struct channel_path *cp = container_of(dev, struct channel_path, dev);
264 char cmd[10];
265 int num_args;
266 int error;
267
268 num_args = sscanf(buf, "%5s", cmd);
269 if (!num_args)
270 return count;
271
272 if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
273 error = s390_vary_chpid(cp->chpid, 1);
274 else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
275 error = s390_vary_chpid(cp->chpid, 0);
276 else
277 error = -EINVAL;
278
279 return error < 0 ? error : count;
280
281}
282
283static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
284
285static ssize_t chp_configure_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 struct channel_path *cp;
289 int status;
290
291 cp = container_of(dev, struct channel_path, dev);
292 status = chp_info_get_status(cp->chpid);
293 if (status < 0)
294 return status;
295
296 return snprintf(buf, PAGE_SIZE, "%d\n", status);
297}
298
299static int cfg_wait_idle(void);
300
301static ssize_t chp_configure_write(struct device *dev,
302 struct device_attribute *attr,
303 const char *buf, size_t count)
304{
305 struct channel_path *cp;
306 int val;
307 char delim;
308
309 if (sscanf(buf, "%d %c", &val, &delim) != 1)
310 return -EINVAL;
311 if (val != 0 && val != 1)
312 return -EINVAL;
313 cp = container_of(dev, struct channel_path, dev);
314 chp_cfg_schedule(cp->chpid, val);
315 cfg_wait_idle();
316
317 return count;
318}
319
320static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
321
322static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
323 char *buf)
324{
325 struct channel_path *chp = container_of(dev, struct channel_path, dev);
326
327 if (!chp)
328 return 0;
329 return sprintf(buf, "%x\n", chp->desc.desc);
330}
331
332static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
333
334static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
335 char *buf)
336{
337 struct channel_path *chp = to_channelpath(dev);
338
339 if (!chp)
340 return 0;
341 if (chp->cmg == -1) /* channel measurements not available */
342 return sprintf(buf, "unknown\n");
343 return sprintf(buf, "%x\n", chp->cmg);
344}
345
346static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
347
348static ssize_t chp_shared_show(struct device *dev,
349 struct device_attribute *attr, char *buf)
350{
351 struct channel_path *chp = to_channelpath(dev);
352
353 if (!chp)
354 return 0;
355 if (chp->shared == -1) /* channel measurements not available */
356 return sprintf(buf, "unknown\n");
357 return sprintf(buf, "%x\n", chp->shared);
358}
359
360static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
361
362static struct attribute * chp_attrs[] = {
363 &dev_attr_status.attr,
364 &dev_attr_configure.attr,
365 &dev_attr_type.attr,
366 &dev_attr_cmg.attr,
367 &dev_attr_shared.attr,
368 NULL,
369};
370
371static struct attribute_group chp_attr_group = {
372 .attrs = chp_attrs,
373};
374
375static void chp_release(struct device *dev)
376{
377 struct channel_path *cp;
378
379 cp = container_of(dev, struct channel_path, dev);
380 kfree(cp);
381}
382
383/**
384 * chp_new - register a new channel-path
385 * @chpid - channel-path ID
386 *
387 * Create and register data structure representing new channel-path. Return
388 * zero on success, non-zero otherwise.
389 */
390int chp_new(struct chp_id chpid)
391{
392 struct channel_path *chp;
393 int ret;
394
395 if (chp_is_registered(chpid))
396 return 0;
397 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
398 if (!chp)
399 return -ENOMEM;
400
401 /* fill in status, etc. */
402 chp->chpid = chpid;
403 chp->state = 1;
404 chp->dev.parent = &css[chpid.cssid]->device;
405 chp->dev.release = chp_release;
406 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
407 chpid.id);
408
409 /* Obtain channel path description and fill it in. */
410 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
411 if (ret)
412 goto out_free;
413 if ((chp->desc.flags & 0x80) == 0) {
414 ret = -ENODEV;
415 goto out_free;
416 }
417 /* Get channel-measurement characteristics. */
418 if (css_characteristics_avail && css_chsc_characteristics.scmc
419 && css_chsc_characteristics.secm) {
420 ret = chsc_get_channel_measurement_chars(chp);
421 if (ret)
422 goto out_free;
423 } else {
424 static int msg_done;
425
426 if (!msg_done) {
427 printk(KERN_WARNING "cio: Channel measurements not "
428 "available, continuing.\n");
429 msg_done = 1;
430 }
431 chp->cmg = -1;
432 }
433
434 /* make it known to the system */
435 ret = device_register(&chp->dev);
436 if (ret) {
437 printk(KERN_WARNING "%s: could not register %x.%02x\n",
438 __func__, chpid.cssid, chpid.id);
439 goto out_free;
440 }
441 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
442 if (ret) {
443 device_unregister(&chp->dev);
444 goto out_free;
445 }
446 mutex_lock(&css[chpid.cssid]->mutex);
447 if (css[chpid.cssid]->cm_enabled) {
448 ret = chp_add_cmg_attr(chp);
449 if (ret) {
450 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
451 device_unregister(&chp->dev);
452 mutex_unlock(&css[chpid.cssid]->mutex);
453 goto out_free;
454 }
455 }
456 css[chpid.cssid]->chps[chpid.id] = chp;
457 mutex_unlock(&css[chpid.cssid]->mutex);
458 return ret;
459out_free:
460 kfree(chp);
461 return ret;
462}
463
464/**
465 * chp_get_chp_desc - return newly allocated channel-path description
466 * @chpid: channel-path ID
467 *
468 * On success return a newly allocated copy of the channel-path description
469 * data associated with the given channel-path ID. Return %NULL on error.
470 */
471void *chp_get_chp_desc(struct chp_id chpid)
472{
473 struct channel_path *chp;
474 struct channel_path_desc *desc;
475
476 chp = chpid_to_chp(chpid);
477 if (!chp)
478 return NULL;
479 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
480 if (!desc)
481 return NULL;
482 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
483 return desc;
484}
485
486/**
487 * chp_process_crw - process channel-path status change
488 * @id: channel-path ID number
489 * @status: non-zero if channel-path has become available, zero otherwise
490 *
491 * Handle channel-report-words indicating that the status of a channel-path
492 * has changed.
493 */
494void chp_process_crw(int id, int status)
495{
496 struct chp_id chpid;
497
498 chp_id_init(&chpid);
499 chpid.id = id;
500 if (status) {
501 if (!chp_is_registered(chpid))
502 chp_new(chpid);
503 chsc_chp_online(chpid);
504 } else
505 chsc_chp_offline(chpid);
506}
507
508static inline int info_bit_num(struct chp_id id)
509{
510 return id.id + id.cssid * (__MAX_CHPID + 1);
511}
512
513/* Force chp_info refresh on next call to info_validate(). */
514static void info_expire(void)
515{
516 mutex_lock(&info_lock);
517 chp_info_expires = jiffies - 1;
518 mutex_unlock(&info_lock);
519}
520
521/* Ensure that chp_info is up-to-date. */
522static int info_update(void)
523{
524 int rc;
525
526 mutex_lock(&info_lock);
527 rc = 0;
528 if (time_after(jiffies, chp_info_expires)) {
529 /* Data is too old, update. */
530 rc = sclp_chp_read_info(&chp_info);
531 chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
532 }
533 mutex_unlock(&info_lock);
534
535 return rc;
536}
537
538/**
539 * chp_info_get_status - retrieve configure status of a channel-path
540 * @chpid: channel-path ID
541 *
542 * On success, return 0 for standby, 1 for configured, 2 for reserved,
543 * 3 for not recognized. Return negative error code on error.
544 */
545int chp_info_get_status(struct chp_id chpid)
546{
547 int rc;
548 int bit;
549
550 rc = info_update();
551 if (rc)
552 return rc;
553
554 bit = info_bit_num(chpid);
555 mutex_lock(&info_lock);
556 if (!chp_test_bit(chp_info.recognized, bit))
557 rc = CHP_STATUS_NOT_RECOGNIZED;
558 else if (chp_test_bit(chp_info.configured, bit))
559 rc = CHP_STATUS_CONFIGURED;
560 else if (chp_test_bit(chp_info.standby, bit))
561 rc = CHP_STATUS_STANDBY;
562 else
563 rc = CHP_STATUS_RESERVED;
564 mutex_unlock(&info_lock);
565
566 return rc;
567}
568
569/* Return configure task for chpid. */
570static enum cfg_task_t cfg_get_task(struct chp_id chpid)
571{
572 return chp_cfg_task[chpid.cssid][chpid.id];
573}
574
575/* Set configure task for chpid. */
576static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
577{
578 chp_cfg_task[chpid.cssid][chpid.id] = cfg;
579}
580
581/* Perform one configure/deconfigure request. Reschedule work function until
582 * last request. */
583static void cfg_func(struct work_struct *work)
584{
585 struct chp_id chpid;
586 enum cfg_task_t t;
587
588 mutex_lock(&cfg_lock);
589 t = cfg_none;
590 chp_id_for_each(&chpid) {
591 t = cfg_get_task(chpid);
592 if (t != cfg_none) {
593 cfg_set_task(chpid, cfg_none);
594 break;
595 }
596 }
597 mutex_unlock(&cfg_lock);
598
599 switch (t) {
600 case cfg_configure:
601 sclp_chp_configure(chpid);
602 info_expire();
603 chsc_chp_online(chpid);
604 break;
605 case cfg_deconfigure:
606 sclp_chp_deconfigure(chpid);
607 info_expire();
608 chsc_chp_offline(chpid);
609 break;
610 case cfg_none:
611 /* Get updated information after last change. */
612 info_update();
613 mutex_lock(&cfg_lock);
614 cfg_busy = 0;
615 mutex_unlock(&cfg_lock);
616 wake_up_interruptible(&cfg_wait_queue);
617 return;
618 }
619 queue_work(chp_wq, &cfg_work);
620}
621
622/**
623 * chp_cfg_schedule - schedule chpid configuration request
624 * @chpid - channel-path ID
625 * @configure - Non-zero for configure, zero for deconfigure
626 *
627 * Schedule a channel-path configuration/deconfiguration request.
628 */
629void chp_cfg_schedule(struct chp_id chpid, int configure)
630{
631 CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
632 configure);
633 mutex_lock(&cfg_lock);
634 cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
635 cfg_busy = 1;
636 mutex_unlock(&cfg_lock);
637 queue_work(chp_wq, &cfg_work);
638}
639
640/**
641 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
642 * @chpid - channel-path ID
643 *
644 * Cancel an active channel-path deconfiguration request if it has not yet
645 * been performed.
646 */
647void chp_cfg_cancel_deconfigure(struct chp_id chpid)
648{
649 CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
650 mutex_lock(&cfg_lock);
651 if (cfg_get_task(chpid) == cfg_deconfigure)
652 cfg_set_task(chpid, cfg_none);
653 mutex_unlock(&cfg_lock);
654}
655
656static int cfg_wait_idle(void)
657{
658 if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
659 return -ERESTARTSYS;
660 return 0;
661}
662
663static int __init chp_init(void)
664{
665 struct chp_id chpid;
666
667 chp_wq = create_singlethread_workqueue("cio_chp");
668 if (!chp_wq)
669 return -ENOMEM;
670 INIT_WORK(&cfg_work, cfg_func);
671 init_waitqueue_head(&cfg_wait_queue);
672 if (info_update())
673 return 0;
674 /* Register available channel-paths. */
675 chp_id_for_each(&chpid) {
676 if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
677 chp_new(chpid);
678 }
679
680 return 0;
681}
682
683subsys_initcall(chp_init);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
new file mode 100644
index 000000000000..65286563c592
--- /dev/null
+++ b/drivers/s390/cio/chp.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/cio/chp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_CHP_H
9#define S390_CHP_H S390_CHP_H
10
11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/chpid.h>
14#include "chsc.h"
15
16#define CHP_STATUS_STANDBY 0
17#define CHP_STATUS_CONFIGURED 1
18#define CHP_STATUS_RESERVED 2
19#define CHP_STATUS_NOT_RECOGNIZED 3
20
21static inline int chp_test_bit(u8 *bitmap, int num)
22{
23 int byte = num >> 3;
24 int mask = 128 >> (num & 7);
25
26 return (bitmap[byte] & mask) ? 1 : 0;
27}
28
29
30struct channel_path {
31 struct chp_id chpid;
32 int state;
33 struct channel_path_desc desc;
34 /* Channel-measurement related stuff: */
35 int cmg;
36 int shared;
37 void *cmg_chars;
38 struct device dev;
39};
40
41int chp_get_status(struct chp_id chpid);
42u8 chp_get_sch_opm(struct subchannel *sch);
43int chp_is_registered(struct chp_id chpid);
44void *chp_get_chp_desc(struct chp_id chpid);
45void chp_process_crw(int id, int available);
46void chp_remove_cmg_attr(struct channel_path *chp);
47int chp_add_cmg_attr(struct channel_path *chp);
48int chp_new(struct chp_id chpid);
49void chp_cfg_schedule(struct chp_id chpid, int configure);
50void chp_cfg_cancel_deconfigure(struct chp_id chpid);
51int chp_info_get_status(struct chp_id chpid);
52
53#endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e3817..ea92ac4d6577 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
15#include <linux/device.h> 15#include <linux/device.h>
16 16
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "css.h" 20#include "css.h"
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "ioasm.h" 23#include "ioasm.h"
24#include "chp.h"
23#include "chsc.h" 25#include "chsc.h"
24 26
25static void *sei_page; 27static void *sei_page;
26 28
27static int new_channel_path(int chpid); 29struct chsc_ssd_area {
28 30 struct chsc_header request;
29static inline void 31 u16 :10;
30set_chp_logically_online(int chp, int onoff) 32 u16 ssid:2;
31{ 33 u16 :4;
32 css[0]->chps[chp]->state = onoff; 34 u16 f_sch; /* first subchannel */
33} 35 u16 :16;
34 36 u16 l_sch; /* last subchannel */
35static int 37 u32 :32;
36get_chp_status(int chp) 38 struct chsc_header response;
37{ 39 u32 :32;
38 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); 40 u8 sch_valid : 1;
39} 41 u8 dev_valid : 1;
40 42 u8 st : 3; /* subchannel type */
41void 43 u8 zeroes : 3;
42chsc_validate_chpids(struct subchannel *sch) 44 u8 unit_addr; /* unit address */
43{ 45 u16 devno; /* device number */
44 int mask, chp; 46 u8 path_mask;
45 47 u8 fla_valid_mask;
46 for (chp = 0; chp <= 7; chp++) { 48 u16 sch; /* subchannel */
47 mask = 0x80 >> chp; 49 u8 chpid[8]; /* chpids 0-7 */
48 if (!get_chp_status(sch->schib.pmcw.chpid[chp])) 50 u16 fla[8]; /* full link addresses 0-7 */
49 /* disable using this path */ 51} __attribute__ ((packed));
50 sch->opm &= ~mask;
51 }
52}
53
54void
55chpid_is_actually_online(int chp)
56{
57 int state;
58
59 state = get_chp_status(chp);
60 if (state < 0) {
61 need_rescan = 1;
62 queue_work(slow_path_wq, &slow_path_work);
63 } else
64 WARN_ON(!state);
65}
66 52
67/* FIXME: this is _always_ called for every subchannel. shouldn't we 53int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
68 * process more than one at a time? */
69static int
70chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
71{ 54{
72 int ccode, j; 55 unsigned long page;
73 56 struct chsc_ssd_area *ssd_area;
74 struct { 57 int ccode;
75 struct chsc_header request; 58 int ret;
76 u16 reserved1a:10; 59 int i;
77 u16 ssid:2; 60 int mask;
78 u16 reserved1b:4;
79 u16 f_sch; /* first subchannel */
80 u16 reserved2;
81 u16 l_sch; /* last subchannel */
82 u32 reserved3;
83 struct chsc_header response;
84 u32 reserved4;
85 u8 sch_valid : 1;
86 u8 dev_valid : 1;
87 u8 st : 3; /* subchannel type */
88 u8 zeroes : 3;
89 u8 unit_addr; /* unit address */
90 u16 devno; /* device number */
91 u8 path_mask;
92 u8 fla_valid_mask;
93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */
96 } __attribute__ ((packed)) *ssd_area;
97
98 ssd_area = page;
99 61
62 page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
63 if (!page)
64 return -ENOMEM;
65 ssd_area = (struct chsc_ssd_area *) page;
100 ssd_area->request.length = 0x0010; 66 ssd_area->request.length = 0x0010;
101 ssd_area->request.code = 0x0004; 67 ssd_area->request.code = 0x0004;
102 68 ssd_area->ssid = schid.ssid;
103 ssd_area->ssid = sch->schid.ssid; 69 ssd_area->f_sch = schid.sch_no;
104 ssd_area->f_sch = sch->schid.sch_no; 70 ssd_area->l_sch = schid.sch_no;
105 ssd_area->l_sch = sch->schid.sch_no;
106 71
107 ccode = chsc(ssd_area); 72 ccode = chsc(ssd_area);
73 /* Check response. */
108 if (ccode > 0) { 74 if (ccode > 0) {
109 pr_debug("chsc returned with ccode = %d\n", ccode); 75 ret = (ccode == 3) ? -ENODEV : -EBUSY;
110 return (ccode == 3) ? -ENODEV : -EBUSY; 76 goto out_free;
111 } 77 }
112 78 if (ssd_area->response.code != 0x0001) {
113 switch (ssd_area->response.code) { 79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
114 case 0x0001: /* everything ok */ 80 schid.ssid, schid.sch_no,
115 break;
116 case 0x0002:
117 CIO_CRW_EVENT(2, "Invalid command!\n");
118 return -EINVAL;
119 case 0x0003:
120 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
121 return -EINVAL;
122 case 0x0004:
123 CIO_CRW_EVENT(2, "Model does not provide ssd\n");
124 return -EOPNOTSUPP;
125 default:
126 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
127 ssd_area->response.code); 81 ssd_area->response.code);
128 return -EIO; 82 ret = -EIO;
83 goto out_free;
129 } 84 }
130 85 if (!ssd_area->sch_valid) {
131 /* 86 ret = -ENODEV;
132 * ssd_area->st stores the type of the detected 87 goto out_free;
133 * subchannel, with the following definitions:
134 *
135 * 0: I/O subchannel: All fields have meaning
136 * 1: CHSC subchannel: Only sch_val, st and sch
137 * have meaning
138 * 2: Message subchannel: All fields except unit_addr
139 * have meaning
140 * 3: ADM subchannel: Only sch_val, st and sch
141 * have meaning
142 *
143 * Other types are currently undefined.
144 */
145 if (ssd_area->st > 3) { /* uhm, that looks strange... */
146 CIO_CRW_EVENT(0, "Strange subchannel type %d"
147 " for sch 0.%x.%04x\n", ssd_area->st,
148 sch->schid.ssid, sch->schid.sch_no);
149 /*
150 * There may have been a new subchannel type defined in the
151 * time since this code was written; since we don't know which
152 * fields have meaning and what to do with it we just jump out
153 */
154 return 0;
155 } else {
156 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
157 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
158 sch->schid.ssid, sch->schid.sch_no,
159 type[ssd_area->st]);
160
161 sch->ssd_info.valid = 1;
162 sch->ssd_info.type = ssd_area->st;
163 } 88 }
164 89 /* Copy data */
165 if (ssd_area->st == 0 || ssd_area->st == 2) { 90 ret = 0;
166 for (j = 0; j < 8; j++) { 91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
167 if (!((0x80 >> j) & ssd_area->path_mask & 92 if ((ssd_area->st != 0) && (ssd_area->st != 2))
168 ssd_area->fla_valid_mask)) 93 goto out_free;
169 continue; 94 ssd->path_mask = ssd_area->path_mask;
170 sch->ssd_info.chpid[j] = ssd_area->chpid[j]; 95 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
171 sch->ssd_info.fla[j] = ssd_area->fla[j]; 96 for (i = 0; i < 8; i++) {
97 mask = 0x80 >> i;
98 if (ssd_area->path_mask & mask) {
99 chp_id_init(&ssd->chpid[i]);
100 ssd->chpid[i].id = ssd_area->chpid[i];
172 } 101 }
102 if (ssd_area->fla_valid_mask & mask)
103 ssd->fla[i] = ssd_area->fla[i];
173 } 104 }
174 return 0; 105out_free:
106 free_page(page);
107 return ret;
175} 108}
176 109
177int 110static int check_for_io_on_path(struct subchannel *sch, int mask)
178css_get_ssd_info(struct subchannel *sch)
179{ 111{
180 int ret; 112 int cc;
181 void *page;
182 113
183 page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 114 cc = stsch(sch->schid, &sch->schib);
184 if (!page) 115 if (cc)
185 return -ENOMEM; 116 return 0;
186 spin_lock_irq(sch->lock); 117 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
187 ret = chsc_get_sch_desc_irq(sch, page); 118 return 1;
188 if (ret) { 119 return 0;
189 static int cio_chsc_err_msg; 120}
190 121
191 if (!cio_chsc_err_msg) { 122static void terminate_internal_io(struct subchannel *sch)
192 printk(KERN_ERR 123{
193 "chsc_get_sch_descriptions:" 124 if (cio_clear(sch)) {
194 " Error %d while doing chsc; " 125 /* Recheck device in case clear failed. */
195 "processing some machine checks may " 126 sch->lpm = 0;
196 "not work\n", ret); 127 if (device_trigger_verify(sch) != 0)
197 cio_chsc_err_msg = 1; 128 css_schedule_eval(sch->schid);
198 } 129 return;
199 }
200 spin_unlock_irq(sch->lock);
201 free_page((unsigned long)page);
202 if (!ret) {
203 int j, chpid, mask;
204 /* Allocate channel path structures, if needed. */
205 for (j = 0; j < 8; j++) {
206 mask = 0x80 >> j;
207 chpid = sch->ssd_info.chpid[j];
208 if ((sch->schib.pmcw.pim & mask) &&
209 (get_chp_status(chpid) < 0))
210 new_channel_path(chpid);
211 }
212 } 130 }
213 return ret; 131 /* Request retry of internal operation. */
132 device_set_intretry(sch);
133 /* Call handler. */
134 if (sch->driver && sch->driver->termination)
135 sch->driver->termination(&sch->dev);
214} 136}
215 137
216static int 138static int
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
219 int j; 141 int j;
220 int mask; 142 int mask;
221 struct subchannel *sch; 143 struct subchannel *sch;
222 struct channel_path *chpid; 144 struct chp_id *chpid;
223 struct schib schib; 145 struct schib schib;
224 146
225 sch = to_subchannel(dev); 147 sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
243 if (sch->schib.pmcw.pim == 0x80) 165 if (sch->schib.pmcw.pim == 0x80)
244 goto out_unreg; 166 goto out_unreg;
245 167
246 if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && 168 if (check_for_io_on_path(sch, mask)) {
247 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && 169 if (device_is_online(sch))
248 (sch->schib.pmcw.lpum == mask)) { 170 device_kill_io(sch);
249 int cc; 171 else {
250 172 terminate_internal_io(sch);
251 cc = cio_clear(sch); 173 /* Re-start path verification. */
252 if (cc == -ENODEV) 174 if (sch->driver && sch->driver->verify)
175 sch->driver->verify(&sch->dev);
176 }
177 } else {
178 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify)
180 sch->driver->verify(&sch->dev);
181 else if (sch->lpm == mask)
253 goto out_unreg; 182 goto out_unreg;
254 /* Request retry of internal operation. */
255 device_set_intretry(sch);
256 /* Call handler. */
257 if (sch->driver && sch->driver->termination)
258 sch->driver->termination(&sch->dev);
259 goto out_unlock;
260 } 183 }
261 184
262 /* trigger path verification. */
263 if (sch->driver && sch->driver->verify)
264 sch->driver->verify(&sch->dev);
265 else if (sch->lpm == mask)
266 goto out_unreg;
267out_unlock:
268 spin_unlock_irq(sch->lock); 185 spin_unlock_irq(sch->lock);
269 return 0; 186 return 0;
187
270out_unreg: 188out_unreg:
271 spin_unlock_irq(sch->lock);
272 sch->lpm = 0; 189 sch->lpm = 0;
273 if (css_enqueue_subchannel_slow(sch->schid)) { 190 spin_unlock_irq(sch->lock);
274 css_clear_subchannel_slow_list(); 191 css_schedule_eval(sch->schid);
275 need_rescan = 1;
276 }
277 return 0; 192 return 0;
278} 193}
279 194
280static void 195void chsc_chp_offline(struct chp_id chpid)
281s390_set_chpid_offline( __u8 chpid)
282{ 196{
283 char dbf_txt[15]; 197 char dbf_txt[15];
284 struct device *dev;
285 198
286 sprintf(dbf_txt, "chpr%x", chpid); 199 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
287 CIO_TRACE_EVENT(2, dbf_txt); 200 CIO_TRACE_EVENT(2, dbf_txt);
288 201
289 if (get_chp_status(chpid) <= 0) 202 if (chp_get_status(chpid) <= 0)
290 return; 203 return;
291 dev = get_device(&css[0]->chps[chpid]->dev); 204 bus_for_each_dev(&css_bus_type, NULL, &chpid,
292 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
293 s390_subchannel_remove_chpid); 205 s390_subchannel_remove_chpid);
294
295 if (need_rescan || css_slow_subchannels_exist())
296 queue_work(slow_path_wq, &slow_path_work);
297 put_device(dev);
298}
299
300struct res_acc_data {
301 struct channel_path *chp;
302 u32 fla_mask;
303 u16 fla;
304};
305
306static int
307s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
308{
309 int found;
310 int chp;
311 int ccode;
312
313 found = 0;
314 for (chp = 0; chp <= 7; chp++)
315 /*
316 * check if chpid is in information updated by ssd
317 */
318 if (sch->ssd_info.valid &&
319 sch->ssd_info.chpid[chp] == res_data->chp->id &&
320 (sch->ssd_info.fla[chp] & res_data->fla_mask)
321 == res_data->fla) {
322 found = 1;
323 break;
324 }
325
326 if (found == 0)
327 return 0;
328
329 /*
330 * Do a stsch to update our subchannel structure with the
331 * new path information and eventually check for logically
332 * offline chpids.
333 */
334 ccode = stsch(sch->schid, &sch->schib);
335 if (ccode > 0)
336 return 0;
337
338 return 0x80 >> chp;
339} 206}
340 207
341static int 208static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 209s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 210{
344 struct schib schib; 211 struct schib schib;
345 int ret;
346 /* 212 /*
347 * We don't know the device yet, but since a path 213 * We don't know the device yet, but since a path
348 * may be available now to the device we'll have 214 * may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
353 */ 219 */
354 if (stsch_err(schid, &schib)) 220 if (stsch_err(schid, &schib))
355 /* We're through */ 221 /* We're through */
356 return need_rescan ? -EAGAIN : -ENXIO; 222 return -ENXIO;
357 223
358 /* Put it on the slow path. */ 224 /* Put it on the slow path. */
359 ret = css_enqueue_subchannel_slow(schid); 225 css_schedule_eval(schid);
360 if (ret) { 226 return 0;
361 css_clear_subchannel_slow_list(); 227}
362 need_rescan = 1; 228
363 return -EAGAIN; 229struct res_acc_data {
230 struct chp_id chpid;
231 u32 fla_mask;
232 u16 fla;
233};
234
235static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
236 struct res_acc_data *data)
237{
238 int i;
239 int mask;
240
241 for (i = 0; i < 8; i++) {
242 mask = 0x80 >> i;
243 if (!(ssd->path_mask & mask))
244 continue;
245 if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
246 continue;
247 if ((ssd->fla_valid_mask & mask) &&
248 ((ssd->fla[i] & data->fla_mask) != data->fla))
249 continue;
250 return mask;
364 } 251 }
365 return 0; 252 return 0;
366} 253}
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
379 return s390_process_res_acc_new_sch(schid); 266 return s390_process_res_acc_new_sch(schid);
380 267
381 spin_lock_irq(sch->lock); 268 spin_lock_irq(sch->lock);
382 269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
383 chp_mask = s390_process_res_acc_sch(res_data, sch); 270 if (chp_mask == 0)
384 271 goto out;
385 if (chp_mask == 0) { 272 if (stsch(sch->schid, &sch->schib))
386 spin_unlock_irq(sch->lock); 273 goto out;
387 put_device(&sch->dev);
388 return 0;
389 }
390 old_lpm = sch->lpm; 274 old_lpm = sch->lpm;
391 sch->lpm = ((sch->schib.pmcw.pim & 275 sch->lpm = ((sch->schib.pmcw.pim &
392 sch->schib.pmcw.pam & 276 sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
396 device_trigger_reprobe(sch); 280 device_trigger_reprobe(sch);
397 else if (sch->driver && sch->driver->verify) 281 else if (sch->driver && sch->driver->verify)
398 sch->driver->verify(&sch->dev); 282 sch->driver->verify(&sch->dev);
399 283out:
400 spin_unlock_irq(sch->lock); 284 spin_unlock_irq(sch->lock);
401 put_device(&sch->dev); 285 put_device(&sch->dev);
402 return 0; 286 return 0;
403} 287}
404 288
405 289static void s390_process_res_acc (struct res_acc_data *res_data)
406static int
407s390_process_res_acc (struct res_acc_data *res_data)
408{ 290{
409 int rc;
410 char dbf_txt[15]; 291 char dbf_txt[15];
411 292
412 sprintf(dbf_txt, "accpr%x", res_data->chp->id); 293 sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
294 res_data->chpid.id);
413 CIO_TRACE_EVENT( 2, dbf_txt); 295 CIO_TRACE_EVENT( 2, dbf_txt);
414 if (res_data->fla != 0) { 296 if (res_data->fla != 0) {
415 sprintf(dbf_txt, "fla%x", res_data->fla); 297 sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
423 * The more information we have (info), the less scanning 305 * The more information we have (info), the less scanning
424 * will we have to do. 306 * will we have to do.
425 */ 307 */
426 rc = for_each_subchannel(__s390_process_res_acc, res_data); 308 for_each_subchannel(__s390_process_res_acc, res_data);
427 if (css_slow_subchannels_exist())
428 rc = -EAGAIN;
429 else if (rc != -EAGAIN)
430 rc = 0;
431 return rc;
432} 309}
433 310
434static int 311static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
480 /* ccdf has to be big enough for a link-incident record */ 357 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed)); 358} __attribute__ ((packed));
482 359
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) 360static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{ 361{
485 int chpid; 362 struct chp_id chpid;
363 int id;
486 364
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", 365 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid); 366 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4) 367 if (sei_area->rs != 4)
490 return 0; 368 return;
491 chpid = __get_chpid_from_lir(sei_area->ccdf); 369 id = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0) 370 if (id < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); 371 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else 372 else {
495 s390_set_chpid_offline(chpid); 373 chp_id_init(&chpid);
496 374 chpid.id = id;
497 return 0; 375 chsc_chp_offline(chpid);
376 }
498} 377}
499 378
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 379static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
501{ 380{
502 struct res_acc_data res_data; 381 struct res_acc_data res_data;
503 struct device *dev; 382 struct chp_id chpid;
504 int status; 383 int status;
505 int rc;
506 384
507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " 385 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); 386 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
509 if (sei_area->rs != 4) 387 if (sei_area->rs != 4)
510 return 0; 388 return;
389 chp_id_init(&chpid);
390 chpid.id = sei_area->rsid;
511 /* allocate a new channel path structure, if needed */ 391 /* allocate a new channel path structure, if needed */
512 status = get_chp_status(sei_area->rsid); 392 status = chp_get_status(chpid);
513 if (status < 0) 393 if (status < 0)
514 new_channel_path(sei_area->rsid); 394 chp_new(chpid);
515 else if (!status) 395 else if (!status)
516 return 0; 396 return;
517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
518 memset(&res_data, 0, sizeof(struct res_acc_data)); 397 memset(&res_data, 0, sizeof(struct res_acc_data));
519 res_data.chp = to_channelpath(dev); 398 res_data.chpid = chpid;
520 if ((sei_area->vf & 0xc0) != 0) { 399 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla; 400 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0) 401 if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
526 /* link address */ 405 /* link address */
527 res_data.fla_mask = 0xff00; 406 res_data.fla_mask = 0xff00;
528 } 407 }
529 rc = s390_process_res_acc(&res_data); 408 s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533} 409}
534 410
535static int chsc_process_sei(struct chsc_sei_area *sei_area) 411struct chp_config_data {
412 u8 map[32];
413 u8 op;
414 u8 pc;
415};
416
417static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
536{ 418{
537 int rc; 419 struct chp_config_data *data;
420 struct chp_id chpid;
421 int num;
422
423 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
424 if (sei_area->rs != 0)
425 return;
426 data = (struct chp_config_data *) &(sei_area->ccdf);
427 chp_id_init(&chpid);
428 for (num = 0; num <= __MAX_CHPID; num++) {
429 if (!chp_test_bit(data->map, num))
430 continue;
431 chpid.id = num;
432 printk(KERN_WARNING "cio: processing configure event %d for "
433 "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
434 switch (data->op) {
435 case 0:
436 chp_cfg_schedule(chpid, 1);
437 break;
438 case 1:
439 chp_cfg_schedule(chpid, 0);
440 break;
441 case 2:
442 chp_cfg_cancel_deconfigure(chpid);
443 break;
444 }
445 }
446}
538 447
448static void chsc_process_sei(struct chsc_sei_area *sei_area)
449{
539 /* Check if we might have lost some information. */ 450 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40) 451 if (sei_area->flags & 0x40) {
541 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 452 CIO_CRW_EVENT(2, "chsc: event overflow\n");
453 css_schedule_eval_all();
454 }
542 /* which kind of information was stored? */ 455 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) { 456 switch (sei_area->cc) {
545 case 1: /* link incident*/ 457 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area); 458 chsc_process_sei_link_incident(sei_area);
547 break; 459 break;
548 case 2: /* i/o resource accessibiliy */ 460 case 2: /* i/o resource accessibiliy */
549 rc = chsc_process_sei_res_acc(sei_area); 461 chsc_process_sei_res_acc(sei_area);
462 break;
463 case 8: /* channel-path-configuration notification */
464 chsc_process_sei_chp_config(sei_area);
550 break; 465 break;
551 default: /* other stuff */ 466 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 467 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc); 468 sei_area->cc);
554 break; 469 break;
555 } 470 }
556
557 return rc;
558} 471}
559 472
560int chsc_process_crw(void) 473void chsc_process_crw(void)
561{ 474{
562 struct chsc_sei_area *sei_area; 475 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
565 476
566 if (!sei_page) 477 if (!sei_page)
567 return 0; 478 return;
568 /* Access to sei_page is serialized through machine check handler 479 /* Access to sei_page is serialized through machine check handler
569 * thread, so no need for locking. */ 480 * thread, so no need for locking. */
570 sei_area = sei_page; 481 sei_area = sei_page;
571 482
572 CIO_TRACE_EVENT( 2, "prcss"); 483 CIO_TRACE_EVENT( 2, "prcss");
573 ret = 0;
574 do { 484 do {
575 memset(sei_area, 0, sizeof(*sei_area)); 485 memset(sei_area, 0, sizeof(*sei_area));
576 sei_area->request.length = 0x0010; 486 sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
580 490
581 if (sei_area->response.code == 0x0001) { 491 if (sei_area->response.code == 0x0001) {
582 CIO_CRW_EVENT(4, "chsc: sei successful\n"); 492 CIO_CRW_EVENT(4, "chsc: sei successful\n");
583 rc = chsc_process_sei(sei_area); 493 chsc_process_sei(sei_area);
584 if (rc)
585 ret = rc;
586 } else { 494 } else {
587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 495 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
588 sei_area->response.code); 496 sei_area->response.code);
589 ret = 0;
590 break; 497 break;
591 } 498 }
592 } while (sei_area->flags & 0x80); 499 } while (sei_area->flags & 0x80);
593
594 return ret;
595} 500}
596 501
597static int 502static int
598__chp_add_new_sch(struct subchannel_id schid) 503__chp_add_new_sch(struct subchannel_id schid)
599{ 504{
600 struct schib schib; 505 struct schib schib;
601 int ret;
602 506
603 if (stsch_err(schid, &schib)) 507 if (stsch_err(schid, &schib))
604 /* We're through */ 508 /* We're through */
605 return need_rescan ? -EAGAIN : -ENXIO; 509 return -ENXIO;
606 510
607 /* Put it on the slow path. */ 511 /* Put it on the slow path. */
608 ret = css_enqueue_subchannel_slow(schid); 512 css_schedule_eval(schid);
609 if (ret) {
610 css_clear_subchannel_slow_list();
611 need_rescan = 1;
612 return -EAGAIN;
613 }
614 return 0; 513 return 0;
615} 514}
616 515
@@ -619,10 +518,10 @@ static int
619__chp_add(struct subchannel_id schid, void *data) 518__chp_add(struct subchannel_id schid, void *data)
620{ 519{
621 int i, mask; 520 int i, mask;
622 struct channel_path *chp; 521 struct chp_id *chpid;
623 struct subchannel *sch; 522 struct subchannel *sch;
624 523
625 chp = data; 524 chpid = data;
626 sch = get_subchannel_by_schid(schid); 525 sch = get_subchannel_by_schid(schid);
627 if (!sch) 526 if (!sch)
628 /* Check if the subchannel is now available. */ 527 /* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
631 for (i=0; i<8; i++) { 530 for (i=0; i<8; i++) {
632 mask = 0x80 >> i; 531 mask = 0x80 >> i;
633 if ((sch->schib.pmcw.pim & mask) && 532 if ((sch->schib.pmcw.pim & mask) &&
634 (sch->schib.pmcw.chpid[i] == chp->id)) { 533 (sch->schib.pmcw.chpid[i] == chpid->id)) {
635 if (stsch(sch->schid, &sch->schib) != 0) { 534 if (stsch(sch->schid, &sch->schib) != 0) {
636 /* Endgame. */ 535 /* Endgame. */
637 spin_unlock_irq(sch->lock); 536 spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
657 return 0; 556 return 0;
658} 557}
659 558
660static int 559void chsc_chp_online(struct chp_id chpid)
661chp_add(int chpid)
662{ 560{
663 int rc;
664 char dbf_txt[15]; 561 char dbf_txt[15];
665 struct device *dev;
666 562
667 if (!get_chp_status(chpid)) 563 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
668 return 0; /* no need to do the rest */
669
670 sprintf(dbf_txt, "cadd%x", chpid);
671 CIO_TRACE_EVENT(2, dbf_txt); 564 CIO_TRACE_EVENT(2, dbf_txt);
672 565
673 dev = get_device(&css[0]->chps[chpid]->dev); 566 if (chp_get_status(chpid) != 0)
674 rc = for_each_subchannel(__chp_add, to_channelpath(dev)); 567 for_each_subchannel(__chp_add, &chpid);
675 if (css_slow_subchannels_exist())
676 rc = -EAGAIN;
677 if (rc != -EAGAIN)
678 rc = 0;
679 put_device(dev);
680 return rc;
681} 568}
682 569
683/* 570static void __s390_subchannel_vary_chpid(struct subchannel *sch,
684 * Handling of crw machine checks with channel path source. 571 struct chp_id chpid, int on)
685 */
686int
687chp_process_crw(int chpid, int on)
688{
689 if (on == 0) {
690 /* Path has gone. We use the link incident routine.*/
691 s390_set_chpid_offline(chpid);
692 return 0; /* De-register is async anyway. */
693 }
694 /*
695 * Path has come. Allocate a new channel path structure,
696 * if needed.
697 */
698 if (get_chp_status(chpid) < 0)
699 new_channel_path(chpid);
700 /* Avoid the extra overhead in process_rec_acc. */
701 return chp_add(chpid);
702}
703
704static int check_for_io_on_path(struct subchannel *sch, int index)
705{
706 int cc;
707
708 cc = stsch(sch->schid, &sch->schib);
709 if (cc)
710 return 0;
711 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
712 return 1;
713 return 0;
714}
715
716static void terminate_internal_io(struct subchannel *sch)
717{
718 if (cio_clear(sch)) {
719 /* Recheck device in case clear failed. */
720 sch->lpm = 0;
721 if (device_trigger_verify(sch) != 0) {
722 if(css_enqueue_subchannel_slow(sch->schid)) {
723 css_clear_subchannel_slow_list();
724 need_rescan = 1;
725 }
726 }
727 return;
728 }
729 /* Request retry of internal operation. */
730 device_set_intretry(sch);
731 /* Call handler. */
732 if (sch->driver && sch->driver->termination)
733 sch->driver->termination(&sch->dev);
734}
735
736static void
737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
738{ 572{
739 int chp, old_lpm; 573 int chp, old_lpm;
574 int mask;
740 unsigned long flags; 575 unsigned long flags;
741 576
742 if (!sch->ssd_info.valid)
743 return;
744
745 spin_lock_irqsave(sch->lock, flags); 577 spin_lock_irqsave(sch->lock, flags);
746 old_lpm = sch->lpm; 578 old_lpm = sch->lpm;
747 for (chp = 0; chp < 8; chp++) { 579 for (chp = 0; chp < 8; chp++) {
748 if (sch->ssd_info.chpid[chp] != chpid) 580 mask = 0x80 >> chp;
581 if (!(sch->ssd_info.path_mask & mask))
582 continue;
583 if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
749 continue; 584 continue;
750 585
751 if (on) { 586 if (on) {
752 sch->opm |= (0x80 >> chp); 587 sch->opm |= mask;
753 sch->lpm |= (0x80 >> chp); 588 sch->lpm |= mask;
754 if (!old_lpm) 589 if (!old_lpm)
755 device_trigger_reprobe(sch); 590 device_trigger_reprobe(sch);
756 else if (sch->driver && sch->driver->verify) 591 else if (sch->driver && sch->driver->verify)
757 sch->driver->verify(&sch->dev); 592 sch->driver->verify(&sch->dev);
758 break; 593 break;
759 } 594 }
760 sch->opm &= ~(0x80 >> chp); 595 sch->opm &= ~mask;
761 sch->lpm &= ~(0x80 >> chp); 596 sch->lpm &= ~mask;
762 if (check_for_io_on_path(sch, chp)) { 597 if (check_for_io_on_path(sch, mask)) {
763 if (device_is_online(sch)) 598 if (device_is_online(sch))
764 /* Path verification is done after killing. */ 599 /* Path verification is done after killing. */
765 device_kill_io(sch); 600 device_kill_io(sch);
766 else 601 else {
767 /* Kill and retry internal I/O. */ 602 /* Kill and retry internal I/O. */
768 terminate_internal_io(sch); 603 terminate_internal_io(sch);
769 } else if (!sch->lpm) { 604 /* Re-start path verification. */
770 if (device_trigger_verify(sch) != 0) { 605 if (sch->driver && sch->driver->verify)
771 if (css_enqueue_subchannel_slow(sch->schid)) { 606 sch->driver->verify(&sch->dev);
772 css_clear_subchannel_slow_list();
773 need_rescan = 1;
774 }
775 } 607 }
608 } else if (!sch->lpm) {
609 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid);
776 } else if (sch->driver && sch->driver->verify) 611 } else if (sch->driver && sch->driver->verify)
777 sch->driver->verify(&sch->dev); 612 sch->driver->verify(&sch->dev);
778 break; 613 break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
780 spin_unlock_irqrestore(sch->lock, flags); 615 spin_unlock_irqrestore(sch->lock, flags);
781} 616}
782 617
783static int 618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
784s390_subchannel_vary_chpid_off(struct device *dev, void *data)
785{ 619{
786 struct subchannel *sch; 620 struct subchannel *sch;
787 __u8 *chpid; 621 struct chp_id *chpid;
788 622
789 sch = to_subchannel(dev); 623 sch = to_subchannel(dev);
790 chpid = data; 624 chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
793 return 0; 627 return 0;
794} 628}
795 629
796static int 630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
797s390_subchannel_vary_chpid_on(struct device *dev, void *data)
798{ 631{
799 struct subchannel *sch; 632 struct subchannel *sch;
800 __u8 *chpid; 633 struct chp_id *chpid;
801 634
802 sch = to_subchannel(dev); 635 sch = to_subchannel(dev);
803 chpid = data; 636 chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
821 /* We're through */ 654 /* We're through */
822 return -ENXIO; 655 return -ENXIO;
823 /* Put it on the slow path. */ 656 /* Put it on the slow path. */
824 if (css_enqueue_subchannel_slow(schid)) { 657 css_schedule_eval(schid);
825 css_clear_subchannel_slow_list();
826 need_rescan = 1;
827 return -EAGAIN;
828 }
829 return 0; 658 return 0;
830} 659}
831 660
832/* 661/**
833 * Function: s390_vary_chpid 662 * chsc_chp_vary - propagate channel-path vary operation to subchannels
834 * Varies the specified chpid online or offline 663 * @chpid: channl-path ID
664 * @on: non-zero for vary online, zero for vary offline
835 */ 665 */
836static int 666int chsc_chp_vary(struct chp_id chpid, int on)
837s390_vary_chpid( __u8 chpid, int on)
838{ 667{
839 char dbf_text[15];
840 int status;
841
842 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
843 CIO_TRACE_EVENT( 2, dbf_text);
844
845 status = get_chp_status(chpid);
846 if (status < 0) {
847 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
848 return -EINVAL;
849 }
850
851 if (!on && !status) {
852 printk(KERN_ERR "chpid %x is already offline\n", chpid);
853 return -EINVAL;
854 }
855
856 set_chp_logically_online(chpid, on);
857
858 /* 668 /*
859 * Redo PathVerification on the devices the chpid connects to 669 * Redo PathVerification on the devices the chpid connects to
860 */ 670 */
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
865 if (on) 675 if (on)
866 /* Scan for new devices on varied on path. */ 676 /* Scan for new devices on varied on path. */
867 for_each_subchannel(__s390_vary_chpid_on, NULL); 677 for_each_subchannel(__s390_vary_chpid_on, NULL);
868 if (need_rescan || css_slow_subchannels_exist())
869 queue_work(slow_path_wq, &slow_path_work);
870 return 0; 678 return 0;
871} 679}
872 680
873/*
874 * Channel measurement related functions
875 */
876static ssize_t
877chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
878 size_t count)
879{
880 struct channel_path *chp;
881 unsigned int size;
882
883 chp = to_channelpath(container_of(kobj, struct device, kobj));
884 if (!chp->cmg_chars)
885 return 0;
886
887 size = sizeof(struct cmg_chars);
888
889 if (off > size)
890 return 0;
891 if (off + count > size)
892 count = size - off;
893 memcpy(buf, chp->cmg_chars + off, count);
894 return count;
895}
896
897static struct bin_attribute chp_measurement_chars_attr = {
898 .attr = {
899 .name = "measurement_chars",
900 .mode = S_IRUSR,
901 .owner = THIS_MODULE,
902 },
903 .size = sizeof(struct cmg_chars),
904 .read = chp_measurement_chars_read,
905};
906
907static void
908chp_measurement_copy_block(struct cmg_entry *buf,
909 struct channel_subsystem *css, int chpid)
910{
911 void *area;
912 struct cmg_entry *entry, reference_buf;
913 int idx;
914
915 if (chpid < 128) {
916 area = css->cub_addr1;
917 idx = chpid;
918 } else {
919 area = css->cub_addr2;
920 idx = chpid - 128;
921 }
922 entry = area + (idx * sizeof(struct cmg_entry));
923 do {
924 memcpy(buf, entry, sizeof(*entry));
925 memcpy(&reference_buf, entry, sizeof(*entry));
926 } while (reference_buf.values[0] != buf->values[0]);
927}
928
929static ssize_t
930chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
931{
932 struct channel_path *chp;
933 struct channel_subsystem *css;
934 unsigned int size;
935
936 chp = to_channelpath(container_of(kobj, struct device, kobj));
937 css = to_css(chp->dev.parent);
938
939 size = sizeof(struct cmg_entry);
940
941 /* Only allow single reads. */
942 if (off || count < size)
943 return 0;
944 chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
945 count = size;
946 return count;
947}
948
949static struct bin_attribute chp_measurement_attr = {
950 .attr = {
951 .name = "measurement",
952 .mode = S_IRUSR,
953 .owner = THIS_MODULE,
954 },
955 .size = sizeof(struct cmg_entry),
956 .read = chp_measurement_read,
957};
958
959static void
960chsc_remove_chp_cmg_attr(struct channel_path *chp)
961{
962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
964}
965
966static int
967chsc_add_chp_cmg_attr(struct channel_path *chp)
968{
969 int ret;
970
971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
972 if (ret)
973 return ret;
974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
975 if (ret)
976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
977 return ret;
978}
979
980static void 681static void
981chsc_remove_cmg_attr(struct channel_subsystem *css) 682chsc_remove_cmg_attr(struct channel_subsystem *css)
982{ 683{
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
985 for (i = 0; i <= __MAX_CHPID; i++) { 686 for (i = 0; i <= __MAX_CHPID; i++) {
986 if (!css->chps[i]) 687 if (!css->chps[i])
987 continue; 688 continue;
988 chsc_remove_chp_cmg_attr(css->chps[i]); 689 chp_remove_cmg_attr(css->chps[i]);
989 } 690 }
990} 691}
991 692
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
998 for (i = 0; i <= __MAX_CHPID; i++) { 699 for (i = 0; i <= __MAX_CHPID; i++) {
999 if (!css->chps[i]) 700 if (!css->chps[i])
1000 continue; 701 continue;
1001 ret = chsc_add_chp_cmg_attr(css->chps[i]); 702 ret = chp_add_cmg_attr(css->chps[i]);
1002 if (ret) 703 if (ret)
1003 goto cleanup; 704 goto cleanup;
1004 } 705 }
@@ -1007,12 +708,11 @@ cleanup:
1007 for (--i; i >= 0; i--) { 708 for (--i; i >= 0; i--) {
1008 if (!css->chps[i]) 709 if (!css->chps[i])
1009 continue; 710 continue;
1010 chsc_remove_chp_cmg_attr(css->chps[i]); 711 chp_remove_cmg_attr(css->chps[i]);
1011 } 712 }
1012 return ret; 713 return ret;
1013} 714}
1014 715
1015
1016static int 716static int
1017__chsc_do_secm(struct channel_subsystem *css, int enable, void *page) 717__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1018{ 718{
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
1118 } else 818 } else
1119 chsc_remove_cmg_attr(css); 819 chsc_remove_cmg_attr(css);
1120 } 820 }
1121 if (enable && !css->cm_enabled) { 821 if (!css->cm_enabled) {
1122 free_page((unsigned long)css->cub_addr1); 822 free_page((unsigned long)css->cub_addr1);
1123 free_page((unsigned long)css->cub_addr2); 823 free_page((unsigned long)css->cub_addr2);
1124 } 824 }
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
1127 return ret; 827 return ret;
1128} 828}
1129 829
1130/* 830int chsc_determine_channel_path_description(struct chp_id chpid,
1131 * Files for the channel path entries. 831 struct channel_path_desc *desc)
1132 */
1133static ssize_t
1134chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
1135{
1136 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1137
1138 if (!chp)
1139 return 0;
1140 return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
1141 sprintf(buf, "offline\n"));
1142}
1143
1144static ssize_t
1145chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1146{
1147 struct channel_path *cp = container_of(dev, struct channel_path, dev);
1148 char cmd[10];
1149 int num_args;
1150 int error;
1151
1152 num_args = sscanf(buf, "%5s", cmd);
1153 if (!num_args)
1154 return count;
1155
1156 if (!strnicmp(cmd, "on", 2))
1157 error = s390_vary_chpid(cp->id, 1);
1158 else if (!strnicmp(cmd, "off", 3))
1159 error = s390_vary_chpid(cp->id, 0);
1160 else
1161 error = -EINVAL;
1162
1163 return error < 0 ? error : count;
1164
1165}
1166
1167static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
1168
1169static ssize_t
1170chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1171{
1172 struct channel_path *chp = container_of(dev, struct channel_path, dev);
1173
1174 if (!chp)
1175 return 0;
1176 return sprintf(buf, "%x\n", chp->desc.desc);
1177}
1178
1179static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
1180
1181static ssize_t
1182chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
1183{
1184 struct channel_path *chp = to_channelpath(dev);
1185
1186 if (!chp)
1187 return 0;
1188 if (chp->cmg == -1) /* channel measurements not available */
1189 return sprintf(buf, "unknown\n");
1190 return sprintf(buf, "%x\n", chp->cmg);
1191}
1192
1193static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
1194
1195static ssize_t
1196chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
1197{
1198 struct channel_path *chp = to_channelpath(dev);
1199
1200 if (!chp)
1201 return 0;
1202 if (chp->shared == -1) /* channel measurements not available */
1203 return sprintf(buf, "unknown\n");
1204 return sprintf(buf, "%x\n", chp->shared);
1205}
1206
1207static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
1208
1209static struct attribute * chp_attrs[] = {
1210 &dev_attr_status.attr,
1211 &dev_attr_type.attr,
1212 &dev_attr_cmg.attr,
1213 &dev_attr_shared.attr,
1214 NULL,
1215};
1216
1217static struct attribute_group chp_attr_group = {
1218 .attrs = chp_attrs,
1219};
1220
1221static void
1222chp_release(struct device *dev)
1223{
1224 struct channel_path *cp;
1225
1226 cp = container_of(dev, struct channel_path, dev);
1227 kfree(cp);
1228}
1229
1230static int
1231chsc_determine_channel_path_description(int chpid,
1232 struct channel_path_desc *desc)
1233{ 832{
1234 int ccode, ret; 833 int ccode, ret;
1235 834
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
1252 scpd_area->request.length = 0x0010; 851 scpd_area->request.length = 0x0010;
1253 scpd_area->request.code = 0x0002; 852 scpd_area->request.code = 0x0002;
1254 853
1255 scpd_area->first_chpid = chpid; 854 scpd_area->first_chpid = chpid.id;
1256 scpd_area->last_chpid = chpid; 855 scpd_area->last_chpid = chpid.id;
1257 856
1258 ccode = chsc(scpd_area); 857 ccode = chsc(scpd_area);
1259 if (ccode > 0) { 858 if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
1316 } 915 }
1317} 916}
1318 917
1319static int 918int chsc_get_channel_measurement_chars(struct channel_path *chp)
1320chsc_get_channel_measurement_chars(struct channel_path *chp)
1321{ 919{
1322 int ccode, ret; 920 int ccode, ret;
1323 921
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1349 scmc_area->request.length = 0x0010; 947 scmc_area->request.length = 0x0010;
1350 scmc_area->request.code = 0x0022; 948 scmc_area->request.code = 0x0022;
1351 949
1352 scmc_area->first_chpid = chp->id; 950 scmc_area->first_chpid = chp->chpid.id;
1353 scmc_area->last_chpid = chp->id; 951 scmc_area->last_chpid = chp->chpid.id;
1354 952
1355 ccode = chsc(scmc_area); 953 ccode = chsc(scmc_area);
1356 if (ccode > 0) { 954 if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
1392 return ret; 990 return ret;
1393} 991}
1394 992
1395/*
1396 * Entries for chpids on the system bus.
1397 * This replaces /proc/chpids.
1398 */
1399static int
1400new_channel_path(int chpid)
1401{
1402 struct channel_path *chp;
1403 int ret;
1404
1405 chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
1406 if (!chp)
1407 return -ENOMEM;
1408
1409 /* fill in status, etc. */
1410 chp->id = chpid;
1411 chp->state = 1;
1412 chp->dev.parent = &css[0]->device;
1413 chp->dev.release = chp_release;
1414 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
1415
1416 /* Obtain channel path description and fill it in. */
1417 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
1418 if (ret)
1419 goto out_free;
1420 /* Get channel-measurement characteristics. */
1421 if (css_characteristics_avail && css_chsc_characteristics.scmc
1422 && css_chsc_characteristics.secm) {
1423 ret = chsc_get_channel_measurement_chars(chp);
1424 if (ret)
1425 goto out_free;
1426 } else {
1427 static int msg_done;
1428
1429 if (!msg_done) {
1430 printk(KERN_WARNING "cio: Channel measurements not "
1431 "available, continuing.\n");
1432 msg_done = 1;
1433 }
1434 chp->cmg = -1;
1435 }
1436
1437 /* make it known to the system */
1438 ret = device_register(&chp->dev);
1439 if (ret) {
1440 printk(KERN_WARNING "%s: could not register %02x\n",
1441 __func__, chpid);
1442 goto out_free;
1443 }
1444 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
1445 if (ret) {
1446 device_unregister(&chp->dev);
1447 goto out_free;
1448 }
1449 mutex_lock(&css[0]->mutex);
1450 if (css[0]->cm_enabled) {
1451 ret = chsc_add_chp_cmg_attr(chp);
1452 if (ret) {
1453 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
1454 device_unregister(&chp->dev);
1455 mutex_unlock(&css[0]->mutex);
1456 goto out_free;
1457 }
1458 }
1459 css[0]->chps[chpid] = chp;
1460 mutex_unlock(&css[0]->mutex);
1461 return ret;
1462out_free:
1463 kfree(chp);
1464 return ret;
1465}
1466
1467void *
1468chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1469{
1470 struct channel_path *chp;
1471 struct channel_path_desc *desc;
1472
1473 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1474 if (!chp)
1475 return NULL;
1476 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
1477 if (!desc)
1478 return NULL;
1479 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
1480 return desc;
1481}
1482
1483static int __init 993static int __init
1484chsc_alloc_sei_area(void) 994chsc_alloc_sei_area(void)
1485{ 995{
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0fb2b024208f..2ad81d11cf7b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,9 +1,10 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define CHSC_SEI_ACC_CHPID 1 4#include <linux/types.h>
5#define CHSC_SEI_ACC_LINKADDR 2 5#include <linux/device.h>
6#define CHSC_SEI_ACC_FULLLINKADDR 3 6#include <asm/chpid.h>
7#include "schid.h"
7 8
8#define CHSC_SDA_OC_MSS 0x2 9#define CHSC_SDA_OC_MSS 0x2
9 10
@@ -33,23 +34,9 @@ struct channel_path_desc {
33 u8 chpp; 34 u8 chpp;
34} __attribute__ ((packed)); 35} __attribute__ ((packed));
35 36
36struct channel_path { 37struct channel_path;
37 int id;
38 int state;
39 struct channel_path_desc desc;
40 /* Channel-measurement related stuff: */
41 int cmg;
42 int shared;
43 void *cmg_chars;
44 struct device dev;
45};
46 38
47extern void s390_process_css( void ); 39extern void chsc_process_crw(void);
48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
53 40
54struct css_general_char { 41struct css_general_char {
55 u64 : 41; 42 u64 : 41;
@@ -82,15 +69,26 @@ struct css_chsc_char {
82extern struct css_general_char css_general_characteristics; 69extern struct css_general_char css_general_characteristics;
83extern struct css_chsc_char css_chsc_characteristics; 70extern struct css_chsc_char css_chsc_characteristics;
84 71
72struct chsc_ssd_info {
73 u8 path_mask;
74 u8 fla_valid_mask;
75 struct chp_id chpid[8];
76 u16 fla[8];
77};
78extern int chsc_get_ssd_info(struct subchannel_id schid,
79 struct chsc_ssd_info *ssd);
85extern int chsc_determine_css_characteristics(void); 80extern int chsc_determine_css_characteristics(void);
86extern int css_characteristics_avail; 81extern int css_characteristics_avail;
87 82
88extern void *chsc_get_chp_desc(struct subchannel*, int);
89
90extern int chsc_enable_facility(int); 83extern int chsc_enable_facility(int);
91struct channel_subsystem; 84struct channel_subsystem;
92extern int chsc_secm(struct channel_subsystem *, int); 85extern int chsc_secm(struct channel_subsystem *, int);
93 86
94#define to_channelpath(device) container_of(device, struct channel_path, dev) 87int chsc_chp_vary(struct chp_id chpid, int on);
88int chsc_determine_channel_path_description(struct chp_id chpid,
89 struct channel_path_desc *desc);
90void chsc_chp_online(struct chp_id chpid);
91void chsc_chp_offline(struct chp_id chpid);
92int chsc_get_channel_measurement_chars(struct channel_path *chp);
95 93
96#endif 94#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 9cb129ab5be5..ea1defba5693 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -22,6 +22,7 @@
22#include <asm/setup.h> 22#include <asm/setup.h>
23#include <asm/reset.h> 23#include <asm/reset.h>
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h>
25#include "airq.h" 26#include "airq.h"
26#include "cio.h" 27#include "cio.h"
27#include "css.h" 28#include "css.h"
@@ -29,6 +30,7 @@
29#include "ioasm.h" 30#include "ioasm.h"
30#include "blacklist.h" 31#include "blacklist.h"
31#include "cio_debug.h" 32#include "cio_debug.h"
33#include "chp.h"
32#include "../s390mach.h" 34#include "../s390mach.h"
33 35
34debug_info_t *cio_debug_msg_id; 36debug_info_t *cio_debug_msg_id;
@@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
592 err = -ENODEV; 594 err = -ENODEV;
593 goto out; 595 goto out;
594 } 596 }
595 sch->opm = 0xff; 597 if (cio_is_console(sch->schid))
596 if (!cio_is_console(sch->schid)) 598 sch->opm = 0xff;
597 chsc_validate_chpids(sch); 599 else
600 sch->opm = chp_get_sch_opm(sch);
598 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
599 602
600 CIO_DEBUG(KERN_INFO, 0, 603 CIO_DEBUG(KERN_INFO, 0,
@@ -954,6 +957,7 @@ static void css_reset(void)
954{ 957{
955 int i, ret; 958 int i, ret;
956 unsigned long long timeout; 959 unsigned long long timeout;
960 struct chp_id chpid;
957 961
958 /* Reset subchannels. */ 962 /* Reset subchannels. */
959 for_each_subchannel(__shutdown_subchannel_easy, NULL); 963 for_each_subchannel(__shutdown_subchannel_easy, NULL);
@@ -963,8 +967,10 @@ static void css_reset(void)
963 __ctl_set_bit(14, 28); 967 __ctl_set_bit(14, 28);
964 /* Temporarily reenable machine checks. */ 968 /* Temporarily reenable machine checks. */
965 local_mcck_enable(); 969 local_mcck_enable();
970 chp_id_init(&chpid);
966 for (i = 0; i <= __MAX_CHPID; i++) { 971 for (i = 0; i <= __MAX_CHPID; i++) {
967 ret = rchp(i); 972 chpid.id = i;
973 ret = rchp(chpid);
968 if ((ret == 0) || (ret == 2)) 974 if ((ret == 0) || (ret == 2))
969 /* 975 /*
970 * rchp either succeeded, or another rchp is already 976 * rchp either succeeded, or another rchp is already
@@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
1048 do_reipl_asm(*((__u32*)&schid)); 1054 do_reipl_asm(*((__u32*)&schid));
1049} 1055}
1050 1056
1051static struct schib __initdata ipl_schib; 1057int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1052
1053/*
1054 * ipl_save_parameters gets called very early. It is not allowed to access
1055 * anything in the bss section at all. The bss section is not cleared yet,
1056 * but may contain some ipl parameters written by the firmware.
1057 * These parameters (if present) are copied to 0x2000.
1058 * To avoid corruption of the ipl parameters, all variables used by this
1059 * function must reside on the stack or in the data section.
1060 */
1061void ipl_save_parameters(void)
1062{ 1058{
1063 struct subchannel_id schid; 1059 struct subchannel_id schid;
1064 unsigned int *ipl_ptr; 1060 struct schib schib;
1065 void *src, *dst;
1066 1061
1067 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; 1062 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
1068 if (!schid.one) 1063 if (!schid.one)
1069 return; 1064 return -ENODEV;
1070 if (stsch(schid, &ipl_schib)) 1065 if (stsch(schid, &schib))
1071 return; 1066 return -ENODEV;
1072 if (!ipl_schib.pmcw.dnv) 1067 if (!schib.pmcw.dnv)
1073 return; 1068 return -ENODEV;
1074 ipl_devno = ipl_schib.pmcw.dev; 1069 iplinfo->devno = schib.pmcw.dev;
1075 ipl_flags |= IPL_DEVNO_VALID; 1070 iplinfo->is_qdio = schib.pmcw.qf;
1076 if (!ipl_schib.pmcw.qf) 1071 return 0;
1077 return;
1078 ipl_flags |= IPL_PARMBLOCK_VALID;
1079 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
1080 src = (void *)(unsigned long)*ipl_ptr;
1081 dst = (void *)IPL_PARMBLOCK_ORIGIN;
1082 memmove(dst, src, PAGE_SIZE);
1083 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
1084} 1072}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 35154a210357..7446c39951a7 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,18 +1,11 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5#include <linux/mutex.h> 4#include <linux/mutex.h>
6 5#include <linux/device.h>
7/* 6#include <asm/chpid.h>
8 * where we put the ssd info 7#include "chsc.h"
9 */ 8#include "schid.h"
10struct ssd_info {
11 __u8 valid:1;
12 __u8 type:7; /* subchannel type */
13 __u8 chpid[8]; /* chpids */
14 __u16 fla[8]; /* full link addresses */
15} __attribute__ ((packed));
16 9
17/* 10/*
18 * path management control word 11 * path management control word
@@ -108,7 +101,7 @@ struct subchannel {
108 struct schib schib; /* subchannel information block */ 101 struct schib schib; /* subchannel information block */
109 struct orb orb; /* operation request block */ 102 struct orb orb; /* operation request block */
110 struct ccw1 sense_ccw; /* static ccw for sense command */ 103 struct ccw1 sense_ccw; /* static ccw for sense command */
111 struct ssd_info ssd_info; /* subchannel description */ 104 struct chsc_ssd_info ssd_info; /* subchannel description */
112 struct device dev; /* entry in device tree */ 105 struct device dev; /* entry in device tree */
113 struct css_driver *driver; 106 struct css_driver *driver;
114} __attribute__ ((aligned(8))); 107} __attribute__ ((aligned(8)));
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 90b22faabbf7..28abd697be1a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -476,7 +476,7 @@ struct cmb_area {
476}; 476};
477 477
478static struct cmb_area cmb_area = { 478static struct cmb_area cmb_area = {
479 .lock = SPIN_LOCK_UNLOCKED, 479 .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
480 .list = LIST_HEAD_INIT(cmb_area.list), 480 .list = LIST_HEAD_INIT(cmb_area.list),
481 .num_channels = 1024, 481 .num_channels = 1024,
482}; 482};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fe0ace7aece8..27c6d9e55b23 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,8 +20,9 @@
20#include "ioasm.h" 20#include "ioasm.h"
21#include "chsc.h" 21#include "chsc.h"
22#include "device.h" 22#include "device.h"
23#include "idset.h"
24#include "chp.h"
23 25
24int need_rescan = 0;
25int css_init_done = 0; 26int css_init_done = 0;
26static int need_reprobe = 0; 27static int need_reprobe = 0;
27static int max_ssid = 0; 28static int max_ssid = 0;
@@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch)
125 mutex_unlock(&sch->reg_mutex); 126 mutex_unlock(&sch->reg_mutex);
126} 127}
127 128
128static int 129static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
129css_register_subchannel(struct subchannel *sch) 130{
131 int i;
132 int mask;
133
134 memset(ssd, 0, sizeof(struct chsc_ssd_info));
135 ssd->path_mask = pmcw->pim;
136 for (i = 0; i < 8; i++) {
137 mask = 0x80 >> i;
138 if (pmcw->pim & mask) {
139 chp_id_init(&ssd->chpid[i]);
140 ssd->chpid[i].id = pmcw->chpid[i];
141 }
142 }
143}
144
145static void ssd_register_chpids(struct chsc_ssd_info *ssd)
146{
147 int i;
148 int mask;
149
150 for (i = 0; i < 8; i++) {
151 mask = 0x80 >> i;
152 if (ssd->path_mask & mask)
153 if (!chp_is_registered(ssd->chpid[i]))
154 chp_new(ssd->chpid[i]);
155 }
156}
157
158void css_update_ssd_info(struct subchannel *sch)
159{
160 int ret;
161
162 if (cio_is_console(sch->schid)) {
163 /* Console is initialized too early for functions requiring
164 * memory allocation. */
165 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
166 } else {
167 ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
168 if (ret)
169 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
170 ssd_register_chpids(&sch->ssd_info);
171 }
172}
173
174static int css_register_subchannel(struct subchannel *sch)
130{ 175{
131 int ret; 176 int ret;
132 177
@@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch)
135 sch->dev.bus = &css_bus_type; 180 sch->dev.bus = &css_bus_type;
136 sch->dev.release = &css_subchannel_release; 181 sch->dev.release = &css_subchannel_release;
137 sch->dev.groups = subch_attr_groups; 182 sch->dev.groups = subch_attr_groups;
138 183 css_update_ssd_info(sch);
139 css_get_ssd_info(sch);
140
141 /* make it known to the system */ 184 /* make it known to the system */
142 ret = css_sch_device_register(sch); 185 ret = css_sch_device_register(sch);
143 if (ret) { 186 if (ret) {
@@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
306 return css_probe_device(schid); 349 return css_probe_device(schid);
307} 350}
308 351
309static int css_evaluate_subchannel(struct subchannel_id schid, int slow) 352static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
310{ 353{
311 struct subchannel *sch; 354 struct subchannel *sch;
312 int ret; 355 int ret;
@@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
317 put_device(&sch->dev); 360 put_device(&sch->dev);
318 } else 361 } else
319 ret = css_evaluate_new_subchannel(schid, slow); 362 ret = css_evaluate_new_subchannel(schid, slow);
320 363 if (ret == -EAGAIN)
321 return ret; 364 css_schedule_eval(schid);
322} 365}
323 366
324static int 367static struct idset *slow_subchannel_set;
325css_rescan_devices(struct subchannel_id schid, void *data) 368static spinlock_t slow_subchannel_lock;
369
370static int __init slow_subchannel_init(void)
326{ 371{
327 return css_evaluate_subchannel(schid, 1); 372 spin_lock_init(&slow_subchannel_lock);
373 slow_subchannel_set = idset_sch_new();
374 if (!slow_subchannel_set) {
375 printk(KERN_WARNING "cio: could not allocate slow subchannel "
376 "set\n");
377 return -ENOMEM;
378 }
379 return 0;
328} 380}
329 381
330struct slow_subchannel { 382subsys_initcall(slow_subchannel_init);
331 struct list_head slow_list;
332 struct subchannel_id schid;
333};
334
335static LIST_HEAD(slow_subchannels_head);
336static DEFINE_SPINLOCK(slow_subchannel_lock);
337 383
338static void 384static void css_slow_path_func(struct work_struct *unused)
339css_trigger_slow_path(struct work_struct *unused)
340{ 385{
341 CIO_TRACE_EVENT(4, "slowpath"); 386 struct subchannel_id schid;
342
343 if (need_rescan) {
344 need_rescan = 0;
345 for_each_subchannel(css_rescan_devices, NULL);
346 return;
347 }
348 387
388 CIO_TRACE_EVENT(4, "slowpath");
349 spin_lock_irq(&slow_subchannel_lock); 389 spin_lock_irq(&slow_subchannel_lock);
350 while (!list_empty(&slow_subchannels_head)) { 390 init_subchannel_id(&schid);
351 struct slow_subchannel *slow_sch = 391 while (idset_sch_get_first(slow_subchannel_set, &schid)) {
352 list_entry(slow_subchannels_head.next, 392 idset_sch_del(slow_subchannel_set, schid);
353 struct slow_subchannel, slow_list);
354
355 list_del_init(slow_subchannels_head.next);
356 spin_unlock_irq(&slow_subchannel_lock); 393 spin_unlock_irq(&slow_subchannel_lock);
357 css_evaluate_subchannel(slow_sch->schid, 1); 394 css_evaluate_subchannel(schid, 1);
358 spin_lock_irq(&slow_subchannel_lock); 395 spin_lock_irq(&slow_subchannel_lock);
359 kfree(slow_sch);
360 } 396 }
361 spin_unlock_irq(&slow_subchannel_lock); 397 spin_unlock_irq(&slow_subchannel_lock);
362} 398}
363 399
364DECLARE_WORK(slow_path_work, css_trigger_slow_path); 400static DECLARE_WORK(slow_path_work, css_slow_path_func);
365struct workqueue_struct *slow_path_wq; 401struct workqueue_struct *slow_path_wq;
366 402
403void css_schedule_eval(struct subchannel_id schid)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&slow_subchannel_lock, flags);
408 idset_sch_add(slow_subchannel_set, schid);
409 queue_work(slow_path_wq, &slow_path_work);
410 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
411}
412
413void css_schedule_eval_all(void)
414{
415 unsigned long flags;
416
417 spin_lock_irqsave(&slow_subchannel_lock, flags);
418 idset_fill(slow_subchannel_set);
419 queue_work(slow_path_wq, &slow_path_work);
420 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
421}
422
367/* Reprobe subchannel if unregistered. */ 423/* Reprobe subchannel if unregistered. */
368static int reprobe_subchannel(struct subchannel_id schid, void *data) 424static int reprobe_subchannel(struct subchannel_id schid, void *data)
369{ 425{
@@ -426,33 +482,14 @@ void css_schedule_reprobe(void)
426EXPORT_SYMBOL_GPL(css_schedule_reprobe); 482EXPORT_SYMBOL_GPL(css_schedule_reprobe);
427 483
428/* 484/*
429 * Rescan for new devices. FIXME: This is slow.
430 * This function is called when we have lost CRWs due to overflows and we have
431 * to do subchannel housekeeping.
432 */
433void
434css_reiterate_subchannels(void)
435{
436 css_clear_subchannel_slow_list();
437 need_rescan = 1;
438}
439
440/*
441 * Called from the machine check handler for subchannel report words. 485 * Called from the machine check handler for subchannel report words.
442 */ 486 */
443int 487void css_process_crw(int rsid1, int rsid2)
444css_process_crw(int rsid1, int rsid2)
445{ 488{
446 int ret;
447 struct subchannel_id mchk_schid; 489 struct subchannel_id mchk_schid;
448 490
449 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 491 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
450 rsid1, rsid2); 492 rsid1, rsid2);
451
452 if (need_rescan)
453 /* We need to iterate all subchannels anyway. */
454 return -EAGAIN;
455
456 init_subchannel_id(&mchk_schid); 493 init_subchannel_id(&mchk_schid);
457 mchk_schid.sch_no = rsid1; 494 mchk_schid.sch_no = rsid1;
458 if (rsid2 != 0) 495 if (rsid2 != 0)
@@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2)
463 * use stsch() to find out if the subchannel in question has come 500 * use stsch() to find out if the subchannel in question has come
464 * or gone. 501 * or gone.
465 */ 502 */
466 ret = css_evaluate_subchannel(mchk_schid, 0); 503 css_evaluate_subchannel(mchk_schid, 0);
467 if (ret == -EAGAIN) {
468 if (css_enqueue_subchannel_slow(mchk_schid)) {
469 css_clear_subchannel_slow_list();
470 need_rescan = 1;
471 }
472 }
473 return ret;
474} 504}
475 505
476static int __init 506static int __init
@@ -745,47 +775,6 @@ struct bus_type css_bus_type = {
745 775
746subsys_initcall(init_channel_subsystem); 776subsys_initcall(init_channel_subsystem);
747 777
748int
749css_enqueue_subchannel_slow(struct subchannel_id schid)
750{
751 struct slow_subchannel *new_slow_sch;
752 unsigned long flags;
753
754 new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC);
755 if (!new_slow_sch)
756 return -ENOMEM;
757 new_slow_sch->schid = schid;
758 spin_lock_irqsave(&slow_subchannel_lock, flags);
759 list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head);
760 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
761 return 0;
762}
763
764void
765css_clear_subchannel_slow_list(void)
766{
767 unsigned long flags;
768
769 spin_lock_irqsave(&slow_subchannel_lock, flags);
770 while (!list_empty(&slow_subchannels_head)) {
771 struct slow_subchannel *slow_sch =
772 list_entry(slow_subchannels_head.next,
773 struct slow_subchannel, slow_list);
774
775 list_del_init(slow_subchannels_head.next);
776 kfree(slow_sch);
777 }
778 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
779}
780
781
782
783int
784css_slow_subchannels_exist(void)
785{
786 return (!list_empty(&slow_subchannels_head));
787}
788
789MODULE_LICENSE("GPL"); 778MODULE_LICENSE("GPL");
790EXPORT_SYMBOL(css_bus_type); 779EXPORT_SYMBOL(css_bus_type);
791EXPORT_SYMBOL_GPL(css_characteristics_avail); 780EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ca2bab932a8a..71fcfdc42800 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -4,8 +4,11 @@
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/wait.h> 5#include <linux/wait.h>
6#include <linux/workqueue.h> 6#include <linux/workqueue.h>
7#include <linux/device.h>
8#include <linux/types.h>
7 9
8#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h>
9 12
10#include "schid.h" 13#include "schid.h"
11 14
@@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 146extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 147extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 148extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int); 149extern void css_process_crw(int, int);
147extern void css_reiterate_subchannels(void); 150extern void css_reiterate_subchannels(void);
151void css_update_ssd_info(struct subchannel *sch);
148 152
149#define __MAX_SUBCHANNEL 65535 153#define __MAX_SUBCHANNEL 65535
150#define __MAX_SSID 3 154#define __MAX_SSID 3
151#define __MAX_CHPID 255
152#define __MAX_CSSID 0
153 155
154struct channel_subsystem { 156struct channel_subsystem {
155 u8 cssid; 157 u8 cssid;
@@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch);
185void device_kill_pending_timer(struct subchannel *); 187void device_kill_pending_timer(struct subchannel *);
186 188
187/* Helper functions to build lists for the slow path. */ 189/* Helper functions to build lists for the slow path. */
188extern int css_enqueue_subchannel_slow(struct subchannel_id schid); 190void css_schedule_eval(struct subchannel_id schid);
189void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 191void css_schedule_eval_all(void);
190void css_clear_subchannel_slow_list(void);
191int css_slow_subchannels_exist(void);
192extern int need_rescan;
193 192
194int sch_is_pseudo_sch(struct subchannel *); 193int sch_is_pseudo_sch(struct subchannel *);
195 194
196extern struct workqueue_struct *slow_path_wq; 195extern struct workqueue_struct *slow_path_wq;
197extern struct work_struct slow_path_work;
198 196
199int subchannel_add_files (struct device *); 197int subchannel_add_files (struct device *);
200extern struct attribute_group *subch_attr_groups[]; 198extern struct attribute_group *subch_attr_groups[];
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e322111fb369..03355902c582 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
56/* Store modalias string delimited by prefix/suffix string into buffer with 56/* Store modalias string delimited by prefix/suffix string into buffer with
57 * specified size. Return length of resulting string (excluding trailing '\0') 57 * specified size. Return length of resulting string (excluding trailing '\0')
58 * even if string doesn't fit buffer (snprintf semantics). */ 58 * even if string doesn't fit buffer (snprintf semantics). */
59static int snprint_alias(char *buf, size_t size, const char *prefix, 59static int snprint_alias(char *buf, size_t size,
60 struct ccw_device_id *id, const char *suffix) 60 struct ccw_device_id *id, const char *suffix)
61{ 61{
62 int len; 62 int len;
63 63
64 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, 64 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
65 id->cu_model);
66 if (len > size) 65 if (len > size)
67 return len; 66 return len;
68 buf += len; 67 buf += len;
@@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp,
85 struct ccw_device *cdev = to_ccwdev(dev); 84 struct ccw_device *cdev = to_ccwdev(dev);
86 struct ccw_device_id *id = &(cdev->id); 85 struct ccw_device_id *id = &(cdev->id);
87 int i = 0; 86 int i = 0;
88 int len; 87 int len = 0;
88 int ret;
89 char modalias_buf[30];
89 90
90 /* CU_TYPE= */ 91 /* CU_TYPE= */
91 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; 92 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
92 if (len > buffer_size || i >= num_envp) 93 "CU_TYPE=%04X", id->cu_type);
93 return -ENOMEM; 94 if (ret)
94 envp[i++] = buffer; 95 return ret;
95 buffer += len;
96 buffer_size -= len;
97 96
98 /* CU_MODEL= */ 97 /* CU_MODEL= */
99 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; 98 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
100 if (len > buffer_size || i >= num_envp) 99 "CU_MODEL=%02X", id->cu_model);
101 return -ENOMEM; 100 if (ret)
102 envp[i++] = buffer; 101 return ret;
103 buffer += len;
104 buffer_size -= len;
105 102
106 /* The next two can be zero, that's ok for us */ 103 /* The next two can be zero, that's ok for us */
107 /* DEV_TYPE= */ 104 /* DEV_TYPE= */
108 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; 105 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
109 if (len > buffer_size || i >= num_envp) 106 "DEV_TYPE=%04X", id->dev_type);
110 return -ENOMEM; 107 if (ret)
111 envp[i++] = buffer; 108 return ret;
112 buffer += len;
113 buffer_size -= len;
114 109
115 /* DEV_MODEL= */ 110 /* DEV_MODEL= */
116 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", 111 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
117 (unsigned char) id->dev_model) + 1; 112 "DEV_MODEL=%02X", id->dev_model);
118 if (len > buffer_size || i >= num_envp) 113 if (ret)
119 return -ENOMEM; 114 return ret;
120 envp[i++] = buffer;
121 buffer += len;
122 buffer_size -= len;
123 115
124 /* MODALIAS= */ 116 /* MODALIAS= */
125 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; 117 snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
126 if (len > buffer_size || i >= num_envp) 118 ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
127 return -ENOMEM; 119 "MODALIAS=%s", modalias_buf);
128 envp[i++] = buffer; 120 return ret;
129 buffer += len;
130 buffer_size -= len;
131
132 envp[i] = NULL;
133
134 return 0;
135} 121}
136 122
137struct bus_type ccw_bus_type; 123struct bus_type ccw_bus_type;
@@ -230,12 +216,18 @@ static ssize_t
230chpids_show (struct device * dev, struct device_attribute *attr, char * buf) 216chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
231{ 217{
232 struct subchannel *sch = to_subchannel(dev); 218 struct subchannel *sch = to_subchannel(dev);
233 struct ssd_info *ssd = &sch->ssd_info; 219 struct chsc_ssd_info *ssd = &sch->ssd_info;
234 ssize_t ret = 0; 220 ssize_t ret = 0;
235 int chp; 221 int chp;
222 int mask;
236 223
237 for (chp = 0; chp < 8; chp++) 224 for (chp = 0; chp < 8; chp++) {
238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); 225 mask = 0x80 >> chp;
226 if (ssd->path_mask & mask)
227 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
228 else
229 ret += sprintf(buf + ret, "00 ");
230 }
239 ret += sprintf (buf+ret, "\n"); 231 ret += sprintf (buf+ret, "\n");
240 return min((ssize_t)PAGE_SIZE, ret); 232 return min((ssize_t)PAGE_SIZE, ret);
241} 233}
@@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
280 struct ccw_device_id *id = &(cdev->id); 272 struct ccw_device_id *id = &(cdev->id);
281 int len; 273 int len;
282 274
283 len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; 275 len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
284 276
285 return len > PAGE_SIZE ? PAGE_SIZE : len; 277 return len > PAGE_SIZE ? PAGE_SIZE : len;
286} 278}
@@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
298 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); 290 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
299} 291}
300 292
301static void ccw_device_unregister(struct work_struct *work) 293static void ccw_device_unregister(struct ccw_device *cdev)
302{ 294{
303 struct ccw_device_private *priv;
304 struct ccw_device *cdev;
305
306 priv = container_of(work, struct ccw_device_private, kick_work);
307 cdev = priv->cdev;
308 if (test_and_clear_bit(1, &cdev->private->registered)) 295 if (test_and_clear_bit(1, &cdev->private->registered))
309 device_unregister(&cdev->dev); 296 device_del(&cdev->dev);
310 put_device(&cdev->dev);
311} 297}
312 298
313static void 299static void
@@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
324 spin_lock_irqsave(cdev->ccwlock, flags); 310 spin_lock_irqsave(cdev->ccwlock, flags);
325 cdev->private->state = DEV_STATE_NOT_OPER; 311 cdev->private->state = DEV_STATE_NOT_OPER;
326 spin_unlock_irqrestore(cdev->ccwlock, flags); 312 spin_unlock_irqrestore(cdev->ccwlock, flags);
327 if (get_device(&cdev->dev)) { 313 ccw_device_unregister(cdev);
328 PREPARE_WORK(&cdev->private->kick_work, 314 put_device(&cdev->dev);
329 ccw_device_unregister);
330 queue_work(ccw_device_work, &cdev->private->kick_work);
331 }
332 return ; 315 return ;
333 } 316 }
334 sch = to_subchannel(cdev->dev.parent); 317 sch = to_subchannel(cdev->dev.parent);
@@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev)
413 return (ret == 0) ? -ENODEV : ret; 396 return (ret == 0) ? -ENODEV : ret;
414} 397}
415 398
416static ssize_t 399static void online_store_handle_offline(struct ccw_device *cdev)
417online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 400{
401 if (cdev->private->state == DEV_STATE_DISCONNECTED)
402 ccw_device_remove_disconnected(cdev);
403 else if (cdev->drv && cdev->drv->set_offline)
404 ccw_device_set_offline(cdev);
405}
406
407static int online_store_recog_and_online(struct ccw_device *cdev)
408{
409 int ret;
410
411 /* Do device recognition, if needed. */
412 if (cdev->id.cu_type == 0) {
413 ret = ccw_device_recognition(cdev);
414 if (ret) {
415 printk(KERN_WARNING"Couldn't start recognition "
416 "for device %s (ret=%d)\n",
417 cdev->dev.bus_id, ret);
418 return ret;
419 }
420 wait_event(cdev->private->wait_q,
421 cdev->private->flags.recog_done);
422 }
423 if (cdev->drv && cdev->drv->set_online)
424 ccw_device_set_online(cdev);
425 return 0;
426}
427static void online_store_handle_online(struct ccw_device *cdev, int force)
428{
429 int ret;
430
431 ret = online_store_recog_and_online(cdev);
432 if (ret)
433 return;
434 if (force && cdev->private->state == DEV_STATE_BOXED) {
435 ret = ccw_device_stlck(cdev);
436 if (ret) {
437 printk(KERN_WARNING"ccw_device_stlck for device %s "
438 "returned %d!\n", cdev->dev.bus_id, ret);
439 return;
440 }
441 if (cdev->id.cu_type == 0)
442 cdev->private->state = DEV_STATE_NOT_OPER;
443 online_store_recog_and_online(cdev);
444 }
445
446}
447
448static ssize_t online_store (struct device *dev, struct device_attribute *attr,
449 const char *buf, size_t count)
418{ 450{
419 struct ccw_device *cdev = to_ccwdev(dev); 451 struct ccw_device *cdev = to_ccwdev(dev);
420 int i, force, ret; 452 int i, force;
421 char *tmp; 453 char *tmp;
422 454
423 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 455 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
@@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
434 force = 0; 466 force = 0;
435 i = simple_strtoul(buf, &tmp, 16); 467 i = simple_strtoul(buf, &tmp, 16);
436 } 468 }
437 if (i == 1) { 469
438 /* Do device recognition, if needed. */ 470 switch (i) {
439 if (cdev->id.cu_type == 0) { 471 case 0:
440 ret = ccw_device_recognition(cdev); 472 online_store_handle_offline(cdev);
441 if (ret) { 473 break;
442 printk(KERN_WARNING"Couldn't start recognition " 474 case 1:
443 "for device %s (ret=%d)\n", 475 online_store_handle_online(cdev, force);
444 cdev->dev.bus_id, ret); 476 break;
445 goto out; 477 default:
446 } 478 count = -EINVAL;
447 wait_event(cdev->private->wait_q,
448 cdev->private->flags.recog_done);
449 }
450 if (cdev->drv && cdev->drv->set_online)
451 ccw_device_set_online(cdev);
452 } else if (i == 0) {
453 if (cdev->private->state == DEV_STATE_DISCONNECTED)
454 ccw_device_remove_disconnected(cdev);
455 else if (cdev->drv && cdev->drv->set_offline)
456 ccw_device_set_offline(cdev);
457 }
458 if (force && cdev->private->state == DEV_STATE_BOXED) {
459 ret = ccw_device_stlck(cdev);
460 if (ret) {
461 printk(KERN_WARNING"ccw_device_stlck for device %s "
462 "returned %d!\n", cdev->dev.bus_id, ret);
463 goto out;
464 }
465 /* Do device recognition, if needed. */
466 if (cdev->id.cu_type == 0) {
467 cdev->private->state = DEV_STATE_NOT_OPER;
468 ret = ccw_device_recognition(cdev);
469 if (ret) {
470 printk(KERN_WARNING"Couldn't start recognition "
471 "for device %s (ret=%d)\n",
472 cdev->dev.bus_id, ret);
473 goto out;
474 }
475 wait_event(cdev->private->wait_q,
476 cdev->private->flags.recog_done);
477 }
478 if (cdev->drv && cdev->drv->set_online)
479 ccw_device_set_online(cdev);
480 } 479 }
481 out:
482 if (cdev->drv) 480 if (cdev->drv)
483 module_put(cdev->drv->owner); 481 module_put(cdev->drv->owner);
484 atomic_set(&cdev->private->onoff, 0); 482 atomic_set(&cdev->private->onoff, 0);
@@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = {
548 .attrs = ccwdev_attrs, 546 .attrs = ccwdev_attrs,
549}; 547};
550 548
551static int 549struct attribute_group *ccwdev_attr_groups[] = {
552device_add_files (struct device *dev) 550 &ccwdev_attr_group,
553{ 551 NULL,
554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 552};
555}
556
557static void
558device_remove_files(struct device *dev)
559{
560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
561}
562 553
563/* this is a simple abstraction for device_register that sets the 554/* this is a simple abstraction for device_register that sets the
564 * correct bus type and adds the bus specific files */ 555 * correct bus type and adds the bus specific files */
@@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev)
573 return ret; 564 return ret;
574 565
575 set_bit(1, &cdev->private->registered); 566 set_bit(1, &cdev->private->registered);
576 if ((ret = device_add_files(dev))) {
577 if (test_and_clear_bit(1, &cdev->private->registered))
578 device_del(dev);
579 }
580 return ret; 567 return ret;
581} 568}
582 569
@@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work)
648 return; 635 return;
649 } 636 }
650 set_bit(1, &cdev->private->registered); 637 set_bit(1, &cdev->private->registered);
651 if (device_add_files(&cdev->dev)) {
652 if (test_and_clear_bit(1, &cdev->private->registered))
653 device_unregister(&cdev->dev);
654 }
655} 638}
656 639
657void ccw_device_do_unreg_rereg(struct work_struct *work) 640void ccw_device_do_unreg_rereg(struct work_struct *work)
@@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work)
664 cdev = priv->cdev; 647 cdev = priv->cdev;
665 sch = to_subchannel(cdev->dev.parent); 648 sch = to_subchannel(cdev->dev.parent);
666 649
667 device_remove_files(&cdev->dev); 650 ccw_device_unregister(cdev);
668 if (test_and_clear_bit(1, &cdev->private->registered))
669 device_del(&cdev->dev);
670 PREPARE_WORK(&cdev->private->kick_work, 651 PREPARE_WORK(&cdev->private->kick_work,
671 ccw_device_add_changed); 652 ccw_device_add_changed);
672 queue_work(ccw_device_work, &cdev->private->kick_work); 653 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
705 cdev->dev.parent = &sch->dev; 686 cdev->dev.parent = &sch->dev;
706 cdev->dev.release = ccw_device_release; 687 cdev->dev.release = ccw_device_release;
707 INIT_LIST_HEAD(&cdev->private->kick_work.entry); 688 INIT_LIST_HEAD(&cdev->private->kick_work.entry);
689 cdev->dev.groups = ccwdev_attr_groups;
708 /* Do first half of device_register. */ 690 /* Do first half of device_register. */
709 device_initialize(&cdev->dev); 691 device_initialize(&cdev->dev);
710 if (!get_device(&sch->dev)) { 692 if (!get_device(&sch->dev)) {
@@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
736static void sch_attach_device(struct subchannel *sch, 718static void sch_attach_device(struct subchannel *sch,
737 struct ccw_device *cdev) 719 struct ccw_device *cdev)
738{ 720{
721 css_update_ssd_info(sch);
739 spin_lock_irq(sch->lock); 722 spin_lock_irq(sch->lock);
740 sch->dev.driver_data = cdev; 723 sch->dev.driver_data = cdev;
741 cdev->private->schid = sch->schid; 724 cdev->private->schid = sch->schid;
@@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work)
871 priv = container_of(work, struct ccw_device_private, kick_work); 854 priv = container_of(work, struct ccw_device_private, kick_work);
872 cdev = priv->cdev; 855 cdev = priv->cdev;
873 sch = to_subchannel(cdev->dev.parent); 856 sch = to_subchannel(cdev->dev.parent);
874 857 css_update_ssd_info(sch);
875 /* 858 /*
876 * io_subchannel_register() will also be called after device 859 * io_subchannel_register() will also be called after device
877 * recognition has been done for a boxed device (which will already 860 * recognition has been done for a boxed device (which will already
@@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch)
1133 sch->dev.driver_data = NULL; 1116 sch->dev.driver_data = NULL;
1134 cdev->private->state = DEV_STATE_NOT_OPER; 1117 cdev->private->state = DEV_STATE_NOT_OPER;
1135 spin_unlock_irqrestore(cdev->ccwlock, flags); 1118 spin_unlock_irqrestore(cdev->ccwlock, flags);
1136 /* 1119 ccw_device_unregister(cdev);
1137 * Put unregistration on workqueue to avoid livelocks on the css bus 1120 put_device(&cdev->dev);
1138 * semaphore.
1139 */
1140 if (get_device(&cdev->dev)) {
1141 PREPARE_WORK(&cdev->private->kick_work,
1142 ccw_device_unregister);
1143 queue_work(ccw_device_work, &cdev->private->kick_work);
1144 }
1145 return 0; 1121 return 0;
1146} 1122}
1147 1123
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 089a3ddd6265..898ec3b2bebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -15,6 +15,7 @@
15 15
16#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
17#include <asm/cio.h> 17#include <asm/cio.h>
18#include <asm/chpid.h>
18 19
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -22,6 +23,7 @@
22#include "device.h" 23#include "device.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "ioasm.h" 25#include "ioasm.h"
26#include "chp.h"
25 27
26int 28int
27device_is_online(struct subchannel *sch) 29device_is_online(struct subchannel *sch)
@@ -210,14 +212,18 @@ static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 212__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 213{
212 int mask, i; 214 int mask, i;
215 struct chp_id chpid;
213 216
217 chp_id_init(&chpid);
214 for (i = 0; i<8; i++) { 218 for (i = 0; i<8; i++) {
215 mask = 0x80 >> i; 219 mask = 0x80 >> i;
216 if (!(sch->lpm & mask)) 220 if (!(sch->lpm & mask))
217 continue; 221 continue;
218 if (old_lpm & mask) 222 if (old_lpm & mask)
219 continue; 223 continue;
220 chpid_is_actually_online(sch->schib.pmcw.chpid[i]); 224 chpid.id = sch->schib.pmcw.chpid[i];
225 if (!chp_is_registered(chpid))
226 css_schedule_eval_all();
221 } 227 }
222} 228}
223 229
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7c7775aae38a..16f59fcb66b1 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -16,12 +16,14 @@
16 16
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h>
19 20
20#include "cio.h" 21#include "cio.h"
21#include "cio_debug.h" 22#include "cio_debug.h"
22#include "css.h" 23#include "css.h"
23#include "chsc.h" 24#include "chsc.h"
24#include "device.h" 25#include "device.h"
26#include "chp.h"
25 27
26int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) 28int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
27{ 29{
@@ -606,9 +608,12 @@ void *
606ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 608ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
607{ 609{
608 struct subchannel *sch; 610 struct subchannel *sch;
611 struct chp_id chpid;
609 612
610 sch = to_subchannel(cdev->dev.parent); 613 sch = to_subchannel(cdev->dev.parent);
611 return chsc_get_chp_desc(sch, chp_no); 614 chp_id_init(&chpid);
615 chpid.id = sch->schib.pmcw.chpid[chp_no];
616 return chp_get_chp_desc(chpid);
612} 617}
613 618
614// FIXME: these have to go: 619// FIXME: these have to go:
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
new file mode 100644
index 000000000000..16ea828e99f7
--- /dev/null
+++ b/drivers/s390/cio/idset.c
@@ -0,0 +1,112 @@
1/*
2 * drivers/s390/cio/idset.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/slab.h>
9#include <asm/bitops.h>
10#include "idset.h"
11#include "css.h"
12
13struct idset {
14 int num_ssid;
15 int num_id;
16 unsigned long bitmap[0];
17};
18
19static inline unsigned long bitmap_size(int num_ssid, int num_id)
20{
21 return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long);
22}
23
24static struct idset *idset_new(int num_ssid, int num_id)
25{
26 struct idset *set;
27
28 set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id),
29 GFP_KERNEL);
30 if (set) {
31 set->num_ssid = num_ssid;
32 set->num_id = num_id;
33 }
34 return set;
35}
36
37void idset_free(struct idset *set)
38{
39 kfree(set);
40}
41
42void idset_clear(struct idset *set)
43{
44 memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
45}
46
47void idset_fill(struct idset *set)
48{
49 memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
50}
51
52static inline void idset_add(struct idset *set, int ssid, int id)
53{
54 set_bit(ssid * set->num_id + id, set->bitmap);
55}
56
57static inline void idset_del(struct idset *set, int ssid, int id)
58{
59 clear_bit(ssid * set->num_id + id, set->bitmap);
60}
61
62static inline int idset_contains(struct idset *set, int ssid, int id)
63{
64 return test_bit(ssid * set->num_id + id, set->bitmap);
65}
66
67static inline int idset_get_first(struct idset *set, int *ssid, int *id)
68{
69 int bitnum;
70
71 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
72 if (bitnum >= set->num_ssid * set->num_id)
73 return 0;
74 *ssid = bitnum / set->num_id;
75 *id = bitnum % set->num_id;
76 return 1;
77}
78
79struct idset *idset_sch_new(void)
80{
81 return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
82}
83
84void idset_sch_add(struct idset *set, struct subchannel_id schid)
85{
86 idset_add(set, schid.ssid, schid.sch_no);
87}
88
89void idset_sch_del(struct idset *set, struct subchannel_id schid)
90{
91 idset_del(set, schid.ssid, schid.sch_no);
92}
93
94int idset_sch_contains(struct idset *set, struct subchannel_id schid)
95{
96 return idset_contains(set, schid.ssid, schid.sch_no);
97}
98
99int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
100{
101 int ssid = 0;
102 int id = 0;
103 int rc;
104
105 rc = idset_get_first(set, &ssid, &id);
106 if (rc) {
107 init_subchannel_id(schid);
108 schid->ssid = ssid;
109 schid->sch_no = id;
110 }
111 return rc;
112}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
new file mode 100644
index 000000000000..144466ab8c15
--- /dev/null
+++ b/drivers/s390/cio/idset.h
@@ -0,0 +1,25 @@
1/*
2 * drivers/s390/cio/idset.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H
10
11#include "schid.h"
12
13struct idset;
14
15void idset_free(struct idset *set);
16void idset_clear(struct idset *set);
17void idset_fill(struct idset *set);
18
19struct idset *idset_sch_new(void);
20void idset_sch_add(struct idset *set, struct subchannel_id id);
21void idset_sch_del(struct idset *set, struct subchannel_id id);
22int idset_sch_contains(struct idset *set, struct subchannel_id id);
23int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
24
25#endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index ad6d82940069..7153dd959082 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,6 +1,7 @@
1#ifndef S390_CIO_IOASM_H 1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h>
4#include "schid.h" 5#include "schid.h"
5 6
6/* 7/*
@@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area)
189 return cc; 190 return cc;
190} 191}
191 192
192static inline int rchp(int chpid) 193static inline int rchp(struct chp_id chpid)
193{ 194{
194 register unsigned int reg1 asm ("1") = chpid; 195 register struct chp_id reg1 asm ("1") = chpid;
195 int ccode; 196 int ccode;
196 197
197 asm volatile( 198 asm volatile(
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 0d6d5fcc128b..570a960bfb5b 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1638,21 +1638,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1638 struct channel *ch; 1638 struct channel *ch;
1639 1639
1640 DBF_TEXT(trace, 2, __FUNCTION__); 1640 DBF_TEXT(trace, 2, __FUNCTION__);
1641 if ((ch = 1641 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1642 (struct channel *) kmalloc(sizeof (struct channel), 1642 if (!ch) {
1643 GFP_KERNEL)) == NULL) {
1644 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1643 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1645 return -1; 1644 return -1;
1646 } 1645 }
1647 memset(ch, 0, sizeof (struct channel)); 1646 /* assure all flags and counters are reset */
1648 if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), 1647 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1649 GFP_KERNEL | GFP_DMA)) == NULL) { 1648 if (!ch->ccw) {
1650 kfree(ch); 1649 kfree(ch);
1651 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1650 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1652 return -1; 1651 return -1;
1653 } 1652 }
1654 1653
1655 memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset
1656 1654
1657 /** 1655 /**
1658 * "static" ccws are used in the following way: 1656 * "static" ccws are used in the following way:
@@ -1692,15 +1690,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
1692 return -1; 1690 return -1;
1693 } 1691 }
1694 fsm_newstate(ch->fsm, CH_STATE_IDLE); 1692 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1695 if ((ch->irb = kmalloc(sizeof (struct irb), 1693 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1696 GFP_KERNEL)) == NULL) { 1694 if (!ch->irb) {
1697 ctc_pr_warn("ctc: Out of memory in add_channel\n"); 1695 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1698 kfree_fsm(ch->fsm); 1696 kfree_fsm(ch->fsm);
1699 kfree(ch->ccw); 1697 kfree(ch->ccw);
1700 kfree(ch); 1698 kfree(ch);
1701 return -1; 1699 return -1;
1702 } 1700 }
1703 memset(ch->irb, 0, sizeof (struct irb));
1704 while (*c && less_than((*c)->id, ch->id)) 1701 while (*c && less_than((*c)->id, ch->id))
1705 c = &(*c)->next; 1702 c = &(*c)->next;
1706 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { 1703 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
@@ -2745,14 +2742,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
2745 if (!get_device(&cgdev->dev)) 2742 if (!get_device(&cgdev->dev))
2746 return -ENODEV; 2743 return -ENODEV;
2747 2744
2748 priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); 2745 priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2749 if (!priv) { 2746 if (!priv) {
2750 ctc_pr_err("%s: Out of memory\n", __func__); 2747 ctc_pr_err("%s: Out of memory\n", __func__);
2751 put_device(&cgdev->dev); 2748 put_device(&cgdev->dev);
2752 return -ENOMEM; 2749 return -ENOMEM;
2753 } 2750 }
2754 2751
2755 memset(priv, 0, sizeof (struct ctc_priv));
2756 rc = ctc_add_files(&cgdev->dev); 2752 rc = ctc_add_files(&cgdev->dev);
2757 if (rc) { 2753 if (rc) {
2758 kfree(priv); 2754 kfree(priv);
@@ -2793,10 +2789,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
2793 DBF_TEXT(setup, 3, __FUNCTION__); 2789 DBF_TEXT(setup, 3, __FUNCTION__);
2794 2790
2795 if (alloc_device) { 2791 if (alloc_device) {
2796 dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); 2792 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
2797 if (!dev) 2793 if (!dev)
2798 return NULL; 2794 return NULL;
2799 memset(dev, 0, sizeof (struct net_device));
2800 } 2795 }
2801 2796
2802 dev->priv = privptr; 2797 dev->priv = privptr;
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 806bb1a921eb..644a06eba828 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -21,6 +21,7 @@
21#include "cio/cio.h" 21#include "cio/cio.h"
22#include "cio/chsc.h" 22#include "cio/chsc.h"
23#include "cio/css.h" 23#include "cio/css.h"
24#include "cio/chp.h"
24#include "s390mach.h" 25#include "s390mach.h"
25 26
26static struct semaphore m_sem; 27static struct semaphore m_sem;
@@ -44,14 +45,13 @@ static int
44s390_collect_crw_info(void *param) 45s390_collect_crw_info(void *param)
45{ 46{
46 struct crw crw[2]; 47 struct crw crw[2];
47 int ccode, ret, slow; 48 int ccode;
48 struct semaphore *sem; 49 struct semaphore *sem;
49 unsigned int chain; 50 unsigned int chain;
50 51
51 sem = (struct semaphore *)param; 52 sem = (struct semaphore *)param;
52repeat: 53repeat:
53 down_interruptible(sem); 54 down_interruptible(sem);
54 slow = 0;
55 chain = 0; 55 chain = 0;
56 while (1) { 56 while (1) {
57 if (unlikely(chain > 1)) { 57 if (unlikely(chain > 1)) {
@@ -84,9 +84,8 @@ repeat:
84 /* Check for overflows. */ 84 /* Check for overflows. */
85 if (crw[chain].oflw) { 85 if (crw[chain].oflw) {
86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 86 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
87 css_reiterate_subchannels(); 87 css_schedule_eval_all();
88 chain = 0; 88 chain = 0;
89 slow = 1;
90 continue; 89 continue;
91 } 90 }
92 switch (crw[chain].rsc) { 91 switch (crw[chain].rsc) {
@@ -94,10 +93,7 @@ repeat:
94 if (crw[0].chn && !chain) 93 if (crw[0].chn && !chain)
95 break; 94 break;
96 pr_debug("source is subchannel %04X\n", crw[0].rsid); 95 pr_debug("source is subchannel %04X\n", crw[0].rsid);
97 ret = css_process_crw (crw[0].rsid, 96 css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0);
98 chain ? crw[1].rsid : 0);
99 if (ret == -EAGAIN)
100 slow = 1;
101 break; 97 break;
102 case CRW_RSC_MONITOR: 98 case CRW_RSC_MONITOR:
103 pr_debug("source is monitoring facility\n"); 99 pr_debug("source is monitoring facility\n");
@@ -116,28 +112,23 @@ repeat:
116 } 112 }
117 switch (crw[0].erc) { 113 switch (crw[0].erc) {
118 case CRW_ERC_IPARM: /* Path has come. */ 114 case CRW_ERC_IPARM: /* Path has come. */
119 ret = chp_process_crw(crw[0].rsid, 1); 115 chp_process_crw(crw[0].rsid, 1);
120 break; 116 break;
121 case CRW_ERC_PERRI: /* Path has gone. */ 117 case CRW_ERC_PERRI: /* Path has gone. */
122 case CRW_ERC_PERRN: 118 case CRW_ERC_PERRN:
123 ret = chp_process_crw(crw[0].rsid, 0); 119 chp_process_crw(crw[0].rsid, 0);
124 break; 120 break;
125 default: 121 default:
126 pr_debug("Don't know how to handle erc=%x\n", 122 pr_debug("Don't know how to handle erc=%x\n",
127 crw[0].erc); 123 crw[0].erc);
128 ret = 0;
129 } 124 }
130 if (ret == -EAGAIN)
131 slow = 1;
132 break; 125 break;
133 case CRW_RSC_CONFIG: 126 case CRW_RSC_CONFIG:
134 pr_debug("source is configuration-alert facility\n"); 127 pr_debug("source is configuration-alert facility\n");
135 break; 128 break;
136 case CRW_RSC_CSS: 129 case CRW_RSC_CSS:
137 pr_debug("source is channel subsystem\n"); 130 pr_debug("source is channel subsystem\n");
138 ret = chsc_process_crw(); 131 chsc_process_crw();
139 if (ret == -EAGAIN)
140 slow = 1;
141 break; 132 break;
142 default: 133 default:
143 pr_debug("unknown source\n"); 134 pr_debug("unknown source\n");
@@ -146,8 +137,6 @@ repeat:
146 /* chain is always 0 or 1 here. */ 137 /* chain is always 0 or 1 here. */
147 chain = crw[chain].chn ? chain + 1 : 0; 138 chain = crw[chain].chn ? chain + 1 : 0;
148 } 139 }
149 if (slow)
150 queue_work(slow_path_wq, &slow_path_work);
151 goto repeat; 140 goto repeat;
152 return 0; 141 return 0;
153} 142}
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 090743d2f914..19343f9675c3 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void)
357 357
358__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
359 359
360int get_cpu_capability(unsigned int *capability)
361{
362 struct sysinfo_1_2_2 *info;
363 int rc;
364
365 info = (void *) get_zeroed_page(GFP_KERNEL);
366 if (!info)
367 return -ENOMEM;
368 rc = stsi(info, 1, 2, 2);
369 if (rc == -ENOSYS)
370 goto out;
371 rc = 0;
372 *capability = info->capability;
373out:
374 free_page((unsigned long) info);
375 return rc;
376}
377
360/* 378/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 379 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */ 380 */
diff --git a/include/asm-avr32/arch-at32ap/io.h b/include/asm-avr32/arch-at32ap/io.h
new file mode 100644
index 000000000000..ee59e401f041
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/io.h
@@ -0,0 +1,39 @@
1#ifndef __ASM_AVR32_ARCH_AT32AP_IO_H
2#define __ASM_AVR32_ARCH_AT32AP_IO_H
3
4/* For "bizarre" halfword swapping */
5#include <linux/byteorder/swabb.h>
6
7#if defined(CONFIG_AP7000_32_BIT_SMC)
8# define __swizzle_addr_b(addr) (addr ^ 3UL)
9# define __swizzle_addr_w(addr) (addr ^ 2UL)
10# define __swizzle_addr_l(addr) (addr)
11# define ioswabb(a, x) (x)
12# define ioswabw(a, x) (x)
13# define ioswabl(a, x) (x)
14# define __mem_ioswabb(a, x) (x)
15# define __mem_ioswabw(a, x) swab16(x)
16# define __mem_ioswabl(a, x) swab32(x)
17#elif defined(CONFIG_AP7000_16_BIT_SMC)
18# define __swizzle_addr_b(addr) (addr ^ 1UL)
19# define __swizzle_addr_w(addr) (addr)
20# define __swizzle_addr_l(addr) (addr)
21# define ioswabb(a, x) (x)
22# define ioswabw(a, x) (x)
23# define ioswabl(a, x) swahw32(x)
24# define __mem_ioswabb(a, x) (x)
25# define __mem_ioswabw(a, x) swab16(x)
26# define __mem_ioswabl(a, x) swahb32(x)
27#else
28# define __swizzle_addr_b(addr) (addr)
29# define __swizzle_addr_w(addr) (addr)
30# define __swizzle_addr_l(addr) (addr)
31# define ioswabb(a, x) (x)
32# define ioswabw(a, x) swab16(x)
33# define ioswabl(a, x) swab32(x)
34# define __mem_ioswabb(a, x) (x)
35# define __mem_ioswabw(a, x) (x)
36# define __mem_ioswabl(a, x) (x)
37#endif
38
39#endif /* __ASM_AVR32_ARCH_AT32AP_IO_H */
diff --git a/include/asm-avr32/arch-at32ap/smc.h b/include/asm-avr32/arch-at32ap/smc.h
index 3732b328303d..07152b7fd9c9 100644
--- a/include/asm-avr32/arch-at32ap/smc.h
+++ b/include/asm-avr32/arch-at32ap/smc.h
@@ -48,10 +48,32 @@ struct smc_config {
48 unsigned int nwe_controlled:1; 48 unsigned int nwe_controlled:1;
49 49
50 /* 50 /*
51 * 0: NWAIT is disabled
52 * 1: Reserved
53 * 2: NWAIT is frozen mode
54 * 3: NWAIT in ready mode
55 */
56 unsigned int nwait_mode:2;
57
58 /*
51 * 0: Byte select access type 59 * 0: Byte select access type
52 * 1: Byte write access type 60 * 1: Byte write access type
53 */ 61 */
54 unsigned int byte_write:1; 62 unsigned int byte_write:1;
63
64 /*
65 * Number of clock cycles before data is released after
66 * the rising edge of the read controlling signal
67 *
68 * Total cycles from SMC is tdf_cycles + 1
69 */
70 unsigned int tdf_cycles:4;
71
72 /*
73 * 0: TDF optimization disabled
74 * 1: TDF optimization enabled
75 */
76 unsigned int tdf_mode:1;
55}; 77};
56 78
57extern int smc_set_configuration(int cs, const struct smc_config *config); 79extern int smc_set_configuration(int cs, const struct smc_config *config);
diff --git a/include/asm-avr32/arch-at32ap/time.h b/include/asm-avr32/arch-at32ap/time.h
new file mode 100644
index 000000000000..cc8a43418a4d
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/time.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright (C) 2007 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _ASM_AVR32_ARCH_AT32AP_TIME_H
10#define _ASM_AVR32_ARCH_AT32AP_TIME_H
11
12#include <linux/platform_device.h>
13
14extern struct irqaction timer_irqaction;
15extern struct platform_device at32_systc0_device;
16extern void local_timer_interrupt(int irq, void *dev_id);
17
18#define TIMER_BCR 0x000000c0
19#define TIMER_BCR_SYNC 0
20#define TIMER_BMR 0x000000c4
21#define TIMER_BMR_TC0XC0S 0
22#define TIMER_BMR_TC1XC1S 2
23#define TIMER_BMR_TC2XC2S 4
24#define TIMER_CCR 0x00000000
25#define TIMER_CCR_CLKDIS 1
26#define TIMER_CCR_CLKEN 0
27#define TIMER_CCR_SWTRG 2
28#define TIMER_CMR 0x00000004
29#define TIMER_CMR_ABETRG 10
30#define TIMER_CMR_ACPA 16
31#define TIMER_CMR_ACPC 18
32#define TIMER_CMR_AEEVT 20
33#define TIMER_CMR_ASWTRG 22
34#define TIMER_CMR_BCPB 24
35#define TIMER_CMR_BCPC 26
36#define TIMER_CMR_BEEVT 28
37#define TIMER_CMR_BSWTRG 30
38#define TIMER_CMR_BURST 4
39#define TIMER_CMR_CLKI 3
40#define TIMER_CMR_CPCDIS 7
41#define TIMER_CMR_CPCSTOP 6
42#define TIMER_CMR_CPCTRG 14
43#define TIMER_CMR_EEVT 10
44#define TIMER_CMR_EEVTEDG 8
45#define TIMER_CMR_ENETRG 12
46#define TIMER_CMR_ETRGEDG 8
47#define TIMER_CMR_LDBDIS 7
48#define TIMER_CMR_LDBSTOP 6
49#define TIMER_CMR_LDRA 16
50#define TIMER_CMR_LDRB 18
51#define TIMER_CMR_TCCLKS 0
52#define TIMER_CMR_WAVE 15
53#define TIMER_CMR_WAVSEL 13
54#define TIMER_CV 0x00000010
55#define TIMER_CV_CV 0
56#define TIMER_IDR 0x00000028
57#define TIMER_IDR_COVFS 0
58#define TIMER_IDR_CPAS 2
59#define TIMER_IDR_CPBS 3
60#define TIMER_IDR_CPCS 4
61#define TIMER_IDR_ETRGS 7
62#define TIMER_IDR_LDRAS 5
63#define TIMER_IDR_LDRBS 6
64#define TIMER_IDR_LOVRS 1
65#define TIMER_IER 0x00000024
66#define TIMER_IER_COVFS 0
67#define TIMER_IER_CPAS 2
68#define TIMER_IER_CPBS 3
69#define TIMER_IER_CPCS 4
70#define TIMER_IER_ETRGS 7
71#define TIMER_IER_LDRAS 5
72#define TIMER_IER_LDRBS 6
73#define TIMER_IER_LOVRS 1
74#define TIMER_IMR 0x0000002c
75#define TIMER_IMR_COVFS 0
76#define TIMER_IMR_CPAS 2
77#define TIMER_IMR_CPBS 3
78#define TIMER_IMR_CPCS 4
79#define TIMER_IMR_ETRGS 7
80#define TIMER_IMR_LDRAS 5
81#define TIMER_IMR_LDRBS 6
82#define TIMER_IMR_LOVRS 1
83#define TIMER_RA 0x00000014
84#define TIMER_RA_RA 0
85#define TIMER_RB 0x00000018
86#define TIMER_RB_RB 0
87#define TIMER_RC 0x0000001c
88#define TIMER_RC_RC 0
89#define TIMER_SR 0x00000020
90#define TIMER_SR_CLKSTA 16
91#define TIMER_SR_COVFS 0
92#define TIMER_SR_CPAS 2
93#define TIMER_SR_CPBS 3
94#define TIMER_SR_CPCS 4
95#define TIMER_SR_ETRGS 7
96#define TIMER_SR_LDRAS 5
97#define TIMER_SR_LDRBS 6
98#define TIMER_SR_LOVRS 1
99#define TIMER_SR_MTIOA 17
100#define TIMER_SR_MTIOB 18
101
102/* Bit manipulation macros */
103#define TIMER_BIT(name) (1 << TIMER_##name)
104#define TIMER_BF(name,value) ((value) << TIMER_##name)
105
106/* Register access macros */
107#define timer_read(port,instance,reg) \
108 __raw_readl(port + (0x40 * instance) + TIMER_##reg)
109#define timer_write(port,instance,reg,value) \
110 __raw_writel((value), port + (0x40 * instance) + TIMER_##reg)
111
112#endif /* _ASM_AVR32_ARCH_AT32AP_TIME_H */
diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h
index c40b6032c480..b9c2548a52f3 100644
--- a/include/asm-avr32/atomic.h
+++ b/include/asm-avr32/atomic.h
@@ -173,7 +173,7 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
173} 173}
174 174
175#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 175#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
176#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 176#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
177 177
178#define atomic_sub(i, v) (void)atomic_sub_return(i, v) 178#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
179#define atomic_add(i, v) (void)atomic_add_return(i, v) 179#define atomic_add(i, v) (void)atomic_add_return(i, v)
diff --git a/include/asm-avr32/bug.h b/include/asm-avr32/bug.h
index 521766bc9366..afdcd79a2966 100644
--- a/include/asm-avr32/bug.h
+++ b/include/asm-avr32/bug.h
@@ -18,27 +18,53 @@
18 18
19#ifdef CONFIG_DEBUG_BUGVERBOSE 19#ifdef CONFIG_DEBUG_BUGVERBOSE
20 20
21#define BUG() \ 21#define _BUG_OR_WARN(flags) \
22 do { \ 22 asm volatile( \
23 asm volatile(".hword %0\n\t" \ 23 "1: .hword %0\n" \
24 ".hword %1\n\t" \ 24 " .section __bug_table,\"a\",@progbits\n" \
25 ".long %2" \ 25 "2: .long 1b\n" \
26 : \ 26 " .long %1\n" \
27 : "n"(AVR32_BUG_OPCODE), \ 27 " .short %2\n" \
28 "i"(__LINE__), "X"(__FILE__)); \ 28 " .short %3\n" \
29 } while (0) 29 " .org 2b + %4\n" \
30 " .previous" \
31 : \
32 : "i"(AVR32_BUG_OPCODE), "i"(__FILE__), \
33 "i"(__LINE__), "i"(flags), \
34 "i"(sizeof(struct bug_entry)))
30 35
31#else 36#else
32 37
38#define _BUG_OR_WARN(flags) \
39 asm volatile( \
40 "1: .hword %0\n" \
41 " .section __bug_table,\"a\",@progbits\n" \
42 "2: .long 1b\n" \
43 " .short %1\n" \
44 " .org 2b + %2\n" \
45 " .previous" \
46 : \
47 : "i"(AVR32_BUG_OPCODE), "i"(flags), \
48 "i"(sizeof(struct bug_entry)))
49
50#endif /* CONFIG_DEBUG_BUGVERBOSE */
51
33#define BUG() \ 52#define BUG() \
34 do { \ 53 do { \
35 asm volatile(".hword %0\n\t" \ 54 _BUG_OR_WARN(0); \
36 : : "n"(AVR32_BUG_OPCODE)); \ 55 for (;;); \
37 } while (0) 56 } while (0)
38 57
39#endif /* CONFIG_DEBUG_BUGVERBOSE */ 58#define WARN_ON(condition) \
59 ({ \
60 typeof(condition) __ret_warn_on = (condition); \
61 if (unlikely(__ret_warn_on)) \
62 _BUG_OR_WARN(BUGFLAG_WARNING); \
63 unlikely(__ret_warn_on); \
64 })
40 65
41#define HAVE_ARCH_BUG 66#define HAVE_ARCH_BUG
67#define HAVE_ARCH_WARN_ON
42 68
43#endif /* CONFIG_BUG */ 69#endif /* CONFIG_BUG */
44 70
diff --git a/include/asm-avr32/io.h b/include/asm-avr32/io.h
index c08e81048393..e30d4b3bd836 100644
--- a/include/asm-avr32/io.h
+++ b/include/asm-avr32/io.h
@@ -1,13 +1,15 @@
1#ifndef __ASM_AVR32_IO_H 1#ifndef __ASM_AVR32_IO_H
2#define __ASM_AVR32_IO_H 2#define __ASM_AVR32_IO_H
3 3
4#include <linux/kernel.h>
4#include <linux/string.h> 5#include <linux/string.h>
5 6#include <linux/types.h>
6#ifdef __KERNEL__
7 7
8#include <asm/addrspace.h> 8#include <asm/addrspace.h>
9#include <asm/byteorder.h> 9#include <asm/byteorder.h>
10 10
11#include <asm/arch/io.h>
12
11/* virt_to_phys will only work when address is in P1 or P2 */ 13/* virt_to_phys will only work when address is in P1 or P2 */
12static __inline__ unsigned long virt_to_phys(volatile void *address) 14static __inline__ unsigned long virt_to_phys(volatile void *address)
13{ 15{
@@ -36,104 +38,215 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
36extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); 38extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
37extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); 39extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
38 40
39static inline void writeb(unsigned char b, volatile void __iomem *addr) 41static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
40{ 42{
41 *(volatile unsigned char __force *)addr = b; 43 *(volatile u8 __force *)addr = v;
42} 44}
43static inline void writew(unsigned short b, volatile void __iomem *addr) 45static inline void __raw_writew(u16 v, volatile void __iomem *addr)
44{ 46{
45 *(volatile unsigned short __force *)addr = b; 47 *(volatile u16 __force *)addr = v;
46} 48}
47static inline void writel(unsigned int b, volatile void __iomem *addr) 49static inline void __raw_writel(u32 v, volatile void __iomem *addr)
48{ 50{
49 *(volatile unsigned int __force *)addr = b; 51 *(volatile u32 __force *)addr = v;
50} 52}
51#define __raw_writeb writeb
52#define __raw_writew writew
53#define __raw_writel writel
54 53
55static inline unsigned char readb(const volatile void __iomem *addr) 54static inline u8 __raw_readb(const volatile void __iomem *addr)
56{ 55{
57 return *(const volatile unsigned char __force *)addr; 56 return *(const volatile u8 __force *)addr;
58} 57}
59static inline unsigned short readw(const volatile void __iomem *addr) 58static inline u16 __raw_readw(const volatile void __iomem *addr)
60{ 59{
61 return *(const volatile unsigned short __force *)addr; 60 return *(const volatile u16 __force *)addr;
62} 61}
63static inline unsigned int readl(const volatile void __iomem *addr) 62static inline u32 __raw_readl(const volatile void __iomem *addr)
64{ 63{
65 return *(const volatile unsigned int __force *)addr; 64 return *(const volatile u32 __force *)addr;
65}
66
67/* Convert I/O port address to virtual address */
68#ifndef __io
69# define __io(p) ((void *)phys_to_uncached(p))
70#endif
71
72/*
73 * Not really sure about the best way to slow down I/O on
74 * AVR32. Defining it as a no-op until we have an actual test case.
75 */
76#define SLOW_DOWN_IO do { } while (0)
77
78#define __BUILD_MEMORY_SINGLE(pfx, bwl, type) \
79static inline void \
80pfx##write##bwl(type val, volatile void __iomem *addr) \
81{ \
82 volatile type *__addr; \
83 type __val; \
84 \
85 __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \
86 __val = pfx##ioswab##bwl(__addr, val); \
87 \
88 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
89 \
90 *__addr = __val; \
91} \
92 \
93static inline type pfx##read##bwl(const volatile void __iomem *addr) \
94{ \
95 volatile type *__addr; \
96 type __val; \
97 \
98 __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \
99 \
100 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
101 \
102 __val = *__addr; \
103 return pfx##ioswab##bwl(__addr, __val); \
104}
105
106#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow) \
107static inline void pfx##out##bwl##p(type val, unsigned long port) \
108{ \
109 volatile type *__addr; \
110 type __val; \
111 \
112 __addr = __io(__swizzle_addr_##bwl(port)); \
113 __val = pfx##ioswab##bwl(__addr, val); \
114 \
115 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
116 \
117 *__addr = __val; \
118 slow; \
119} \
120 \
121static inline type pfx##in##bwl##p(unsigned long port) \
122{ \
123 volatile type *__addr; \
124 type __val; \
125 \
126 __addr = __io(__swizzle_addr_##bwl(port)); \
127 \
128 BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
129 \
130 __val = *__addr; \
131 slow; \
132 \
133 return pfx##ioswab##bwl(__addr, __val); \
134}
135
136#define __BUILD_MEMORY_PFX(bus, bwl, type) \
137 __BUILD_MEMORY_SINGLE(bus, bwl, type)
138
139#define BUILDIO_MEM(bwl, type) \
140 __BUILD_MEMORY_PFX(, bwl, type) \
141 __BUILD_MEMORY_PFX(__mem_, bwl, type)
142
143#define __BUILD_IOPORT_PFX(bus, bwl, type) \
144 __BUILD_IOPORT_SINGLE(bus, bwl, type, ,) \
145 __BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)
146
147#define BUILDIO_IOPORT(bwl, type) \
148 __BUILD_IOPORT_PFX(, bwl, type) \
149 __BUILD_IOPORT_PFX(__mem_, bwl, type)
150
151BUILDIO_MEM(b, u8)
152BUILDIO_MEM(w, u16)
153BUILDIO_MEM(l, u32)
154
155BUILDIO_IOPORT(b, u8)
156BUILDIO_IOPORT(w, u16)
157BUILDIO_IOPORT(l, u32)
158
159#define readb_relaxed readb
160#define readw_relaxed readw
161#define readl_relaxed readl
162
163#define __BUILD_MEMORY_STRING(bwl, type) \
164static inline void writes##bwl(volatile void __iomem *addr, \
165 const void *data, unsigned int count) \
166{ \
167 const type *__data = data; \
168 \
169 while (count--) \
170 __mem_write##bwl(*__data++, addr); \
171} \
172 \
173static inline void reads##bwl(const volatile void __iomem *addr, \
174 void *data, unsigned int count) \
175{ \
176 type *__data = data; \
177 \
178 while (count--) \
179 *__data++ = __mem_read##bwl(addr); \
66} 180}
67#define __raw_readb readb
68#define __raw_readw readw
69#define __raw_readl readl
70 181
71#define writesb(p, d, l) __raw_writesb((unsigned int)p, d, l) 182#define __BUILD_IOPORT_STRING(bwl, type) \
72#define writesw(p, d, l) __raw_writesw((unsigned int)p, d, l) 183static inline void outs##bwl(unsigned long port, const void *data, \
73#define writesl(p, d, l) __raw_writesl((unsigned int)p, d, l) 184 unsigned int count) \
185{ \
186 const type *__data = data; \
187 \
188 while (count--) \
189 __mem_out##bwl(*__data++, port); \
190} \
191 \
192static inline void ins##bwl(unsigned long port, void *data, \
193 unsigned int count) \
194{ \
195 type *__data = data; \
196 \
197 while (count--) \
198 *__data++ = __mem_in##bwl(port); \
199}
74 200
75#define readsb(p, d, l) __raw_readsb((unsigned int)p, d, l) 201#define BUILDSTRING(bwl, type) \
76#define readsw(p, d, l) __raw_readsw((unsigned int)p, d, l) 202 __BUILD_MEMORY_STRING(bwl, type) \
77#define readsl(p, d, l) __raw_readsl((unsigned int)p, d, l) 203 __BUILD_IOPORT_STRING(bwl, type)
78 204
205BUILDSTRING(b, u8)
206BUILDSTRING(w, u16)
207BUILDSTRING(l, u32)
79 208
80/* 209/*
81 * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be 210 * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be
82 */ 211 */
83#ifndef ioread8 212#ifndef ioread8
84 213
85#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) 214#define ioread8(p) ((unsigned int)readb(p))
86 215
87#define ioread16(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(p)); __v; }) 216#define ioread16(p) ((unsigned int)readw(p))
88#define ioread16be(p) ({ unsigned int __v = be16_to_cpu(__raw_readw(p)); __v; }) 217#define ioread16be(p) ((unsigned int)__raw_readw(p))
89 218
90#define ioread32(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(p)); __v; }) 219#define ioread32(p) ((unsigned int)readl(p))
91#define ioread32be(p) ({ unsigned int __v = be32_to_cpu(__raw_readl(p)); __v; }) 220#define ioread32be(p) ((unsigned int)__raw_readl(p))
92 221
93#define iowrite8(v,p) __raw_writeb(v, p) 222#define iowrite8(v,p) writeb(v, p)
94 223
95#define iowrite16(v,p) __raw_writew(cpu_to_le16(v), p) 224#define iowrite16(v,p) writew(v, p)
96#define iowrite16be(v,p) __raw_writew(cpu_to_be16(v), p) 225#define iowrite16be(v,p) __raw_writew(v, p)
97 226
98#define iowrite32(v,p) __raw_writel(cpu_to_le32(v), p) 227#define iowrite32(v,p) writel(v, p)
99#define iowrite32be(v,p) __raw_writel(cpu_to_be32(v), p) 228#define iowrite32be(v,p) __raw_writel(v, p)
100 229
101#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) 230#define ioread8_rep(p,d,c) readsb(p,d,c)
102#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) 231#define ioread16_rep(p,d,c) readsw(p,d,c)
103#define ioread32_rep(p,d,c) __raw_readsl(p,d,c) 232#define ioread32_rep(p,d,c) readsl(p,d,c)
104 233
105#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c) 234#define iowrite8_rep(p,s,c) writesb(p,s,c)
106#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c) 235#define iowrite16_rep(p,s,c) writesw(p,s,c)
107#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c) 236#define iowrite32_rep(p,s,c) writesl(p,s,c)
108 237
109#endif 238#endif
110 239
111
112/*
113 * These two are only here because ALSA _thinks_ it needs them...
114 */
115static inline void memcpy_fromio(void * to, const volatile void __iomem *from, 240static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
116 unsigned long count) 241 unsigned long count)
117{ 242{
118 char *p = to; 243 memcpy(to, (const void __force *)from, count);
119 while (count) {
120 count--;
121 *p = readb(from);
122 p++;
123 from++;
124 }
125} 244}
126 245
127static inline void memcpy_toio(volatile void __iomem *to, const void * from, 246static inline void memcpy_toio(volatile void __iomem *to, const void * from,
128 unsigned long count) 247 unsigned long count)
129{ 248{
130 const char *p = from; 249 memcpy((void __force *)to, from, count);
131 while (count) {
132 count--;
133 writeb(*p, to);
134 p++;
135 to++;
136 }
137} 250}
138 251
139static inline void memset_io(volatile void __iomem *addr, unsigned char val, 252static inline void memset_io(volatile void __iomem *addr, unsigned char val,
@@ -142,99 +255,8 @@ static inline void memset_io(volatile void __iomem *addr, unsigned char val,
142 memset((void __force *)addr, val, count); 255 memset((void __force *)addr, val, count);
143} 256}
144 257
145/*
146 * Bad read/write accesses...
147 */
148extern void __readwrite_bug(const char *fn);
149
150#define IO_SPACE_LIMIT 0xffffffff 258#define IO_SPACE_LIMIT 0xffffffff
151 259
152/* Convert I/O port address to virtual address */
153#define __io(p) ((void __iomem *)phys_to_uncached(p))
154
155/*
156 * IO port access primitives
157 * -------------------------
158 *
159 * The AVR32 doesn't have special IO access instructions; all IO is memory
160 * mapped. Note that these are defined to perform little endian accesses
161 * only. Their primary purpose is to access PCI and ISA peripherals.
162 *
163 * Note that for a big endian machine, this implies that the following
164 * big endian mode connectivity is in place.
165 *
166 * The machine specific io.h include defines __io to translate an "IO"
167 * address to a memory address.
168 *
169 * Note that we prevent GCC re-ordering or caching values in expressions
170 * by introducing sequence points into the in*() definitions. Note that
171 * __raw_* do not guarantee this behaviour.
172 *
173 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
174 */
175#define outb(v, p) __raw_writeb(v, __io(p))
176#define outw(v, p) __raw_writew(cpu_to_le16(v), __io(p))
177#define outl(v, p) __raw_writel(cpu_to_le32(v), __io(p))
178
179#define inb(p) __raw_readb(__io(p))
180#define inw(p) le16_to_cpu(__raw_readw(__io(p)))
181#define inl(p) le32_to_cpu(__raw_readl(__io(p)))
182
183static inline void __outsb(unsigned long port, void *addr, unsigned int count)
184{
185 while (count--) {
186 outb(*(u8 *)addr, port);
187 addr++;
188 }
189}
190
191static inline void __insb(unsigned long port, void *addr, unsigned int count)
192{
193 while (count--) {
194 *(u8 *)addr = inb(port);
195 addr++;
196 }
197}
198
199static inline void __outsw(unsigned long port, void *addr, unsigned int count)
200{
201 while (count--) {
202 outw(*(u16 *)addr, port);
203 addr += 2;
204 }
205}
206
207static inline void __insw(unsigned long port, void *addr, unsigned int count)
208{
209 while (count--) {
210 *(u16 *)addr = inw(port);
211 addr += 2;
212 }
213}
214
215static inline void __outsl(unsigned long port, void *addr, unsigned int count)
216{
217 while (count--) {
218 outl(*(u32 *)addr, port);
219 addr += 4;
220 }
221}
222
223static inline void __insl(unsigned long port, void *addr, unsigned int count)
224{
225 while (count--) {
226 *(u32 *)addr = inl(port);
227 addr += 4;
228 }
229}
230
231#define outsb(port, addr, count) __outsb(port, addr, count)
232#define insb(port, addr, count) __insb(port, addr, count)
233#define outsw(port, addr, count) __outsw(port, addr, count)
234#define insw(port, addr, count) __insw(port, addr, count)
235#define outsl(port, addr, count) __outsl(port, addr, count)
236#define insl(port, addr, count) __insl(port, addr, count)
237
238extern void __iomem *__ioremap(unsigned long offset, size_t size, 260extern void __iomem *__ioremap(unsigned long offset, size_t size,
239 unsigned long flags); 261 unsigned long flags);
240extern void __iounmap(void __iomem *addr); 262extern void __iounmap(void __iomem *addr);
@@ -292,6 +314,4 @@ extern void __iounmap(void __iomem *addr);
292 */ 314 */
293#define xlate_dev_kmem_ptr(p) p 315#define xlate_dev_kmem_ptr(p) p
294 316
295#endif /* __KERNEL__ */
296
297#endif /* __ASM_AVR32_IO_H */ 317#endif /* __ASM_AVR32_IO_H */
diff --git a/include/asm-avr32/processor.h b/include/asm-avr32/processor.h
index f6913778a45f..6a64833756a6 100644
--- a/include/asm-avr32/processor.h
+++ b/include/asm-avr32/processor.h
@@ -40,6 +40,14 @@ enum tlb_config {
40 TLB_INVALID 40 TLB_INVALID
41}; 41};
42 42
43#define AVR32_FEATURE_RMW (1 << 0)
44#define AVR32_FEATURE_DSP (1 << 1)
45#define AVR32_FEATURE_SIMD (1 << 2)
46#define AVR32_FEATURE_OCD (1 << 3)
47#define AVR32_FEATURE_PCTR (1 << 4)
48#define AVR32_FEATURE_JAVA (1 << 5)
49#define AVR32_FEATURE_FPU (1 << 6)
50
43struct avr32_cpuinfo { 51struct avr32_cpuinfo {
44 struct clk *clk; 52 struct clk *clk;
45 unsigned long loops_per_jiffy; 53 unsigned long loops_per_jiffy;
@@ -48,6 +56,7 @@ struct avr32_cpuinfo {
48 unsigned short arch_revision; 56 unsigned short arch_revision;
49 unsigned short cpu_revision; 57 unsigned short cpu_revision;
50 enum tlb_config tlb_config; 58 enum tlb_config tlb_config;
59 unsigned long features;
51 60
52 struct cache_info icache; 61 struct cache_info icache;
53 struct cache_info dcache; 62 struct cache_info dcache;
@@ -125,10 +134,10 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
125#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc) 134#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)
126 135
127struct pt_regs; 136struct pt_regs;
128void show_trace(struct task_struct *task, unsigned long *stack,
129 struct pt_regs *regs);
130
131extern unsigned long get_wchan(struct task_struct *p); 137extern unsigned long get_wchan(struct task_struct *p);
138extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
139extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
140 struct pt_regs *regs, const char *log_lvl);
132 141
133#define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc) 142#define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc)
134#define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp) 143#define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp)
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 0a5224245e44..1ff1a217015d 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -124,19 +124,12 @@ struct tagtable {
124#define for_each_tag(t,base) \ 124#define for_each_tag(t,base) \
125 for (t = base; t->hdr.size; t = tag_next(t)) 125 for (t = base; t->hdr.size; t = tag_next(t))
126 126
127extern struct tag_mem_range *mem_phys;
128extern struct tag_mem_range *mem_reserved;
129extern struct tag_mem_range *mem_ramdisk;
130
131extern struct tag *bootloader_tags; 127extern struct tag *bootloader_tags;
132 128
133extern void setup_bootmem(void); 129extern resource_size_t fbmem_start;
134extern void setup_processor(void); 130extern resource_size_t fbmem_size;
135extern void board_setup_fbmem(unsigned long fbmem_start,
136 unsigned long fbmem_size);
137 131
138/* Chip-specific hook to enable the use of SDRAM */ 132void setup_processor(void);
139void chip_enable_sdram(void);
140 133
141#endif /* !__ASSEMBLY__ */ 134#endif /* !__ASSEMBLY__ */
142 135
diff --git a/include/asm-avr32/sysreg.h b/include/asm-avr32/sysreg.h
index f91975f330f6..c02bc8304b13 100644
--- a/include/asm-avr32/sysreg.h
+++ b/include/asm-avr32/sysreg.h
@@ -7,326 +7,281 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#ifndef __ASM_AVR32_SYSREG_H__ 10#ifndef __ASM_AVR32_SYSREG_H
11#define __ASM_AVR32_SYSREG_H__ 11#define __ASM_AVR32_SYSREG_H
12 12
13/* sysreg register offsets */ 13/* sysreg register offsets */
14#define SYSREG_SR 0x0000 14#define SYSREG_SR 0x0000
15#define SYSREG_EVBA 0x0004 15#define SYSREG_EVBA 0x0004
16#define SYSREG_ACBA 0x0008 16#define SYSREG_ACBA 0x0008
17#define SYSREG_CPUCR 0x000c 17#define SYSREG_CPUCR 0x000c
18#define SYSREG_ECR 0x0010 18#define SYSREG_ECR 0x0010
19#define SYSREG_RSR_SUP 0x0014 19#define SYSREG_RSR_SUP 0x0014
20#define SYSREG_RSR_INT0 0x0018 20#define SYSREG_RSR_INT0 0x0018
21#define SYSREG_RSR_INT1 0x001c 21#define SYSREG_RSR_INT1 0x001c
22#define SYSREG_RSR_INT2 0x0020 22#define SYSREG_RSR_INT2 0x0020
23#define SYSREG_RSR_INT3 0x0024 23#define SYSREG_RSR_INT3 0x0024
24#define SYSREG_RSR_EX 0x0028 24#define SYSREG_RSR_EX 0x0028
25#define SYSREG_RSR_NMI 0x002c 25#define SYSREG_RSR_NMI 0x002c
26#define SYSREG_RSR_DBG 0x0030 26#define SYSREG_RSR_DBG 0x0030
27#define SYSREG_RAR_SUP 0x0034 27#define SYSREG_RAR_SUP 0x0034
28#define SYSREG_RAR_INT0 0x0038 28#define SYSREG_RAR_INT0 0x0038
29#define SYSREG_RAR_INT1 0x003c 29#define SYSREG_RAR_INT1 0x003c
30#define SYSREG_RAR_INT2 0x0040 30#define SYSREG_RAR_INT2 0x0040
31#define SYSREG_RAR_INT3 0x0044 31#define SYSREG_RAR_INT3 0x0044
32#define SYSREG_RAR_EX 0x0048 32#define SYSREG_RAR_EX 0x0048
33#define SYSREG_RAR_NMI 0x004c 33#define SYSREG_RAR_NMI 0x004c
34#define SYSREG_RAR_DBG 0x0050 34#define SYSREG_RAR_DBG 0x0050
35#define SYSREG_JECR 0x0054 35#define SYSREG_JECR 0x0054
36#define SYSREG_JOSP 0x0058 36#define SYSREG_JOSP 0x0058
37#define SYSREG_JAVA_LV0 0x005c 37#define SYSREG_JAVA_LV0 0x005c
38#define SYSREG_JAVA_LV1 0x0060 38#define SYSREG_JAVA_LV1 0x0060
39#define SYSREG_JAVA_LV2 0x0064 39#define SYSREG_JAVA_LV2 0x0064
40#define SYSREG_JAVA_LV3 0x0068 40#define SYSREG_JAVA_LV3 0x0068
41#define SYSREG_JAVA_LV4 0x006c 41#define SYSREG_JAVA_LV4 0x006c
42#define SYSREG_JAVA_LV5 0x0070 42#define SYSREG_JAVA_LV5 0x0070
43#define SYSREG_JAVA_LV6 0x0074 43#define SYSREG_JAVA_LV6 0x0074
44#define SYSREG_JAVA_LV7 0x0078 44#define SYSREG_JAVA_LV7 0x0078
45#define SYSREG_JTBA 0x007c 45#define SYSREG_JTBA 0x007c
46#define SYSREG_JBCR 0x0080 46#define SYSREG_JBCR 0x0080
47#define SYSREG_CONFIG0 0x0100 47#define SYSREG_CONFIG0 0x0100
48#define SYSREG_CONFIG1 0x0104 48#define SYSREG_CONFIG1 0x0104
49#define SYSREG_COUNT 0x0108 49#define SYSREG_COUNT 0x0108
50#define SYSREG_COMPARE 0x010c 50#define SYSREG_COMPARE 0x010c
51#define SYSREG_TLBEHI 0x0110 51#define SYSREG_TLBEHI 0x0110
52#define SYSREG_TLBELO 0x0114 52#define SYSREG_TLBELO 0x0114
53#define SYSREG_PTBR 0x0118 53#define SYSREG_PTBR 0x0118
54#define SYSREG_TLBEAR 0x011c 54#define SYSREG_TLBEAR 0x011c
55#define SYSREG_MMUCR 0x0120 55#define SYSREG_MMUCR 0x0120
56#define SYSREG_TLBARLO 0x0124 56#define SYSREG_TLBARLO 0x0124
57#define SYSREG_TLBARHI 0x0128 57#define SYSREG_TLBARHI 0x0128
58#define SYSREG_PCCNT 0x012c 58#define SYSREG_PCCNT 0x012c
59#define SYSREG_PCNT0 0x0130 59#define SYSREG_PCNT0 0x0130
60#define SYSREG_PCNT1 0x0134 60#define SYSREG_PCNT1 0x0134
61#define SYSREG_PCCR 0x0138 61#define SYSREG_PCCR 0x0138
62#define SYSREG_BEAR 0x013c 62#define SYSREG_BEAR 0x013c
63#define SYSREG_SABAL 0x0300
64#define SYSREG_SABAH 0x0304
65#define SYSREG_SABD 0x0308
63 66
64/* Bitfields in SR */ 67/* Bitfields in SR */
65#define SYSREG_SR_C_OFFSET 0 68#define SYSREG_SR_C_OFFSET 0
66#define SYSREG_SR_C_SIZE 1 69#define SYSREG_SR_C_SIZE 1
67#define SYSREG_Z_OFFSET 1 70#define SYSREG_Z_OFFSET 1
68#define SYSREG_Z_SIZE 1 71#define SYSREG_Z_SIZE 1
69#define SYSREG_SR_N_OFFSET 2 72#define SYSREG_SR_N_OFFSET 2
70#define SYSREG_SR_N_SIZE 1 73#define SYSREG_SR_N_SIZE 1
71#define SYSREG_SR_V_OFFSET 3 74#define SYSREG_SR_V_OFFSET 3
72#define SYSREG_SR_V_SIZE 1 75#define SYSREG_SR_V_SIZE 1
73#define SYSREG_Q_OFFSET 4 76#define SYSREG_Q_OFFSET 4
74#define SYSREG_Q_SIZE 1 77#define SYSREG_Q_SIZE 1
75#define SYSREG_GM_OFFSET 16 78#define SYSREG_L_OFFSET 5
76#define SYSREG_GM_SIZE 1 79#define SYSREG_L_SIZE 1
77#define SYSREG_I0M_OFFSET 17 80#define SYSREG_T_OFFSET 14
78#define SYSREG_I0M_SIZE 1 81#define SYSREG_T_SIZE 1
79#define SYSREG_I1M_OFFSET 18 82#define SYSREG_SR_R_OFFSET 15
80#define SYSREG_I1M_SIZE 1 83#define SYSREG_SR_R_SIZE 1
81#define SYSREG_I2M_OFFSET 19 84#define SYSREG_GM_OFFSET 16
82#define SYSREG_I2M_SIZE 1 85#define SYSREG_GM_SIZE 1
83#define SYSREG_I3M_OFFSET 20 86#define SYSREG_I0M_OFFSET 17
84#define SYSREG_I3M_SIZE 1 87#define SYSREG_I0M_SIZE 1
85#define SYSREG_EM_OFFSET 21 88#define SYSREG_I1M_OFFSET 18
86#define SYSREG_EM_SIZE 1 89#define SYSREG_I1M_SIZE 1
87#define SYSREG_M0_OFFSET 22 90#define SYSREG_I2M_OFFSET 19
88#define SYSREG_M0_SIZE 1 91#define SYSREG_I2M_SIZE 1
89#define SYSREG_M1_OFFSET 23 92#define SYSREG_I3M_OFFSET 20
90#define SYSREG_M1_SIZE 1 93#define SYSREG_I3M_SIZE 1
91#define SYSREG_M2_OFFSET 24 94#define SYSREG_EM_OFFSET 21
92#define SYSREG_M2_SIZE 1 95#define SYSREG_EM_SIZE 1
93#define SYSREG_SR_D_OFFSET 26 96#define SYSREG_M0_OFFSET 22
94#define SYSREG_SR_D_SIZE 1 97#define SYSREG_M0_SIZE 1
95#define SYSREG_DM_OFFSET 27 98#define SYSREG_M1_OFFSET 23
96#define SYSREG_DM_SIZE 1 99#define SYSREG_M1_SIZE 1
97#define SYSREG_SR_J_OFFSET 28 100#define SYSREG_M2_OFFSET 24
98#define SYSREG_SR_J_SIZE 1 101#define SYSREG_M2_SIZE 1
99#define SYSREG_R_OFFSET 29 102#define SYSREG_SR_D_OFFSET 26
100#define SYSREG_R_SIZE 1 103#define SYSREG_SR_D_SIZE 1
101#define SYSREG_H_OFFSET 30 104#define SYSREG_DM_OFFSET 27
102#define SYSREG_H_SIZE 1 105#define SYSREG_DM_SIZE 1
103 106#define SYSREG_SR_J_OFFSET 28
104/* Bitfields in EVBA */ 107#define SYSREG_SR_J_SIZE 1
105 108#define SYSREG_H_OFFSET 29
106/* Bitfields in ACBA */ 109#define SYSREG_H_SIZE 1
107 110
108/* Bitfields in CPUCR */ 111/* Bitfields in CPUCR */
109#define SYSREG_BI_OFFSET 0 112#define SYSREG_BI_OFFSET 0
110#define SYSREG_BI_SIZE 1 113#define SYSREG_BI_SIZE 1
111#define SYSREG_BE_OFFSET 1 114#define SYSREG_BE_OFFSET 1
112#define SYSREG_BE_SIZE 1 115#define SYSREG_BE_SIZE 1
113#define SYSREG_FE_OFFSET 2 116#define SYSREG_FE_OFFSET 2
114#define SYSREG_FE_SIZE 1 117#define SYSREG_FE_SIZE 1
115#define SYSREG_RE_OFFSET 3 118#define SYSREG_RE_OFFSET 3
116#define SYSREG_RE_SIZE 1 119#define SYSREG_RE_SIZE 1
117#define SYSREG_IBE_OFFSET 4 120#define SYSREG_IBE_OFFSET 4
118#define SYSREG_IBE_SIZE 1 121#define SYSREG_IBE_SIZE 1
119#define SYSREG_IEE_OFFSET 5 122#define SYSREG_IEE_OFFSET 5
120#define SYSREG_IEE_SIZE 1 123#define SYSREG_IEE_SIZE 1
121
122/* Bitfields in ECR */
123#define SYSREG_ECR_OFFSET 0
124#define SYSREG_ECR_SIZE 32
125
126/* Bitfields in RSR_SUP */
127
128/* Bitfields in RSR_INT0 */
129
130/* Bitfields in RSR_INT1 */
131
132/* Bitfields in RSR_INT2 */
133
134/* Bitfields in RSR_INT3 */
135
136/* Bitfields in RSR_EX */
137
138/* Bitfields in RSR_NMI */
139
140/* Bitfields in RSR_DBG */
141
142/* Bitfields in RAR_SUP */
143
144/* Bitfields in RAR_INT0 */
145
146/* Bitfields in RAR_INT1 */
147
148/* Bitfields in RAR_INT2 */
149
150/* Bitfields in RAR_INT3 */
151
152/* Bitfields in RAR_EX */
153
154/* Bitfields in RAR_NMI */
155
156/* Bitfields in RAR_DBG */
157
158/* Bitfields in JECR */
159
160/* Bitfields in JOSP */
161
162/* Bitfields in JAVA_LV0 */
163
164/* Bitfields in JAVA_LV1 */
165
166/* Bitfields in JAVA_LV2 */
167
168/* Bitfields in JAVA_LV3 */
169
170/* Bitfields in JAVA_LV4 */
171
172/* Bitfields in JAVA_LV5 */
173
174/* Bitfields in JAVA_LV6 */
175
176/* Bitfields in JAVA_LV7 */
177
178/* Bitfields in JTBA */
179
180/* Bitfields in JBCR */
181 124
182/* Bitfields in CONFIG0 */ 125/* Bitfields in CONFIG0 */
183#define SYSREG_CONFIG0_D_OFFSET 1 126#define SYSREG_CONFIG0_R_OFFSET 0
184#define SYSREG_CONFIG0_D_SIZE 1 127#define SYSREG_CONFIG0_R_SIZE 1
185#define SYSREG_CONFIG0_S_OFFSET 2 128#define SYSREG_CONFIG0_D_OFFSET 1
186#define SYSREG_CONFIG0_S_SIZE 1 129#define SYSREG_CONFIG0_D_SIZE 1
187#define SYSREG_O_OFFSET 3 130#define SYSREG_CONFIG0_S_OFFSET 2
188#define SYSREG_O_SIZE 1 131#define SYSREG_CONFIG0_S_SIZE 1
189#define SYSREG_P_OFFSET 4 132#define SYSREG_CONFIG0_O_OFFSET 3
190#define SYSREG_P_SIZE 1 133#define SYSREG_CONFIG0_O_SIZE 1
191#define SYSREG_CONFIG0_J_OFFSET 5 134#define SYSREG_CONFIG0_P_OFFSET 4
192#define SYSREG_CONFIG0_J_SIZE 1 135#define SYSREG_CONFIG0_P_SIZE 1
193#define SYSREG_F_OFFSET 6 136#define SYSREG_CONFIG0_J_OFFSET 5
194#define SYSREG_F_SIZE 1 137#define SYSREG_CONFIG0_J_SIZE 1
195#define SYSREG_MMUT_OFFSET 7 138#define SYSREG_CONFIG0_F_OFFSET 6
196#define SYSREG_MMUT_SIZE 3 139#define SYSREG_CONFIG0_F_SIZE 1
197#define SYSREG_AR_OFFSET 10 140#define SYSREG_MMUT_OFFSET 7
198#define SYSREG_AR_SIZE 3 141#define SYSREG_MMUT_SIZE 3
199#define SYSREG_AT_OFFSET 13 142#define SYSREG_AR_OFFSET 10
200#define SYSREG_AT_SIZE 3 143#define SYSREG_AR_SIZE 3
201#define SYSREG_PROCESSORREVISION_OFFSET 16 144#define SYSREG_AT_OFFSET 13
202#define SYSREG_PROCESSORREVISION_SIZE 8 145#define SYSREG_AT_SIZE 3
203#define SYSREG_PROCESSORID_OFFSET 24 146#define SYSREG_PROCESSORREVISION_OFFSET 16
204#define SYSREG_PROCESSORID_SIZE 8 147#define SYSREG_PROCESSORREVISION_SIZE 8
148#define SYSREG_PROCESSORID_OFFSET 24
149#define SYSREG_PROCESSORID_SIZE 8
205 150
206/* Bitfields in CONFIG1 */ 151/* Bitfields in CONFIG1 */
207#define SYSREG_DASS_OFFSET 0 152#define SYSREG_DASS_OFFSET 0
208#define SYSREG_DASS_SIZE 3 153#define SYSREG_DASS_SIZE 3
209#define SYSREG_DLSZ_OFFSET 3 154#define SYSREG_DLSZ_OFFSET 3
210#define SYSREG_DLSZ_SIZE 3 155#define SYSREG_DLSZ_SIZE 3
211#define SYSREG_DSET_OFFSET 6 156#define SYSREG_DSET_OFFSET 6
212#define SYSREG_DSET_SIZE 4 157#define SYSREG_DSET_SIZE 4
213#define SYSREG_IASS_OFFSET 10 158#define SYSREG_IASS_OFFSET 10
214#define SYSREG_IASS_SIZE 2 159#define SYSREG_IASS_SIZE 3
215#define SYSREG_ILSZ_OFFSET 13 160#define SYSREG_ILSZ_OFFSET 13
216#define SYSREG_ILSZ_SIZE 3 161#define SYSREG_ILSZ_SIZE 3
217#define SYSREG_ISET_OFFSET 16 162#define SYSREG_ISET_OFFSET 16
218#define SYSREG_ISET_SIZE 4 163#define SYSREG_ISET_SIZE 4
219#define SYSREG_DMMUSZ_OFFSET 20 164#define SYSREG_DMMUSZ_OFFSET 20
220#define SYSREG_DMMUSZ_SIZE 6 165#define SYSREG_DMMUSZ_SIZE 6
221#define SYSREG_IMMUSZ_OFFSET 26 166#define SYSREG_IMMUSZ_OFFSET 26
222#define SYSREG_IMMUSZ_SIZE 6 167#define SYSREG_IMMUSZ_SIZE 6
223
224/* Bitfields in COUNT */
225
226/* Bitfields in COMPARE */
227 168
228/* Bitfields in TLBEHI */ 169/* Bitfields in TLBEHI */
229#define SYSREG_ASID_OFFSET 0 170#define SYSREG_ASID_OFFSET 0
230#define SYSREG_ASID_SIZE 8 171#define SYSREG_ASID_SIZE 8
231#define SYSREG_TLBEHI_I_OFFSET 8 172#define SYSREG_TLBEHI_I_OFFSET 8
232#define SYSREG_TLBEHI_I_SIZE 1 173#define SYSREG_TLBEHI_I_SIZE 1
233#define SYSREG_TLBEHI_V_OFFSET 9 174#define SYSREG_TLBEHI_V_OFFSET 9
234#define SYSREG_TLBEHI_V_SIZE 1 175#define SYSREG_TLBEHI_V_SIZE 1
235#define SYSREG_VPN_OFFSET 10 176#define SYSREG_VPN_OFFSET 10
236#define SYSREG_VPN_SIZE 22 177#define SYSREG_VPN_SIZE 22
237 178
238/* Bitfields in TLBELO */ 179/* Bitfields in TLBELO */
239#define SYSREG_W_OFFSET 0 180#define SYSREG_W_OFFSET 0
240#define SYSREG_W_SIZE 1 181#define SYSREG_W_SIZE 1
241#define SYSREG_TLBELO_D_OFFSET 1 182#define SYSREG_TLBELO_D_OFFSET 1
242#define SYSREG_TLBELO_D_SIZE 1 183#define SYSREG_TLBELO_D_SIZE 1
243#define SYSREG_SZ_OFFSET 2 184#define SYSREG_SZ_OFFSET 2
244#define SYSREG_SZ_SIZE 2 185#define SYSREG_SZ_SIZE 2
245#define SYSREG_AP_OFFSET 4 186#define SYSREG_AP_OFFSET 4
246#define SYSREG_AP_SIZE 3 187#define SYSREG_AP_SIZE 3
247#define SYSREG_B_OFFSET 7 188#define SYSREG_B_OFFSET 7
248#define SYSREG_B_SIZE 1 189#define SYSREG_B_SIZE 1
249#define SYSREG_G_OFFSET 8 190#define SYSREG_G_OFFSET 8
250#define SYSREG_G_SIZE 1 191#define SYSREG_G_SIZE 1
251#define SYSREG_TLBELO_C_OFFSET 9 192#define SYSREG_TLBELO_C_OFFSET 9
252#define SYSREG_TLBELO_C_SIZE 1 193#define SYSREG_TLBELO_C_SIZE 1
253#define SYSREG_PFN_OFFSET 10 194#define SYSREG_PFN_OFFSET 10
254#define SYSREG_PFN_SIZE 22 195#define SYSREG_PFN_SIZE 22
255
256/* Bitfields in PTBR */
257
258/* Bitfields in TLBEAR */
259 196
260/* Bitfields in MMUCR */ 197/* Bitfields in MMUCR */
261#define SYSREG_E_OFFSET 0 198#define SYSREG_E_OFFSET 0
262#define SYSREG_E_SIZE 1 199#define SYSREG_E_SIZE 1
263#define SYSREG_M_OFFSET 1 200#define SYSREG_M_OFFSET 1
264#define SYSREG_M_SIZE 1 201#define SYSREG_M_SIZE 1
265#define SYSREG_MMUCR_I_OFFSET 2 202#define SYSREG_MMUCR_I_OFFSET 2
266#define SYSREG_MMUCR_I_SIZE 1 203#define SYSREG_MMUCR_I_SIZE 1
267#define SYSREG_MMUCR_N_OFFSET 3 204#define SYSREG_MMUCR_N_OFFSET 3
268#define SYSREG_MMUCR_N_SIZE 1 205#define SYSREG_MMUCR_N_SIZE 1
269#define SYSREG_MMUCR_S_OFFSET 4 206#define SYSREG_MMUCR_S_OFFSET 4
270#define SYSREG_MMUCR_S_SIZE 1 207#define SYSREG_MMUCR_S_SIZE 1
271#define SYSREG_DLA_OFFSET 8 208#define SYSREG_DLA_OFFSET 8
272#define SYSREG_DLA_SIZE 6 209#define SYSREG_DLA_SIZE 6
273#define SYSREG_DRP_OFFSET 14 210#define SYSREG_DRP_OFFSET 14
274#define SYSREG_DRP_SIZE 6 211#define SYSREG_DRP_SIZE 6
275#define SYSREG_ILA_OFFSET 20 212#define SYSREG_ILA_OFFSET 20
276#define SYSREG_ILA_SIZE 6 213#define SYSREG_ILA_SIZE 6
277#define SYSREG_IRP_OFFSET 26 214#define SYSREG_IRP_OFFSET 26
278#define SYSREG_IRP_SIZE 6 215#define SYSREG_IRP_SIZE 6
279
280/* Bitfields in TLBARLO */
281
282/* Bitfields in TLBARHI */
283
284/* Bitfields in PCCNT */
285
286/* Bitfields in PCNT0 */
287
288/* Bitfields in PCNT1 */
289 216
290/* Bitfields in PCCR */ 217/* Bitfields in PCCR */
291 218#define SYSREG_PCCR_R_OFFSET 1
292/* Bitfields in BEAR */ 219#define SYSREG_PCCR_R_SIZE 1
220#define SYSREG_PCCR_C_OFFSET 2
221#define SYSREG_PCCR_C_SIZE 1
222#define SYSREG_PCCR_S_OFFSET 3
223#define SYSREG_PCCR_S_SIZE 1
224#define SYSREG_IEC_OFFSET 4
225#define SYSREG_IEC_SIZE 1
226#define SYSREG_IE0_OFFSET 5
227#define SYSREG_IE0_SIZE 1
228#define SYSREG_IE1_OFFSET 6
229#define SYSREG_IE1_SIZE 1
230#define SYSREG_FC_OFFSET 8
231#define SYSREG_FC_SIZE 1
232#define SYSREG_F0_OFFSET 9
233#define SYSREG_F0_SIZE 1
234#define SYSREG_F1_OFFSET 10
235#define SYSREG_F1_SIZE 1
236#define SYSREG_CONF0_OFFSET 12
237#define SYSREG_CONF0_SIZE 6
238#define SYSREG_CONF1_OFFSET 18
239#define SYSREG_CONF1_SIZE 6
293 240
294/* Constants for ECR */ 241/* Constants for ECR */
295#define ECR_UNRECOVERABLE 0 242#define ECR_UNRECOVERABLE 0
296#define ECR_TLB_MULTIPLE 1 243#define ECR_TLB_MULTIPLE 1
297#define ECR_BUS_ERROR_WRITE 2 244#define ECR_BUS_ERROR_WRITE 2
298#define ECR_BUS_ERROR_READ 3 245#define ECR_BUS_ERROR_READ 3
299#define ECR_NMI 4 246#define ECR_NMI 4
300#define ECR_ADDR_ALIGN_X 5 247#define ECR_ADDR_ALIGN_X 5
301#define ECR_PROTECTION_X 6 248#define ECR_PROTECTION_X 6
302#define ECR_DEBUG 7 249#define ECR_DEBUG 7
303#define ECR_ILLEGAL_OPCODE 8 250#define ECR_ILLEGAL_OPCODE 8
304#define ECR_UNIMPL_INSTRUCTION 9 251#define ECR_UNIMPL_INSTRUCTION 9
305#define ECR_PRIVILEGE_VIOLATION 10 252#define ECR_PRIVILEGE_VIOLATION 10
306#define ECR_FPE 11 253#define ECR_FPE 11
307#define ECR_COPROC_ABSENT 12 254#define ECR_COPROC_ABSENT 12
308#define ECR_ADDR_ALIGN_R 13 255#define ECR_ADDR_ALIGN_R 13
309#define ECR_ADDR_ALIGN_W 14 256#define ECR_ADDR_ALIGN_W 14
310#define ECR_PROTECTION_R 15 257#define ECR_PROTECTION_R 15
311#define ECR_PROTECTION_W 16 258#define ECR_PROTECTION_W 16
312#define ECR_DTLB_MODIFIED 17 259#define ECR_DTLB_MODIFIED 17
313#define ECR_TLB_MISS_X 20 260#define ECR_TLB_MISS_X 20
314#define ECR_TLB_MISS_R 24 261#define ECR_TLB_MISS_R 24
315#define ECR_TLB_MISS_W 28 262#define ECR_TLB_MISS_W 28
316 263
317/* Bit manipulation macros */ 264/* Bit manipulation macros */
318#define SYSREG_BIT(name) (1 << SYSREG_##name##_OFFSET) 265#define SYSREG_BIT(name) \
319#define SYSREG_BF(name,value) (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) << SYSREG_##name##_OFFSET) 266 (1 << SYSREG_##name##_OFFSET)
320#define SYSREG_BFEXT(name,value) (((value) >> SYSREG_##name##_OFFSET) & ((1 << SYSREG_##name##_SIZE) - 1)) 267#define SYSREG_BF(name,value) \
321#define SYSREG_BFINS(name,value,old) (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) << SYSREG_##name##_OFFSET)) | SYSREG_BF(name,value)) 268 (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) \
269 << SYSREG_##name##_OFFSET)
270#define SYSREG_BFEXT(name,value)\
271 (((value) >> SYSREG_##name##_OFFSET) \
272 & ((1 << SYSREG_##name##_SIZE) - 1))
273#define SYSREG_BFINS(name,value,old) \
274 (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) \
275 << SYSREG_##name##_OFFSET)) \
276 | SYSREG_BF(name,value))
322 277
278/* Register access macros */
323#ifdef __CHECKER__ 279#ifdef __CHECKER__
324extern unsigned long __builtin_mfsr(unsigned long reg); 280extern unsigned long __builtin_mfsr(unsigned long reg);
325extern void __builtin_mtsr(unsigned long reg, unsigned long value); 281extern void __builtin_mtsr(unsigned long reg, unsigned long value);
326#endif 282#endif
327 283
328/* Register access macros */ 284#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg)
329#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg) 285#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value)
330#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value)
331 286
332#endif /* __ASM_AVR32_SYSREG_H__ */ 287#endif /* __ASM_AVR32_SYSREG_H */
diff --git a/include/asm-avr32/system.h b/include/asm-avr32/system.h
index ac596058697d..a8236bacc878 100644
--- a/include/asm-avr32/system.h
+++ b/include/asm-avr32/system.h
@@ -9,6 +9,7 @@
9#define __ASM_AVR32_SYSTEM_H 9#define __ASM_AVR32_SYSTEM_H
10 10
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <linux/linkage.h>
12#include <linux/types.h> 13#include <linux/types.h>
13 14
14#include <asm/ptrace.h> 15#include <asm/ptrace.h>
@@ -140,15 +141,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
140 sizeof(*(ptr)))) 141 sizeof(*(ptr))))
141 142
142struct pt_regs; 143struct pt_regs;
143extern void __die(const char *, struct pt_regs *, unsigned long, 144void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
144 const char *, const char *, unsigned long); 145void _exception(long signr, struct pt_regs *regs, int code,
145extern void __die_if_kernel(const char *, struct pt_regs *, unsigned long, 146 unsigned long addr);
146 const char *, const char *, unsigned long);
147
148#define die(msg, regs, err) \
149 __die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
150#define die_if_kernel(msg, regs, err) \
151 __die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
152 147
153#define arch_align_stack(x) (x) 148#define arch_align_stack(x) (x)
154 149
diff --git a/include/asm-avr32/thread_info.h b/include/asm-avr32/thread_info.h
index d1f5b35ebd54..a2e606dd4f4a 100644
--- a/include/asm-avr32/thread_info.h
+++ b/include/asm-avr32/thread_info.h
@@ -83,6 +83,7 @@ static inline struct thread_info *current_thread_info(void)
83#define TIF_SINGLE_STEP 6 /* single step after next break */ 83#define TIF_SINGLE_STEP 6 /* single step after next break */
84#define TIF_MEMDIE 7 84#define TIF_MEMDIE 7
85#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal */ 85#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal */
86#define TIF_CPU_GOING_TO_SLEEP 9 /* CPU is entering sleep 0 mode */
86#define TIF_USERSPACE 31 /* true if FS sets userspace */ 87#define TIF_USERSPACE 31 /* true if FS sets userspace */
87 88
88#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 89#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -94,6 +95,7 @@ static inline struct thread_info *current_thread_info(void)
94#define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP) 95#define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP)
95#define _TIF_MEMDIE (1 << TIF_MEMDIE) 96#define _TIF_MEMDIE (1 << TIF_MEMDIE)
96#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 97#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
98#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
97 99
98/* XXX: These two masks must never span more than 16 bits! */ 100/* XXX: These two masks must never span more than 16 bits! */
99/* work to do on interrupt/exception return */ 101/* work to do on interrupt/exception return */
diff --git a/include/asm-avr32/uaccess.h b/include/asm-avr32/uaccess.h
index 74a679e9098c..ed092395215e 100644
--- a/include/asm-avr32/uaccess.h
+++ b/include/asm-avr32/uaccess.h
@@ -181,24 +181,23 @@ extern int __put_user_bad(void);
181 181
182#define __get_user_nocheck(x, ptr, size) \ 182#define __get_user_nocheck(x, ptr, size) \
183({ \ 183({ \
184 typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \ 184 unsigned long __gu_val = 0; \
185 int __gu_err = 0; \ 185 int __gu_err = 0; \
186 \ 186 \
187 switch (size) { \ 187 switch (size) { \
188 case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \ 188 case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \
189 case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \ 189 case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \
190 case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \ 190 case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \
191 case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break; \
192 default: __gu_err = __get_user_bad(); break; \ 191 default: __gu_err = __get_user_bad(); break; \
193 } \ 192 } \
194 \ 193 \
195 x = __gu_val; \ 194 x = (typeof(*(ptr)))__gu_val; \
196 __gu_err; \ 195 __gu_err; \
197}) 196})
198 197
199#define __get_user_check(x, ptr, size) \ 198#define __get_user_check(x, ptr, size) \
200({ \ 199({ \
201 typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \ 200 unsigned long __gu_val = 0; \
202 const typeof(*(ptr)) __user * __gu_addr = (ptr); \ 201 const typeof(*(ptr)) __user * __gu_addr = (ptr); \
203 int __gu_err = 0; \ 202 int __gu_err = 0; \
204 \ 203 \
@@ -216,10 +215,6 @@ extern int __put_user_bad(void);
216 __get_user_asm("w", __gu_val, __gu_addr, \ 215 __get_user_asm("w", __gu_val, __gu_addr, \
217 __gu_err); \ 216 __gu_err); \
218 break; \ 217 break; \
219 case 8: \
220 __get_user_asm("d", __gu_val, __gu_addr, \
221 __gu_err); \
222 break; \
223 default: \ 218 default: \
224 __gu_err = __get_user_bad(); \ 219 __gu_err = __get_user_bad(); \
225 break; \ 220 break; \
@@ -227,7 +222,7 @@ extern int __put_user_bad(void);
227 } else { \ 222 } else { \
228 __gu_err = -EFAULT; \ 223 __gu_err = -EFAULT; \
229 } \ 224 } \
230 x = __gu_val; \ 225 x = (typeof(*(ptr)))__gu_val; \
231 __gu_err; \ 226 __gu_err; \
232}) 227})
233 228
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6d7e279b1490..dc8f99ee305f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
139#define pte_same(A,B) (pte_val(A) == pte_val(B)) 139#define pte_same(A,B) (pte_val(A) == pte_val(B))
140#endif 140#endif
141 141
142#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 142#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
143#define page_test_and_clear_dirty(page) (0) 143#define page_test_dirty(page) (0)
144#endif
145
146#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
147#define page_clear_dirty(page) do { } while (0)
148#endif
149
150#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
144#define pte_maybe_dirty(pte) pte_dirty(pte) 151#define pte_maybe_dirty(pte) pte_dirty(pte)
145#else 152#else
146#define pte_maybe_dirty(pte) (1) 153#define pte_maybe_dirty(pte) (1)
diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h
index 876898363944..838684dc6d35 100644
--- a/include/asm-s390/bug.h
+++ b/include/asm-s390/bug.h
@@ -1,27 +1,70 @@
1#ifndef _S390_BUG_H 1#ifndef _ASM_S390_BUG_H
2#define _S390_BUG_H 2#define _ASM_S390_BUG_H
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5 5
6#ifdef CONFIG_BUG 6#ifdef CONFIG_BUG
7 7
8static inline __attribute__((noreturn)) void __do_illegal_op(void) 8#ifdef CONFIG_64BIT
9{ 9#define S390_LONG ".quad"
10#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
11 __builtin_trap();
12#else 10#else
13 asm volatile(".long 0"); 11#define S390_LONG ".long"
14#endif 12#endif
15}
16 13
17#define BUG() do { \ 14#ifdef CONFIG_DEBUG_BUGVERBOSE
18 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 15
19 __do_illegal_op(); \ 16#define __EMIT_BUG(x) do { \
17 asm volatile( \
18 "0: j 0b+2\n" \
19 "1:\n" \
20 ".section .rodata.str,\"aMS\",@progbits,1\n" \
21 "2: .asciz \""__FILE__"\"\n" \
22 ".previous\n" \
23 ".section __bug_table,\"a\"\n" \
24 "3:\t" S390_LONG "\t1b,2b\n" \
25 " .short %0,%1\n" \
26 " .org 3b+%2\n" \
27 ".previous\n" \
28 : : "i" (__LINE__), \
29 "i" (x), \
30 "i" (sizeof(struct bug_entry))); \
20} while (0) 31} while (0)
21 32
33#else /* CONFIG_DEBUG_BUGVERBOSE */
34
35#define __EMIT_BUG(x) do { \
36 asm volatile( \
37 "0: j 0b+2\n" \
38 "1:\n" \
39 ".section __bug_table,\"a\"\n" \
40 "2:\t" S390_LONG "\t1b\n" \
41 " .short %0\n" \
42 " .org 2b+%1\n" \
43 ".previous\n" \
44 : : "i" (x), \
45 "i" (sizeof(struct bug_entry))); \
46} while (0)
47
48#endif /* CONFIG_DEBUG_BUGVERBOSE */
49
50#define BUG() __EMIT_BUG(0)
51
52#define WARN_ON(x) ({ \
53 typeof(x) __ret_warn_on = (x); \
54 if (__builtin_constant_p(__ret_warn_on)) { \
55 if (__ret_warn_on) \
56 __EMIT_BUG(BUGFLAG_WARNING); \
57 } else { \
58 if (unlikely(__ret_warn_on)) \
59 __EMIT_BUG(BUGFLAG_WARNING); \
60 } \
61 unlikely(__ret_warn_on); \
62})
63
22#define HAVE_ARCH_BUG 64#define HAVE_ARCH_BUG
23#endif 65#define HAVE_ARCH_WARN_ON
66#endif /* CONFIG_BUG */
24 67
25#include <asm-generic/bug.h> 68#include <asm-generic/bug.h>
26 69
27#endif 70#endif /* _ASM_S390_BUG_H */
diff --git a/include/asm-s390/ccwgroup.h b/include/asm-s390/ccwgroup.h
index d2f9c0d53a97..925b3ddfa141 100644
--- a/include/asm-s390/ccwgroup.h
+++ b/include/asm-s390/ccwgroup.h
@@ -11,6 +11,7 @@ struct ccwgroup_device {
11 CCWGROUP_ONLINE, 11 CCWGROUP_ONLINE,
12 } state; 12 } state;
13 atomic_t onoff; 13 atomic_t onoff;
14 struct mutex reg_mutex;
14 unsigned int count; /* number of attached slave devices */ 15 unsigned int count; /* number of attached slave devices */
15 struct device dev; /* master device */ 16 struct device dev; /* master device */
16 struct ccw_device *cdev[0]; /* variable number, allocate as needed */ 17 struct ccw_device *cdev[0]; /* variable number, allocate as needed */
diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h
new file mode 100644
index 000000000000..b203336fd892
--- /dev/null
+++ b/include/asm-s390/chpid.h
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/cio/chpid.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_CHPID_H
9#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
10
11#include <linux/string.h>
12#include <asm/types.h>
13#include <asm/cio.h>
14
15#define __MAX_CHPID 255
16
17struct chp_id {
18 u8 reserved1;
19 u8 cssid;
20 u8 reserved2;
21 u8 id;
22} __attribute__((packed));
23
24static inline void chp_id_init(struct chp_id *chpid)
25{
26 memset(chpid, 0, sizeof(struct chp_id));
27}
28
29static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b)
30{
31 return (a->id == b->id) && (a->cssid == b->cssid);
32}
33
34static inline void chp_id_next(struct chp_id *chpid)
35{
36 if (chpid->id < __MAX_CHPID)
37 chpid->id++;
38 else {
39 chpid->id = 0;
40 chpid->cssid++;
41 }
42}
43
44static inline int chp_id_is_valid(struct chp_id *chpid)
45{
46 return (chpid->cssid <= __MAX_CSSID);
47}
48
49
50#define chp_id_for_each(c) \
51 for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))
52
53#endif /* _ASM_S390_CHPID_H */
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index d92785030980..f738d2827582 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -13,6 +13,7 @@
13#ifdef __KERNEL__ 13#ifdef __KERNEL__
14 14
15#define LPM_ANYPATH 0xff 15#define LPM_ANYPATH 0xff
16#define __MAX_CSSID 0
16 17
17/* 18/*
18 * subchannel status word 19 * subchannel status word
@@ -292,6 +293,13 @@ extern void css_schedule_reprobe(void);
292 293
293extern void reipl_ccw_dev(struct ccw_dev_id *id); 294extern void reipl_ccw_dev(struct ccw_dev_id *id);
294 295
296struct cio_iplinfo {
297 u16 devno;
298 int is_qdio;
299};
300
301extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
302
295#endif 303#endif
296 304
297#endif 305#endif
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 0eb64083480a..bdcd448d43fb 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -8,6 +8,8 @@
8#define _ASM_S390_IPL_H 8#define _ASM_S390_IPL_H
9 9
10#include <asm/types.h> 10#include <asm/types.h>
11#include <asm/cio.h>
12#include <asm/setup.h>
11 13
12#define IPL_PARMBLOCK_ORIGIN 0x2000 14#define IPL_PARMBLOCK_ORIGIN 0x2000
13 15
@@ -74,12 +76,12 @@ struct ipl_parameter_block {
74} __attribute__((packed)); 76} __attribute__((packed));
75 77
76/* 78/*
77 * IPL validity flags and parameters as detected in head.S 79 * IPL validity flags
78 */ 80 */
79extern u32 ipl_flags; 81extern u32 ipl_flags;
80extern u16 ipl_devno;
81 82
82extern u32 dump_prefix_page; 83extern u32 dump_prefix_page;
84
83extern void do_reipl(void); 85extern void do_reipl(void);
84extern void ipl_save_parameters(void); 86extern void ipl_save_parameters(void);
85 87
@@ -89,6 +91,35 @@ enum {
89 IPL_NSS_VALID = 4, 91 IPL_NSS_VALID = 4,
90}; 92};
91 93
94enum ipl_type {
95 IPL_TYPE_UNKNOWN = 1,
96 IPL_TYPE_CCW = 2,
97 IPL_TYPE_FCP = 4,
98 IPL_TYPE_FCP_DUMP = 8,
99 IPL_TYPE_NSS = 16,
100};
101
102struct ipl_info
103{
104 enum ipl_type type;
105 union {
106 struct {
107 struct ccw_dev_id dev_id;
108 } ccw;
109 struct {
110 struct ccw_dev_id dev_id;
111 u64 wwpn;
112 u64 lun;
113 } fcp;
114 struct {
115 char name[NSS_NAME_SIZE + 1];
116 } nss;
117 } data;
118};
119
120extern struct ipl_info ipl_info;
121extern void setup_ipl_info(void);
122
92/* 123/*
93 * DIAG 308 support 124 * DIAG 308 support
94 */ 125 */
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 4a31d0a7ee83..ffc9788a21a7 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -147,6 +147,52 @@ void pgm_check_handler(void);
147void mcck_int_handler(void); 147void mcck_int_handler(void);
148void io_int_handler(void); 148void io_int_handler(void);
149 149
150struct save_area_s390 {
151 u32 ext_save;
152 u64 timer;
153 u64 clk_cmp;
154 u8 pad1[24];
155 u8 psw[8];
156 u32 pref_reg;
157 u8 pad2[20];
158 u32 acc_regs[16];
159 u64 fp_regs[4];
160 u32 gp_regs[16];
161 u32 ctrl_regs[16];
162} __attribute__((packed));
163
164struct save_area_s390x {
165 u64 fp_regs[16];
166 u64 gp_regs[16];
167 u8 psw[16];
168 u8 pad1[8];
169 u32 pref_reg;
170 u32 fp_ctrl_reg;
171 u8 pad2[4];
172 u32 tod_reg;
173 u64 timer;
174 u64 clk_cmp;
175 u8 pad3[8];
176 u32 acc_regs[16];
177 u64 ctrl_regs[16];
178} __attribute__((packed));
179
180union save_area {
181 struct save_area_s390 s390;
182 struct save_area_s390x s390x;
183};
184
185#define SAVE_AREA_BASE_S390 0xd4
186#define SAVE_AREA_BASE_S390X 0x1200
187
188#ifndef __s390x__
189#define SAVE_AREA_SIZE sizeof(struct save_area_s390)
190#define SAVE_AREA_BASE SAVE_AREA_BASE_S390
191#else
192#define SAVE_AREA_SIZE sizeof(struct save_area_s390x)
193#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X
194#endif
195
150struct _lowcore 196struct _lowcore
151{ 197{
152#ifndef __s390x__ 198#ifndef __s390x__
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 13c16546eff5..8fe8d42e64c3 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma,
753 * should therefore only be called if it is not mapped in any 753 * should therefore only be called if it is not mapped in any
754 * address space. 754 * address space.
755 */ 755 */
756static inline int page_test_and_clear_dirty(struct page *page) 756static inline int page_test_dirty(struct page *page)
757{ 757{
758 unsigned long physpage = page_to_phys(page); 758 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
759 int skey = page_get_storage_key(physpage); 759}
760 760
761 if (skey & _PAGE_CHANGED) 761static inline void page_clear_dirty(struct page *page)
762 page_set_storage_key(physpage, skey & ~_PAGE_CHANGED); 762{
763 return skey & _PAGE_CHANGED; 763 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
764} 764}
765 765
766/* 766/*
@@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
953#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 953#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
954#define __HAVE_ARCH_PTEP_SET_WRPROTECT 954#define __HAVE_ARCH_PTEP_SET_WRPROTECT
955#define __HAVE_ARCH_PTE_SAME 955#define __HAVE_ARCH_PTE_SAME
956#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY 956#define __HAVE_ARCH_PAGE_TEST_DIRTY
957#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
957#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 958#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
958#include <asm-generic/pgtable.h> 959#include <asm-generic/pgtable.h>
959 960
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 33b80ced4bc1..e0fcea8c64c3 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -57,6 +57,7 @@ struct cpuinfo_S390
57 57
58extern void s390_adjust_jiffies(void); 58extern void s390_adjust_jiffies(void);
59extern void print_cpu_info(struct cpuinfo_S390 *); 59extern void print_cpu_info(struct cpuinfo_S390 *);
60extern int get_cpu_capability(unsigned int *);
60 61
61/* Lazy FPU handling on uni-processor */ 62/* Lazy FPU handling on uni-processor */
62extern struct task_struct *last_task_used_math; 63extern struct task_struct *last_task_used_math;
@@ -196,6 +197,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
196extern char *task_show_regs(struct task_struct *task, char *buffer); 197extern char *task_show_regs(struct task_struct *task, char *buffer);
197 198
198extern void show_registers(struct pt_regs *regs); 199extern void show_registers(struct pt_regs *regs);
200extern void show_code(struct pt_regs *regs);
199extern void show_trace(struct task_struct *task, unsigned long *sp); 201extern void show_trace(struct task_struct *task, unsigned long *sp);
200 202
201unsigned long get_wchan(struct task_struct *p); 203unsigned long get_wchan(struct task_struct *p);
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index 468b97018405..21ed64773210 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -9,6 +9,7 @@
9#define _ASM_S390_SCLP_H 9#define _ASM_S390_SCLP_H
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/chpid.h>
12 13
13struct sccb_header { 14struct sccb_header {
14 u16 length; 15 u16 length;
@@ -33,7 +34,20 @@ struct sclp_readinfo_sccb {
33 u8 _reserved3[4096 - 112]; /* 112-4095 */ 34 u8 _reserved3[4096 - 112]; /* 112-4095 */
34} __attribute__((packed, aligned(4096))); 35} __attribute__((packed, aligned(4096)));
35 36
37#define SCLP_CHP_INFO_MASK_SIZE 32
38
39struct sclp_chp_info {
40 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
41 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
42 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
43};
44
36extern struct sclp_readinfo_sccb s390_readinfo_sccb; 45extern struct sclp_readinfo_sccb s390_readinfo_sccb;
37extern void sclp_readinfo_early(void); 46extern void sclp_readinfo_early(void);
47extern int sclp_sdias_blk_count(void);
48extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
49extern int sclp_chp_configure(struct chp_id chpid);
50extern int sclp_chp_deconfigure(struct chp_id chpid);
51extern int sclp_chp_read_info(struct sclp_chp_info *info);
38 52
39#endif /* _ASM_S390_SCLP_H */ 53#endif /* _ASM_S390_SCLP_H */
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 44c7aee2bd34..a76a6b8fd887 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -40,6 +40,7 @@ struct mem_chunk {
40}; 40};
41 41
42extern struct mem_chunk memory_chunk[]; 42extern struct mem_chunk memory_chunk[];
43extern unsigned long real_memory_size;
43 44
44#ifdef CONFIG_S390_SWITCH_AMODE 45#ifdef CONFIG_S390_SWITCH_AMODE
45extern unsigned int switch_amode; 46extern unsigned int switch_amode;
@@ -77,6 +78,7 @@ extern unsigned long machine_flags;
77#endif /* __s390x__ */ 78#endif /* __s390x__ */
78 79
79#define MACHINE_HAS_SCLP (!MACHINE_IS_P390) 80#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
81#define ZFCPDUMP_HSA_SIZE (32UL<<20)
80 82
81/* 83/*
82 * Console mode. Override with conmode= 84 * Console mode. Override with conmode=
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index b957e4cda464..0a28e6d6ef40 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
54 54
55#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) 55#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
56 56
57extern int smp_get_cpu(cpumask_t cpu_map);
58extern void smp_put_cpu(int cpu);
59
60static inline __u16 hard_smp_processor_id(void) 57static inline __u16 hard_smp_processor_id(void)
61{ 58{
62 __u16 cpu_address; 59 __u16 cpu_address;
@@ -114,9 +111,8 @@ static inline void smp_send_stop(void)
114} 111}
115 112
116#define smp_cpu_not_running(cpu) 1 113#define smp_cpu_not_running(cpu) 1
117#define smp_get_cpu(cpu) ({ 0; })
118#define smp_put_cpu(cpu) ({ 0; })
119#define smp_setup_cpu_possible_map() do { } while (0) 114#define smp_setup_cpu_possible_map() do { } while (0)
120#endif 115#endif
121 116
117extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
122#endif 118#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9cd0d0eaf523..96326594e55d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -133,7 +133,7 @@
133static inline void SetPageUptodate(struct page *page) 133static inline void SetPageUptodate(struct page *page)
134{ 134{
135 if (!test_and_set_bit(PG_uptodate, &page->flags)) 135 if (!test_and_set_bit(PG_uptodate, &page->flags))
136 page_test_and_clear_dirty(page); 136 page_clear_dirty(page);
137} 137}
138#else 138#else
139#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) 139#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f469e3cd08e8..a794945fd194 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -67,12 +67,12 @@ static inline long sync_writeback_pages(void)
67/* 67/*
68 * Start background writeback (via pdflush) at this percentage 68 * Start background writeback (via pdflush) at this percentage
69 */ 69 */
70int dirty_background_ratio = 10; 70int dirty_background_ratio = 5;
71 71
72/* 72/*
73 * The generator of dirty data starts writeback at this percentage 73 * The generator of dirty data starts writeback at this percentage
74 */ 74 */
75int vm_dirty_ratio = 40; 75int vm_dirty_ratio = 10;
76 76
77/* 77/*
78 * The interval between `kupdate'-style writebacks, in jiffies 78 * The interval between `kupdate'-style writebacks, in jiffies
diff --git a/mm/rmap.c b/mm/rmap.c
index b82146e6dfc9..59da5b734c80 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,8 +498,10 @@ int page_mkclean(struct page *page)
498 struct address_space *mapping = page_mapping(page); 498 struct address_space *mapping = page_mapping(page);
499 if (mapping) 499 if (mapping)
500 ret = page_mkclean_file(mapping, page); 500 ret = page_mkclean_file(mapping, page);
501 if (page_test_and_clear_dirty(page)) 501 if (page_test_dirty(page)) {
502 page_clear_dirty(page);
502 ret = 1; 503 ret = 1;
504 }
503 } 505 }
504 506
505 return ret; 507 return ret;
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
605 * Leaving it set also helps swapoff to reinstate ptes 607 * Leaving it set also helps swapoff to reinstate ptes
606 * faster for those pages still in swapcache. 608 * faster for those pages still in swapcache.
607 */ 609 */
608 if (page_test_and_clear_dirty(page)) 610 if (page_test_dirty(page)) {
611 page_clear_dirty(page);
609 set_page_dirty(page); 612 set_page_dirty(page);
613 }
610 __dec_zone_page_state(page, 614 __dec_zone_page_state(page,
611 PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); 615 PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
612 } 616 }