author		Linus Torvalds <torvalds@linux-foundation.org>	2008-01-27 01:52:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-01-27 01:52:53 -0500
commit		0444fa78751260b38f0db3418e001bf86593f05f
tree		ffd3bd72c7078d705216ef156cae63d990125113
parent		69e4d4c3e1cb23fce81deeae08b502d27bad6993
parent		9d40d2e3955185b69c264583d080eb3defcb05a0
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (56 commits)
  [S390] replace lock_cpu_hotplug with get_online_cpus
  [S390] usage of s390dbf: shrink number of debug areas to use.
  [S390] constify function pointer tables.
  [S390] do local_irq_restore while spinning in spin_lock_irqsave.
  [S390] add smp_call_function_mask
  [S390] dasd: fix loop in request expiration handling
  [S390] Unused field / extern declaration in processor.h
  [S390] Remove TOPDIR from Makefile
  [S390] dasd: add hyper PAV support to DASD device driver, part 1
  [S390] single-step cleanup
  [S390] Move NOTES and BUG_TABLE.
  [S390] drivers/s390/: Spelling fixes
  [S390] include/asm-s390/: Spelling fixes
  [S390] arch/s390/: Spelling fixes
  [S390] Use diag308 subcodes 3 and 6 for reboot and dump when possible.
  [S390] vmemmap: allocate struct pages before 1:1 mapping
  [S390] Initialize sclp_ipl_info
  [S390] Allocate and free cpu lowcores and stacks when needed/possible.
  [S390] use LIST_HEAD instead of LIST_HEAD_INIT
  [S390] Load disabled wait psw instead of stopping cpu on halt.
  ...
-rw-r--r--  Documentation/DocBook/s390-drivers.tmpl | 1
-rw-r--r--  Documentation/cpu-hotplug.txt | 2
-rw-r--r--  Documentation/kernel-parameters.txt | 9
-rw-r--r--  Documentation/s390/CommonIO | 5
-rw-r--r--  arch/s390/Kconfig | 3
-rw-r--r--  arch/s390/crypto/Kconfig | 60
-rw-r--r--  arch/s390/crypto/aes_s390.c | 2
-rw-r--r--  arch/s390/crypto/prng.c | 4
-rw-r--r--  arch/s390/kernel/Makefile | 4
-rw-r--r--  arch/s390/kernel/early.c | 2
-rw-r--r--  arch/s390/kernel/head64.S | 2
-rw-r--r--  arch/s390/kernel/ipl.c | 948
-rw-r--r--  arch/s390/kernel/process.c | 18
-rw-r--r--  arch/s390/kernel/ptrace.c | 15
-rw-r--r--  arch/s390/kernel/setup.c | 139
-rw-r--r--  arch/s390/kernel/signal.c | 20
-rw-r--r--  arch/s390/kernel/smp.c | 575
-rw-r--r--  arch/s390/kernel/traps.c | 20
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 12
-rw-r--r--  arch/s390/lib/spinlock.c | 35
-rw-r--r--  arch/s390/mm/extmem.c | 2
-rw-r--r--  arch/s390/mm/vmem.c | 26
-rw-r--r--  drivers/crypto/Kconfig | 63
-rw-r--r--  drivers/s390/block/Makefile | 4
-rw-r--r--  drivers/s390/block/dasd.c | 1690
-rw-r--r--  drivers/s390/block/dasd_3370_erp.c | 84
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 360
-rw-r--r--  drivers/s390/block/dasd_9336_erp.c | 41
-rw-r--r--  drivers/s390/block/dasd_9343_erp.c | 21
-rw-r--r--  drivers/s390/block/dasd_alias.c | 903
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 94
-rw-r--r--  drivers/s390/block/dasd_diag.c | 107
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 789
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 125
-rw-r--r--  drivers/s390/block/dasd_eer.c | 11
-rw-r--r--  drivers/s390/block/dasd_erp.c | 25
-rw-r--r--  drivers/s390/block/dasd_fba.c | 119
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 76
-rw-r--r--  drivers/s390/block/dasd_int.h | 209
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 172
-rw-r--r--  drivers/s390/block/dasd_proc.c | 23
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/char/Makefile | 2
-rw-r--r--  drivers/s390/char/monwriter.c | 2
-rw-r--r--  drivers/s390/char/raw3270.c | 4
-rw-r--r--  drivers/s390/char/sclp.h | 4
-rw-r--r--  drivers/s390/char/sclp_chp.c | 200
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 398
-rw-r--r--  drivers/s390/char/sclp_cpi.c | 246
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.c | 400
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.h | 15
-rw-r--r--  drivers/s390/char/sclp_info.c | 116
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/tape_core.c | 2
-rw-r--r--  drivers/s390/char/tape_proc.c | 2
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 2
-rw-r--r--  drivers/s390/char/vmur.c | 2
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/airq.c | 171
-rw-r--r--  drivers/s390/cio/airq.h | 10
-rw-r--r--  drivers/s390/cio/blacklist.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 25
-rw-r--r--  drivers/s390/cio/chsc.c | 121
-rw-r--r--  drivers/s390/cio/cio.c | 106
-rw-r--r--  drivers/s390/cio/cio.h | 89
-rw-r--r--  drivers/s390/cio/cio_debug.h | 22
-rw-r--r--  drivers/s390/cio/css.c | 198
-rw-r--r--  drivers/s390/cio/css.h | 78
-rw-r--r--  drivers/s390/cio/device.c | 157
-rw-r--r--  drivers/s390/cio/device.h | 5
-rw-r--r--  drivers/s390/cio/device_fsm.c | 124
-rw-r--r--  drivers/s390/cio/device_id.c | 9
-rw-r--r--  drivers/s390/cio/device_ops.c | 2
-rw-r--r--  drivers/s390/cio/device_pgid.c | 6
-rw-r--r--  drivers/s390/cio/device_status.c | 13
-rw-r--r--  drivers/s390/cio/io_sch.h | 163
-rw-r--r--  drivers/s390/cio/ioasm.h | 66
-rw-r--r--  drivers/s390/cio/qdio.c | 38
-rw-r--r--  drivers/s390/cio/qdio.h | 2
-rw-r--r--  drivers/s390/net/claw.c | 2
-rw-r--r--  drivers/s390/net/lcs.c | 2
-rw-r--r--  drivers/s390/net/netiucv.c | 3
-rw-r--r--  drivers/s390/net/qeth_proc.c | 4
-rw-r--r--  drivers/s390/net/smsgiucv.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r--  include/asm-s390/airq.h | 19
-rw-r--r--  include/asm-s390/cio.h | 4
-rw-r--r--  include/asm-s390/dasd.h | 2
-rw-r--r--  include/asm-s390/ipl.h | 8
-rw-r--r--  include/asm-s390/mmu_context.h | 27
-rw-r--r--  include/asm-s390/pgtable.h | 46
-rw-r--r--  include/asm-s390/processor.h | 4
-rw-r--r--  include/asm-s390/ptrace.h | 8
-rw-r--r--  include/asm-s390/qdio.h | 2
-rw-r--r--  include/asm-s390/rwsem.h | 4
-rw-r--r--  include/asm-s390/sclp.h | 20
-rw-r--r--  include/asm-s390/smp.h | 5
-rw-r--r--  include/asm-s390/spinlock.h | 32
-rw-r--r--  include/asm-s390/spinlock_types.h | 1
-rw-r--r--  include/asm-s390/tlbflush.h | 32
-rw-r--r--  include/asm-s390/zcrypt.h | 2
-rw-r--r--  kernel/sysctl_check.c | 1
105 files changed, 6216 insertions, 3658 deletions
diff --git a/Documentation/DocBook/s390-drivers.tmpl b/Documentation/DocBook/s390-drivers.tmpl
index 254e769282a4..3d2f31b99dd9 100644
--- a/Documentation/DocBook/s390-drivers.tmpl
+++ b/Documentation/DocBook/s390-drivers.tmpl
@@ -116,6 +116,7 @@
 !Iinclude/asm-s390/ccwdev.h
 !Edrivers/s390/cio/device.c
 !Edrivers/s390/cio/device_ops.c
+!Edrivers/s390/cio/airq.c
     </sect1>
     <sect1 id="cmf">
      <title>The channel-measurement facility</title>
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index fb94f5a71b68..ba0aacde94fb 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -50,7 +50,7 @@ additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets
 			cpu_possible_map = cpu_present_map + additional_cpus
 
 (*) Option valid only for following architectures
-- x86_64, ia64, s390
+- x86_64, ia64
 
 ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
 to determine the number of potentially hot-pluggable cpus. The implementation
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 65de5ba7b74c..880f882160e2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -370,7 +370,8 @@ and is between 256 and 4096 characters. It is defined in the file
 			configured. Potentially dangerous and should only be
 			used if you are entirely sure of the consequences.
 
-	chandev=	[HW,NET] Generic channel device initialisation
+	ccw_timeout_log	[S390]
+			See Documentation/s390/CommonIO for details.
 
 	checkreqprot	[SELINUX] Set initial checkreqprot flag value.
 			Format: { "0" | "1" }
@@ -382,6 +383,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			Value can be changed at runtime via
 			/selinux/checkreqprot.
 
+	cio_ignore=	[S390]
+			See Documentation/s390/CommonIO for details.
+
+	cio_msg=	[S390]
+			See Documentation/s390/CommonIO for details.
+
 	clock=		[BUGS=X86-32, HW] gettimeofday clocksource override.
 			[Deprecated]
 			Forces specified clocksource (if available) to be used
diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
index 86320aa3fb0b..8fbc0a852870 100644
--- a/Documentation/s390/CommonIO
+++ b/Documentation/s390/CommonIO
@@ -4,6 +4,11 @@ S/390 common I/O-Layer - command line parameters, procfs and debugfs entries
 Command line parameters
 -----------------------
 
+* ccw_timeout_log
+
+  Enable logging of debug information in case of ccw device timeouts.
+
+
 * cio_msg = yes | no
 
   Determines whether information on found devices and sensed device
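
For illustration only (not part of this merge): a kernel command line that
combines the parameters documented above might look like the following; the
device numbers are invented for the example, and the cio_ignore syntax is
described in full in Documentation/s390/CommonIO.

    ccw_timeout_log cio_msg=no cio_ignore=all,!0.0.0150,!0.0.0151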
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1330061020ab..6ef54d27fc00 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -276,9 +276,6 @@ source "kernel/Kconfig.preempt"
 
 source "mm/Kconfig"
 
-config HOLES_IN_ZONE
-	def_bool y
-
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
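
HOLES_IN_ZONE can go away because setup_memory_end() (in the
arch/s390/kernel/setup.c part of this merge, further down) now trims every
memory chunk to MAX_ORDER alignment. As a rough worked example, assuming
MAX_ORDER = 11 and PAGE_SHIFT = 12 (4 KB pages; values assumed for
illustration, not taken from the patch):

	/* align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1)
	 *       = 1UL << (11 + 12 - 1) = 1UL << 22 = 4 MB,
	 * so each chunk start is rounded up and each end rounded down
	 * to a 4 MB boundary; chunks that collapse to nothing are
	 * dropped, leaving no partial pageblocks for the allocator. */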
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
deleted file mode 100644
index d1defbbfcd81..000000000000
--- a/arch/s390/crypto/Kconfig
+++ /dev/null
@@ -1,60 +0,0 @@
-config CRYPTO_SHA1_S390
-	tristate "SHA1 digest algorithm"
-	depends on S390
-	select CRYPTO_ALGAPI
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-config CRYPTO_SHA256_S390
-	tristate "SHA256 digest algorithm"
-	depends on S390
-	select CRYPTO_ALGAPI
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  SHA256 secure hash standard (DFIPS 180-2).
-
-	  This version of SHA implements a 256 bit hash with 128 bits of
-	  security against collision attacks.
-
-config CRYPTO_DES_S390
-	tristate "DES and Triple DES cipher algorithms"
-	depends on S390
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLKCIPHER
-	help
-	  This us the s390 hardware accelerated implementation of the
-	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-
-config CRYPTO_AES_S390
-	tristate "AES cipher algorithms"
-	depends on S390
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLKCIPHER
-	help
-	  This is the s390 hardware accelerated implementation of the
-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
-	  algorithm.
-
-	  Rijndael appears to be consistently a very good performer in
-	  both hardware and software across a wide range of computing
-	  environments regardless of its use in feedback or non-feedback
-	  modes. Its key setup time is excellent, and its key agility is
-	  good. Rijndael's very low memory requirements make it very well
-	  suited for restricted-space environments, in which it also
-	  demonstrates excellent performance. Rijndael's operations are
-	  among the easiest to defend against power and timing attacks.
-
-	  On s390 the System z9-109 currently only supports the key size
-	  of 128 bit.
-
-config S390_PRNG
-	tristate "Pseudo random number generator device driver"
-	depends on S390
-	default "m"
-	help
-	  Select this option if you want to use the s390 pseudo random number
-	  generator. The PRNG is part of the cryptographic processor functions
-	  and uses triple-DES to generate secure random numbers like the
-	  ANSI X9.17 standard. The PRNG is usable via the char device
-	  /dev/prandom.
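
The (relocated) help text above notes that the PRNG is reachable through the
char device /dev/prandom. A minimal userspace sketch, not part of the patch,
assuming the prng module is loaded and the device node exists:

	/* Read 16 bytes from the s390 PRNG character device and print
	 * them as hex. Illustration only. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		int i, fd = open("/dev/prandom", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/prandom");
			return 1;
		}
		if (read(fd, buf, sizeof(buf)) != (ssize_t) sizeof(buf)) {
			perror("read");
			close(fd);
			return 1;
		}
		for (i = 0; i < (int) sizeof(buf); i++)
			printf("%02x", buf[i]);
		printf("\n");
		close(fd);
		return 0;
	}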
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46c97058ebe1..a3f67f8b5427 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -516,7 +516,7 @@ static int __init aes_init(void)
 	/* z9 109 and z9 BC/EC only support 128 bit key length */
 	if (keylen_flag == AES_KEYLEN_128)
 		printk(KERN_INFO
-		       "aes_s390: hardware acceleration only available for"
+		       "aes_s390: hardware acceleration only available for "
 		       "128 bit keys\n");
 
 	ret = crypto_register_alg(&aes_alg);
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 8eb3a1aedc22..0cfefddd8375 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -90,7 +90,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
 	int ret = 0;
 	int tmp;
 
-	/* nbytes can be arbitrary long, we spilt it into chunks */
+	/* nbytes can be arbitrary length, we split it into chunks */
 	while (nbytes) {
 		/* same as in extract_entropy_user in random.c */
 		if (need_resched()) {
@@ -146,7 +146,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
 	return ret;
 }
 
-static struct file_operations prng_fops = {
+static const struct file_operations prng_fops = {
 	.owner		= THIS_MODULE,
 	.open		= &prng_open,
 	.release	= NULL,
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 56cb71007cd9..b3b650a93c7c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o
 S390_KEXEC_OBJS := machine_kexec.o crash.o
 S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
 obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
 
-#
-# This is just to get the dependencies...
-#
-binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1b3af7dab816..9f7b73b180f0 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -276,7 +276,7 @@ void __init startup_init(void)
 	create_kernel_nss();
 	sort_main_extable();
 	setup_lowcore_early();
-	sclp_readinfo_early();
+	sclp_read_info_early();
 	sclp_facilities_detect();
 	memsize = sclp_memory_detect();
 #ifndef CONFIG_64BIT
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index a87b1976d409..79dccd206a6e 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -157,7 +157,7 @@ startup_continue:
 	.long	0xb2b10000		# store facility list
 	tm	0xc8,0x08		# check bit for clearing-by-ASCE
 	bno	0f-.LPG1(%r13)
-	lhi	%r1,2094
+	lhi	%r1,2048
 	lhi	%r2,0
 	.long	0xb98e2001
 	oi	7(%r12),0x80		# set IDTE flag
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index b97694fa62ec..db28cca81fef 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2,7 +2,7 @@
  * arch/s390/kernel/ipl.c
  *  ipl/reipl/dump support for Linux on s390.
  *
- * Copyright (C) IBM Corp. 2005,2006
+ * Copyright IBM Corp. 2005,2007
  * Author(s): Michael Holzheu <holzheu@de.ibm.com>
  *	      Heiko Carstens <heiko.carstens@de.ibm.com>
  *	      Volker Sameske <sameske@de.ibm.com>
@@ -31,6 +31,43 @@
 #define IPL_FCP_DUMP_STR	"fcp_dump"
 #define IPL_NSS_STR		"nss"
 
+#define DUMP_CCW_STR		"ccw"
+#define DUMP_FCP_STR		"fcp"
+#define DUMP_NONE_STR		"none"
+
+/*
+ * Four shutdown trigger types are supported:
+ * - panic
+ * - halt
+ * - power off
+ * - reipl
+ */
+#define ON_PANIC_STR		"on_panic"
+#define ON_HALT_STR		"on_halt"
+#define ON_POFF_STR		"on_poff"
+#define ON_REIPL_STR		"on_reboot"
+
+struct shutdown_action;
+struct shutdown_trigger {
+	char *name;
+	struct shutdown_action *action;
+};
+
+/*
+ * Five shutdown action types are supported:
+ */
+#define SHUTDOWN_ACTION_IPL_STR		"ipl"
+#define SHUTDOWN_ACTION_REIPL_STR	"reipl"
+#define SHUTDOWN_ACTION_DUMP_STR	"dump"
+#define SHUTDOWN_ACTION_VMCMD_STR	"vmcmd"
+#define SHUTDOWN_ACTION_STOP_STR	"stop"
+
+struct shutdown_action {
+	char *name;
+	void (*fn) (struct shutdown_trigger *trigger);
+	int (*init) (void);
+};
+
 static char *ipl_type_str(enum ipl_type type)
 {
 	switch (type) {
@@ -54,10 +91,6 @@ enum dump_type {
 	DUMP_TYPE_FCP = 4,
 };
 
-#define DUMP_NONE_STR "none"
-#define DUMP_CCW_STR "ccw"
-#define DUMP_FCP_STR "fcp"
-
 static char *dump_type_str(enum dump_type type)
 {
 	switch (type) {
@@ -99,30 +132,6 @@ enum dump_method {
 	DUMP_METHOD_FCP_DIAG,
 };
 
-enum shutdown_action {
-	SHUTDOWN_REIPL,
-	SHUTDOWN_DUMP,
-	SHUTDOWN_STOP,
-};
-
-#define SHUTDOWN_REIPL_STR "reipl"
-#define SHUTDOWN_DUMP_STR "dump"
-#define SHUTDOWN_STOP_STR "stop"
-
-static char *shutdown_action_str(enum shutdown_action action)
-{
-	switch (action) {
-	case SHUTDOWN_REIPL:
-		return SHUTDOWN_REIPL_STR;
-	case SHUTDOWN_DUMP:
-		return SHUTDOWN_DUMP_STR;
-	case SHUTDOWN_STOP:
-		return SHUTDOWN_STOP_STR;
-	default:
-		return NULL;
-	}
-}
-
 static int diag308_set_works = 0;
 
 static int reipl_capabilities = IPL_TYPE_UNKNOWN;
@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE;
 static struct ipl_parameter_block *dump_block_fcp;
 static struct ipl_parameter_block *dump_block_ccw;
 
-static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
-
 static struct sclp_ipl_info sclp_ipl_info;
 
 int diag308(unsigned long subcode, void *addr)
@@ -205,8 +212,8 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
 				struct kobj_attribute *attr,		\
 				const char *buf, size_t len)		\
 {									\
-	if (sscanf(buf, _fmt_in, _value) != 1)				\
-		return -EINVAL;						\
+	strncpy(_value, buf, sizeof(_value) - 1);			\
+	strstrip(_value);						\
 	return len;							\
 }									\
 static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
@@ -245,33 +252,6 @@ static __init enum ipl_type get_ipl_type(void)
 	return IPL_TYPE_FCP;
 }
 
-void __init setup_ipl_info(void)
-{
-	ipl_info.type = get_ipl_type();
-	switch (ipl_info.type) {
-	case IPL_TYPE_CCW:
-		ipl_info.data.ccw.dev_id.devno = ipl_devno;
-		ipl_info.data.ccw.dev_id.ssid = 0;
-		break;
-	case IPL_TYPE_FCP:
-	case IPL_TYPE_FCP_DUMP:
-		ipl_info.data.fcp.dev_id.devno =
-			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
-		ipl_info.data.fcp.dev_id.ssid = 0;
-		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
-		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
-		break;
-	case IPL_TYPE_NSS:
-		strncpy(ipl_info.data.nss.name, kernel_nss_name,
-			sizeof(ipl_info.data.nss.name));
-		break;
-	case IPL_TYPE_UNKNOWN:
-	default:
-		/* We have no info to copy */
-		break;
-	}
-}
-
 struct ipl_info ipl_info;
 EXPORT_SYMBOL_GPL(ipl_info);
 
@@ -428,8 +408,74 @@ static struct attribute_group ipl_unknown_attr_group = {
 
 static struct kset *ipl_kset;
 
+static int __init ipl_register_fcp_files(void)
+{
+	int rc;
+
+	rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
+	if (rc)
+		goto out;
+	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
+	if (rc)
+		goto out_ipl_parm;
+	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
+	if (!rc)
+		goto out;
+
+	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
+
+out_ipl_parm:
+	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
+out:
+	return rc;
+}
+
+static void ipl_run(struct shutdown_trigger *trigger)
+{
+	diag308(DIAG308_IPL, NULL);
+	if (MACHINE_IS_VM)
+		__cpcmd("IPL", NULL, 0, NULL);
+	else if (ipl_info.type == IPL_TYPE_CCW)
+		reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
+}
+
+static int ipl_init(void)
+{
+	int rc;
+
+	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
+	if (!ipl_kset) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	switch (ipl_info.type) {
+	case IPL_TYPE_CCW:
+		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
+		break;
+	case IPL_TYPE_FCP:
+	case IPL_TYPE_FCP_DUMP:
+		rc = ipl_register_fcp_files();
+		break;
+	case IPL_TYPE_NSS:
+		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
+		break;
+	default:
+		rc = sysfs_create_group(&ipl_kset->kobj,
+					&ipl_unknown_attr_group);
+		break;
+	}
+out:
+	if (rc)
+		panic("ipl_init failed: rc = %i\n", rc);
+
+	return 0;
+}
+
+static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
+					    ipl_init};
+
 /*
- * reipl section
+ * reipl shutdown action: Reboot Linux on shutdown.
  */
 
 /* FCP reipl device attributes */
@@ -549,7 +595,9 @@ static int reipl_set_type(enum ipl_type type)
 
 	switch(type) {
 	case IPL_TYPE_CCW:
-		if (MACHINE_IS_VM)
+		if (diag308_set_works)
+			reipl_method = REIPL_METHOD_CCW_DIAG;
+		else if (MACHINE_IS_VM)
 			reipl_method = REIPL_METHOD_CCW_VM;
 		else
 			reipl_method = REIPL_METHOD_CCW_CIO;
@@ -600,143 +648,11 @@ static ssize_t reipl_type_store(struct kobject *kobj,
 }
 
 static struct kobj_attribute reipl_type_attr =
 	__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
 
 static struct kset *reipl_kset;
 
-/*
- * dump section
- */
-
-/* FCP dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
-		   dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
-		   dump_block_fcp->ipl_info.fcp.lun);
-DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
-		   dump_block_fcp->ipl_info.fcp.bootprog);
-DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
-		   dump_block_fcp->ipl_info.fcp.br_lba);
-DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
-		   dump_block_fcp->ipl_info.fcp.devno);
-
-static struct attribute *dump_fcp_attrs[] = {
-	&sys_dump_fcp_device_attr.attr,
-	&sys_dump_fcp_wwpn_attr.attr,
-	&sys_dump_fcp_lun_attr.attr,
-	&sys_dump_fcp_bootprog_attr.attr,
-	&sys_dump_fcp_br_lba_attr.attr,
-	NULL,
-};
-
-static struct attribute_group dump_fcp_attr_group = {
-	.name  = IPL_FCP_STR,
-	.attrs = dump_fcp_attrs,
-};
-
-/* CCW dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-		   dump_block_ccw->ipl_info.ccw.devno);
-
-static struct attribute *dump_ccw_attrs[] = {
-	&sys_dump_ccw_device_attr.attr,
-	NULL,
-};
-
-static struct attribute_group dump_ccw_attr_group = {
-	.name  = IPL_CCW_STR,
-	.attrs = dump_ccw_attrs,
-};
-
-/* dump type */
-
-static int dump_set_type(enum dump_type type)
-{
-	if (!(dump_capabilities & type))
-		return -EINVAL;
-	switch(type) {
-	case DUMP_TYPE_CCW:
-		if (MACHINE_IS_VM)
-			dump_method = DUMP_METHOD_CCW_VM;
-		else if (diag308_set_works)
-			dump_method = DUMP_METHOD_CCW_DIAG;
-		else
-			dump_method = DUMP_METHOD_CCW_CIO;
-		break;
-	case DUMP_TYPE_FCP:
-		dump_method = DUMP_METHOD_FCP_DIAG;
-		break;
-	default:
-		dump_method = DUMP_METHOD_NONE;
-	}
-	dump_type = type;
-	return 0;
-}
-
-static ssize_t dump_type_show(struct kobject *kobj,
-			      struct kobj_attribute *attr, char *page)
-{
-	return sprintf(page, "%s\n", dump_type_str(dump_type));
-}
-
-static ssize_t dump_type_store(struct kobject *kobj,
-			       struct kobj_attribute *attr,
-			       const char *buf, size_t len)
-{
-	int rc = -EINVAL;
-
-	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
-		rc = dump_set_type(DUMP_TYPE_NONE);
-	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
-		rc = dump_set_type(DUMP_TYPE_CCW);
-	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
-		rc = dump_set_type(DUMP_TYPE_FCP);
-	return (rc != 0) ? rc : len;
-}
-
-static struct kobj_attribute dump_type_attr =
-	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
-
-static struct kset *dump_kset;
-
-/*
- * Shutdown actions section
- */
-
-static struct kset *shutdown_actions_kset;
-
-/* on panic */
-
-static ssize_t on_panic_show(struct kobject *kobj,
-			     struct kobj_attribute *attr, char *page)
-{
-	return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
-}
-
-static ssize_t on_panic_store(struct kobject *kobj,
-			      struct kobj_attribute *attr,
-			      const char *buf, size_t len)
-{
-	if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
-		on_panic_action = SHUTDOWN_REIPL;
-	else if (strncmp(buf, SHUTDOWN_DUMP_STR,
-			 strlen(SHUTDOWN_DUMP_STR)) == 0)
-		on_panic_action = SHUTDOWN_DUMP;
-	else if (strncmp(buf, SHUTDOWN_STOP_STR,
-			 strlen(SHUTDOWN_STOP_STR)) == 0)
-		on_panic_action = SHUTDOWN_STOP;
-	else
-		return -EINVAL;
-
-	return len;
-}
-
-static struct kobj_attribute on_panic_attr =
-	__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
-
-void do_reipl(void)
+void reipl_run(struct shutdown_trigger *trigger)
 {
 	struct ccw_dev_id devid;
 	static char buf[100];
@@ -745,8 +661,6 @@ void do_reipl(void)
 	switch (reipl_method) {
 	case REIPL_METHOD_CCW_CIO:
 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
-		if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
-			diag308(DIAG308_IPL, NULL);
 		devid.ssid = 0;
 		reipl_ccw_dev(&devid);
 		break;
@@ -787,98 +701,6 @@ void do_reipl(void)
 	default:
 		break;
 	}
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-static void do_dump(void)
-{
-	struct ccw_dev_id devid;
-	static char buf[100];
-
-	switch (dump_method) {
-	case DUMP_METHOD_CCW_CIO:
-		smp_send_stop();
-		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
-		devid.ssid = 0;
-		reipl_ccw_dev(&devid);
-		break;
-	case DUMP_METHOD_CCW_VM:
-		smp_send_stop();
-		sprintf(buf, "STORE STATUS");
-		__cpcmd(buf, NULL, 0, NULL);
-		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
-		__cpcmd(buf, NULL, 0, NULL);
-		break;
-	case DUMP_METHOD_CCW_DIAG:
-		diag308(DIAG308_SET, dump_block_ccw);
-		diag308(DIAG308_DUMP, NULL);
-		break;
-	case DUMP_METHOD_FCP_DIAG:
-		diag308(DIAG308_SET, dump_block_fcp);
-		diag308(DIAG308_DUMP, NULL);
-		break;
-	case DUMP_METHOD_NONE:
-	default:
-		return;
-	}
-	printk(KERN_EMERG "Dump failed!\n");
-}
-
-/* init functions */
-
-static int __init ipl_register_fcp_files(void)
-{
-	int rc;
-
-	rc = sysfs_create_group(&ipl_kset->kobj,
-				&ipl_fcp_attr_group);
-	if (rc)
-		goto out;
-	rc = sysfs_create_bin_file(&ipl_kset->kobj,
-				   &ipl_parameter_attr);
-	if (rc)
-		goto out_ipl_parm;
-	rc = sysfs_create_bin_file(&ipl_kset->kobj,
-				   &ipl_scp_data_attr);
-	if (!rc)
-		goto out;
-
-	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
-	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
-out:
-	return rc;
-}
-
-static int __init ipl_init(void)
-{
-	int rc;
-
-	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
-	if (!ipl_kset)
-		return -ENOMEM;
-	switch (ipl_info.type) {
-	case IPL_TYPE_CCW:
-		rc = sysfs_create_group(&ipl_kset->kobj,
-					&ipl_ccw_attr_group);
-		break;
-	case IPL_TYPE_FCP:
-	case IPL_TYPE_FCP_DUMP:
-		rc = ipl_register_fcp_files();
-		break;
-	case IPL_TYPE_NSS:
-		rc = sysfs_create_group(&ipl_kset->kobj,
-					&ipl_nss_attr_group);
-		break;
-	default:
-		rc = sysfs_create_group(&ipl_kset->kobj,
-					&ipl_unknown_attr_group);
-		break;
-	}
-	if (rc)
-		kset_unregister(ipl_kset);
-	return rc;
 }
 
 static void __init reipl_probe(void)
@@ -923,6 +745,7 @@ static int __init reipl_ccw_init(void)
 	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
 	reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
 	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+	reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
 	/* check if read scp info worked and set loadparm */
 	if (sclp_ipl_info.is_valid)
 		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
@@ -931,8 +754,7 @@ static int __init reipl_ccw_init(void)
 		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
 		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
 		       LOADPARM_LEN);
-	/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
-	if (!MACHINE_IS_VM)
+	if (!MACHINE_IS_VM && !diag308_set_works)
 		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
 	if (ipl_info.type == IPL_TYPE_CCW)
 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
@@ -970,7 +792,7 @@ static int __init reipl_fcp_init(void)
 	return 0;
 }
 
-static int __init reipl_init(void)
+static int reipl_init(void)
 {
 	int rc;
 
@@ -997,6 +819,140 @@ static int __init reipl_init(void)
 	return 0;
 }
 
+static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
+					      reipl_run, reipl_init};
+
+/*
+ * dump shutdown action: Dump Linux on shutdown.
+ */
+
+/* FCP dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+		   dump_block_fcp->ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+		   dump_block_fcp->ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+		   dump_block_fcp->ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
+		   dump_block_fcp->ipl_info.fcp.br_lba);
+DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+		   dump_block_fcp->ipl_info.fcp.devno);
+
+static struct attribute *dump_fcp_attrs[] = {
+	&sys_dump_fcp_device_attr.attr,
+	&sys_dump_fcp_wwpn_attr.attr,
+	&sys_dump_fcp_lun_attr.attr,
+	&sys_dump_fcp_bootprog_attr.attr,
+	&sys_dump_fcp_br_lba_attr.attr,
+	NULL,
+};
+
+static struct attribute_group dump_fcp_attr_group = {
+	.name  = IPL_FCP_STR,
+	.attrs = dump_fcp_attrs,
+};
+
+/* CCW dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+		   dump_block_ccw->ipl_info.ccw.devno);
+
+static struct attribute *dump_ccw_attrs[] = {
+	&sys_dump_ccw_device_attr.attr,
+	NULL,
+};
+
+static struct attribute_group dump_ccw_attr_group = {
+	.name  = IPL_CCW_STR,
+	.attrs = dump_ccw_attrs,
+};
+
+/* dump type */
+
+static int dump_set_type(enum dump_type type)
+{
+	if (!(dump_capabilities & type))
+		return -EINVAL;
+	switch (type) {
+	case DUMP_TYPE_CCW:
+		if (diag308_set_works)
+			dump_method = DUMP_METHOD_CCW_DIAG;
+		else if (MACHINE_IS_VM)
+			dump_method = DUMP_METHOD_CCW_VM;
+		else
+			dump_method = DUMP_METHOD_CCW_CIO;
+		break;
+	case DUMP_TYPE_FCP:
+		dump_method = DUMP_METHOD_FCP_DIAG;
+		break;
+	default:
+		dump_method = DUMP_METHOD_NONE;
+	}
+	dump_type = type;
+	return 0;
+}
+
+static ssize_t dump_type_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", dump_type_str(dump_type));
+}
+
+static ssize_t dump_type_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t len)
+{
+	int rc = -EINVAL;
+
+	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+		rc = dump_set_type(DUMP_TYPE_NONE);
+	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+		rc = dump_set_type(DUMP_TYPE_CCW);
+	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+		rc = dump_set_type(DUMP_TYPE_FCP);
+	return (rc != 0) ? rc : len;
+}
+
+static struct kobj_attribute dump_type_attr =
+	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
+
+static struct kset *dump_kset;
+
+static void dump_run(struct shutdown_trigger *trigger)
+{
+	struct ccw_dev_id devid;
+	static char buf[100];
+
+	switch (dump_method) {
+	case DUMP_METHOD_CCW_CIO:
+		smp_send_stop();
+		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
+		devid.ssid = 0;
+		reipl_ccw_dev(&devid);
+		break;
+	case DUMP_METHOD_CCW_VM:
+		smp_send_stop();
+		sprintf(buf, "STORE STATUS");
+		__cpcmd(buf, NULL, 0, NULL);
+		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
+		__cpcmd(buf, NULL, 0, NULL);
+		break;
+	case DUMP_METHOD_CCW_DIAG:
+		diag308(DIAG308_SET, dump_block_ccw);
+		diag308(DIAG308_DUMP, NULL);
+		break;
+	case DUMP_METHOD_FCP_DIAG:
+		diag308(DIAG308_SET, dump_block_fcp);
+		diag308(DIAG308_DUMP, NULL);
+		break;
+	case DUMP_METHOD_NONE:
+	default:
+		return;
+	}
+	printk(KERN_EMERG "Dump failed!\n");
+}
+
 static int __init dump_ccw_init(void)
 {
 	int rc;
@@ -1042,31 +998,14 @@ static int __init dump_fcp_init(void)
 	return 0;
 }
 
-#define SHUTDOWN_ON_PANIC_PRIO 0
-
-static int shutdown_on_panic_notify(struct notifier_block *self,
-				    unsigned long event, void *data)
-{
-	if (on_panic_action == SHUTDOWN_DUMP)
-		do_dump();
-	else if (on_panic_action == SHUTDOWN_REIPL)
-		do_reipl();
-	return NOTIFY_OK;
-}
-
-static struct notifier_block shutdown_on_panic_nb = {
-	.notifier_call = shutdown_on_panic_notify,
-	.priority = SHUTDOWN_ON_PANIC_PRIO
-};
-
-static int __init dump_init(void)
+static int dump_init(void)
 {
 	int rc;
 
 	dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
 	if (!dump_kset)
 		return -ENOMEM;
-	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr);
+	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
 	if (rc) {
 		kset_unregister(dump_kset);
 		return rc;
@@ -1081,47 +1020,381 @@ static int __init dump_init(void)
 	return 0;
 }
 
-static int __init shutdown_actions_init(void)
+static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
+					     dump_run, dump_init};
+
+/*
+ * vmcmd shutdown action: Trigger vm command on shutdown.
+ */
+
+static char vmcmd_on_reboot[128];
+static char vmcmd_on_panic[128];
+static char vmcmd_on_halt[128];
+static char vmcmd_on_poff[128];
+
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
+
+static struct attribute *vmcmd_attrs[] = {
+	&sys_vmcmd_on_reboot_attr.attr,
+	&sys_vmcmd_on_panic_attr.attr,
+	&sys_vmcmd_on_halt_attr.attr,
+	&sys_vmcmd_on_poff_attr.attr,
+	NULL,
+};
+
+static struct attribute_group vmcmd_attr_group = {
+	.attrs = vmcmd_attrs,
+};
+
+static struct kset *vmcmd_kset;
+
+static void vmcmd_run(struct shutdown_trigger *trigger)
+{
+	char *cmd, *next_cmd;
+
+	if (strcmp(trigger->name, ON_REIPL_STR) == 0)
+		cmd = vmcmd_on_reboot;
+	else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
+		cmd = vmcmd_on_panic;
+	else if (strcmp(trigger->name, ON_HALT_STR) == 0)
+		cmd = vmcmd_on_halt;
+	else if (strcmp(trigger->name, ON_POFF_STR) == 0)
+		cmd = vmcmd_on_poff;
+	else
+		return;
+
+	if (strlen(cmd) == 0)
+		return;
+	do {
+		next_cmd = strchr(cmd, '\n');
+		if (next_cmd) {
+			next_cmd[0] = 0;
+			next_cmd += 1;
+		}
+		__cpcmd(cmd, NULL, 0, NULL);
+		cmd = next_cmd;
+	} while (cmd != NULL);
+}
+
+static int vmcmd_init(void)
 {
-	int rc;
+	if (!MACHINE_IS_VM)
+		return -ENOTSUPP;
+	vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
+	if (!vmcmd_kset)
+		return -ENOMEM;
+	return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
+}
+
+static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
+					      vmcmd_run, vmcmd_init};
+
+/*
+ * stop shutdown action: Stop Linux on shutdown.
+ */
+
+static void stop_run(struct shutdown_trigger *trigger)
+{
+	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
+		disabled_wait((unsigned long) __builtin_return_address(0));
+	else {
+		signal_processor(smp_processor_id(), sigp_stop);
+		for (;;);
+	}
+}
+
+static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
+					     stop_run, NULL};
+
+/* action list */
+
+static struct shutdown_action *shutdown_actions_list[] = {
+	&ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action};
+#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
+
+/*
+ * Trigger section
+ */
+
+static struct kset *shutdown_actions_kset;
+
+static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
+		       size_t len)
+{
+	int i;
+	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
+		if (!shutdown_actions_list[i])
+			continue;
+		if (strncmp(buf, shutdown_actions_list[i]->name,
+			    strlen(shutdown_actions_list[i]->name)) == 0) {
+			trigger->action = shutdown_actions_list[i];
+			return len;
+		}
+	}
+	return -EINVAL;
+}
+
+/* on reipl */
+
+static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
+						    &reipl_action};
+
+static ssize_t on_reboot_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", on_reboot_trigger.action->name);
+}
+
+static ssize_t on_reboot_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t len)
+{
+	return set_trigger(buf, &on_reboot_trigger, len);
+}
+
+static struct kobj_attribute on_reboot_attr =
+	__ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
+
+static void do_machine_restart(char *__unused)
+{
+	smp_send_stop();
+	on_reboot_trigger.action->fn(&on_reboot_trigger);
+	reipl_run(NULL);
+}
+void (*_machine_restart)(char *command) = do_machine_restart;
+
+/* on panic */
+
+static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
+
+static ssize_t on_panic_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", on_panic_trigger.action->name);
+}
 
+static ssize_t on_panic_store(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      const char *buf, size_t len)
+{
+	return set_trigger(buf, &on_panic_trigger, len);
+}
+
+static struct kobj_attribute on_panic_attr =
+	__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+
+static void do_panic(void)
+{
+	on_panic_trigger.action->fn(&on_panic_trigger);
+	stop_run(&on_panic_trigger);
+}
+
+/* on halt */
+
+static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
+
+static ssize_t on_halt_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", on_halt_trigger.action->name);
+}
+
+static ssize_t on_halt_store(struct kobject *kobj,
+			     struct kobj_attribute *attr,
+			     const char *buf, size_t len)
+{
+	return set_trigger(buf, &on_halt_trigger, len);
+}
+
+static struct kobj_attribute on_halt_attr =
+	__ATTR(on_halt, 0644, on_halt_show, on_halt_store);
+
+
+static void do_machine_halt(void)
+{
+	smp_send_stop();
+	on_halt_trigger.action->fn(&on_halt_trigger);
+	stop_run(&on_halt_trigger);
+}
+void (*_machine_halt)(void) = do_machine_halt;
+
+/* on power off */
+
+static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
+
+static ssize_t on_poff_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *page)
+{
+	return sprintf(page, "%s\n", on_poff_trigger.action->name);
+}
+
+static ssize_t on_poff_store(struct kobject *kobj,
+			     struct kobj_attribute *attr,
+			     const char *buf, size_t len)
+{
+	return set_trigger(buf, &on_poff_trigger, len);
+}
+
+static struct kobj_attribute on_poff_attr =
+	__ATTR(on_poff, 0644, on_poff_show, on_poff_store);
+
+
+static void do_machine_power_off(void)
+{
+	smp_send_stop();
+	on_poff_trigger.action->fn(&on_poff_trigger);
+	stop_run(&on_poff_trigger);
+}
+void (*_machine_power_off)(void) = do_machine_power_off;
+
+static void __init shutdown_triggers_init(void)
+{
 	shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
 						    firmware_kobj);
 	if (!shutdown_actions_kset)
-		return -ENOMEM;
-	rc = sysfs_create_file(&shutdown_actions_kset->kobj, &on_panic_attr);
-	if (rc) {
-		kset_unregister(shutdown_actions_kset);
-		return rc;
+		goto fail;
+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
+			      &on_reboot_attr.attr))
+		goto fail;
+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
+			      &on_panic_attr.attr))
+		goto fail;
+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
+			      &on_halt_attr.attr))
+		goto fail;
+	if (sysfs_create_file(&shutdown_actions_kset->kobj,
+			      &on_poff_attr.attr))
+		goto fail;
+
+	return;
+fail:
+	panic("shutdown_triggers_init failed\n");
+}
+
+static void __init shutdown_actions_init(void)
+{
+	int i;
+
+	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
+		if (!shutdown_actions_list[i]->init)
+			continue;
+		if (shutdown_actions_list[i]->init())
+			shutdown_actions_list[i] = NULL;
 	}
-	atomic_notifier_chain_register(&panic_notifier_list,
-				       &shutdown_on_panic_nb);
-	return 0;
 }
 
 static int __init s390_ipl_init(void)
 {
-	int rc;
-
-	sclp_get_ipl_info(&sclp_ipl_info);
 	reipl_probe();
-	rc = ipl_init();
-	if (rc)
-		return rc;
-	rc = reipl_init();
-	if (rc)
-		return rc;
-	rc = dump_init();
-	if (rc)
-		return rc;
-	rc = shutdown_actions_init();
-	if (rc)
-		return rc;
+	sclp_get_ipl_info(&sclp_ipl_info);
+	shutdown_actions_init();
+	shutdown_triggers_init();
 	return 0;
 }
 
 __initcall(s390_ipl_init);
 
+static void __init strncpy_skip_quote(char *dst, char *src, int n)
+{
+	int sx, dx;
+
+	dx = 0;
+	for (sx = 0; src[sx] != 0; sx++) {
+		if (src[sx] == '"')
+			continue;
+		dst[dx++] = src[sx];
+		if (dx >= n)
+			break;
+	}
+}
+
+static int __init vmcmd_on_reboot_setup(char *str)
+{
+	if (!MACHINE_IS_VM)
+		return 1;
+	strncpy_skip_quote(vmcmd_on_reboot, str, 127);
+	vmcmd_on_reboot[127] = 0;
+	on_reboot_trigger.action = &vmcmd_action;
+	return 1;
+}
+__setup("vmreboot=", vmcmd_on_reboot_setup);
+
+static int __init vmcmd_on_panic_setup(char *str)
+{
+	if (!MACHINE_IS_VM)
+		return 1;
+	strncpy_skip_quote(vmcmd_on_panic, str, 127);
+	vmcmd_on_panic[127] = 0;
+	on_panic_trigger.action = &vmcmd_action;
+	return 1;
+}
+__setup("vmpanic=", vmcmd_on_panic_setup);
+
+static int __init vmcmd_on_halt_setup(char *str)
+{
+	if (!MACHINE_IS_VM)
+		return 1;
+	strncpy_skip_quote(vmcmd_on_halt, str, 127);
+	vmcmd_on_halt[127] = 0;
+	on_halt_trigger.action = &vmcmd_action;
+	return 1;
+}
+__setup("vmhalt=", vmcmd_on_halt_setup);
+
+static int __init vmcmd_on_poff_setup(char *str)
+{
+	if (!MACHINE_IS_VM)
+		return 1;
+	strncpy_skip_quote(vmcmd_on_poff, str, 127);
+	vmcmd_on_poff[127] = 0;
+	on_poff_trigger.action = &vmcmd_action;
+	return 1;
+}
+__setup("vmpoff=", vmcmd_on_poff_setup);
+
+static int on_panic_notify(struct notifier_block *self,
+			   unsigned long event, void *data)
+{
+	do_panic();
+	return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+	.notifier_call = on_panic_notify,
+	.priority = 0,
+};
+
+void __init setup_ipl(void)
+{
+	ipl_info.type = get_ipl_type();
+	switch (ipl_info.type) {
+	case IPL_TYPE_CCW:
+		ipl_info.data.ccw.dev_id.devno = ipl_devno;
+		ipl_info.data.ccw.dev_id.ssid = 0;
+		break;
+	case IPL_TYPE_FCP:
+	case IPL_TYPE_FCP_DUMP:
+		ipl_info.data.fcp.dev_id.devno =
+			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
+		ipl_info.data.fcp.dev_id.ssid = 0;
+		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
+		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+		break;
+	case IPL_TYPE_NSS:
+		strncpy(ipl_info.data.nss.name, kernel_nss_name,
+			sizeof(ipl_info.data.nss.name));
+		break;
+	case IPL_TYPE_UNKNOWN:
+	default:
+		/* We have no info to copy */
+		break;
+	}
+	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+}
+
 void __init ipl_save_parameters(void)
 {
 	struct cio_iplinfo iplinfo;
@@ -1202,3 +1475,4 @@ void s390_reset_system(void)
 
 	do_reset_calls();
 }
+
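
To see the new trigger/action interface in one place: ipl_init(), reipl_init(),
dump_init() and vmcmd_init() above create ksets under firmware_kobj, so the
attributes land under /sys/firmware (shutdown_actions, vmcmd, ipl, reipl,
dump). A userspace sketch, not part of the patch, selecting the "vmcmd" action
for the on_panic trigger on a z/VM guest; the CP command text is a made-up
example:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Write a string into a sysfs attribute file. */
	static int write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
			perror(path);
			if (fd >= 0)
				close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		/* Only meaningful on z/VM; vmcmd_init() fails elsewhere,
		 * dropping the vmcmd action from the list. */
		if (write_str("/sys/firmware/vmcmd/on_panic",
			      "MSG OPERATOR Linux guest panicked\n"))
			return 1;
		return write_str("/sys/firmware/shutdown_actions/on_panic",
				 "vmcmd");
	}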
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 29f7884b4ffa..0e7aca039307 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,7 +36,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/notifier.h>
-
+#include <linux/utsname.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -182,13 +182,15 @@ void cpu_idle(void)
 
 void show_regs(struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
-
-	printk("CPU:    %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
-	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
-	       current->comm, task_pid_nr(current), (void *) tsk,
-	       (void *) tsk->thread.ksp);
-
+	print_modules();
+	printk("CPU: %d %s %s %.*s\n",
+	       task_thread_info(current)->cpu, print_tainted(),
+	       init_utsname()->release,
+	       (int)strcspn(init_utsname()->version, " "),
+	       init_utsname()->version);
+	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+	       current->comm, current->pid, current,
+	       (void *) current->thread.ksp);
 	show_registers(regs);
 	/* Show stack backtrace if pt_regs is from kernel mode */
 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1d81bf9488ae..6e036bae9875 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task)
 	per_info->control_regs.bits.storage_alt_space_ctl = 0;
 }
 
-static void set_single_step(struct task_struct *task)
+void user_enable_single_step(struct task_struct *task)
 {
 	task->thread.per_info.single_step = 1;
 	FixPerRegisters(task);
 }
 
-static void clear_single_step(struct task_struct *task)
+void user_disable_single_step(struct task_struct *task)
 {
 	task->thread.per_info.single_step = 0;
 	FixPerRegisters(task);
@@ -107,7 +107,7 @@ void
 ptrace_disable(struct task_struct *child)
 {
 	/* make sure the single step bit is not set. */
-	clear_single_step(child);
+	user_disable_single_step(child);
 }
 
 #ifndef CONFIG_64BIT
@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 		child->exit_code = data;
 		/* make sure the single step bit is not set. */
-		clear_single_step(child);
+		user_disable_single_step(child);
 		wake_up_process(child);
 		return 0;
 
@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
 			return 0;
 		child->exit_code = SIGKILL;
 		/* make sure the single step bit is not set. */
-		clear_single_step(child);
+		user_disable_single_step(child);
 		wake_up_process(child);
 		return 0;
 
@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
 			return -EIO;
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 		child->exit_code = data;
-		if (data)
-			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
-		else
-			set_single_step(child);
+		user_enable_single_step(child);
 		/* give it a chance to run. */
 		wake_up_process(child);
 		return 0;
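
The rename to user_enable_single_step()/user_disable_single_step() hooks s390
into the generic single-step helpers driven by PTRACE_SINGLESTEP. A minimal
tracer sketch, illustration only, using standard ptrace(2) calls rather than
anything introduced by this patch:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {		/* child: ask to be traced, then exec */
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			execl("/bin/true", "true", (char *) NULL);
			_exit(127);
		}
		waitpid(pid, NULL, 0);	/* child stops with SIGTRAP at exec */
		/* step one machine instruction; on s390 this ends up in
		 * user_enable_single_step() via the PER facility */
		ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
		waitpid(pid, NULL, 0);
		ptrace(PTRACE_CONT, pid, NULL, NULL);	/* let it finish */
		waitpid(pid, NULL, 0);
		return 0;
	}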
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 577aa7dd660e..766c783bd7a7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void)
 }
 
 /*
- * VM halt and poweroff setup routines
- */
-char vmhalt_cmd[128] = "";
-char vmpoff_cmd[128] = "";
-static char vmpanic_cmd[128] = "";
-
-static void strncpy_skip_quote(char *dst, char *src, int n)
-{
-	int sx, dx;
-
-	dx = 0;
-	for (sx = 0; src[sx] != 0; sx++) {
-		if (src[sx] == '"') continue;
-		dst[dx++] = src[sx];
-		if (dx >= n) break;
-	}
-}
-
-static int __init vmhalt_setup(char *str)
-{
-	strncpy_skip_quote(vmhalt_cmd, str, 127);
-	vmhalt_cmd[127] = 0;
-	return 1;
-}
-
-__setup("vmhalt=", vmhalt_setup);
-
-static int __init vmpoff_setup(char *str)
-{
-	strncpy_skip_quote(vmpoff_cmd, str, 127);
-	vmpoff_cmd[127] = 0;
-	return 1;
-}
-
-__setup("vmpoff=", vmpoff_setup);
-
-static int vmpanic_notify(struct notifier_block *self, unsigned long event,
-			  void *data)
-{
-	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
-		cpcmd(vmpanic_cmd, NULL, 0, NULL);
-
-	return NOTIFY_OK;
-}
-
-#define PANIC_PRI_VMPANIC	0
-
-static struct notifier_block vmpanic_nb = {
-	.notifier_call = vmpanic_notify,
-	.priority = PANIC_PRI_VMPANIC
-};
-
-static int __init vmpanic_setup(char *str)
-{
-	static int register_done __initdata = 0;
-
-	strncpy_skip_quote(vmpanic_cmd, str, 127);
-	vmpanic_cmd[127] = 0;
-	if (!register_done) {
-		register_done = 1;
-		atomic_notifier_chain_register(&panic_notifier_list,
-					       &vmpanic_nb);
-	}
-	return 1;
-}
-
-__setup("vmpanic=", vmpanic_setup);
-
-/*
  * condev= and conmode= setup parameter.
  */
 
@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
 static inline void setup_zfcpdump(unsigned int console_devno) {}
 #endif /* CONFIG_ZFCPDUMP */
 
-#ifdef CONFIG_SMP
-void (*_machine_restart)(char *command) = machine_restart_smp;
-void (*_machine_halt)(void) = machine_halt_smp;
-void (*_machine_power_off)(void) = machine_power_off_smp;
-#else
-/*
- * Reboot, halt and power_off routines for non SMP.
- */
-static void do_machine_restart_nonsmp(char * __unused)
-{
-	do_reipl();
-}
-
-static void do_machine_halt_nonsmp(void)
-{
-	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-static void do_machine_power_off_nonsmp(void)
-{
-	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
-void (*_machine_halt)(void) = do_machine_halt_nonsmp;
-void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
-#endif
-
 /*
  * Reboot, halt and power_off stubs. They just call _machine_restart,
  * _machine_halt or _machine_power_off.
@@ -559,7 +458,9 @@ setup_resources(void)
559 data_resource.start = (unsigned long) &_etext; 458 data_resource.start = (unsigned long) &_etext;
560 data_resource.end = (unsigned long) &_edata - 1; 459 data_resource.end = (unsigned long) &_edata - 1;
561 460
562 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 461 for (i = 0; i < MEMORY_CHUNKS; i++) {
462 if (!memory_chunk[i].size)
463 continue;
563 res = alloc_bootmem_low(sizeof(struct resource)); 464 res = alloc_bootmem_low(sizeof(struct resource));
564 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 465 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
565 switch (memory_chunk[i].type) { 466 switch (memory_chunk[i].type) {
@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
617static void __init setup_memory_end(void) 518static void __init setup_memory_end(void)
618{ 519{
619 unsigned long memory_size; 520 unsigned long memory_size;
620 unsigned long max_mem, max_phys; 521 unsigned long max_mem;
621 int i; 522 int i;
622 523
623#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 524#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
@@ -625,10 +526,31 @@ static void __init setup_memory_end(void)
625 memory_end = ZFCPDUMP_HSA_SIZE; 526 memory_end = ZFCPDUMP_HSA_SIZE;
626#endif 527#endif
627 memory_size = 0; 528 memory_size = 0;
628 max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
629 memory_end &= PAGE_MASK; 529 memory_end &= PAGE_MASK;
630 530
631 max_mem = memory_end ? min(max_phys, memory_end) : max_phys; 531 max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
532 memory_end = min(max_mem, memory_end);
533
534 /*
535 * Make sure all chunks are MAX_ORDER aligned so we don't need the
536 * extra checks that HOLES_IN_ZONE would require.
537 */
538 for (i = 0; i < MEMORY_CHUNKS; i++) {
539 unsigned long start, end;
540 struct mem_chunk *chunk;
541 unsigned long align;
542
543 chunk = &memory_chunk[i];
544 align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
545 start = (chunk->addr + align - 1) & ~(align - 1);
546 end = (chunk->addr + chunk->size) & ~(align - 1);
547 if (start >= end)
548 memset(chunk, 0, sizeof(*chunk));
549 else {
550 chunk->addr = start;
551 chunk->size = end - start;
552 }
553 }
632 554
633 for (i = 0; i < MEMORY_CHUNKS; i++) { 555 for (i = 0; i < MEMORY_CHUNKS; i++) {
634 struct mem_chunk *chunk = &memory_chunk[i]; 556 struct mem_chunk *chunk = &memory_chunk[i];
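
To make the MAX_ORDER rounding above concrete, a small stand-alone example with assumed values (MAX_ORDER 11 and PAGE_SHIFT 12, giving 4 MiB alignment); the chunk values are illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned long align = 1UL << (11 + 12 - 1);	/* 4 MiB, assuming MAX_ORDER 11 */
		unsigned long addr  = 0x00580000;		/* chunk starts at 5.5 MiB */
		unsigned long size  = 0x01000000;		/* chunk is 16 MiB long */
		unsigned long start = (addr + align - 1) & ~(align - 1);	/* round up */
		unsigned long end   = (addr + size) & ~(align - 1);		/* round down */

		/* prints start=0x800000 end=0x1400000 size=0xc00000: the unaligned
		 * head and tail of the chunk are simply cut off */
		printf("start=%#lx end=%#lx size=%#lx\n", start, end, end - start);
		return 0;
	}
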
@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p)
890 812
891 parse_early_param(); 813 parse_early_param();
892 814
893 setup_ipl_info(); 815 setup_ipl();
894 setup_memory_end(); 816 setup_memory_end();
895 setup_addressing_mode(); 817 setup_addressing_mode();
896 setup_memory(); 818 setup_memory();
@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p)
899 821
900 cpu_init(); 822 cpu_init();
901 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; 823 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
902 smp_setup_cpu_possible_map();
903 824
904 /* 825 /*
905 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). 826 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p)
920 841
921void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) 842void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
922{ 843{
923 printk("cpu %d " 844 printk(KERN_INFO "cpu %d "
924#ifdef CONFIG_SMP 845#ifdef CONFIG_SMP
925 "phys_idx=%d " 846 "phys_idx=%d "
926#endif 847#endif
@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
996static void c_stop(struct seq_file *m, void *v) 917static void c_stop(struct seq_file *m, void *v)
997{ 918{
998} 919}
999struct seq_operations cpuinfo_op = { 920const struct seq_operations cpuinfo_op = {
1000 .start = c_start, 921 .start = c_start,
1001 .next = c_next, 922 .next = c_next,
1002 .stop = c_stop, 923 .stop = c_stop,
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d264671c1b71..4449bf32cbf1 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs)
471 471
472 if (signr > 0) { 472 if (signr > 0) {
473 /* Whee! Actually deliver the signal. */ 473 /* Whee! Actually deliver the signal. */
474 int ret;
474#ifdef CONFIG_COMPAT 475#ifdef CONFIG_COMPAT
475 if (test_thread_flag(TIF_31BIT)) { 476 if (test_thread_flag(TIF_31BIT)) {
476 extern int handle_signal32(unsigned long sig, 477 extern int handle_signal32(unsigned long sig,
@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs)
478 siginfo_t *info, 479 siginfo_t *info,
479 sigset_t *oldset, 480 sigset_t *oldset,
480 struct pt_regs *regs); 481 struct pt_regs *regs);
481 if (handle_signal32( 482 ret = handle_signal32(signr, &ka, &info, oldset, regs);
482 signr, &ka, &info, oldset, regs) == 0) {
483 if (test_thread_flag(TIF_RESTORE_SIGMASK))
484 clear_thread_flag(TIF_RESTORE_SIGMASK);
485 }
486 return;
487 } 483 }
484 else
488#endif 485#endif
489 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { 486 ret = handle_signal(signr, &ka, &info, oldset, regs);
487 if (!ret) {
490 /* 488 /*
491 * A signal was successfully delivered; the saved 489 * A signal was successfully delivered; the saved
492 * sigmask will have been stored in the signal frame, 490 * sigmask will have been stored in the signal frame,
@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs)
495 */ 493 */
496 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 494 if (test_thread_flag(TIF_RESTORE_SIGMASK))
497 clear_thread_flag(TIF_RESTORE_SIGMASK); 495 clear_thread_flag(TIF_RESTORE_SIGMASK);
496
497 /*
498 * If we would have taken a single-step trap
499 * for a normal instruction, act like we took
500 * one for the handler setup.
501 */
502 if (current->thread.per_info.single_step)
503 set_thread_flag(TIF_SINGLE_STEP);
498 } 504 }
499 return; 505 return;
500 } 506 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4c..aa37fa154512 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
42#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
43#include <asm/timer.h> 43#include <asm/timer.h>
44#include <asm/lowcore.h> 44#include <asm/lowcore.h>
45#include <asm/sclp.h>
45#include <asm/cpu.h> 46#include <asm/cpu.h>
46 47
47/* 48/*
@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
53cpumask_t cpu_online_map = CPU_MASK_NONE; 54cpumask_t cpu_online_map = CPU_MASK_NONE;
54EXPORT_SYMBOL(cpu_online_map); 55EXPORT_SYMBOL(cpu_online_map);
55 56
56cpumask_t cpu_possible_map = CPU_MASK_NONE; 57cpumask_t cpu_possible_map = CPU_MASK_ALL;
57EXPORT_SYMBOL(cpu_possible_map); 58EXPORT_SYMBOL(cpu_possible_map);
58 59
59static struct task_struct *current_set[NR_CPUS]; 60static struct task_struct *current_set[NR_CPUS];
60 61
62static u8 smp_cpu_type;
63static int smp_use_sigp_detection;
64
65enum s390_cpu_state {
66 CPU_STATE_STANDBY,
67 CPU_STATE_CONFIGURED,
68};
69
70#ifdef CONFIG_HOTPLUG_CPU
71static DEFINE_MUTEX(smp_cpu_state_mutex);
72#endif
73static int smp_cpu_state[NR_CPUS];
74
75static DEFINE_PER_CPU(struct cpu, cpu_devices);
76DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
77
61static void smp_ext_bitcall(int, ec_bit_sig); 78static void smp_ext_bitcall(int, ec_bit_sig);
62 79
63/* 80/*
@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
193} 210}
194EXPORT_SYMBOL(smp_call_function_single); 211EXPORT_SYMBOL(smp_call_function_single);
195 212
213/**
214 * smp_call_function_mask(): Run a function on a set of other CPUs.
215 * @mask: The set of cpus to run on. Must not include the current cpu.
216 * @func: The function to run. This must be fast and non-blocking.
217 * @info: An arbitrary pointer to pass to the function.
218 * @wait: If true, wait (atomically) until function has completed on other CPUs.
219 *
220 * Returns 0 on success, else a negative status code.
221 *
222 * If @wait is true, then returns once @func has returned; otherwise
223 * it returns just before the target cpu calls @func.
224 *
225 * You must not call this function with disabled interrupts or from a
226 * hardware interrupt handler or from a bottom half handler.
227 */
228int
229smp_call_function_mask(cpumask_t mask,
230 void (*func)(void *), void *info,
231 int wait)
232{
233 preempt_disable();
234 __smp_call_function_map(func, info, 0, wait, mask);
235 preempt_enable();
236 return 0;
237}
238EXPORT_SYMBOL(smp_call_function_mask);
239
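
A hedged usage sketch for the new interface, following the rules in the kerneldoc above; ipi_example() and call_all_others() are illustrative names, not part of the patch:

	#include <linux/cpumask.h>
	#include <linux/smp.h>

	static void ipi_example(void *info)
	{
		/* runs on every cpu in the mask; must be fast and must not sleep */
	}

	static void call_all_others(void)
	{
		cpumask_t mask = cpu_online_map;

		cpu_clear(get_cpu(), mask);	/* the mask must not include us */
		smp_call_function_mask(mask, ipi_example, NULL, 1 /* wait */);
		put_cpu();
	}
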
196void smp_send_stop(void) 240void smp_send_stop(void)
197{ 241{
198 int cpu, rc; 242 int cpu, rc;
@@ -217,33 +261,6 @@ void smp_send_stop(void)
217} 261}
218 262
219/* 263/*
220 * Reboot, halt and power_off routines for SMP.
221 */
222void machine_restart_smp(char *__unused)
223{
224 smp_send_stop();
225 do_reipl();
226}
227
228void machine_halt_smp(void)
229{
230 smp_send_stop();
231 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
232 __cpcmd(vmhalt_cmd, NULL, 0, NULL);
233 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
234 for (;;);
235}
236
237void machine_power_off_smp(void)
238{
239 smp_send_stop();
240 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
241 __cpcmd(vmpoff_cmd, NULL, 0, NULL);
242 signal_processor(smp_processor_id(), sigp_stop_and_store_status);
243 for (;;);
244}
245
246/*
247 * This is the main routine where commands issued by other 264 * This is the main routine where commands issued by other
248 * cpus are handled. 265 * cpus are handled.
249 */ 266 */
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
355} 372}
356EXPORT_SYMBOL(smp_ctl_clear_bit); 373EXPORT_SYMBOL(smp_ctl_clear_bit);
357 374
375/*
376 * In early ipl state a temporary logical cpu number is needed, so the sigp
377 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
378 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
379 */
380#define CPU_INIT_NO 1
381
358#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 382#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
359 383
360/* 384/*
@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
375 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); 399 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
376 return; 400 return;
377 } 401 }
378 zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area)); 402 zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
379 __cpu_logical_map[1] = (__u16) phy_cpu; 403 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
380 while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy) 404 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
405 sigp_busy)
381 cpu_relax(); 406 cpu_relax();
382 memcpy(zfcpdump_save_areas[cpu], 407 memcpy(zfcpdump_save_areas[cpu],
383 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 408 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
397 422
398#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ 423#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
399 424
400/* 425static int cpu_stopped(int cpu)
401 * Lets check how many CPUs we have.
402 */
403static unsigned int __init smp_count_cpus(void)
404{ 426{
405 unsigned int cpu, num_cpus; 427 __u32 status;
406 __u16 boot_cpu_addr;
407 428
408 /* 429 /* Check for stopped state */
409 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu. 430 if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
410 */ 431 sigp_status_stored) {
432 if (status & 0x40)
433 return 1;
434 }
435 return 0;
436}
437
438static int cpu_known(int cpu_id)
439{
440 int cpu;
441
442 for_each_present_cpu(cpu) {
443 if (__cpu_logical_map[cpu] == cpu_id)
444 return 1;
445 }
446 return 0;
447}
448
449static int smp_rescan_cpus_sigp(cpumask_t avail)
450{
451 int cpu_id, logical_cpu;
452
453 logical_cpu = first_cpu(avail);
454 if (logical_cpu == NR_CPUS)
455 return 0;
456 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
457 if (cpu_known(cpu_id))
458 continue;
459 __cpu_logical_map[logical_cpu] = cpu_id;
460 if (!cpu_stopped(logical_cpu))
461 continue;
462 cpu_set(logical_cpu, cpu_present_map);
463 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
464 logical_cpu = next_cpu(logical_cpu, avail);
465 if (logical_cpu == NR_CPUS)
466 break;
467 }
468 return 0;
469}
470
471static int smp_rescan_cpus_sclp(cpumask_t avail)
472{
473 struct sclp_cpu_info *info;
474 int cpu_id, logical_cpu, cpu;
475 int rc;
476
477 logical_cpu = first_cpu(avail);
478 if (logical_cpu == NR_CPUS)
479 return 0;
480 info = kmalloc(sizeof(*info), GFP_KERNEL);
481 if (!info)
482 return -ENOMEM;
483 rc = sclp_get_cpu_info(info);
484 if (rc)
485 goto out;
486 for (cpu = 0; cpu < info->combined; cpu++) {
487 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
488 continue;
489 cpu_id = info->cpu[cpu].address;
490 if (cpu_known(cpu_id))
491 continue;
492 __cpu_logical_map[logical_cpu] = cpu_id;
493 cpu_set(logical_cpu, cpu_present_map);
494 if (cpu >= info->configured)
495 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
496 else
497 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
498 logical_cpu = next_cpu(logical_cpu, avail);
499 if (logical_cpu == NR_CPUS)
500 break;
501 }
502out:
503 kfree(info);
504 return rc;
505}
506
507static int smp_rescan_cpus(void)
508{
509 cpumask_t avail;
510
511 cpus_xor(avail, cpu_possible_map, cpu_present_map);
512 if (smp_use_sigp_detection)
513 return smp_rescan_cpus_sigp(avail);
514 else
515 return smp_rescan_cpus_sclp(avail);
516}
517
518static void __init smp_detect_cpus(void)
519{
520 unsigned int cpu, c_cpus, s_cpus;
521 struct sclp_cpu_info *info;
522 u16 boot_cpu_addr, cpu_addr;
523
524 c_cpus = 1;
525 s_cpus = 0;
411 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; 526 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
412 current_thread_info()->cpu = 0; 527 info = kmalloc(sizeof(*info), GFP_KERNEL);
413 num_cpus = 1; 528 if (!info)
414 for (cpu = 0; cpu <= 65535; cpu++) { 529 panic("smp_detect_cpus failed to allocate memory\n");
415 if ((__u16) cpu == boot_cpu_addr) 530 /* Use sigp detection algorithm if sclp doesn't work. */
531 if (sclp_get_cpu_info(info)) {
532 smp_use_sigp_detection = 1;
533 for (cpu = 0; cpu <= 65535; cpu++) {
534 if (cpu == boot_cpu_addr)
535 continue;
536 __cpu_logical_map[CPU_INIT_NO] = cpu;
537 if (!cpu_stopped(CPU_INIT_NO))
538 continue;
539 smp_get_save_area(c_cpus, cpu);
540 c_cpus++;
541 }
542 goto out;
543 }
544
545 if (info->has_cpu_type) {
546 for (cpu = 0; cpu < info->combined; cpu++) {
547 if (info->cpu[cpu].address == boot_cpu_addr) {
548 smp_cpu_type = info->cpu[cpu].type;
549 break;
550 }
551 }
552 }
553
554 for (cpu = 0; cpu < info->combined; cpu++) {
555 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
556 continue;
557 cpu_addr = info->cpu[cpu].address;
558 if (cpu_addr == boot_cpu_addr)
416 continue; 559 continue;
417 __cpu_logical_map[1] = (__u16) cpu; 560 __cpu_logical_map[CPU_INIT_NO] = cpu_addr;
418 if (signal_processor(1, sigp_sense) == sigp_not_operational) 561 if (!cpu_stopped(CPU_INIT_NO)) {
562 s_cpus++;
419 continue; 563 continue;
420 smp_get_save_area(num_cpus, cpu); 564 }
421 num_cpus++; 565 smp_get_save_area(c_cpus, cpu_addr);
566 c_cpus++;
422 } 567 }
423 printk("Detected %d CPU's\n", (int) num_cpus); 568out:
424 printk("Boot cpu address %2X\n", boot_cpu_addr); 569 kfree(info);
425 return num_cpus; 570 printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
571 get_online_cpus();
572 smp_rescan_cpus();
573 put_online_cpus();
426} 574}
427 575
428/* 576/*
@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
453 return 0; 601 return 0;
454} 602}
455 603
456DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
457
458static void __init smp_create_idle(unsigned int cpu) 604static void __init smp_create_idle(unsigned int cpu)
459{ 605{
460 struct task_struct *p; 606 struct task_struct *p;
@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
470 spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock); 616 spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
471} 617}
472 618
473static int cpu_stopped(int cpu) 619static int __cpuinit smp_alloc_lowcore(int cpu)
474{ 620{
475 __u32 status; 621 unsigned long async_stack, panic_stack;
622 struct _lowcore *lowcore;
623 int lc_order;
624
625 lc_order = sizeof(long) == 8 ? 1 : 0;
626 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
627 if (!lowcore)
628 return -ENOMEM;
629 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
630 if (!async_stack)
631 goto out_async_stack;
632 panic_stack = __get_free_page(GFP_KERNEL);
633 if (!panic_stack)
634 goto out_panic_stack;
635
636 *lowcore = S390_lowcore;
637 lowcore->async_stack = async_stack + ASYNC_SIZE;
638 lowcore->panic_stack = panic_stack + PAGE_SIZE;
476 639
477 /* Check for stopped state */ 640#ifndef CONFIG_64BIT
478 if (signal_processor_ps(&status, 0, cpu, sigp_sense) == 641 if (MACHINE_HAS_IEEE) {
479 sigp_status_stored) { 642 unsigned long save_area;
480 if (status & 0x40) 643
481 return 1; 644 save_area = get_zeroed_page(GFP_KERNEL);
645 if (!save_area)
646 goto out_save_area;
647 lowcore->extended_save_area_addr = (u32) save_area;
482 } 648 }
649#endif
650 lowcore_ptr[cpu] = lowcore;
483 return 0; 651 return 0;
652
653#ifndef CONFIG_64BIT
654out_save_area:
655 free_page(panic_stack);
656#endif
657out_panic_stack:
658 free_pages(async_stack, ASYNC_ORDER);
659out_async_stack:
660 free_pages((unsigned long) lowcore, lc_order);
661 return -ENOMEM;
484} 662}
485 663
486/* Upping and downing of CPUs */ 664#ifdef CONFIG_HOTPLUG_CPU
665static void smp_free_lowcore(int cpu)
666{
667 struct _lowcore *lowcore;
668 int lc_order;
669
670 lc_order = sizeof(long) == 8 ? 1 : 0;
671 lowcore = lowcore_ptr[cpu];
672#ifndef CONFIG_64BIT
673 if (MACHINE_HAS_IEEE)
674 free_page((unsigned long) lowcore->extended_save_area_addr);
675#endif
676 free_page(lowcore->panic_stack - PAGE_SIZE);
677 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
678 free_pages((unsigned long) lowcore, lc_order);
679 lowcore_ptr[cpu] = NULL;
680}
681#endif /* CONFIG_HOTPLUG_CPU */
487 682
488int __cpu_up(unsigned int cpu) 683/* Upping and downing of CPUs */
684int __cpuinit __cpu_up(unsigned int cpu)
489{ 685{
490 struct task_struct *idle; 686 struct task_struct *idle;
491 struct _lowcore *cpu_lowcore; 687 struct _lowcore *cpu_lowcore;
492 struct stack_frame *sf; 688 struct stack_frame *sf;
493 sigp_ccode ccode; 689 sigp_ccode ccode;
494 int curr_cpu;
495 690
496 for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { 691 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
497 __cpu_logical_map[cpu] = (__u16) curr_cpu; 692 return -EIO;
498 if (cpu_stopped(cpu)) 693 if (smp_alloc_lowcore(cpu))
499 break; 694 return -ENOMEM;
500 }
501
502 if (!cpu_stopped(cpu))
503 return -ENODEV;
504 695
505 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), 696 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
506 cpu, sigp_set_prefix); 697 cpu, sigp_set_prefix);
@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
515 cpu_lowcore = lowcore_ptr[cpu]; 706 cpu_lowcore = lowcore_ptr[cpu];
516 cpu_lowcore->kernel_stack = (unsigned long) 707 cpu_lowcore->kernel_stack = (unsigned long)
517 task_stack_page(idle) + THREAD_SIZE; 708 task_stack_page(idle) + THREAD_SIZE;
709 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
518 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 710 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
519 - sizeof(struct pt_regs) 711 - sizeof(struct pt_regs)
520 - sizeof(struct stack_frame)); 712 - sizeof(struct stack_frame));
@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
528 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 720 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
529 cpu_lowcore->current_task = (unsigned long) idle; 721 cpu_lowcore->current_task = (unsigned long) idle;
530 cpu_lowcore->cpu_data.cpu_nr = cpu; 722 cpu_lowcore->cpu_data.cpu_nr = cpu;
723 cpu_lowcore->softirq_pending = 0;
724 cpu_lowcore->ext_call_fast = 0;
531 eieio(); 725 eieio();
532 726
533 while (signal_processor(cpu, sigp_restart) == sigp_busy) 727 while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
538 return 0; 732 return 0;
539} 733}
540 734
541static unsigned int __initdata additional_cpus; 735static int __init setup_possible_cpus(char *s)
542static unsigned int __initdata possible_cpus;
543
544void __init smp_setup_cpu_possible_map(void)
545{ 736{
546 unsigned int phy_cpus, pos_cpus, cpu; 737 int pcpus, cpu;
547
548 phy_cpus = smp_count_cpus();
549 pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
550
551 if (possible_cpus)
552 pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
553 738
554 for (cpu = 0; cpu < pos_cpus; cpu++) 739 pcpus = simple_strtoul(s, NULL, 0);
740 cpu_possible_map = cpumask_of_cpu(0);
741 for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
555 cpu_set(cpu, cpu_possible_map); 742 cpu_set(cpu, cpu_possible_map);
556
557 phy_cpus = min(phy_cpus, pos_cpus);
558
559 for (cpu = 0; cpu < phy_cpus; cpu++)
560 cpu_set(cpu, cpu_present_map);
561}
562
563#ifdef CONFIG_HOTPLUG_CPU
564
565static int __init setup_additional_cpus(char *s)
566{
567 additional_cpus = simple_strtoul(s, NULL, 0);
568 return 0;
569}
570early_param("additional_cpus", setup_additional_cpus);
571
572static int __init setup_possible_cpus(char *s)
573{
574 possible_cpus = simple_strtoul(s, NULL, 0);
575 return 0; 743 return 0;
576} 744}
577early_param("possible_cpus", setup_possible_cpus); 745early_param("possible_cpus", setup_possible_cpus);
578 746
747#ifdef CONFIG_HOTPLUG_CPU
748
579int __cpu_disable(void) 749int __cpu_disable(void)
580{ 750{
581 struct ec_creg_mask_parms cr_parms; 751 struct ec_creg_mask_parms cr_parms;
@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
612 /* Wait until target cpu is down */ 782 /* Wait until target cpu is down */
613 while (!smp_cpu_not_running(cpu)) 783 while (!smp_cpu_not_running(cpu))
614 cpu_relax(); 784 cpu_relax();
615 printk("Processor %d spun down\n", cpu); 785 smp_free_lowcore(cpu);
786 printk(KERN_INFO "Processor %d spun down\n", cpu);
616} 787}
617 788
618void cpu_die(void) 789void cpu_die(void)
@@ -625,49 +796,19 @@ void cpu_die(void)
625 796
626#endif /* CONFIG_HOTPLUG_CPU */ 797#endif /* CONFIG_HOTPLUG_CPU */
627 798
628/*
629 * Cycle through the processors and setup structures.
630 */
631
632void __init smp_prepare_cpus(unsigned int max_cpus) 799void __init smp_prepare_cpus(unsigned int max_cpus)
633{ 800{
634 unsigned long stack;
635 unsigned int cpu; 801 unsigned int cpu;
636 int i; 802
803 smp_detect_cpus();
637 804
638 /* request the 0x1201 emergency signal external interrupt */ 805 /* request the 0x1201 emergency signal external interrupt */
639 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 806 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
640 panic("Couldn't request external interrupt 0x1201"); 807 panic("Couldn't request external interrupt 0x1201");
641 memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); 808 memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
642 /*
643 * Initialize prefix pages and stacks for all possible cpus
644 */
645 print_cpu_info(&S390_lowcore.cpu_data); 809 print_cpu_info(&S390_lowcore.cpu_data);
810 smp_alloc_lowcore(smp_processor_id());
646 811
647 for_each_possible_cpu(i) {
648 lowcore_ptr[i] = (struct _lowcore *)
649 __get_free_pages(GFP_KERNEL | GFP_DMA,
650 sizeof(void*) == 8 ? 1 : 0);
651 stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
652 if (!lowcore_ptr[i] || !stack)
653 panic("smp_boot_cpus failed to allocate memory\n");
654
655 *(lowcore_ptr[i]) = S390_lowcore;
656 lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
657 stack = __get_free_pages(GFP_KERNEL, 0);
658 if (!stack)
659 panic("smp_boot_cpus failed to allocate memory\n");
660 lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
661#ifndef CONFIG_64BIT
662 if (MACHINE_HAS_IEEE) {
663 lowcore_ptr[i]->extended_save_area_addr =
664 (__u32) __get_free_pages(GFP_KERNEL, 0);
665 if (!lowcore_ptr[i]->extended_save_area_addr)
666 panic("smp_boot_cpus failed to "
667 "allocate memory\n");
668 }
669#endif
670 }
671#ifndef CONFIG_64BIT 812#ifndef CONFIG_64BIT
672 if (MACHINE_HAS_IEEE) 813 if (MACHINE_HAS_IEEE)
673 ctl_set_bit(14, 29); /* enable extended save area */ 814 ctl_set_bit(14, 29); /* enable extended save area */
@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
683{ 824{
684 BUG_ON(smp_processor_id() != 0); 825 BUG_ON(smp_processor_id() != 0);
685 826
827 current_thread_info()->cpu = 0;
828 cpu_set(0, cpu_present_map);
686 cpu_set(0, cpu_online_map); 829 cpu_set(0, cpu_online_map);
687 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 830 S390_lowcore.percpu_offset = __per_cpu_offset[0];
688 current_set[0] = current; 831 current_set[0] = current;
832 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
689 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); 833 spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
690} 834}
691 835
692void __init smp_cpus_done(unsigned int max_cpus) 836void __init smp_cpus_done(unsigned int max_cpus)
693{ 837{
694 cpu_present_map = cpu_possible_map;
695} 838}
696 839
697/* 840/*
@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
705 return 0; 848 return 0;
706} 849}
707 850
708static DEFINE_PER_CPU(struct cpu, cpu_devices); 851#ifdef CONFIG_HOTPLUG_CPU
852static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
853{
854 ssize_t count;
855
856 mutex_lock(&smp_cpu_state_mutex);
857 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
858 mutex_unlock(&smp_cpu_state_mutex);
859 return count;
860}
861
862static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
863 size_t count)
864{
865 int cpu = dev->id;
866 int val, rc;
867 char delim;
868
869 if (sscanf(buf, "%d %c", &val, &delim) != 1)
870 return -EINVAL;
871 if (val != 0 && val != 1)
872 return -EINVAL;
873
874 mutex_lock(&smp_cpu_state_mutex);
875 get_online_cpus();
876 rc = -EBUSY;
877 if (cpu_online(cpu))
878 goto out;
879 rc = 0;
880 switch (val) {
881 case 0:
882 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
883 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
884 if (!rc)
885 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
886 }
887 break;
888 case 1:
889 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
890 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
891 if (!rc)
892 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
893 }
894 break;
895 default:
896 break;
897 }
898out:
899 put_online_cpus();
900 mutex_unlock(&smp_cpu_state_mutex);
901 return rc ? rc : count;
902}
903static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
904#endif /* CONFIG_HOTPLUG_CPU */
905
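
A hedged sketch of driving the new attribute from user space; the path follows the usual sysdev layout and should be treated as an assumption here:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int set_cpu_configured(int cpu, int on)
	{
		char path[64];
		int fd, ok;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/configure", cpu);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		/* the store routine above accepts exactly "0" or "1" */
		ok = write(fd, on ? "1" : "0", 1) == 1;
		close(fd);
		return ok ? 0 : -1;
	}
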
906static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
907{
908 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
909}
910static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
911
912
913static struct attribute *cpu_common_attrs[] = {
914#ifdef CONFIG_HOTPLUG_CPU
915 &attr_configure.attr,
916#endif
917 &attr_address.attr,
918 NULL,
919};
920
921static struct attribute_group cpu_common_attr_group = {
922 .attrs = cpu_common_attrs,
923};
709 924
710static ssize_t show_capability(struct sys_device *dev, char *buf) 925static ssize_t show_capability(struct sys_device *dev, char *buf)
711{ 926{
@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
750} 965}
751static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); 966static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
752 967
753static struct attribute *cpu_attrs[] = { 968static struct attribute *cpu_online_attrs[] = {
754 &attr_capability.attr, 969 &attr_capability.attr,
755 &attr_idle_count.attr, 970 &attr_idle_count.attr,
756 &attr_idle_time_us.attr, 971 &attr_idle_time_us.attr,
757 NULL, 972 NULL,
758}; 973};
759 974
760static struct attribute_group cpu_attr_group = { 975static struct attribute_group cpu_online_attr_group = {
761 .attrs = cpu_attrs, 976 .attrs = cpu_online_attrs,
762}; 977};
763 978
764static int __cpuinit smp_cpu_notify(struct notifier_block *self, 979static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
778 idle->idle_time = 0; 993 idle->idle_time = 0;
779 idle->idle_count = 0; 994 idle->idle_count = 0;
780 spin_unlock_irq(&idle->lock); 995 spin_unlock_irq(&idle->lock);
781 if (sysfs_create_group(&s->kobj, &cpu_attr_group)) 996 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
782 return NOTIFY_BAD; 997 return NOTIFY_BAD;
783 break; 998 break;
784 case CPU_DEAD: 999 case CPU_DEAD:
785 case CPU_DEAD_FROZEN: 1000 case CPU_DEAD_FROZEN:
786 sysfs_remove_group(&s->kobj, &cpu_attr_group); 1001 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
787 break; 1002 break;
788 } 1003 }
789 return NOTIFY_OK; 1004 return NOTIFY_OK;
@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
793 .notifier_call = smp_cpu_notify, 1008 .notifier_call = smp_cpu_notify,
794}; 1009};
795 1010
1011static int smp_add_present_cpu(int cpu)
1012{
1013 struct cpu *c = &per_cpu(cpu_devices, cpu);
1014 struct sys_device *s = &c->sysdev;
1015 int rc;
1016
1017 c->hotpluggable = 1;
1018 rc = register_cpu(c, cpu);
1019 if (rc)
1020 goto out;
1021 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1022 if (rc)
1023 goto out_cpu;
1024 if (!cpu_online(cpu))
1025 goto out;
1026 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1027 if (!rc)
1028 return 0;
1029 sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1030out_cpu:
1031#ifdef CONFIG_HOTPLUG_CPU
1032 unregister_cpu(c);
1033#endif
1034out:
1035 return rc;
1036}
1037
1038#ifdef CONFIG_HOTPLUG_CPU
1039static ssize_t rescan_store(struct sys_device *dev, const char *buf,
1040 size_t count)
1041{
1042 cpumask_t newcpus;
1043 int cpu;
1044 int rc;
1045
1046 mutex_lock(&smp_cpu_state_mutex);
1047 get_online_cpus();
1048 newcpus = cpu_present_map;
1049 rc = smp_rescan_cpus();
1050 if (rc)
1051 goto out;
1052 cpus_andnot(newcpus, cpu_present_map, newcpus);
1053 for_each_cpu_mask(cpu, newcpus) {
1054 rc = smp_add_present_cpu(cpu);
1055 if (rc)
1056 cpu_clear(cpu, cpu_present_map);
1057 }
1058 rc = 0;
1059out:
1060 put_online_cpus();
1061 mutex_unlock(&smp_cpu_state_mutex);
1062 return rc ? rc : count;
1063}
1064static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
1065#endif /* CONFIG_HOTPLUG_CPU */
1066
796static int __init topology_init(void) 1067static int __init topology_init(void)
797{ 1068{
798 int cpu; 1069 int cpu;
@@ -800,16 +1071,14 @@ static int __init topology_init(void)
800 1071
801 register_cpu_notifier(&smp_cpu_nb); 1072 register_cpu_notifier(&smp_cpu_nb);
802 1073
803 for_each_possible_cpu(cpu) { 1074#ifdef CONFIG_HOTPLUG_CPU
804 struct cpu *c = &per_cpu(cpu_devices, cpu); 1075 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
805 struct sys_device *s = &c->sysdev; 1076 &attr_rescan.attr);
806 1077 if (rc)
807 c->hotpluggable = 1; 1078 return rc;
808 register_cpu(c, cpu); 1079#endif
809 if (!cpu_online(cpu)) 1080 for_each_present_cpu(cpu) {
810 continue; 1081 rc = smp_add_present_cpu(cpu);
811 s = &c->sysdev;
812 rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
813 if (rc) 1082 if (rc)
814 return rc; 1083 return rc;
815 } 1084 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8ed16a83fba7..52b8342c6bf2 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -31,6 +31,7 @@
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/bug.h> 33#include <linux/bug.h>
34#include <linux/utsname.h>
34#include <asm/system.h> 35#include <asm/system.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <asm/io.h> 37#include <asm/io.h>
@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
168 */ 169 */
169void dump_stack(void) 170void dump_stack(void)
170{ 171{
172 printk("CPU: %d %s %s %.*s\n",
173 task_thread_info(current)->cpu, print_tainted(),
174 init_utsname()->release,
175 (int)strcspn(init_utsname()->version, " "),
176 init_utsname()->version);
177 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
178 current->comm, current->pid, current,
179 (void *) current->thread.ksp);
171 show_stack(NULL, NULL); 180 show_stack(NULL, NULL);
172} 181}
173
174EXPORT_SYMBOL(dump_stack); 182EXPORT_SYMBOL(dump_stack);
175 183
176static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 184static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err)
258 console_verbose(); 266 console_verbose();
259 spin_lock_irq(&die_lock); 267 spin_lock_irq(&die_lock);
260 bust_spinlocks(1); 268 bust_spinlocks(1);
261 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 269 printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
262 print_modules(); 270#ifdef CONFIG_PREEMPT
271 printk("PREEMPT ");
272#endif
273#ifdef CONFIG_SMP
274 printk("SMP");
275#endif
276 printk("\n");
263 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); 277 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
264 show_regs(regs); 278 show_regs(regs);
265 bust_spinlocks(0); 279 bust_spinlocks(0);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 849120e3e28a..936159199346 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -17,6 +17,12 @@ ENTRY(_start)
17jiffies = jiffies_64; 17jiffies = jiffies_64;
18#endif 18#endif
19 19
20PHDRS {
21 text PT_LOAD FLAGS(5); /* R_E */
22 data PT_LOAD FLAGS(7); /* RWE */
23 note PT_NOTE FLAGS(0); /* ___ */
24}
25
20SECTIONS 26SECTIONS
21{ 27{
22 . = 0x00000000; 28 . = 0x00000000;
@@ -33,6 +39,9 @@ SECTIONS
33 39
34 _etext = .; /* End of text section */ 40 _etext = .; /* End of text section */
35 41
42 NOTES :text :note
43 BUG_TABLE :text
44
36 RODATA 45 RODATA
37 46
38#ifdef CONFIG_SHARED_KERNEL 47#ifdef CONFIG_SHARED_KERNEL
@@ -49,9 +58,6 @@ SECTIONS
49 __stop___ex_table = .; 58 __stop___ex_table = .;
50 } 59 }
51 60
52 NOTES
53 BUG_TABLE
54
55 .data : { /* Data */ 61 .data : { /* Data */
56 DATA_DATA 62 DATA_DATA
57 CONSTRUCTORS 63 CONSTRUCTORS
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 8d76403fcf89..e41f4008afc5 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
39 _raw_yield(); 39 _raw_yield();
40} 40}
41 41
42void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc) 42void _raw_spin_lock_wait(raw_spinlock_t *lp)
43{ 43{
44 int count = spin_retry; 44 int count = spin_retry;
45 unsigned int cpu = ~smp_processor_id(); 45 unsigned int cpu = ~smp_processor_id();
@@ -53,15 +53,36 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
53 } 53 }
54 if (__raw_spin_is_locked(lp)) 54 if (__raw_spin_is_locked(lp))
55 continue; 55 continue;
56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) { 56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57 lp->owner_pc = pc;
58 return; 57 return;
59 }
60 } 58 }
61} 59}
62EXPORT_SYMBOL(_raw_spin_lock_wait); 60EXPORT_SYMBOL(_raw_spin_lock_wait);
63 61
64int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc) 62void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
63{
64 int count = spin_retry;
65 unsigned int cpu = ~smp_processor_id();
66
67 local_irq_restore(flags);
68 while (1) {
69 if (count-- <= 0) {
70 unsigned int owner = lp->owner_cpu;
71 if (owner != 0)
72 _raw_yield_cpu(~owner);
73 count = spin_retry;
74 }
75 if (__raw_spin_is_locked(lp))
76 continue;
77 local_irq_disable();
78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
79 return;
80 local_irq_restore(flags);
81 }
82}
83EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
84
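
The shape of _raw_spin_lock_wait_flags above, sketched in portable C11 atomics; local_irq_disable() and local_irq_restore() are extern stand-ins for the architecture primitives, and the yield-to-owner heuristic is omitted:

	#include <stdatomic.h>

	extern void local_irq_disable(void);
	extern void local_irq_restore(unsigned long flags);

	void spin_lock_wait_flags(atomic_uint *owner, unsigned int self,
				  unsigned long flags)
	{
		unsigned int expected;

		local_irq_restore(flags);	/* spin with interrupts enabled */
		for (;;) {
			if (atomic_load_explicit(owner, memory_order_relaxed) != 0)
				continue;	/* lock still held, keep spinning */
			local_irq_disable();	/* close the window before the cas */
			expected = 0;
			if (atomic_compare_exchange_strong(owner, &expected, self))
				return;		/* acquired, interrupts stay off */
			local_irq_restore(flags);	/* lost the race, spin again */
		}
	}
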
85int _raw_spin_trylock_retry(raw_spinlock_t *lp)
65{ 86{
66 unsigned int cpu = ~smp_processor_id(); 87 unsigned int cpu = ~smp_processor_id();
67 int count; 88 int count;
@@ -69,10 +90,8 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
69 for (count = spin_retry; count > 0; count--) { 90 for (count = spin_retry; count > 0; count--) {
70 if (__raw_spin_is_locked(lp)) 91 if (__raw_spin_is_locked(lp))
71 continue; 92 continue;
72 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) { 93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
73 lp->owner_pc = pc;
74 return 1; 94 return 1;
75 }
76 } 95 }
77 return 0; 96 return 0;
78} 97}
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 394980b05e6f..880b0ebf894b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -83,7 +83,7 @@ struct dcss_segment {
83}; 83};
84 84
85static DEFINE_MUTEX(dcss_lock); 85static DEFINE_MUTEX(dcss_lock);
86static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); 86static LIST_HEAD(dcss_list);
87static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", 87static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
88 "EW/EN-MIXED" }; 88 "EW/EN-MIXED" };
89 89
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fb9c5a85aa56..79d13a166a3d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
15#include <asm/setup.h> 15#include <asm/setup.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17 17
18unsigned long vmalloc_end;
19EXPORT_SYMBOL(vmalloc_end);
20
21static struct page *vmem_map;
22static DEFINE_MUTEX(vmem_mutex); 18static DEFINE_MUTEX(vmem_mutex);
23 19
24struct memory_segment { 20struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
188 pte_t pte; 184 pte_t pte;
189 int ret = -ENOMEM; 185 int ret = -ENOMEM;
190 186
191 map_start = vmem_map + PFN_DOWN(start); 187 map_start = VMEM_MAP + PFN_DOWN(start);
192 map_end = vmem_map + PFN_DOWN(start + size); 188 map_end = VMEM_MAP + PFN_DOWN(start + size);
193 189
194 start_addr = (unsigned long) map_start & PAGE_MASK; 190 start_addr = (unsigned long) map_start & PAGE_MASK;
195 end_addr = PFN_ALIGN((unsigned long) map_end); 191 end_addr = PFN_ALIGN((unsigned long) map_end);
@@ -240,10 +236,10 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
240{ 236{
241 int ret; 237 int ret;
242 238
243 ret = vmem_add_range(start, size); 239 ret = vmem_add_mem_map(start, size);
244 if (ret) 240 if (ret)
245 return ret; 241 return ret;
246 return vmem_add_mem_map(start, size); 242 return vmem_add_range(start, size);
247} 243}
248 244
249/* 245/*
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
254{ 250{
255 struct memory_segment *tmp; 251 struct memory_segment *tmp;
256 252
257 if (PFN_DOWN(seg->start + seg->size) > max_pfn || 253 if (seg->start + seg->size >= VMALLOC_START ||
258 seg->start + seg->size < seg->start) 254 seg->start + seg->size < seg->start)
259 return -ERANGE; 255 return -ERANGE;
260 256
@@ -357,17 +353,15 @@ out:
357 353
358/* 354/*
359 * map whole physical memory to virtual memory (identity mapping) 355 * map whole physical memory to virtual memory (identity mapping)
 356 * we reserve enough space in the vmalloc area for the vmem map so
 357 * that additional memory segments can be hotplugged.
360 */ 358 */
361void __init vmem_map_init(void) 359void __init vmem_map_init(void)
362{ 360{
363 unsigned long map_size;
364 int i; 361 int i;
365 362
366 map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page); 363 BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
367 vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size); 364 NODE_DATA(0)->node_mem_map = VMEM_MAP;
368 vmem_map = (struct page *) vmalloc_end;
369 NODE_DATA(0)->node_mem_map = vmem_map;
370
371 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) 365 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
372 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size); 366 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
373} 367}
@@ -382,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
382 int i; 376 int i;
383 377
384 mutex_lock(&vmem_mutex); 378 mutex_lock(&vmem_mutex);
385 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 379 for (i = 0; i < MEMORY_CHUNKS; i++) {
386 if (!memory_chunk[i].size) 380 if (!memory_chunk[i].size)
387 continue; 381 continue;
388 seg = kzalloc(sizeof(*seg), GFP_KERNEL); 382 seg = kzalloc(sizeof(*seg), GFP_KERNEL);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 8a70a9edabda..6b658d84d521 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -48,8 +48,6 @@ config CRYPTO_DEV_PADLOCK_SHA
48 If unsure say M. The compiled module will be 48 If unsure say M. The compiled module will be
49 called padlock-sha.ko 49 called padlock-sha.ko
50 50
51source "arch/s390/crypto/Kconfig"
52
53config CRYPTO_DEV_GEODE 51config CRYPTO_DEV_GEODE
54 tristate "Support for the Geode LX AES engine" 52 tristate "Support for the Geode LX AES engine"
55 depends on X86_32 && PCI 53 depends on X86_32 && PCI
@@ -83,6 +81,67 @@ config ZCRYPT_MONOLITHIC
83 that contains all parts of the crypto device driver (ap bus, 81 that contains all parts of the crypto device driver (ap bus,
84 request router and all the card drivers). 82 request router and all the card drivers).
85 83
84config CRYPTO_SHA1_S390
85 tristate "SHA1 digest algorithm"
86 depends on S390
87 select CRYPTO_ALGAPI
88 help
89 This is the s390 hardware accelerated implementation of the
90 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
91
92config CRYPTO_SHA256_S390
93 tristate "SHA256 digest algorithm"
94 depends on S390
95 select CRYPTO_ALGAPI
96 help
97 This is the s390 hardware accelerated implementation of the
98 SHA256 secure hash standard (DFIPS 180-2).
99
100 This version of SHA implements a 256 bit hash with 128 bits of
101 security against collision attacks.
102
103config CRYPTO_DES_S390
104 tristate "DES and Triple DES cipher algorithms"
105 depends on S390
106 select CRYPTO_ALGAPI
107 select CRYPTO_BLKCIPHER
108 help
 109 This is the s390 hardware accelerated implementation of the
110 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
111
112config CRYPTO_AES_S390
113 tristate "AES cipher algorithms"
114 depends on S390
115 select CRYPTO_ALGAPI
116 select CRYPTO_BLKCIPHER
117 help
118 This is the s390 hardware accelerated implementation of the
119 AES cipher algorithms (FIPS-197). AES uses the Rijndael
120 algorithm.
121
122 Rijndael appears to be consistently a very good performer in
123 both hardware and software across a wide range of computing
124 environments regardless of its use in feedback or non-feedback
125 modes. Its key setup time is excellent, and its key agility is
126 good. Rijndael's very low memory requirements make it very well
127 suited for restricted-space environments, in which it also
128 demonstrates excellent performance. Rijndael's operations are
129 among the easiest to defend against power and timing attacks.
130
 131 On s390 the System z9-109 currently only supports a key size
 132 of 128 bits.
133
134config S390_PRNG
135 tristate "Pseudo random number generator device driver"
136 depends on S390
137 default "m"
138 help
139 Select this option if you want to use the s390 pseudo random number
140 generator. The PRNG is part of the cryptographic processor functions
 141 and uses triple-DES to generate secure random numbers in the manner
 142 of the ANSI X9.17 standard. The PRNG is usable via the char device
143 /dev/prandom.
144
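
A hedged sketch of consuming the device the help text mentions; it assumes the node exists with read permission on an s390 machine with the driver loaded:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		int fd = open("/dev/prandom", O_RDONLY);

		if (fd < 0)
			return 1;	/* driver not loaded, or not an s390 machine */
		if (read(fd, buf, sizeof(buf)) == (ssize_t) sizeof(buf)) {
			for (size_t i = 0; i < sizeof(buf); i++)
				printf("%02x", buf[i]);
			putchar('\n');
		}
		close(fd);
		return 0;
	}
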
86config CRYPTO_DEV_HIFN_795X 145config CRYPTO_DEV_HIFN_795X
87 tristate "Driver HIFN 795x crypto accelerator chips" 146 tristate "Driver HIFN 795x crypto accelerator chips"
88 select CRYPTO_DES 147 select CRYPTO_DES
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index be9f22d52fd8..0a89e080b389 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -2,8 +2,8 @@
2# S/390 block devices 2# S/390 block devices
3# 3#
4 4
5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o 5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
6dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o 6dasd_fba_mod-objs := dasd_fba.o
7dasd_diag_mod-objs := dasd_diag.o 7dasd_diag_mod-objs := dasd_diag.o
8dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ 8dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
9 dasd_genhd.o dasd_erp.o 9 dasd_genhd.o dasd_erp.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e6bfce690ca3..1db15f3e5d20 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -48,13 +48,15 @@ MODULE_LICENSE("GPL");
48/* 48/*
49 * SECTION: prototypes for static functions of dasd.c 49 * SECTION: prototypes for static functions of dasd.c
50 */ 50 */
51static int dasd_alloc_queue(struct dasd_device * device); 51static int dasd_alloc_queue(struct dasd_block *);
52static void dasd_setup_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_block *);
53static void dasd_free_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_block *);
54static void dasd_flush_request_queue(struct dasd_device *); 54static void dasd_flush_request_queue(struct dasd_block *);
55static int dasd_flush_ccw_queue(struct dasd_device *, int); 55static int dasd_flush_block_queue(struct dasd_block *);
56static void dasd_tasklet(struct dasd_device *); 56static void dasd_device_tasklet(struct dasd_device *);
57static void dasd_block_tasklet(struct dasd_block *);
57static void do_kick_device(struct work_struct *); 58static void do_kick_device(struct work_struct *);
59static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
58 60
59/* 61/*
60 * SECTION: Operations on the device structure. 62 * SECTION: Operations on the device structure.
@@ -65,26 +67,23 @@ static wait_queue_head_t dasd_flush_wq;
65/* 67/*
66 * Allocate memory for a new device structure. 68 * Allocate memory for a new device structure.
67 */ 69 */
68struct dasd_device * 70struct dasd_device *dasd_alloc_device(void)
69dasd_alloc_device(void)
70{ 71{
71 struct dasd_device *device; 72 struct dasd_device *device;
72 73
73 device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); 74 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
74 if (device == NULL) 75 if (!device)
75 return ERR_PTR(-ENOMEM); 76 return ERR_PTR(-ENOMEM);
76 /* open_count = 0 means device online but not in use */
77 atomic_set(&device->open_count, -1);
78 77
79 /* Get two pages for normal block device operations. */ 78 /* Get two pages for normal block device operations. */
80 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 79 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
81 if (device->ccw_mem == NULL) { 80 if (!device->ccw_mem) {
82 kfree(device); 81 kfree(device);
83 return ERR_PTR(-ENOMEM); 82 return ERR_PTR(-ENOMEM);
84 } 83 }
85 /* Get one page for error recovery. */ 84 /* Get one page for error recovery. */
86 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 85 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
87 if (device->erp_mem == NULL) { 86 if (!device->erp_mem) {
88 free_pages((unsigned long) device->ccw_mem, 1); 87 free_pages((unsigned long) device->ccw_mem, 1);
89 kfree(device); 88 kfree(device);
90 return ERR_PTR(-ENOMEM); 89 return ERR_PTR(-ENOMEM);
@@ -93,10 +92,9 @@ dasd_alloc_device(void)
93 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 92 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
94 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 93 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
95 spin_lock_init(&device->mem_lock); 94 spin_lock_init(&device->mem_lock);
96 spin_lock_init(&device->request_queue_lock); 95 atomic_set(&device->tasklet_scheduled, 0);
97 atomic_set (&device->tasklet_scheduled, 0);
98 tasklet_init(&device->tasklet, 96 tasklet_init(&device->tasklet,
99 (void (*)(unsigned long)) dasd_tasklet, 97 (void (*)(unsigned long)) dasd_device_tasklet,
100 (unsigned long) device); 98 (unsigned long) device);
101 INIT_LIST_HEAD(&device->ccw_queue); 99 INIT_LIST_HEAD(&device->ccw_queue);
102 init_timer(&device->timer); 100 init_timer(&device->timer);
@@ -110,8 +108,7 @@ dasd_alloc_device(void)
110/* 108/*
111 * Free memory of a device structure. 109 * Free memory of a device structure.
112 */ 110 */
113void 111void dasd_free_device(struct dasd_device *device)
114dasd_free_device(struct dasd_device *device)
115{ 112{
116 kfree(device->private); 113 kfree(device->private);
117 free_page((unsigned long) device->erp_mem); 114 free_page((unsigned long) device->erp_mem);
@@ -120,10 +117,42 @@ dasd_free_device(struct dasd_device *device)
120} 117}
121 118
122/* 119/*
120 * Allocate memory for a new device structure.
121 */
122struct dasd_block *dasd_alloc_block(void)
123{
124 struct dasd_block *block;
125
126 block = kzalloc(sizeof(*block), GFP_ATOMIC);
127 if (!block)
128 return ERR_PTR(-ENOMEM);
129 /* open_count = 0 means device online but not in use */
130 atomic_set(&block->open_count, -1);
131
132 spin_lock_init(&block->request_queue_lock);
133 atomic_set(&block->tasklet_scheduled, 0);
134 tasklet_init(&block->tasklet,
135 (void (*)(unsigned long)) dasd_block_tasklet,
136 (unsigned long) block);
137 INIT_LIST_HEAD(&block->ccw_queue);
138 spin_lock_init(&block->queue_lock);
139 init_timer(&block->timer);
140
141 return block;
142}
143
144/*
145 * Free memory of a device structure.
146 */
147void dasd_free_block(struct dasd_block *block)
148{
149 kfree(block);
150}
151
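
This series splits the old dasd_device into a dasd_device/dasd_block pair: device->block may be NULL for devices without a disk, and block->base points back at the device. A hedged sketch of how a discipline would wire the two together; dasd_example_probe() is illustrative, and error handling is minimal:

	#include <linux/err.h>
	#include "dasd_int.h"	/* assumed: the header declaring both structures */

	static int dasd_example_probe(void)
	{
		struct dasd_device *device;
		struct dasd_block *block;

		device = dasd_alloc_device();
		if (IS_ERR(device))
			return PTR_ERR(device);
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			dasd_free_device(device);
			return PTR_ERR(block);
		}
		device->block = block;	/* stays NULL for devices without a disk */
		block->base = device;
		return 0;
	}
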
152/*
123 * Make a new device known to the system. 153 * Make a new device known to the system.
124 */ 154 */
125static int 155static int dasd_state_new_to_known(struct dasd_device *device)
126dasd_state_new_to_known(struct dasd_device *device)
127{ 156{
128 int rc; 157 int rc;
129 158
@@ -133,12 +162,13 @@ dasd_state_new_to_known(struct dasd_device *device)
133 */ 162 */
134 dasd_get_device(device); 163 dasd_get_device(device);
135 164
136 rc = dasd_alloc_queue(device); 165 if (device->block) {
137 if (rc) { 166 rc = dasd_alloc_queue(device->block);
138 dasd_put_device(device); 167 if (rc) {
139 return rc; 168 dasd_put_device(device);
169 return rc;
170 }
140 } 171 }
141
142 device->state = DASD_STATE_KNOWN; 172 device->state = DASD_STATE_KNOWN;
143 return 0; 173 return 0;
144} 174}
@@ -146,21 +176,24 @@ dasd_state_new_to_known(struct dasd_device *device)
146/* 176/*
147 * Let the system forget about a device. 177 * Let the system forget about a device.
148 */ 178 */
149static int 179static int dasd_state_known_to_new(struct dasd_device *device)
150dasd_state_known_to_new(struct dasd_device * device)
151{ 180{
152 /* Disable extended error reporting for this device. */ 181 /* Disable extended error reporting for this device. */
153 dasd_eer_disable(device); 182 dasd_eer_disable(device);
154 /* Forget the discipline information. */ 183 /* Forget the discipline information. */
155 if (device->discipline) 184 if (device->discipline) {
185 if (device->discipline->uncheck_device)
186 device->discipline->uncheck_device(device);
156 module_put(device->discipline->owner); 187 module_put(device->discipline->owner);
188 }
157 device->discipline = NULL; 189 device->discipline = NULL;
158 if (device->base_discipline) 190 if (device->base_discipline)
159 module_put(device->base_discipline->owner); 191 module_put(device->base_discipline->owner);
160 device->base_discipline = NULL; 192 device->base_discipline = NULL;
161 device->state = DASD_STATE_NEW; 193 device->state = DASD_STATE_NEW;
162 194
163 dasd_free_queue(device); 195 if (device->block)
196 dasd_free_queue(device->block);
164 197
165 /* Give up reference we took in dasd_state_new_to_known. */ 198 /* Give up reference we took in dasd_state_new_to_known. */
166 dasd_put_device(device); 199 dasd_put_device(device);
@@ -170,19 +203,19 @@ dasd_state_known_to_new(struct dasd_device * device)
170/* 203/*
171 * Request the irq line for the device. 204 * Request the irq line for the device.
172 */ 205 */
173static int 206static int dasd_state_known_to_basic(struct dasd_device *device)
174dasd_state_known_to_basic(struct dasd_device * device)
175{ 207{
176 int rc; 208 int rc;
177 209
178 /* Allocate and register gendisk structure. */ 210 /* Allocate and register gendisk structure. */
179 rc = dasd_gendisk_alloc(device); 211 if (device->block) {
180 if (rc) 212 rc = dasd_gendisk_alloc(device->block);
181 return rc; 213 if (rc)
182 214 return rc;
215 }
183 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 216 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, 217 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
185 8 * sizeof (long)); 218 8 * sizeof(long));
186 debug_register_view(device->debug_area, &debug_sprintf_view); 219 debug_register_view(device->debug_area, &debug_sprintf_view);
187 debug_set_level(device->debug_area, DBF_WARNING); 220 debug_set_level(device->debug_area, DBF_WARNING);
188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 221 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
@@ -194,16 +227,17 @@ dasd_state_known_to_basic(struct dasd_device * device)
194/* 227/*
195 * Release the irq line for the device. Terminate any running i/o. 228 * Release the irq line for the device. Terminate any running i/o.
196 */ 229 */
197static int 230static int dasd_state_basic_to_known(struct dasd_device *device)
198dasd_state_basic_to_known(struct dasd_device * device)
199{ 231{
200 int rc; 232 int rc;
201 233 if (device->block) {
202 dasd_gendisk_free(device); 234 dasd_gendisk_free(device->block);
203 rc = dasd_flush_ccw_queue(device, 1); 235 dasd_block_clear_timer(device->block);
236 }
237 rc = dasd_flush_device_queue(device);
204 if (rc) 238 if (rc)
205 return rc; 239 return rc;
206 dasd_clear_timer(device); 240 dasd_device_clear_timer(device);
207 241
208 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 242 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
209 if (device->debug_area != NULL) { 243 if (device->debug_area != NULL) {
@@ -228,26 +262,32 @@ dasd_state_basic_to_known(struct dasd_device * device)
228 * In case the analysis returns an error, the device setup is stopped 262 * In case the analysis returns an error, the device setup is stopped
229 * (a fake disk was already added to allow formatting). 263 * (a fake disk was already added to allow formatting).
230 */ 264 */
231static int 265static int dasd_state_basic_to_ready(struct dasd_device *device)
232dasd_state_basic_to_ready(struct dasd_device * device)
233{ 266{
234 int rc; 267 int rc;
268 struct dasd_block *block;
235 269
236 rc = 0; 270 rc = 0;
237 if (device->discipline->do_analysis != NULL) 271 block = device->block;
238 rc = device->discipline->do_analysis(device);
239 if (rc) {
240 if (rc != -EAGAIN)
241 device->state = DASD_STATE_UNFMT;
242 return rc;
243 }
244 /* make disk known with correct capacity */ 272 /* make disk known with correct capacity */
245 dasd_setup_queue(device); 273 if (block) {
246 set_capacity(device->gdp, device->blocks << device->s2b_shift); 274 if (block->base->discipline->do_analysis != NULL)
247 device->state = DASD_STATE_READY; 275 rc = block->base->discipline->do_analysis(block);
248 rc = dasd_scan_partitions(device); 276 if (rc) {
249 if (rc) 277 if (rc != -EAGAIN)
250 device->state = DASD_STATE_BASIC; 278 device->state = DASD_STATE_UNFMT;
279 return rc;
280 }
281 dasd_setup_queue(block);
282 set_capacity(block->gdp,
283 block->blocks << block->s2b_shift);
284 device->state = DASD_STATE_READY;
285 rc = dasd_scan_partitions(block);
286 if (rc)
287 device->state = DASD_STATE_BASIC;
288 } else {
289 device->state = DASD_STATE_READY;
290 }
251 return rc; 291 return rc;
252} 292}
253 293
@@ -256,28 +296,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
256 * Forget format information. Check if the target level is basic 296 * Forget format information. Check if the target level is basic
257 * and if it is create fake disk for formatting. 297 * and if it is create fake disk for formatting.
258 */ 298 */
259static int 299static int dasd_state_ready_to_basic(struct dasd_device *device)
260dasd_state_ready_to_basic(struct dasd_device * device)
261{ 300{
262 int rc; 301 int rc;
263 302
264 rc = dasd_flush_ccw_queue(device, 0);
265 if (rc)
266 return rc;
267 dasd_destroy_partitions(device);
268 dasd_flush_request_queue(device);
269 device->blocks = 0;
270 device->bp_block = 0;
271 device->s2b_shift = 0;
272 device->state = DASD_STATE_BASIC; 303 device->state = DASD_STATE_BASIC;
304 if (device->block) {
305 struct dasd_block *block = device->block;
306 rc = dasd_flush_block_queue(block);
307 if (rc) {
308 device->state = DASD_STATE_READY;
309 return rc;
310 }
311 dasd_destroy_partitions(block);
312 dasd_flush_request_queue(block);
313 block->blocks = 0;
314 block->bp_block = 0;
315 block->s2b_shift = 0;
316 }
273 return 0; 317 return 0;
274} 318}
275 319
276/* 320/*
277 * Back to basic. 321 * Back to basic.
278 */ 322 */
279static int 323static int dasd_state_unfmt_to_basic(struct dasd_device *device)
280dasd_state_unfmt_to_basic(struct dasd_device * device)
281{ 324{
282 device->state = DASD_STATE_BASIC; 325 device->state = DASD_STATE_BASIC;
283 return 0; 326 return 0;
@@ -291,17 +334,31 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
291static int 334static int
292dasd_state_ready_to_online(struct dasd_device * device) 335dasd_state_ready_to_online(struct dasd_device * device)
293{ 336{
337 int rc;
338
339 if (device->discipline->ready_to_online) {
340 rc = device->discipline->ready_to_online(device);
341 if (rc)
342 return rc;
343 }
294 device->state = DASD_STATE_ONLINE; 344 device->state = DASD_STATE_ONLINE;
295 dasd_schedule_bh(device); 345 if (device->block)
346 dasd_schedule_block_bh(device->block);
296 return 0; 347 return 0;
297} 348}
298 349
299/* 350/*
300 * Stop the requeueing of requests again. 351 * Stop the requeueing of requests again.
301 */ 352 */
302static int 353static int dasd_state_online_to_ready(struct dasd_device *device)
303dasd_state_online_to_ready(struct dasd_device * device)
304{ 354{
355 int rc;
356
357 if (device->discipline->online_to_ready) {
358 rc = device->discipline->online_to_ready(device);
359 if (rc)
360 return rc;
361 }
305 device->state = DASD_STATE_READY; 362 device->state = DASD_STATE_READY;
306 return 0; 363 return 0;
307} 364}
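
Both new discipline hooks can veto their transition by returning non-zero. A sketch of how a discipline might wire them up; every my_-prefixed name here is invented for illustration:

static int my_ready_to_online(struct dasd_device *device)
{
        /* discipline setup that must succeed before going online;
         * a non-zero return keeps the device in DASD_STATE_READY */
        return 0;
}

static int my_online_to_ready(struct dasd_device *device)
{
        /* undo whatever my_ready_to_online set up */
        return 0;
}

static struct dasd_discipline my_discipline = {
        .ready_to_online = my_ready_to_online,
        .online_to_ready = my_online_to_ready,
        /* .do_analysis, .start_IO, .term_IO, ... */
};
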
@@ -309,8 +366,7 @@ dasd_state_online_to_ready(struct dasd_device * device)
309/* 366/*
310 * Device startup state changes. 367 * Device startup state changes.
311 */ 368 */
312static int 369static int dasd_increase_state(struct dasd_device *device)
313dasd_increase_state(struct dasd_device *device)
314{ 370{
315 int rc; 371 int rc;
316 372
@@ -345,8 +401,7 @@ dasd_increase_state(struct dasd_device *device)
345/* 401/*
346 * Device shutdown state changes. 402 * Device shutdown state changes.
347 */ 403 */
348static int 404static int dasd_decrease_state(struct dasd_device *device)
349dasd_decrease_state(struct dasd_device *device)
350{ 405{
351 int rc; 406 int rc;
352 407
@@ -381,8 +436,7 @@ dasd_decrease_state(struct dasd_device *device)
381/* 436/*
382 * This is the main startup/shutdown routine. 437 * This is the main startup/shutdown routine.
383 */ 438 */
384static void 439static void dasd_change_state(struct dasd_device *device)
385dasd_change_state(struct dasd_device *device)
386{ 440{
387 int rc; 441 int rc;
388 442
@@ -409,17 +463,15 @@ dasd_change_state(struct dasd_device *device)
409 * dasd_kick_device will schedule a call to do_kick_device to the kernel 463 * dasd_kick_device will schedule a call to do_kick_device to the kernel
410 * event daemon. 464 * event daemon.
411 */ 465 */
412static void 466static void do_kick_device(struct work_struct *work)
413do_kick_device(struct work_struct *work)
414{ 467{
415 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 468 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
416 dasd_change_state(device); 469 dasd_change_state(device);
417 dasd_schedule_bh(device); 470 dasd_schedule_device_bh(device);
418 dasd_put_device(device); 471 dasd_put_device(device);
419} 472}
420 473
421void 474void dasd_kick_device(struct dasd_device *device)
422dasd_kick_device(struct dasd_device *device)
423{ 475{
424 dasd_get_device(device); 476 dasd_get_device(device);
425 /* queue call to dasd_kick_device to the kernel event daemon. */ 477 /* queue call to dasd_kick_device to the kernel event daemon. */
@@ -429,8 +481,7 @@ dasd_kick_device(struct dasd_device *device)
429/* 481/*
430 * Set the target state for a device and start the state change. 482 * Set the target state for a device and start the state change.
431 */ 483 */
432void 484void dasd_set_target_state(struct dasd_device *device, int target)
433dasd_set_target_state(struct dasd_device *device, int target)
434{ 485{
435 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 486 /* If we are in probeonly mode stop at DASD_STATE_READY. */
436 if (dasd_probeonly && target > DASD_STATE_READY) 487 if (dasd_probeonly && target > DASD_STATE_READY)
@@ -447,14 +498,12 @@ dasd_set_target_state(struct dasd_device *device, int target)
447/* 498/*
448 * Enable devices with device numbers in [from..to]. 499 * Enable devices with device numbers in [from..to].
449 */ 500 */
450static inline int 501static inline int _wait_for_device(struct dasd_device *device)
451_wait_for_device(struct dasd_device *device)
452{ 502{
453 return (device->state == device->target); 503 return (device->state == device->target);
454} 504}
455 505
456void 506void dasd_enable_device(struct dasd_device *device)
457dasd_enable_device(struct dasd_device *device)
458{ 507{
459 dasd_set_target_state(device, DASD_STATE_ONLINE); 508 dasd_set_target_state(device, DASD_STATE_ONLINE);
460 if (device->state <= DASD_STATE_KNOWN) 509 if (device->state <= DASD_STATE_KNOWN)
@@ -475,20 +524,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
475/* 524/*
476 * Increments counter in global and local profiling structures. 525 * Increments counter in global and local profiling structures.
477 */ 526 */
478#define dasd_profile_counter(value, counter, device) \ 527#define dasd_profile_counter(value, counter, block) \
479{ \ 528{ \
480 int index; \ 529 int index; \
481 for (index = 0; index < 31 && value >> (2+index); index++); \ 530 for (index = 0; index < 31 && value >> (2+index); index++); \
482 dasd_global_profile.counter[index]++; \ 531 dasd_global_profile.counter[index]++; \
483 device->profile.counter[index]++; \ 532 block->profile.counter[index]++; \
484} 533}
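
The for loop in dasd_profile_counter picks a logarithmic histogram bucket: values 0-3 count in bucket 0, 4-7 in bucket 1, 8-15 in bucket 2, and so on, capped at bucket 31. A standalone user-space sketch of just the index computation (not part of the patch):

#include <stdio.h>

static int dasd_bucket(unsigned long value)
{
        int index;

        /* same loop as the macro: stop at the first shift that empties value */
        for (index = 0; index < 31 && value >> (2 + index); index++)
                ;
        return index;
}

int main(void)
{
        unsigned long samples[] = { 0, 3, 4, 7, 8, 100, 4096 };
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%4lu -> bucket %d\n", samples[i], dasd_bucket(samples[i]));
        return 0;
}
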
485 534
486/* 535/*
487 * Add profiling information for cqr before execution. 536 * Add profiling information for cqr before execution.
488 */ 537 */
489static void 538static void dasd_profile_start(struct dasd_block *block,
490dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 539 struct dasd_ccw_req *cqr,
491 struct request *req) 540 struct request *req)
492{ 541{
493 struct list_head *l; 542 struct list_head *l;
494 unsigned int counter; 543 unsigned int counter;
@@ -498,19 +547,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
498 547
499 /* count the length of the chanq for statistics */ 548 /* count the length of the chanq for statistics */
500 counter = 0; 549 counter = 0;
501 list_for_each(l, &device->ccw_queue) 550 list_for_each(l, &block->ccw_queue)
502 if (++counter >= 31) 551 if (++counter >= 31)
503 break; 552 break;
504 dasd_global_profile.dasd_io_nr_req[counter]++; 553 dasd_global_profile.dasd_io_nr_req[counter]++;
505 device->profile.dasd_io_nr_req[counter]++; 554 block->profile.dasd_io_nr_req[counter]++;
506} 555}
507 556
508/* 557/*
509 * Add profiling information for cqr after execution. 558 * Add profiling information for cqr after execution.
510 */ 559 */
511static void 560static void dasd_profile_end(struct dasd_block *block,
512dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 561 struct dasd_ccw_req *cqr,
513 struct request *req) 562 struct request *req)
514{ 563{
515 long strtime, irqtime, endtime, tottime; /* in microseconds */ 564 long strtime, irqtime, endtime, tottime; /* in microseconds */
516 long tottimeps, sectors; 565 long tottimeps, sectors;
@@ -532,27 +581,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
532 581
533 if (!dasd_global_profile.dasd_io_reqs) 582 if (!dasd_global_profile.dasd_io_reqs)
534 memset(&dasd_global_profile, 0, 583 memset(&dasd_global_profile, 0,
535 sizeof (struct dasd_profile_info_t)); 584 sizeof(struct dasd_profile_info_t));
536 dasd_global_profile.dasd_io_reqs++; 585 dasd_global_profile.dasd_io_reqs++;
537 dasd_global_profile.dasd_io_sects += sectors; 586 dasd_global_profile.dasd_io_sects += sectors;
538 587
539 if (!device->profile.dasd_io_reqs) 588 if (!block->profile.dasd_io_reqs)
540 memset(&device->profile, 0, 589 memset(&block->profile, 0,
541 sizeof (struct dasd_profile_info_t)); 590 sizeof(struct dasd_profile_info_t));
542 device->profile.dasd_io_reqs++; 591 block->profile.dasd_io_reqs++;
543 device->profile.dasd_io_sects += sectors; 592 block->profile.dasd_io_sects += sectors;
544 593
545 dasd_profile_counter(sectors, dasd_io_secs, device); 594 dasd_profile_counter(sectors, dasd_io_secs, block);
546 dasd_profile_counter(tottime, dasd_io_times, device); 595 dasd_profile_counter(tottime, dasd_io_times, block);
547 dasd_profile_counter(tottimeps, dasd_io_timps, device); 596 dasd_profile_counter(tottimeps, dasd_io_timps, block);
548 dasd_profile_counter(strtime, dasd_io_time1, device); 597 dasd_profile_counter(strtime, dasd_io_time1, block);
549 dasd_profile_counter(irqtime, dasd_io_time2, device); 598 dasd_profile_counter(irqtime, dasd_io_time2, block);
550 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device); 599 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
551 dasd_profile_counter(endtime, dasd_io_time3, device); 600 dasd_profile_counter(endtime, dasd_io_time3, block);
552} 601}
553#else 602#else
554#define dasd_profile_start(device, cqr, req) do {} while (0) 603#define dasd_profile_start(block, cqr, req) do {} while (0)
555#define dasd_profile_end(device, cqr, req) do {} while (0) 604#define dasd_profile_end(block, cqr, req) do {} while (0)
556#endif /* CONFIG_DASD_PROFILE */ 605#endif /* CONFIG_DASD_PROFILE */
557 606
558/* 607/*
@@ -562,9 +611,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
562 * memory and 2) dasd_smalloc_request uses the static ccw memory 611 * memory and 2) dasd_smalloc_request uses the static ccw memory
563 * that gets allocated for each device. 612 * that gets allocated for each device.
564 */ 613 */
565struct dasd_ccw_req * 614struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
566dasd_kmalloc_request(char *magic, int cplength, int datasize, 615 int datasize,
567 struct dasd_device * device) 616 struct dasd_device *device)
568{ 617{
569 struct dasd_ccw_req *cqr; 618 struct dasd_ccw_req *cqr;
570 619
@@ -600,9 +649,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize,
600 return cqr; 649 return cqr;
601} 650}
602 651
603struct dasd_ccw_req * 652struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
604dasd_smalloc_request(char *magic, int cplength, int datasize, 653 int datasize,
605 struct dasd_device * device) 654 struct dasd_device *device)
606{ 655{
607 unsigned long flags; 656 unsigned long flags;
608 struct dasd_ccw_req *cqr; 657 struct dasd_ccw_req *cqr;
@@ -649,8 +698,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
649 * idal lists that might have been created by dasd_set_cda and the 698 * idal lists that might have been created by dasd_set_cda and the
650 * struct dasd_ccw_req itself. 699 * struct dasd_ccw_req itself.
651 */ 700 */
652void 701void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
653dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
654{ 702{
655#ifdef CONFIG_64BIT 703#ifdef CONFIG_64BIT
656 struct ccw1 *ccw; 704 struct ccw1 *ccw;
@@ -667,8 +715,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
667 dasd_put_device(device); 715 dasd_put_device(device);
668} 716}
669 717
670void 718void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
671dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
672{ 719{
673 unsigned long flags; 720 unsigned long flags;
674 721
@@ -681,14 +728,13 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
681/* 728/*
682 * Check discipline magic in cqr. 729 * Check discipline magic in cqr.
683 */ 730 */
684static inline int 731static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
685dasd_check_cqr(struct dasd_ccw_req *cqr)
686{ 732{
687 struct dasd_device *device; 733 struct dasd_device *device;
688 734
689 if (cqr == NULL) 735 if (cqr == NULL)
690 return -EINVAL; 736 return -EINVAL;
691 device = cqr->device; 737 device = cqr->startdev;
692 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 738 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
693 DEV_MESSAGE(KERN_WARNING, device, 739 DEV_MESSAGE(KERN_WARNING, device,
694 " dasd_ccw_req 0x%08x magic doesn't match" 740 " dasd_ccw_req 0x%08x magic doesn't match"
@@ -706,8 +752,7 @@ dasd_check_cqr(struct dasd_ccw_req *cqr)
706 * ccw_device_clear can fail if the i/o subsystem 752 * ccw_device_clear can fail if the i/o subsystem
707 * is in a bad mood. 753 * is in a bad mood.
708 */ 754 */
709int 755int dasd_term_IO(struct dasd_ccw_req *cqr)
710dasd_term_IO(struct dasd_ccw_req * cqr)
711{ 756{
712 struct dasd_device *device; 757 struct dasd_device *device;
713 int retries, rc; 758 int retries, rc;
@@ -717,13 +762,13 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
717 if (rc) 762 if (rc)
718 return rc; 763 return rc;
719 retries = 0; 764 retries = 0;
720 device = (struct dasd_device *) cqr->device; 765 device = (struct dasd_device *) cqr->startdev;
721 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 766 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
722 rc = ccw_device_clear(device->cdev, (long) cqr); 767 rc = ccw_device_clear(device->cdev, (long) cqr);
723 switch (rc) { 768 switch (rc) {
724 case 0: /* termination successful */ 769 case 0: /* termination successful */
725 cqr->retries--; 770 cqr->retries--;
726 cqr->status = DASD_CQR_CLEAR; 771 cqr->status = DASD_CQR_CLEAR_PENDING;
727 cqr->stopclk = get_clock(); 772 cqr->stopclk = get_clock();
728 cqr->starttime = 0; 773 cqr->starttime = 0;
729 DBF_DEV_EVENT(DBF_DEBUG, device, 774 DBF_DEV_EVENT(DBF_DEBUG, device,
@@ -753,7 +798,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
753 } 798 }
754 retries++; 799 retries++;
755 } 800 }
756 dasd_schedule_bh(device); 801 dasd_schedule_device_bh(device);
757 return rc; 802 return rc;
758} 803}
759 804
@@ -761,8 +806,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
761 * Start the i/o. This start_IO can fail if the channel is really busy. 806 * Start the i/o. This start_IO can fail if the channel is really busy.
762 * In that case set up a timer to start the request later. 807 * In that case set up a timer to start the request later.
763 */ 808 */
764int 809int dasd_start_IO(struct dasd_ccw_req *cqr)
765dasd_start_IO(struct dasd_ccw_req * cqr)
766{ 810{
767 struct dasd_device *device; 811 struct dasd_device *device;
768 int rc; 812 int rc;
@@ -771,12 +815,12 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
771 rc = dasd_check_cqr(cqr); 815 rc = dasd_check_cqr(cqr);
772 if (rc) 816 if (rc)
773 return rc; 817 return rc;
774 device = (struct dasd_device *) cqr->device; 818 device = (struct dasd_device *) cqr->startdev;
775 if (cqr->retries < 0) { 819 if (cqr->retries < 0) {
776 DEV_MESSAGE(KERN_DEBUG, device, 820 DEV_MESSAGE(KERN_DEBUG, device,
777 "start_IO: request %p (%02x/%i) - no retry left.", 821 "start_IO: request %p (%02x/%i) - no retry left.",
778 cqr, cqr->status, cqr->retries); 822 cqr, cqr->status, cqr->retries);
779 cqr->status = DASD_CQR_FAILED; 823 cqr->status = DASD_CQR_ERROR;
780 return -EIO; 824 return -EIO;
781 } 825 }
782 cqr->startclk = get_clock(); 826 cqr->startclk = get_clock();
@@ -833,8 +877,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
833 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 877 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
834 * DASD_CQR_QUEUED for 2) and 3). 878 * DASD_CQR_QUEUED for 2) and 3).
835 */ 879 */
836static void 880static void dasd_device_timeout(unsigned long ptr)
837dasd_timeout_device(unsigned long ptr)
838{ 881{
839 unsigned long flags; 882 unsigned long flags;
840 struct dasd_device *device; 883 struct dasd_device *device;
@@ -844,14 +887,13 @@ dasd_timeout_device(unsigned long ptr)
844 /* re-activate request queue */ 887 /* re-activate request queue */
845 device->stopped &= ~DASD_STOPPED_PENDING; 888 device->stopped &= ~DASD_STOPPED_PENDING;
846 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 889 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
847 dasd_schedule_bh(device); 890 dasd_schedule_device_bh(device);
848} 891}
849 892
850/* 893/*
851 * Setup timeout for a device in jiffies. 894 * Setup timeout for a device in jiffies.
852 */ 895 */
853void 896void dasd_device_set_timer(struct dasd_device *device, int expires)
854dasd_set_timer(struct dasd_device *device, int expires)
855{ 897{
856 if (expires == 0) { 898 if (expires == 0) {
857 if (timer_pending(&device->timer)) 899 if (timer_pending(&device->timer))
@@ -862,7 +904,7 @@ dasd_set_timer(struct dasd_device *device, int expires)
862 if (mod_timer(&device->timer, jiffies + expires)) 904 if (mod_timer(&device->timer, jiffies + expires))
863 return; 905 return;
864 } 906 }
865 device->timer.function = dasd_timeout_device; 907 device->timer.function = dasd_device_timeout;
866 device->timer.data = (unsigned long) device; 908 device->timer.data = (unsigned long) device;
867 device->timer.expires = jiffies + expires; 909 device->timer.expires = jiffies + expires;
868 add_timer(&device->timer); 910 add_timer(&device->timer);
@@ -871,15 +913,14 @@ dasd_set_timer(struct dasd_device *device, int expires)
871/* 913/*
872 * Clear timeout for a device. 914 * Clear timeout for a device.
873 */ 915 */
874void 916void dasd_device_clear_timer(struct dasd_device *device)
875dasd_clear_timer(struct dasd_device *device)
876{ 917{
877 if (timer_pending(&device->timer)) 918 if (timer_pending(&device->timer))
878 del_timer(&device->timer); 919 del_timer(&device->timer);
879} 920}
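
Callers treat the pair above as a single arm-or-rearm primitive. A short usage sketch; the 50-jiffy value is the same one __dasd_device_start_head below uses for its half-second retry:

        /* retry the queue head in ~0.5s; re-arms via mod_timer if pending */
        dasd_device_set_timer(device, 50);

        /* expires == 0 cancels a pending timer, same effect as
         * dasd_device_clear_timer(device) */
        dasd_device_set_timer(device, 0);
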
880 921
881static void 922static void dasd_handle_killed_request(struct ccw_device *cdev,
882dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) 923 unsigned long intparm)
883{ 924{
884 struct dasd_ccw_req *cqr; 925 struct dasd_ccw_req *cqr;
885 struct dasd_device *device; 926 struct dasd_device *device;
@@ -893,7 +934,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
893 return; 934 return;
894 } 935 }
895 936
896 device = (struct dasd_device *) cqr->device; 937 device = (struct dasd_device *) cqr->startdev;
897 if (device == NULL || 938 if (device == NULL ||
898 device != dasd_device_from_cdev_locked(cdev) || 939 device != dasd_device_from_cdev_locked(cdev) ||
899 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 940 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
@@ -905,46 +946,32 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
905 /* Schedule request to be retried. */ 946 /* Schedule request to be retried. */
906 cqr->status = DASD_CQR_QUEUED; 947 cqr->status = DASD_CQR_QUEUED;
907 948
908 dasd_clear_timer(device); 949 dasd_device_clear_timer(device);
909 dasd_schedule_bh(device); 950 dasd_schedule_device_bh(device);
910 dasd_put_device(device); 951 dasd_put_device(device);
911} 952}
912 953
913static void 954void dasd_generic_handle_state_change(struct dasd_device *device)
914dasd_handle_state_change_pending(struct dasd_device *device)
915{ 955{
916 struct dasd_ccw_req *cqr;
917 struct list_head *l, *n;
918
919 /* First of all start sense subsystem status request. */ 956 /* First of all start sense subsystem status request. */
920 dasd_eer_snss(device); 957 dasd_eer_snss(device);
921 958
922 device->stopped &= ~DASD_STOPPED_PENDING; 959 device->stopped &= ~DASD_STOPPED_PENDING;
923 960 dasd_schedule_device_bh(device);
924 /* restart all 'running' IO on queue */ 961 if (device->block)
925 list_for_each_safe(l, n, &device->ccw_queue) { 962 dasd_schedule_block_bh(device->block);
926 cqr = list_entry(l, struct dasd_ccw_req, list);
927 if (cqr->status == DASD_CQR_IN_IO) {
928 cqr->status = DASD_CQR_QUEUED;
929 }
930 }
931 dasd_clear_timer(device);
932 dasd_schedule_bh(device);
933} 963}
934 964
935/* 965/*
936 * Interrupt handler for "normal" ssch-io based dasd devices. 966 * Interrupt handler for "normal" ssch-io based dasd devices.
937 */ 967 */
938void 968void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
939dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 969 struct irb *irb)
940 struct irb *irb)
941{ 970{
942 struct dasd_ccw_req *cqr, *next; 971 struct dasd_ccw_req *cqr, *next;
943 struct dasd_device *device; 972 struct dasd_device *device;
944 unsigned long long now; 973 unsigned long long now;
945 int expires; 974 int expires;
946 dasd_era_t era;
947 char mask;
948 975
949 if (IS_ERR(irb)) { 976 if (IS_ERR(irb)) {
950 switch (PTR_ERR(irb)) { 977 switch (PTR_ERR(irb)) {
@@ -969,29 +996,25 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
969 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), 996 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
970 (unsigned int) intparm); 997 (unsigned int) intparm);
971 998
972 /* first of all check for state change pending interrupt */ 999 /* check for unsolicited interrupts */
973 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1000 cqr = (struct dasd_ccw_req *) intparm;
974 if ((irb->scsw.dstat & mask) == mask) { 1001 if (!cqr || ((irb->scsw.cc == 1) &&
1002 (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
1003 (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
1004 if (cqr && cqr->status == DASD_CQR_IN_IO)
1005 cqr->status = DASD_CQR_QUEUED;
975 device = dasd_device_from_cdev_locked(cdev); 1006 device = dasd_device_from_cdev_locked(cdev);
976 if (!IS_ERR(device)) { 1007 if (!IS_ERR(device)) {
977 dasd_handle_state_change_pending(device); 1008 dasd_device_clear_timer(device);
1009 device->discipline->handle_unsolicited_interrupt(device,
1010 irb);
978 dasd_put_device(device); 1011 dasd_put_device(device);
979 } 1012 }
980 return; 1013 return;
981 } 1014 }
982 1015
983 cqr = (struct dasd_ccw_req *) intparm; 1016 device = (struct dasd_device *) cqr->startdev;
984 1017 if (!device ||
985 /* check for unsolicited interrupts */
986 if (cqr == NULL) {
987 MESSAGE(KERN_DEBUG,
988 "unsolicited interrupt received: bus_id %s",
989 cdev->dev.bus_id);
990 return;
991 }
992
993 device = (struct dasd_device *) cqr->device;
994 if (device == NULL ||
995 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1018 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
996 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 1019 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
997 cdev->dev.bus_id); 1020 cdev->dev.bus_id);
@@ -999,12 +1022,12 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
999 } 1022 }
1000 1023
1001 /* Check for clear pending */ 1024 /* Check for clear pending */
1002 if (cqr->status == DASD_CQR_CLEAR && 1025 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1003 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1026 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
1004 cqr->status = DASD_CQR_QUEUED; 1027 cqr->status = DASD_CQR_CLEARED;
1005 dasd_clear_timer(device); 1028 dasd_device_clear_timer(device);
1006 wake_up(&dasd_flush_wq); 1029 wake_up(&dasd_flush_wq);
1007 dasd_schedule_bh(device); 1030 dasd_schedule_device_bh(device);
1008 return; 1031 return;
1009 } 1032 }
1010 1033
@@ -1017,277 +1040,170 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1017 } 1040 }
1018 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1041 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
1019 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); 1042 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
1020 1043 next = NULL;
1021 /* Find out the appropriate era_action. */
1022 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
1023 era = dasd_era_fatal;
1024 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1025 irb->scsw.cstat == 0 &&
1026 !irb->esw.esw0.erw.cons)
1027 era = dasd_era_none;
1028 else if (irb->esw.esw0.erw.cons)
1029 era = device->discipline->examine_error(cqr, irb);
1030 else
1031 era = dasd_era_recover;
1032
1033 DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
1034 expires = 0; 1044 expires = 0;
1035 if (era == dasd_era_none) { 1045 if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1036 cqr->status = DASD_CQR_DONE; 1046 irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
1047 /* request was completed successfully */
1048 cqr->status = DASD_CQR_SUCCESS;
1037 cqr->stopclk = now; 1049 cqr->stopclk = now;
1038 /* Start first request on queue if possible -> fast_io. */ 1050 /* Start first request on queue if possible -> fast_io. */
1039 if (cqr->list.next != &device->ccw_queue) { 1051 if (cqr->devlist.next != &device->ccw_queue) {
1040 next = list_entry(cqr->list.next, 1052 next = list_entry(cqr->devlist.next,
1041 struct dasd_ccw_req, list); 1053 struct dasd_ccw_req, devlist);
1042 if ((next->status == DASD_CQR_QUEUED) &&
1043 (!device->stopped)) {
1044 if (device->discipline->start_IO(next) == 0)
1045 expires = next->expires;
1046 else
1047 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1048 "Interrupt fastpath "
1049 "failed!");
1050 }
1051 } 1054 }
1052 } else { /* error */ 1055 } else { /* error */
1053 memcpy(&cqr->irb, irb, sizeof (struct irb)); 1056 memcpy(&cqr->irb, irb, sizeof(struct irb));
1054 if (device->features & DASD_FEATURE_ERPLOG) { 1057 if (device->features & DASD_FEATURE_ERPLOG) {
1055 /* dump sense data */
1056 dasd_log_sense(cqr, irb); 1058 dasd_log_sense(cqr, irb);
1057 } 1059 }
1058 switch (era) { 1060 /* If we have no sense data, or we just don't want complex ERP
1059 case dasd_era_fatal: 1061 * for this request, and we still have retries left, then just
1060 cqr->status = DASD_CQR_FAILED; 1062 * reset this request and retry it in the fastpath
1061 cqr->stopclk = now; 1063 */
1062 break; 1064 if (!(cqr->irb.esw.esw0.erw.cons &&
1063 case dasd_era_recover: 1065 test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
1066 cqr->retries > 0) {
1067 DEV_MESSAGE(KERN_DEBUG, device,
1068 "default ERP in fastpath (%i retries left)",
1069 cqr->retries);
1070 cqr->lpm = LPM_ANYPATH;
1071 cqr->status = DASD_CQR_QUEUED;
1072 next = cqr;
1073 } else
1064 cqr->status = DASD_CQR_ERROR; 1074 cqr->status = DASD_CQR_ERROR;
1065 break; 1075 }
1066 default: 1076 if (next && (next->status == DASD_CQR_QUEUED) &&
1067 BUG(); 1077 (!device->stopped)) {
1068 } 1078 if (device->discipline->start_IO(next) == 0)
1079 expires = next->expires;
1080 else
1081 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1082 "Interrupt fastpath "
1083 "failed!");
1069 } 1084 }
1070 if (expires != 0) 1085 if (expires != 0)
1071 dasd_set_timer(device, expires); 1086 dasd_device_set_timer(device, expires);
1072 else 1087 else
1073 dasd_clear_timer(device); 1088 dasd_device_clear_timer(device);
1074 dasd_schedule_bh(device); 1089 dasd_schedule_device_bh(device);
1075} 1090}
1076 1091
1077/* 1092/*
1078 * posts the buffer_cache about a finalized request 1093 * If we have an error on a dasd_block layer request then we cancel
1094 * and return all further requests from the same dasd_block as well.
1079 */ 1095 */
1080static inline void 1096static void __dasd_device_recovery(struct dasd_device *device,
1081dasd_end_request(struct request *req, int uptodate) 1097 struct dasd_ccw_req *ref_cqr)
1082{ 1098{
1083 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1099 struct list_head *l, *n;
1084 BUG(); 1100 struct dasd_ccw_req *cqr;
1085 add_disk_randomness(req->rq_disk);
1086 end_that_request_last(req, uptodate);
1087}
1088 1101
1089/* 1102 /*
1090 * Process finished error recovery ccw. 1103 * only requeue request that came from the dasd_block layer
1091 */ 1104 */
1092static inline void 1105 if (!ref_cqr->block)
1093__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) 1106 return;
1094{
1095 dasd_erp_fn_t erp_fn;
1096 1107
1097 if (cqr->status == DASD_CQR_DONE) 1108 list_for_each_safe(l, n, &device->ccw_queue) {
1098 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1109 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1099 else 1110 if (cqr->status == DASD_CQR_QUEUED &&
1100 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); 1111 ref_cqr->block == cqr->block) {
1101 erp_fn = device->discipline->erp_postaction(cqr); 1112 cqr->status = DASD_CQR_CLEARED;
1102 erp_fn(cqr); 1113 }
1103} 1114 }
1115};
1104 1116
1105/* 1117/*
1106 * Process ccw request queue. 1118 * Remove those ccw requests from the queue that need to be returned
1119 * to the upper layer.
1107 */ 1120 */
1108static void 1121static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1109__dasd_process_ccw_queue(struct dasd_device * device, 1122 struct list_head *final_queue)
1110 struct list_head *final_queue)
1111{ 1123{
1112 struct list_head *l, *n; 1124 struct list_head *l, *n;
1113 struct dasd_ccw_req *cqr; 1125 struct dasd_ccw_req *cqr;
1114 dasd_erp_fn_t erp_fn;
1115 1126
1116restart:
1117 /* Process request with final status. */ 1127 /* Process request with final status. */
1118 list_for_each_safe(l, n, &device->ccw_queue) { 1128 list_for_each_safe(l, n, &device->ccw_queue) {
1119 cqr = list_entry(l, struct dasd_ccw_req, list); 1129 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1130
1120 /* Stop list processing at the first non-final request. */ 1131 /* Stop list processing at the first non-final request. */
1121 if (cqr->status != DASD_CQR_DONE && 1132 if (cqr->status == DASD_CQR_QUEUED ||
1122 cqr->status != DASD_CQR_FAILED && 1133 cqr->status == DASD_CQR_IN_IO ||
1123 cqr->status != DASD_CQR_ERROR) 1134 cqr->status == DASD_CQR_CLEAR_PENDING)
1124 break; 1135 break;
1125 /* Process requests with DASD_CQR_ERROR */
1126 if (cqr->status == DASD_CQR_ERROR) { 1136 if (cqr->status == DASD_CQR_ERROR) {
1127 if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1137 __dasd_device_recovery(device, cqr);
1128 cqr->status = DASD_CQR_FAILED;
1129 cqr->stopclk = get_clock();
1130 } else {
1131 if (cqr->irb.esw.esw0.erw.cons &&
1132 test_bit(DASD_CQR_FLAGS_USE_ERP,
1133 &cqr->flags)) {
1134 erp_fn = device->discipline->
1135 erp_action(cqr);
1136 erp_fn(cqr);
1137 } else
1138 dasd_default_erp_action(cqr);
1139 }
1140 goto restart;
1141 }
1142
1143 /* First of all call extended error reporting. */
1144 if (dasd_eer_enabled(device) &&
1145 cqr->status == DASD_CQR_FAILED) {
1146 dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
1147
1148 /* restart request */
1149 cqr->status = DASD_CQR_QUEUED;
1150 cqr->retries = 255;
1151 device->stopped |= DASD_STOPPED_QUIESCE;
1152 goto restart;
1153 } 1138 }
1154
1155 /* Process finished ERP request. */
1156 if (cqr->refers) {
1157 __dasd_process_erp(device, cqr);
1158 goto restart;
1159 }
1160
1161 /* Rechain finished requests to final queue */ 1139 /* Rechain finished requests to final queue */
1162 cqr->endclk = get_clock(); 1140 list_move_tail(&cqr->devlist, final_queue);
1163 list_move_tail(&cqr->list, final_queue);
1164 } 1141 }
1165} 1142}
1166 1143
1167static void
1168dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1169{
1170 struct request *req;
1171 struct dasd_device *device;
1172 int status;
1173
1174 req = (struct request *) data;
1175 device = cqr->device;
1176 dasd_profile_end(device, cqr, req);
1177 status = cqr->device->discipline->free_cp(cqr,req);
1178 spin_lock_irq(&device->request_queue_lock);
1179 dasd_end_request(req, status);
1180 spin_unlock_irq(&device->request_queue_lock);
1181}
1182
1183
1184/* 1144/*
1185 * Fetch requests from the block device queue. 1145 * The cqrs from the final queue are returned to the upper layer
1146 * by setting the final request status and calling the callback function.
1186 */ 1147 */
1187static void 1148static void __dasd_device_process_final_queue(struct dasd_device *device,
1188__dasd_process_blk_queue(struct dasd_device * device) 1149 struct list_head *final_queue)
1189{ 1150{
1190 struct request_queue *queue; 1151 struct list_head *l, *n;
1191 struct request *req;
1192 struct dasd_ccw_req *cqr; 1152 struct dasd_ccw_req *cqr;
1193 int nr_queued;
1194
1195 queue = device->request_queue;
1196 /* No queue ? Then there is nothing to do. */
1197 if (queue == NULL)
1198 return;
1199
1200 /*
1201 * We requeue request from the block device queue to the ccw
1202 * queue only in two states. In state DASD_STATE_READY the
1203 * partition detection is done and we need to requeue requests
1204 * for that. State DASD_STATE_ONLINE is normal block device
1205 * operation.
1206 */
1207 if (device->state != DASD_STATE_READY &&
1208 device->state != DASD_STATE_ONLINE)
1209 return;
1210 nr_queued = 0;
1211 /* Now we try to fetch requests from the request queue */
1212 list_for_each_entry(cqr, &device->ccw_queue, list)
1213 if (cqr->status == DASD_CQR_QUEUED)
1214 nr_queued++;
1215 while (!blk_queue_plugged(queue) &&
1216 elv_next_request(queue) &&
1217 nr_queued < DASD_CHANQ_MAX_SIZE) {
1218 req = elv_next_request(queue);
1219 1153
1220 if (device->features & DASD_FEATURE_READONLY && 1154 list_for_each_safe(l, n, final_queue) {
1221 rq_data_dir(req) == WRITE) { 1155 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1222 DBF_DEV_EVENT(DBF_ERR, device, 1156 list_del_init(&cqr->devlist);
1223 "Rejecting write request %p", 1157 if (cqr->block)
1224 req); 1158 spin_lock_bh(&cqr->block->queue_lock);
1225 blkdev_dequeue_request(req); 1159 switch (cqr->status) {
1226 dasd_end_request(req, 0); 1160 case DASD_CQR_SUCCESS:
1227 continue; 1161 cqr->status = DASD_CQR_DONE;
1228 } 1162 break;
1229 if (device->stopped & DASD_STOPPED_DC_EIO) { 1163 case DASD_CQR_ERROR:
1230 blkdev_dequeue_request(req); 1164 cqr->status = DASD_CQR_NEED_ERP;
1231 dasd_end_request(req, 0); 1165 break;
1232 continue; 1166 case DASD_CQR_CLEARED:
1233 } 1167 cqr->status = DASD_CQR_TERMINATED;
1234 cqr = device->discipline->build_cp(device, req); 1168 break;
1235 if (IS_ERR(cqr)) { 1169 default:
1236 if (PTR_ERR(cqr) == -ENOMEM) 1170 DEV_MESSAGE(KERN_ERR, device,
1237 break; /* terminate request queue loop */ 1171 "wrong cqr status in __dasd_process_final_queue "
1238 if (PTR_ERR(cqr) == -EAGAIN) { 1172 "for cqr %p, status %x",
1239 /* 1173 cqr, cqr->status);
1240 * The current request cannot be build right 1174 BUG();
1241 * now, we have to try later. If this request
1242 * is the head-of-queue we stop the device
1243 * for 1/2 second.
1244 */
1245 if (!list_empty(&device->ccw_queue))
1246 break;
1247 device->stopped |= DASD_STOPPED_PENDING;
1248 dasd_set_timer(device, HZ/2);
1249 break;
1250 }
1251 DBF_DEV_EVENT(DBF_ERR, device,
1252 "CCW creation failed (rc=%ld) "
1253 "on request %p",
1254 PTR_ERR(cqr), req);
1255 blkdev_dequeue_request(req);
1256 dasd_end_request(req, 0);
1257 continue;
1258 } 1175 }
1259 cqr->callback = dasd_end_request_cb; 1176 if (cqr->block)
1260 cqr->callback_data = (void *) req; 1177 spin_unlock_bh(&cqr->block->queue_lock);
1261 cqr->status = DASD_CQR_QUEUED; 1178 if (cqr->callback != NULL)
1262 blkdev_dequeue_request(req); 1179 (cqr->callback)(cqr, cqr->callback_data);
1263 list_add_tail(&cqr->list, &device->ccw_queue);
1264 dasd_profile_start(device, cqr, req);
1265 nr_queued++;
1266 } 1180 }
1267} 1181}
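
Taken together, dasd_int_handler and the two queue-processing functions define the request status flow on the device queue. A summary, offered as one reading of the code above rather than an authoritative chart:

/*
 *  QUEUED --start_IO--> IN_IO --clean end irq----> SUCCESS -> DONE
 *    |                    |--error, retries left-> QUEUED (fastpath retry)
 *    |                    |--error, none left----> ERROR  -> NEED_ERP
 *    |                    +--term_IO-------------> CLEAR_PENDING -> CLEARED
 *    +--flush or stopped device------------------------------------> CLEARED
 *                                                  CLEARED -> TERMINATED
 *
 * DONE, NEED_ERP and TERMINATED are assigned in
 * __dasd_device_process_final_queue immediately before the callback runs.
 */
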
1268 1182
1183
1184
1269/* 1185/*
1270 * Take a look at the first request on the ccw queue and check 1186 * Take a look at the first request on the ccw queue and check
1271 * if it reached its expire time. If so, terminate the IO. 1187 * if it reached its expire time. If so, terminate the IO.
1272 */ 1188 */
1273static void 1189static void __dasd_device_check_expire(struct dasd_device *device)
1274__dasd_check_expire(struct dasd_device * device)
1275{ 1190{
1276 struct dasd_ccw_req *cqr; 1191 struct dasd_ccw_req *cqr;
1277 1192
1278 if (list_empty(&device->ccw_queue)) 1193 if (list_empty(&device->ccw_queue))
1279 return; 1194 return;
1280 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1195 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1281 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1196 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1282 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1197 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1283 if (device->discipline->term_IO(cqr) != 0) { 1198 if (device->discipline->term_IO(cqr) != 0) {
1284 /* Hmpf, try again in 5 sec */ 1199 /* Hmpf, try again in 5 sec */
1285 dasd_set_timer(device, 5*HZ);
1286 DEV_MESSAGE(KERN_ERR, device, 1200 DEV_MESSAGE(KERN_ERR, device,
1287 "internal error - timeout (%is) expired " 1201 "internal error - timeout (%is) expired "
1288 "for cqr %p, termination failed, " 1202 "for cqr %p, termination failed, "
1289 "retrying in 5s", 1203 "retrying in 5s",
1290 (cqr->expires/HZ), cqr); 1204 (cqr->expires/HZ), cqr);
1205 cqr->expires += 5*HZ;
1206 dasd_device_set_timer(device, 5*HZ);
1291 } else { 1207 } else {
1292 DEV_MESSAGE(KERN_ERR, device, 1208 DEV_MESSAGE(KERN_ERR, device,
1293 "internal error - timeout (%is) expired " 1209 "internal error - timeout (%is) expired "
@@ -1301,77 +1217,53 @@ __dasd_check_expire(struct dasd_device * device)
1301 * Take a look at the first request on the ccw queue and check 1217 * Take a look at the first request on the ccw queue and check
1302 * if it needs to be started. 1218 * if it needs to be started.
1303 */ 1219 */
1304static void 1220static void __dasd_device_start_head(struct dasd_device *device)
1305__dasd_start_head(struct dasd_device * device)
1306{ 1221{
1307 struct dasd_ccw_req *cqr; 1222 struct dasd_ccw_req *cqr;
1308 int rc; 1223 int rc;
1309 1224
1310 if (list_empty(&device->ccw_queue)) 1225 if (list_empty(&device->ccw_queue))
1311 return; 1226 return;
1312 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1227 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1313 if (cqr->status != DASD_CQR_QUEUED) 1228 if (cqr->status != DASD_CQR_QUEUED)
1314 return; 1229 return;
1315 /* Non-temporary stop condition will trigger fail fast */ 1230 /* when device is stopped, return request to previous layer */
1316 if (device->stopped & ~DASD_STOPPED_PENDING && 1231 if (device->stopped) {
1317 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1232 cqr->status = DASD_CQR_CLEARED;
1318 (!dasd_eer_enabled(device))) { 1233 dasd_schedule_device_bh(device);
1319 cqr->status = DASD_CQR_FAILED;
1320 dasd_schedule_bh(device);
1321 return; 1234 return;
1322 } 1235 }
1323 /* Don't try to start requests if device is stopped */
1324 if (device->stopped)
1325 return;
1326 1236
1327 rc = device->discipline->start_IO(cqr); 1237 rc = device->discipline->start_IO(cqr);
1328 if (rc == 0) 1238 if (rc == 0)
1329 dasd_set_timer(device, cqr->expires); 1239 dasd_device_set_timer(device, cqr->expires);
1330 else if (rc == -EACCES) { 1240 else if (rc == -EACCES) {
1331 dasd_schedule_bh(device); 1241 dasd_schedule_device_bh(device);
1332 } else 1242 } else
1333 /* Hmpf, try again in 1/2 sec */ 1243 /* Hmpf, try again in 1/2 sec */
1334 dasd_set_timer(device, 50); 1244 dasd_device_set_timer(device, 50);
1335}
1336
1337static inline int
1338_wait_for_clear(struct dasd_ccw_req *cqr)
1339{
1340 return (cqr->status == DASD_CQR_QUEUED);
1341} 1245}
1342 1246
1343/* 1247/*
1344 * Remove all requests from the ccw queue (all = '1') or only block device 1248 * Go through all requests on the dasd_device request queue,
1345 * requests in case all = '0'. 1249 * terminate them on the cdev if necessary, and return them to the
1346 * Take care of the erp-chain (chained via cqr->refers) and remove either 1250 * submitting layer via callback.
1347 * the whole erp-chain or none of the erp-requests. 1251 * Note:
1348 * If a request is currently running, term_IO is called and the request 1252 * Make sure that all 'submitting layers' still exist when
1349 * is re-queued. Prior to removing the terminated request we need to wait 1253 * this function is called! In other words, when 'device' is a base
1350 * for the clear-interrupt. 1254 * device, all block layer requests must already have been removed
1351 * In case termination is not possible we stop processing and just finish 1255 * via dasd_flush_block_queue.
1352 * the already moved requests.
1353 */ 1256 */
1354static int 1257int dasd_flush_device_queue(struct dasd_device *device)
1355dasd_flush_ccw_queue(struct dasd_device * device, int all)
1356{ 1258{
1357 struct dasd_ccw_req *cqr, *orig, *n; 1259 struct dasd_ccw_req *cqr, *n;
1358 int rc, i; 1260 int rc;
1359
1360 struct list_head flush_queue; 1261 struct list_head flush_queue;
1361 1262
1362 INIT_LIST_HEAD(&flush_queue); 1263 INIT_LIST_HEAD(&flush_queue);
1363 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1264 spin_lock_irq(get_ccwdev_lock(device->cdev));
1364 rc = 0; 1265 rc = 0;
1365restart: 1266 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1366 list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
1367 /* get original request of erp request-chain */
1368 for (orig = cqr; orig->refers != NULL; orig = orig->refers);
1369
1370 /* Flush all request or only block device requests? */
1371 if (all == 0 && cqr->callback != dasd_end_request_cb &&
1372 orig->callback != dasd_end_request_cb) {
1373 continue;
1374 }
1375 /* Check status and move request to flush_queue */ 1267 /* Check status and move request to flush_queue */
1376 switch (cqr->status) { 1268 switch (cqr->status) {
1377 case DASD_CQR_IN_IO: 1269 case DASD_CQR_IN_IO:
@@ -1387,90 +1279,60 @@ restart:
1387 } 1279 }
1388 break; 1280 break;
1389 case DASD_CQR_QUEUED: 1281 case DASD_CQR_QUEUED:
1390 case DASD_CQR_ERROR:
1391 /* set request to FAILED */
1392 cqr->stopclk = get_clock(); 1282 cqr->stopclk = get_clock();
1393 cqr->status = DASD_CQR_FAILED; 1283 cqr->status = DASD_CQR_CLEARED;
1394 break; 1284 break;
1395 default: /* do not touch the others */ 1285 default: /* no need to modify the others */
1396 break; 1286 break;
1397 } 1287 }
1398 /* Rechain request (including erp chain) */ 1288 list_move_tail(&cqr->devlist, &flush_queue);
1399 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
1400 cqr->endclk = get_clock();
1401 list_move_tail(&cqr->list, &flush_queue);
1402 }
1403 if (i > 1)
1404 /* moved more than one request - need to restart */
1405 goto restart;
1406 } 1289 }
1407
1408finished: 1290finished:
1409 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1291 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1410 /* Now call the callback function of flushed requests */ 1292 /*
1411restart_cb: 1293 * After this point all requests must be in state CLEAR_PENDING,
1412 list_for_each_entry_safe(cqr, n, &flush_queue, list) { 1294 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1413 if (cqr->status == DASD_CQR_CLEAR) { 1295 * one of the others.
1414 /* wait for clear interrupt! */ 1296 */
1415 wait_event(dasd_flush_wq, _wait_for_clear(cqr)); 1297 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1416 cqr->status = DASD_CQR_FAILED; 1298 wait_event(dasd_flush_wq,
1417 } 1299 (cqr->status != DASD_CQR_CLEAR_PENDING));
1418 /* Process finished ERP request. */ 1300 /*
1419 if (cqr->refers) { 1301 * Now set each request back to TERMINATED, DONE or NEED_ERP
1420 __dasd_process_erp(device, cqr); 1302 * and call the callback function of flushed requests
1421 /* restart list_for_xx loop since dasd_process_erp 1303 */
1422 * might remove multiple elements */ 1304 __dasd_device_process_final_queue(device, &flush_queue);
1423 goto restart_cb;
1424 }
1425 /* call the callback function */
1426 cqr->endclk = get_clock();
1427 if (cqr->callback != NULL)
1428 (cqr->callback)(cqr, cqr->callback_data);
1429 }
1430 return rc; 1305 return rc;
1431} 1306}
1432 1307
1433/* 1308/*
1434 * Acquire the device lock and process queues for the device. 1309 * Acquire the device lock and process queues for the device.
1435 */ 1310 */
1436static void 1311static void dasd_device_tasklet(struct dasd_device *device)
1437dasd_tasklet(struct dasd_device * device)
1438{ 1312{
1439 struct list_head final_queue; 1313 struct list_head final_queue;
1440 struct list_head *l, *n;
1441 struct dasd_ccw_req *cqr;
1442 1314
1443 atomic_set (&device->tasklet_scheduled, 0); 1315 atomic_set (&device->tasklet_scheduled, 0);
1444 INIT_LIST_HEAD(&final_queue); 1316 INIT_LIST_HEAD(&final_queue);
1445 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1317 spin_lock_irq(get_ccwdev_lock(device->cdev));
1446 /* Check expire time of first request on the ccw queue. */ 1318 /* Check expire time of first request on the ccw queue. */
1447 __dasd_check_expire(device); 1319 __dasd_device_check_expire(device);
1448 /* Finish off requests on ccw queue */ 1320 /* find final requests on ccw queue */
1449 __dasd_process_ccw_queue(device, &final_queue); 1321 __dasd_device_process_ccw_queue(device, &final_queue);
1450 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1322 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1451 /* Now call the callback function of requests with final status */ 1323 /* Now call the callback function of requests with final status */
1452 list_for_each_safe(l, n, &final_queue) { 1324 __dasd_device_process_final_queue(device, &final_queue);
1453 cqr = list_entry(l, struct dasd_ccw_req, list); 1325 spin_lock_irq(get_ccwdev_lock(device->cdev));
1454 list_del_init(&cqr->list);
1455 if (cqr->callback != NULL)
1456 (cqr->callback)(cqr, cqr->callback_data);
1457 }
1458 spin_lock_irq(&device->request_queue_lock);
1459 spin_lock(get_ccwdev_lock(device->cdev));
1460 /* Get new request from the block device request queue */
1461 __dasd_process_blk_queue(device);
1462 /* Now check if the head of the ccw queue needs to be started. */ 1326 /* Now check if the head of the ccw queue needs to be started. */
1463 __dasd_start_head(device); 1327 __dasd_device_start_head(device);
1464 spin_unlock(get_ccwdev_lock(device->cdev)); 1328 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1465 spin_unlock_irq(&device->request_queue_lock);
1466 dasd_put_device(device); 1329 dasd_put_device(device);
1467} 1330}
1468 1331
1469/* 1332/*
1470 * Schedules a call to dasd_tasklet over the device tasklet. 1333 * Schedules a call to dasd_tasklet over the device tasklet.
1471 */ 1334 */
1472void 1335void dasd_schedule_device_bh(struct dasd_device *device)
1473dasd_schedule_bh(struct dasd_device * device)
1474{ 1336{
1475 /* Protect against rescheduling. */ 1337 /* Protect against rescheduling. */
1476 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 1338 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
@@ -1480,160 +1342,109 @@ dasd_schedule_bh(struct dasd_device * device)
1480} 1342}
1481 1343
1482/* 1344/*
1483 * Queue a request to the head of the ccw_queue. Start the I/O if 1345 * Queue a request to the head of the device ccw_queue.
1484 * possible. 1346 * Start the I/O if possible.
1485 */ 1347 */
1486void 1348void dasd_add_request_head(struct dasd_ccw_req *cqr)
1487dasd_add_request_head(struct dasd_ccw_req *req)
1488{ 1349{
1489 struct dasd_device *device; 1350 struct dasd_device *device;
1490 unsigned long flags; 1351 unsigned long flags;
1491 1352
1492 device = req->device; 1353 device = cqr->startdev;
1493 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1354 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1494 req->status = DASD_CQR_QUEUED; 1355 cqr->status = DASD_CQR_QUEUED;
1495 req->device = device; 1356 list_add(&cqr->devlist, &device->ccw_queue);
1496 list_add(&req->list, &device->ccw_queue);
1497 /* let the bh start the request to keep them in order */ 1357 /* let the bh start the request to keep them in order */
1498 dasd_schedule_bh(device); 1358 dasd_schedule_device_bh(device);
1499 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1359 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1500} 1360}
1501 1361
1502/* 1362/*
1503 * Queue a request to the tail of the ccw_queue. Start the I/O if 1363 * Queue a request to the tail of the device ccw_queue.
1504 * possible. 1364 * Start the I/O if possible.
1505 */ 1365 */
1506void 1366void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1507dasd_add_request_tail(struct dasd_ccw_req *req)
1508{ 1367{
1509 struct dasd_device *device; 1368 struct dasd_device *device;
1510 unsigned long flags; 1369 unsigned long flags;
1511 1370
1512 device = req->device; 1371 device = cqr->startdev;
1513 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1372 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1514 req->status = DASD_CQR_QUEUED; 1373 cqr->status = DASD_CQR_QUEUED;
1515 req->device = device; 1374 list_add_tail(&cqr->devlist, &device->ccw_queue);
1516 list_add_tail(&req->list, &device->ccw_queue);
1517 /* let the bh start the request to keep them in order */ 1375 /* let the bh start the request to keep them in order */
1518 dasd_schedule_bh(device); 1376 dasd_schedule_device_bh(device);
1519 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1377 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1520} 1378}
1521 1379
1522/* 1380/*
1523 * Wakeup callback. 1381 * Wakeup helper for the 'sleep_on' functions.
1524 */ 1382 */
1525static void 1383static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1526dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1527{ 1384{
1528 wake_up((wait_queue_head_t *) data); 1385 wake_up((wait_queue_head_t *) data);
1529} 1386}
1530 1387
1531static inline int 1388static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1532_wait_for_wakeup(struct dasd_ccw_req *cqr)
1533{ 1389{
1534 struct dasd_device *device; 1390 struct dasd_device *device;
1535 int rc; 1391 int rc;
1536 1392
1537 device = cqr->device; 1393 device = cqr->startdev;
1538 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1394 spin_lock_irq(get_ccwdev_lock(device->cdev));
1539 rc = ((cqr->status == DASD_CQR_DONE || 1395 rc = ((cqr->status == DASD_CQR_DONE ||
1540 cqr->status == DASD_CQR_FAILED) && 1396 cqr->status == DASD_CQR_NEED_ERP ||
1541 list_empty(&cqr->list)); 1397 cqr->status == DASD_CQR_TERMINATED) &&
1398 list_empty(&cqr->devlist));
1542 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1399 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1543 return rc; 1400 return rc;
1544} 1401}
1545 1402
1546/* 1403/*
1547 * Attempts to start a special ccw queue and waits for its completion. 1404 * Queue a request to the tail of the device ccw_queue and wait for
1405 * its completion.
1548 */ 1406 */
1549int 1407int dasd_sleep_on(struct dasd_ccw_req *cqr)
1550dasd_sleep_on(struct dasd_ccw_req * cqr)
1551{ 1408{
1552 wait_queue_head_t wait_q; 1409 wait_queue_head_t wait_q;
1553 struct dasd_device *device; 1410 struct dasd_device *device;
1554 int rc; 1411 int rc;
1555 1412
1556 device = cqr->device; 1413 device = cqr->startdev;
1557 spin_lock_irq(get_ccwdev_lock(device->cdev));
1558 1414
1559 init_waitqueue_head (&wait_q); 1415 init_waitqueue_head (&wait_q);
1560 cqr->callback = dasd_wakeup_cb; 1416 cqr->callback = dasd_wakeup_cb;
1561 cqr->callback_data = (void *) &wait_q; 1417 cqr->callback_data = (void *) &wait_q;
1562 cqr->status = DASD_CQR_QUEUED; 1418 dasd_add_request_tail(cqr);
1563 list_add_tail(&cqr->list, &device->ccw_queue);
1564
1565 /* let the bh start the request to keep them in order */
1566 dasd_schedule_bh(device);
1567
1568 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1569
1570 wait_event(wait_q, _wait_for_wakeup(cqr)); 1419 wait_event(wait_q, _wait_for_wakeup(cqr));
1571 1420
1572 /* Request status is either done or failed. */ 1421 /* Request status is either done or failed. */
1573 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 1422 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1574 return rc; 1423 return rc;
1575} 1424}
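
A sketch of the calling convention dasd_sleep_on assumes; my_sync_request and the channel-program details are invented, and only fields this diff actually touches (startdev, retries, expires) are filled in:

static int my_sync_request(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        /* one CCW, no extra data area; the magic comes from the discipline */
        cqr = dasd_smalloc_request(device->discipline->ebcname, 1, 0, device);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        /* ... build the channel program here ... */
        cqr->startdev = device;
        cqr->retries = 2;
        cqr->expires = 10 * HZ;

        rc = dasd_sleep_on(cqr);  /* 0 only if the final status is DASD_CQR_DONE */
        dasd_sfree_request(cqr, device);
        return rc;
}
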
1576 1425
1577/* 1426/*
1578 * Attempts to start a special ccw queue and wait interruptible 1427 * Queue a request to the tail of the device ccw_queue and wait
1579 * for its completion. 1428 * interruptibly for its completion.
1580 */ 1429 */
1581int 1430int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1582dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1583{ 1431{
1584 wait_queue_head_t wait_q; 1432 wait_queue_head_t wait_q;
1585 struct dasd_device *device; 1433 struct dasd_device *device;
1586 int rc, finished; 1434 int rc;
1587
1588 device = cqr->device;
1589 spin_lock_irq(get_ccwdev_lock(device->cdev));
1590 1435
1436 device = cqr->startdev;
1591 init_waitqueue_head (&wait_q); 1437 init_waitqueue_head (&wait_q);
1592 cqr->callback = dasd_wakeup_cb; 1438 cqr->callback = dasd_wakeup_cb;
1593 cqr->callback_data = (void *) &wait_q; 1439 cqr->callback_data = (void *) &wait_q;
1594 cqr->status = DASD_CQR_QUEUED; 1440 dasd_add_request_tail(cqr);
1595 list_add_tail(&cqr->list, &device->ccw_queue); 1441 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1596 1442 if (rc == -ERESTARTSYS) {
1597 /* let the bh start the request to keep them in order */ 1443 dasd_cancel_req(cqr);
1598 dasd_schedule_bh(device); 1444 /* wait (non-interruptible) for final status */
1599 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1445 wait_event(wait_q, _wait_for_wakeup(cqr));
1600
1601 finished = 0;
1602 while (!finished) {
1603 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1604 if (rc != -ERESTARTSYS) {
1605 /* Request is final (done or failed) */
1606 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1607 break;
1608 }
1609 spin_lock_irq(get_ccwdev_lock(device->cdev));
1610 switch (cqr->status) {
1611 case DASD_CQR_IN_IO:
1612 /* terminate running cqr */
1613 if (device->discipline->term_IO) {
1614 cqr->retries = -1;
1615 device->discipline->term_IO(cqr);
1616 /* wait (non-interruptible) for final status
1617 * because signal is still pending */
1618 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1619 wait_event(wait_q, _wait_for_wakeup(cqr));
1620 spin_lock_irq(get_ccwdev_lock(device->cdev));
1621 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1622 finished = 1;
1623 }
1624 break;
1625 case DASD_CQR_QUEUED:
1626 /* request */
1627 list_del_init(&cqr->list);
1628 rc = -EIO;
1629 finished = 1;
1630 break;
1631 default:
1632 /* cqr with 'non-interruptable' status - just wait */
1633 break;
1634 }
1635 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1636 } 1446 }
1447 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1637 return rc; 1448 return rc;
1638} 1449}
1639 1450
@@ -1643,25 +1454,23 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1643 * and be put back to status queued, before the special request is added 1454 * and be put back to status queued, before the special request is added
1644 * to the head of the queue. Then the special request is waited on normally. 1455 * to the head of the queue. Then the special request is waited on normally.
1645 */ 1456 */
1646static inline int 1457static inline int _dasd_term_running_cqr(struct dasd_device *device)
1647_dasd_term_running_cqr(struct dasd_device *device)
1648{ 1458{
1649 struct dasd_ccw_req *cqr; 1459 struct dasd_ccw_req *cqr;
1650 1460
1651 if (list_empty(&device->ccw_queue)) 1461 if (list_empty(&device->ccw_queue))
1652 return 0; 1462 return 0;
1653 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1463 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1654 return device->discipline->term_IO(cqr); 1464 return device->discipline->term_IO(cqr);
1655} 1465}
1656 1466
1657int 1467int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1658dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1659{ 1468{
1660 wait_queue_head_t wait_q; 1469 wait_queue_head_t wait_q;
1661 struct dasd_device *device; 1470 struct dasd_device *device;
1662 int rc; 1471 int rc;
1663 1472
1664 device = cqr->device; 1473 device = cqr->startdev;
1665 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1474 spin_lock_irq(get_ccwdev_lock(device->cdev));
1666 rc = _dasd_term_running_cqr(device); 1475 rc = _dasd_term_running_cqr(device);
1667 if (rc) { 1476 if (rc) {
@@ -1673,17 +1482,17 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1673 cqr->callback = dasd_wakeup_cb; 1482 cqr->callback = dasd_wakeup_cb;
1674 cqr->callback_data = (void *) &wait_q; 1483 cqr->callback_data = (void *) &wait_q;
1675 cqr->status = DASD_CQR_QUEUED; 1484 cqr->status = DASD_CQR_QUEUED;
1676 list_add(&cqr->list, &device->ccw_queue); 1485 list_add(&cqr->devlist, &device->ccw_queue);
1677 1486
1678 /* let the bh start the request to keep them in order */ 1487 /* let the bh start the request to keep them in order */
1679 dasd_schedule_bh(device); 1488 dasd_schedule_device_bh(device);
1680 1489
1681 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1490 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1682 1491
1683 wait_event(wait_q, _wait_for_wakeup(cqr)); 1492 wait_event(wait_q, _wait_for_wakeup(cqr));
1684 1493
1685 /* Request status is either done or failed. */ 1494 /* Request status is either done or failed. */
1686 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 1495 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1687 return rc; 1496 return rc;
1688} 1497}
1689 1498
@@ -1692,11 +1501,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1692 * This is useful to timeout requests. The request will be 1501 * This is useful to timeout requests. The request will be
1693 * terminated if it is currently in i/o. 1502 * terminated if it is currently in i/o.
1694 * Returns 1 if the request has been terminated. 1503 * Returns 1 if the request has been terminated.
1504 * 0 if there was no need to terminate the request (not started yet)
1505 * negative error code if termination failed
1506 * Cancellation of a request is an asynchronous operation! The calling
1507 * function has to wait until the request is properly returned via callback.
1695 */ 1508 */
1696int 1509int dasd_cancel_req(struct dasd_ccw_req *cqr)
1697dasd_cancel_req(struct dasd_ccw_req *cqr)
1698{ 1510{
1699 struct dasd_device *device = cqr->device; 1511 struct dasd_device *device = cqr->startdev;
1700 unsigned long flags; 1512 unsigned long flags;
1701 int rc; 1513 int rc;
1702 1514
@@ -1704,74 +1516,453 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
1704 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1516 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1705 switch (cqr->status) { 1517 switch (cqr->status) {
1706 case DASD_CQR_QUEUED: 1518 case DASD_CQR_QUEUED:
1707 /* request was not started - just set to failed */ 1519 /* request was not started - just set to cleared */
1708 cqr->status = DASD_CQR_FAILED; 1520 cqr->status = DASD_CQR_CLEARED;
1709 break; 1521 break;
1710 case DASD_CQR_IN_IO: 1522 case DASD_CQR_IN_IO:
1711 /* request in IO - terminate IO and release again */ 1523 /* request in IO - terminate IO and release again */
1712 if (device->discipline->term_IO(cqr) != 0) 1524 rc = device->discipline->term_IO(cqr);
1713 /* what to do if unable to terminate ?????? 1525 if (rc) {
1714 e.g. not _IN_IO */ 1526 DEV_MESSAGE(KERN_ERR, device,
1715 cqr->status = DASD_CQR_FAILED; 1527 "dasd_cancel_req is unable "
1716 cqr->stopclk = get_clock(); 1528 "to terminate request %p, rc = %d",
1717 rc = 1; 1529 cqr, rc);
1530 } else {
1531 cqr->stopclk = get_clock();
1532 rc = 1;
1533 }
1718 break; 1534 break;
1719 case DASD_CQR_DONE: 1535 default: /* already finished or clear pending - do nothing */
1720 case DASD_CQR_FAILED:
1721 /* already finished - do nothing */
1722 break; 1536 break;
1723 default: 1537 }
1724 DEV_MESSAGE(KERN_ALERT, device, 1538 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1725 "invalid status %02x in request", 1539 dasd_schedule_device_bh(device);
1726 cqr->status); 1540 return rc;
1541}
1542
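The return-value contract documented above (1 = terminated, 0 = nothing to terminate, negative = termination failed) can be modeled in a few lines of user-space C. The names below are hypothetical stand-ins for the DASD_CQR_* states and for discipline->term_IO(), not kernel identifiers:

#include <stdio.h>

enum cqr_status { CQR_QUEUED, CQR_IN_IO, CQR_DONE };

/* term_io_rc models discipline->term_IO(): 0 on success, <0 on error. */
static int cancel_req(enum cqr_status status, int term_io_rc)
{
	switch (status) {
	case CQR_QUEUED:
		return 0;                 /* not started: nothing to terminate */
	case CQR_IN_IO:
		return term_io_rc ? term_io_rc : 1; /* terminated, or the error */
	default:
		return 0;                 /* already finished: do nothing */
	}
}

int main(void)
{
	printf("queued: %d, in_io ok: %d, in_io error: %d, done: %d\n",
	       cancel_req(CQR_QUEUED, 0), cancel_req(CQR_IN_IO, 0),
	       cancel_req(CQR_IN_IO, -5), cancel_req(CQR_DONE, 0));
	return 0;
}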
1543
1544/*
1545 * SECTION: Operations of the dasd_block layer.
1546 */
1547
1548/*
1549 * Timeout function for dasd_block. This is used when the block layer
1550 * is waiting for something that may not come reliably (e.g. a state
1551 * change interrupt).
1552 */
1553static void dasd_block_timeout(unsigned long ptr)
1554{
1555 unsigned long flags;
1556 struct dasd_block *block;
1557
1558 block = (struct dasd_block *) ptr;
1559 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1560 /* re-activate request queue */
1561 block->base->stopped &= ~DASD_STOPPED_PENDING;
1562 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1563 dasd_schedule_block_bh(block);
1564}
1565
1566/*
1567 * Setup timeout for a dasd_block in jiffies.
1568 */
1569void dasd_block_set_timer(struct dasd_block *block, int expires)
1570{
1571 if (expires == 0) {
1572 if (timer_pending(&block->timer))
1573 del_timer(&block->timer);
1574 return;
1575 }
1576 if (timer_pending(&block->timer)) {
1577 if (mod_timer(&block->timer, jiffies + expires))
1578 return;
1579 }
1580 block->timer.function = dasd_block_timeout;
1581 block->timer.data = (unsigned long) block;
1582 block->timer.expires = jiffies + expires;
1583 add_timer(&block->timer);
1584}
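A representative caller appears later in this same patch, in __dasd_process_request_queue(): when a request cannot be built yet, the device is stopped and the timer restarts processing after half a second (dasd_block_timeout() above clears DASD_STOPPED_PENDING and reschedules the block tasklet):

	spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
	basedev->stopped |= DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
	dasd_block_set_timer(block, HZ/2);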
1585
1586/*
1587 * Clear timeout for a dasd_block.
1588 */
1589void dasd_block_clear_timer(struct dasd_block *block)
1590{
1591 if (timer_pending(&block->timer))
1592 del_timer(&block->timer);
1593}
1594
1595/*
1596 * notifies the buffer cache that a request has been finalized
1597 */
1598static inline void dasd_end_request(struct request *req, int uptodate)
1599{
1600 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1727 BUG(); 1601 BUG();
1602 add_disk_randomness(req->rq_disk);
1603 end_that_request_last(req, uptodate);
1604}
1605
1606/*
1607 * Process finished error recovery ccw.
1608 */
1609static inline void __dasd_block_process_erp(struct dasd_block *block,
1610 struct dasd_ccw_req *cqr)
1611{
1612 dasd_erp_fn_t erp_fn;
1613 struct dasd_device *device = block->base;
1728 1614
1615 if (cqr->status == DASD_CQR_DONE)
1616 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1617 else
1618 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1619 erp_fn = device->discipline->erp_postaction(cqr);
1620 erp_fn(cqr);
1621}
1622
1623/*
1624 * Fetch requests from the block device queue.
1625 */
1626static void __dasd_process_request_queue(struct dasd_block *block)
1627{
1628 struct request_queue *queue;
1629 struct request *req;
1630 struct dasd_ccw_req *cqr;
1631 struct dasd_device *basedev;
1632 unsigned long flags;
1633 queue = block->request_queue;
1634 basedev = block->base;
1635 /* No queue? Then there is nothing to do. */
1636 if (queue == NULL)
1637 return;
1638
1639 /*
1640 * We requeue requests from the block device queue to the ccw
1641 * queue only in two states. In state DASD_STATE_READY the
1642 * partition detection is done and we need to requeue requests
1643 * for that. State DASD_STATE_ONLINE is normal block device
1644 * operation.
1645 */
1646 if (basedev->state < DASD_STATE_READY)
1647 return;
1648 /* Now we try to fetch requests from the request queue */
1649 while (!blk_queue_plugged(queue) &&
1650 elv_next_request(queue)) {
1651
1652 req = elv_next_request(queue);
1653
1654 if (basedev->features & DASD_FEATURE_READONLY &&
1655 rq_data_dir(req) == WRITE) {
1656 DBF_DEV_EVENT(DBF_ERR, basedev,
1657 "Rejecting write request %p",
1658 req);
1659 blkdev_dequeue_request(req);
1660 dasd_end_request(req, 0);
1661 continue;
1662 }
1663 cqr = basedev->discipline->build_cp(basedev, block, req);
1664 if (IS_ERR(cqr)) {
1665 if (PTR_ERR(cqr) == -EBUSY)
1666 break; /* normal end condition */
1667 if (PTR_ERR(cqr) == -ENOMEM)
1668 break; /* terminate request queue loop */
1669 if (PTR_ERR(cqr) == -EAGAIN) {
1670 /*
1671 * The current request cannot be built right
1672 * now; we have to try later. If this request
1673 * is the head-of-queue we stop the device
1674 * for 1/2 second.
1675 */
1676 if (!list_empty(&block->ccw_queue))
1677 break;
1678 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1679 basedev->stopped |= DASD_STOPPED_PENDING;
1680 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1681 dasd_block_set_timer(block, HZ/2);
1682 break;
1683 }
1684 DBF_DEV_EVENT(DBF_ERR, basedev,
1685 "CCW creation failed (rc=%ld) "
1686 "on request %p",
1687 PTR_ERR(cqr), req);
1688 blkdev_dequeue_request(req);
1689 dasd_end_request(req, 0);
1690 continue;
1691 }
1692 /*
1693 * Note: callback is set to dasd_return_cqr_cb in
1694 * __dasd_block_start_head to cover erp requests as well
1695 */
1696 cqr->callback_data = (void *) req;
1697 cqr->status = DASD_CQR_FILLED;
1698 blkdev_dequeue_request(req);
1699 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1700 dasd_profile_start(block, cqr, req);
1701 }
1702}
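The build_cp() return codes handled above follow a small contract. A user-space sketch of that dispatch, with the behaviour paraphrased and a hypothetical helper name (not part of the patch):

#include <errno.h>
#include <stdio.h>

/* Paraphrase of how __dasd_process_request_queue() reacts to each code. */
static const char *build_cp_dispatch(long err)
{
	switch (err) {
	case -EBUSY:  return "stop fetching: normal end condition";
	case -ENOMEM: return "stop fetching: retry on the next tasklet run";
	case -EAGAIN: return "if head-of-queue, stop device for HZ/2; else stop fetching";
	default:      return "dequeue the request and end it with an error";
	}
}

int main(void)
{
	const long codes[] = { -EBUSY, -ENOMEM, -EAGAIN, -EIO };
	for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("%ld: %s\n", codes[i], build_cp_dispatch(codes[i]));
	return 0;
}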
1703
1704static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1705{
1706 struct request *req;
1707 int status;
1708
1709 req = (struct request *) cqr->callback_data;
1710 dasd_profile_end(cqr->block, cqr, req);
1711 status = cqr->memdev->discipline->free_cp(cqr, req);
1712 dasd_end_request(req, status);
1713}
1714
1715/*
1716 * Process ccw request queue.
1717 */
1718static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1719 struct list_head *final_queue)
1720{
1721 struct list_head *l, *n;
1722 struct dasd_ccw_req *cqr;
1723 dasd_erp_fn_t erp_fn;
1724 unsigned long flags;
1725 struct dasd_device *base = block->base;
1726
1727restart:
1728 /* Process request with final status. */
1729 list_for_each_safe(l, n, &block->ccw_queue) {
1730 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1731 if (cqr->status != DASD_CQR_DONE &&
1732 cqr->status != DASD_CQR_FAILED &&
1733 cqr->status != DASD_CQR_NEED_ERP &&
1734 cqr->status != DASD_CQR_TERMINATED)
1735 continue;
1736
1737 if (cqr->status == DASD_CQR_TERMINATED) {
1738 base->discipline->handle_terminated_request(cqr);
1739 goto restart;
1740 }
1741
1742 /* Process requests that may be recovered */
1743 if (cqr->status == DASD_CQR_NEED_ERP) {
1744 if (cqr->irb.esw.esw0.erw.cons &&
1745 test_bit(DASD_CQR_FLAGS_USE_ERP,
1746 &cqr->flags)) {
1747 erp_fn = base->discipline->erp_action(cqr);
1748 erp_fn(cqr);
1749 }
1750 goto restart;
1751 }
1752
1753 /* First of all call extended error reporting. */
1754 if (dasd_eer_enabled(base) &&
1755 cqr->status == DASD_CQR_FAILED) {
1756 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1757
1758 /* restart request */
1759 cqr->status = DASD_CQR_FILLED;
1760 cqr->retries = 255;
1761 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1762 base->stopped |= DASD_STOPPED_QUIESCE;
1763 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1764 flags);
1765 goto restart;
1766 }
1767
1768 /* Process finished ERP request. */
1769 if (cqr->refers) {
1770 __dasd_block_process_erp(block, cqr);
1771 goto restart;
1772 }
1773
1774 /* Rechain finished requests to final queue */
1775 cqr->endclk = get_clock();
1776 list_move_tail(&cqr->blocklist, final_queue);
1777 }
1778}
1779
1780static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1781{
1782 dasd_schedule_block_bh(cqr->block);
1783}
1784
1785static void __dasd_block_start_head(struct dasd_block *block)
1786{
1787 struct dasd_ccw_req *cqr;
1788
1789 if (list_empty(&block->ccw_queue))
1790 return;
1791 /* We always begin with the first requests on the queue, as some
1792 * of the previously started requests have to be enqueued on a
1793 * dasd_device again for error recovery.
1794 */
1795 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1796 if (cqr->status != DASD_CQR_FILLED)
1797 continue;
1798 /* Non-temporary stop condition will trigger fail fast */
1799 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1800 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1801 (!dasd_eer_enabled(block->base))) {
1802 cqr->status = DASD_CQR_FAILED;
1803 dasd_schedule_block_bh(block);
1804 continue;
1805 }
1806 /* Don't try to start requests if device is stopped */
1807 if (block->base->stopped)
1808 return;
1809
1810 /* just a fail-safe check, should not happen */
1811 if (!cqr->startdev)
1812 cqr->startdev = block->base;
1813
1814 /* make sure that the requests we submit find their way back */
1815 cqr->callback = dasd_return_cqr_cb;
1816
1817 dasd_add_request_tail(cqr);
1818 }
1819}
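The fail-fast condition above combines three tests. A compilable model follows; STOPPED_PENDING is a hypothetical stand-in for DASD_STOPPED_PENDING, and 0x08 below is just an arbitrary other stop bit:

#include <stdio.h>

#define STOPPED_PENDING 0x04

/* A FAILFAST request is failed at once when the device is stopped for
 * any reason other than the transient PENDING state and extended error
 * reporting is off. */
static int should_fail_fast(unsigned int stopped, int failfast, int eer_enabled)
{
	return (stopped & ~STOPPED_PENDING) && failfast && !eer_enabled;
}

int main(void)
{
	printf("pending only: %d, quiesced: %d, quiesced+eer: %d\n",
	       should_fail_fast(STOPPED_PENDING, 1, 0),
	       should_fail_fast(0x08, 1, 0),
	       should_fail_fast(0x08, 1, 1));
	return 0;
}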
1820
1821/*
1822 * Central dasd_block layer routine. Takes requests from the generic
1823 * block layer request queue, creates ccw requests, enqueues them on
1824 * a dasd_device and processes ccw requests that have been returned.
1825 */
1826static void dasd_block_tasklet(struct dasd_block *block)
1827{
1828 struct list_head final_queue;
1829 struct list_head *l, *n;
1830 struct dasd_ccw_req *cqr;
1831
1832 atomic_set(&block->tasklet_scheduled, 0);
1833 INIT_LIST_HEAD(&final_queue);
1834 spin_lock(&block->queue_lock);
1835 /* Finish off requests on ccw queue */
1836 __dasd_process_block_ccw_queue(block, &final_queue);
1837 spin_unlock(&block->queue_lock);
1838 /* Now call the callback function of requests with final status */
1839 spin_lock_irq(&block->request_queue_lock);
1840 list_for_each_safe(l, n, &final_queue) {
1841 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1842 list_del_init(&cqr->blocklist);
1843 __dasd_cleanup_cqr(cqr);
1844 }
1845 spin_lock(&block->queue_lock);
1846 /* Get new request from the block device request queue */
1847 __dasd_process_request_queue(block);
1848 /* Now check if the head of the ccw queue needs to be started. */
1849 __dasd_block_start_head(block);
1850 spin_unlock(&block->queue_lock);
1851 spin_unlock_irq(&block->request_queue_lock);
1852 dasd_put_device(block->base);
1853}
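The lock choreography of dasd_block_tasklet() is easy to misread; a summary derived from the code above:

/*
 * 1. block->queue_lock: move cqrs with final status to final_queue.
 * 2. block->request_queue_lock: run __dasd_cleanup_cqr() on them.
 * 3. block->queue_lock, nested inside request_queue_lock: refill the
 *    ccw queue from the block layer and start its head.
 */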
1854
1855static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1856{
1857 wake_up(&dasd_flush_wq);
1858}
1859
1860/*
1861 * Go through all requests on the dasd_block request queue, cancel them
1862 * on the respective dasd_device, and return them to the generic
1863 * block layer.
1864 */
1865static int dasd_flush_block_queue(struct dasd_block *block)
1866{
1867 struct dasd_ccw_req *cqr, *n;
1868 int rc, i;
1869 struct list_head flush_queue;
1870
1871 INIT_LIST_HEAD(&flush_queue);
1872 spin_lock_bh(&block->queue_lock);
1873 rc = 0;
1874restart:
1875 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1876 /* if this request is currently owned by a dasd_device, cancel it */
1877 if (cqr->status >= DASD_CQR_QUEUED)
1878 rc = dasd_cancel_req(cqr);
1879 if (rc < 0)
1880 break;
1881 /* Rechain request (including erp chain) so it won't be
1882 * touched by the dasd_block_tasklet anymore.
1883 * Replace the callback so we notice when the request
1884 * is returned from the dasd_device layer.
1885 */
1886 cqr->callback = _dasd_wake_block_flush_cb;
1887 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1888 list_move_tail(&cqr->blocklist, &flush_queue);
1889 if (i > 1)
1890 /* moved more than one request - need to restart */
1891 goto restart;
1892 }
1893 spin_unlock_bh(&block->queue_lock);
1894 /* Now call the callback function of flushed requests */
1895restart_cb:
1896 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1897 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1898 /* Process finished ERP request. */
1899 if (cqr->refers) {
1900 __dasd_block_process_erp(block, cqr);
1901 /* restart list_for_each_entry_safe loop since __dasd_block_process_erp
1902 * might remove multiple elements */
1903 goto restart_cb;
1904 }
1905 /* call the callback function */
1906 cqr->endclk = get_clock();
1907 list_del_init(&cqr->blocklist);
1908 __dasd_cleanup_cqr(cqr);
1729 } 1909 }
1730 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1731 dasd_schedule_bh(device);
1732 return rc; 1910 return rc;
1733} 1911}
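The restart after moving more than one element deserves a note; the reasoning, reconstructed from the loop above:

/*
 * list_for_each_entry_safe() only pre-fetches the next element. Moving
 * a whole ERP chain (cqr plus its ->refers ancestors) may move that
 * pre-fetched element onto flush_queue as well, so any move of more
 * than one element restarts the scan from the head of block->ccw_queue.
 */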
1734 1912
1735/* 1913/*
1736 * SECTION: Block device operations (request queue, partitions, open, release). 1914 * Schedules a run of the dasd_block tasklet.
1915 */
1916void dasd_schedule_block_bh(struct dasd_block *block)
1917{
1918 /* Protect against rescheduling. */
1919 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1920 return;
1921 /* life cycle of block is bound to its base device */
1922 dasd_get_device(block->base);
1923 tasklet_hi_schedule(&block->tasklet);
1924}
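The atomic_cmpxchg() guard above is the classic schedule-once idiom: only the caller that flips the flag from 0 to 1 takes the device reference and schedules the tasklet. An illustrative user-space model with C11 atomics (not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tasklet_scheduled;

/* Returns true only for the one caller that wins the 0 -> 1 transition. */
static bool try_schedule(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&tasklet_scheduled, &expected, 1);
}

int main(void)
{
	printf("first caller schedules: %d\n", try_schedule());
	printf("second caller skips:    %d\n", !try_schedule());
	return 0;
}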
1925
1926
1927/*
1928 * SECTION: external block device operations
1929 * (request queue handling, open, release, etc.)
1737 */ 1930 */
1738 1931
1739/* 1932/*
1740 * Dasd request queue function. Called from ll_rw_blk.c 1933 * Dasd request queue function. Called from ll_rw_blk.c
1741 */ 1934 */
1742static void 1935static void do_dasd_request(struct request_queue *queue)
1743do_dasd_request(struct request_queue * queue)
1744{ 1936{
1745 struct dasd_device *device; 1937 struct dasd_block *block;
1746 1938
1747 device = (struct dasd_device *) queue->queuedata; 1939 block = queue->queuedata;
1748 spin_lock(get_ccwdev_lock(device->cdev)); 1940 spin_lock(&block->queue_lock);
1749 /* Get new request from the block device request queue */ 1941 /* Get new request from the block device request queue */
1750 __dasd_process_blk_queue(device); 1942 __dasd_process_request_queue(block);
1751 /* Now check if the head of the ccw queue needs to be started. */ 1943 /* Now check if the head of the ccw queue needs to be started. */
1752 __dasd_start_head(device); 1944 __dasd_block_start_head(block);
1753 spin_unlock(get_ccwdev_lock(device->cdev)); 1945 spin_unlock(&block->queue_lock);
1754} 1946}
1755 1947
1756/* 1948/*
1757 * Allocate and initialize request queue and default I/O scheduler. 1949 * Allocate and initialize request queue and default I/O scheduler.
1758 */ 1950 */
1759static int 1951static int dasd_alloc_queue(struct dasd_block *block)
1760dasd_alloc_queue(struct dasd_device * device)
1761{ 1952{
1762 int rc; 1953 int rc;
1763 1954
1764 device->request_queue = blk_init_queue(do_dasd_request, 1955 block->request_queue = blk_init_queue(do_dasd_request,
1765 &device->request_queue_lock); 1956 &block->request_queue_lock);
1766 if (device->request_queue == NULL) 1957 if (block->request_queue == NULL)
1767 return -ENOMEM; 1958 return -ENOMEM;
1768 1959
1769 device->request_queue->queuedata = device; 1960 block->request_queue->queuedata = block;
1770 1961
1771 elevator_exit(device->request_queue->elevator); 1962 elevator_exit(block->request_queue->elevator);
1772 rc = elevator_init(device->request_queue, "deadline"); 1963 rc = elevator_init(block->request_queue, "deadline");
1773 if (rc) { 1964 if (rc) {
1774 blk_cleanup_queue(device->request_queue); 1965 blk_cleanup_queue(block->request_queue);
1775 return rc; 1966 return rc;
1776 } 1967 }
1777 return 0; 1968 return 0;
@@ -1780,79 +1971,76 @@ dasd_alloc_queue(struct dasd_device * device)
1780/* 1971/*
1781 * Allocate and initialize request queue. 1972 * Allocate and initialize request queue.
1782 */ 1973 */
1783static void 1974static void dasd_setup_queue(struct dasd_block *block)
1784dasd_setup_queue(struct dasd_device * device)
1785{ 1975{
1786 int max; 1976 int max;
1787 1977
1788 blk_queue_hardsect_size(device->request_queue, device->bp_block); 1978 blk_queue_hardsect_size(block->request_queue, block->bp_block);
1789 max = device->discipline->max_blocks << device->s2b_shift; 1979 max = block->base->discipline->max_blocks << block->s2b_shift;
1790 blk_queue_max_sectors(device->request_queue, max); 1980 blk_queue_max_sectors(block->request_queue, max);
1791 blk_queue_max_phys_segments(device->request_queue, -1L); 1981 blk_queue_max_phys_segments(block->request_queue, -1L);
1792 blk_queue_max_hw_segments(device->request_queue, -1L); 1982 blk_queue_max_hw_segments(block->request_queue, -1L);
1793 blk_queue_max_segment_size(device->request_queue, -1L); 1983 blk_queue_max_segment_size(block->request_queue, -1L);
1794 blk_queue_segment_boundary(device->request_queue, -1L); 1984 blk_queue_segment_boundary(block->request_queue, -1L);
1795 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL); 1985 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
1796} 1986}
1797 1987
1798/* 1988/*
1799 * Deactivate and free request queue. 1989 * Deactivate and free request queue.
1800 */ 1990 */
1801static void 1991static void dasd_free_queue(struct dasd_block *block)
1802dasd_free_queue(struct dasd_device * device)
1803{ 1992{
1804 if (device->request_queue) { 1993 if (block->request_queue) {
1805 blk_cleanup_queue(device->request_queue); 1994 blk_cleanup_queue(block->request_queue);
1806 device->request_queue = NULL; 1995 block->request_queue = NULL;
1807 } 1996 }
1808} 1997}
1809 1998
1810/* 1999/*
1811 * Flush requests on the request queue. 2000 * Flush requests on the request queue.
1812 */ 2001 */
1813static void 2002static void dasd_flush_request_queue(struct dasd_block *block)
1814dasd_flush_request_queue(struct dasd_device * device)
1815{ 2003{
1816 struct request *req; 2004 struct request *req;
1817 2005
1818 if (!device->request_queue) 2006 if (!block->request_queue)
1819 return; 2007 return;
1820 2008
1821 spin_lock_irq(&device->request_queue_lock); 2009 spin_lock_irq(&block->request_queue_lock);
1822 while ((req = elv_next_request(device->request_queue))) { 2010 while ((req = elv_next_request(block->request_queue))) {
1823 blkdev_dequeue_request(req); 2011 blkdev_dequeue_request(req);
1824 dasd_end_request(req, 0); 2012 dasd_end_request(req, 0);
1825 } 2013 }
1826 spin_unlock_irq(&device->request_queue_lock); 2014 spin_unlock_irq(&block->request_queue_lock);
1827} 2015}
1828 2016
1829static int 2017static int dasd_open(struct inode *inp, struct file *filp)
1830dasd_open(struct inode *inp, struct file *filp)
1831{ 2018{
1832 struct gendisk *disk = inp->i_bdev->bd_disk; 2019 struct gendisk *disk = inp->i_bdev->bd_disk;
1833 struct dasd_device *device = disk->private_data; 2020 struct dasd_block *block = disk->private_data;
2021 struct dasd_device *base = block->base;
1834 int rc; 2022 int rc;
1835 2023
1836 atomic_inc(&device->open_count); 2024 atomic_inc(&block->open_count);
1837 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2025 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
1838 rc = -ENODEV; 2026 rc = -ENODEV;
1839 goto unlock; 2027 goto unlock;
1840 } 2028 }
1841 2029
1842 if (!try_module_get(device->discipline->owner)) { 2030 if (!try_module_get(base->discipline->owner)) {
1843 rc = -EINVAL; 2031 rc = -EINVAL;
1844 goto unlock; 2032 goto unlock;
1845 } 2033 }
1846 2034
1847 if (dasd_probeonly) { 2035 if (dasd_probeonly) {
1848 DEV_MESSAGE(KERN_INFO, device, "%s", 2036 DEV_MESSAGE(KERN_INFO, base, "%s",
1849 "No access to device due to probeonly mode"); 2037 "No access to device due to probeonly mode");
1850 rc = -EPERM; 2038 rc = -EPERM;
1851 goto out; 2039 goto out;
1852 } 2040 }
1853 2041
1854 if (device->state <= DASD_STATE_BASIC) { 2042 if (base->state <= DASD_STATE_BASIC) {
1855 DBF_DEV_EVENT(DBF_ERR, device, " %s", 2043 DBF_DEV_EVENT(DBF_ERR, base, " %s",
1856 " Cannot open unrecognized device"); 2044 " Cannot open unrecognized device");
1857 rc = -ENODEV; 2045 rc = -ENODEV;
1858 goto out; 2046 goto out;
@@ -1861,41 +2049,41 @@ dasd_open(struct inode *inp, struct file *filp)
1861 return 0; 2049 return 0;
1862 2050
1863out: 2051out:
1864 module_put(device->discipline->owner); 2052 module_put(base->discipline->owner);
1865unlock: 2053unlock:
1866 atomic_dec(&device->open_count); 2054 atomic_dec(&block->open_count);
1867 return rc; 2055 return rc;
1868} 2056}
1869 2057
1870static int 2058static int dasd_release(struct inode *inp, struct file *filp)
1871dasd_release(struct inode *inp, struct file *filp)
1872{ 2059{
1873 struct gendisk *disk = inp->i_bdev->bd_disk; 2060 struct gendisk *disk = inp->i_bdev->bd_disk;
1874 struct dasd_device *device = disk->private_data; 2061 struct dasd_block *block = disk->private_data;
1875 2062
1876 atomic_dec(&device->open_count); 2063 atomic_dec(&block->open_count);
1877 module_put(device->discipline->owner); 2064 module_put(block->base->discipline->owner);
1878 return 0; 2065 return 0;
1879} 2066}
1880 2067
1881/* 2068/*
1882 * Return disk geometry. 2069 * Return disk geometry.
1883 */ 2070 */
1884static int 2071static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1885dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1886{ 2072{
1887 struct dasd_device *device; 2073 struct dasd_block *block;
2074 struct dasd_device *base;
1888 2075
1889 device = bdev->bd_disk->private_data; 2076 block = bdev->bd_disk->private_data;
1890 if (!device) 2077 base = block->base;
2078 if (!block)
1891 return -ENODEV; 2079 return -ENODEV;
1892 2080
1893 if (!device->discipline || 2081 if (!base->discipline ||
1894 !device->discipline->fill_geometry) 2082 !base->discipline->fill_geometry)
1895 return -EINVAL; 2083 return -EINVAL;
1896 2084
1897 device->discipline->fill_geometry(device, geo); 2085 base->discipline->fill_geometry(block, geo);
1898 geo->start = get_start_sect(bdev) >> device->s2b_shift; 2086 geo->start = get_start_sect(bdev) >> block->s2b_shift;
1899 return 0; 2087 return 0;
1900} 2088}
1901 2089
@@ -1909,6 +2097,9 @@ dasd_device_operations = {
1909 .getgeo = dasd_getgeo, 2097 .getgeo = dasd_getgeo,
1910}; 2098};
1911 2099
2100/*******************************************************************************
2101 * end of block device operations
2102 */
1912 2103
1913static void 2104static void
1914dasd_exit(void) 2105dasd_exit(void)
@@ -1937,9 +2128,8 @@ dasd_exit(void)
1937 * Initial attempt at a probe function. This can be simplified once 2128 * Initial attempt at a probe function. This can be simplified once
1938 * the other detection code is gone. 2129 * the other detection code is gone.
1939 */ 2130 */
1940int 2131int dasd_generic_probe(struct ccw_device *cdev,
1941dasd_generic_probe (struct ccw_device *cdev, 2132 struct dasd_discipline *discipline)
1942 struct dasd_discipline *discipline)
1943{ 2133{
1944 int ret; 2134 int ret;
1945 2135
@@ -1969,19 +2159,20 @@ dasd_generic_probe (struct ccw_device *cdev,
1969 ret = ccw_device_set_online(cdev); 2159 ret = ccw_device_set_online(cdev);
1970 if (ret) 2160 if (ret)
1971 printk(KERN_WARNING 2161 printk(KERN_WARNING
1972 "dasd_generic_probe: could not initially online " 2162 "dasd_generic_probe: could not initially "
1973 "ccw-device %s\n", cdev->dev.bus_id); 2163 "online ccw-device %s; return code: %d\n",
1974 return ret; 2164 cdev->dev.bus_id, ret);
2165 return 0;
1975} 2166}
1976 2167
1977/* 2168/*
1978 * This will one day be called from a global not_oper handler. 2169 * This will one day be called from a global not_oper handler.
1979 * It is also used by driver_unregister during module unload. 2170 * It is also used by driver_unregister during module unload.
1980 */ 2171 */
1981void 2172void dasd_generic_remove(struct ccw_device *cdev)
1982dasd_generic_remove (struct ccw_device *cdev)
1983{ 2173{
1984 struct dasd_device *device; 2174 struct dasd_device *device;
2175 struct dasd_block *block;
1985 2176
1986 cdev->handler = NULL; 2177 cdev->handler = NULL;
1987 2178
@@ -2001,7 +2192,15 @@ dasd_generic_remove (struct ccw_device *cdev)
2001 */ 2192 */
2002 dasd_set_target_state(device, DASD_STATE_NEW); 2193 dasd_set_target_state(device, DASD_STATE_NEW);
2003 /* dasd_delete_device destroys the device reference. */ 2194 /* dasd_delete_device destroys the device reference. */
2195 block = device->block;
2196 device->block = NULL;
2004 dasd_delete_device(device); 2197 dasd_delete_device(device);
2198 /*
2199 * life cycle of block is bound to the device, so delete it after
2200 * the device has been safely removed
2201 */
2202 if (block)
2203 dasd_free_block(block);
2005} 2204}
2006 2205
2007/* 2206/*
@@ -2009,10 +2208,8 @@ dasd_generic_remove (struct ccw_device *cdev)
2009 * the device is detected for the first time and is supposed to be used 2208 * the device is detected for the first time and is supposed to be used
2010 * or the user has started activation through sysfs. 2209 * or the user has started activation through sysfs.
2011 */ 2210 */
2012int 2211int dasd_generic_set_online(struct ccw_device *cdev,
2013dasd_generic_set_online (struct ccw_device *cdev, 2212 struct dasd_discipline *base_discipline)
2014 struct dasd_discipline *base_discipline)
2015
2016{ 2213{
2017 struct dasd_discipline *discipline; 2214 struct dasd_discipline *discipline;
2018 struct dasd_device *device; 2215 struct dasd_device *device;
@@ -2048,6 +2245,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
2048 device->base_discipline = base_discipline; 2245 device->base_discipline = base_discipline;
2049 device->discipline = discipline; 2246 device->discipline = discipline;
2050 2247
2248 /* check_device will allocate block device if necessary */
2051 rc = discipline->check_device(device); 2249 rc = discipline->check_device(device);
2052 if (rc) { 2250 if (rc) {
2053 printk (KERN_WARNING 2251 printk (KERN_WARNING
@@ -2067,6 +2265,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
2067 cdev->dev.bus_id); 2265 cdev->dev.bus_id);
2068 rc = -ENODEV; 2266 rc = -ENODEV;
2069 dasd_set_target_state(device, DASD_STATE_NEW); 2267 dasd_set_target_state(device, DASD_STATE_NEW);
2268 if (device->block)
2269 dasd_free_block(device->block);
2070 dasd_delete_device(device); 2270 dasd_delete_device(device);
2071 } else 2271 } else
2072 pr_debug("dasd_generic device %s found\n", 2272 pr_debug("dasd_generic device %s found\n",
@@ -2081,10 +2281,10 @@ dasd_generic_set_online (struct ccw_device *cdev,
2081 return rc; 2281 return rc;
2082} 2282}
2083 2283
2084int 2284int dasd_generic_set_offline(struct ccw_device *cdev)
2085dasd_generic_set_offline (struct ccw_device *cdev)
2086{ 2285{
2087 struct dasd_device *device; 2286 struct dasd_device *device;
2287 struct dasd_block *block;
2088 int max_count, open_count; 2288 int max_count, open_count;
2089 2289
2090 device = dasd_device_from_cdev(cdev); 2290 device = dasd_device_from_cdev(cdev);
@@ -2101,30 +2301,39 @@ dasd_generic_set_offline (struct ccw_device *cdev)
2101 * the blkdev_get in dasd_scan_partitions. We are only interested 2301 * the blkdev_get in dasd_scan_partitions. We are only interested
2102 * in the other openers. 2302 * in the other openers.
2103 */ 2303 */
2104 max_count = device->bdev ? 0 : -1; 2304 if (device->block) {
2105 open_count = (int) atomic_read(&device->open_count); 2305 struct dasd_block *block = device->block;
2106 if (open_count > max_count) { 2306 max_count = block->bdev ? 0 : -1;
2107 if (open_count > 0) 2307 open_count = (int) atomic_read(&block->open_count);
2108 printk (KERN_WARNING "Can't offline dasd device with " 2308 if (open_count > max_count) {
2109 "open count = %i.\n", 2309 if (open_count > 0)
2110 open_count); 2310 printk(KERN_WARNING "Can't offline dasd "
2111 else 2311 "device with open count = %i.\n",
2112 printk (KERN_WARNING "%s", 2312 open_count);
2113 "Can't offline dasd device due to internal " 2313 else
2114 "use\n"); 2314 printk(KERN_WARNING "%s",
2115 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2315 "Can't offline dasd device due "
2116 dasd_put_device(device); 2316 "to internal use\n");
2117 return -EBUSY; 2317 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2318 dasd_put_device(device);
2319 return -EBUSY;
2320 }
2118 } 2321 }
2119 dasd_set_target_state(device, DASD_STATE_NEW); 2322 dasd_set_target_state(device, DASD_STATE_NEW);
2120 /* dasd_delete_device destroys the device reference. */ 2323 /* dasd_delete_device destroys the device reference. */
2324 block = device->block;
2325 device->block = NULL;
2121 dasd_delete_device(device); 2326 dasd_delete_device(device);
2122 2327 /*
2328 * life cycle of block is bound to the device, so delete it after
2329 * the device has been safely removed
2330 */
2331 if (block)
2332 dasd_free_block(block);
2123 return 0; 2333 return 0;
2124} 2334}
2125 2335
2126int 2336int dasd_generic_notify(struct ccw_device *cdev, int event)
2127dasd_generic_notify(struct ccw_device *cdev, int event)
2128{ 2337{
2129 struct dasd_device *device; 2338 struct dasd_device *device;
2130 struct dasd_ccw_req *cqr; 2339 struct dasd_ccw_req *cqr;
@@ -2145,27 +2354,22 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
2145 if (device->state < DASD_STATE_BASIC) 2354 if (device->state < DASD_STATE_BASIC)
2146 break; 2355 break;
2147 /* Device is active. We want to keep it. */ 2356 /* Device is active. We want to keep it. */
2148 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) { 2357 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2149 list_for_each_entry(cqr, &device->ccw_queue, list) 2358 if (cqr->status == DASD_CQR_IN_IO) {
2150 if (cqr->status == DASD_CQR_IN_IO) 2359 cqr->status = DASD_CQR_QUEUED;
2151 cqr->status = DASD_CQR_FAILED; 2360 cqr->retries++;
2152 device->stopped |= DASD_STOPPED_DC_EIO; 2361 }
2153 } else { 2362 device->stopped |= DASD_STOPPED_DC_WAIT;
2154 list_for_each_entry(cqr, &device->ccw_queue, list) 2363 dasd_device_clear_timer(device);
2155 if (cqr->status == DASD_CQR_IN_IO) { 2364 dasd_schedule_device_bh(device);
2156 cqr->status = DASD_CQR_QUEUED;
2157 cqr->retries++;
2158 }
2159 device->stopped |= DASD_STOPPED_DC_WAIT;
2160 dasd_set_timer(device, 0);
2161 }
2162 dasd_schedule_bh(device);
2163 ret = 1; 2365 ret = 1;
2164 break; 2366 break;
2165 case CIO_OPER: 2367 case CIO_OPER:
2166 /* FIXME: add a sanity check. */ 2368 /* FIXME: add a sanity check. */
2167 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO); 2369 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2168 dasd_schedule_bh(device); 2370 dasd_schedule_device_bh(device);
2371 if (device->block)
2372 dasd_schedule_block_bh(device->block);
2169 ret = 1; 2373 ret = 1;
2170 break; 2374 break;
2171 } 2375 }
@@ -2195,7 +2399,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2195 ccw->cda = (__u32)(addr_t)rdc_buffer; 2399 ccw->cda = (__u32)(addr_t)rdc_buffer;
2196 ccw->count = rdc_buffer_size; 2400 ccw->count = rdc_buffer_size;
2197 2401
2198 cqr->device = device; 2402 cqr->startdev = device;
2403 cqr->memdev = device;
2199 cqr->expires = 10*HZ; 2404 cqr->expires = 10*HZ;
2200 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2405 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2201 cqr->retries = 2; 2406 cqr->retries = 2;
@@ -2217,13 +2422,12 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2217 return PTR_ERR(cqr); 2422 return PTR_ERR(cqr);
2218 2423
2219 ret = dasd_sleep_on(cqr); 2424 ret = dasd_sleep_on(cqr);
2220 dasd_sfree_request(cqr, cqr->device); 2425 dasd_sfree_request(cqr, cqr->memdev);
2221 return ret; 2426 return ret;
2222} 2427}
2223EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2428EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2224 2429
2225static int __init 2430static int __init dasd_init(void)
2226dasd_init(void)
2227{ 2431{
2228 int rc; 2432 int rc;
2229 2433
@@ -2231,7 +2435,7 @@ dasd_init(void)
2231 init_waitqueue_head(&dasd_flush_wq); 2435 init_waitqueue_head(&dasd_flush_wq);
2232 2436
2233 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 2437 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2234 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); 2438 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2235 if (dasd_debug_area == NULL) { 2439 if (dasd_debug_area == NULL) {
2236 rc = -ENOMEM; 2440 rc = -ENOMEM;
2237 goto failed; 2441 goto failed;
@@ -2277,15 +2481,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2277EXPORT_SYMBOL(dasd_add_request_head); 2481EXPORT_SYMBOL(dasd_add_request_head);
2278EXPORT_SYMBOL(dasd_add_request_tail); 2482EXPORT_SYMBOL(dasd_add_request_tail);
2279EXPORT_SYMBOL(dasd_cancel_req); 2483EXPORT_SYMBOL(dasd_cancel_req);
2280EXPORT_SYMBOL(dasd_clear_timer); 2484EXPORT_SYMBOL(dasd_device_clear_timer);
2485EXPORT_SYMBOL(dasd_block_clear_timer);
2281EXPORT_SYMBOL(dasd_enable_device); 2486EXPORT_SYMBOL(dasd_enable_device);
2282EXPORT_SYMBOL(dasd_int_handler); 2487EXPORT_SYMBOL(dasd_int_handler);
2283EXPORT_SYMBOL(dasd_kfree_request); 2488EXPORT_SYMBOL(dasd_kfree_request);
2284EXPORT_SYMBOL(dasd_kick_device); 2489EXPORT_SYMBOL(dasd_kick_device);
2285EXPORT_SYMBOL(dasd_kmalloc_request); 2490EXPORT_SYMBOL(dasd_kmalloc_request);
2286EXPORT_SYMBOL(dasd_schedule_bh); 2491EXPORT_SYMBOL(dasd_schedule_device_bh);
2492EXPORT_SYMBOL(dasd_schedule_block_bh);
2287EXPORT_SYMBOL(dasd_set_target_state); 2493EXPORT_SYMBOL(dasd_set_target_state);
2288EXPORT_SYMBOL(dasd_set_timer); 2494EXPORT_SYMBOL(dasd_device_set_timer);
2495EXPORT_SYMBOL(dasd_block_set_timer);
2289EXPORT_SYMBOL(dasd_sfree_request); 2496EXPORT_SYMBOL(dasd_sfree_request);
2290EXPORT_SYMBOL(dasd_sleep_on); 2497EXPORT_SYMBOL(dasd_sleep_on);
2291EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2498EXPORT_SYMBOL(dasd_sleep_on_immediatly);
@@ -2299,4 +2506,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
2299EXPORT_SYMBOL_GPL(dasd_generic_notify); 2506EXPORT_SYMBOL_GPL(dasd_generic_notify);
2300EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2507EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2301EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2508EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2302 2509EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2510EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2511EXPORT_SYMBOL_GPL(dasd_alloc_block);
2512EXPORT_SYMBOL_GPL(dasd_free_block);
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
deleted file mode 100644
index 1ddab8991d92..000000000000
--- a/drivers/s390/block/dasd_3370_erp.c
+++ /dev/null
@@ -1,84 +0,0 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_3370_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 */
8
9#define PRINTK_HEADER "dasd_erp(3370)"
10
11#include "dasd_int.h"
12
13
14/*
15 * DASD_3370_ERP_EXAMINE
16 *
17 * DESCRIPTION
18 * Checks only for fatal/no/recover error.
19 * A detailed examination of the sense data is done later outside
20 * the interrupt handler.
21 *
22 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
23 * 'Chapter 7. 3370 Sense Data'.
24 *
25 * RETURN VALUES
26 * dasd_era_none no error
27 * dasd_era_fatal for all fatal (unrecoverable errors)
28 * dasd_era_recover for all others.
29 */
30dasd_era_t
31dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
32{
33 char *sense = irb->ecw;
34
35 /* check for successful execution first */
36 if (irb->scsw.cstat == 0x00 &&
37 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
38 return dasd_era_none;
39 if (sense[0] & 0x80) { /* CMD reject */
40 return dasd_era_fatal;
41 }
42 if (sense[0] & 0x40) { /* Drive offline */
43 return dasd_era_recover;
44 }
45 if (sense[0] & 0x20) { /* Bus out parity */
46 return dasd_era_recover;
47 }
48 if (sense[0] & 0x10) { /* equipment check */
49 if (sense[1] & 0x80) {
50 return dasd_era_fatal;
51 }
52 return dasd_era_recover;
53 }
54 if (sense[0] & 0x08) { /* data check */
55 if (sense[1] & 0x80) {
56 return dasd_era_fatal;
57 }
58 return dasd_era_recover;
59 }
60 if (sense[0] & 0x04) { /* overrun */
61 if (sense[1] & 0x80) {
62 return dasd_era_fatal;
63 }
64 return dasd_era_recover;
65 }
66 if (sense[1] & 0x40) { /* invalid blocksize */
67 return dasd_era_fatal;
68 }
69 if (sense[1] & 0x04) { /* file protected */
70 return dasd_era_recover;
71 }
72 if (sense[1] & 0x01) { /* operation incomplete */
73 return dasd_era_recover;
74 }
75 if (sense[2] & 0x80) { /* check data error */
76 return dasd_era_recover;
77 }
78 if (sense[2] & 0x10) { /* Env. data present */
79 return dasd_era_recover;
80 }
81 /* examine the 24 byte sense data */
82 return dasd_era_recover;
83
84} /* END dasd_3370_erp_examine */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 5b7385e430ea..c361ab69ec00 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -26,158 +26,6 @@ struct DCTL_data {
26 26
27/* 27/*
28 ***************************************************************************** 28 *****************************************************************************
29 * SECTION ERP EXAMINATION
30 *****************************************************************************
31 */
32
33/*
34 * DASD_3990_ERP_EXAMINE_24
35 *
36 * DESCRIPTION
37 * Checks only for fatal (unrecoverable) error.
38 * A detailed examination of the sense data is done later outside
39 * the interrupt handler.
40 *
41 * Each bit configuration leading to an action code 2 (Exit with
42 * programming error or unusual condition indication)
43 * are handled as fatal errors.
44 *
45 * All other configurations are handled as recoverable errors.
46 *
47 * RETURN VALUES
48 * dasd_era_fatal for all fatal (unrecoverable errors)
49 * dasd_era_recover for all others.
50 */
51static dasd_era_t
52dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
53{
54
55 struct dasd_device *device = cqr->device;
56
57 /* check for 'Command Reject' */
58 if ((sense[0] & SNS0_CMD_REJECT) &&
59 (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
60
61 DEV_MESSAGE(KERN_ERR, device, "%s",
62 "EXAMINE 24: Command Reject detected - "
63 "fatal error");
64
65 return dasd_era_fatal;
66 }
67
68 /* check for 'Invalid Track Format' */
69 if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
70 (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
71
72 DEV_MESSAGE(KERN_ERR, device, "%s",
73 "EXAMINE 24: Invalid Track Format detected "
74 "- fatal error");
75
76 return dasd_era_fatal;
77 }
78
79 /* check for 'No Record Found' */
80 if (sense[1] & SNS1_NO_REC_FOUND) {
81
82 /* FIXME: fatal error ?!? */
83 DEV_MESSAGE(KERN_ERR, device,
84 "EXAMINE 24: No Record Found detected %s",
85 device->state <= DASD_STATE_BASIC ?
86 " " : "- fatal error");
87
88 return dasd_era_fatal;
89 }
90
91 /* return recoverable for all others */
92 return dasd_era_recover;
93} /* END dasd_3990_erp_examine_24 */
94
95/*
96 * DASD_3990_ERP_EXAMINE_32
97 *
98 * DESCRIPTION
99 * Checks only for fatal/no/recoverable error.
100 * A detailed examination of the sense data is done later outside
101 * the interrupt handler.
102 *
103 * RETURN VALUES
104 * dasd_era_none no error
105 * dasd_era_fatal for all fatal (unrecoverable errors)
106 * dasd_era_recover for recoverable others.
107 */
108static dasd_era_t
109dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
110{
111
112 struct dasd_device *device = cqr->device;
113
114 switch (sense[25]) {
115 case 0x00:
116 return dasd_era_none;
117
118 case 0x01:
119 DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error");
120
121 return dasd_era_fatal;
122
123 default:
124
125 return dasd_era_recover;
126 }
127
128} /* end dasd_3990_erp_examine_32 */
129
130/*
131 * DASD_3990_ERP_EXAMINE
132 *
133 * DESCRIPTION
134 * Checks only for fatal/no/recover error.
135 * A detailed examination of the sense data is done later outside
136 * the interrupt handler.
137 *
138 * The logic is based on the 'IBM 3990 Storage Control Reference' manual
139 * 'Chapter 7. Error Recovery Procedures'.
140 *
141 * RETURN VALUES
142 * dasd_era_none no error
143 * dasd_era_fatal for all fatal (unrecoverable errors)
144 * dasd_era_recover for all others.
145 */
146dasd_era_t
147dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
148{
149
150 char *sense = irb->ecw;
151 dasd_era_t era = dasd_era_recover;
152 struct dasd_device *device = cqr->device;
153
154 /* check for successful execution first */
155 if (irb->scsw.cstat == 0x00 &&
156 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
157 return dasd_era_none;
158
159 /* distinguish between 24 and 32 byte sense data */
160 if (sense[27] & DASD_SENSE_BIT_0) {
161
162 era = dasd_3990_erp_examine_24(cqr, sense);
163
164 } else {
165
166 era = dasd_3990_erp_examine_32(cqr, sense);
167
168 }
169
170 /* log the erp chain if fatal error occurred */
171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
172 dasd_log_sense(cqr, irb);
173 }
174
175 return era;
176
177} /* END dasd_3990_erp_examine */
178
179/*
180 *****************************************************************************
181 * SECTION ERP HANDLING 29 * SECTION ERP HANDLING
182 ***************************************************************************** 30 *****************************************************************************
183 */ 31 */
@@ -206,7 +54,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
206{ 54{
207 struct dasd_ccw_req *cqr = erp->refers; 55 struct dasd_ccw_req *cqr = erp->refers;
208 56
209 dasd_free_erp_request(erp, erp->device); 57 dasd_free_erp_request(erp, erp->memdev);
210 cqr->status = final_status; 58 cqr->status = final_status;
211 return cqr; 59 return cqr;
212 60
@@ -224,15 +72,17 @@ static void
224dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires) 72dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
225{ 73{
226 74
227 struct dasd_device *device = erp->device; 75 struct dasd_device *device = erp->startdev;
76 unsigned long flags;
228 77
229 DEV_MESSAGE(KERN_INFO, device, 78 DEV_MESSAGE(KERN_INFO, device,
230 "blocking request queue for %is", expires/HZ); 79 "blocking request queue for %is", expires/HZ);
231 80
81 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
232 device->stopped |= DASD_STOPPED_PENDING; 82 device->stopped |= DASD_STOPPED_PENDING;
233 erp->status = DASD_CQR_QUEUED; 83 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
234 84 erp->status = DASD_CQR_FILLED;
235 dasd_set_timer(device, expires); 85 dasd_block_set_timer(device->block, expires);
236} 86}
237 87
238/* 88/*
@@ -251,7 +101,7 @@ static struct dasd_ccw_req *
251dasd_3990_erp_int_req(struct dasd_ccw_req * erp) 101dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
252{ 102{
253 103
254 struct dasd_device *device = erp->device; 104 struct dasd_device *device = erp->startdev;
255 105
256 /* first time set initial retry counter and erp_function */ 106 /* first time set initial retry counter and erp_function */
257 /* and retry once without blocking queue */ 107 /* and retry once without blocking queue */
@@ -292,11 +142,14 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
292static void 142static void
293dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) 143dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
294{ 144{
295 struct dasd_device *device = erp->device; 145 struct dasd_device *device = erp->startdev;
296 __u8 opm; 146 __u8 opm;
147 unsigned long flags;
297 148
298 /* try alternate valid path */ 149 /* try alternate valid path */
150 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
299 opm = ccw_device_get_path_mask(device->cdev); 151 opm = ccw_device_get_path_mask(device->cdev);
152 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
300 //FIXME: start with get_opm ? 153 //FIXME: start with get_opm ?
301 if (erp->lpm == 0) 154 if (erp->lpm == 0)
302 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); 155 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
@@ -309,9 +162,8 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
309 "try alternate lpm=%x (lpum=%x / opm=%x)", 162 "try alternate lpm=%x (lpum=%x / opm=%x)",
310 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm); 163 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
311 164
312 /* reset status to queued to handle the request again... */ 165 /* reset status to submit the request again... */
313 if (erp->status > DASD_CQR_QUEUED) 166 erp->status = DASD_CQR_FILLED;
314 erp->status = DASD_CQR_QUEUED;
315 erp->retries = 1; 167 erp->retries = 1;
316 } else { 168 } else {
317 DEV_MESSAGE(KERN_ERR, device, 169 DEV_MESSAGE(KERN_ERR, device,
@@ -320,8 +172,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
320 erp->irb.esw.esw0.sublog.lpum, opm); 172 erp->irb.esw.esw0.sublog.lpum, opm);
321 173
322 /* post request with permanent error */ 174 /* post request with permanent error */
323 if (erp->status > DASD_CQR_QUEUED) 175 erp->status = DASD_CQR_FAILED;
324 erp->status = DASD_CQR_FAILED;
325 } 176 }
326} /* end dasd_3990_erp_alternate_path */ 177} /* end dasd_3990_erp_alternate_path */
327 178
@@ -344,14 +195,14 @@ static struct dasd_ccw_req *
344dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) 195dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
345{ 196{
346 197
347 struct dasd_device *device = erp->device; 198 struct dasd_device *device = erp->startdev;
348 struct DCTL_data *DCTL_data; 199 struct DCTL_data *DCTL_data;
349 struct ccw1 *ccw; 200 struct ccw1 *ccw;
350 struct dasd_ccw_req *dctl_cqr; 201 struct dasd_ccw_req *dctl_cqr;
351 202
352 dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1, 203 dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
353 sizeof (struct DCTL_data), 204 sizeof(struct DCTL_data),
354 erp->device); 205 device);
355 if (IS_ERR(dctl_cqr)) { 206 if (IS_ERR(dctl_cqr)) {
356 DEV_MESSAGE(KERN_ERR, device, "%s", 207 DEV_MESSAGE(KERN_ERR, device, "%s",
357 "Unable to allocate DCTL-CQR"); 208 "Unable to allocate DCTL-CQR");
@@ -365,13 +216,14 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
365 DCTL_data->modifier = modifier; 216 DCTL_data->modifier = modifier;
366 217
367 ccw = dctl_cqr->cpaddr; 218 ccw = dctl_cqr->cpaddr;
368 memset(ccw, 0, sizeof (struct ccw1)); 219 memset(ccw, 0, sizeof(struct ccw1));
369 ccw->cmd_code = CCW_CMD_DCTL; 220 ccw->cmd_code = CCW_CMD_DCTL;
370 ccw->count = 4; 221 ccw->count = 4;
371 ccw->cda = (__u32)(addr_t) DCTL_data; 222 ccw->cda = (__u32)(addr_t) DCTL_data;
372 dctl_cqr->function = dasd_3990_erp_DCTL; 223 dctl_cqr->function = dasd_3990_erp_DCTL;
373 dctl_cqr->refers = erp; 224 dctl_cqr->refers = erp;
374 dctl_cqr->device = erp->device; 225 dctl_cqr->startdev = device;
226 dctl_cqr->memdev = device;
375 dctl_cqr->magic = erp->magic; 227 dctl_cqr->magic = erp->magic;
376 dctl_cqr->expires = 5 * 60 * HZ; 228 dctl_cqr->expires = 5 * 60 * HZ;
377 dctl_cqr->retries = 2; 229 dctl_cqr->retries = 2;
@@ -435,7 +287,7 @@ static struct dasd_ccw_req *
435dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) 287dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
436{ 288{
437 289
438 struct dasd_device *device = erp->device; 290 struct dasd_device *device = erp->startdev;
439 291
440 /* first time set initial retry counter and erp_function */ 292 /* first time set initial retry counter and erp_function */
441 /* and retry once without waiting for state change pending */ 293 /* and retry once without waiting for state change pending */
@@ -472,7 +324,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
472 "redriving request immediately, " 324 "redriving request immediately, "
473 "%d retries left", 325 "%d retries left",
474 erp->retries); 326 erp->retries);
475 erp->status = DASD_CQR_QUEUED; 327 erp->status = DASD_CQR_FILLED;
476 } 328 }
477 } 329 }
478 330
@@ -530,7 +382,7 @@ static void
530dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) 382dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
531{ 383{
532 384
533 struct dasd_device *device = erp->device; 385 struct dasd_device *device = erp->startdev;
534 char msg_format = (sense[7] & 0xF0); 386 char msg_format = (sense[7] & 0xF0);
535 char msg_no = (sense[7] & 0x0F); 387 char msg_no = (sense[7] & 0x0F);
536 388
@@ -1157,7 +1009,7 @@ static struct dasd_ccw_req *
1157dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) 1009dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
1158{ 1010{
1159 1011
1160 struct dasd_device *device = erp->device; 1012 struct dasd_device *device = erp->startdev;
1161 1013
1162 erp->function = dasd_3990_erp_com_rej; 1014 erp->function = dasd_3990_erp_com_rej;
1163 1015
@@ -1198,7 +1050,7 @@ static struct dasd_ccw_req *
1198dasd_3990_erp_bus_out(struct dasd_ccw_req * erp) 1050dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
1199{ 1051{
1200 1052
1201 struct dasd_device *device = erp->device; 1053 struct dasd_device *device = erp->startdev;
1202 1054
1203 /* first time set initial retry counter and erp_function */ 1055 /* first time set initial retry counter and erp_function */
1204 /* and retry once without blocking queue */ 1056 /* and retry once without blocking queue */
@@ -1237,7 +1089,7 @@ static struct dasd_ccw_req *
1237dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) 1089dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1238{ 1090{
1239 1091
1240 struct dasd_device *device = erp->device; 1092 struct dasd_device *device = erp->startdev;
1241 1093
1242 erp->function = dasd_3990_erp_equip_check; 1094 erp->function = dasd_3990_erp_equip_check;
1243 1095
@@ -1279,7 +1131,6 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1279 1131
1280 erp = dasd_3990_erp_action_5(erp); 1132 erp = dasd_3990_erp_action_5(erp);
1281 } 1133 }
1282
1283 return erp; 1134 return erp;
1284 1135
1285} /* end dasd_3990_erp_equip_check */ 1136} /* end dasd_3990_erp_equip_check */
@@ -1299,7 +1150,7 @@ static struct dasd_ccw_req *
1299dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense) 1150dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1300{ 1151{
1301 1152
1302 struct dasd_device *device = erp->device; 1153 struct dasd_device *device = erp->startdev;
1303 1154
1304 erp->function = dasd_3990_erp_data_check; 1155 erp->function = dasd_3990_erp_data_check;
1305 1156
@@ -1358,7 +1209,7 @@ static struct dasd_ccw_req *
1358dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense) 1209dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
1359{ 1210{
1360 1211
1361 struct dasd_device *device = erp->device; 1212 struct dasd_device *device = erp->startdev;
1362 1213
1363 erp->function = dasd_3990_erp_overrun; 1214 erp->function = dasd_3990_erp_overrun;
1364 1215
@@ -1387,7 +1238,7 @@ static struct dasd_ccw_req *
1387dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) 1238dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1388{ 1239{
1389 1240
1390 struct dasd_device *device = erp->device; 1241 struct dasd_device *device = erp->startdev;
1391 1242
1392 erp->function = dasd_3990_erp_inv_format; 1243 erp->function = dasd_3990_erp_inv_format;
1393 1244
@@ -1403,8 +1254,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1403 1254
1404 } else { 1255 } else {
1405 DEV_MESSAGE(KERN_ERR, device, "%s", 1256 DEV_MESSAGE(KERN_ERR, device, "%s",
1406 "Invalid Track Format - Fatal error should have " 1257 "Invalid Track Format - Fatal error");
1407 "been handled within the interrupt handler");
1408 1258
1409 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 1259 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1410 } 1260 }
@@ -1428,7 +1278,7 @@ static struct dasd_ccw_req *
1428dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense) 1278dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
1429{ 1279{
1430 1280
1431 struct dasd_device *device = default_erp->device; 1281 struct dasd_device *device = default_erp->startdev;
1432 1282
1433 DEV_MESSAGE(KERN_ERR, device, "%s", 1283 DEV_MESSAGE(KERN_ERR, device, "%s",
1434 "End-of-Cylinder - must never happen"); 1284 "End-of-Cylinder - must never happen");
@@ -1453,7 +1303,7 @@ static struct dasd_ccw_req *
1453dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense) 1303dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1454{ 1304{
1455 1305
1456 struct dasd_device *device = erp->device; 1306 struct dasd_device *device = erp->startdev;
1457 1307
1458 erp->function = dasd_3990_erp_env_data; 1308 erp->function = dasd_3990_erp_env_data;
1459 1309
@@ -1463,11 +1313,9 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1463 1313
1464 /* don't retry on disabled interface */ 1314 /* don't retry on disabled interface */
1465 if (sense[7] != 0x0F) { 1315 if (sense[7] != 0x0F) {
1466
1467 erp = dasd_3990_erp_action_4(erp, sense); 1316 erp = dasd_3990_erp_action_4(erp, sense);
1468 } else { 1317 } else {
1469 1318 erp->status = DASD_CQR_FILLED;
1470 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO);
1471 } 1319 }
1472 1320
1473 return erp; 1321 return erp;
@@ -1490,11 +1338,10 @@ static struct dasd_ccw_req *
1490dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense) 1338dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
1491{ 1339{
1492 1340
1493 struct dasd_device *device = default_erp->device; 1341 struct dasd_device *device = default_erp->startdev;
1494 1342
1495 DEV_MESSAGE(KERN_ERR, device, "%s", 1343 DEV_MESSAGE(KERN_ERR, device, "%s",
1496 "No Record Found - Fatal error should " 1344 "No Record Found - Fatal error ");
1497 "have been handled within the interrupt handler");
1498 1345
1499 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); 1346 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1500 1347
@@ -1517,7 +1364,7 @@ static struct dasd_ccw_req *
1517dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) 1364dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1518{ 1365{
1519 1366
1520 struct dasd_device *device = erp->device; 1367 struct dasd_device *device = erp->startdev;
1521 1368
1522 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected"); 1369 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
1523 1370
@@ -1526,6 +1373,43 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1526} /* end dasd_3990_erp_file_prot */ 1373} /* end dasd_3990_erp_file_prot */
1527 1374
1528/* 1375/*
1376 * DASD_3990_ERP_INSPECT_ALIAS
1377 *
1378 * DESCRIPTION
1379 * Checks if the original request was started on an alias device.
1380 * If yes, it modifies the original and the erp request so that
1381 * the erp request can be started on a base device.
1382 *
1383 * PARAMETER
1384 * erp pointer to the currently created default ERP
1385 *
1386 * RETURN VALUES
1387 * erp pointer to the modified ERP, or NULL
1388 */
1389
1390static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
1391 struct dasd_ccw_req *erp)
1392{
1393 struct dasd_ccw_req *cqr = erp->refers;
1394
1395 if (cqr->block &&
1396 (cqr->block->base != cqr->startdev)) {
1397 if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
1398 DEV_MESSAGE(KERN_ERR, cqr->startdev,
1399 "ERP on alias device for request %p,"
1400 " recover on base device %s", cqr,
1401 cqr->block->base->cdev->dev.bus_id);
1402 }
1403 dasd_eckd_reset_ccw_to_base_io(cqr);
1404 erp->startdev = cqr->block->base;
1405 erp->function = dasd_3990_erp_inspect_alias;
1406 return erp;
1407 } else
1408 return NULL;
1409}
1410
1411
1412/*
1529 * DASD_3990_ERP_INSPECT_24 1413 * DASD_3990_ERP_INSPECT_24
1530 * 1414 *
1531 * DESCRIPTION 1415 * DESCRIPTION
@@ -1623,7 +1507,7 @@ static struct dasd_ccw_req *
1623dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense) 1507dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
1624{ 1508{
1625 1509
1626 struct dasd_device *device = erp->device; 1510 struct dasd_device *device = erp->startdev;
1627 1511
1628 erp->retries = 256; 1512 erp->retries = 256;
1629 erp->function = dasd_3990_erp_action_10_32; 1513 erp->function = dasd_3990_erp_action_10_32;
@@ -1657,13 +1541,14 @@ static struct dasd_ccw_req *
1657dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) 1541dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1658{ 1542{
1659 1543
1660 struct dasd_device *device = default_erp->device; 1544 struct dasd_device *device = default_erp->startdev;
1661 __u32 cpa = 0; 1545 __u32 cpa = 0;
1662 struct dasd_ccw_req *cqr; 1546 struct dasd_ccw_req *cqr;
1663 struct dasd_ccw_req *erp; 1547 struct dasd_ccw_req *erp;
1664 struct DE_eckd_data *DE_data; 1548 struct DE_eckd_data *DE_data;
1549 struct PFX_eckd_data *PFX_data;
1665 char *LO_data; /* LO_eckd_data_t */ 1550 char *LO_data; /* LO_eckd_data_t */
1666 struct ccw1 *ccw; 1551 struct ccw1 *ccw, *oldccw;
1667 1552
1668 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1553 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1669 "Write not finished because of unexpected condition"); 1554 "Write not finished because of unexpected condition");
@@ -1702,8 +1587,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1702 /* Build new ERP request including DE/LO */ 1587 /* Build new ERP request including DE/LO */
1703 erp = dasd_alloc_erp_request((char *) &cqr->magic, 1588 erp = dasd_alloc_erp_request((char *) &cqr->magic,
1704 2 + 1,/* DE/LO + TIC */ 1589 2 + 1,/* DE/LO + TIC */
1705 sizeof (struct DE_eckd_data) + 1590 sizeof(struct DE_eckd_data) +
1706 sizeof (struct LO_eckd_data), device); 1591 sizeof(struct LO_eckd_data), device);
1707 1592
1708 if (IS_ERR(erp)) { 1593 if (IS_ERR(erp)) {
1709 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP"); 1594 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
@@ -1712,10 +1597,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1712 1597
1713 /* use original DE */ 1598 /* use original DE */
1714 DE_data = erp->data; 1599 DE_data = erp->data;
1715 memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data)); 1600 oldccw = cqr->cpaddr;
1601 if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
1602 PFX_data = cqr->data;
1603 memcpy(DE_data, &PFX_data->define_extend,
1604 sizeof(struct DE_eckd_data));
1605 } else
1606 memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
1716 1607
1717 /* create LO */ 1608 /* create LO */
1718 LO_data = erp->data + sizeof (struct DE_eckd_data); 1609 LO_data = erp->data + sizeof(struct DE_eckd_data);
1719 1610
1720 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { 1611 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1721 1612
@@ -1748,7 +1639,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1748 1639
1749 /* create DE ccw */ 1640 /* create DE ccw */
1750 ccw = erp->cpaddr; 1641 ccw = erp->cpaddr;
1751 memset(ccw, 0, sizeof (struct ccw1)); 1642 memset(ccw, 0, sizeof(struct ccw1));
1752 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; 1643 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
1753 ccw->flags = CCW_FLAG_CC; 1644 ccw->flags = CCW_FLAG_CC;
1754 ccw->count = 16; 1645 ccw->count = 16;
@@ -1756,7 +1647,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1756 1647
1757 /* create LO ccw */ 1648 /* create LO ccw */
1758 ccw++; 1649 ccw++;
1759 memset(ccw, 0, sizeof (struct ccw1)); 1650 memset(ccw, 0, sizeof(struct ccw1));
1760 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; 1651 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
1761 ccw->flags = CCW_FLAG_CC; 1652 ccw->flags = CCW_FLAG_CC;
1762 ccw->count = 16; 1653 ccw->count = 16;
@@ -1770,7 +1661,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1770 /* fill erp related fields */ 1661 /* fill erp related fields */
1771 erp->function = dasd_3990_erp_action_1B_32; 1662 erp->function = dasd_3990_erp_action_1B_32;
1772 erp->refers = default_erp->refers; 1663 erp->refers = default_erp->refers;
1773 erp->device = device; 1664 erp->startdev = device;
1665 erp->memdev = device;
1774 erp->magic = default_erp->magic; 1666 erp->magic = default_erp->magic;
1775 erp->expires = 0; 1667 erp->expires = 0;
1776 erp->retries = 256; 1668 erp->retries = 256;
@@ -1803,7 +1695,7 @@ static struct dasd_ccw_req *
1803dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) 1695dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1804{ 1696{
1805 1697
1806 struct dasd_device *device = previous_erp->device; 1698 struct dasd_device *device = previous_erp->startdev;
1807 __u32 cpa = 0; 1699 __u32 cpa = 0;
1808 struct dasd_ccw_req *cqr; 1700 struct dasd_ccw_req *cqr;
1809 struct dasd_ccw_req *erp; 1701 struct dasd_ccw_req *erp;
@@ -1827,7 +1719,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1827 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1719 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1828 "Imprecise ending is set - just retry"); 1720 "Imprecise ending is set - just retry");
1829 1721
1830 previous_erp->status = DASD_CQR_QUEUED; 1722 previous_erp->status = DASD_CQR_FILLED;
1831 1723
1832 return previous_erp; 1724 return previous_erp;
1833 } 1725 }
@@ -1850,7 +1742,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1850 erp = previous_erp; 1742 erp = previous_erp;
1851 1743
1852 /* update the LO with the new returned sense data */ 1744 /* update the LO with the new returned sense data */
1853 LO_data = erp->data + sizeof (struct DE_eckd_data); 1745 LO_data = erp->data + sizeof(struct DE_eckd_data);
1854 1746
1855 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { 1747 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1856 1748
@@ -1889,7 +1781,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1889 ccw++; /* addr of TIC ccw */ 1781 ccw++; /* addr of TIC ccw */
1890 ccw->cda = cpa; 1782 ccw->cda = cpa;
1891 1783
1892 erp->status = DASD_CQR_QUEUED; 1784 erp->status = DASD_CQR_FILLED;
1893 1785
1894 return erp; 1786 return erp;
1895 1787
@@ -1968,9 +1860,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
1968 * try further actions. */ 1860 * try further actions. */
1969 1861
1970 erp->lpm = 0; 1862 erp->lpm = 0;
1971 1863 erp->status = DASD_CQR_NEED_ERP;
1972 erp->status = DASD_CQR_ERROR;
1973
1974 } 1864 }
1975 } 1865 }
1976 1866
@@ -2047,7 +1937,7 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
2047 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { 1937 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
2048 1938
2049 /* set to suspended duplex state then restart */ 1939 /* set to suspended duplex state then restart */
2050 struct dasd_device *device = erp->device; 1940 struct dasd_device *device = erp->startdev;
2051 1941
2052 DEV_MESSAGE(KERN_ERR, device, "%s", 1942 DEV_MESSAGE(KERN_ERR, device, "%s",
2053 "Set device to suspended duplex state should be " 1943 "Set device to suspended duplex state should be "
@@ -2081,28 +1971,26 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
2081{ 1971{
2082 1972
2083 if ((erp->function == dasd_3990_erp_compound_retry) && 1973 if ((erp->function == dasd_3990_erp_compound_retry) &&
2084 (erp->status == DASD_CQR_ERROR)) { 1974 (erp->status == DASD_CQR_NEED_ERP)) {
2085 1975
2086 dasd_3990_erp_compound_path(erp, sense); 1976 dasd_3990_erp_compound_path(erp, sense);
2087 } 1977 }
2088 1978
2089 if ((erp->function == dasd_3990_erp_compound_path) && 1979 if ((erp->function == dasd_3990_erp_compound_path) &&
2090 (erp->status == DASD_CQR_ERROR)) { 1980 (erp->status == DASD_CQR_NEED_ERP)) {
2091 1981
2092 erp = dasd_3990_erp_compound_code(erp, sense); 1982 erp = dasd_3990_erp_compound_code(erp, sense);
2093 } 1983 }
2094 1984
2095 if ((erp->function == dasd_3990_erp_compound_code) && 1985 if ((erp->function == dasd_3990_erp_compound_code) &&
2096 (erp->status == DASD_CQR_ERROR)) { 1986 (erp->status == DASD_CQR_NEED_ERP)) {
2097 1987
2098 dasd_3990_erp_compound_config(erp, sense); 1988 dasd_3990_erp_compound_config(erp, sense);
2099 } 1989 }
2100 1990
2101 /* if no compound action ERP specified, the request failed */ 1991 /* if no compound action ERP specified, the request failed */
2102 if (erp->status == DASD_CQR_ERROR) { 1992 if (erp->status == DASD_CQR_NEED_ERP)
2103
2104 erp->status = DASD_CQR_FAILED; 1993 erp->status = DASD_CQR_FAILED;
2105 }
2106 1994
2107 return erp; 1995 return erp;
2108 1996
@@ -2127,7 +2015,7 @@ static struct dasd_ccw_req *
2127dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) 2015dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2128{ 2016{
2129 2017
2130 struct dasd_device *device = erp->device; 2018 struct dasd_device *device = erp->startdev;
2131 2019
2132 erp->function = dasd_3990_erp_inspect_32; 2020 erp->function = dasd_3990_erp_inspect_32;
2133 2021
@@ -2149,8 +2037,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2149 2037
2150 case 0x01: /* fatal error */ 2038 case 0x01: /* fatal error */
2151 DEV_MESSAGE(KERN_ERR, device, "%s", 2039 DEV_MESSAGE(KERN_ERR, device, "%s",
2152 "Fatal error should have been " 2040 "Retry not recommended - Fatal error");
2153 "handled within the interrupt handler");
2154 2041
2155 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 2042 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2156 break; 2043 break;
@@ -2253,6 +2140,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
2253 /* already set up new ERP ! */ 2140 /* already set up new ERP ! */
2254 char *sense = erp->refers->irb.ecw; 2141 char *sense = erp->refers->irb.ecw;
2255 2142
 2143 /* if this problem occurred on an alias, retry on base */
2144 erp_new = dasd_3990_erp_inspect_alias(erp);
2145 if (erp_new)
2146 return erp_new;
2147
2256 /* distinguish between 24 and 32 byte sense data */ 2148 /* distinguish between 24 and 32 byte sense data */
2257 if (sense[27] & DASD_SENSE_BIT_0) { 2149 if (sense[27] & DASD_SENSE_BIT_0) {
2258 2150
@@ -2287,13 +2179,13 @@ static struct dasd_ccw_req *
2287dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr) 2179dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2288{ 2180{
2289 2181
2290 struct dasd_device *device = cqr->device; 2182 struct dasd_device *device = cqr->startdev;
2291 struct ccw1 *ccw; 2183 struct ccw1 *ccw;
2292 2184
2293 /* allocate additional request block */ 2185 /* allocate additional request block */
2294 struct dasd_ccw_req *erp; 2186 struct dasd_ccw_req *erp;
2295 2187
2296 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device); 2188 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, device);
2297 if (IS_ERR(erp)) { 2189 if (IS_ERR(erp)) {
2298 if (cqr->retries <= 0) { 2190 if (cqr->retries <= 0) {
2299 DEV_MESSAGE(KERN_ERR, device, "%s", 2191 DEV_MESSAGE(KERN_ERR, device, "%s",
@@ -2305,7 +2197,7 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2305 "Unable to allocate ERP request " 2197 "Unable to allocate ERP request "
2306 "(%i retries left)", 2198 "(%i retries left)",
2307 cqr->retries); 2199 cqr->retries);
2308 dasd_set_timer(device, (HZ << 3)); 2200 dasd_block_set_timer(device->block, (HZ << 3));
2309 } 2201 }
2310 return cqr; 2202 return cqr;
2311 } 2203 }
@@ -2319,7 +2211,9 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2319 ccw->cda = (long)(cqr->cpaddr); 2211 ccw->cda = (long)(cqr->cpaddr);
2320 erp->function = dasd_3990_erp_add_erp; 2212 erp->function = dasd_3990_erp_add_erp;
2321 erp->refers = cqr; 2213 erp->refers = cqr;
2322 erp->device = cqr->device; 2214 erp->startdev = device;
2215 erp->memdev = device;
2216 erp->block = cqr->block;
2323 erp->magic = cqr->magic; 2217 erp->magic = cqr->magic;
2324 erp->expires = 0; 2218 erp->expires = 0;
2325 erp->retries = 256; 2219 erp->retries = 256;
@@ -2466,7 +2360,7 @@ static struct dasd_ccw_req *
2466dasd_3990_erp_further_erp(struct dasd_ccw_req *erp) 2360dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2467{ 2361{
2468 2362
2469 struct dasd_device *device = erp->device; 2363 struct dasd_device *device = erp->startdev;
2470 char *sense = erp->irb.ecw; 2364 char *sense = erp->irb.ecw;
2471 2365
2472 /* check for 24 byte sense ERP */ 2366 /* check for 24 byte sense ERP */
@@ -2557,7 +2451,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2557 struct dasd_ccw_req *erp) 2451 struct dasd_ccw_req *erp)
2558{ 2452{
2559 2453
2560 struct dasd_device *device = erp_head->device; 2454 struct dasd_device *device = erp_head->startdev;
2561 struct dasd_ccw_req *erp_done = erp_head; /* finished req */ 2455 struct dasd_ccw_req *erp_done = erp_head; /* finished req */
2562 struct dasd_ccw_req *erp_free = NULL; /* req to be freed */ 2456 struct dasd_ccw_req *erp_free = NULL; /* req to be freed */
2563 2457
@@ -2569,13 +2463,13 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2569 "original request was lost\n"); 2463 "original request was lost\n");
2570 2464
2571 /* remove the request from the device queue */ 2465 /* remove the request from the device queue */
2572 list_del(&erp_done->list); 2466 list_del(&erp_done->blocklist);
2573 2467
2574 erp_free = erp_done; 2468 erp_free = erp_done;
2575 erp_done = erp_done->refers; 2469 erp_done = erp_done->refers;
2576 2470
2577 /* free the finished erp request */ 2471 /* free the finished erp request */
2578 dasd_free_erp_request(erp_free, erp_free->device); 2472 dasd_free_erp_request(erp_free, erp_free->memdev);
2579 2473
2580 } /* end while */ 2474 } /* end while */
2581 2475
@@ -2603,7 +2497,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2603 erp->retries, erp); 2497 erp->retries, erp);
2604 2498
2605 /* handle the request again... */ 2499 /* handle the request again... */
2606 erp->status = DASD_CQR_QUEUED; 2500 erp->status = DASD_CQR_FILLED;
2607 } 2501 }
2608 2502
2609 } else { 2503 } else {
@@ -2620,7 +2514,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2620 * DASD_3990_ERP_ACTION 2514 * DASD_3990_ERP_ACTION
2621 * 2515 *
2622 * DESCRIPTION 2516 * DESCRIPTION
2623 * controll routine for 3990 erp actions. 2517 * control routine for 3990 erp actions.
2624 * Has to be called with the queue lock (namely the s390_irq_lock) acquired. 2518 * Has to be called with the queue lock (namely the s390_irq_lock) acquired.
2625 * 2519 *
2626 * PARAMETER 2520 * PARAMETER
@@ -2636,9 +2530,8 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2636struct dasd_ccw_req * 2530struct dasd_ccw_req *
2637dasd_3990_erp_action(struct dasd_ccw_req * cqr) 2531dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2638{ 2532{
2639
2640 struct dasd_ccw_req *erp = NULL; 2533 struct dasd_ccw_req *erp = NULL;
2641 struct dasd_device *device = cqr->device; 2534 struct dasd_device *device = cqr->startdev;
2642 struct dasd_ccw_req *temp_erp = NULL; 2535 struct dasd_ccw_req *temp_erp = NULL;
2643 2536
2644 if (device->features & DASD_FEATURE_ERPLOG) { 2537 if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2704,10 +2597,11 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2704 } 2597 }
2705 } 2598 }
2706 2599
2707 /* enqueue added ERP request */ 2600 /* enqueue ERP request if it's a new one */
2708 if (erp->status == DASD_CQR_FILLED) { 2601 if (list_empty(&erp->blocklist)) {
2709 erp->status = DASD_CQR_QUEUED; 2602 cqr->status = DASD_CQR_IN_ERP;
2710 list_add(&erp->list, &device->ccw_queue); 2603 /* add erp request before the cqr */
2604 list_add_tail(&erp->blocklist, &cqr->blocklist);
2711 } 2605 }
2712 2606
2713 return erp; 2607 return erp;
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
deleted file mode 100644
index 6e082688475a..000000000000
--- a/drivers/s390/block/dasd_9336_erp.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9336_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 */
8
9#define PRINTK_HEADER "dasd_erp(9336)"
10
11#include "dasd_int.h"
12
13
14/*
15 * DASD_9336_ERP_EXAMINE
16 *
17 * DESCRIPTION
18 * Checks only for fatal/no/recover error.
19 * A detailed examination of the sense data is done later outside
20 * the interrupt handler.
21 *
22 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
23 * 'Chapter 7. 9336 Sense Data'.
24 *
25 * RETURN VALUES
26 * dasd_era_none no error
27 * dasd_era_fatal for all fatal (unrecoverable errors)
28 * dasd_era_recover for all others.
29 */
30dasd_era_t
31dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
32{
33 /* check for successful execution first */
34 if (irb->scsw.cstat == 0x00 &&
35 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
36 return dasd_era_none;
37
38 /* examine the 24 byte sense data */
39 return dasd_era_recover;
40
41} /* END dasd_9336_erp_examine */
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
deleted file mode 100644
index ddecb9808ed4..000000000000
--- a/drivers/s390/block/dasd_9343_erp.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9345_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 */
8
9#define PRINTK_HEADER "dasd_erp(9343)"
10
11#include "dasd_int.h"
12
13dasd_era_t
14dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
15{
16 if (irb->scsw.cstat == 0x00 &&
17 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
18 return dasd_era_none;
19
20 return dasd_era_recover;
21}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
new file mode 100644
index 000000000000..3a40bee9d358
--- /dev/null
+++ b/drivers/s390/block/dasd_alias.c
@@ -0,0 +1,903 @@
1/*
2 * PAV alias management for the DASD ECKD discipline
3 *
4 * Copyright IBM Corporation, 2007
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */
7
8#include <linux/list.h>
9#include <asm/ebcdic.h>
10#include "dasd_int.h"
11#include "dasd_eckd.h"
12
13#ifdef PRINTK_HEADER
14#undef PRINTK_HEADER
15#endif /* PRINTK_HEADER */
16#define PRINTK_HEADER "dasd(eckd):"
17
18
19/*
20 * General concept of alias management:
21 * - PAV and DASD alias management is specific to the eckd discipline.
22 * - A device is connected to an lcu as long as the device exists.
 23 * dasd_alias_make_device_known_to_lcu will be called when the
24 * device is checked by the eckd discipline and
25 * dasd_alias_disconnect_device_from_lcu will be called
26 * before the device is deleted.
27 * - The dasd_alias_add_device / dasd_alias_remove_device
28 * functions mark the point when a device is 'ready for service'.
29 * - A summary unit check is a rare occasion, but it is mandatory to
30 * support it. It requires some complex recovery actions before the
31 * devices can be used again (see dasd_alias_handle_summary_unit_check).
32 * - dasd_alias_get_start_dev will find an alias device that can be used
33 * instead of the base device and does some (very simple) load balancing.
 34 * This is the function that gets called for each I/O, so when improving
 35 * something, this function should get faster or better; the rest just
 36 * has to be correct.
37 */
38
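
Read as a lifecycle, the four entry points named above chain together roughly as in the following sketch. Only the function names come from this file; the caller context and the error handling are assumptions, not part of the patch:

	static int example_alias_lifecycle(struct dasd_device *device)
	{
		int rc;

		/* device exists: connect it to its lcu (1 = lcu already known) */
		rc = dasd_alias_make_device_known_to_lcu(device);
		if (rc < 0)
			return rc;	/* e.g. -ENOMEM */

		/* device becomes 'ready for service' */
		rc = dasd_alias_add_device(device);
		if (rc)
			return rc;

		/* ... per I/O, dasd_alias_get_start_dev() may pick an alias ... */

		/* device leaves service, then gets deleted */
		dasd_alias_remove_device(device);
		dasd_alias_disconnect_device_from_lcu(device);
		return 0;
	}
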
39
40static void summary_unit_check_handling_work(struct work_struct *);
41static void lcu_update_work(struct work_struct *);
42static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
43
44static struct alias_root aliastree = {
45 .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
46 .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
47};
48
49static struct alias_server *_find_server(struct dasd_uid *uid)
50{
51 struct alias_server *pos;
52 list_for_each_entry(pos, &aliastree.serverlist, server) {
53 if (!strncmp(pos->uid.vendor, uid->vendor,
54 sizeof(uid->vendor))
55 && !strncmp(pos->uid.serial, uid->serial,
56 sizeof(uid->serial)))
57 return pos;
58 };
59 return NULL;
60}
61
62static struct alias_lcu *_find_lcu(struct alias_server *server,
63 struct dasd_uid *uid)
64{
65 struct alias_lcu *pos;
66 list_for_each_entry(pos, &server->lculist, lcu) {
67 if (pos->uid.ssid == uid->ssid)
68 return pos;
69 };
70 return NULL;
71}
72
73static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
74 struct dasd_uid *uid)
75{
76 struct alias_pav_group *pos;
77 __u8 search_unit_addr;
78
79 /* for hyper pav there is only one group */
80 if (lcu->pav == HYPER_PAV) {
81 if (list_empty(&lcu->grouplist))
82 return NULL;
83 else
84 return list_first_entry(&lcu->grouplist,
85 struct alias_pav_group, group);
86 }
87
88 /* for base pav we have to find the group that matches the base */
89 if (uid->type == UA_BASE_DEVICE)
90 search_unit_addr = uid->real_unit_addr;
91 else
92 search_unit_addr = uid->base_unit_addr;
93 list_for_each_entry(pos, &lcu->grouplist, group) {
94 if (pos->uid.base_unit_addr == search_unit_addr)
95 return pos;
96 };
97 return NULL;
98}
99
100static struct alias_server *_allocate_server(struct dasd_uid *uid)
101{
102 struct alias_server *server;
103
104 server = kzalloc(sizeof(*server), GFP_KERNEL);
105 if (!server)
106 return ERR_PTR(-ENOMEM);
107 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
108 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
109 INIT_LIST_HEAD(&server->server);
110 INIT_LIST_HEAD(&server->lculist);
111 return server;
112}
113
114static void _free_server(struct alias_server *server)
115{
116 kfree(server);
117}
118
119static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
120{
121 struct alias_lcu *lcu;
122
123 lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
124 if (!lcu)
125 return ERR_PTR(-ENOMEM);
126 lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
127 if (!lcu->uac)
128 goto out_err1;
129 lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
130 if (!lcu->rsu_cqr)
131 goto out_err2;
132 lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
133 GFP_KERNEL | GFP_DMA);
134 if (!lcu->rsu_cqr->cpaddr)
135 goto out_err3;
136 lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
137 if (!lcu->rsu_cqr->data)
138 goto out_err4;
139
140 memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
141 memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
142 lcu->uid.ssid = uid->ssid;
143 lcu->pav = NO_PAV;
144 lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
145 INIT_LIST_HEAD(&lcu->lcu);
146 INIT_LIST_HEAD(&lcu->inactive_devices);
147 INIT_LIST_HEAD(&lcu->active_devices);
148 INIT_LIST_HEAD(&lcu->grouplist);
149 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
150 INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
151 spin_lock_init(&lcu->lock);
152 return lcu;
153
154out_err4:
155 kfree(lcu->rsu_cqr->cpaddr);
156out_err3:
157 kfree(lcu->rsu_cqr);
158out_err2:
159 kfree(lcu->uac);
160out_err1:
161 kfree(lcu);
162 return ERR_PTR(-ENOMEM);
163}
164
165static void _free_lcu(struct alias_lcu *lcu)
166{
167 kfree(lcu->rsu_cqr->data);
168 kfree(lcu->rsu_cqr->cpaddr);
169 kfree(lcu->rsu_cqr);
170 kfree(lcu->uac);
171 kfree(lcu);
172}
173
174/*
175 * This is the function that will allocate all the server and lcu data,
176 * so this function must be called first for a new device.
 177 * If the return value is 1, the lcu was already known before; if it
 178 * is 0, this is a new lcu.
179 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
180 */
181int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
182{
183 struct dasd_eckd_private *private;
184 unsigned long flags;
185 struct alias_server *server, *newserver;
186 struct alias_lcu *lcu, *newlcu;
187 int is_lcu_known;
188 struct dasd_uid *uid;
189
190 private = (struct dasd_eckd_private *) device->private;
191 uid = &private->uid;
192 spin_lock_irqsave(&aliastree.lock, flags);
193 is_lcu_known = 1;
194 server = _find_server(uid);
195 if (!server) {
196 spin_unlock_irqrestore(&aliastree.lock, flags);
197 newserver = _allocate_server(uid);
198 if (IS_ERR(newserver))
199 return PTR_ERR(newserver);
200 spin_lock_irqsave(&aliastree.lock, flags);
201 server = _find_server(uid);
202 if (!server) {
203 list_add(&newserver->server, &aliastree.serverlist);
204 server = newserver;
205 is_lcu_known = 0;
206 } else {
207 /* someone was faster */
208 _free_server(newserver);
209 }
210 }
211
212 lcu = _find_lcu(server, uid);
213 if (!lcu) {
214 spin_unlock_irqrestore(&aliastree.lock, flags);
215 newlcu = _allocate_lcu(uid);
216 if (IS_ERR(newlcu))
 217 return PTR_ERR(newlcu);
218 spin_lock_irqsave(&aliastree.lock, flags);
219 lcu = _find_lcu(server, uid);
220 if (!lcu) {
221 list_add(&newlcu->lcu, &server->lculist);
222 lcu = newlcu;
223 is_lcu_known = 0;
224 } else {
225 /* someone was faster */
226 _free_lcu(newlcu);
227 }
228 is_lcu_known = 0;
229 }
230 spin_lock(&lcu->lock);
231 list_add(&device->alias_list, &lcu->inactive_devices);
232 private->lcu = lcu;
233 spin_unlock(&lcu->lock);
234 spin_unlock_irqrestore(&aliastree.lock, flags);
235
236 return is_lcu_known;
237}
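
The allocation paths above drop the aliastree lock around kzalloc(GFP_KERNEL), which may sleep, and recheck before inserting. A generic sketch of that idiom (find, allocate, insert, free_obj, tree_lock and key are invented placeholders, not symbols from this patch):

	spin_lock_irqsave(&tree_lock, flags);
	obj = find(key);
	if (!obj) {
		spin_unlock_irqrestore(&tree_lock, flags);
		newobj = allocate(key);			/* may sleep */
		if (IS_ERR(newobj))
			return PTR_ERR(newobj);
		spin_lock_irqsave(&tree_lock, flags);
		obj = find(key);			/* recheck, lock was dropped */
		if (!obj) {
			insert(newobj);
			obj = newobj;
		} else {
			free_obj(newobj);		/* someone was faster */
		}
	}
	spin_unlock_irqrestore(&tree_lock, flags);
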
238
239/*
240 * This function removes a device from the scope of alias management.
241 * The complicated part is to make sure that it is not in use by
 242 * any of the workers. If necessary, cancel the work.
243 */
244void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
245{
246 struct dasd_eckd_private *private;
247 unsigned long flags;
248 struct alias_lcu *lcu;
249 struct alias_server *server;
250 int was_pending;
251
252 private = (struct dasd_eckd_private *) device->private;
253 lcu = private->lcu;
254 spin_lock_irqsave(&lcu->lock, flags);
255 list_del_init(&device->alias_list);
256 /* make sure that the workers don't use this device */
257 if (device == lcu->suc_data.device) {
258 spin_unlock_irqrestore(&lcu->lock, flags);
259 cancel_work_sync(&lcu->suc_data.worker);
260 spin_lock_irqsave(&lcu->lock, flags);
261 if (device == lcu->suc_data.device)
262 lcu->suc_data.device = NULL;
263 }
264 was_pending = 0;
265 if (device == lcu->ruac_data.device) {
266 spin_unlock_irqrestore(&lcu->lock, flags);
267 was_pending = 1;
268 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
269 spin_lock_irqsave(&lcu->lock, flags);
270 if (device == lcu->ruac_data.device)
271 lcu->ruac_data.device = NULL;
272 }
273 private->lcu = NULL;
274 spin_unlock_irqrestore(&lcu->lock, flags);
275
276 spin_lock_irqsave(&aliastree.lock, flags);
277 spin_lock(&lcu->lock);
278 if (list_empty(&lcu->grouplist) &&
279 list_empty(&lcu->active_devices) &&
280 list_empty(&lcu->inactive_devices)) {
281 list_del(&lcu->lcu);
282 spin_unlock(&lcu->lock);
283 _free_lcu(lcu);
284 lcu = NULL;
285 } else {
286 if (was_pending)
287 _schedule_lcu_update(lcu, NULL);
288 spin_unlock(&lcu->lock);
289 }
290 server = _find_server(&private->uid);
291 if (server && list_empty(&server->lculist)) {
292 list_del(&server->server);
293 _free_server(server);
294 }
295 spin_unlock_irqrestore(&aliastree.lock, flags);
296}
297
298/*
299 * This function assumes that the unit address configuration stored
300 * in the lcu is up to date and will update the device uid before
301 * adding it to a pav group.
302 */
303static int _add_device_to_lcu(struct alias_lcu *lcu,
304 struct dasd_device *device)
305{
306
307 struct dasd_eckd_private *private;
308 struct alias_pav_group *group;
309 struct dasd_uid *uid;
310
311 private = (struct dasd_eckd_private *) device->private;
312 uid = &private->uid;
313 uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
314 uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
315 dasd_set_uid(device->cdev, &private->uid);
316
317 /* if we have no PAV anyway, we don't need to bother with PAV groups */
318 if (lcu->pav == NO_PAV) {
319 list_move(&device->alias_list, &lcu->active_devices);
320 return 0;
321 }
322
323 group = _find_group(lcu, uid);
324 if (!group) {
325 group = kzalloc(sizeof(*group), GFP_ATOMIC);
326 if (!group)
327 return -ENOMEM;
328 memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
329 memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
330 group->uid.ssid = uid->ssid;
331 if (uid->type == UA_BASE_DEVICE)
332 group->uid.base_unit_addr = uid->real_unit_addr;
333 else
334 group->uid.base_unit_addr = uid->base_unit_addr;
335 INIT_LIST_HEAD(&group->group);
336 INIT_LIST_HEAD(&group->baselist);
337 INIT_LIST_HEAD(&group->aliaslist);
338 list_add(&group->group, &lcu->grouplist);
339 }
340 if (uid->type == UA_BASE_DEVICE)
341 list_move(&device->alias_list, &group->baselist);
342 else
343 list_move(&device->alias_list, &group->aliaslist);
344 private->pavgroup = group;
345 return 0;
346};
347
348static void _remove_device_from_lcu(struct alias_lcu *lcu,
349 struct dasd_device *device)
350{
351 struct dasd_eckd_private *private;
352 struct alias_pav_group *group;
353
354 private = (struct dasd_eckd_private *) device->private;
355 list_move(&device->alias_list, &lcu->inactive_devices);
356 group = private->pavgroup;
357 if (!group)
358 return;
359 private->pavgroup = NULL;
360 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
361 list_del(&group->group);
362 kfree(group);
363 return;
364 }
365 if (group->next == device)
366 group->next = NULL;
367};
368
369static int read_unit_address_configuration(struct dasd_device *device,
370 struct alias_lcu *lcu)
371{
372 struct dasd_psf_prssd_data *prssdp;
373 struct dasd_ccw_req *cqr;
374 struct ccw1 *ccw;
375 int rc;
376 unsigned long flags;
377
378 cqr = dasd_kmalloc_request("ECKD",
379 1 /* PSF */ + 1 /* RSSD */ ,
380 (sizeof(struct dasd_psf_prssd_data)),
381 device);
382 if (IS_ERR(cqr))
383 return PTR_ERR(cqr);
384 cqr->startdev = device;
385 cqr->memdev = device;
386 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
387 cqr->retries = 10;
388 cqr->expires = 20 * HZ;
389
390 /* Prepare for Read Subsystem Data */
391 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
392 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
393 prssdp->order = PSF_ORDER_PRSSD;
394 prssdp->suborder = 0x0e; /* Read unit address configuration */
395 /* all other bytes of prssdp must be zero */
396
397 ccw = cqr->cpaddr;
398 ccw->cmd_code = DASD_ECKD_CCW_PSF;
399 ccw->count = sizeof(struct dasd_psf_prssd_data);
400 ccw->flags |= CCW_FLAG_CC;
401 ccw->cda = (__u32)(addr_t) prssdp;
402
 403 /* Read Subsystem Data - unit address configuration */
404 memset(lcu->uac, 0, sizeof(*(lcu->uac)));
405
406 ccw++;
407 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
408 ccw->count = sizeof(*(lcu->uac));
409 ccw->cda = (__u32)(addr_t) lcu->uac;
410
411 cqr->buildclk = get_clock();
412 cqr->status = DASD_CQR_FILLED;
413
414 /* need to unset flag here to detect race with summary unit check */
415 spin_lock_irqsave(&lcu->lock, flags);
416 lcu->flags &= ~NEED_UAC_UPDATE;
417 spin_unlock_irqrestore(&lcu->lock, flags);
418
419 do {
420 rc = dasd_sleep_on(cqr);
421 } while (rc && (cqr->retries > 0));
422 if (rc) {
423 spin_lock_irqsave(&lcu->lock, flags);
424 lcu->flags |= NEED_UAC_UPDATE;
425 spin_unlock_irqrestore(&lcu->lock, flags);
426 }
427 dasd_kfree_request(cqr, cqr->memdev);
428 return rc;
429}
430
431static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
432{
433 unsigned long flags;
434 struct alias_pav_group *pavgroup, *tempgroup;
435 struct dasd_device *device, *tempdev;
436 int i, rc;
437 struct dasd_eckd_private *private;
438
439 spin_lock_irqsave(&lcu->lock, flags);
440 list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
441 list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
442 alias_list) {
443 list_move(&device->alias_list, &lcu->active_devices);
444 private = (struct dasd_eckd_private *) device->private;
445 private->pavgroup = NULL;
446 }
447 list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
448 alias_list) {
449 list_move(&device->alias_list, &lcu->active_devices);
450 private = (struct dasd_eckd_private *) device->private;
451 private->pavgroup = NULL;
452 }
453 list_del(&pavgroup->group);
454 kfree(pavgroup);
455 }
456 spin_unlock_irqrestore(&lcu->lock, flags);
457
458 rc = read_unit_address_configuration(refdev, lcu);
459 if (rc)
460 return rc;
461
462 spin_lock_irqsave(&lcu->lock, flags);
463 lcu->pav = NO_PAV;
464 for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
465 switch (lcu->uac->unit[i].ua_type) {
466 case UA_BASE_PAV_ALIAS:
467 lcu->pav = BASE_PAV;
468 break;
469 case UA_HYPER_PAV_ALIAS:
470 lcu->pav = HYPER_PAV;
471 break;
472 }
473 if (lcu->pav != NO_PAV)
474 break;
475 }
476
477 list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
478 alias_list) {
479 _add_device_to_lcu(lcu, device);
480 }
481 spin_unlock_irqrestore(&lcu->lock, flags);
482 return 0;
483}
484
485static void lcu_update_work(struct work_struct *work)
486{
487 struct alias_lcu *lcu;
488 struct read_uac_work_data *ruac_data;
489 struct dasd_device *device;
490 unsigned long flags;
491 int rc;
492
493 ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
494 lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
495 device = ruac_data->device;
496 rc = _lcu_update(device, lcu);
497 /*
498 * Need to check flags again, as there could have been another
 499 * prepare_update or a new device while we were still
500 * processing the data
501 */
502 spin_lock_irqsave(&lcu->lock, flags);
503 if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
504 DEV_MESSAGE(KERN_WARNING, device, "could not update"
505 " alias data in lcu (rc = %d), retry later", rc);
506 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
507 } else {
508 lcu->ruac_data.device = NULL;
509 lcu->flags &= ~UPDATE_PENDING;
510 }
511 spin_unlock_irqrestore(&lcu->lock, flags);
512}
513
514static int _schedule_lcu_update(struct alias_lcu *lcu,
515 struct dasd_device *device)
516{
517 struct dasd_device *usedev = NULL;
518 struct alias_pav_group *group;
519
520 lcu->flags |= NEED_UAC_UPDATE;
521 if (lcu->ruac_data.device) {
522 /* already scheduled or running */
523 return 0;
524 }
525 if (device && !list_empty(&device->alias_list))
526 usedev = device;
527
528 if (!usedev && !list_empty(&lcu->grouplist)) {
529 group = list_first_entry(&lcu->grouplist,
530 struct alias_pav_group, group);
531 if (!list_empty(&group->baselist))
532 usedev = list_first_entry(&group->baselist,
533 struct dasd_device,
534 alias_list);
535 else if (!list_empty(&group->aliaslist))
536 usedev = list_first_entry(&group->aliaslist,
537 struct dasd_device,
538 alias_list);
539 }
540 if (!usedev && !list_empty(&lcu->active_devices)) {
541 usedev = list_first_entry(&lcu->active_devices,
542 struct dasd_device, alias_list);
543 }
544 /*
 545 * if we haven't found a proper device yet, give up for now; the next
 546 * device that will be set active will trigger an lcu update
547 */
548 if (!usedev)
549 return -EINVAL;
550 lcu->ruac_data.device = usedev;
551 schedule_delayed_work(&lcu->ruac_data.dwork, 0);
552 return 0;
553}
554
555int dasd_alias_add_device(struct dasd_device *device)
556{
557 struct dasd_eckd_private *private;
558 struct alias_lcu *lcu;
559 unsigned long flags;
560 int rc;
561
562 private = (struct dasd_eckd_private *) device->private;
563 lcu = private->lcu;
564 rc = 0;
565 spin_lock_irqsave(&lcu->lock, flags);
566 if (!(lcu->flags & UPDATE_PENDING)) {
567 rc = _add_device_to_lcu(lcu, device);
568 if (rc)
569 lcu->flags |= UPDATE_PENDING;
570 }
571 if (lcu->flags & UPDATE_PENDING) {
572 list_move(&device->alias_list, &lcu->active_devices);
573 _schedule_lcu_update(lcu, device);
574 }
575 spin_unlock_irqrestore(&lcu->lock, flags);
576 return rc;
577}
578
579int dasd_alias_remove_device(struct dasd_device *device)
580{
581 struct dasd_eckd_private *private;
582 struct alias_lcu *lcu;
583 unsigned long flags;
584
585 private = (struct dasd_eckd_private *) device->private;
586 lcu = private->lcu;
587 spin_lock_irqsave(&lcu->lock, flags);
588 _remove_device_from_lcu(lcu, device);
589 spin_unlock_irqrestore(&lcu->lock, flags);
590 return 0;
591}
592
593struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
594{
595
596 struct dasd_device *alias_device;
597 struct alias_pav_group *group;
598 struct alias_lcu *lcu;
599 struct dasd_eckd_private *private, *alias_priv;
600 unsigned long flags;
601
602 private = (struct dasd_eckd_private *) base_device->private;
603 group = private->pavgroup;
604 lcu = private->lcu;
605 if (!group || !lcu)
606 return NULL;
607 if (lcu->pav == NO_PAV ||
608 lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
609 return NULL;
610
611 spin_lock_irqsave(&lcu->lock, flags);
612 alias_device = group->next;
613 if (!alias_device) {
614 if (list_empty(&group->aliaslist)) {
615 spin_unlock_irqrestore(&lcu->lock, flags);
616 return NULL;
617 } else {
618 alias_device = list_first_entry(&group->aliaslist,
619 struct dasd_device,
620 alias_list);
621 }
622 }
623 if (list_is_last(&alias_device->alias_list, &group->aliaslist))
624 group->next = list_first_entry(&group->aliaslist,
625 struct dasd_device, alias_list);
626 else
627 group->next = list_first_entry(&alias_device->alias_list,
628 struct dasd_device, alias_list);
629 spin_unlock_irqrestore(&lcu->lock, flags);
630 alias_priv = (struct dasd_eckd_private *) alias_device->private;
631 if ((alias_priv->count < private->count) && !alias_device->stopped)
632 return alias_device;
633 else
634 return NULL;
635}
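
For orientation, a hedged sketch of how a request build path could consult this function on each I/O; the cqr field names (startdev, memdev, block) follow the rest of this patch, while the surrounding build logic is assumed:

	struct dasd_device *startdev;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;	/* no usable alias, start on the base */
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = base->block;
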
636
637/*
638 * Summary unit check handling depends on the way alias devices
 639 * are handled, so it is done here rather than in dasd_eckd.c
640 */
641static int reset_summary_unit_check(struct alias_lcu *lcu,
642 struct dasd_device *device,
643 char reason)
644{
645 struct dasd_ccw_req *cqr;
646 int rc = 0;
647
648 cqr = lcu->rsu_cqr;
649 strncpy((char *) &cqr->magic, "ECKD", 4);
650 ASCEBC((char *) &cqr->magic, 4);
651 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK;
 652 cqr->cpaddr->flags = 0;
653 cqr->cpaddr->count = 16;
654 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
655 ((char *)cqr->data)[0] = reason;
656
657 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
658 cqr->retries = 255; /* set retry counter to enable basic ERP */
659 cqr->startdev = device;
660 cqr->memdev = device;
661 cqr->block = NULL;
662 cqr->expires = 5 * HZ;
663 cqr->buildclk = get_clock();
664 cqr->status = DASD_CQR_FILLED;
665
666 rc = dasd_sleep_on_immediatly(cqr);
667 return rc;
668}
669
670static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
671{
672 struct alias_pav_group *pavgroup;
673 struct dasd_device *device;
674 struct dasd_eckd_private *private;
675
 676 /* active and inactive lists can contain alias as well as base devices */
677 list_for_each_entry(device, &lcu->active_devices, alias_list) {
678 private = (struct dasd_eckd_private *) device->private;
679 if (private->uid.type != UA_BASE_DEVICE)
680 continue;
681 dasd_schedule_block_bh(device->block);
682 dasd_schedule_device_bh(device);
683 }
684 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
685 private = (struct dasd_eckd_private *) device->private;
686 if (private->uid.type != UA_BASE_DEVICE)
687 continue;
688 dasd_schedule_block_bh(device->block);
689 dasd_schedule_device_bh(device);
690 }
691 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
692 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
693 dasd_schedule_block_bh(device->block);
694 dasd_schedule_device_bh(device);
695 }
696 }
697}
698
699static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
700{
701 struct alias_pav_group *pavgroup;
702 struct dasd_device *device, *temp;
703 struct dasd_eckd_private *private;
704 int rc;
705 unsigned long flags;
706 LIST_HEAD(active);
707
708 /*
 709 * The problem here is that dasd_flush_device_queue may wait
 710 * for the termination of a request. We can't keep
711 * the lcu lock during that time, so we must assume that
712 * the lists may have changed.
713 * Idea: first gather all active alias devices in a separate list,
714 * then flush the first element of this list unlocked, and afterwards
715 * check if it is still on the list before moving it to the
716 * active_devices list.
717 */
718
719 spin_lock_irqsave(&lcu->lock, flags);
720 list_for_each_entry_safe(device, temp, &lcu->active_devices,
721 alias_list) {
722 private = (struct dasd_eckd_private *) device->private;
723 if (private->uid.type == UA_BASE_DEVICE)
724 continue;
725 list_move(&device->alias_list, &active);
726 }
727
728 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
729 list_splice_init(&pavgroup->aliaslist, &active);
730 }
731 while (!list_empty(&active)) {
732 device = list_first_entry(&active, struct dasd_device,
733 alias_list);
734 spin_unlock_irqrestore(&lcu->lock, flags);
735 rc = dasd_flush_device_queue(device);
736 spin_lock_irqsave(&lcu->lock, flags);
737 /*
738 * only move device around if it wasn't moved away while we
739 * were waiting for the flush
740 */
741 if (device == list_first_entry(&active,
742 struct dasd_device, alias_list))
743 list_move(&device->alias_list, &lcu->active_devices);
744 }
745 spin_unlock_irqrestore(&lcu->lock, flags);
746}
747
748/*
749 * This function is called in interrupt context, so the
750 * cdev lock for device is already locked!
751 */
752static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
753 struct dasd_device *device)
754{
755 struct alias_pav_group *pavgroup;
756 struct dasd_device *pos;
757
758 list_for_each_entry(pos, &lcu->active_devices, alias_list) {
759 if (pos != device)
760 spin_lock(get_ccwdev_lock(pos->cdev));
761 pos->stopped |= DASD_STOPPED_SU;
762 if (pos != device)
763 spin_unlock(get_ccwdev_lock(pos->cdev));
764 }
765 list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
766 if (pos != device)
767 spin_lock(get_ccwdev_lock(pos->cdev));
768 pos->stopped |= DASD_STOPPED_SU;
769 if (pos != device)
770 spin_unlock(get_ccwdev_lock(pos->cdev));
771 }
772 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
773 list_for_each_entry(pos, &pavgroup->baselist, alias_list) {
774 if (pos != device)
775 spin_lock(get_ccwdev_lock(pos->cdev));
776 pos->stopped |= DASD_STOPPED_SU;
777 if (pos != device)
778 spin_unlock(get_ccwdev_lock(pos->cdev));
779 }
780 list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
781 if (pos != device)
782 spin_lock(get_ccwdev_lock(pos->cdev));
783 pos->stopped |= DASD_STOPPED_SU;
784 if (pos != device)
785 spin_unlock(get_ccwdev_lock(pos->cdev));
786 }
787 }
788}
789
790static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
791{
792 struct alias_pav_group *pavgroup;
793 struct dasd_device *device;
794 unsigned long flags;
795
796 list_for_each_entry(device, &lcu->active_devices, alias_list) {
797 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
798 device->stopped &= ~DASD_STOPPED_SU;
799 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
800 }
801
802 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
803 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
804 device->stopped &= ~DASD_STOPPED_SU;
805 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
806 }
807
808 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
809 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
810 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
811 device->stopped &= ~DASD_STOPPED_SU;
812 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
813 flags);
814 }
815 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
816 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
817 device->stopped &= ~DASD_STOPPED_SU;
818 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
819 flags);
820 }
821 }
822}
823
824static void summary_unit_check_handling_work(struct work_struct *work)
825{
826 struct alias_lcu *lcu;
827 struct summary_unit_check_work_data *suc_data;
828 unsigned long flags;
829 struct dasd_device *device;
830
831 suc_data = container_of(work, struct summary_unit_check_work_data,
832 worker);
833 lcu = container_of(suc_data, struct alias_lcu, suc_data);
834 device = suc_data->device;
835
836 /* 1. flush alias devices */
837 flush_all_alias_devices_on_lcu(lcu);
838
839 /* 2. reset summary unit check */
840 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
841 device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
842 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
843 reset_summary_unit_check(lcu, device, suc_data->reason);
844
845 spin_lock_irqsave(&lcu->lock, flags);
846 _unstop_all_devices_on_lcu(lcu);
847 _restart_all_base_devices_on_lcu(lcu);
848 /* 3. read new alias configuration */
849 _schedule_lcu_update(lcu, device);
850 lcu->suc_data.device = NULL;
851 spin_unlock_irqrestore(&lcu->lock, flags);
852}
853
854/*
855 * note: this will be called from int handler context (cdev locked)
856 */
857void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
858 struct irb *irb)
859{
860 struct alias_lcu *lcu;
861 char reason;
862 struct dasd_eckd_private *private;
863
864 private = (struct dasd_eckd_private *) device->private;
865
866 reason = irb->ecw[8];
867 DEV_MESSAGE(KERN_WARNING, device, "%s %x",
868 "eckd handle summary unit check: reason", reason);
869
870 lcu = private->lcu;
871 if (!lcu) {
872 DEV_MESSAGE(KERN_WARNING, device, "%s",
873 "device not ready to handle summary"
874 " unit check (no lcu structure)");
875 return;
876 }
877 spin_lock(&lcu->lock);
878 _stop_all_devices_on_lcu(lcu, device);
879 /* prepare for lcu_update */
880 private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
 881 /* If this device is about to be removed, just return and wait for
882 * the next interrupt on a different device
883 */
884 if (list_empty(&device->alias_list)) {
885 DEV_MESSAGE(KERN_WARNING, device, "%s",
886 "device is in offline processing,"
887 " don't do summary unit check handling");
888 spin_unlock(&lcu->lock);
889 return;
890 }
891 if (lcu->suc_data.device) {
892 /* already scheduled or running */
893 DEV_MESSAGE(KERN_WARNING, device, "%s",
894 "previous instance of summary unit check worker"
895 " still pending");
896 spin_unlock(&lcu->lock);
 896 return;
898 }
899 lcu->suc_data.reason = reason;
900 lcu->suc_data.device = device;
901 spin_unlock(&lcu->lock);
902 schedule_work(&lcu->suc_data.worker);
903};
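
Taken together, summary unit check handling runs in two stages: dasd_alias_handle_summary_unit_check runs in interrupt context, stops every device on the lcu, marks the unit address configuration stale and schedules the worker; summary_unit_check_handling_work then flushes the alias queues, issues the reset via reset_summary_unit_check, unstops and restarts the base devices, and finally schedules the lcu update that rebuilds the PAV groups.
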
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 0c67258fb9ec..f4fb40257348 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,22 +49,6 @@ struct dasd_devmap {
49}; 49};
50 50
51/* 51/*
52 * dasd_server_ssid_map contains a globally unique storage server subsystem ID.
53 * dasd_server_ssid_list contains the list of all subsystem IDs accessed by
54 * the DASD device driver.
55 */
56struct dasd_server_ssid_map {
57 struct list_head list;
58 struct system_id {
59 char vendor[4];
60 char serial[15];
61 __u16 ssid;
62 } sid;
63};
64
65static struct list_head dasd_server_ssid_list;
66
67/*
68 * Parameter parsing functions for dasd= parameter. The syntax is: 52 * Parameter parsing functions for dasd= parameter. The syntax is:
69 * <devno> : (0x)?[0-9a-fA-F]+ 53 * <devno> : (0x)?[0-9a-fA-F]+
 70 * <busid> : [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+ 54 * <busid> : [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
@@ -721,8 +705,9 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
721 devmap->features &= ~DASD_FEATURE_READONLY; 705 devmap->features &= ~DASD_FEATURE_READONLY;
722 if (devmap->device) 706 if (devmap->device)
723 devmap->device->features = devmap->features; 707 devmap->device->features = devmap->features;
724 if (devmap->device && devmap->device->gdp) 708 if (devmap->device && devmap->device->block
725 set_disk_ro(devmap->device->gdp, val); 709 && devmap->device->block->gdp)
710 set_disk_ro(devmap->device->block->gdp, val);
726 spin_unlock(&dasd_devmap_lock); 711 spin_unlock(&dasd_devmap_lock);
727 return count; 712 return count;
728} 713}
@@ -893,12 +878,16 @@ dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
893 878
894 devmap = dasd_find_busid(dev->bus_id); 879 devmap = dasd_find_busid(dev->bus_id);
895 spin_lock(&dasd_devmap_lock); 880 spin_lock(&dasd_devmap_lock);
896 if (!IS_ERR(devmap)) 881 if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
897 alias = devmap->uid.alias; 882 spin_unlock(&dasd_devmap_lock);
883 return sprintf(buf, "0\n");
884 }
885 if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
886 devmap->uid.type == UA_HYPER_PAV_ALIAS)
887 alias = 1;
898 else 888 else
899 alias = 0; 889 alias = 0;
900 spin_unlock(&dasd_devmap_lock); 890 spin_unlock(&dasd_devmap_lock);
901
902 return sprintf(buf, alias ? "1\n" : "0\n"); 891 return sprintf(buf, alias ? "1\n" : "0\n");
903} 892}
904 893
@@ -930,19 +919,36 @@ static ssize_t
930dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) 919dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
931{ 920{
932 struct dasd_devmap *devmap; 921 struct dasd_devmap *devmap;
933 char uid[UID_STRLEN]; 922 char uid_string[UID_STRLEN];
923 char ua_string[3];
924 struct dasd_uid *uid;
934 925
935 devmap = dasd_find_busid(dev->bus_id); 926 devmap = dasd_find_busid(dev->bus_id);
936 spin_lock(&dasd_devmap_lock); 927 spin_lock(&dasd_devmap_lock);
937 if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0) 928 if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
938 snprintf(uid, sizeof(uid), "%s.%s.%04x.%02x", 929 spin_unlock(&dasd_devmap_lock);
939 devmap->uid.vendor, devmap->uid.serial, 930 return sprintf(buf, "\n");
940 devmap->uid.ssid, devmap->uid.unit_addr); 931 }
941 else 932 uid = &devmap->uid;
942 uid[0] = 0; 933 switch (uid->type) {
934 case UA_BASE_DEVICE:
935 sprintf(ua_string, "%02x", uid->real_unit_addr);
936 break;
937 case UA_BASE_PAV_ALIAS:
938 sprintf(ua_string, "%02x", uid->base_unit_addr);
939 break;
940 case UA_HYPER_PAV_ALIAS:
941 sprintf(ua_string, "xx");
942 break;
943 default:
944 /* should not happen, treat like base device */
945 sprintf(ua_string, "%02x", uid->real_unit_addr);
946 break;
947 }
948 snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s",
949 uid->vendor, uid->serial, uid->ssid, ua_string);
943 spin_unlock(&dasd_devmap_lock); 950 spin_unlock(&dasd_devmap_lock);
944 951 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
945 return snprintf(buf, PAGE_SIZE, "%s\n", uid);
946} 952}
947 953
948static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); 954static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
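
To illustrate the format built above (all values invented): a base device would read back as something like "IBM.75000000000001.0123.1a", a base-PAV alias reports the unit address of its base device instead of its own, and a hyper-PAV alias, which has no fixed base, reads back with the literal placeholder, e.g. "IBM.75000000000001.0123.xx".
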
@@ -1040,39 +1046,16 @@ int
1040dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) 1046dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
1041{ 1047{
1042 struct dasd_devmap *devmap; 1048 struct dasd_devmap *devmap;
1043 struct dasd_server_ssid_map *srv, *tmp;
1044 1049
1045 devmap = dasd_find_busid(cdev->dev.bus_id); 1050 devmap = dasd_find_busid(cdev->dev.bus_id);
1046 if (IS_ERR(devmap)) 1051 if (IS_ERR(devmap))
1047 return PTR_ERR(devmap); 1052 return PTR_ERR(devmap);
1048 1053
1049 /* generate entry for server_ssid_map */
1050 srv = (struct dasd_server_ssid_map *)
1051 kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL);
1052 if (!srv)
1053 return -ENOMEM;
1054 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
1055 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
1056 srv->sid.ssid = uid->ssid;
1057
1058 /* server is already contained ? */
1059 spin_lock(&dasd_devmap_lock); 1054 spin_lock(&dasd_devmap_lock);
1060 devmap->uid = *uid; 1055 devmap->uid = *uid;
1061 list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
1062 if (!memcmp(&srv->sid, &tmp->sid,
1063 sizeof(struct system_id))) {
1064 kfree(srv);
1065 srv = NULL;
1066 break;
1067 }
1068 }
1069
1070 /* add servermap to serverlist */
1071 if (srv)
1072 list_add(&srv->list, &dasd_server_ssid_list);
1073 spin_unlock(&dasd_devmap_lock); 1056 spin_unlock(&dasd_devmap_lock);
1074 1057
1075 return (srv ? 1 : 0); 1058 return 0;
1076} 1059}
1077EXPORT_SYMBOL_GPL(dasd_set_uid); 1060EXPORT_SYMBOL_GPL(dasd_set_uid);
1078 1061
@@ -1138,9 +1121,6 @@ dasd_devmap_init(void)
1138 dasd_max_devindex = 0; 1121 dasd_max_devindex = 0;
1139 for (i = 0; i < 256; i++) 1122 for (i = 0; i < 256; i++)
1140 INIT_LIST_HEAD(&dasd_hashlists[i]); 1123 INIT_LIST_HEAD(&dasd_hashlists[i]);
1141
1142 /* Initialize servermap structure. */
1143 INIT_LIST_HEAD(&dasd_server_ssid_list);
1144 return 0; 1124 return 0;
1145} 1125}
1146 1126
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 571320ab9e1a..d91df38ee4f7 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -142,7 +142,7 @@ dasd_diag_erp(struct dasd_device *device)
142 int rc; 142 int rc;
143 143
144 mdsk_term_io(device); 144 mdsk_term_io(device);
145 rc = mdsk_init_io(device, device->bp_block, 0, NULL); 145 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
146 if (rc) 146 if (rc)
147 DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, " 147 DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, "
148 "rc=%d", rc); 148 "rc=%d", rc);
@@ -158,11 +158,11 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
158 struct dasd_diag_req *dreq; 158 struct dasd_diag_req *dreq;
159 int rc; 159 int rc;
160 160
161 device = cqr->device; 161 device = cqr->startdev;
162 if (cqr->retries < 0) { 162 if (cqr->retries < 0) {
163 DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p " 163 DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p "
164 "- no retry left)", cqr); 164 "- no retry left)", cqr);
165 cqr->status = DASD_CQR_FAILED; 165 cqr->status = DASD_CQR_ERROR;
166 return -EIO; 166 return -EIO;
167 } 167 }
168 private = (struct dasd_diag_private *) device->private; 168 private = (struct dasd_diag_private *) device->private;
@@ -184,7 +184,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
184 switch (rc) { 184 switch (rc) {
185 case 0: /* Synchronous I/O finished successfully */ 185 case 0: /* Synchronous I/O finished successfully */
186 cqr->stopclk = get_clock(); 186 cqr->stopclk = get_clock();
187 cqr->status = DASD_CQR_DONE; 187 cqr->status = DASD_CQR_SUCCESS;
188 /* Indicate to calling function that only a dasd_schedule_bh() 188 /* Indicate to calling function that only a dasd_schedule_bh()
189 and no timer is needed */ 189 and no timer is needed */
190 rc = -EACCES; 190 rc = -EACCES;
@@ -209,12 +209,12 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
209{ 209{
210 struct dasd_device *device; 210 struct dasd_device *device;
211 211
212 device = cqr->device; 212 device = cqr->startdev;
213 mdsk_term_io(device); 213 mdsk_term_io(device);
214 mdsk_init_io(device, device->bp_block, 0, NULL); 214 mdsk_init_io(device, device->block->bp_block, 0, NULL);
215 cqr->status = DASD_CQR_CLEAR; 215 cqr->status = DASD_CQR_CLEAR_PENDING;
216 cqr->stopclk = get_clock(); 216 cqr->stopclk = get_clock();
217 dasd_schedule_bh(device); 217 dasd_schedule_device_bh(device);
218 return 0; 218 return 0;
219} 219}
220 220
@@ -247,7 +247,7 @@ dasd_ext_handler(__u16 code)
247 return; 247 return;
248 } 248 }
249 cqr = (struct dasd_ccw_req *) ip; 249 cqr = (struct dasd_ccw_req *) ip;
250 device = (struct dasd_device *) cqr->device; 250 device = (struct dasd_device *) cqr->startdev;
251 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 251 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
252 DEV_MESSAGE(KERN_WARNING, device, 252 DEV_MESSAGE(KERN_WARNING, device,
253 " magic number of dasd_ccw_req 0x%08X doesn't" 253 " magic number of dasd_ccw_req 0x%08X doesn't"
@@ -260,10 +260,10 @@ dasd_ext_handler(__u16 code)
260 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 260 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
261 261
262 /* Check for a pending clear operation */ 262 /* Check for a pending clear operation */
263 if (cqr->status == DASD_CQR_CLEAR) { 263 if (cqr->status == DASD_CQR_CLEAR_PENDING) {
264 cqr->status = DASD_CQR_QUEUED; 264 cqr->status = DASD_CQR_CLEARED;
265 dasd_clear_timer(device); 265 dasd_device_clear_timer(device);
266 dasd_schedule_bh(device); 266 dasd_schedule_device_bh(device);
267 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 267 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
268 return; 268 return;
269 } 269 }
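
The status renames running through these dasd_diag hunks (DASD_CQR_FAILED to DASD_CQR_ERROR, DASD_CQR_DONE to DASD_CQR_SUCCESS, DASD_CQR_CLEAR to DASD_CQR_CLEAR_PENDING and DASD_CQR_CLEARED) belong to the reworked request state machine of this series. A rough sketch of the states as they surface in this patch; the authoritative definition lives in drivers/s390/block/dasd_int.h, which is not part of the excerpt shown here:

    /* Illustrative only: names collected from the hunks in this patch,
     * not the real enum from dasd_int.h. */
    enum dasd_cqr_status_sketch {
        DASD_CQR_FILLED,        /* built and ready for start_IO */
        DASD_CQR_QUEUED,        /* waiting on the device queue */
        DASD_CQR_ERROR,         /* start_IO gave up (was DASD_CQR_FAILED) */
        DASD_CQR_SUCCESS,       /* I/O completed (was DASD_CQR_DONE) */
        DASD_CQR_CLEAR_PENDING, /* terminate requested (was DASD_CQR_CLEAR) */
        DASD_CQR_CLEARED,       /* terminate seen by the ext handler */
        DASD_CQR_DONE,          /* final state tested in the free_cp paths */
    };
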
@@ -272,11 +272,11 @@ dasd_ext_handler(__u16 code)
272 272
273 expires = 0; 273 expires = 0;
274 if (status == 0) { 274 if (status == 0) {
275 cqr->status = DASD_CQR_DONE; 275 cqr->status = DASD_CQR_SUCCESS;
276 /* Start first request on queue if possible -> fast_io. */ 276 /* Start first request on queue if possible -> fast_io. */
277 if (!list_empty(&device->ccw_queue)) { 277 if (!list_empty(&device->ccw_queue)) {
278 next = list_entry(device->ccw_queue.next, 278 next = list_entry(device->ccw_queue.next,
279 struct dasd_ccw_req, list); 279 struct dasd_ccw_req, devlist);
280 if (next->status == DASD_CQR_QUEUED) { 280 if (next->status == DASD_CQR_QUEUED) {
281 rc = dasd_start_diag(next); 281 rc = dasd_start_diag(next);
282 if (rc == 0) 282 if (rc == 0)
@@ -296,10 +296,10 @@ dasd_ext_handler(__u16 code)
296 } 296 }
297 297
298 if (expires != 0) 298 if (expires != 0)
299 dasd_set_timer(device, expires); 299 dasd_device_set_timer(device, expires);
300 else 300 else
301 dasd_clear_timer(device); 301 dasd_device_clear_timer(device);
302 dasd_schedule_bh(device); 302 dasd_schedule_device_bh(device);
303 303
304 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 304 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
305} 305}
@@ -309,6 +309,7 @@ dasd_ext_handler(__u16 code)
309static int 309static int
310dasd_diag_check_device(struct dasd_device *device) 310dasd_diag_check_device(struct dasd_device *device)
311{ 311{
312 struct dasd_block *block;
312 struct dasd_diag_private *private; 313 struct dasd_diag_private *private;
313 struct dasd_diag_characteristics *rdc_data; 314 struct dasd_diag_characteristics *rdc_data;
314 struct dasd_diag_bio bio; 315 struct dasd_diag_bio bio;
@@ -328,6 +329,16 @@ dasd_diag_check_device(struct dasd_device *device)
328 ccw_device_get_id(device->cdev, &private->dev_id); 329 ccw_device_get_id(device->cdev, &private->dev_id);
329 device->private = (void *) private; 330 device->private = (void *) private;
330 } 331 }
332 block = dasd_alloc_block();
333 if (IS_ERR(block)) {
334 DEV_MESSAGE(KERN_WARNING, device, "%s",
335 "could not allocate dasd block structure");
336 kfree(device->private);
337 return PTR_ERR(block);
338 }
339 device->block = block;
340 block->base = device;
341
331 /* Read Device Characteristics */ 342 /* Read Device Characteristics */
332 rdc_data = (void *) &(private->rdc_data); 343 rdc_data = (void *) &(private->rdc_data);
333 rdc_data->dev_nr = private->dev_id.devno; 344 rdc_data->dev_nr = private->dev_id.devno;
@@ -409,14 +420,14 @@ dasd_diag_check_device(struct dasd_device *device)
409 sizeof(DASD_DIAG_CMS1)) == 0) { 420 sizeof(DASD_DIAG_CMS1)) == 0) {
410 /* get formatted blocksize from label block */ 421 /* get formatted blocksize from label block */
411 bsize = (unsigned int) label->block_size; 422 bsize = (unsigned int) label->block_size;
412 device->blocks = (unsigned long) label->block_count; 423 block->blocks = (unsigned long) label->block_count;
413 } else 424 } else
414 device->blocks = end_block; 425 block->blocks = end_block;
415 device->bp_block = bsize; 426 block->bp_block = bsize;
416 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 427 block->s2b_shift = 0; /* bits to shift 512 to get a block */
417 for (sb = 512; sb < bsize; sb = sb << 1) 428 for (sb = 512; sb < bsize; sb = sb << 1)
418 device->s2b_shift++; 429 block->s2b_shift++;
419 rc = mdsk_init_io(device, device->bp_block, 0, NULL); 430 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
420 if (rc) { 431 if (rc) {
421 DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization " 432 DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization "
422 "failed (rc=%d)", rc); 433 "failed (rc=%d)", rc);
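
The loop above derives s2b_shift, the number of bits needed to turn a count of 512-byte sectors into a count of device blocks: bp_block is always a power of two, so the loop simply computes log2(bsize/512). A standalone illustration (plain userspace C, block size chosen as an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int bsize = 4096;  /* formatted block size from the label */
        unsigned int s2b_shift = 0; /* bits to shift 512 to get a block */
        unsigned int sb;

        /* Same loop as in dasd_diag_check_device. */
        for (sb = 512; sb < bsize; sb = sb << 1)
            s2b_shift++;

        /* 4096 = 512 << 3, so s2b_shift ends up as 3:
         * eight 512-byte sectors per block. */
        printf("bsize=%u -> s2b_shift=%u\n", bsize, s2b_shift);
        return 0;
    }
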
@@ -424,9 +435,9 @@ dasd_diag_check_device(struct dasd_device *device)
424 } else { 435 } else {
425 DEV_MESSAGE(KERN_INFO, device, 436 DEV_MESSAGE(KERN_INFO, device,
426 "(%ld B/blk): %ldkB", 437 "(%ld B/blk): %ldkB",
427 (unsigned long) device->bp_block, 438 (unsigned long) block->bp_block,
428 (unsigned long) (device->blocks << 439 (unsigned long) (block->blocks <<
429 device->s2b_shift) >> 1); 440 block->s2b_shift) >> 1);
430 } 441 }
431out: 442out:
432 free_page((long) label); 443 free_page((long) label);
@@ -436,22 +447,16 @@ out:
436/* Fill in virtual disk geometry for device. Return zero on success, non-zero 447/* Fill in virtual disk geometry for device. Return zero on success, non-zero
437 * otherwise. */ 448 * otherwise. */
438static int 449static int
439dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 450dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
440{ 451{
441 if (dasd_check_blocksize(device->bp_block) != 0) 452 if (dasd_check_blocksize(block->bp_block) != 0)
442 return -EINVAL; 453 return -EINVAL;
443 geo->cylinders = (device->blocks << device->s2b_shift) >> 10; 454 geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
444 geo->heads = 16; 455 geo->heads = 16;
445 geo->sectors = 128 >> device->s2b_shift; 456 geo->sectors = 128 >> block->s2b_shift;
446 return 0; 457 return 0;
447} 458}
448 459
449static dasd_era_t
450dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat)
451{
452 return dasd_era_fatal;
453}
454
455static dasd_erp_fn_t 460static dasd_erp_fn_t
456dasd_diag_erp_action(struct dasd_ccw_req * cqr) 461dasd_diag_erp_action(struct dasd_ccw_req * cqr)
457{ 462{
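
dasd_diag_fill_geometry above invents a geometry for partitioning tools, since a DIAG minidisk has no physical one: 16 heads, 128 >> s2b_shift blocks per track, and a cylinder count derived from the total sector count. A worked example with hypothetical values (a 2 GiB disk with 4 KiB blocks, so s2b_shift = 3); the figures only need to be stable, they are not meant to multiply back to the exact size:

    #include <stdio.h>

    int main(void)
    {
        unsigned long blocks = 524288; /* hypothetical: 2 GiB in 4 KiB blocks */
        unsigned int s2b_shift = 3;    /* 4096 = 512 << 3 */

        /* Same expressions as dasd_diag_fill_geometry. */
        unsigned long cylinders = (blocks << s2b_shift) >> 10; /* 4096 */
        unsigned int heads = 16;
        unsigned int sectors = 128 >> s2b_shift;               /* 16 */

        printf("C/H/S = %lu/%u/%u\n", cylinders, heads, sectors);
        return 0;
    }
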
@@ -466,8 +471,9 @@ dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
466 471
467/* Create DASD request from block device request. Return pointer to new 472/* Create DASD request from block device request. Return pointer to new
468 * request on success, ERR_PTR otherwise. */ 473 * request on success, ERR_PTR otherwise. */
469static struct dasd_ccw_req * 474static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
470dasd_diag_build_cp(struct dasd_device * device, struct request *req) 475 struct dasd_block *block,
476 struct request *req)
471{ 477{
472 struct dasd_ccw_req *cqr; 478 struct dasd_ccw_req *cqr;
473 struct dasd_diag_req *dreq; 479 struct dasd_diag_req *dreq;
@@ -486,17 +492,17 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
486 rw_cmd = MDSK_WRITE_REQ; 492 rw_cmd = MDSK_WRITE_REQ;
487 else 493 else
488 return ERR_PTR(-EINVAL); 494 return ERR_PTR(-EINVAL);
489 blksize = device->bp_block; 495 blksize = block->bp_block;
490 /* Calculate record id of first and last block. */ 496 /* Calculate record id of first and last block. */
491 first_rec = req->sector >> device->s2b_shift; 497 first_rec = req->sector >> block->s2b_shift;
492 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 498 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
493 /* Check struct bio and count the number of blocks for the request. */ 499 /* Check struct bio and count the number of blocks for the request. */
494 count = 0; 500 count = 0;
495 rq_for_each_segment(bv, req, iter) { 501 rq_for_each_segment(bv, req, iter) {
496 if (bv->bv_len & (blksize - 1)) 502 if (bv->bv_len & (blksize - 1))
497 /* Fba can only do full blocks. */ 503 /* Fba can only do full blocks. */
498 return ERR_PTR(-EINVAL); 504 return ERR_PTR(-EINVAL);
499 count += bv->bv_len >> (device->s2b_shift + 9); 505 count += bv->bv_len >> (block->s2b_shift + 9);
500 } 506 }
501 /* Paranoia. */ 507 /* Paranoia. */
502 if (count != last_rec - first_rec + 1) 508 if (count != last_rec - first_rec + 1)
@@ -505,7 +511,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
505 datasize = sizeof(struct dasd_diag_req) + 511 datasize = sizeof(struct dasd_diag_req) +
506 count*sizeof(struct dasd_diag_bio); 512 count*sizeof(struct dasd_diag_bio);
507 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, 513 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
508 datasize, device); 514 datasize, memdev);
509 if (IS_ERR(cqr)) 515 if (IS_ERR(cqr))
510 return cqr; 516 return cqr;
511 517
@@ -529,7 +535,9 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
529 cqr->buildclk = get_clock(); 535 cqr->buildclk = get_clock();
530 if (req->cmd_flags & REQ_FAILFAST) 536 if (req->cmd_flags & REQ_FAILFAST)
531 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 537 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
532 cqr->device = device; 538 cqr->startdev = memdev;
539 cqr->memdev = memdev;
540 cqr->block = block;
533 cqr->expires = DIAG_TIMEOUT; 541 cqr->expires = DIAG_TIMEOUT;
534 cqr->status = DASD_CQR_FILLED; 542 cqr->status = DASD_CQR_FILLED;
535 return cqr; 543 return cqr;
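
The bv->bv_len & (blksize - 1) test in the hunk above is the usual power-of-two alignment idiom: bp_block is always a power of two, so the AND is non-zero exactly when a segment is not a whole number of blocks. A minimal demonstration:

    #include <stdio.h>

    /* 1 if len is a multiple of blksize; blksize must be a power of two. */
    static int whole_blocks(unsigned int len, unsigned int blksize)
    {
        return (len & (blksize - 1)) == 0;
    }

    int main(void)
    {
        /* 8192 is exactly two 4096-byte blocks, 6144 is one and a half. */
        printf("%d %d\n", whole_blocks(8192, 4096),  /* prints 1 */
                          whole_blocks(6144, 4096)); /* prints 0 */
        return 0;
    }
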
@@ -543,10 +551,15 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
543 int status; 551 int status;
544 552
545 status = cqr->status == DASD_CQR_DONE; 553 status = cqr->status == DASD_CQR_DONE;
546 dasd_sfree_request(cqr, cqr->device); 554 dasd_sfree_request(cqr, cqr->memdev);
547 return status; 555 return status;
548} 556}
549 557
558static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
559{
560 cqr->status = DASD_CQR_FILLED;
561};
562
550/* Fill in IOCTL data for device. */ 563/* Fill in IOCTL data for device. */
551static int 564static int
552dasd_diag_fill_info(struct dasd_device * device, 565dasd_diag_fill_info(struct dasd_device * device,
@@ -583,7 +596,7 @@ static struct dasd_discipline dasd_diag_discipline = {
583 .fill_geometry = dasd_diag_fill_geometry, 596 .fill_geometry = dasd_diag_fill_geometry,
584 .start_IO = dasd_start_diag, 597 .start_IO = dasd_start_diag,
585 .term_IO = dasd_diag_term_IO, 598 .term_IO = dasd_diag_term_IO,
586 .examine_error = dasd_diag_examine_error, 599 .handle_terminated_request = dasd_diag_handle_terminated_request,
587 .erp_action = dasd_diag_erp_action, 600 .erp_action = dasd_diag_erp_action,
588 .erp_postaction = dasd_diag_erp_postaction, 601 .erp_postaction = dasd_diag_erp_postaction,
589 .build_cp = dasd_diag_build_cp, 602 .build_cp = dasd_diag_build_cp,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 44adf8496bda..61f16937c1e0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -52,16 +52,6 @@ MODULE_LICENSE("GPL");
52 52
53static struct dasd_discipline dasd_eckd_discipline; 53static struct dasd_discipline dasd_eckd_discipline;
54 54
55struct dasd_eckd_private {
56 struct dasd_eckd_characteristics rdc_data;
57 struct dasd_eckd_confdata conf_data;
58 struct dasd_eckd_path path_data;
59 struct eckd_count count_area[5];
60 int init_cqr_status;
61 int uses_cdl;
62 struct attrib_data_t attrib; /* e.g. cache operations */
63};
64
65/* The ccw bus type uses this table to find devices that it sends to 55/* The ccw bus type uses this table to find devices that it sends to
66 * dasd_eckd_probe */ 56 * dasd_eckd_probe */
67static struct ccw_device_id dasd_eckd_ids[] = { 57static struct ccw_device_id dasd_eckd_ids[] = {
@@ -188,7 +178,7 @@ check_XRC (struct ccw1 *de_ccw,
188 if (rc == -ENOSYS || rc == -EACCES) 178 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0; 179 rc = 0;
190 180
191 de_ccw->count = sizeof (struct DE_eckd_data); 181 de_ccw->count = sizeof(struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI; 182 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc; 183 return rc;
194} 184}
@@ -208,7 +198,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
208 ccw->count = 16; 198 ccw->count = 16;
209 ccw->cda = (__u32) __pa(data); 199 ccw->cda = (__u32) __pa(data);
210 200
211 memset(data, 0, sizeof (struct DE_eckd_data)); 201 memset(data, 0, sizeof(struct DE_eckd_data));
212 switch (cmd) { 202 switch (cmd) {
213 case DASD_ECKD_CCW_READ_HOME_ADDRESS: 203 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
214 case DASD_ECKD_CCW_READ_RECORD_ZERO: 204 case DASD_ECKD_CCW_READ_RECORD_ZERO:
@@ -280,6 +270,132 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
280 return rc; 270 return rc;
281} 271}
282 272
273static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
274 struct dasd_device *device)
275{
276 struct dasd_eckd_private *private;
277 int rc;
278
279 private = (struct dasd_eckd_private *) device->private;
280 if (!private->rdc_data.facilities.XRC_supported)
281 return 0;
282
283 /* switch on System Time Stamp - needed for XRC Support */
284 pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid' */
285 pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */
286 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
287
288 rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time);
289 /* Ignore return code if sync clock is switched off. */
290 if (rc == -ENOSYS || rc == -EACCES)
291 rc = 0;
292 return rc;
293}
294
295static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
296 int totrk, int cmd, struct dasd_device *basedev,
297 struct dasd_device *startdev)
298{
299 struct dasd_eckd_private *basepriv, *startpriv;
300 struct DE_eckd_data *data;
301 struct ch_t geo, beg, end;
302 int rc = 0;
303
304 basepriv = (struct dasd_eckd_private *) basedev->private;
305 startpriv = (struct dasd_eckd_private *) startdev->private;
306 data = &pfxdata->define_extend;
307
308 ccw->cmd_code = DASD_ECKD_CCW_PFX;
309 ccw->flags = 0;
310 ccw->count = sizeof(*pfxdata);
311 ccw->cda = (__u32) __pa(pfxdata);
312
313 memset(pfxdata, 0, sizeof(*pfxdata));
314 /* prefix data */
315 pfxdata->format = 0;
316 pfxdata->base_address = basepriv->conf_data.ned1.unit_addr;
317 pfxdata->base_lss = basepriv->conf_data.ned1.ID;
318 pfxdata->validity.define_extend = 1;
319
320 /* private uid is kept up to date, conf_data may be outdated */
321 if (startpriv->uid.type != UA_BASE_DEVICE) {
322 pfxdata->validity.verify_base = 1;
323 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
324 pfxdata->validity.hyper_pav = 1;
325 }
326
327 /* define extend data (mostly)*/
328 switch (cmd) {
329 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
330 case DASD_ECKD_CCW_READ_RECORD_ZERO:
331 case DASD_ECKD_CCW_READ:
332 case DASD_ECKD_CCW_READ_MT:
333 case DASD_ECKD_CCW_READ_CKD:
334 case DASD_ECKD_CCW_READ_CKD_MT:
335 case DASD_ECKD_CCW_READ_KD:
336 case DASD_ECKD_CCW_READ_KD_MT:
337 case DASD_ECKD_CCW_READ_COUNT:
338 data->mask.perm = 0x1;
339 data->attributes.operation = basepriv->attrib.operation;
340 break;
341 case DASD_ECKD_CCW_WRITE:
342 case DASD_ECKD_CCW_WRITE_MT:
343 case DASD_ECKD_CCW_WRITE_KD:
344 case DASD_ECKD_CCW_WRITE_KD_MT:
345 data->mask.perm = 0x02;
346 data->attributes.operation = basepriv->attrib.operation;
347 rc = check_XRC_on_prefix(pfxdata, basedev);
348 break;
349 case DASD_ECKD_CCW_WRITE_CKD:
350 case DASD_ECKD_CCW_WRITE_CKD_MT:
351 data->attributes.operation = DASD_BYPASS_CACHE;
352 rc = check_XRC_on_prefix(pfxdata, basedev);
353 break;
354 case DASD_ECKD_CCW_ERASE:
355 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
356 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
357 data->mask.perm = 0x3;
358 data->mask.auth = 0x1;
359 data->attributes.operation = DASD_BYPASS_CACHE;
360 rc = check_XRC_on_prefix(pfxdata, basedev);
361 break;
362 default:
363 DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd);
364 break;
365 }
366
367 data->attributes.mode = 0x3; /* ECKD */
368
369 if ((basepriv->rdc_data.cu_type == 0x2105 ||
370 basepriv->rdc_data.cu_type == 0x2107 ||
371 basepriv->rdc_data.cu_type == 0x1750)
372 && !(basepriv->uses_cdl && trk < 2))
373 data->ga_extended |= 0x40; /* Regular Data Format Mode */
374
375 geo.cyl = basepriv->rdc_data.no_cyl;
376 geo.head = basepriv->rdc_data.trk_per_cyl;
377 beg.cyl = trk / geo.head;
378 beg.head = trk % geo.head;
379 end.cyl = totrk / geo.head;
380 end.head = totrk % geo.head;
381
382 /* check for sequential prestage - enhance cylinder range */
383 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
384 data->attributes.operation == DASD_SEQ_ACCESS) {
385
386 if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl)
387 end.cyl += basepriv->attrib.nr_cyl;
388 else
389 end.cyl = (geo.cyl - 1);
390 }
391
392 data->beg_ext.cyl = beg.cyl;
393 data->beg_ext.head = beg.head;
394 data->end_ext.cyl = end.cyl;
395 data->end_ext.head = end.head;
396 return rc;
397}
398
283static void 399static void
284locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 400locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
285 int rec_on_trk, int no_rec, int cmd, 401 int rec_on_trk, int no_rec, int cmd,
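
The new prefix() routine addresses its extent by converting linear track numbers into cylinder/head pairs with a plain division and remainder by tracks per cylinder, then possibly widens the end cylinder for sequential prestage. A worked example of the conversion (userspace C; 15 tracks per cylinder is typical of 3390-style geometry and is used here purely as an assumed value):

    #include <stdio.h>

    int main(void)
    {
        unsigned int trk_per_cyl = 15; /* assumed 3390-style geometry */
        unsigned int trk = 100;        /* first track of the request */
        unsigned int totrk = 104;      /* last track of the request */

        /* Same conversion as in prefix() / define_extent(). */
        unsigned int beg_cyl  = trk / trk_per_cyl;   /* 6 */
        unsigned int beg_head = trk % trk_per_cyl;   /* 10 */
        unsigned int end_cyl  = totrk / trk_per_cyl; /* 6 */
        unsigned int end_head = totrk % trk_per_cyl; /* 14 */

        printf("extent %u/%u .. %u/%u\n",
               beg_cyl, beg_head, end_cyl, end_head);
        return 0;
    }
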
@@ -300,7 +416,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
300 ccw->count = 16; 416 ccw->count = 16;
301 ccw->cda = (__u32) __pa(data); 417 ccw->cda = (__u32) __pa(data);
302 418
303 memset(data, 0, sizeof (struct LO_eckd_data)); 419 memset(data, 0, sizeof(struct LO_eckd_data));
304 sector = 0; 420 sector = 0;
305 if (rec_on_trk) { 421 if (rec_on_trk) {
306 switch (private->rdc_data.dev_type) { 422 switch (private->rdc_data.dev_type) {
@@ -441,12 +557,15 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
441 sizeof(uid->serial) - 1); 557 sizeof(uid->serial) - 1);
442 EBCASC(uid->serial, sizeof(uid->serial) - 1); 558 EBCASC(uid->serial, sizeof(uid->serial) - 1);
443 uid->ssid = confdata->neq.subsystemID; 559 uid->ssid = confdata->neq.subsystemID;
444 if (confdata->ned2.sneq.flags == 0x40) { 560 uid->real_unit_addr = confdata->ned1.unit_addr;
445 uid->alias = 1; 561 if (confdata->ned2.sneq.flags == 0x40 &&
446 uid->unit_addr = confdata->ned2.sneq.base_unit_addr; 562 confdata->ned2.sneq.format == 0x0001) {
447 } else 563 uid->type = confdata->ned2.sneq.sua_flags;
448 uid->unit_addr = confdata->ned1.unit_addr; 564 if (uid->type == UA_BASE_PAV_ALIAS)
449 565 uid->base_unit_addr = confdata->ned2.sneq.base_unit_addr;
566 } else {
567 uid->type = UA_BASE_DEVICE;
568 }
450 return 0; 569 return 0;
451} 570}
452 571
@@ -470,7 +589,9 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
470 ccw->cda = (__u32)(addr_t)rcd_buffer; 589 ccw->cda = (__u32)(addr_t)rcd_buffer;
471 ccw->count = ciw->count; 590 ccw->count = ciw->count;
472 591
473 cqr->device = device; 592 cqr->startdev = device;
593 cqr->memdev = device;
594 cqr->block = NULL;
474 cqr->expires = 10*HZ; 595 cqr->expires = 10*HZ;
475 cqr->lpm = lpm; 596 cqr->lpm = lpm;
476 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 597 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -511,7 +632,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
511 /* 632 /*
512 * on success we update the user input parms 633 * on success we update the user input parms
513 */ 634 */
514 dasd_sfree_request(cqr, cqr->device); 635 dasd_sfree_request(cqr, cqr->memdev);
515 if (ret) 636 if (ret)
516 goto out_error; 637 goto out_error;
517 638
@@ -557,19 +678,19 @@ dasd_eckd_read_conf(struct dasd_device *device)
557 "data retrieved"); 678 "data retrieved");
558 continue; /* no error */ 679 continue; /* no error */
559 } 680 }
560 if (conf_len != sizeof (struct dasd_eckd_confdata)) { 681 if (conf_len != sizeof(struct dasd_eckd_confdata)) {
561 MESSAGE(KERN_WARNING, 682 MESSAGE(KERN_WARNING,
562 "sizes of configuration data mismatch" 683 "sizes of configuration data mismatch"
563 "%d (read) vs %ld (expected)", 684 "%d (read) vs %ld (expected)",
564 conf_len, 685 conf_len,
565 sizeof (struct dasd_eckd_confdata)); 686 sizeof(struct dasd_eckd_confdata));
566 kfree(conf_data); 687 kfree(conf_data);
567 continue; /* no error */ 688 continue; /* no error */
568 } 689 }
569 /* save first valid configuration data */ 690 /* save first valid configuration data */
570 if (!conf_data_saved){ 691 if (!conf_data_saved){
571 memcpy(&private->conf_data, conf_data, 692 memcpy(&private->conf_data, conf_data,
572 sizeof (struct dasd_eckd_confdata)); 693 sizeof(struct dasd_eckd_confdata));
573 conf_data_saved++; 694 conf_data_saved++;
574 } 695 }
575 switch (((char *)conf_data)[242] & 0x07){ 696 switch (((char *)conf_data)[242] & 0x07){
@@ -586,39 +707,104 @@ dasd_eckd_read_conf(struct dasd_device *device)
586 return 0; 707 return 0;
587} 708}
588 709
710static int dasd_eckd_read_features(struct dasd_device *device)
711{
712 struct dasd_psf_prssd_data *prssdp;
713 struct dasd_rssd_features *features;
714 struct dasd_ccw_req *cqr;
715 struct ccw1 *ccw;
716 int rc;
717 struct dasd_eckd_private *private;
718
719 private = (struct dasd_eckd_private *) device->private;
720 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
721 1 /* PSF */ + 1 /* RSSD */ ,
722 (sizeof(struct dasd_psf_prssd_data) +
723 sizeof(struct dasd_rssd_features)),
724 device);
725 if (IS_ERR(cqr)) {
726 DEV_MESSAGE(KERN_WARNING, device, "%s",
727 "Could not allocate initialization request");
728 return PTR_ERR(cqr);
729 }
730 cqr->startdev = device;
731 cqr->memdev = device;
732 cqr->block = NULL;
733 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
734 cqr->retries = 5;
735 cqr->expires = 10 * HZ;
736
737 /* Prepare for Read Subsystem Data */
738 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
739 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
740 prssdp->order = PSF_ORDER_PRSSD;
741 prssdp->suborder = 0x41; /* Read Feature Codes */
742 /* all other bytes of prssdp must be zero */
743
744 ccw = cqr->cpaddr;
745 ccw->cmd_code = DASD_ECKD_CCW_PSF;
746 ccw->count = sizeof(struct dasd_psf_prssd_data);
747 ccw->flags |= CCW_FLAG_CC;
748 ccw->cda = (__u32)(addr_t) prssdp;
749
750 /* Read Subsystem Data - feature codes */
751 features = (struct dasd_rssd_features *) (prssdp + 1);
752 memset(features, 0, sizeof(struct dasd_rssd_features));
753
754 ccw++;
755 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
756 ccw->count = sizeof(struct dasd_rssd_features);
757 ccw->cda = (__u32)(addr_t) features;
758
759 cqr->buildclk = get_clock();
760 cqr->status = DASD_CQR_FILLED;
761 rc = dasd_sleep_on(cqr);
762 if (rc == 0) {
763 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
764 features = (struct dasd_rssd_features *) (prssdp + 1);
765 memcpy(&private->features, features,
766 sizeof(struct dasd_rssd_features));
767 }
768 dasd_sfree_request(cqr, cqr->memdev);
769 return rc;
770}
771
772
589/* 773/*
590 * Build CP for Perform Subsystem Function - SSC. 774 * Build CP for Perform Subsystem Function - SSC.
591 */ 775 */
592static struct dasd_ccw_req * 776static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
593dasd_eckd_build_psf_ssc(struct dasd_device *device)
594{ 777{
595 struct dasd_ccw_req *cqr; 778 struct dasd_ccw_req *cqr;
596 struct dasd_psf_ssc_data *psf_ssc_data; 779 struct dasd_psf_ssc_data *psf_ssc_data;
597 struct ccw1 *ccw; 780 struct ccw1 *ccw;
598 781
599 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , 782 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
600 sizeof(struct dasd_psf_ssc_data), 783 sizeof(struct dasd_psf_ssc_data),
601 device); 784 device);
602 785
603 if (IS_ERR(cqr)) { 786 if (IS_ERR(cqr)) {
604 DEV_MESSAGE(KERN_WARNING, device, "%s", 787 DEV_MESSAGE(KERN_WARNING, device, "%s",
605 "Could not allocate PSF-SSC request"); 788 "Could not allocate PSF-SSC request");
606 return cqr; 789 return cqr;
607 } 790 }
608 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; 791 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
609 psf_ssc_data->order = PSF_ORDER_SSC; 792 psf_ssc_data->order = PSF_ORDER_SSC;
610 psf_ssc_data->suborder = 0x08; 793 psf_ssc_data->suborder = 0x88;
611 794 psf_ssc_data->reserved[0] = 0x88;
612 ccw = cqr->cpaddr; 795
613 ccw->cmd_code = DASD_ECKD_CCW_PSF; 796 ccw = cqr->cpaddr;
614 ccw->cda = (__u32)(addr_t)psf_ssc_data; 797 ccw->cmd_code = DASD_ECKD_CCW_PSF;
615 ccw->count = 66; 798 ccw->cda = (__u32)(addr_t)psf_ssc_data;
616 799 ccw->count = 66;
617 cqr->device = device; 800
618 cqr->expires = 10*HZ; 801 cqr->startdev = device;
619 cqr->buildclk = get_clock(); 802 cqr->memdev = device;
620 cqr->status = DASD_CQR_FILLED; 803 cqr->block = NULL;
621 return cqr; 804 cqr->expires = 10*HZ;
805 cqr->buildclk = get_clock();
806 cqr->status = DASD_CQR_FILLED;
807 return cqr;
622} 808}
623 809
624/* 810/*
@@ -629,28 +815,28 @@ dasd_eckd_build_psf_ssc(struct dasd_device *device)
629static int 815static int
630dasd_eckd_psf_ssc(struct dasd_device *device) 816dasd_eckd_psf_ssc(struct dasd_device *device)
631{ 817{
632 struct dasd_ccw_req *cqr; 818 struct dasd_ccw_req *cqr;
633 int rc; 819 int rc;
634 820
635 cqr = dasd_eckd_build_psf_ssc(device); 821 cqr = dasd_eckd_build_psf_ssc(device);
636 if (IS_ERR(cqr)) 822 if (IS_ERR(cqr))
637 return PTR_ERR(cqr); 823 return PTR_ERR(cqr);
638 824
639 rc = dasd_sleep_on(cqr); 825 rc = dasd_sleep_on(cqr);
640 if (!rc) 826 if (!rc)
641 /* trigger CIO to reprobe devices */ 827 /* trigger CIO to reprobe devices */
642 css_schedule_reprobe(); 828 css_schedule_reprobe();
643 dasd_sfree_request(cqr, cqr->device); 829 dasd_sfree_request(cqr, cqr->memdev);
644 return rc; 830 return rc;
645} 831}
646 832
647/* 833/*
648 * Validate storage server of current device. 834 * Validate storage server of current device.
649 */ 835 */
650static int 836static int dasd_eckd_validate_server(struct dasd_device *device)
651dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
652{ 837{
653 int rc; 838 int rc;
839 struct dasd_eckd_private *private;
654 840
655 /* Currently PAV is the only reason to 'validate' server on LPAR */ 841 /* Currently PAV is the only reason to 'validate' server on LPAR */
656 if (dasd_nopav || MACHINE_IS_VM) 842 if (dasd_nopav || MACHINE_IS_VM)
@@ -659,9 +845,11 @@ dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
659 rc = dasd_eckd_psf_ssc(device); 845 rc = dasd_eckd_psf_ssc(device);
660 /* maybe the requested feature is not available on the server, 846 /* maybe the requested feature is not available on the server,
661 * therefore just report the error and go ahead */ 847 * therefore just report the error and go ahead */
848 private = (struct dasd_eckd_private *) device->private;
662 DEV_MESSAGE(KERN_INFO, device, 849 DEV_MESSAGE(KERN_INFO, device,
663 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", 850 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
664 uid->vendor, uid->serial, uid->ssid, rc); 851 private->uid.vendor, private->uid.serial,
852 private->uid.ssid, rc);
665 /* RE-Read Configuration Data */ 853 /* RE-Read Configuration Data */
666 return dasd_eckd_read_conf(device); 854 return dasd_eckd_read_conf(device);
667} 855}
@@ -674,9 +862,9 @@ static int
674dasd_eckd_check_characteristics(struct dasd_device *device) 862dasd_eckd_check_characteristics(struct dasd_device *device)
675{ 863{
676 struct dasd_eckd_private *private; 864 struct dasd_eckd_private *private;
677 struct dasd_uid uid; 865 struct dasd_block *block;
678 void *rdc_data; 866 void *rdc_data;
679 int rc; 867 int is_known, rc;
680 868
681 private = (struct dasd_eckd_private *) device->private; 869 private = (struct dasd_eckd_private *) device->private;
682 if (private == NULL) { 870 if (private == NULL) {
@@ -699,27 +887,54 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
699 /* Read Configuration Data */ 887 /* Read Configuration Data */
700 rc = dasd_eckd_read_conf(device); 888 rc = dasd_eckd_read_conf(device);
701 if (rc) 889 if (rc)
702 return rc; 890 goto out_err1;
703 891
704 /* Generate device unique id and register in devmap */ 892 /* Generate device unique id and register in devmap */
705 rc = dasd_eckd_generate_uid(device, &uid); 893 rc = dasd_eckd_generate_uid(device, &private->uid);
706 if (rc) 894 if (rc)
707 return rc; 895 goto out_err1;
708 rc = dasd_set_uid(device->cdev, &uid); 896 dasd_set_uid(device->cdev, &private->uid);
709 if (rc == 1) /* new server found */ 897
710 rc = dasd_eckd_validate_server(device, &uid); 898 if (private->uid.type == UA_BASE_DEVICE) {
899 block = dasd_alloc_block();
900 if (IS_ERR(block)) {
901 DEV_MESSAGE(KERN_WARNING, device, "%s",
902 "could not allocate dasd block structure");
903 rc = PTR_ERR(block);
904 goto out_err1;
905 }
906 device->block = block;
907 block->base = device;
908 }
909
910 /* register lcu with alias handling, enable PAV if this is a new lcu */
911 is_known = dasd_alias_make_device_known_to_lcu(device);
912 if (is_known < 0) {
913 rc = is_known;
914 goto out_err2;
915 }
916 if (!is_known) {
917 /* new lcu found */
918 rc = dasd_eckd_validate_server(device); /* will switch pav on */
919 if (rc)
920 goto out_err3;
921 }
922
923 /* Read Feature Codes */
924 rc = dasd_eckd_read_features(device);
711 if (rc) 925 if (rc)
712 return rc; 926 goto out_err3;
713 927
714 /* Read Device Characteristics */ 928 /* Read Device Characteristics */
715 rdc_data = (void *) &(private->rdc_data); 929 rdc_data = (void *) &(private->rdc_data);
716 memset(rdc_data, 0, sizeof(rdc_data)); 930 memset(rdc_data, 0, sizeof(rdc_data));
717 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); 931 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
718 if (rc) 932 if (rc) {
719 DEV_MESSAGE(KERN_WARNING, device, 933 DEV_MESSAGE(KERN_WARNING, device,
720 "Read device characteristics returned " 934 "Read device characteristics returned "
721 "rc=%d", rc); 935 "rc=%d", rc);
722 936 goto out_err3;
937 }
723 DEV_MESSAGE(KERN_INFO, device, 938 DEV_MESSAGE(KERN_INFO, device,
724 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", 939 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
725 private->rdc_data.dev_type, 940 private->rdc_data.dev_type,
@@ -729,9 +944,24 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
729 private->rdc_data.no_cyl, 944 private->rdc_data.no_cyl,
730 private->rdc_data.trk_per_cyl, 945 private->rdc_data.trk_per_cyl,
731 private->rdc_data.sec_per_trk); 946 private->rdc_data.sec_per_trk);
947 return 0;
948
949out_err3:
950 dasd_alias_disconnect_device_from_lcu(device);
951out_err2:
952 dasd_free_block(device->block);
953 device->block = NULL;
954out_err1:
955 kfree(device->private);
956 device->private = NULL;
732 return rc; 957 return rc;
733} 958}
734 959
960static void dasd_eckd_uncheck_device(struct dasd_device *device)
961{
962 dasd_alias_disconnect_device_from_lcu(device);
963}
964
735static struct dasd_ccw_req * 965static struct dasd_ccw_req *
736dasd_eckd_analysis_ccw(struct dasd_device *device) 966dasd_eckd_analysis_ccw(struct dasd_device *device)
737{ 967{
@@ -755,7 +985,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
755 /* Define extent for the first 3 tracks. */ 985 /* Define extent for the first 3 tracks. */
756 define_extent(ccw++, cqr->data, 0, 2, 986 define_extent(ccw++, cqr->data, 0, 2,
757 DASD_ECKD_CCW_READ_COUNT, device); 987 DASD_ECKD_CCW_READ_COUNT, device);
758 LO_data = cqr->data + sizeof (struct DE_eckd_data); 988 LO_data = cqr->data + sizeof(struct DE_eckd_data);
759 /* Locate record for the first 4 records on track 0. */ 989 /* Locate record for the first 4 records on track 0. */
760 ccw[-1].flags |= CCW_FLAG_CC; 990 ccw[-1].flags |= CCW_FLAG_CC;
761 locate_record(ccw++, LO_data++, 0, 0, 4, 991 locate_record(ccw++, LO_data++, 0, 0, 4,
@@ -783,7 +1013,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
783 ccw->count = 8; 1013 ccw->count = 8;
784 ccw->cda = (__u32)(addr_t) count_data; 1014 ccw->cda = (__u32)(addr_t) count_data;
785 1015
786 cqr->device = device; 1016 cqr->block = NULL;
1017 cqr->startdev = device;
1018 cqr->memdev = device;
787 cqr->retries = 0; 1019 cqr->retries = 0;
788 cqr->buildclk = get_clock(); 1020 cqr->buildclk = get_clock();
789 cqr->status = DASD_CQR_FILLED; 1021 cqr->status = DASD_CQR_FILLED;
@@ -803,7 +1035,7 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
803 struct dasd_eckd_private *private; 1035 struct dasd_eckd_private *private;
804 struct dasd_device *device; 1036 struct dasd_device *device;
805 1037
806 device = init_cqr->device; 1038 device = init_cqr->startdev;
807 private = (struct dasd_eckd_private *) device->private; 1039 private = (struct dasd_eckd_private *) device->private;
808 private->init_cqr_status = init_cqr->status; 1040 private->init_cqr_status = init_cqr->status;
809 dasd_sfree_request(init_cqr, device); 1041 dasd_sfree_request(init_cqr, device);
@@ -811,13 +1043,13 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
811} 1043}
812 1044
813static int 1045static int
814dasd_eckd_start_analysis(struct dasd_device *device) 1046dasd_eckd_start_analysis(struct dasd_block *block)
815{ 1047{
816 struct dasd_eckd_private *private; 1048 struct dasd_eckd_private *private;
817 struct dasd_ccw_req *init_cqr; 1049 struct dasd_ccw_req *init_cqr;
818 1050
819 private = (struct dasd_eckd_private *) device->private; 1051 private = (struct dasd_eckd_private *) block->base->private;
820 init_cqr = dasd_eckd_analysis_ccw(device); 1052 init_cqr = dasd_eckd_analysis_ccw(block->base);
821 if (IS_ERR(init_cqr)) 1053 if (IS_ERR(init_cqr))
822 return PTR_ERR(init_cqr); 1054 return PTR_ERR(init_cqr);
823 init_cqr->callback = dasd_eckd_analysis_callback; 1055 init_cqr->callback = dasd_eckd_analysis_callback;
@@ -828,13 +1060,15 @@ dasd_eckd_start_analysis(struct dasd_device *device)
828} 1060}
829 1061
830static int 1062static int
831dasd_eckd_end_analysis(struct dasd_device *device) 1063dasd_eckd_end_analysis(struct dasd_block *block)
832{ 1064{
1065 struct dasd_device *device;
833 struct dasd_eckd_private *private; 1066 struct dasd_eckd_private *private;
834 struct eckd_count *count_area; 1067 struct eckd_count *count_area;
835 unsigned int sb, blk_per_trk; 1068 unsigned int sb, blk_per_trk;
836 int status, i; 1069 int status, i;
837 1070
1071 device = block->base;
838 private = (struct dasd_eckd_private *) device->private; 1072 private = (struct dasd_eckd_private *) device->private;
839 status = private->init_cqr_status; 1073 status = private->init_cqr_status;
840 private->init_cqr_status = -1; 1074 private->init_cqr_status = -1;
@@ -846,7 +1080,7 @@ dasd_eckd_end_analysis(struct dasd_device *device)
846 1080
847 private->uses_cdl = 1; 1081 private->uses_cdl = 1;
848 /* Calculate number of blocks/records per track. */ 1082 /* Calculate number of blocks/records per track. */
849 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block); 1083 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
850 /* Check Track 0 for Compatible Disk Layout */ 1084 /* Check Track 0 for Compatible Disk Layout */
851 count_area = NULL; 1085 count_area = NULL;
852 for (i = 0; i < 3; i++) { 1086 for (i = 0; i < 3; i++) {
@@ -876,56 +1110,65 @@ dasd_eckd_end_analysis(struct dasd_device *device)
876 if (count_area != NULL && count_area->kl == 0) { 1110 if (count_area != NULL && count_area->kl == 0) {
877 /* we found nothing violating our disk layout */ 1111 /* we found nothing violating our disk layout */
878 if (dasd_check_blocksize(count_area->dl) == 0) 1112 if (dasd_check_blocksize(count_area->dl) == 0)
879 device->bp_block = count_area->dl; 1113 block->bp_block = count_area->dl;
880 } 1114 }
881 if (device->bp_block == 0) { 1115 if (block->bp_block == 0) {
882 DEV_MESSAGE(KERN_WARNING, device, "%s", 1116 DEV_MESSAGE(KERN_WARNING, device, "%s",
883 "Volume has incompatible disk layout"); 1117 "Volume has incompatible disk layout");
884 return -EMEDIUMTYPE; 1118 return -EMEDIUMTYPE;
885 } 1119 }
886 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 1120 block->s2b_shift = 0; /* bits to shift 512 to get a block */
887 for (sb = 512; sb < device->bp_block; sb = sb << 1) 1121 for (sb = 512; sb < block->bp_block; sb = sb << 1)
888 device->s2b_shift++; 1122 block->s2b_shift++;
889 1123
890 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block); 1124 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
891 device->blocks = (private->rdc_data.no_cyl * 1125 block->blocks = (private->rdc_data.no_cyl *
892 private->rdc_data.trk_per_cyl * 1126 private->rdc_data.trk_per_cyl *
893 blk_per_trk); 1127 blk_per_trk);
894 1128
895 DEV_MESSAGE(KERN_INFO, device, 1129 DEV_MESSAGE(KERN_INFO, device,
896 "(%dkB blks): %dkB at %dkB/trk %s", 1130 "(%dkB blks): %dkB at %dkB/trk %s",
897 (device->bp_block >> 10), 1131 (block->bp_block >> 10),
898 ((private->rdc_data.no_cyl * 1132 ((private->rdc_data.no_cyl *
899 private->rdc_data.trk_per_cyl * 1133 private->rdc_data.trk_per_cyl *
900 blk_per_trk * (device->bp_block >> 9)) >> 1), 1134 blk_per_trk * (block->bp_block >> 9)) >> 1),
901 ((blk_per_trk * device->bp_block) >> 10), 1135 ((blk_per_trk * block->bp_block) >> 10),
902 private->uses_cdl ? 1136 private->uses_cdl ?
903 "compatible disk layout" : "linux disk layout"); 1137 "compatible disk layout" : "linux disk layout");
904 1138
905 return 0; 1139 return 0;
906} 1140}
907 1141
908static int 1142static int dasd_eckd_do_analysis(struct dasd_block *block)
909dasd_eckd_do_analysis(struct dasd_device *device)
910{ 1143{
911 struct dasd_eckd_private *private; 1144 struct dasd_eckd_private *private;
912 1145
913 private = (struct dasd_eckd_private *) device->private; 1146 private = (struct dasd_eckd_private *) block->base->private;
914 if (private->init_cqr_status < 0) 1147 if (private->init_cqr_status < 0)
915 return dasd_eckd_start_analysis(device); 1148 return dasd_eckd_start_analysis(block);
916 else 1149 else
917 return dasd_eckd_end_analysis(device); 1150 return dasd_eckd_end_analysis(block);
918} 1151}
919 1152
1153static int dasd_eckd_ready_to_online(struct dasd_device *device)
1154{
1155 return dasd_alias_add_device(device);
1156};
1157
1158static int dasd_eckd_online_to_ready(struct dasd_device *device)
1159{
1160 return dasd_alias_remove_device(device);
1161};
1162
920static int 1163static int
921dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 1164dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
922{ 1165{
923 struct dasd_eckd_private *private; 1166 struct dasd_eckd_private *private;
924 1167
925 private = (struct dasd_eckd_private *) device->private; 1168 private = (struct dasd_eckd_private *) block->base->private;
926 if (dasd_check_blocksize(device->bp_block) == 0) { 1169 if (dasd_check_blocksize(block->bp_block) == 0) {
927 geo->sectors = recs_per_track(&private->rdc_data, 1170 geo->sectors = recs_per_track(&private->rdc_data,
928 0, device->bp_block); 1171 0, block->bp_block);
929 } 1172 }
930 geo->cylinders = private->rdc_data.no_cyl; 1173 geo->cylinders = private->rdc_data.no_cyl;
931 geo->heads = private->rdc_data.trk_per_cyl; 1174 geo->heads = private->rdc_data.trk_per_cyl;
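
The capacity arithmetic in dasd_eckd_end_analysis above multiplies cylinders by tracks per cylinder by formatted blocks per track, and the log message converts to kB by halving a 512-byte sector count. A worked example with plausible 3390-style numbers (illustrative assumptions, not values read from real hardware; blk_per_trk would come from recs_per_track()):

    #include <stdio.h>

    int main(void)
    {
        unsigned int no_cyl = 3339;     /* assumed cylinders */
        unsigned int trk_per_cyl = 15;  /* assumed tracks per cylinder */
        unsigned int blk_per_trk = 12;  /* assumed 4 KiB records per track */
        unsigned int bp_block = 4096;

        unsigned long blocks =
            (unsigned long)no_cyl * trk_per_cyl * blk_per_trk;
        /* blocks * sectors-per-block gives 512-byte units; >> 1 is kB. */
        unsigned long kb = (blocks * (bp_block >> 9)) >> 1;

        printf("%lu blocks, %lukB total, %ukB/trk\n",
               blocks, kb, (blk_per_trk * bp_block) >> 10);
        return 0; /* 601020 blocks, 2404080kB, 48kB/trk */
    }
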
@@ -1037,7 +1280,7 @@ dasd_eckd_format_device(struct dasd_device * device,
1037 locate_record(ccw++, (struct LO_eckd_data *) data, 1280 locate_record(ccw++, (struct LO_eckd_data *) data,
1038 fdata->start_unit, 0, rpt + 1, 1281 fdata->start_unit, 0, rpt + 1,
1039 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, 1282 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1040 device->bp_block); 1283 device->block->bp_block);
1041 data += sizeof(struct LO_eckd_data); 1284 data += sizeof(struct LO_eckd_data);
1042 break; 1285 break;
1043 case 0x04: /* Invalidate track. */ 1286 case 0x04: /* Invalidate track. */
@@ -1110,43 +1353,28 @@ dasd_eckd_format_device(struct dasd_device * device,
1110 ccw++; 1353 ccw++;
1111 } 1354 }
1112 } 1355 }
1113 fcp->device = device; 1356 fcp->startdev = device;
1114 fcp->retries = 2; /* set retry counter to enable ERP */ 1357 fcp->memdev = device;
1358 clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
1359 fcp->retries = 5; /* set retry counter to enable default ERP */
1115 fcp->buildclk = get_clock(); 1360 fcp->buildclk = get_clock();
1116 fcp->status = DASD_CQR_FILLED; 1361 fcp->status = DASD_CQR_FILLED;
1117 return fcp; 1362 return fcp;
1118} 1363}
1119 1364
1120static dasd_era_t 1365static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1121dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
1122{ 1366{
1123 struct dasd_device *device = (struct dasd_device *) cqr->device; 1367 cqr->status = DASD_CQR_FILLED;
1124 struct ccw_device *cdev = device->cdev; 1368 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1125 1369 dasd_eckd_reset_ccw_to_base_io(cqr);
1126 if (irb->scsw.cstat == 0x00 && 1370 cqr->startdev = cqr->block->base;
1127 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
1128 return dasd_era_none;
1129
1130 switch (cdev->id.cu_type) {
1131 case 0x3990:
1132 case 0x2105:
1133 case 0x2107:
1134 case 0x1750:
1135 return dasd_3990_erp_examine(cqr, irb);
1136 case 0x9343:
1137 return dasd_9343_erp_examine(cqr, irb);
1138 case 0x3880:
1139 default:
1140 DEV_MESSAGE(KERN_WARNING, device, "%s",
1141 "default (unknown CU type) - RECOVERABLE return");
1142 return dasd_era_recover;
1143 } 1371 }
1144} 1372};
1145 1373
1146static dasd_erp_fn_t 1374static dasd_erp_fn_t
1147dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 1375dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1148{ 1376{
1149 struct dasd_device *device = (struct dasd_device *) cqr->device; 1377 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1150 struct ccw_device *cdev = device->cdev; 1378 struct ccw_device *cdev = device->cdev;
1151 1379
1152 switch (cdev->id.cu_type) { 1380 switch (cdev->id.cu_type) {
@@ -1168,8 +1396,37 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1168 return dasd_default_erp_postaction; 1396 return dasd_default_erp_postaction;
1169} 1397}
1170 1398
1171static struct dasd_ccw_req * 1399
1172dasd_eckd_build_cp(struct dasd_device * device, struct request *req) 1400static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1401 struct irb *irb)
1402{
1403 char mask;
1404
1405 /* first of all check for state change pending interrupt */
1406 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1407 if ((irb->scsw.dstat & mask) == mask) {
1408 dasd_generic_handle_state_change(device);
1409 return;
1410 }
1411
1412 /* summary unit check */
1413 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) {
1414 dasd_alias_handle_summary_unit_check(device, irb);
1415 return;
1416 }
1417
1418 /* just report other unsolicited interrupts */
1419 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1420 "unsolicited interrupt received");
1421 device->discipline->dump_sense(device, NULL, irb);
1422 dasd_schedule_device_bh(device);
1423
1424 return;
1425};
1426
1427static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1428 struct dasd_block *block,
1429 struct request *req)
1173{ 1430{
1174 struct dasd_eckd_private *private; 1431 struct dasd_eckd_private *private;
1175 unsigned long *idaws; 1432 unsigned long *idaws;
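
In dasd_eckd_handle_unsolicited_interrupt above, the test (irb->scsw.dstat & mask) == mask fires only when all three status bits (attention, device end, unit exception) are set at once, which is how a state change pending interrupt announces itself; a plain non-zero AND would also match partial overlaps. A minimal illustration with assumed bit values, not the real DEV_STAT_* definitions:

    #include <stdio.h>

    #define ATTENTION  0x80 /* illustrative values only */
    #define DEV_END    0x04
    #define UNIT_EXCEP 0x01

    int main(void)
    {
        unsigned char mask = ATTENTION | DEV_END | UNIT_EXCEP;
        unsigned char all = ATTENTION | DEV_END | UNIT_EXCEP;
        unsigned char some = ATTENTION | DEV_END; /* one bit missing */

        printf("%d %d\n",
               (all & mask) == mask,   /* 1: every bit present */
               (some & mask) == mask); /* 0: partial match only */
        return 0;
    }
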
@@ -1185,8 +1442,11 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1185 sector_t first_trk, last_trk; 1442 sector_t first_trk, last_trk;
1186 unsigned int first_offs, last_offs; 1443 unsigned int first_offs, last_offs;
1187 unsigned char cmd, rcmd; 1444 unsigned char cmd, rcmd;
1445 int use_prefix;
1446 struct dasd_device *basedev;
1188 1447
1189 private = (struct dasd_eckd_private *) device->private; 1448 basedev = block->base;
1449 private = (struct dasd_eckd_private *) basedev->private;
1190 if (rq_data_dir(req) == READ) 1450 if (rq_data_dir(req) == READ)
1191 cmd = DASD_ECKD_CCW_READ_MT; 1451 cmd = DASD_ECKD_CCW_READ_MT;
1192 else if (rq_data_dir(req) == WRITE) 1452 else if (rq_data_dir(req) == WRITE)
@@ -1194,13 +1454,13 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1194 else 1454 else
1195 return ERR_PTR(-EINVAL); 1455 return ERR_PTR(-EINVAL);
1196 /* Calculate number of blocks/records per track. */ 1456 /* Calculate number of blocks/records per track. */
1197 blksize = device->bp_block; 1457 blksize = block->bp_block;
1198 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 1458 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1199 /* Calculate record id of first and last block. */ 1459 /* Calculate record id of first and last block. */
1200 first_rec = first_trk = req->sector >> device->s2b_shift; 1460 first_rec = first_trk = req->sector >> block->s2b_shift;
1201 first_offs = sector_div(first_trk, blk_per_trk); 1461 first_offs = sector_div(first_trk, blk_per_trk);
1202 last_rec = last_trk = 1462 last_rec = last_trk =
1203 (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 1463 (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
1204 last_offs = sector_div(last_trk, blk_per_trk); 1464 last_offs = sector_div(last_trk, blk_per_trk);
1205 /* Check struct bio and count the number of blocks for the request. */ 1465 /* Check struct bio and count the number of blocks for the request. */
1206 count = 0; 1466 count = 0;
@@ -1209,20 +1469,33 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1209 if (bv->bv_len & (blksize - 1)) 1469 if (bv->bv_len & (blksize - 1))
1210 /* Eckd can only do full blocks. */ 1470 /* Eckd can only do full blocks. */
1211 return ERR_PTR(-EINVAL); 1471 return ERR_PTR(-EINVAL);
1212 count += bv->bv_len >> (device->s2b_shift + 9); 1472 count += bv->bv_len >> (block->s2b_shift + 9);
1213#if defined(CONFIG_64BIT) 1473#if defined(CONFIG_64BIT)
1214 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 1474 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1215 cidaw += bv->bv_len >> (device->s2b_shift + 9); 1475 cidaw += bv->bv_len >> (block->s2b_shift + 9);
1216#endif 1476#endif
1217 } 1477 }
1218 /* Paranoia. */ 1478 /* Paranoia. */
1219 if (count != last_rec - first_rec + 1) 1479 if (count != last_rec - first_rec + 1)
1220 return ERR_PTR(-EINVAL); 1480 return ERR_PTR(-EINVAL);
1221 /* 1x define extent + 1x locate record + number of blocks */ 1481
1222 cplength = 2 + count; 1482 /* use the prefix command if available */
1223 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 1483 use_prefix = private->features.feature[8] & 0x01;
1224 datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) + 1484 if (use_prefix) {
1225 cidaw * sizeof(unsigned long); 1485 /* 1x prefix + number of blocks */
1486 cplength = 2 + count;
1487 /* 1x prefix + cidaws*sizeof(long) */
1488 datasize = sizeof(struct PFX_eckd_data) +
1489 sizeof(struct LO_eckd_data) +
1490 cidaw * sizeof(unsigned long);
1491 } else {
1492 /* 1x define extent + 1x locate record + number of blocks */
1493 cplength = 2 + count;
1494 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1495 datasize = sizeof(struct DE_eckd_data) +
1496 sizeof(struct LO_eckd_data) +
1497 cidaw * sizeof(unsigned long);
1498 }
1226 /* Find out the number of additional locate record ccws for cdl. */ 1499 /* Find out the number of additional locate record ccws for cdl. */
1227 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 1500 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1228 if (last_rec >= 2*blk_per_trk) 1501 if (last_rec >= 2*blk_per_trk)
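
The record arithmetic at the top of dasd_eckd_build_cp first shifts the request's 512-byte start sector down to a block number, then splits that block number into a track and an offset within the track; the kernel's sector_div() divides its first argument in place and returns the remainder. The same computation in plain C with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long sector = 2056; /* request start, 512-byte units */
        unsigned int s2b_shift = 3;       /* 4 KiB blocks */
        unsigned int blk_per_trk = 12;    /* assumed, from recs_per_track() */

        unsigned long long first_rec = sector >> s2b_shift;    /* 257 */
        /* Equivalent of first_offs = sector_div(first_trk, blk_per_trk): */
        unsigned long long first_trk = first_rec / blk_per_trk; /* 21 */
        unsigned int first_offs = first_rec % blk_per_trk;      /* 5 */

        printf("rec=%llu trk=%llu offs=%u\n",
               first_rec, first_trk, first_offs);
        return 0;
    }
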
@@ -1232,26 +1505,42 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1232 } 1505 }
1233 /* Allocate the ccw request. */ 1506 /* Allocate the ccw request. */
1234 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1507 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1235 cplength, datasize, device); 1508 cplength, datasize, startdev);
1236 if (IS_ERR(cqr)) 1509 if (IS_ERR(cqr))
1237 return cqr; 1510 return cqr;
1238 ccw = cqr->cpaddr; 1511 ccw = cqr->cpaddr;
1239 /* First ccw is define extent. */ 1512 /* First ccw is define extent or prefix. */
1240 if (define_extent(ccw++, cqr->data, first_trk, 1513 if (use_prefix) {
1241 last_trk, cmd, device) == -EAGAIN) { 1514 if (prefix(ccw++, cqr->data, first_trk,
1242 /* Clock not in sync and XRC is enabled. Try again later. */ 1515 last_trk, cmd, basedev, startdev) == -EAGAIN) {
1243 dasd_sfree_request(cqr, device); 1516 /* Clock not in sync and XRC is enabled.
1244 return ERR_PTR(-EAGAIN); 1517 * Try again later.
1518 */
1519 dasd_sfree_request(cqr, startdev);
1520 return ERR_PTR(-EAGAIN);
1521 }
1522 idaws = (unsigned long *) (cqr->data +
1523 sizeof(struct PFX_eckd_data));
1524 } else {
1525 if (define_extent(ccw++, cqr->data, first_trk,
1526 last_trk, cmd, startdev) == -EAGAIN) {
1527 /* Clock not in sync and XRC is enabled.
1528 * Try again later.
1529 */
1530 dasd_sfree_request(cqr, startdev);
1531 return ERR_PTR(-EAGAIN);
1532 }
1533 idaws = (unsigned long *) (cqr->data +
1534 sizeof(struct DE_eckd_data));
1245 } 1535 }
1246 /* Build locate_record+read/write/ccws. */ 1536 /* Build locate_record+read/write/ccws. */
1247 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1248 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1537 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1249 recid = first_rec; 1538 recid = first_rec;
1250 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 1539 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1251 /* Only standard blocks so there is just one locate record. */ 1540 /* Only standard blocks so there is just one locate record. */
1252 ccw[-1].flags |= CCW_FLAG_CC; 1541 ccw[-1].flags |= CCW_FLAG_CC;
1253 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 1542 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1254 last_rec - recid + 1, cmd, device, blksize); 1543 last_rec - recid + 1, cmd, basedev, blksize);
1255 } 1544 }
1256 rq_for_each_segment(bv, req, iter) { 1545 rq_for_each_segment(bv, req, iter) {
1257 dst = page_address(bv->bv_page) + bv->bv_offset; 1546 dst = page_address(bv->bv_page) + bv->bv_offset;
@@ -1281,7 +1570,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1281 ccw[-1].flags |= CCW_FLAG_CC; 1570 ccw[-1].flags |= CCW_FLAG_CC;
1282 locate_record(ccw++, LO_data++, 1571 locate_record(ccw++, LO_data++,
1283 trkid, recoffs + 1, 1572 trkid, recoffs + 1,
1284 1, rcmd, device, count); 1573 1, rcmd, basedev, count);
1285 } 1574 }
1286 /* Locate record for standard blocks ? */ 1575 /* Locate record for standard blocks ? */
1287 if (private->uses_cdl && recid == 2*blk_per_trk) { 1576 if (private->uses_cdl && recid == 2*blk_per_trk) {
@@ -1289,7 +1578,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1289 locate_record(ccw++, LO_data++, 1578 locate_record(ccw++, LO_data++,
1290 trkid, recoffs + 1, 1579 trkid, recoffs + 1,
1291 last_rec - recid + 1, 1580 last_rec - recid + 1,
1292 cmd, device, count); 1581 cmd, basedev, count);
1293 } 1582 }
1294 /* Read/write ccw. */ 1583 /* Read/write ccw. */
1295 ccw[-1].flags |= CCW_FLAG_CC; 1584 ccw[-1].flags |= CCW_FLAG_CC;
@@ -1310,7 +1599,9 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1310 } 1599 }
1311 if (req->cmd_flags & REQ_FAILFAST) 1600 if (req->cmd_flags & REQ_FAILFAST)
1312 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1601 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1313 cqr->device = device; 1602 cqr->startdev = startdev;
1603 cqr->memdev = startdev;
1604 cqr->block = block;
1314 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 1605 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1315 cqr->lpm = private->path_data.ppm; 1606 cqr->lpm = private->path_data.ppm;
1316 cqr->retries = 256; 1607 cqr->retries = 256;
@@ -1333,10 +1624,10 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1333 1624
1334 if (!dasd_page_cache) 1625 if (!dasd_page_cache)
1335 goto out; 1626 goto out;
1336 private = (struct dasd_eckd_private *) cqr->device->private; 1627 private = (struct dasd_eckd_private *) cqr->block->base->private;
1337 blksize = cqr->device->bp_block; 1628 blksize = cqr->block->bp_block;
1338 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 1629 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1339 recid = req->sector >> cqr->device->s2b_shift; 1630 recid = req->sector >> cqr->block->s2b_shift;
1340 ccw = cqr->cpaddr; 1631 ccw = cqr->cpaddr;
1341 /* Skip over define extent & locate record. */ 1632 /* Skip over define extent & locate record. */
1342 ccw++; 1633 ccw++;
@@ -1367,10 +1658,71 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1367 } 1658 }
1368out: 1659out:
1369 status = cqr->status == DASD_CQR_DONE; 1660 status = cqr->status == DASD_CQR_DONE;
1370 dasd_sfree_request(cqr, cqr->device); 1661 dasd_sfree_request(cqr, cqr->memdev);
1371 return status; 1662 return status;
1372} 1663}
1373 1664
1665/*
1666 * Modify ccw chain in cqr so it can be started on a base device.
1667 *
1668 * Note that this is not enough to restart the cqr!
1669 * Either reset cqr->startdev as well (summary unit check handling)
1670 * or restart via separate cqr (as in ERP handling).
1671 */
1672void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
1673{
1674 struct ccw1 *ccw;
1675 struct PFX_eckd_data *pfxdata;
1676
1677 ccw = cqr->cpaddr;
1678 pfxdata = cqr->data;
1679
1680 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
1681 pfxdata->validity.verify_base = 0;
1682 pfxdata->validity.hyper_pav = 0;
1683 }
1684}
1685
1686#define DASD_ECKD_CHANQ_MAX_SIZE 4
1687
1688static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
1689 struct dasd_block *block,
1690 struct request *req)
1691{
1692 struct dasd_eckd_private *private;
1693 struct dasd_device *startdev;
1694 unsigned long flags;
1695 struct dasd_ccw_req *cqr;
1696
1697 startdev = dasd_alias_get_start_dev(base);
1698 if (!startdev)
1699 startdev = base;
1700 private = (struct dasd_eckd_private *) startdev->private;
1701 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
1702 return ERR_PTR(-EBUSY);
1703
1704 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
1705 private->count++;
1706 cqr = dasd_eckd_build_cp(startdev, block, req);
1707 if (IS_ERR(cqr))
1708 private->count--;
1709 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
1710 return cqr;
1711}
1712
1713static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
1714 struct request *req)
1715{
1716 struct dasd_eckd_private *private;
1717 unsigned long flags;
1718
1719 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
1720 private = (struct dasd_eckd_private *) cqr->memdev->private;
1721 private->count--;
1722 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
1723 return dasd_eckd_free_cp(cqr, req);
1724}
1725
1374static int 1726static int
1375dasd_eckd_fill_info(struct dasd_device * device, 1727dasd_eckd_fill_info(struct dasd_device * device,
1376 struct dasd_information2_t * info) 1728 struct dasd_information2_t * info)
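
The comment on dasd_eckd_reset_ccw_to_base_io above stresses that clearing the prefix validity bits alone does not restart a request: the caller must also point startdev back at the base device, or build a fresh cqr as ERP does. The in-patch caller dasd_eckd_handle_terminated_request shows the recipe; condensed here as a sketch, kernel context assumed:

    /* Condensed from dasd_eckd_handle_terminated_request; illustration only. */
    static void requeue_on_base_device(struct dasd_ccw_req *cqr)
    {
        if (cqr->block && cqr->startdev != cqr->block->base) {
            dasd_eckd_reset_ccw_to_base_io(cqr); /* drop PAV validity bits */
            cqr->startdev = cqr->block->base;    /* restart on the base device */
        }
        cqr->status = DASD_CQR_FILLED;           /* ready to be started again */
    }
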
@@ -1384,9 +1736,9 @@ dasd_eckd_fill_info(struct dasd_device * device,
1384 info->characteristics_size = sizeof(struct dasd_eckd_characteristics); 1736 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1385 memcpy(info->characteristics, &private->rdc_data, 1737 memcpy(info->characteristics, &private->rdc_data,
1386 sizeof(struct dasd_eckd_characteristics)); 1738 sizeof(struct dasd_eckd_characteristics));
1387 info->confdata_size = sizeof (struct dasd_eckd_confdata); 1739 info->confdata_size = sizeof(struct dasd_eckd_confdata);
1388 memcpy(info->configuration_data, &private->conf_data, 1740 memcpy(info->configuration_data, &private->conf_data,
1389 sizeof (struct dasd_eckd_confdata)); 1741 sizeof(struct dasd_eckd_confdata));
1390 return 0; 1742 return 0;
1391} 1743}
1392 1744
@@ -1419,7 +1771,8 @@ dasd_eckd_release(struct dasd_device *device)
1419 cqr->cpaddr->flags |= CCW_FLAG_SLI; 1771 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1420 cqr->cpaddr->count = 32; 1772 cqr->cpaddr->count = 32;
1421 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1773 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1422 cqr->device = device; 1774 cqr->startdev = device;
1775 cqr->memdev = device;
1423 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1776 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1424 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1777 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1425 cqr->retries = 2; /* set retry counter to enable basic ERP */ 1778 cqr->retries = 2; /* set retry counter to enable basic ERP */
@@ -1429,7 +1782,7 @@ dasd_eckd_release(struct dasd_device *device)
1429 1782
1430 rc = dasd_sleep_on_immediatly(cqr); 1783 rc = dasd_sleep_on_immediatly(cqr);
1431 1784
1432 dasd_sfree_request(cqr, cqr->device); 1785 dasd_sfree_request(cqr, cqr->memdev);
1433 return rc; 1786 return rc;
1434} 1787}
1435 1788
@@ -1459,7 +1812,8 @@ dasd_eckd_reserve(struct dasd_device *device)
1459 cqr->cpaddr->flags |= CCW_FLAG_SLI; 1812 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1460 cqr->cpaddr->count = 32; 1813 cqr->cpaddr->count = 32;
1461 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1814 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1462 cqr->device = device; 1815 cqr->startdev = device;
1816 cqr->memdev = device;
1463 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1817 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1464 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1818 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1465 cqr->retries = 2; /* set retry counter to enable basic ERP */ 1819 cqr->retries = 2; /* set retry counter to enable basic ERP */
@@ -1469,7 +1823,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1469 1823
1470 rc = dasd_sleep_on_immediatly(cqr); 1824 rc = dasd_sleep_on_immediatly(cqr);
1471 1825
1472 dasd_sfree_request(cqr, cqr->device); 1826 dasd_sfree_request(cqr, cqr->memdev);
1473 return rc; 1827 return rc;
1474} 1828}
1475 1829
@@ -1498,7 +1852,8 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1498 cqr->cpaddr->flags |= CCW_FLAG_SLI; 1852 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1499 cqr->cpaddr->count = 32; 1853 cqr->cpaddr->count = 32;
1500 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1854 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1501 cqr->device = device; 1855 cqr->startdev = device;
1856 cqr->memdev = device;
1502 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1857 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1503 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1858 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1504 cqr->retries = 2; /* set retry counter to enable basic ERP */ 1859 cqr->retries = 2; /* set retry counter to enable basic ERP */
@@ -1508,7 +1863,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1508 1863
1509 rc = dasd_sleep_on_immediatly(cqr); 1864 rc = dasd_sleep_on_immediatly(cqr);
1510 1865
1511 dasd_sfree_request(cqr, cqr->device); 1866 dasd_sfree_request(cqr, cqr->memdev);
1512 return rc; 1867 return rc;
1513} 1868}
1514 1869
@@ -1526,52 +1881,52 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
1526 1881
1527 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1882 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1528 1 /* PSF */ + 1 /* RSSD */ , 1883 1 /* PSF */ + 1 /* RSSD */ ,
1529 (sizeof (struct dasd_psf_prssd_data) + 1884 (sizeof(struct dasd_psf_prssd_data) +
1530 sizeof (struct dasd_rssd_perf_stats_t)), 1885 sizeof(struct dasd_rssd_perf_stats_t)),
1531 device); 1886 device);
1532 if (IS_ERR(cqr)) { 1887 if (IS_ERR(cqr)) {
1533 DEV_MESSAGE(KERN_WARNING, device, "%s", 1888 DEV_MESSAGE(KERN_WARNING, device, "%s",
1534 "Could not allocate initialization request"); 1889 "Could not allocate initialization request");
1535 return PTR_ERR(cqr); 1890 return PTR_ERR(cqr);
1536 } 1891 }
1537 cqr->device = device; 1892 cqr->startdev = device;
1893 cqr->memdev = device;
1538 cqr->retries = 0; 1894 cqr->retries = 0;
1539 cqr->expires = 10 * HZ; 1895 cqr->expires = 10 * HZ;
1540 1896
1541 /* Prepare for Read Subsystem Data */ 1897 /* Prepare for Read Subsystem Data */
1542 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 1898 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1543 memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data)); 1899 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1544 prssdp->order = PSF_ORDER_PRSSD; 1900 prssdp->order = PSF_ORDER_PRSSD;
1545 prssdp->suborder = 0x01; /* Perfomance Statistics */ 1901 prssdp->suborder = 0x01; /* Performance Statistics */
1546 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 1902 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
1547 1903
1548 ccw = cqr->cpaddr; 1904 ccw = cqr->cpaddr;
1549 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1905 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1550 ccw->count = sizeof (struct dasd_psf_prssd_data); 1906 ccw->count = sizeof(struct dasd_psf_prssd_data);
1551 ccw->flags |= CCW_FLAG_CC; 1907 ccw->flags |= CCW_FLAG_CC;
1552 ccw->cda = (__u32)(addr_t) prssdp; 1908 ccw->cda = (__u32)(addr_t) prssdp;
1553 1909
1554 /* Read Subsystem Data - Performance Statistics */ 1910 /* Read Subsystem Data - Performance Statistics */
1555 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 1911 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1556 memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t)); 1912 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
1557 1913
1558 ccw++; 1914 ccw++;
1559 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1915 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1560 ccw->count = sizeof (struct dasd_rssd_perf_stats_t); 1916 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
1561 ccw->cda = (__u32)(addr_t) stats; 1917 ccw->cda = (__u32)(addr_t) stats;
1562 1918
1563 cqr->buildclk = get_clock(); 1919 cqr->buildclk = get_clock();
1564 cqr->status = DASD_CQR_FILLED; 1920 cqr->status = DASD_CQR_FILLED;
1565 rc = dasd_sleep_on(cqr); 1921 rc = dasd_sleep_on(cqr);
1566 if (rc == 0) { 1922 if (rc == 0) {
1567 /* Prepare for Read Subsystem Data */
1568 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 1923 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1569 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 1924 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1570 if (copy_to_user(argp, stats, 1925 if (copy_to_user(argp, stats,
1571 sizeof(struct dasd_rssd_perf_stats_t))) 1926 sizeof(struct dasd_rssd_perf_stats_t)))
1572 rc = -EFAULT; 1927 rc = -EFAULT;
1573 } 1928 }
1574 dasd_sfree_request(cqr, cqr->device); 1929 dasd_sfree_request(cqr, cqr->memdev);
1575 return rc; 1930 return rc;
1576} 1931}
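
Note: dasd_eckd_performance packs the PSF parameter block and the RSSD result into one contiguous cqr data area, and "(prssdp + 1)" is plain pointer arithmetic that lands on the first byte after the header. A standalone sketch of that layout trick, with toy struct definitions (the real layouts live in dasd_eckd.h):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins; not the kernel structures. */
struct psf_prssd_hdr {
	unsigned char order;		/* PSF order code */
	unsigned char suborder;		/* 0x01: performance statistics */
	unsigned char varies[14];
};

struct rssd_perf_stats {
	unsigned int io_count;
	unsigned int pad[15];
};

int main(void)
{
	/* One allocation sized for header + result, like the cqr data area. */
	void *data = calloc(1, sizeof(struct psf_prssd_hdr) +
			       sizeof(struct rssd_perf_stats));
	struct psf_prssd_hdr *hdr;
	struct rssd_perf_stats *stats;

	if (!data)
		return 1;
	hdr = data;
	/* "hdr + 1" advances by one whole header, onto the result area. */
	stats = (struct rssd_perf_stats *)(hdr + 1);
	hdr->suborder = 0x01;
	printf("header %p, result %p, offset %zu\n",
	       (void *)hdr, (void *)stats, sizeof(*hdr));
	free(data);
	return 0;
}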
1577 1932
@@ -1594,7 +1949,7 @@ dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
1594 1949
1595 rc = 0; 1950 rc = 0;
1596 if (copy_to_user(argp, (long *) &attrib, 1951 if (copy_to_user(argp, (long *) &attrib,
1597 sizeof (struct attrib_data_t))) 1952 sizeof(struct attrib_data_t)))
1598 rc = -EFAULT; 1953 rc = -EFAULT;
1599 1954
1600 return rc; 1955 return rc;
@@ -1627,8 +1982,10 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
1627} 1982}
1628 1983
1629static int 1984static int
1630dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) 1985dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
1631{ 1986{
1987 struct dasd_device *device = block->base;
1988
1632 switch (cmd) { 1989 switch (cmd) {
1633 case BIODASDGATTR: 1990 case BIODASDGATTR:
1634 return dasd_eckd_get_attrib(device, argp); 1991 return dasd_eckd_get_attrib(device, argp);
@@ -1685,9 +2042,8 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1685 * Print sense data and related channel program. 2042 * Print sense data and related channel program.
1686 * Parts are printed because printk buffer is only 1024 bytes. 2043 * Parts are printed because printk buffer is only 1024 bytes.
1687 */ 2044 */
1688static void 2045static void dasd_eckd_dump_sense(struct dasd_device *device,
1689dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, 2046 struct dasd_ccw_req *req, struct irb *irb)
1690 struct irb *irb)
1691{ 2047{
1692 char *page; 2048 char *page;
1693 struct ccw1 *first, *last, *fail, *from, *to; 2049 struct ccw1 *first, *last, *fail, *from, *to;
@@ -1743,37 +2099,40 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
1743 } 2099 }
1744 printk("%s", page); 2100 printk("%s", page);
1745 2101
1746 /* dump the Channel Program (max 140 Bytes per line) */ 2102 if (req) {
1747 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 2103 /* req == NULL for unsolicited interrupts */
1748 first = req->cpaddr; 2104 /* dump the Channel Program (max 140 Bytes per line) */
1749 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 2105 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
1750 to = min(first + 6, last); 2106 first = req->cpaddr;
1751 len = sprintf(page, KERN_ERR PRINTK_HEADER 2107 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
1752 " Related CP in req: %p\n", req); 2108 to = min(first + 6, last);
1753 dasd_eckd_dump_ccw_range(first, to, page + len); 2109 len = sprintf(page, KERN_ERR PRINTK_HEADER
1754 printk("%s", page); 2110 " Related CP in req: %p\n", req);
2111 dasd_eckd_dump_ccw_range(first, to, page + len);
2112 printk("%s", page);
1755 2113
1756 /* print failing CCW area (maximum 4) */ 2114 /* print failing CCW area (maximum 4) */
1757 /* scsw->cda is either valid or zero */ 2115 /* scsw->cda is either valid or zero */
1758 len = 0; 2116 len = 0;
1759 from = ++to; 2117 from = ++to;
1760 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ 2118 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
1761 if (from < fail - 2) { 2119 if (from < fail - 2) {
1762 from = fail - 2; /* there is a gap - print header */ 2120 from = fail - 2; /* there is a gap - print header */
1763 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 2121 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
1764 } 2122 }
1765 to = min(fail + 1, last); 2123 to = min(fail + 1, last);
1766 len += dasd_eckd_dump_ccw_range(from, to, page + len); 2124 len += dasd_eckd_dump_ccw_range(from, to, page + len);
1767 2125
1768 /* print last CCWs (maximum 2) */ 2126 /* print last CCWs (maximum 2) */
1769 from = max(from, ++to); 2127 from = max(from, ++to);
1770 if (from < last - 1) { 2128 if (from < last - 1) {
1771 from = last - 1; /* there is a gap - print header */ 2129 from = last - 1; /* there is a gap - print header */
1772 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 2130 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
2131 }
2132 len += dasd_eckd_dump_ccw_range(from, last, page + len);
2133 if (len > 0)
2134 printk("%s", page);
1773 } 2135 }
1774 len += dasd_eckd_dump_ccw_range(from, last, page + len);
1775 if (len > 0)
1776 printk("%s", page);
1777 free_page((unsigned long) page); 2136 free_page((unsigned long) page);
1778} 2137}
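
Note: the dump now walks the channel program only when a request is present, since unsolicited interrupts hand in req == NULL. The end of a chain is the first CCW with neither command- nor data-chaining set, which is what the empty-bodied for loop above finds. A compilable sketch of that walk (flag and command values chosen for illustration):

#include <stdio.h>

#define CCW_FLAG_DC 0x80	/* data chaining    (illustrative values) */
#define CCW_FLAG_CC 0x40	/* command chaining */

struct ccw { unsigned char cmd, flags; };

/* Walk from 'first' to the last CCW of the chain, as dump_sense does. */
static struct ccw *ccw_chain_last(struct ccw *first)
{
	struct ccw *last;

	for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++)
		;
	return last;
}

int main(void)
{
	struct ccw cp[] = {
		{ 0x63, CCW_FLAG_CC },	/* define extent, chained */
		{ 0x47, CCW_FLAG_CC },	/* locate record, chained */
		{ 0x86, 0 },		/* read, end of chain */
	};
	printf("chain length: %td\n", ccw_chain_last(cp) - cp + 1);
	return 0;
}

The windowing around it (at most 7 leading CCWs, a few around the failing one, 2 trailing) exists only because the printk buffer holds about 1024 bytes per call.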
1779 2138
@@ -1796,16 +2155,20 @@ static struct dasd_discipline dasd_eckd_discipline = {
1796 .ebcname = "ECKD", 2155 .ebcname = "ECKD",
1797 .max_blocks = 240, 2156 .max_blocks = 240,
1798 .check_device = dasd_eckd_check_characteristics, 2157 .check_device = dasd_eckd_check_characteristics,
2158 .uncheck_device = dasd_eckd_uncheck_device,
1799 .do_analysis = dasd_eckd_do_analysis, 2159 .do_analysis = dasd_eckd_do_analysis,
2160 .ready_to_online = dasd_eckd_ready_to_online,
2161 .online_to_ready = dasd_eckd_online_to_ready,
1800 .fill_geometry = dasd_eckd_fill_geometry, 2162 .fill_geometry = dasd_eckd_fill_geometry,
1801 .start_IO = dasd_start_IO, 2163 .start_IO = dasd_start_IO,
1802 .term_IO = dasd_term_IO, 2164 .term_IO = dasd_term_IO,
2165 .handle_terminated_request = dasd_eckd_handle_terminated_request,
1803 .format_device = dasd_eckd_format_device, 2166 .format_device = dasd_eckd_format_device,
1804 .examine_error = dasd_eckd_examine_error,
1805 .erp_action = dasd_eckd_erp_action, 2167 .erp_action = dasd_eckd_erp_action,
1806 .erp_postaction = dasd_eckd_erp_postaction, 2168 .erp_postaction = dasd_eckd_erp_postaction,
1807 .build_cp = dasd_eckd_build_cp, 2169 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
1808 .free_cp = dasd_eckd_free_cp, 2170 .build_cp = dasd_eckd_build_alias_cp,
2171 .free_cp = dasd_eckd_free_alias_cp,
1809 .dump_sense = dasd_eckd_dump_sense, 2172 .dump_sense = dasd_eckd_dump_sense,
1810 .fill_info = dasd_eckd_fill_info, 2173 .fill_info = dasd_eckd_fill_info,
1811 .ioctl = dasd_eckd_ioctl, 2174 .ioctl = dasd_eckd_ioctl,
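
Note: the ops table now installs dasd_eckd_build_alias_cp/dasd_eckd_free_alias_cp while the original builders remain as helpers they delegate to. A generic sketch of that wrap-and-delegate pattern over a function-pointer table (all names illustrative, not driver code):

#include <stdio.h>

struct req { int id; };
struct ops { struct req *(*build)(int dev, struct req *r); };

/* The original builder stays as an internal helper... */
static struct req *build_basic(int dev, struct req *r)
{
	printf("req %d built on device %d\n", r->id, dev);
	return r;
}

/* ...and the table installs a wrapper that first picks a start device
 * (stand-in for dasd_alias_get_start_dev falling back to the base). */
static struct req *build_alias(int dev, struct req *r)
{
	int startdev = dev;	/* would come from the alias manager */
	return build_basic(startdev, r);
}

static const struct ops eckd_ops = { .build = build_alias };

int main(void)
{
	struct req r = { 7 };
	eckd_ops.build(0, &r);
	return 0;
}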
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 712ff1650134..fc2509c939bc 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -39,6 +39,8 @@
39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e 39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d 40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
41#define DASD_ECKD_CCW_RESERVE 0xB4 41#define DASD_ECKD_CCW_RESERVE 0xB4
42#define DASD_ECKD_CCW_PFX 0xE7
43#define DASD_ECKD_CCW_RSCK 0xF9
42 44
43/* 45/*
44 * Perform Subsystem Function / Sub-Orders 46 * Perform Subsystem Function / Sub-Orders
@@ -137,6 +139,25 @@ struct LO_eckd_data {
137 __u16 length; 139 __u16 length;
138} __attribute__ ((packed)); 140} __attribute__ ((packed));
139 141
142/* Prefix data for format 0x00 and 0x01 */
143struct PFX_eckd_data {
144 unsigned char format;
145 struct {
146 unsigned char define_extend:1;
147 unsigned char time_stamp:1;
148 unsigned char verify_base:1;
149 unsigned char hyper_pav:1;
150 unsigned char reserved:4;
151 } __attribute__ ((packed)) validity;
152 __u8 base_address;
153 __u8 aux;
154 __u8 base_lss;
155 __u8 reserved[7];
156 struct DE_eckd_data define_extend;
157 struct LO_eckd_data locate_record;
158 __u8 LO_extended_data[4];
159} __attribute__ ((packed));
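
Note: the validity bits select which parts of the prefix payload are meaningful; the hunk near the top of this section clears validity.hyper_pav when the start device is not a HyperPAV alias. A small sketch exercising an equivalent local bitfield (the in-byte bit order is compiler- and ABI-defined; the driver relies on the s390 gcc layout):

#include <stdio.h>

/* Local copy of the validity bits from struct PFX_eckd_data. */
struct pfx_validity {
	unsigned char define_extend:1;
	unsigned char time_stamp:1;
	unsigned char verify_base:1;
	unsigned char hyper_pav:1;
	unsigned char reserved:4;
} __attribute__((packed));

int main(void)
{
	struct pfx_validity v = { 0 };

	v.define_extend = 1;	/* DE data is present */
	v.verify_base = 1;	/* started on an alias: verify the base */
	v.hyper_pav = 1;	/* alias is HyperPAV-managed */
	printf("validity occupies %zu byte(s)\n", sizeof(v));
	return 0;
}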
160
140struct dasd_eckd_characteristics { 161struct dasd_eckd_characteristics {
141 __u16 cu_type; 162 __u16 cu_type;
142 struct { 163 struct {
@@ -254,7 +275,9 @@ struct dasd_eckd_confdata {
254 } __attribute__ ((packed)) ned; 275 } __attribute__ ((packed)) ned;
255 struct { 276 struct {
256 unsigned char flags; /* byte 0 */ 277 unsigned char flags; /* byte 0 */
257 unsigned char res2[7]; /* byte 1- 7 */ 278 unsigned char res1; /* byte 1 */
279 __u16 format; /* byte 2-3 */
280 unsigned char res2[4]; /* byte 4-7 */
258 unsigned char sua_flags; /* byte 8 */ 281 unsigned char sua_flags; /* byte 8 */
259 __u8 base_unit_addr; /* byte 9 */ 282 __u8 base_unit_addr; /* byte 9 */
260 unsigned char res3[22]; /* byte 10-31 */ 283 unsigned char res3[22]; /* byte 10-31 */
@@ -343,6 +366,11 @@ struct dasd_eckd_path {
343 __u8 npm; 366 __u8 npm;
344}; 367};
345 368
369struct dasd_rssd_features {
370 char feature[256];
371} __attribute__((packed));
372
373
346/* 374/*
347 * Perform Subsystem Function - Prepare for Read Subsystem Data 375 * Perform Subsystem Function - Prepare for Read Subsystem Data
348 */ 376 */
@@ -365,4 +393,99 @@ struct dasd_psf_ssc_data {
365 unsigned char reserved[59]; 393 unsigned char reserved[59];
366} __attribute__((packed)); 394} __attribute__((packed));
367 395
396
397/*
398 * some structures and definitions for alias handling
399 */
400struct dasd_unit_address_configuration {
401 struct {
402 char ua_type;
403 char base_ua;
404 } unit[256];
405} __attribute__((packed));
406
407
408#define MAX_DEVICES_PER_LCU 256
409
410/* flags on the LCU */
411#define NEED_UAC_UPDATE 0x01
412#define UPDATE_PENDING 0x02
413
414enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
415
416
417struct alias_root {
418 struct list_head serverlist;
419 spinlock_t lock;
420};
421
422struct alias_server {
423 struct list_head server;
424 struct dasd_uid uid;
425 struct list_head lculist;
426};
427
428struct summary_unit_check_work_data {
429 char reason;
430 struct dasd_device *device;
431 struct work_struct worker;
432};
433
434struct read_uac_work_data {
435 struct dasd_device *device;
436 struct delayed_work dwork;
437};
438
439struct alias_lcu {
440 struct list_head lcu;
441 struct dasd_uid uid;
442 enum pavtype pav;
443 char flags;
444 spinlock_t lock;
445 struct list_head grouplist;
446 struct list_head active_devices;
447 struct list_head inactive_devices;
448 struct dasd_unit_address_configuration *uac;
449 struct summary_unit_check_work_data suc_data;
450 struct read_uac_work_data ruac_data;
451 struct dasd_ccw_req *rsu_cqr;
452};
453
454struct alias_pav_group {
455 struct list_head group;
456 struct dasd_uid uid;
457 struct alias_lcu *lcu;
458 struct list_head baselist;
459 struct list_head aliaslist;
460 struct dasd_device *next;
461};
462
463
464struct dasd_eckd_private {
465 struct dasd_eckd_characteristics rdc_data;
466 struct dasd_eckd_confdata conf_data;
467 struct dasd_eckd_path path_data;
468 struct eckd_count count_area[5];
469 int init_cqr_status;
470 int uses_cdl;
471 struct attrib_data_t attrib; /* e.g. cache operations */
472 struct dasd_rssd_features features;
473
474	/* alias management */
475 struct dasd_uid uid;
476 struct alias_pav_group *pavgroup;
477 struct alias_lcu *lcu;
478 int count;
479};
480
481
482
483int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
484void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
485int dasd_alias_add_device(struct dasd_device *);
486int dasd_alias_remove_device(struct dasd_device *);
487struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
488void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
489void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
490
368#endif /* DASD_ECKD_H */ 491#endif /* DASD_ECKD_H */
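
Note: the new declarations arrange devices in a hierarchy, alias_root holding per-storage-server entries, each server holding LCUs, each LCU holding PAV groups with separate base and alias lists. The 'next' pointer in alias_pav_group together with the dasd_alias_get_start_dev() declaration suggests a rotor that spreads requests over the aliases of a group; the actual policy lives in dasd_alias.c and is not part of this hunk. A plausible rotor sketch under that assumption:

#include <stdio.h>

/* Minimal rotor: pick devices in turn, skipping busy ones.  This only
 * illustrates the 'next' pointer idea; it is not the driver's code. */
struct dev { const char *name; int busy; };

static struct dev aliases[] = {
	{ "alias0", 0 }, { "alias1", 1 }, { "alias2", 0 },
};
static unsigned int rotor;

static struct dev *get_start_dev(void)
{
	unsigned int n = sizeof(aliases) / sizeof(aliases[0]);
	unsigned int i;

	for (i = 0; i < n; i++) {
		struct dev *d = &aliases[rotor++ % n];
		if (!d->busy)
			return d;
	}
	return NULL;	/* caller falls back to the base device */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("pick %d: %s\n", i, get_start_dev()->name);
	return 0;
}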
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 0c081a664ee8..6e53ab606e97 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -336,7 +336,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
336 unsigned long flags; 336 unsigned long flags;
337 struct eerbuffer *eerb; 337 struct eerbuffer *eerb;
338 338
339 snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 339 snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
340 if (snss_rc) 340 if (snss_rc)
341 data_size = 0; 341 data_size = 0;
342 else 342 else
@@ -404,10 +404,11 @@ void dasd_eer_snss(struct dasd_device *device)
404 set_bit(DASD_FLAG_EER_SNSS, &device->flags); 404 set_bit(DASD_FLAG_EER_SNSS, &device->flags);
405 return; 405 return;
406 } 406 }
407 /* cdev is already locked, can't use dasd_add_request_head */
407 clear_bit(DASD_FLAG_EER_SNSS, &device->flags); 408 clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
408 cqr->status = DASD_CQR_QUEUED; 409 cqr->status = DASD_CQR_QUEUED;
409 list_add(&cqr->list, &device->ccw_queue); 410 list_add(&cqr->devlist, &device->ccw_queue);
410 dasd_schedule_bh(device); 411 dasd_schedule_device_bh(device);
411} 412}
412 413
413/* 414/*
@@ -415,7 +416,7 @@ void dasd_eer_snss(struct dasd_device *device)
415 */ 416 */
416static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) 417static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
417{ 418{
418 struct dasd_device *device = cqr->device; 419 struct dasd_device *device = cqr->startdev;
419 unsigned long flags; 420 unsigned long flags;
420 421
421 dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); 422 dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
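
Note: the new comment in the snss hunk above explains why the request is put on the queue by hand: the caller already holds the ccw-device lock, and dasd_add_request_head(), which takes that lock itself, would deadlock. The usual cure is the split shown below, a public entry point that locks and a _locked worker for callers that already hold the lock (userspace sketch, illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;

static void add_request_locked(void)	/* caller holds 'lock' */
{
	queue_len++;
}

static void add_request(void)		/* public: takes the lock itself */
{
	pthread_mutex_lock(&lock);
	add_request_locked();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	add_request();			/* normal path */
	pthread_mutex_lock(&lock);	/* interrupt-handler-like context */
	add_request_locked();		/* must NOT call add_request() here */
	pthread_mutex_unlock(&lock);
	printf("queued: %d\n", queue_len);
	return 0;
}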
@@ -458,7 +459,7 @@ int dasd_eer_enable(struct dasd_device *device)
458 if (!cqr) 459 if (!cqr)
459 return -ENOMEM; 460 return -ENOMEM;
460 461
461 cqr->device = device; 462 cqr->startdev = device;
462 cqr->retries = 255; 463 cqr->retries = 255;
463 cqr->expires = 10 * HZ; 464 cqr->expires = 10 * HZ;
464 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 465 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index caa5d91420f8..8f10000851a3 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -46,6 +46,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
46 if (cqr == NULL) 46 if (cqr == NULL)
47 return ERR_PTR(-ENOMEM); 47 return ERR_PTR(-ENOMEM);
48 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 48 memset(cqr, 0, sizeof(struct dasd_ccw_req));
49 INIT_LIST_HEAD(&cqr->devlist);
50 INIT_LIST_HEAD(&cqr->blocklist);
49 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 51 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
50 cqr->cpaddr = NULL; 52 cqr->cpaddr = NULL;
51 if (cplength > 0) { 53 if (cplength > 0) {
@@ -66,7 +68,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
66} 68}
67 69
68void 70void
69dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device) 71dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
70{ 72{
71 unsigned long flags; 73 unsigned long flags;
72 74
@@ -81,11 +83,11 @@ dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
81 * dasd_default_erp_action just retries the current cqr 83 * dasd_default_erp_action just retries the current cqr
82 */ 84 */
83struct dasd_ccw_req * 85struct dasd_ccw_req *
84dasd_default_erp_action(struct dasd_ccw_req * cqr) 86dasd_default_erp_action(struct dasd_ccw_req *cqr)
85{ 87{
86 struct dasd_device *device; 88 struct dasd_device *device;
87 89
88 device = cqr->device; 90 device = cqr->startdev;
89 91
90 /* just retry - there is nothing to save ... I got no sense data.... */ 92 /* just retry - there is nothing to save ... I got no sense data.... */
91 if (cqr->retries > 0) { 93 if (cqr->retries > 0) {
@@ -93,12 +95,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
93 "default ERP called (%i retries left)", 95 "default ERP called (%i retries left)",
94 cqr->retries); 96 cqr->retries);
95 cqr->lpm = LPM_ANYPATH; 97 cqr->lpm = LPM_ANYPATH;
96 cqr->status = DASD_CQR_QUEUED; 98 cqr->status = DASD_CQR_FILLED;
97 } else { 99 } else {
98 DEV_MESSAGE (KERN_WARNING, device, "%s", 100 DEV_MESSAGE (KERN_WARNING, device, "%s",
99 "default ERP called (NO retry left)"); 101 "default ERP called (NO retry left)");
100 cqr->status = DASD_CQR_FAILED; 102 cqr->status = DASD_CQR_FAILED;
101 cqr->stopclk = get_clock (); 103 cqr->stopclk = get_clock();
102 } 104 }
103 return cqr; 105 return cqr;
104} /* end dasd_default_erp_action */ 106} /* end dasd_default_erp_action */
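
Note: with the status rework, the default ERP rewinds a retryable request to DASD_CQR_FILLED instead of DASD_CQR_QUEUED, so the retry re-enters through the normal submit path for a freshly built request. A compressed state-machine sketch of that retry loop (statuses reduced to an enum for illustration):

#include <stdio.h>

enum cqr_status { FILLED, QUEUED, IN_IO, DONE, ERROR, FAILED };

struct cqr { enum cqr_status status; int retries; };

/* Mirrors dasd_default_erp_action: rewind or give up. */
static void default_erp(struct cqr *c)
{
	if (c->retries-- > 0)
		c->status = FILLED;	/* re-enter the submit path */
	else
		c->status = FAILED;	/* final */
}

int main(void)
{
	struct cqr c = { ERROR, 2 };

	while (c.status != DONE && c.status != FAILED) {
		default_erp(&c);
		if (c.status == FILLED)
			c.status = ERROR; /* pretend the retry failed too */
		printf("retries left %d, status %d\n", c.retries, c.status);
	}
	return 0;
}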
@@ -117,15 +119,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
117 * RETURN VALUES 119 * RETURN VALUES
118 * cqr pointer to the original CQR 120 * cqr pointer to the original CQR
119 */ 121 */
120struct dasd_ccw_req * 122struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
121dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
122{ 123{
123 struct dasd_device *device;
124 int success; 124 int success;
125 125
126 BUG_ON(cqr->refers == NULL || cqr->function == NULL); 126 BUG_ON(cqr->refers == NULL || cqr->function == NULL);
127 127
128 device = cqr->device;
129 success = cqr->status == DASD_CQR_DONE; 128 success = cqr->status == DASD_CQR_DONE;
130 129
131 /* free all ERPs - but NOT the original cqr */ 130 /* free all ERPs - but NOT the original cqr */
@@ -133,10 +132,10 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
133 struct dasd_ccw_req *refers; 132 struct dasd_ccw_req *refers;
134 133
135 refers = cqr->refers; 134 refers = cqr->refers;
136 /* remove the request from the device queue */ 135 /* remove the request from the block queue */
137 list_del(&cqr->list); 136 list_del(&cqr->blocklist);
138 /* free the finished erp request */ 137 /* free the finished erp request */
139 dasd_free_erp_request(cqr, device); 138 dasd_free_erp_request(cqr, cqr->memdev);
140 cqr = refers; 139 cqr = refers;
141 } 140 }
142 141
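
Note: each ERP request records the request it was created for in cqr->refers, so post-processing is a straight unwind: free every ERP on the chain and hand back the original. The device pointer drops out because each request now carries its own memdev. A standalone sketch of the unwind:

#include <stdio.h>
#include <stdlib.h>

/* Each ERP request points back at the request it was created for. */
struct cqr { struct cqr *refers; const char *name; };

/* Mirrors dasd_default_erp_postaction: free every ERP request but
 * keep (and return) the original one at the end of the chain. */
static struct cqr *unwind_erps(struct cqr *cqr)
{
	while (cqr->refers) {
		struct cqr *refers = cqr->refers;

		printf("freeing %s\n", cqr->name);
		free(cqr);
		cqr = refers;
	}
	return cqr;	/* the original request */
}

int main(void)
{
	struct cqr *orig = malloc(sizeof(*orig));
	struct cqr *erp1 = malloc(sizeof(*erp1));
	struct cqr *erp2 = malloc(sizeof(*erp2));

	orig->refers = NULL; orig->name = "orig";
	erp1->refers = orig; erp1->name = "erp1";
	erp2->refers = erp1; erp2->name = "erp2";

	printf("left with %s\n", unwind_erps(erp2)->name);
	free(orig);
	return 0;
}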
@@ -157,7 +156,7 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
157{ 156{
158 struct dasd_device *device; 157 struct dasd_device *device;
159 158
160 device = cqr->device; 159 device = cqr->startdev;
161 /* dump sense data */ 160 /* dump sense data */
162 if (device->discipline && device->discipline->dump_sense) 161 if (device->discipline && device->discipline->dump_sense)
163 device->discipline->dump_sense(device, cqr, irb); 162 device->discipline->dump_sense(device, cqr, irb);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 1d95822e0b8e..d13ea05089a7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -117,6 +117,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
117static int 117static int
118dasd_fba_check_characteristics(struct dasd_device *device) 118dasd_fba_check_characteristics(struct dasd_device *device)
119{ 119{
120 struct dasd_block *block;
120 struct dasd_fba_private *private; 121 struct dasd_fba_private *private;
121 struct ccw_device *cdev = device->cdev; 122 struct ccw_device *cdev = device->cdev;
122 void *rdc_data; 123 void *rdc_data;
@@ -133,6 +134,16 @@ dasd_fba_check_characteristics(struct dasd_device *device)
133 } 134 }
134 device->private = (void *) private; 135 device->private = (void *) private;
135 } 136 }
137 block = dasd_alloc_block();
138 if (IS_ERR(block)) {
139 DEV_MESSAGE(KERN_WARNING, device, "%s",
140 "could not allocate dasd block structure");
141 kfree(device->private);
142 return PTR_ERR(block);
143 }
144 device->block = block;
145 block->base = device;
146
136 /* Read Device Characteristics */ 147 /* Read Device Characteristics */
137 rdc_data = (void *) &(private->rdc_data); 148 rdc_data = (void *) &(private->rdc_data);
138 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); 149 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
@@ -155,60 +166,37 @@ dasd_fba_check_characteristics(struct dasd_device *device)
155 return 0; 166 return 0;
156} 167}
157 168
158static int 169static int dasd_fba_do_analysis(struct dasd_block *block)
159dasd_fba_do_analysis(struct dasd_device *device)
160{ 170{
161 struct dasd_fba_private *private; 171 struct dasd_fba_private *private;
162 int sb, rc; 172 int sb, rc;
163 173
164 private = (struct dasd_fba_private *) device->private; 174 private = (struct dasd_fba_private *) block->base->private;
165 rc = dasd_check_blocksize(private->rdc_data.blk_size); 175 rc = dasd_check_blocksize(private->rdc_data.blk_size);
166 if (rc) { 176 if (rc) {
167 DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d", 177 DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d",
168 private->rdc_data.blk_size); 178 private->rdc_data.blk_size);
169 return rc; 179 return rc;
170 } 180 }
171 device->blocks = private->rdc_data.blk_bdsa; 181 block->blocks = private->rdc_data.blk_bdsa;
172 device->bp_block = private->rdc_data.blk_size; 182 block->bp_block = private->rdc_data.blk_size;
173 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 183 block->s2b_shift = 0; /* bits to shift 512 to get a block */
174 for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1) 184 for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
175 device->s2b_shift++; 185 block->s2b_shift++;
176 return 0; 186 return 0;
177} 187}
178 188
179static int 189static int dasd_fba_fill_geometry(struct dasd_block *block,
180dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 190 struct hd_geometry *geo)
181{ 191{
182 if (dasd_check_blocksize(device->bp_block) != 0) 192 if (dasd_check_blocksize(block->bp_block) != 0)
183 return -EINVAL; 193 return -EINVAL;
184 geo->cylinders = (device->blocks << device->s2b_shift) >> 10; 194 geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
185 geo->heads = 16; 195 geo->heads = 16;
186 geo->sectors = 128 >> device->s2b_shift; 196 geo->sectors = 128 >> block->s2b_shift;
187 return 0; 197 return 0;
188} 198}
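
Note: s2b_shift is log2(bp_block / 512), found by doubling from 512, and the synthetic geometry above spreads the volume over 16 heads with 128 >> s2b_shift sectors per track. A worked example of the shift computation:

#include <stdio.h>

/* Mirrors dasd_fba_do_analysis: bits to shift 512 to get one block. */
static unsigned int s2b_shift(unsigned int blk_size)
{
	unsigned int shift = 0, sb;

	for (sb = 512; sb < blk_size; sb = sb << 1)
		shift++;
	return shift;
}

int main(void)
{
	unsigned int sizes[] = { 512, 1024, 2048, 4096 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("blk_size %4u -> s2b_shift %u\n",
		       sizes[i], s2b_shift(sizes[i]));
	/* e.g. 4096: 512 << 3 == 4096, so 8 sectors of 512 bytes per block */
	return 0;
}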
189 199
190static dasd_era_t
191dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
192{
193 struct dasd_device *device;
194 struct ccw_device *cdev;
195
196 device = (struct dasd_device *) cqr->device;
197 if (irb->scsw.cstat == 0x00 &&
198 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
199 return dasd_era_none;
200
201 cdev = device->cdev;
202 switch (cdev->id.dev_type) {
203 case 0x3370:
204 return dasd_3370_erp_examine(cqr, irb);
205 case 0x9336:
206 return dasd_9336_erp_examine(cqr, irb);
207 default:
208 return dasd_era_recover;
209 }
210}
211
212static dasd_erp_fn_t 200static dasd_erp_fn_t
213dasd_fba_erp_action(struct dasd_ccw_req * cqr) 201dasd_fba_erp_action(struct dasd_ccw_req * cqr)
214{ 202{
@@ -221,13 +209,34 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
221 if (cqr->function == dasd_default_erp_action) 209 if (cqr->function == dasd_default_erp_action)
222 return dasd_default_erp_postaction; 210 return dasd_default_erp_postaction;
223 211
224 DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p", 212 DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p",
225 cqr->function); 213 cqr->function);
226 return NULL; 214 return NULL;
227} 215}
228 216
229static struct dasd_ccw_req * 217static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
230dasd_fba_build_cp(struct dasd_device * device, struct request *req) 218 struct irb *irb)
219{
220 char mask;
221
222 /* first of all check for state change pending interrupt */
223 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
224 if ((irb->scsw.dstat & mask) == mask) {
225 dasd_generic_handle_state_change(device);
226 return;
227 }
228
229 /* check for unsolicited interrupts */
230 DEV_MESSAGE(KERN_DEBUG, device, "%s",
231 "unsolicited interrupt received");
232 device->discipline->dump_sense(device, NULL, irb);
233 dasd_schedule_device_bh(device);
234 return;
235}
236
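
Note: a state-change pending interrupt is recognized only when attention, device end and unit exception are all set at once, hence the (dstat & mask) == mask comparison above rather than a plain bit test. A tiny check of the difference (status bit values as commonly defined for s390, shown here for illustration):

#include <stdio.h>

#define DEV_STAT_ATTENTION  0x80	/* illustrative values */
#define DEV_STAT_DEV_END    0x04
#define DEV_STAT_UNIT_EXCEP 0x01

static int state_change_pending(unsigned char dstat)
{
	unsigned char mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END |
			     DEV_STAT_UNIT_EXCEP;

	return (dstat & mask) == mask;
}

int main(void)
{
	printf("0x85 -> %d\n", state_change_pending(0x85)); /* all set: 1 */
	printf("0x84 -> %d\n", state_change_pending(0x84)); /* missing: 0 */
	return 0;
}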
237static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
238 struct dasd_block *block,
239 struct request *req)
231{ 240{
232 struct dasd_fba_private *private; 241 struct dasd_fba_private *private;
233 unsigned long *idaws; 242 unsigned long *idaws;
@@ -242,17 +251,17 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
242 unsigned int blksize, off; 251 unsigned int blksize, off;
243 unsigned char cmd; 252 unsigned char cmd;
244 253
245 private = (struct dasd_fba_private *) device->private; 254 private = (struct dasd_fba_private *) block->base->private;
246 if (rq_data_dir(req) == READ) { 255 if (rq_data_dir(req) == READ) {
247 cmd = DASD_FBA_CCW_READ; 256 cmd = DASD_FBA_CCW_READ;
248 } else if (rq_data_dir(req) == WRITE) { 257 } else if (rq_data_dir(req) == WRITE) {
249 cmd = DASD_FBA_CCW_WRITE; 258 cmd = DASD_FBA_CCW_WRITE;
250 } else 259 } else
251 return ERR_PTR(-EINVAL); 260 return ERR_PTR(-EINVAL);
252 blksize = device->bp_block; 261 blksize = block->bp_block;
253 /* Calculate record id of first and last block. */ 262 /* Calculate record id of first and last block. */
254 first_rec = req->sector >> device->s2b_shift; 263 first_rec = req->sector >> block->s2b_shift;
255 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 264 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
256 /* Check struct bio and count the number of blocks for the request. */ 265 /* Check struct bio and count the number of blocks for the request. */
257 count = 0; 266 count = 0;
258 cidaw = 0; 267 cidaw = 0;
@@ -260,7 +269,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
260 if (bv->bv_len & (blksize - 1)) 269 if (bv->bv_len & (blksize - 1))
261 /* Fba can only do full blocks. */ 270 /* Fba can only do full blocks. */
262 return ERR_PTR(-EINVAL); 271 return ERR_PTR(-EINVAL);
263 count += bv->bv_len >> (device->s2b_shift + 9); 272 count += bv->bv_len >> (block->s2b_shift + 9);
264#if defined(CONFIG_64BIT) 273#if defined(CONFIG_64BIT)
265 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 274 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
266 cidaw += bv->bv_len / blksize; 275 cidaw += bv->bv_len / blksize;
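
Note: each bio segment must be a whole number of blocks, and its contribution to the block count is bv_len >> (s2b_shift + 9), since one block is 512 << s2b_shift bytes. A one-screen check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int s2b_shift = 3;		/* 4096-byte blocks */
	unsigned int bv_len = 8192;		/* one bio segment */
	unsigned int blocks = bv_len >> (s2b_shift + 9);

	printf("%u bytes -> %u block(s) of %u bytes\n",
	       bv_len, blocks, 512u << s2b_shift);
	return 0;
}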
@@ -284,13 +293,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
284 } 293 }
285 /* Allocate the ccw request. */ 294 /* Allocate the ccw request. */
286 cqr = dasd_smalloc_request(dasd_fba_discipline.name, 295 cqr = dasd_smalloc_request(dasd_fba_discipline.name,
287 cplength, datasize, device); 296 cplength, datasize, memdev);
288 if (IS_ERR(cqr)) 297 if (IS_ERR(cqr))
289 return cqr; 298 return cqr;
290 ccw = cqr->cpaddr; 299 ccw = cqr->cpaddr;
291 /* First ccw is define extent. */ 300 /* First ccw is define extent. */
292 define_extent(ccw++, cqr->data, rq_data_dir(req), 301 define_extent(ccw++, cqr->data, rq_data_dir(req),
293 device->bp_block, req->sector, req->nr_sectors); 302 block->bp_block, req->sector, req->nr_sectors);
294 /* Build locate_record + read/write ccws. */ 303 /* Build locate_record + read/write ccws. */
295 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 304 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
296 LO_data = (struct LO_fba_data *) (idaws + cidaw); 305 LO_data = (struct LO_fba_data *) (idaws + cidaw);
@@ -326,7 +335,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
326 ccw[-1].flags |= CCW_FLAG_CC; 335 ccw[-1].flags |= CCW_FLAG_CC;
327 } 336 }
328 ccw->cmd_code = cmd; 337 ccw->cmd_code = cmd;
329 ccw->count = device->bp_block; 338 ccw->count = block->bp_block;
330 if (idal_is_needed(dst, blksize)) { 339 if (idal_is_needed(dst, blksize)) {
331 ccw->cda = (__u32)(addr_t) idaws; 340 ccw->cda = (__u32)(addr_t) idaws;
332 ccw->flags = CCW_FLAG_IDA; 341 ccw->flags = CCW_FLAG_IDA;
@@ -342,7 +351,9 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
342 } 351 }
343 if (req->cmd_flags & REQ_FAILFAST) 352 if (req->cmd_flags & REQ_FAILFAST)
344 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
345 cqr->device = device; 354 cqr->startdev = memdev;
355 cqr->memdev = memdev;
356 cqr->block = block;
346 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 357 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
347 cqr->retries = 32; 358 cqr->retries = 32;
348 cqr->buildclk = get_clock(); 359 cqr->buildclk = get_clock();
@@ -363,8 +374,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
363 374
364 if (!dasd_page_cache) 375 if (!dasd_page_cache)
365 goto out; 376 goto out;
366 private = (struct dasd_fba_private *) cqr->device->private; 377 private = (struct dasd_fba_private *) cqr->block->base->private;
367 blksize = cqr->device->bp_block; 378 blksize = cqr->block->bp_block;
368 ccw = cqr->cpaddr; 379 ccw = cqr->cpaddr;
369 /* Skip over define extent & locate record. */ 380 /* Skip over define extent & locate record. */
370 ccw++; 381 ccw++;
@@ -394,10 +405,15 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
394 } 405 }
395out: 406out:
396 status = cqr->status == DASD_CQR_DONE; 407 status = cqr->status == DASD_CQR_DONE;
397 dasd_sfree_request(cqr, cqr->device); 408 dasd_sfree_request(cqr, cqr->memdev);
398 return status; 409 return status;
399} 410}
400 411
412static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
413{
414 cqr->status = DASD_CQR_FILLED;
415}
416
401static int 417static int
402dasd_fba_fill_info(struct dasd_device * device, 418dasd_fba_fill_info(struct dasd_device * device,
403 struct dasd_information2_t * info) 419 struct dasd_information2_t * info)
@@ -546,9 +562,10 @@ static struct dasd_discipline dasd_fba_discipline = {
546 .fill_geometry = dasd_fba_fill_geometry, 562 .fill_geometry = dasd_fba_fill_geometry,
547 .start_IO = dasd_start_IO, 563 .start_IO = dasd_start_IO,
548 .term_IO = dasd_term_IO, 564 .term_IO = dasd_term_IO,
549 .examine_error = dasd_fba_examine_error, 565 .handle_terminated_request = dasd_fba_handle_terminated_request,
550 .erp_action = dasd_fba_erp_action, 566 .erp_action = dasd_fba_erp_action,
551 .erp_postaction = dasd_fba_erp_postaction, 567 .erp_postaction = dasd_fba_erp_postaction,
568 .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
552 .build_cp = dasd_fba_build_cp, 569 .build_cp = dasd_fba_build_cp,
553 .free_cp = dasd_fba_free_cp, 570 .free_cp = dasd_fba_free_cp,
554 .dump_sense = dasd_fba_dump_sense, 571 .dump_sense = dasd_fba_dump_sense,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 47ba4462708d..aee6565aaf98 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -25,14 +25,15 @@
25/* 25/*
26 * Allocate and register gendisk structure for device. 26 * Allocate and register gendisk structure for device.
27 */ 27 */
28int 28int dasd_gendisk_alloc(struct dasd_block *block)
29dasd_gendisk_alloc(struct dasd_device *device)
30{ 29{
31 struct gendisk *gdp; 30 struct gendisk *gdp;
31 struct dasd_device *base;
32 int len; 32 int len;
33 33
34 /* Make sure the minor for this device exists. */ 34 /* Make sure the minor for this device exists. */
35 if (device->devindex >= DASD_PER_MAJOR) 35 base = block->base;
36 if (base->devindex >= DASD_PER_MAJOR)
36 return -EBUSY; 37 return -EBUSY;
37 38
38 gdp = alloc_disk(1 << DASD_PARTN_BITS); 39 gdp = alloc_disk(1 << DASD_PARTN_BITS);
@@ -41,9 +42,9 @@ dasd_gendisk_alloc(struct dasd_device *device)
41 42
42 /* Initialize gendisk structure. */ 43 /* Initialize gendisk structure. */
43 gdp->major = DASD_MAJOR; 44 gdp->major = DASD_MAJOR;
44 gdp->first_minor = device->devindex << DASD_PARTN_BITS; 45 gdp->first_minor = base->devindex << DASD_PARTN_BITS;
45 gdp->fops = &dasd_device_operations; 46 gdp->fops = &dasd_device_operations;
46 gdp->driverfs_dev = &device->cdev->dev; 47 gdp->driverfs_dev = &base->cdev->dev;
47 48
48 /* 49 /*
49 * Set device name. 50 * Set device name.
@@ -53,53 +54,51 @@ dasd_gendisk_alloc(struct dasd_device *device)
53 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252 54 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
54 */ 55 */
55 len = sprintf(gdp->disk_name, "dasd"); 56 len = sprintf(gdp->disk_name, "dasd");
56 if (device->devindex > 25) { 57 if (base->devindex > 25) {
57 if (device->devindex > 701) { 58 if (base->devindex > 701) {
58 if (device->devindex > 18277) 59 if (base->devindex > 18277)
59 len += sprintf(gdp->disk_name + len, "%c", 60 len += sprintf(gdp->disk_name + len, "%c",
60 'a'+(((device->devindex-18278) 61 'a'+(((base->devindex-18278)
61 /17576)%26)); 62 /17576)%26));
62 len += sprintf(gdp->disk_name + len, "%c", 63 len += sprintf(gdp->disk_name + len, "%c",
63 'a'+(((device->devindex-702)/676)%26)); 64 'a'+(((base->devindex-702)/676)%26));
64 } 65 }
65 len += sprintf(gdp->disk_name + len, "%c", 66 len += sprintf(gdp->disk_name + len, "%c",
66 'a'+(((device->devindex-26)/26)%26)); 67 'a'+(((base->devindex-26)/26)%26));
67 } 68 }
68 len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26)); 69 len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
69 70
70 if (device->features & DASD_FEATURE_READONLY) 71 if (block->base->features & DASD_FEATURE_READONLY)
71 set_disk_ro(gdp, 1); 72 set_disk_ro(gdp, 1);
72 gdp->private_data = device; 73 gdp->private_data = block;
73 gdp->queue = device->request_queue; 74 gdp->queue = block->request_queue;
74 device->gdp = gdp; 75 block->gdp = gdp;
75 set_capacity(device->gdp, 0); 76 set_capacity(block->gdp, 0);
76 add_disk(device->gdp); 77 add_disk(block->gdp);
77 return 0; 78 return 0;
78} 79}
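
Note: the name scheme is a small base-26 ladder: dasda..dasdz, then dasdaa..dasdzz, then the dasdaaa and dasdaaaa ranges, 475252 names in total. A standalone transcription of the same arithmetic with the boundary cases printed:

#include <stdio.h>

/* Mirrors the naming logic in dasd_gendisk_alloc. */
static void dasd_name(unsigned int i, char *buf)
{
	int len = sprintf(buf, "dasd");

	if (i > 25) {
		if (i > 701) {
			if (i > 18277)
				len += sprintf(buf + len, "%c",
					       'a' + ((i - 18278) / 17576) % 26);
			len += sprintf(buf + len, "%c",
				       'a' + ((i - 702) / 676) % 26);
		}
		len += sprintf(buf + len, "%c", 'a' + ((i - 26) / 26) % 26);
	}
	sprintf(buf + len, "%c", 'a' + i % 26);
}

int main(void)
{
	unsigned int idx[] = { 0, 25, 26, 701, 702, 18277, 18278 };
	char buf[16];
	unsigned int i;

	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
		dasd_name(idx[i], buf);
		printf("devindex %5u -> %s\n", idx[i], buf);
	}
	return 0;	/* dasda, dasdz, dasdaa, dasdzz, dasdaaa, ... */
}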
79 80
80/* 81/*
81 * Unregister and free gendisk structure for device. 82 * Unregister and free gendisk structure for device.
82 */ 83 */
83void 84void dasd_gendisk_free(struct dasd_block *block)
84dasd_gendisk_free(struct dasd_device *device)
85{ 85{
86 if (device->gdp) { 86 if (block->gdp) {
87 del_gendisk(device->gdp); 87 del_gendisk(block->gdp);
88 device->gdp->queue = NULL; 88 block->gdp->queue = NULL;
89 put_disk(device->gdp); 89 put_disk(block->gdp);
90 device->gdp = NULL; 90 block->gdp = NULL;
91 } 91 }
92} 92}
93 93
94/* 94/*
95 * Trigger a partition detection. 95 * Trigger a partition detection.
96 */ 96 */
97int 97int dasd_scan_partitions(struct dasd_block *block)
98dasd_scan_partitions(struct dasd_device * device)
99{ 98{
100 struct block_device *bdev; 99 struct block_device *bdev;
101 100
102 bdev = bdget_disk(device->gdp, 0); 101 bdev = bdget_disk(block->gdp, 0);
103 if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0) 102 if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
104 return -ENODEV; 103 return -ENODEV;
105 /* 104 /*
@@ -117,7 +116,7 @@ dasd_scan_partitions(struct dasd_device * device)
117 * is why the assignment to device->bdev is done AFTER 116 * is why the assignment to device->bdev is done AFTER
118 * the BLKRRPART ioctl. 117 * the BLKRRPART ioctl.
119 */ 118 */
120 device->bdev = bdev; 119 block->bdev = bdev;
121 return 0; 120 return 0;
122} 121}
123 122
@@ -125,8 +124,7 @@ dasd_scan_partitions(struct dasd_device * device)
125 * Remove all inodes in the system for a device, delete the 124 * Remove all inodes in the system for a device, delete the
126 * partitions and make device unusable by setting its size to zero. 125 * partitions and make device unusable by setting its size to zero.
127 */ 126 */
128void 127void dasd_destroy_partitions(struct dasd_block *block)
129dasd_destroy_partitions(struct dasd_device * device)
130{ 128{
131 /* The two structs have 168/176 byte on 31/64 bit. */ 129 /* The two structs have 168/176 byte on 31/64 bit. */
132 struct blkpg_partition bpart; 130 struct blkpg_partition bpart;
@@ -137,8 +135,8 @@ dasd_destroy_partitions(struct dasd_device * device)
137 * Get the bdev pointer from the device structure and clear 135 * Get the bdev pointer from the device structure and clear
138 * device->bdev to lower the offline open_count limit again. 136 * device->bdev to lower the offline open_count limit again.
139 */ 137 */
140 bdev = device->bdev; 138 bdev = block->bdev;
141 device->bdev = NULL; 139 block->bdev = NULL;
142 140
143 /* 141 /*
144 * See fs/partition/check.c:delete_partition 142 * See fs/partition/check.c:delete_partition
@@ -149,17 +147,16 @@ dasd_destroy_partitions(struct dasd_device * device)
149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); 147 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
150 barg.data = (void __force __user *) &bpart; 148 barg.data = (void __force __user *) &bpart;
151 barg.op = BLKPG_DEL_PARTITION; 149 barg.op = BLKPG_DEL_PARTITION;
152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) 150 for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 151 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
154 152
155 invalidate_partition(device->gdp, 0); 153 invalidate_partition(block->gdp, 0);
156 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ 154 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
157 blkdev_put(bdev); 155 blkdev_put(bdev);
158 set_capacity(device->gdp, 0); 156 set_capacity(block->gdp, 0);
159} 157}
160 158
161int 159int dasd_gendisk_init(void)
162dasd_gendisk_init(void)
163{ 160{
164 int rc; 161 int rc;
165 162
@@ -174,8 +171,7 @@ dasd_gendisk_init(void)
174 return 0; 171 return 0;
175} 172}
176 173
177void 174void dasd_gendisk_exit(void)
178dasd_gendisk_exit(void)
179{ 175{
180 unregister_blkdev(DASD_MAJOR, "dasd"); 176 unregister_blkdev(DASD_MAJOR, "dasd");
181} 177}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index d427daeef511..44b2984dfbee 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -64,13 +64,7 @@
64 * SECTION: Type definitions 64 * SECTION: Type definitions
65 */ 65 */
66struct dasd_device; 66struct dasd_device;
67 67struct dasd_block;
68typedef enum {
69 dasd_era_fatal = -1, /* no chance to recover */
70 dasd_era_none = 0, /* don't recover, everything alright */
71 dasd_era_msg = 1, /* don't recover, just report... */
72 dasd_era_recover = 2 /* recovery action recommended */
73} dasd_era_t;
74 68
75/* BIT DEFINITIONS FOR SENSE DATA */ 69/* BIT DEFINITIONS FOR SENSE DATA */
76#define DASD_SENSE_BIT_0 0x80 70#define DASD_SENSE_BIT_0 0x80
@@ -151,19 +145,22 @@ do { \
151 145
152struct dasd_ccw_req { 146struct dasd_ccw_req {
153 unsigned int magic; /* Eye catcher */ 147 unsigned int magic; /* Eye catcher */
154 struct list_head list; /* list_head for request queueing. */ 148 struct list_head devlist; /* for dasd_device request queue */
149 struct list_head blocklist; /* for dasd_block request queue */
155 150
156 /* Where to execute what... */ 151 /* Where to execute what... */
157 struct dasd_device *device; /* device the request is for */ 152 struct dasd_block *block; /* the originating block device */
153 struct dasd_device *memdev; /* the device used to allocate this */
154 struct dasd_device *startdev; /* device the request is started on */
158 struct ccw1 *cpaddr; /* address of channel program */ 155 struct ccw1 *cpaddr; /* address of channel program */
159 char status; /* status of this request */ 156 char status; /* status of this request */
160 short retries; /* A retry counter */ 157 short retries; /* A retry counter */
161 unsigned long flags; /* flags of this request */ 158 unsigned long flags; /* flags of this request */
162 159
163 /* ... and how */ 160 /* ... and how */
164 unsigned long starttime; /* jiffies time of request start */ 161 unsigned long starttime; /* jiffies time of request start */
165 int expires; /* expiration period in jiffies */ 162 int expires; /* expiration period in jiffies */
166 char lpm; /* logical path mask */ 163 char lpm; /* logical path mask */
167 void *data; /* pointer to data area */ 164 void *data; /* pointer to data area */
168 165
169 /* these are important for recovering erroneous requests */ 166 /* these are important for recovering erroneous requests */
@@ -178,20 +175,27 @@ struct dasd_ccw_req {
178 unsigned long long endclk; /* TOD-clock of request termination */ 175 unsigned long long endclk; /* TOD-clock of request termination */
179 176
180 /* Callback that is called after reaching final status. */ 177 /* Callback that is called after reaching final status. */
181 void (*callback)(struct dasd_ccw_req *, void *data); 178 void (*callback)(struct dasd_ccw_req *, void *data);
182 void *callback_data; 179 void *callback_data;
183}; 180};
184 181
185/* 182/*
186 * dasd_ccw_req -> status can be: 183 * dasd_ccw_req -> status can be:
187 */ 184 */
188#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */ 185#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
189#define DASD_CQR_QUEUED 0x01 /* request is queued to be processed */ 186#define DASD_CQR_DONE 0x01 /* request is completed successfully */
190#define DASD_CQR_IN_IO 0x02 /* request is currently in IO */ 187#define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */
191#define DASD_CQR_DONE 0x03 /* request is completed successfully */ 188#define DASD_CQR_IN_ERP 0x03 /* request is in recovery */
192#define DASD_CQR_ERROR 0x04 /* request is completed with error */ 189#define DASD_CQR_FAILED 0x04 /* request is finally failed */
193#define DASD_CQR_FAILED 0x05 /* request is finally failed */ 190#define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */
194#define DASD_CQR_CLEAR 0x06 /* request is clear pending */ 191
192#define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */
193#define DASD_CQR_IN_IO 0x81 /* request is currently in IO */
194#define DASD_CQR_ERROR 0x82 /* request is completed with error */
195#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
196#define DASD_CQR_CLEARED 0x84 /* request was cleared */
197#define DASD_CQR_SUCCESS 0x85 /* request was successful */
198
195 199
196/* per dasd_ccw_req flags */ 200/* per dasd_ccw_req flags */
197#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 201#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
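
Note: the single linear status sequence is split into two ranges here: 0x00..0x05 for settled, externally visible outcomes and 0x80 and up for transient device-queue processing states. Reading the high bit as "transient" is an assumption for illustration only; the driver itself compares statuses explicitly:

#include <stdio.h>

#define DASD_CQR_FILLED 0x00
#define DASD_CQR_DONE   0x01
#define DASD_CQR_FAILED 0x04
#define DASD_CQR_QUEUED 0x80
#define DASD_CQR_IN_IO  0x81

/* One plausible reading of the 0x00.. vs 0x80.. split (assumption). */
static int is_device_transient(unsigned char status)
{
	return status & 0x80;
}

int main(void)
{
	printf("QUEUED transient? %d\n", is_device_transient(DASD_CQR_QUEUED));
	printf("DONE   transient? %d\n", is_device_transient(DASD_CQR_DONE));
	return 0;
}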
@@ -214,52 +218,71 @@ struct dasd_discipline {
214 218
215 struct list_head list; /* used for list of disciplines */ 219 struct list_head list; /* used for list of disciplines */
216 220
217 /* 221 /*
218 * Device recognition functions. check_device is used to verify 222 * Device recognition functions. check_device is used to verify
219 * the sense data and the information returned by read device 223 * the sense data and the information returned by read device
220 * characteristics. It returns 0 if the discipline can be used 224 * characteristics. It returns 0 if the discipline can be used
221 * for the device in question. 225 * for the device in question. uncheck_device is called during
222 * do_analysis is used in the step from device state "basic" to 226 * device shutdown to deregister a device from its discipline.
223 * state "accept". It returns 0 if the device can be made ready, 227 */
224 * it returns -EMEDIUMTYPE if the device can't be made ready or 228 int (*check_device) (struct dasd_device *);
225 * -EAGAIN if do_analysis started a ccw that needs to complete 229 void (*uncheck_device) (struct dasd_device *);
226 * before the analysis may be repeated. 230
227 */ 231 /*
228 int (*check_device)(struct dasd_device *); 232 * do_analysis is used in the step from device state "basic" to
229 int (*do_analysis) (struct dasd_device *); 233 * state "accept". It returns 0 if the device can be made ready,
230 234 * it returns -EMEDIUMTYPE if the device can't be made ready or
231 /* 235 * -EAGAIN if do_analysis started a ccw that needs to complete
232 * Device operation functions. build_cp creates a ccw chain for 236 * before the analysis may be repeated.
233 * a block device request, start_io starts the request and 237 */
234 * term_IO cancels it (e.g. in case of a timeout). format_device 238 int (*do_analysis) (struct dasd_block *);
235 * returns a ccw chain to be used to format the device. 239
236 */ 240 /*
241 * Last things to do when a device is set online, and first things
242 * when it is set offline.
243 */
244 int (*ready_to_online) (struct dasd_device *);
245 int (*online_to_ready) (struct dasd_device *);
246
247 /*
248 * Device operation functions. build_cp creates a ccw chain for
249 * a block device request, start_io starts the request and
250 * term_IO cancels it (e.g. in case of a timeout). format_device
251 * returns a ccw chain to be used to format the device.
252 * handle_terminated_request allows to examine a cqr and prepare
253 * it for retry.
254 */
237 struct dasd_ccw_req *(*build_cp) (struct dasd_device *, 255 struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
256 struct dasd_block *,
238 struct request *); 257 struct request *);
239 int (*start_IO) (struct dasd_ccw_req *); 258 int (*start_IO) (struct dasd_ccw_req *);
240 int (*term_IO) (struct dasd_ccw_req *); 259 int (*term_IO) (struct dasd_ccw_req *);
260 void (*handle_terminated_request) (struct dasd_ccw_req *);
241 struct dasd_ccw_req *(*format_device) (struct dasd_device *, 261 struct dasd_ccw_req *(*format_device) (struct dasd_device *,
242 struct format_data_t *); 262 struct format_data_t *);
243 int (*free_cp) (struct dasd_ccw_req *, struct request *); 263 int (*free_cp) (struct dasd_ccw_req *, struct request *);
244 /* 264
245 * Error recovery functions. examine_error() returns a value that 265 /*
246 * indicates what to do for an error condition. If examine_error() 266 * Error recovery functions. examine_error() returns a value that
267 * indicates what to do for an error condition. If examine_error()
247 * returns 'dasd_era_recover' erp_action() is called to create a 268 * returns 'dasd_era_recover' erp_action() is called to create a
248 * special error recovery ccw. erp_postaction() is called after 269 * special error recovery ccw. erp_postaction() is called after
249 * an error recovery ccw has finished its execution. dump_sense 270 * an error recovery ccw has finished its execution. dump_sense
250 * is called for every error condition to print the sense data 271 * is called for every error condition to print the sense data
251 * to the console. 272 * to the console.
252 */ 273 */
253 dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *);
254 dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *); 274 dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
255 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); 275 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
256 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 276 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
257 struct irb *); 277 struct irb *);
258 278
279 void (*handle_unsolicited_interrupt) (struct dasd_device *,
280 struct irb *);
281
259 /* i/o control functions. */ 282 /* i/o control functions. */
260 int (*fill_geometry) (struct dasd_device *, struct hd_geometry *); 283 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
261 int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); 284 int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
262 int (*ioctl) (struct dasd_device *, unsigned int, void __user *); 285 int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
263}; 286};
264 287
265extern struct dasd_discipline *dasd_diag_discipline_pointer; 288extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -267,12 +290,18 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
267/* 290/*
268 * Unique identifier for dasd device. 291 * Unique identifier for dasd device.
269 */ 292 */
293#define UA_NOT_CONFIGURED 0x00
294#define UA_BASE_DEVICE 0x01
295#define UA_BASE_PAV_ALIAS 0x02
296#define UA_HYPER_PAV_ALIAS 0x03
297
270struct dasd_uid { 298struct dasd_uid {
271 __u8 alias; 299 __u8 type;
272 char vendor[4]; 300 char vendor[4];
273 char serial[15]; 301 char serial[15];
274 __u16 ssid; 302 __u16 ssid;
275 __u8 unit_addr; 303 __u8 real_unit_addr;
304 __u8 base_unit_addr;
276}; 305};
277 306
278/* 307/*
@@ -293,14 +322,9 @@ struct dasd_uid {
293 322
294struct dasd_device { 323struct dasd_device {
295 /* Block device stuff. */ 324 /* Block device stuff. */
296 struct gendisk *gdp; 325 struct dasd_block *block;
297 struct request_queue *request_queue; 326
298 spinlock_t request_queue_lock;
299 struct block_device *bdev;
300 unsigned int devindex; 327 unsigned int devindex;
301 unsigned long blocks; /* size of volume in blocks */
302 unsigned int bp_block; /* bytes per block */
303 unsigned int s2b_shift; /* log2 (bp_block/512) */
304 unsigned long flags; /* per device flags */ 328 unsigned long flags; /* per device flags */
305 unsigned short features; /* copy of devmap-features (read-only!) */ 329 unsigned short features; /* copy of devmap-features (read-only!) */
306 330
@@ -316,9 +340,8 @@ struct dasd_device {
316 int state, target; 340 int state, target;
317 int stopped; /* device (ccw_device_start) was stopped */ 341 int stopped; /* device (ccw_device_start) was stopped */
318 342
319 /* Open and reference count. */ 343 /* reference count. */
320 atomic_t ref_count; 344 atomic_t ref_count;
321 atomic_t open_count;
322 345
323 /* ccw queue and memory for static ccw/erp buffers. */ 346 /* ccw queue and memory for static ccw/erp buffers. */
324 struct list_head ccw_queue; 347 struct list_head ccw_queue;
@@ -337,20 +360,45 @@ struct dasd_device {
337 360
338 struct ccw_device *cdev; 361 struct ccw_device *cdev;
339 362
363 /* hook for alias management */
364 struct list_head alias_list;
365};
366
367struct dasd_block {
368 /* Block device stuff. */
369 struct gendisk *gdp;
370 struct request_queue *request_queue;
371 spinlock_t request_queue_lock;
372 struct block_device *bdev;
373 atomic_t open_count;
374
375 unsigned long blocks; /* size of volume in blocks */
376 unsigned int bp_block; /* bytes per block */
377 unsigned int s2b_shift; /* log2 (bp_block/512) */
378
379 struct dasd_device *base;
380 struct list_head ccw_queue;
381 spinlock_t queue_lock;
382
383 atomic_t tasklet_scheduled;
384 struct tasklet_struct tasklet;
385 struct timer_list timer;
386
340#ifdef CONFIG_DASD_PROFILE 387#ifdef CONFIG_DASD_PROFILE
341 struct dasd_profile_info_t profile; 388 struct dasd_profile_info_t profile;
342#endif 389#endif
343}; 390};
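
Note: the split moves everything block-layer related (gendisk, request queue, geometry, open count) into dasd_block, leaving dasd_device as the ccw-side object; device->block and block->base cross-link the pair. That alias devices run with block == NULL is an assumption drawn from the alias lists above, not something this hunk states. A toy model of the invariant:

#include <assert.h>
#include <stdio.h>

struct dasd_blk;
struct dasd_dev { struct dasd_blk *block; const char *busid; };
struct dasd_blk { struct dasd_dev *base; };

int main(void)
{
	struct dasd_dev base = { NULL, "0.0.1000" };
	struct dasd_dev alias = { NULL, "0.0.10ff" };
	struct dasd_blk blk = { &base };

	base.block = &blk;
	/* Invariant for base devices: dev->block->base == dev. */
	assert(base.block->base == &base);
	printf("base %s has a block device: %s\n", base.busid,
	       base.block ? "yes" : "no");
	printf("alias %s has a block device: %s\n", alias.busid,
	       alias.block ? "yes" : "no");
	return 0;
}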
344 391
392
393
345/* reasons why device (ccw_device_start) was stopped */ 394/* reasons why device (ccw_device_start) was stopped */
346#define DASD_STOPPED_NOT_ACC 1 /* not accessible */ 395#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
347#define DASD_STOPPED_QUIESCE 2 /* Quiesced */ 396#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
348#define DASD_STOPPED_PENDING 4 /* long busy */ 397#define DASD_STOPPED_PENDING 4 /* long busy */
349#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ 398#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
350#define DASD_STOPPED_DC_EIO 16 /* disconnected, return -EIO */ 399#define DASD_STOPPED_SU 16 /* summary unit check handling */
351 400
352/* per device flags */ 401/* per device flags */
353#define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */
354#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ 402#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
355#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ 403#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
356#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ 404#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
@@ -489,6 +537,9 @@ dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
489struct dasd_device *dasd_alloc_device(void); 537struct dasd_device *dasd_alloc_device(void);
490void dasd_free_device(struct dasd_device *); 538void dasd_free_device(struct dasd_device *);
491 539
540struct dasd_block *dasd_alloc_block(void);
541void dasd_free_block(struct dasd_block *);
542
492void dasd_enable_device(struct dasd_device *); 543void dasd_enable_device(struct dasd_device *);
493void dasd_set_target_state(struct dasd_device *, int); 544void dasd_set_target_state(struct dasd_device *, int);
494void dasd_kick_device(struct dasd_device *); 545void dasd_kick_device(struct dasd_device *);
@@ -497,18 +548,23 @@ void dasd_add_request_head(struct dasd_ccw_req *);
497void dasd_add_request_tail(struct dasd_ccw_req *); 548void dasd_add_request_tail(struct dasd_ccw_req *);
498int dasd_start_IO(struct dasd_ccw_req *); 549int dasd_start_IO(struct dasd_ccw_req *);
499int dasd_term_IO(struct dasd_ccw_req *); 550int dasd_term_IO(struct dasd_ccw_req *);
500void dasd_schedule_bh(struct dasd_device *); 551void dasd_schedule_device_bh(struct dasd_device *);
552void dasd_schedule_block_bh(struct dasd_block *);
501int dasd_sleep_on(struct dasd_ccw_req *); 553int dasd_sleep_on(struct dasd_ccw_req *);
502int dasd_sleep_on_immediatly(struct dasd_ccw_req *); 554int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
503int dasd_sleep_on_interruptible(struct dasd_ccw_req *); 555int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
504void dasd_set_timer(struct dasd_device *, int); 556void dasd_device_set_timer(struct dasd_device *, int);
505void dasd_clear_timer(struct dasd_device *); 557void dasd_device_clear_timer(struct dasd_device *);
558void dasd_block_set_timer(struct dasd_block *, int);
559void dasd_block_clear_timer(struct dasd_block *);
506int dasd_cancel_req(struct dasd_ccw_req *); 560int dasd_cancel_req(struct dasd_ccw_req *);
561int dasd_flush_device_queue(struct dasd_device *);
507int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *); 562int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
508void dasd_generic_remove (struct ccw_device *cdev); 563void dasd_generic_remove (struct ccw_device *cdev);
509int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); 564int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
510int dasd_generic_set_offline (struct ccw_device *cdev); 565int dasd_generic_set_offline (struct ccw_device *cdev);
511int dasd_generic_notify(struct ccw_device *, int); 566int dasd_generic_notify(struct ccw_device *, int);
567void dasd_generic_handle_state_change(struct dasd_device *);
512 568
513int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); 569int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
514 570
@@ -542,10 +598,10 @@ int dasd_busid_known(char *);
542/* externals in dasd_gendisk.c */ 598/* externals in dasd_gendisk.c */
543int dasd_gendisk_init(void); 599int dasd_gendisk_init(void);
544void dasd_gendisk_exit(void); 600void dasd_gendisk_exit(void);
545int dasd_gendisk_alloc(struct dasd_device *); 601int dasd_gendisk_alloc(struct dasd_block *);
546void dasd_gendisk_free(struct dasd_device *); 602void dasd_gendisk_free(struct dasd_block *);
547int dasd_scan_partitions(struct dasd_device *); 603int dasd_scan_partitions(struct dasd_block *);
548void dasd_destroy_partitions(struct dasd_device *); 604void dasd_destroy_partitions(struct dasd_block *);
549 605
550/* externals in dasd_ioctl.c */ 606/* externals in dasd_ioctl.c */
551int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); 607int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
@@ -563,20 +619,9 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
563void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 619void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
564void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 620void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
565 621
566/* externals in dasd_3370_erp.c */
567dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
568
569/* externals in dasd_3990_erp.c */ 622/* externals in dasd_3990_erp.c */
570dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *);
571struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); 623struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
572 624
573/* externals in dasd_9336_erp.c */
574dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *);
575
576/* externals in dasd_9336_erp.c */
577dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *);
578struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *);
579
580/* externals in dasd_eer.c */ 625/* externals in dasd_eer.c */
581#ifdef CONFIG_DASD_EER 626#ifdef CONFIG_DASD_EER
582int dasd_eer_init(void); 627int dasd_eer_init(void);
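The dasd_int.h hunks above are the heart of this series: the monolithic dasd_device is split into a channel-side dasd_device and a block-side dasd_block, joined through block->base, each with its own ccw queue, tasklet and timer. A minimal sketch of how a block-layer path now reaches the channel device; the helper name is illustrative, only the fields come from the struct definitions above:

static struct dasd_device *example_base_from_bdev(struct block_device *bdev)
{
        /* gendisk private_data now carries the dasd_block, not the device */
        struct dasd_block *block = bdev->bd_disk->private_data;

        if (!block)
                return NULL;    /* disk not (or no longer) bound to a DASD */
        return block->base;     /* channel side: cdev, ccw_queue, state */
}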
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 672eb0a3dd0b..91a64630cb0f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -38,15 +38,15 @@ dasd_ioctl_api_version(void __user *argp)
38static int 38static int
39dasd_ioctl_enable(struct block_device *bdev) 39dasd_ioctl_enable(struct block_device *bdev)
40{ 40{
41 struct dasd_device *device = bdev->bd_disk->private_data; 41 struct dasd_block *block = bdev->bd_disk->private_data;
42 42
43 if (!capable(CAP_SYS_ADMIN)) 43 if (!capable(CAP_SYS_ADMIN))
44 return -EACCES; 44 return -EACCES;
45 45
46 dasd_enable_device(device); 46 dasd_enable_device(block->base);
47 /* Formatting the dasd device can change the capacity. */ 47 /* Formatting the dasd device can change the capacity. */
48 mutex_lock(&bdev->bd_mutex); 48 mutex_lock(&bdev->bd_mutex);
49 i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); 49 i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9);
50 mutex_unlock(&bdev->bd_mutex); 50 mutex_unlock(&bdev->bd_mutex);
51 return 0; 51 return 0;
52} 52}
@@ -58,7 +58,7 @@ dasd_ioctl_enable(struct block_device *bdev)
58static int 58static int
59dasd_ioctl_disable(struct block_device *bdev) 59dasd_ioctl_disable(struct block_device *bdev)
60{ 60{
61 struct dasd_device *device = bdev->bd_disk->private_data; 61 struct dasd_block *block = bdev->bd_disk->private_data;
62 62
63 if (!capable(CAP_SYS_ADMIN)) 63 if (!capable(CAP_SYS_ADMIN))
64 return -EACCES; 64 return -EACCES;
@@ -71,7 +71,7 @@ dasd_ioctl_disable(struct block_device *bdev)
71 * using the BIODASDFMT ioctl. Therefore the correct state for the 71 * using the BIODASDFMT ioctl. Therefore the correct state for the
72 * device is DASD_STATE_BASIC, which allows basic i/o. 72 * device is DASD_STATE_BASIC, which allows basic i/o.
73 */ 73 */
74 dasd_set_target_state(device, DASD_STATE_BASIC); 74 dasd_set_target_state(block->base, DASD_STATE_BASIC);
75 /* 75 /*
76 * Set i_size to zero, since read, write, etc. check against this 76 * Set i_size to zero, since read, write, etc. check against this
77 * value. 77 * value.
@@ -85,19 +85,19 @@ dasd_ioctl_disable(struct block_device *bdev)
85/* 85/*
86 * Quiesce device. 86 * Quiesce device.
87 */ 87 */
88static int 88static int dasd_ioctl_quiesce(struct dasd_block *block)
89dasd_ioctl_quiesce(struct dasd_device *device)
90{ 89{
91 unsigned long flags; 90 unsigned long flags;
91 struct dasd_device *base;
92 92
93 base = block->base;
93 if (!capable (CAP_SYS_ADMIN)) 94 if (!capable (CAP_SYS_ADMIN))
94 return -EACCES; 95 return -EACCES;
95 96
96 DEV_MESSAGE (KERN_DEBUG, device, "%s", 97 DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device");
97 "Quiesce IO on device"); 98 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
98 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 99 base->stopped |= DASD_STOPPED_QUIESCE;
99 device->stopped |= DASD_STOPPED_QUIESCE; 100 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
100 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
101 return 0; 101 return 0;
102} 102}
103 103
@@ -105,22 +105,21 @@ dasd_ioctl_quiesce(struct dasd_device *device)
105/* 105/*
106 * Resume device. 106 * Resume device.
107 */ 107 */
108static int 108static int dasd_ioctl_resume(struct dasd_block *block)
109dasd_ioctl_resume(struct dasd_device *device)
110{ 109{
111 unsigned long flags; 110 unsigned long flags;
111 struct dasd_device *base;
112 112
113 base = block->base;
113 if (!capable (CAP_SYS_ADMIN)) 114 if (!capable (CAP_SYS_ADMIN))
114 return -EACCES; 115 return -EACCES;
115 116
116 DEV_MESSAGE (KERN_DEBUG, device, "%s", 117 DEV_MESSAGE(KERN_DEBUG, base, "%s", "resume IO on device");
117 "resume IO on device"); 118 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
118 119 base->stopped &= ~DASD_STOPPED_QUIESCE;
119 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 120 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
120 device->stopped &= ~DASD_STOPPED_QUIESCE;
121 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
122 121
123 dasd_schedule_bh (device); 122 dasd_schedule_block_bh(block);
124 return 0; 123 return 0;
125} 124}
126 125
@@ -130,22 +129,23 @@ dasd_ioctl_resume(struct dasd_device *device)
130 * commands to format a single unit of the device. In terms of the ECKD 129 * commands to format a single unit of the device. In terms of the ECKD
131 * devices this means CCWs are generated to format a single track. 130 * devices this means CCWs are generated to format a single track.
132 */ 131 */
133static int 132static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
134dasd_format(struct dasd_device * device, struct format_data_t * fdata)
135{ 133{
136 struct dasd_ccw_req *cqr; 134 struct dasd_ccw_req *cqr;
135 struct dasd_device *base;
137 int rc; 136 int rc;
138 137
139 if (device->discipline->format_device == NULL) 138 base = block->base;
139 if (base->discipline->format_device == NULL)
140 return -EPERM; 140 return -EPERM;
141 141
142 if (device->state != DASD_STATE_BASIC) { 142 if (base->state != DASD_STATE_BASIC) {
143 DEV_MESSAGE(KERN_WARNING, device, "%s", 143 DEV_MESSAGE(KERN_WARNING, base, "%s",
144 "dasd_format: device is not disabled! "); 144 "dasd_format: device is not disabled! ");
145 return -EBUSY; 145 return -EBUSY;
146 } 146 }
147 147
148 DBF_DEV_EVENT(DBF_NOTICE, device, 148 DBF_DEV_EVENT(DBF_NOTICE, base,
149 "formatting units %d to %d (%d B blocks) flags %d", 149 "formatting units %d to %d (%d B blocks) flags %d",
150 fdata->start_unit, 150 fdata->start_unit,
151 fdata->stop_unit, fdata->blksize, fdata->intensity); 151 fdata->stop_unit, fdata->blksize, fdata->intensity);
@@ -156,20 +156,20 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata)
156 * enabling the device later. 156 * enabling the device later.
157 */ 157 */
158 if (fdata->start_unit == 0) { 158 if (fdata->start_unit == 0) {
159 struct block_device *bdev = bdget_disk(device->gdp, 0); 159 struct block_device *bdev = bdget_disk(block->gdp, 0);
160 bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize); 160 bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
161 bdput(bdev); 161 bdput(bdev);
162 } 162 }
163 163
164 while (fdata->start_unit <= fdata->stop_unit) { 164 while (fdata->start_unit <= fdata->stop_unit) {
165 cqr = device->discipline->format_device(device, fdata); 165 cqr = base->discipline->format_device(base, fdata);
166 if (IS_ERR(cqr)) 166 if (IS_ERR(cqr))
167 return PTR_ERR(cqr); 167 return PTR_ERR(cqr);
168 rc = dasd_sleep_on_interruptible(cqr); 168 rc = dasd_sleep_on_interruptible(cqr);
169 dasd_sfree_request(cqr, cqr->device); 169 dasd_sfree_request(cqr, cqr->memdev);
170 if (rc) { 170 if (rc) {
171 if (rc != -ERESTARTSYS) 171 if (rc != -ERESTARTSYS)
172 DEV_MESSAGE(KERN_ERR, device, 172 DEV_MESSAGE(KERN_ERR, base,
173 " Formatting of unit %d failed " 173 " Formatting of unit %d failed "
174 "with rc = %d", 174 "with rc = %d",
175 fdata->start_unit, rc); 175 fdata->start_unit, rc);
@@ -186,7 +186,7 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata)
186static int 186static int
187dasd_ioctl_format(struct block_device *bdev, void __user *argp) 187dasd_ioctl_format(struct block_device *bdev, void __user *argp)
188{ 188{
189 struct dasd_device *device = bdev->bd_disk->private_data; 189 struct dasd_block *block = bdev->bd_disk->private_data;
190 struct format_data_t fdata; 190 struct format_data_t fdata;
191 191
192 if (!capable(CAP_SYS_ADMIN)) 192 if (!capable(CAP_SYS_ADMIN))
@@ -194,51 +194,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
194 if (!argp) 194 if (!argp)
195 return -EINVAL; 195 return -EINVAL;
196 196
197 if (device->features & DASD_FEATURE_READONLY) 197 if (block->base->features & DASD_FEATURE_READONLY)
198 return -EROFS; 198 return -EROFS;
199 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 199 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
200 return -EFAULT; 200 return -EFAULT;
201 if (bdev != bdev->bd_contains) { 201 if (bdev != bdev->bd_contains) {
202 DEV_MESSAGE(KERN_WARNING, device, "%s", 202 DEV_MESSAGE(KERN_WARNING, block->base, "%s",
203 "Cannot low-level format a partition"); 203 "Cannot low-level format a partition");
204 return -EINVAL; 204 return -EINVAL;
205 } 205 }
206 return dasd_format(device, &fdata); 206 return dasd_format(block, &fdata);
207} 207}
208 208
209#ifdef CONFIG_DASD_PROFILE 209#ifdef CONFIG_DASD_PROFILE
210/* 210/*
211 * Reset device profile information 211 * Reset device profile information
212 */ 212 */
213static int 213static int dasd_ioctl_reset_profile(struct dasd_block *block)
214dasd_ioctl_reset_profile(struct dasd_device *device)
215{ 214{
216 memset(&device->profile, 0, sizeof (struct dasd_profile_info_t)); 215 memset(&block->profile, 0, sizeof(struct dasd_profile_info_t));
217 return 0; 216 return 0;
218} 217}
219 218
220/* 219/*
221 * Return device profile information 220 * Return device profile information
222 */ 221 */
223static int 222static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
224dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
225{ 223{
226 if (dasd_profile_level == DASD_PROFILE_OFF) 224 if (dasd_profile_level == DASD_PROFILE_OFF)
227 return -EIO; 225 return -EIO;
228 if (copy_to_user(argp, &device->profile, 226 if (copy_to_user(argp, &block->profile,
229 sizeof (struct dasd_profile_info_t))) 227 sizeof(struct dasd_profile_info_t)))
230 return -EFAULT; 228 return -EFAULT;
231 return 0; 229 return 0;
232} 230}
233#else 231#else
234static int 232static int dasd_ioctl_reset_profile(struct dasd_block *block)
235dasd_ioctl_reset_profile(struct dasd_device *device)
236{ 233{
237 return -ENOSYS; 234 return -ENOSYS;
238} 235}
239 236
240static int 237static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
241dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
242{ 238{
243 return -ENOSYS; 239 return -ENOSYS;
244} 240}
@@ -247,87 +243,88 @@ dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
247/* 243/*
248 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. 244 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
249 */ 245 */
250static int 246static int dasd_ioctl_information(struct dasd_block *block,
251dasd_ioctl_information(struct dasd_device *device, 247 unsigned int cmd, void __user *argp)
252 unsigned int cmd, void __user *argp)
253{ 248{
254 struct dasd_information2_t *dasd_info; 249 struct dasd_information2_t *dasd_info;
255 unsigned long flags; 250 unsigned long flags;
256 int rc; 251 int rc;
252 struct dasd_device *base;
257 struct ccw_device *cdev; 253 struct ccw_device *cdev;
258 struct ccw_dev_id dev_id; 254 struct ccw_dev_id dev_id;
259 255
260 if (!device->discipline->fill_info) 256 base = block->base;
257 if (!base->discipline->fill_info)
261 return -EINVAL; 258 return -EINVAL;
262 259
263 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); 260 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
264 if (dasd_info == NULL) 261 if (dasd_info == NULL)
265 return -ENOMEM; 262 return -ENOMEM;
266 263
267 rc = device->discipline->fill_info(device, dasd_info); 264 rc = base->discipline->fill_info(base, dasd_info);
268 if (rc) { 265 if (rc) {
269 kfree(dasd_info); 266 kfree(dasd_info);
270 return rc; 267 return rc;
271 } 268 }
272 269
273 cdev = device->cdev; 270 cdev = base->cdev;
274 ccw_device_get_id(cdev, &dev_id); 271 ccw_device_get_id(cdev, &dev_id);
275 272
276 dasd_info->devno = dev_id.devno; 273 dasd_info->devno = dev_id.devno;
277 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev); 274 dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
278 dasd_info->cu_type = cdev->id.cu_type; 275 dasd_info->cu_type = cdev->id.cu_type;
279 dasd_info->cu_model = cdev->id.cu_model; 276 dasd_info->cu_model = cdev->id.cu_model;
280 dasd_info->dev_type = cdev->id.dev_type; 277 dasd_info->dev_type = cdev->id.dev_type;
281 dasd_info->dev_model = cdev->id.dev_model; 278 dasd_info->dev_model = cdev->id.dev_model;
282 dasd_info->status = device->state; 279 dasd_info->status = base->state;
283 /* 280 /*
284 * The open_count is increased for every opener, that includes 281 * The open_count is increased for every opener, that includes
285 * the blkdev_get in dasd_scan_partitions. 282 * the blkdev_get in dasd_scan_partitions.
286 * This must be hidden from user-space. 283 * This must be hidden from user-space.
287 */ 284 */
288 dasd_info->open_count = atomic_read(&device->open_count); 285 dasd_info->open_count = atomic_read(&block->open_count);
289 if (!device->bdev) 286 if (!block->bdev)
290 dasd_info->open_count++; 287 dasd_info->open_count++;
291 288
292 /* 289 /*
293 * check if device is really formatted 290 * check if device is really formatted
294 * LDL / CDL was returned by 'fill_info' 291 * LDL / CDL was returned by 'fill_info'
295 */ 292 */
296 if ((device->state < DASD_STATE_READY) || 293 if ((base->state < DASD_STATE_READY) ||
297 (dasd_check_blocksize(device->bp_block))) 294 (dasd_check_blocksize(block->bp_block)))
298 dasd_info->format = DASD_FORMAT_NONE; 295 dasd_info->format = DASD_FORMAT_NONE;
299 296
300 dasd_info->features |= 297 dasd_info->features |=
301 ((device->features & DASD_FEATURE_READONLY) != 0); 298 ((base->features & DASD_FEATURE_READONLY) != 0);
302 299
303 if (device->discipline) 300 if (base->discipline)
304 memcpy(dasd_info->type, device->discipline->name, 4); 301 memcpy(dasd_info->type, base->discipline->name, 4);
305 else 302 else
306 memcpy(dasd_info->type, "none", 4); 303 memcpy(dasd_info->type, "none", 4);
307 304
308 if (device->request_queue->request_fn) { 305 if (block->request_queue->request_fn) {
309 struct list_head *l; 306 struct list_head *l;
310#ifdef DASD_EXTENDED_PROFILING 307#ifdef DASD_EXTENDED_PROFILING
311 { 308 {
312 struct list_head *l; 309 struct list_head *l;
313 spin_lock_irqsave(&device->lock, flags); 310 spin_lock_irqsave(&block->lock, flags);
314 list_for_each(l, &device->request_queue->queue_head) 311 list_for_each(l, &block->request_queue->queue_head)
315 dasd_info->req_queue_len++; 312 dasd_info->req_queue_len++;
316 spin_unlock_irqrestore(&device->lock, flags); 313 spin_unlock_irqrestore(&block->lock, flags);
317 } 314 }
318#endif /* DASD_EXTENDED_PROFILING */ 315#endif /* DASD_EXTENDED_PROFILING */
319 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 316 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
320 list_for_each(l, &device->ccw_queue) 317 list_for_each(l, &base->ccw_queue)
321 dasd_info->chanq_len++; 318 dasd_info->chanq_len++;
322 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), 319 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
323 flags); 320 flags);
324 } 321 }
325 322
326 rc = 0; 323 rc = 0;
327 if (copy_to_user(argp, dasd_info, 324 if (copy_to_user(argp, dasd_info,
328 ((cmd == (unsigned int) BIODASDINFO2) ? 325 ((cmd == (unsigned int) BIODASDINFO2) ?
329 sizeof (struct dasd_information2_t) : 326 sizeof(struct dasd_information2_t) :
330 sizeof (struct dasd_information_t)))) 327 sizeof(struct dasd_information_t))))
331 rc = -EFAULT; 328 rc = -EFAULT;
332 kfree(dasd_info); 329 kfree(dasd_info);
333 return rc; 330 return rc;
@@ -339,7 +336,7 @@ dasd_ioctl_information(struct dasd_device *device,
339static int 336static int
340dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) 337dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
341{ 338{
342 struct dasd_device *device = bdev->bd_disk->private_data; 339 struct dasd_block *block = bdev->bd_disk->private_data;
343 int intval; 340 int intval;
344 341
345 if (!capable(CAP_SYS_ADMIN)) 342 if (!capable(CAP_SYS_ADMIN))
@@ -351,11 +348,10 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
351 return -EFAULT; 348 return -EFAULT;
352 349
353 set_disk_ro(bdev->bd_disk, intval); 350 set_disk_ro(bdev->bd_disk, intval);
354 return dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); 351 return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval);
355} 352}
356 353
357static int 354static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
358dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd,
359 unsigned long arg) 355 unsigned long arg)
360{ 356{
361 struct cmbdata __user *argp = (void __user *) arg; 357 struct cmbdata __user *argp = (void __user *) arg;
@@ -363,7 +359,7 @@ dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd,
363 struct cmbdata data; 359 struct cmbdata data;
364 int ret; 360 int ret;
365 361
366 ret = cmf_readall(device->cdev, &data); 362 ret = cmf_readall(block->base->cdev, &data);
367 if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp)))) 363 if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
368 return -EFAULT; 364 return -EFAULT;
369 return ret; 365 return ret;
@@ -374,10 +370,10 @@ dasd_ioctl(struct inode *inode, struct file *file,
374 unsigned int cmd, unsigned long arg) 370 unsigned int cmd, unsigned long arg)
375{ 371{
376 struct block_device *bdev = inode->i_bdev; 372 struct block_device *bdev = inode->i_bdev;
377 struct dasd_device *device = bdev->bd_disk->private_data; 373 struct dasd_block *block = bdev->bd_disk->private_data;
378 void __user *argp = (void __user *)arg; 374 void __user *argp = (void __user *)arg;
379 375
380 if (!device) 376 if (!block)
381 return -ENODEV; 377 return -ENODEV;
382 378
383 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { 379 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
@@ -391,33 +387,33 @@ dasd_ioctl(struct inode *inode, struct file *file,
391 case BIODASDENABLE: 387 case BIODASDENABLE:
392 return dasd_ioctl_enable(bdev); 388 return dasd_ioctl_enable(bdev);
393 case BIODASDQUIESCE: 389 case BIODASDQUIESCE:
394 return dasd_ioctl_quiesce(device); 390 return dasd_ioctl_quiesce(block);
395 case BIODASDRESUME: 391 case BIODASDRESUME:
396 return dasd_ioctl_resume(device); 392 return dasd_ioctl_resume(block);
397 case BIODASDFMT: 393 case BIODASDFMT:
398 return dasd_ioctl_format(bdev, argp); 394 return dasd_ioctl_format(bdev, argp);
399 case BIODASDINFO: 395 case BIODASDINFO:
400 return dasd_ioctl_information(device, cmd, argp); 396 return dasd_ioctl_information(block, cmd, argp);
401 case BIODASDINFO2: 397 case BIODASDINFO2:
402 return dasd_ioctl_information(device, cmd, argp); 398 return dasd_ioctl_information(block, cmd, argp);
403 case BIODASDPRRD: 399 case BIODASDPRRD:
404 return dasd_ioctl_read_profile(device, argp); 400 return dasd_ioctl_read_profile(block, argp);
405 case BIODASDPRRST: 401 case BIODASDPRRST:
406 return dasd_ioctl_reset_profile(device); 402 return dasd_ioctl_reset_profile(block);
407 case BLKROSET: 403 case BLKROSET:
408 return dasd_ioctl_set_ro(bdev, argp); 404 return dasd_ioctl_set_ro(bdev, argp);
409 case DASDAPIVER: 405 case DASDAPIVER:
410 return dasd_ioctl_api_version(argp); 406 return dasd_ioctl_api_version(argp);
411 case BIODASDCMFENABLE: 407 case BIODASDCMFENABLE:
412 return enable_cmf(device->cdev); 408 return enable_cmf(block->base->cdev);
413 case BIODASDCMFDISABLE: 409 case BIODASDCMFDISABLE:
414 return disable_cmf(device->cdev); 410 return disable_cmf(block->base->cdev);
415 case BIODASDREADALLCMB: 411 case BIODASDREADALLCMB:
416 return dasd_ioctl_readall_cmb(device, cmd, arg); 412 return dasd_ioctl_readall_cmb(block, cmd, arg);
417 default: 413 default:
418 /* if the discipline has an ioctl method try it. */ 414 /* if the discipline has an ioctl method try it. */
419 if (device->discipline->ioctl) { 415 if (block->base->discipline->ioctl) {
420 int rval = device->discipline->ioctl(device, cmd, argp); 416 int rval = block->base->discipline->ioctl(block, cmd, argp);
421 if (rval != -ENOIOCTLCMD) 417 if (rval != -ENOIOCTLCMD)
422 return rval; 418 return rval;
423 } 419 }
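Every helper in the dasd_ioctl.c conversion above repeats the same shape: resolve the dasd_block from the disk, keep block-layer state on the block, and reach through block->base for anything that touches the channel device. A hedged sketch of that shape (dasd_ioctl_example is a hypothetical helper, not part of the patch):

static int dasd_ioctl_example(struct block_device *bdev)
{
        struct dasd_block *block = bdev->bd_disk->private_data;

        if (!block)
                return -ENODEV;         /* disk not bound to a DASD */
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        /* block-layer work stays on the block ... */
        dasd_schedule_block_bh(block);
        /* ... channel work goes through the base device */
        dasd_schedule_device_bh(block->base);
        return 0;
}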
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index ac7e8ef504cb..28a86f070048 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -54,11 +54,16 @@ static int
54dasd_devices_show(struct seq_file *m, void *v) 54dasd_devices_show(struct seq_file *m, void *v)
55{ 55{
56 struct dasd_device *device; 56 struct dasd_device *device;
57 struct dasd_block *block;
57 char *substr; 58 char *substr;
58 59
59 device = dasd_device_from_devindex((unsigned long) v - 1); 60 device = dasd_device_from_devindex((unsigned long) v - 1);
60 if (IS_ERR(device)) 61 if (IS_ERR(device))
61 return 0; 62 return 0;
63 if (device->block)
64 block = device->block;
65 else
66 return 0;
62 /* Print device number. */ 67 /* Print device number. */
63 seq_printf(m, "%s", device->cdev->dev.bus_id); 68 seq_printf(m, "%s", device->cdev->dev.bus_id);
64 /* Print discipline string. */ 69 /* Print discipline string. */
@@ -67,14 +72,14 @@ dasd_devices_show(struct seq_file *m, void *v)
67 else 72 else
68 seq_printf(m, "(none)"); 73 seq_printf(m, "(none)");
69 /* Print kdev. */ 74 /* Print kdev. */
70 if (device->gdp) 75 if (block->gdp)
71 seq_printf(m, " at (%3d:%6d)", 76 seq_printf(m, " at (%3d:%6d)",
72 device->gdp->major, device->gdp->first_minor); 77 block->gdp->major, block->gdp->first_minor);
73 else 78 else
74 seq_printf(m, " at (???:??????)"); 79 seq_printf(m, " at (???:??????)");
75 /* Print device name. */ 80 /* Print device name. */
76 if (device->gdp) 81 if (block->gdp)
77 seq_printf(m, " is %-8s", device->gdp->disk_name); 82 seq_printf(m, " is %-8s", block->gdp->disk_name);
78 else 83 else
79 seq_printf(m, " is ????????"); 84 seq_printf(m, " is ????????");
80 /* Print devices features. */ 85 /* Print devices features. */
@@ -100,14 +105,14 @@ dasd_devices_show(struct seq_file *m, void *v)
100 case DASD_STATE_READY: 105 case DASD_STATE_READY:
101 case DASD_STATE_ONLINE: 106 case DASD_STATE_ONLINE:
102 seq_printf(m, "active "); 107 seq_printf(m, "active ");
103 if (dasd_check_blocksize(device->bp_block)) 108 if (dasd_check_blocksize(block->bp_block))
104 seq_printf(m, "n/f "); 109 seq_printf(m, "n/f ");
105 else 110 else
106 seq_printf(m, 111 seq_printf(m,
107 "at blocksize: %d, %ld blocks, %ld MB", 112 "at blocksize: %d, %ld blocks, %ld MB",
108 device->bp_block, device->blocks, 113 block->bp_block, block->blocks,
109 ((device->bp_block >> 9) * 114 ((block->bp_block >> 9) *
110 device->blocks) >> 11); 115 block->blocks) >> 11);
111 break; 116 break;
112 default: 117 default:
113 seq_printf(m, "no stat"); 118 seq_printf(m, "no stat");
@@ -137,7 +142,7 @@ static void dasd_devices_stop(struct seq_file *m, void *v)
137{ 142{
138} 143}
139 144
140static struct seq_operations dasd_devices_seq_ops = { 145static const struct seq_operations dasd_devices_seq_ops = {
141 .start = dasd_devices_start, 146 .start = dasd_devices_start,
142 .next = dasd_devices_next, 147 .next = dasd_devices_next,
143 .stop = dasd_devices_stop, 148 .stop = dasd_devices_stop,
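The new guard in dasd_devices_show() exists because, after this series, a dasd_device need not have a block side at all: hyper PAV alias devices are channel-only. Any code walking devices has to tolerate that, roughly as follows (example_has_disk is illustrative):

static bool example_has_disk(struct dasd_device *device)
{
        /* NULL for hyper PAV alias devices, which carry no gendisk */
        return device->block != NULL;
}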
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 15a5789b7734..7779bfce1c31 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -82,7 +82,7 @@ struct dcssblk_dev_info {
82 struct request_queue *dcssblk_queue; 82 struct request_queue *dcssblk_queue;
83}; 83};
84 84
85static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices); 85static LIST_HEAD(dcssblk_devices);
86static struct rw_semaphore dcssblk_devices_sem; 86static struct rw_semaphore dcssblk_devices_sem;
87 87
88/* 88/*
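The dcssblk hunk (and the two raw3270.c hunks below) are mechanical conversions to the LIST_HEAD() shorthand; both spellings define the same statically initialized list head. A stand-alone illustration, with example_list_* as placeholder names:

#include <linux/list.h>

/* Equivalent definitions; the patch switches to the second form. */
static struct list_head example_list_old = LIST_HEAD_INIT(example_list_old);
static LIST_HEAD(example_list_new);     /* expands to the line above */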
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 130de19916f2..7e73e39a1741 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o sclp_config.o sclp_chp.o 6 sclp_cmd.o sclp_config.o sclp_cpi_sys.o
7 7
8obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 20442fbf9346..a86c0534cd49 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -295,7 +295,7 @@ module_init(mon_init);
295module_exit(mon_exit); 295module_exit(mon_exit);
296 296
297module_param_named(max_bufs, mon_max_bufs, int, 0644); 297module_param_named(max_bufs, mon_max_bufs, int, 0644);
298MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers" 298MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
299 "that can be active at one time"); 299 "that can be active at one time");
300 300
301MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>"); 301MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
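The monwriter hunk is a one-character fix that matters because C concatenates adjacent string literals: without the trailing space the two fragments joined as "...buffersthat can be...". Illustration only:

/* Adjacent literals concatenate at compile time: */
const char *bad  = "number of buffers"  "that can be active";  /* buffersthat */
const char *good = "number of buffers " "that can be active";  /* buffers that */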
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 8d1c64a24dec..0d98f1ff2edd 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -66,7 +66,7 @@ struct raw3270 {
66static DEFINE_MUTEX(raw3270_mutex); 66static DEFINE_MUTEX(raw3270_mutex);
67 67
68/* List of 3270 devices. */ 68/* List of 3270 devices. */
69static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); 69static LIST_HEAD(raw3270_devices);
70 70
71/* 71/*
72 * Flag to indicate if the driver has been registered. Some operations 72 * Flag to indicate if the driver has been registered. Some operations
@@ -1210,7 +1210,7 @@ struct raw3270_notifier {
1210 void (*notifier)(int, int); 1210 void (*notifier)(int, int);
1211}; 1211};
1212 1212
1213static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier); 1213static LIST_HEAD(raw3270_notifier);
1214 1214
1215int raw3270_register_notifier(void (*notifier)(int, int)) 1215int raw3270_register_notifier(void (*notifier)(int, int))
1216{ 1216{
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index c7318a125852..aa8186d18aee 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -56,8 +56,6 @@ typedef unsigned int sclp_cmdw_t;
56#define SCLP_CMDW_READ_EVENT_DATA 0x00770005 56#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
57#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 57#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
58#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 58#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
59#define SCLP_CMDW_READ_SCP_INFO 0x00020001
60#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
61 59
62#define GDS_ID_MDSMU 0x1310 60#define GDS_ID_MDSMU 0x1310
63#define GDS_ID_MDSROUTEINFO 0x1311 61#define GDS_ID_MDSROUTEINFO 0x1311
@@ -83,6 +81,8 @@ extern u64 sclp_facilities;
83 81
84#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) 82#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
85#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) 83#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
84#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
85#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
86 86
87struct gds_subvector { 87struct gds_subvector {
88 u8 length; 88 u8 length;
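The two new facility macros follow the existing convention: each tests one bit of the 64-bit facilities word returned by READ SCP INFO, with bit 0 as the most significant bit, so 0x0800000000000000ULL is bit 4 (CPU info) and 0x0400000000000000ULL is bit 5 (CPU reconfiguration). A sketch of the gate the new commands use (the function name is illustrative):

static int example_require_cpu_reconfig(void)
{
        /* sclp_facilities is filled early by sclp_facilities_detect() */
        if (!SCLP_HAS_CPU_RECONFIG)
                return -EOPNOTSUPP;     /* firmware lacks the facility */
        return 0;
}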
diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
deleted file mode 100644
index c68f5e7e63a0..000000000000
--- a/drivers/s390/char/sclp_chp.c
+++ /dev/null
@@ -1,200 +0,0 @@
1/*
2 * drivers/s390/char/sclp_chp.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/gfp.h>
10#include <linux/errno.h>
11#include <linux/completion.h>
12#include <asm/sclp.h>
13#include <asm/chpid.h>
14
15#include "sclp.h"
16
17#define TAG "sclp_chp: "
18
19#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001
20#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001
21#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001
22
23static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
24{
25 return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
26}
27
28static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
29{
30 return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
31}
32
33static void chp_callback(struct sclp_req *req, void *data)
34{
35 struct completion *completion = data;
36
37 complete(completion);
38}
39
40struct chp_cfg_sccb {
41 struct sccb_header header;
42 u8 ccm;
43 u8 reserved[6];
44 u8 cssid;
45} __attribute__((packed));
46
47struct chp_cfg_data {
48 struct chp_cfg_sccb sccb;
49 struct sclp_req req;
50 struct completion completion;
51} __attribute__((packed));
52
53static int do_configure(sclp_cmdw_t cmd)
54{
55 struct chp_cfg_data *data;
56 int rc;
57
58 if (!SCLP_HAS_CHP_RECONFIG)
59 return -EOPNOTSUPP;
60 /* Prepare sccb. */
61 data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
62 if (!data)
63 return -ENOMEM;
64 data->sccb.header.length = sizeof(struct chp_cfg_sccb);
65 data->req.command = cmd;
66 data->req.sccb = &(data->sccb);
67 data->req.status = SCLP_REQ_FILLED;
68 data->req.callback = chp_callback;
69 data->req.callback_data = &(data->completion);
70 init_completion(&data->completion);
71
72 /* Perform sclp request. */
73 rc = sclp_add_request(&(data->req));
74 if (rc)
75 goto out;
76 wait_for_completion(&data->completion);
77
78 /* Check response. */
79 if (data->req.status != SCLP_REQ_DONE) {
80 printk(KERN_WARNING TAG "configure channel-path request failed "
81 "(status=0x%02x)\n", data->req.status);
82 rc = -EIO;
83 goto out;
84 }
85 switch (data->sccb.header.response_code) {
86 case 0x0020:
87 case 0x0120:
88 case 0x0440:
89 case 0x0450:
90 break;
91 default:
92 printk(KERN_WARNING TAG "configure channel-path failed "
93 "(cmd=0x%08x, response=0x%04x)\n", cmd,
94 data->sccb.header.response_code);
95 rc = -EIO;
96 break;
97 }
98out:
99 free_page((unsigned long) data);
100
101 return rc;
102}
103
104/**
105 * sclp_chp_configure - perform configure channel-path sclp command
106 * @chpid: channel-path ID
107 *
108 * Perform configure channel-path command sclp command for specified chpid.
109 * Return 0 after command successfully finished, non-zero otherwise.
110 */
111int sclp_chp_configure(struct chp_id chpid)
112{
113 return do_configure(get_configure_cmdw(chpid));
114}
115
116/**
117 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
118 * @chpid: channel-path ID
119 *
120 * Perform deconfigure channel-path command sclp command for specified chpid
121 * and wait for completion. On success return 0. Return non-zero otherwise.
122 */
123int sclp_chp_deconfigure(struct chp_id chpid)
124{
125 return do_configure(get_deconfigure_cmdw(chpid));
126}
127
128struct chp_info_sccb {
129 struct sccb_header header;
130 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
131 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
132 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
133 u8 ccm;
134 u8 reserved[6];
135 u8 cssid;
136} __attribute__((packed));
137
138struct chp_info_data {
139 struct chp_info_sccb sccb;
140 struct sclp_req req;
141 struct completion completion;
142} __attribute__((packed));
143
144/**
145 * sclp_chp_read_info - perform read channel-path information sclp command
146 * @info: resulting channel-path information data
147 *
148 * Perform read channel-path information sclp command and wait for completion.
149 * On success, store channel-path information in @info and return 0. Return
150 * non-zero otherwise.
151 */
152int sclp_chp_read_info(struct sclp_chp_info *info)
153{
154 struct chp_info_data *data;
155 int rc;
156
157 if (!SCLP_HAS_CHP_INFO)
158 return -EOPNOTSUPP;
159 /* Prepare sccb. */
160 data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
161 if (!data)
162 return -ENOMEM;
163 data->sccb.header.length = sizeof(struct chp_info_sccb);
164 data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
165 data->req.sccb = &(data->sccb);
166 data->req.status = SCLP_REQ_FILLED;
167 data->req.callback = chp_callback;
168 data->req.callback_data = &(data->completion);
169 init_completion(&data->completion);
170
171 /* Perform sclp request. */
172 rc = sclp_add_request(&(data->req));
173 if (rc)
174 goto out;
175 wait_for_completion(&data->completion);
176
177 /* Check response. */
178 if (data->req.status != SCLP_REQ_DONE) {
179 printk(KERN_WARNING TAG "read channel-path info request failed "
180 "(status=0x%02x)\n", data->req.status);
181 rc = -EIO;
182 goto out;
183 }
184 if (data->sccb.header.response_code != 0x0010) {
185 printk(KERN_WARNING TAG "read channel-path info failed "
186 "(response=0x%04x)\n", data->sccb.header.response_code);
187 rc = -EIO;
188 goto out;
189 }
190 memcpy(info->recognized, data->sccb.recognized,
191 SCLP_CHP_INFO_MASK_SIZE);
192 memcpy(info->standby, data->sccb.standby,
193 SCLP_CHP_INFO_MASK_SIZE);
194 memcpy(info->configured, data->sccb.configured,
195 SCLP_CHP_INFO_MASK_SIZE);
196out:
197 free_page((unsigned long) data);
198
199 return rc;
200}
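The deletion above loses no functionality: sclp_chp_configure(), sclp_chp_deconfigure() and sclp_chp_read_info() reappear in the new sclp_cmd.c below, rebuilt on a shared do_sync_request() helper instead of per-file callback/completion boilerplate. As a hedged sketch, every consolidated helper reduces to this pattern (example_cmd is a placeholder command word, and the bare sccb_header access is simplified relative to the real per-command SCCBs):

static int example_sclp_call(sclp_cmdw_t example_cmd)
{
        struct sccb_header *sccb;
        int rc;

        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->length = sizeof(*sccb);
        rc = do_sync_request(example_cmd, sccb);   /* queue and wait */
        if (!rc && sccb->response_code != 0x0020)
                rc = -EIO;                         /* firmware rejected it */
        free_page((unsigned long) sccb);
        return rc;
}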
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
new file mode 100644
index 000000000000..b5c23396f8fe
--- /dev/null
+++ b/drivers/s390/char/sclp_cmd.c
@@ -0,0 +1,398 @@
1/*
2 * drivers/s390/char/sclp_cmd.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
6 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
7 */
8
9#include <linux/completion.h>
10#include <linux/init.h>
11#include <linux/errno.h>
12#include <linux/slab.h>
13#include <linux/string.h>
14#include <asm/chpid.h>
15#include <asm/sclp.h>
16#include "sclp.h"
17
18#define TAG "sclp_cmd: "
19
20#define SCLP_CMDW_READ_SCP_INFO 0x00020001
21#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
22
23struct read_info_sccb {
24 struct sccb_header header; /* 0-7 */
25 u16 rnmax; /* 8-9 */
26 u8 rnsize; /* 10 */
27 u8 _reserved0[24 - 11]; /* 11-23 */
28 u8 loadparm[8]; /* 24-31 */
29 u8 _reserved1[48 - 32]; /* 32-47 */
30 u64 facilities; /* 48-55 */
31 u8 _reserved2[84 - 56]; /* 56-83 */
32 u8 fac84; /* 84 */
33 u8 _reserved3[91 - 85]; /* 85-90 */
34 u8 flags; /* 91 */
35 u8 _reserved4[100 - 92]; /* 92-99 */
36 u32 rnsize2; /* 100-103 */
37 u64 rnmax2; /* 104-111 */
38 u8 _reserved5[4096 - 112]; /* 112-4095 */
39} __attribute__((packed, aligned(PAGE_SIZE)));
40
41static struct read_info_sccb __initdata early_read_info_sccb;
42static int __initdata early_read_info_sccb_valid;
43
44u64 sclp_facilities;
45static u8 sclp_fac84;
46
47static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
48{
49 int rc;
50
51 __ctl_set_bit(0, 9);
52 rc = sclp_service_call(cmd, sccb);
53 if (rc)
54 goto out;
55 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
56 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
57 local_irq_disable();
58out:
59 /* Contents of the sccb might have changed. */
60 barrier();
61 __ctl_clear_bit(0, 9);
62 return rc;
63}
64
65void __init sclp_read_info_early(void)
66{
67 int rc;
68 int i;
69 struct read_info_sccb *sccb;
70 sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
71 SCLP_CMDW_READ_SCP_INFO};
72
73 sccb = &early_read_info_sccb;
74 for (i = 0; i < ARRAY_SIZE(commands); i++) {
75 do {
76 memset(sccb, 0, sizeof(*sccb));
77 sccb->header.length = sizeof(*sccb);
78 sccb->header.control_mask[2] = 0x80;
79 rc = sclp_cmd_sync_early(commands[i], sccb);
80 } while (rc == -EBUSY);
81
82 if (rc)
83 break;
84 if (sccb->header.response_code == 0x10) {
85 early_read_info_sccb_valid = 1;
86 break;
87 }
88 if (sccb->header.response_code != 0x1f0)
89 break;
90 }
91}
92
93void __init sclp_facilities_detect(void)
94{
95 if (!early_read_info_sccb_valid)
96 return;
97 sclp_facilities = early_read_info_sccb.facilities;
98 sclp_fac84 = early_read_info_sccb.fac84;
99}
100
101unsigned long long __init sclp_memory_detect(void)
102{
103 unsigned long long memsize;
104 struct read_info_sccb *sccb;
105
106 if (!early_read_info_sccb_valid)
107 return 0;
108 sccb = &early_read_info_sccb;
109 if (sccb->rnsize)
110 memsize = sccb->rnsize << 20;
111 else
112 memsize = sccb->rnsize2 << 20;
113 if (sccb->rnmax)
114 memsize *= sccb->rnmax;
115 else
116 memsize *= sccb->rnmax2;
117 return memsize;
118}
119
120/*
121 * This function will be called after sclp_memory_detect(), which gets called
122 * early from early.c code. Therefore the sccb should have valid contents.
123 */
124void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
125{
126 struct read_info_sccb *sccb;
127
128 if (!early_read_info_sccb_valid)
129 return;
130 sccb = &early_read_info_sccb;
131 info->is_valid = 1;
132 if (sccb->flags & 0x2)
133 info->has_dump = 1;
134 memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
135}
136
137static void sclp_sync_callback(struct sclp_req *req, void *data)
138{
139 struct completion *completion = data;
140
141 complete(completion);
142}
143
144static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
145{
146 struct completion completion;
147 struct sclp_req *request;
148 int rc;
149
150 request = kzalloc(sizeof(*request), GFP_KERNEL);
151 if (!request)
152 return -ENOMEM;
153 request->command = cmd;
154 request->sccb = sccb;
155 request->status = SCLP_REQ_FILLED;
156 request->callback = sclp_sync_callback;
157 request->callback_data = &completion;
158 init_completion(&completion);
159
160 /* Perform sclp request. */
161 rc = sclp_add_request(request);
162 if (rc)
163 goto out;
164 wait_for_completion(&completion);
165
166 /* Check response. */
167 if (request->status != SCLP_REQ_DONE) {
168 printk(KERN_WARNING TAG "sync request failed "
169 "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status);
170 rc = -EIO;
171 }
172out:
173 kfree(request);
174 return rc;
175}
176
177/*
178 * CPU configuration related functions.
179 */
180
181#define SCLP_CMDW_READ_CPU_INFO 0x00010001
182#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
183#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
184
185struct read_cpu_info_sccb {
186 struct sccb_header header;
187 u16 nr_configured;
188 u16 offset_configured;
189 u16 nr_standby;
190 u16 offset_standby;
191 u8 reserved[4096 - 16];
192} __attribute__((packed, aligned(PAGE_SIZE)));
193
194static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
195 struct read_cpu_info_sccb *sccb)
196{
197 char *page = (char *) sccb;
198
199 memset(info, 0, sizeof(*info));
200 info->configured = sccb->nr_configured;
201 info->standby = sccb->nr_standby;
202 info->combined = sccb->nr_configured + sccb->nr_standby;
203 info->has_cpu_type = sclp_fac84 & 0x1;
204 memcpy(&info->cpu, page + sccb->offset_configured,
205 info->combined * sizeof(struct sclp_cpu_entry));
206}
207
208int sclp_get_cpu_info(struct sclp_cpu_info *info)
209{
210 int rc;
211 struct read_cpu_info_sccb *sccb;
212
213 if (!SCLP_HAS_CPU_INFO)
214 return -EOPNOTSUPP;
215 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
216 if (!sccb)
217 return -ENOMEM;
218 sccb->header.length = sizeof(*sccb);
219 rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
220 if (rc)
221 goto out;
222 if (sccb->header.response_code != 0x0010) {
223 printk(KERN_WARNING TAG "readcpuinfo failed "
224 "(response=0x%04x)\n", sccb->header.response_code);
225 rc = -EIO;
226 goto out;
227 }
228 sclp_fill_cpu_info(info, sccb);
229out:
230 free_page((unsigned long) sccb);
231 return rc;
232}
233
234struct cpu_configure_sccb {
235 struct sccb_header header;
236} __attribute__((packed, aligned(8)));
237
238static int do_cpu_configure(sclp_cmdw_t cmd)
239{
240 struct cpu_configure_sccb *sccb;
241 int rc;
242
243 if (!SCLP_HAS_CPU_RECONFIG)
244 return -EOPNOTSUPP;
245 /*
246 * This is not going to cross a page boundary since we force
247 * kmalloc to have a minimum alignment of 8 bytes on s390.
248 */
249 sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
250 if (!sccb)
251 return -ENOMEM;
252 sccb->header.length = sizeof(*sccb);
253 rc = do_sync_request(cmd, sccb);
254 if (rc)
255 goto out;
256 switch (sccb->header.response_code) {
257 case 0x0020:
258 case 0x0120:
259 break;
260 default:
261 printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, "
262 "response=0x%04x)\n", cmd, sccb->header.response_code);
263 rc = -EIO;
264 break;
265 }
266out:
267 kfree(sccb);
268 return rc;
269}
270
271int sclp_cpu_configure(u8 cpu)
272{
273 return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
274}
275
276int sclp_cpu_deconfigure(u8 cpu)
277{
278 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
279}
280
281/*
282 * Channel path configuration related functions.
283 */
284
285#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
286#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
287#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
288
289struct chp_cfg_sccb {
290 struct sccb_header header;
291 u8 ccm;
292 u8 reserved[6];
293 u8 cssid;
294} __attribute__((packed));
295
296static int do_chp_configure(sclp_cmdw_t cmd)
297{
298 struct chp_cfg_sccb *sccb;
299 int rc;
300
301 if (!SCLP_HAS_CHP_RECONFIG)
302 return -EOPNOTSUPP;
303 /* Prepare sccb. */
304 sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
305 if (!sccb)
306 return -ENOMEM;
307 sccb->header.length = sizeof(*sccb);
308 rc = do_sync_request(cmd, sccb);
309 if (rc)
310 goto out;
311 switch (sccb->header.response_code) {
312 case 0x0020:
313 case 0x0120:
314 case 0x0440:
315 case 0x0450:
316 break;
317 default:
318 printk(KERN_WARNING TAG "configure channel-path failed "
319 "(cmd=0x%08x, response=0x%04x)\n", cmd,
320 sccb->header.response_code);
321 rc = -EIO;
322 break;
323 }
324out:
325 free_page((unsigned long) sccb);
326 return rc;
327}
328
329/**
330 * sclp_chp_configure - perform configure channel-path sclp command
331 * @chpid: channel-path ID
332 *
333 * Perform configure channel-path command sclp command for specified chpid.
334 * Return 0 after command successfully finished, non-zero otherwise.
335 */
336int sclp_chp_configure(struct chp_id chpid)
337{
338 return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
339}
340
341/**
342 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
343 * @chpid: channel-path ID
344 *
345 * Perform deconfigure channel-path command sclp command for specified chpid
346 * and wait for completion. On success return 0. Return non-zero otherwise.
347 */
348int sclp_chp_deconfigure(struct chp_id chpid)
349{
350 return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
351}
352
353struct chp_info_sccb {
354 struct sccb_header header;
355 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
356 u8 standby[SCLP_CHP_INFO_MASK_SIZE];
357 u8 configured[SCLP_CHP_INFO_MASK_SIZE];
358 u8 ccm;
359 u8 reserved[6];
360 u8 cssid;
361} __attribute__((packed));
362
363/**
364 * sclp_chp_read_info - perform read channel-path information sclp command
365 * @info: resulting channel-path information data
366 *
367 * Perform read channel-path information sclp command and wait for completion.
368 * On success, store channel-path information in @info and return 0. Return
369 * non-zero otherwise.
370 */
371int sclp_chp_read_info(struct sclp_chp_info *info)
372{
373 struct chp_info_sccb *sccb;
374 int rc;
375
376 if (!SCLP_HAS_CHP_INFO)
377 return -EOPNOTSUPP;
378 /* Prepare sccb. */
379 sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
380 if (!sccb)
381 return -ENOMEM;
382 sccb->header.length = sizeof(*sccb);
383 rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
384 if (rc)
385 goto out;
386 if (sccb->header.response_code != 0x0010) {
387 printk(KERN_WARNING TAG "read channel-path info failed "
388 "(response=0x%04x)\n", sccb->header.response_code);
389 rc = -EIO;
390 goto out;
391 }
392 memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
393 memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
394 memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
395out:
396 free_page((unsigned long) sccb);
397 return rc;
398}
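sclp_cmd.c thus bundles the early boot probes (sclp_read_info_early(), sclp_facilities_detect(), sclp_memory_detect()) with the synchronous runtime commands for CPU and channel-path reconfiguration. A hedged sketch of a runtime consumer of the new CPU interface; the caller and its printout are illustrative:

static int example_report_cpus(void)
{
        struct sclp_cpu_info info;
        int rc;

        rc = sclp_get_cpu_info(&info);  /* -EOPNOTSUPP without the facility */
        if (rc)
                return rc;
        printk(KERN_INFO "cpus: %d configured, %d standby\n",
               info.configured, info.standby);
        return sclp_cpu_configure(0);   /* request CPU address 0 online */
}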
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 82a13d9fdfe4..5716487b8c9d 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -1,255 +1,41 @@
1/* 1/*
2 * Author: Martin Peschke <mpeschke@de.ibm.com> 2 * drivers/s390/char/sclp_cpi.c
3 * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation 3 * SCLP control program identification
4 * 4 *
5 * SCLP Control-Program Identification. 5 * Copyright IBM Corp. 2001, 2007
6 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
7 * Michael Ernst <mernst@de.ibm.com>
6 */ 8 */
7 9
8#include <linux/version.h>
9#include <linux/kmod.h> 10#include <linux/kmod.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
12#include <linux/init.h> 13#include <linux/version.h>
13#include <linux/timer.h> 14#include "sclp_cpi_sys.h"
14#include <linux/string.h>
15#include <linux/err.h>
16#include <linux/slab.h>
17#include <asm/ebcdic.h>
18#include <asm/semaphore.h>
19
20#include "sclp.h"
21#include "sclp_rw.h"
22
23#define CPI_LENGTH_SYSTEM_TYPE 8
24#define CPI_LENGTH_SYSTEM_NAME 8
25#define CPI_LENGTH_SYSPLEX_NAME 8
26
27struct cpi_evbuf {
28 struct evbuf_header header;
29 u8 id_format;
30 u8 reserved0;
31 u8 system_type[CPI_LENGTH_SYSTEM_TYPE];
32 u64 reserved1;
33 u8 system_name[CPI_LENGTH_SYSTEM_NAME];
34 u64 reserved2;
35 u64 system_level;
36 u64 reserved3;
37 u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME];
38 u8 reserved4[16];
39} __attribute__((packed));
40
41struct cpi_sccb {
42 struct sccb_header header;
43 struct cpi_evbuf cpi_evbuf;
44} __attribute__((packed));
45
46/* Event type structure for write message and write priority message */
47static struct sclp_register sclp_cpi_event =
48{
49 .send_mask = EVTYP_CTLPROGIDENT_MASK
50};
51 15
52MODULE_LICENSE("GPL"); 16MODULE_LICENSE("GPL");
17MODULE_DESCRIPTION("Identify this operating system instance "
18 "to the System z hardware");
19MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, "
20 "Michael Ernst <mernst@de.ibm.com>");
53 21
54MODULE_AUTHOR( 22static char *system_name = "";
55 "Martin Peschke, IBM Deutschland Entwicklung GmbH " 23static char *sysplex_name = "";
56 "<mpeschke@de.ibm.com>");
57
58MODULE_DESCRIPTION(
59 "identify this operating system instance to the S/390 "
60 "or zSeries hardware");
61 24
62static char *system_name = NULL;
63module_param(system_name, charp, 0); 25module_param(system_name, charp, 0);
64MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters"); 26MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
65
66static char *sysplex_name = NULL;
67#ifdef ALLOW_SYSPLEX_NAME
68module_param(sysplex_name, charp, 0); 27module_param(sysplex_name, charp, 0);
69MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters"); 28MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
70#endif
71
72/* use default value for this field (as well as for system level) */
73static char *system_type = "LINUX";
74 29
75static int 30static int __init cpi_module_init(void)
76cpi_check_parms(void)
77{ 31{
78 /* reject if no system type specified */ 32 return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
79 if (!system_type) { 33 LINUX_VERSION_CODE);
80 printk("cpi: bug: no system type specified\n");
81 return -EINVAL;
82 }
83
84 /* reject if system type larger than 8 characters */
85 if (strlen(system_type) > CPI_LENGTH_SYSTEM_NAME) {
86 printk("cpi: bug: system type has length of %li characters - "
87 "only %i characters supported\n",
88 strlen(system_type), CPI_LENGTH_SYSTEM_TYPE);
89 return -EINVAL;
90 }
91
92 /* reject if no system name specified */
93 if (!system_name) {
94 printk("cpi: no system name specified\n");
95 return -EINVAL;
96 }
97
98 /* reject if system name larger than 8 characters */
99 if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) {
100 printk("cpi: system name has length of %li characters - "
101 "only %i characters supported\n",
102 strlen(system_name), CPI_LENGTH_SYSTEM_NAME);
103 return -EINVAL;
104 }
105
106 /* reject if specified sysplex name larger than 8 characters */
107 if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) {
108 printk("cpi: sysplex name has length of %li characters"
109 " - only %i characters supported\n",
110 strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME);
111 return -EINVAL;
112 }
113 return 0;
114} 34}
115 35
116static void
117cpi_callback(struct sclp_req *req, void *data)
118{
119 struct semaphore *sem;
120
121 sem = (struct semaphore *) data;
122 up(sem);
123}
124
125static struct sclp_req *
126cpi_prepare_req(void)
127{
128 struct sclp_req *req;
129 struct cpi_sccb *sccb;
130 struct cpi_evbuf *evb;
131
132 req = kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
133 if (req == NULL)
134 return ERR_PTR(-ENOMEM);
135 sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
136 if (sccb == NULL) {
137 kfree(req);
138 return ERR_PTR(-ENOMEM);
139 }
140 memset(sccb, 0, sizeof(struct cpi_sccb));
141
142 /* setup SCCB for Control-Program Identification */
143 sccb->header.length = sizeof(struct cpi_sccb);
144 sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
145 sccb->cpi_evbuf.header.type = 0x0B;
146 evb = &sccb->cpi_evbuf;
147
148 /* set system type */
149 memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
150 memcpy(evb->system_type, system_type, strlen(system_type));
151 sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
152 EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
153
154 /* set system name */
155 memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
156 memcpy(evb->system_name, system_name, strlen(system_name));
157 sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
158 EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
159
160 /* set system level */
161 evb->system_level = LINUX_VERSION_CODE;
162
163 /* set sysplex name */
164 if (sysplex_name) {
165 memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
166 memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name));
167 sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
168 EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
169 }
170
171 /* prepare request data structure presented to SCLP driver */
172 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
173 req->sccb = sccb;
174 req->status = SCLP_REQ_FILLED;
175 req->callback = cpi_callback;
176 return req;
177}
178
179static void
180cpi_free_req(struct sclp_req *req)
181{
182 free_page((unsigned long) req->sccb);
183 kfree(req);
184}
185
186static int __init
187cpi_module_init(void)
188{
189 struct semaphore sem;
190 struct sclp_req *req;
191 int rc;
192
193 rc = cpi_check_parms();
194 if (rc)
195 return rc;
196
197 rc = sclp_register(&sclp_cpi_event);
198 if (rc) {
199 /* could not register sclp event. Die. */
200 printk(KERN_WARNING "cpi: could not register to hardware "
201 "console.\n");
202 return -EINVAL;
203 }
204 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
205 printk(KERN_WARNING "cpi: no control program identification "
206 "support\n");
207 sclp_unregister(&sclp_cpi_event);
208 return -EOPNOTSUPP;
209 }
210
211 req = cpi_prepare_req();
212 if (IS_ERR(req)) {
213 printk(KERN_WARNING "cpi: couldn't allocate request\n");
214 sclp_unregister(&sclp_cpi_event);
215 return PTR_ERR(req);
216 }
217
218 /* Prepare semaphore */
219 sema_init(&sem, 0);
220 req->callback_data = &sem;
221 /* Add request to sclp queue */
222 rc = sclp_add_request(req);
223 if (rc) {
224 printk(KERN_WARNING "cpi: could not start request\n");
225 cpi_free_req(req);
226 sclp_unregister(&sclp_cpi_event);
227 return rc;
228 }
229 /* make "insmod" sleep until callback arrives */
230 down(&sem);
231
232 rc = ((struct cpi_sccb *) req->sccb)->header.response_code;
233 if (rc != 0x0020) {
234 printk(KERN_WARNING "cpi: failed with response code 0x%x\n",
235 rc);
236 rc = -ECOMM;
237 } else
238 rc = 0;
239
240 cpi_free_req(req);
241 sclp_unregister(&sclp_cpi_event);
242
243 return rc;
244}
245
246
247static void __exit cpi_module_exit(void) 36static void __exit cpi_module_exit(void)
248{ 37{
249} 38}
250 39
251
252/* declare driver module init/cleanup functions */
253module_init(cpi_module_init); 40module_init(cpi_module_init);
254module_exit(cpi_module_exit); 41module_exit(cpi_module_exit);
255
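After this rewrite sclp_cpi.c is only a module-parameter wrapper; all SCCB construction and event handling moved to sclp_cpi_sys.c (added below) behind sclp_cpi_set_data(). The prototype lives in sclp_cpi_sys.h, which is not part of this excerpt, so the declaration below is inferred from the call site and should be read as an assumption:

/* Assumed interface, reconstructed from cpi_module_init() above. */
int sclp_cpi_set_data(const char *system, const char *sysplex,
                      const char *type, u64 level);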
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
new file mode 100644
index 000000000000..41617032afdc
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -0,0 +1,400 @@
1/*
2 * drivers/s390/char/sclp_cpi_sys.c
3 * SCLP control program identification sysfs interface
4 *
5 * Copyright IBM Corp. 2001, 2007
6 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
7 * Michael Ernst <mernst@de.ibm.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/stat.h>
13#include <linux/device.h>
14#include <linux/string.h>
15#include <linux/ctype.h>
16#include <linux/kmod.h>
17#include <linux/timer.h>
18#include <linux/err.h>
19#include <linux/slab.h>
20#include <linux/completion.h>
21#include <asm/ebcdic.h>
22#include <asm/sclp.h>
23#include "sclp.h"
24#include "sclp_rw.h"
25#include "sclp_cpi_sys.h"
26
27#define CPI_LENGTH_NAME 8
28#define CPI_LENGTH_LEVEL 16
29
30struct cpi_evbuf {
31 struct evbuf_header header;
32 u8 id_format;
33 u8 reserved0;
34 u8 system_type[CPI_LENGTH_NAME];
35 u64 reserved1;
36 u8 system_name[CPI_LENGTH_NAME];
37 u64 reserved2;
38 u64 system_level;
39 u64 reserved3;
40 u8 sysplex_name[CPI_LENGTH_NAME];
41 u8 reserved4[16];
42} __attribute__((packed));
43
44struct cpi_sccb {
45 struct sccb_header header;
46 struct cpi_evbuf cpi_evbuf;
47} __attribute__((packed));
48
49static struct sclp_register sclp_cpi_event = {
50 .send_mask = EVTYP_CTLPROGIDENT_MASK,
51};
52
53static char system_name[CPI_LENGTH_NAME + 1];
54static char sysplex_name[CPI_LENGTH_NAME + 1];
55static char system_type[CPI_LENGTH_NAME + 1];
56static u64 system_level;
57
58static void set_data(char *field, char *data)
59{
60 memset(field, ' ', CPI_LENGTH_NAME);
61 memcpy(field, data, strlen(data));
62 sclp_ascebc_str(field, CPI_LENGTH_NAME);
63}
64
65static void cpi_callback(struct sclp_req *req, void *data)
66{
67 struct completion *completion = data;
68
69 complete(completion);
70}
71
72static struct sclp_req *cpi_prepare_req(void)
73{
74 struct sclp_req *req;
75 struct cpi_sccb *sccb;
76 struct cpi_evbuf *evb;
77
78 req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
79 if (!req)
80 return ERR_PTR(-ENOMEM);
81 sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
82 if (!sccb) {
83 kfree(req);
84 return ERR_PTR(-ENOMEM);
85 }
86
87 /* setup SCCB for Control-Program Identification */
88 sccb->header.length = sizeof(struct cpi_sccb);
89 sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
90 sccb->cpi_evbuf.header.type = 0x0b;
91 evb = &sccb->cpi_evbuf;
92
93 /* set system type */
94 set_data(evb->system_type, system_type);
95
96 /* set system name */
97 set_data(evb->system_name, system_name);
98
99	/* set system level */
100 evb->system_level = system_level;
101
102 /* set sysplex name */
103 set_data(evb->sysplex_name, sysplex_name);
104
105 /* prepare request data structure presented to SCLP driver */
106 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
107 req->sccb = sccb;
108 req->status = SCLP_REQ_FILLED;
109 req->callback = cpi_callback;
110 return req;
111}
112
113static void cpi_free_req(struct sclp_req *req)
114{
115 free_page((unsigned long) req->sccb);
116 kfree(req);
117}
118
119static int cpi_req(void)
120{
121 struct completion completion;
122 struct sclp_req *req;
123 int rc;
124 int response;
125
126 rc = sclp_register(&sclp_cpi_event);
127 if (rc) {
128 printk(KERN_WARNING "cpi: could not register "
129 "to hardware console.\n");
130 goto out;
131 }
132 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
133 printk(KERN_WARNING "cpi: no control program "
134 "identification support\n");
135 rc = -EOPNOTSUPP;
136 goto out_unregister;
137 }
138
139 req = cpi_prepare_req();
140 if (IS_ERR(req)) {
141 printk(KERN_WARNING "cpi: could not allocate request\n");
142 rc = PTR_ERR(req);
143 goto out_unregister;
144 }
145
146 init_completion(&completion);
147 req->callback_data = &completion;
148
149 /* Add request to sclp queue */
150 rc = sclp_add_request(req);
151 if (rc) {
152 printk(KERN_WARNING "cpi: could not start request\n");
153 goto out_free_req;
154 }
155
156 wait_for_completion(&completion);
157
158 if (req->status != SCLP_REQ_DONE) {
159 printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n",
160 req->status);
161 rc = -EIO;
162 goto out_free_req;
163 }
164
165 response = ((struct cpi_sccb *) req->sccb)->header.response_code;
166 if (response != 0x0020) {
167 printk(KERN_WARNING "cpi: failed with "
168 "response code 0x%x\n", response);
169 rc = -EIO;
170 }
171
172out_free_req:
173 cpi_free_req(req);
174
175out_unregister:
176 sclp_unregister(&sclp_cpi_event);
177
178out:
179 return rc;
180}
181
182static int check_string(const char *attr, const char *str)
183{
184 size_t len;
185 size_t i;
186
187 len = strlen(str);
188
189 if ((len > 0) && (str[len - 1] == '\n'))
190 len--;
191
192 if (len > CPI_LENGTH_NAME)
193 return -EINVAL;
194
195 for (i = 0; i < len ; i++) {
196 if (isalpha(str[i]) || isdigit(str[i]) ||
197 strchr("$@# ", str[i]))
198 continue;
199 return -EINVAL;
200 }
201
202 return 0;
203}
204
205static void set_string(char *attr, const char *value)
206{
207 size_t len;
208 size_t i;
209
210 len = strlen(value);
211
212 if ((len > 0) && (value[len - 1] == '\n'))
213 len--;
214
215 for (i = 0; i < CPI_LENGTH_NAME; i++) {
216 if (i < len)
217 attr[i] = toupper(value[i]);
218 else
219 attr[i] = ' ';
220 }
221}
222
223static ssize_t system_name_show(struct kobject *kobj,
224 struct kobj_attribute *attr, char *page)
225{
226 return snprintf(page, PAGE_SIZE, "%s\n", system_name);
227}
228
229static ssize_t system_name_store(struct kobject *kobj,
230 struct kobj_attribute *attr,
231 const char *buf,
232 size_t len)
233{
234 int rc;
235
236 rc = check_string("system_name", buf);
237 if (rc)
238 return rc;
239
240 set_string(system_name, buf);
241
242 return len;
243}
244
245static struct kobj_attribute system_name_attr =
246 __ATTR(system_name, 0644, system_name_show, system_name_store);
247
248static ssize_t sysplex_name_show(struct kobject *kobj,
249 struct kobj_attribute *attr, char *page)
250{
251 return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
252}
253
254static ssize_t sysplex_name_store(struct kobject *kobj,
255 struct kobj_attribute *attr,
256 const char *buf,
257 size_t len)
258{
259 int rc;
260
261 rc = check_string("sysplex_name", buf);
262 if (rc)
263 return rc;
264
265 set_string(sysplex_name, buf);
266
267 return len;
268}
269
270static struct kobj_attribute sysplex_name_attr =
271 __ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
272
273static ssize_t system_type_show(struct kobject *kobj,
274 struct kobj_attribute *attr, char *page)
275{
276 return snprintf(page, PAGE_SIZE, "%s\n", system_type);
277}
278
279static ssize_t system_type_store(struct kobject *kobj,
280 struct kobj_attribute *attr,
281 const char *buf,
282 size_t len)
283{
284 int rc;
285
286 rc = check_string("system_type", buf);
287 if (rc)
288 return rc;
289
290 set_string(system_type, buf);
291
292 return len;
293}
294
295static struct kobj_attribute system_type_attr =
296 __ATTR(system_type, 0644, system_type_show, system_type_store);
297
298static ssize_t system_level_show(struct kobject *kobj,
299 struct kobj_attribute *attr, char *page)
300{
301 unsigned long long level = system_level;
302
303 return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
304}
305
306static ssize_t system_level_store(struct kobject *kobj,
307 struct kobj_attribute *attr,
308 const char *buf,
309 size_t len)
310{
311 unsigned long long level;
312 char *endp;
313
314 level = simple_strtoull(buf, &endp, 16);
315
316 if (endp == buf)
317 return -EINVAL;
318 if (*endp == '\n')
319 endp++;
320 if (*endp)
321 return -EINVAL;
322
323 system_level = level;
324
325 return len;
326}
327
328static struct kobj_attribute system_level_attr =
329 __ATTR(system_level, 0644, system_level_show, system_level_store);
330
331static ssize_t set_store(struct kobject *kobj,
332 struct kobj_attribute *attr,
333 const char *buf, size_t len)
334{
335 int rc;
336
337 rc = cpi_req();
338 if (rc)
339 return rc;
340
341 return len;
342}
343
344static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
345
346static struct attribute *cpi_attrs[] = {
347 &system_name_attr.attr,
348 &sysplex_name_attr.attr,
349 &system_type_attr.attr,
350 &system_level_attr.attr,
351 &set_attr.attr,
352 NULL,
353};
354
355static struct attribute_group cpi_attr_group = {
356 .attrs = cpi_attrs,
357};
358
359static struct kset *cpi_kset;
360
361int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
362 const u64 level)
363{
364 int rc;
365
366 rc = check_string("system_name", system);
367 if (rc)
368 return rc;
369 rc = check_string("sysplex_name", sysplex);
370 if (rc)
371 return rc;
372 rc = check_string("system_type", type);
373 if (rc)
374 return rc;
375
376 set_string(system_name, system);
377 set_string(sysplex_name, sysplex);
378 set_string(system_type, type);
379 system_level = level;
380
381 return cpi_req();
382}
383EXPORT_SYMBOL(sclp_cpi_set_data);
384
385static int __init cpi_init(void)
386{
387 int rc;
388
389 cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
390 if (!cpi_kset)
391 return -ENOMEM;
392
393 rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
394 if (rc)
395 kset_unregister(cpi_kset);
396
397 return rc;
398}
399
400__initcall(cpi_init);
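For in-kernel users the file exports sclp_cpi_set_data(); user space reaches the same state through the attributes that cpi_init() hangs under /sys/firmware/cpi. A hedged usage sketch — the string values are invented and merely chosen to pass check_string():

#include "sclp_cpi_sys.h"

static int __init my_publish_cpi(void)
{
	/*
	 * Hypothetical values: each string may hold up to eight
	 * characters from [A-Za-z0-9$@# ]; set_string() upper-cases
	 * and blank-pads them before the SCLP request is built.
	 */
	return sclp_cpi_set_data("LINUX", "PLEX1", "LPAR", 0x3ULL);
}

/*
 * Rough user-space equivalent through the sysfs group created above
 * (system_level_store() parses hex, with or without a 0x prefix):
 *   echo LINUX > /sys/firmware/cpi/system_name
 *   echo PLEX1 > /sys/firmware/cpi/sysplex_name
 *   echo LPAR  > /sys/firmware/cpi/system_type
 *   echo 0x3   > /sys/firmware/cpi/system_level
 *   echo 1     > /sys/firmware/cpi/set
 */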
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
new file mode 100644
index 000000000000..deef3e6ff496
--- /dev/null
+++ b/drivers/s390/char/sclp_cpi_sys.h
@@ -0,0 +1,15 @@
1/*
2 * drivers/s390/char/sclp_cpi_sys.h
3 * SCLP control program identification sysfs interface
4 *
5 * Copyright IBM Corp. 2007
6 * Author(s): Michael Ernst <mernst@de.ibm.com>
7 */
8
9#ifndef __SCLP_CPI_SYS_H__
10#define __SCLP_CPI_SYS_H__
11
12int sclp_cpi_set_data(const char *system, const char *sysplex,
13 const char *type, u64 level);
14
15#endif /* __SCLP_CPI_SYS_H__ */
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
deleted file mode 100644
index a1136e052750..000000000000
--- a/drivers/s390/char/sclp_info.c
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
2 * drivers/s390/char/sclp_info.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <asm/sclp.h>
12#include "sclp.h"
13
14struct sclp_readinfo_sccb {
15 struct sccb_header header; /* 0-7 */
16 u16 rnmax; /* 8-9 */
17 u8 rnsize; /* 10 */
18 u8 _reserved0[24 - 11]; /* 11-23 */
19 u8 loadparm[8]; /* 24-31 */
20 u8 _reserved1[48 - 32]; /* 32-47 */
21 u64 facilities; /* 48-55 */
22 u8 _reserved2[91 - 56]; /* 56-90 */
23 u8 flags; /* 91 */
24 u8 _reserved3[100 - 92]; /* 92-99 */
25 u32 rnsize2; /* 100-103 */
26 u64 rnmax2; /* 104-111 */
27 u8 _reserved4[4096 - 112]; /* 112-4095 */
28} __attribute__((packed, aligned(4096)));
29
30static struct sclp_readinfo_sccb __initdata early_readinfo_sccb;
31static int __initdata early_readinfo_sccb_valid;
32
33u64 sclp_facilities;
34
35void __init sclp_readinfo_early(void)
36{
37 int ret;
38 int i;
39 struct sclp_readinfo_sccb *sccb;
40 sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
41 SCLP_CMDW_READ_SCP_INFO};
42
43 /* Enable service signal subclass mask. */
44 __ctl_set_bit(0, 9);
45 sccb = &early_readinfo_sccb;
46 for (i = 0; i < ARRAY_SIZE(commands); i++) {
47 do {
48 memset(sccb, 0, sizeof(*sccb));
49 sccb->header.length = sizeof(*sccb);
50 sccb->header.control_mask[2] = 0x80;
51 ret = sclp_service_call(commands[i], sccb);
52 } while (ret == -EBUSY);
53
54 if (ret)
55 break;
56 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
57 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
58 local_irq_disable();
59 /*
 60 * Contents of the sccb might have changed,
61 * therefore a barrier is needed.
62 */
63 barrier();
64 if (sccb->header.response_code == 0x10) {
65 early_readinfo_sccb_valid = 1;
66 break;
67 }
68 if (sccb->header.response_code != 0x1f0)
69 break;
70 }
71 /* Disable service signal subclass mask again. */
72 __ctl_clear_bit(0, 9);
73}
74
75void __init sclp_facilities_detect(void)
76{
77 if (!early_readinfo_sccb_valid)
78 return;
79 sclp_facilities = early_readinfo_sccb.facilities;
80}
81
82unsigned long long __init sclp_memory_detect(void)
83{
84 unsigned long long memsize;
85 struct sclp_readinfo_sccb *sccb;
86
87 if (!early_readinfo_sccb_valid)
88 return 0;
89 sccb = &early_readinfo_sccb;
90 if (sccb->rnsize)
91 memsize = sccb->rnsize << 20;
92 else
93 memsize = sccb->rnsize2 << 20;
94 if (sccb->rnmax)
95 memsize *= sccb->rnmax;
96 else
97 memsize *= sccb->rnmax2;
98 return memsize;
99}
100
101/*
102 * This function will be called after sclp_memory_detect(), which gets called
103 * early from early.c code. Therefore the sccb should have valid contents.
104 */
105void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
106{
107 struct sclp_readinfo_sccb *sccb;
108
109 if (!early_readinfo_sccb_valid)
110 return;
111 sccb = &early_readinfo_sccb;
112 info->is_valid = 1;
113 if (sccb->flags & 0x2)
114 info->has_dump = 1;
115 memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
116}
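The deleted sclp_memory_detect() multiplied the storage-increment size (rnsize, in MiB) by the number of increments (rnmax), falling back to the wider rnsize2/rnmax2 fields when the narrow ones read zero. A worked example with hypothetical SCCB contents:

/*
 * rnsize = 64 (64 MiB increments), rnmax = 256 increments:
 *   memsize = (64ULL << 20) * 256 = 0x400000000 = 16 GiB
 * Large configurations report rnsize == 0 or rnmax == 0 and supply
 * rnsize2/rnmax2 instead.
 */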
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index d6b06ab81188..ad7195d3de0c 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -76,7 +76,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
76} 76}
77 77
78/* 78/*
79 * Return a pointer to the orignal page that has been used to create 79 * Return a pointer to the original page that has been used to create
80 * the buffer. 80 * the buffer.
81 */ 81 */
82void * 82void *
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index da25f8e24152..8246ef3ab095 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1495,7 +1495,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1495 device->cdev->dev.bus_id); 1495 device->cdev->dev.bus_id);
1496 return tape_3590_erp_basic(device, request, irb, -EPERM); 1496 return tape_3590_erp_basic(device, request, irb, -EPERM);
1497 case 0x8013: 1497 case 0x8013:
1498 PRINT_WARN("(%s): Another host has priviliged access to the " 1498 PRINT_WARN("(%s): Another host has privileged access to the "
1499 "tape device\n", device->cdev->dev.bus_id); 1499 "tape device\n", device->cdev->dev.bus_id);
1500 PRINT_WARN("(%s): To solve the problem unload the current " 1500 PRINT_WARN("(%s): To solve the problem unload the current "
1501 "cartridge!\n", device->cdev->dev.bus_id); 1501 "cartridge!\n", device->cdev->dev.bus_id);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 2fae6338ee1c..7ad8cf157641 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -37,7 +37,7 @@ static void tape_long_busy_timeout(unsigned long data);
37 * we can assign the devices to minor numbers of the same major 37 * we can assign the devices to minor numbers of the same major
38 * The list is protected by the rwlock 38 * The list is protected by the rwlock
39 */ 39 */
40static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list); 40static LIST_HEAD(tape_device_list);
41static DEFINE_RWLOCK(tape_device_lock); 41static DEFINE_RWLOCK(tape_device_lock);
42 42
43/* 43/*
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index cea49f001f89..c9b96d51b28f 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -97,7 +97,7 @@ static void tape_proc_stop(struct seq_file *m, void *v)
97{ 97{
98} 98}
99 99
100static struct seq_operations tape_proc_seq = { 100static const struct seq_operations tape_proc_seq = {
101 .start = tape_proc_start, 101 .start = tape_proc_start,
102 .next = tape_proc_next, 102 .next = tape_proc_next,
103 .stop = tape_proc_stop, 103 .stop = tape_proc_stop,
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e0c4c508e121..d364e0bfae12 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -683,7 +683,7 @@ static int vmlogrdr_register_driver(void)
683 /* Register with iucv driver */ 683 /* Register with iucv driver */
684 ret = iucv_register(&vmlogrdr_iucv_handler, 1); 684 ret = iucv_register(&vmlogrdr_iucv_handler, 1);
685 if (ret) { 685 if (ret) {
686 printk (KERN_ERR "vmlogrdr: failed to register with" 686 printk (KERN_ERR "vmlogrdr: failed to register with "
687 "iucv driver\n"); 687 "iucv driver\n");
688 goto out; 688 goto out;
689 } 689 }
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index d70a6e65bf14..7689b500a104 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -759,7 +759,7 @@ static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
759 return newpos; 759 return newpos;
760} 760}
761 761
762static struct file_operations ur_fops = { 762static const struct file_operations ur_fops = {
763 .owner = THIS_MODULE, 763 .owner = THIS_MODULE,
764 .open = ur_open, 764 .open = ur_open,
765 .release = ur_release, 765 .release = ur_release,
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7073daf77981..f523501e6e6c 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -470,7 +470,7 @@ static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
470 return rc; 470 return rc;
471} 471}
472 472
473static struct file_operations zcore_fops = { 473static const struct file_operations zcore_fops = {
474 .owner = THIS_MODULE, 474 .owner = THIS_MODULE,
475 .llseek = zcore_lseek, 475 .llseek = zcore_lseek,
476 .read = zcore_read, 476 .read = zcore_read,
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 5287631fbfc8..b7a07a866291 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * drivers/s390/cio/airq.c 2 * drivers/s390/cio/airq.c
3 * S/390 common I/O routines -- support for adapter interruptions 3 * Support for adapter interruptions
4 * 4 *
5 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 1999,2007
6 * IBM Corporation 6 * Author(s): Ingo Adlung <adlung@de.ibm.com>
7 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Cornelia Huck <cornelia.huck@de.ibm.com>
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Arnd Bergmann <arndb@de.ibm.com>
9 * Arnd Bergmann (arndb@de.ibm.com) 9 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/init.h> 12#include <linux/init.h>
@@ -14,72 +14,131 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/rcupdate.h> 15#include <linux/rcupdate.h>
16 16
17#include <asm/airq.h>
18
19#include "cio.h"
17#include "cio_debug.h" 20#include "cio_debug.h"
18#include "airq.h"
19 21
20static adapter_int_handler_t adapter_handler; 22#define NR_AIRQS 32
23#define NR_AIRQS_PER_WORD sizeof(unsigned long)
24#define NR_AIRQ_WORDS (NR_AIRQS / NR_AIRQS_PER_WORD)
21 25
22/* 26union indicator_t {
23 * register for adapter interrupts 27 unsigned long word[NR_AIRQ_WORDS];
24 * 28 unsigned char byte[NR_AIRQS];
25 * With HiperSockets the zSeries architecture provides for 29} __attribute__((packed));
 26 * means of adapter interrupts, pseudo I/O interrupts that are
27 * not tied to an I/O subchannel, but to an adapter. However,
28 * it doesn't disclose the info how to enable/disable them, but
29 * to recognize them only. Perhaps we should consider them
30 * being shared interrupts, and thus build a linked list
31 * of adapter handlers ... to be evaluated ...
32 */
33int
34s390_register_adapter_interrupt (adapter_int_handler_t handler)
35{
36 int ret;
37 char dbf_txt[15];
38 30
39 CIO_TRACE_EVENT (4, "rgaint"); 31struct airq_t {
32 adapter_int_handler_t handler;
33 void *drv_data;
34};
40 35
41 if (handler == NULL) 36static union indicator_t indicators;
42 ret = -EINVAL; 37static struct airq_t *airqs[NR_AIRQS];
43 else
44 ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
45 if (!ret)
46 synchronize_sched(); /* Allow interrupts to complete. */
47 38
48 sprintf (dbf_txt, "ret:%d", ret); 39static int register_airq(struct airq_t *airq)
49 CIO_TRACE_EVENT (4, dbf_txt); 40{
41 int i;
50 42
51 return ret; 43 for (i = 0; i < NR_AIRQS; i++)
44 if (!cmpxchg(&airqs[i], NULL, airq))
45 return i;
46 return -ENOMEM;
52} 47}
53 48
54int 49/**
55s390_unregister_adapter_interrupt (adapter_int_handler_t handler) 50 * s390_register_adapter_interrupt() - register adapter interrupt handler
51 * @handler: adapter handler to be registered
52 * @drv_data: driver data passed with each call to the handler
53 *
54 * Returns:
55 * Pointer to the indicator to be used on success
56 * ERR_PTR() if registration failed
57 */
58void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
59 void *drv_data)
56{ 60{
61 struct airq_t *airq;
62 char dbf_txt[16];
57 int ret; 63 int ret;
58 char dbf_txt[15];
59 64
60 CIO_TRACE_EVENT (4, "urgaint"); 65 airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
61 66 if (!airq) {
62 if (handler == NULL) 67 ret = -ENOMEM;
63 ret = -EINVAL; 68 goto out;
64 else {
65 adapter_handler = NULL;
66 synchronize_sched(); /* Allow interrupts to complete. */
67 ret = 0;
68 } 69 }
69 sprintf (dbf_txt, "ret:%d", ret); 70 airq->handler = handler;
70 CIO_TRACE_EVENT (4, dbf_txt); 71 airq->drv_data = drv_data;
71 72 ret = register_airq(airq);
72 return ret; 73 if (ret < 0)
74 kfree(airq);
75out:
76 snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
77 CIO_TRACE_EVENT(4, dbf_txt);
78 if (ret < 0)
79 return ERR_PTR(ret);
80 else
81 return &indicators.byte[ret];
73} 82}
83EXPORT_SYMBOL(s390_register_adapter_interrupt);
74 84
75void 85/**
76do_adapter_IO (void) 86 * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
87 * @ind: indicator for which the handler is to be unregistered
88 */
89void s390_unregister_adapter_interrupt(void *ind)
77{ 90{
78 CIO_TRACE_EVENT (6, "doaio"); 91 struct airq_t *airq;
92 char dbf_txt[16];
93 int i;
79 94
80 if (adapter_handler) 95 i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
81 (*adapter_handler) (); 96 snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
97 CIO_TRACE_EVENT(4, dbf_txt);
98 indicators.byte[i] = 0;
99 airq = xchg(&airqs[i], NULL);
100 /*
101 * Allow interrupts to complete. This will ensure that the airq handle
102 * is no longer referenced by any interrupt handler.
103 */
104 synchronize_sched();
105 kfree(airq);
82} 106}
107EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
108
109#define INDICATOR_MASK (0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
83 110
84EXPORT_SYMBOL (s390_register_adapter_interrupt); 111void do_adapter_IO(void)
85EXPORT_SYMBOL (s390_unregister_adapter_interrupt); 112{
113 int w;
114 int i;
115 unsigned long word;
116 struct airq_t *airq;
117
118 /*
119 * Access indicator array in word-sized chunks to minimize storage
120 * fetch operations.
121 */
122 for (w = 0; w < NR_AIRQ_WORDS; w++) {
123 word = indicators.word[w];
124 i = w * NR_AIRQS_PER_WORD;
125 /*
126 * Check bytes within word for active indicators.
127 */
128 while (word) {
129 if (word & INDICATOR_MASK) {
130 airq = airqs[i];
131 if (likely(airq))
132 airq->handler(&indicators.byte[i],
133 airq->drv_data);
134 else
135 /*
136 * Reset ill-behaved indicator.
137 */
138 indicators.byte[i] = 0;
139 }
140 word <<= 8;
141 i++;
142 }
143 }
144}
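The rework replaces the single global adapter handler with per-consumer registrations, each backed by one byte in a shared indicator array. A usage sketch, assuming (from the call site in do_adapter_IO() above) that adapter_int_handler_t now receives the indicator address plus the registered drv_data; all names below are invented:

static void *my_indicator;

static void my_handler(void *ind, void *drv_data)
{
	*(unsigned char *) ind = 0;	/* reset the indicator before handling */
	/* ... dispatch to the device described by drv_data ... */
}

static int my_setup(void *my_dev)
{
	my_indicator = s390_register_adapter_interrupt(my_handler, my_dev);
	if (IS_ERR(my_indicator))
		return PTR_ERR(my_indicator);
	/* program the adapter to store a non-zero byte at my_indicator */
	return 0;
}

static void my_teardown(void)
{
	s390_unregister_adapter_interrupt(my_indicator);
}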
diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h
deleted file mode 100644
index 7d6be3fdcd66..000000000000
--- a/drivers/s390/cio/airq.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef S390_AINTERRUPT_H
2#define S390_AINTERRUPT_H
3
4typedef int (*adapter_int_handler_t)(void);
5
6extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
7extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
8extern void do_adapter_IO (void);
9
10#endif
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index bd5f16f80bf8..e8597ec92247 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -348,7 +348,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
348 return user_len; 348 return user_len;
349} 349}
350 350
351static struct seq_operations cio_ignore_proc_seq_ops = { 351static const struct seq_operations cio_ignore_proc_seq_ops = {
352 .start = cio_ignore_proc_seq_start, 352 .start = cio_ignore_proc_seq_start,
353 .stop = cio_ignore_proc_seq_stop, 353 .stop = cio_ignore_proc_seq_stop,
354 .next = cio_ignore_proc_seq_next, 354 .next = cio_ignore_proc_seq_next,
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5baa517c3b66..3964056a9a47 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -35,8 +35,8 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
35 struct ccwgroup_device *gdev; 35 struct ccwgroup_device *gdev;
36 struct ccwgroup_driver *gdrv; 36 struct ccwgroup_driver *gdrv;
37 37
38 gdev = container_of(dev, struct ccwgroup_device, dev); 38 gdev = to_ccwgroupdev(dev);
39 gdrv = container_of(drv, struct ccwgroup_driver, driver); 39 gdrv = to_ccwgroupdrv(drv);
40 40
41 if (gdev->creator_id == gdrv->driver_id) 41 if (gdev->creator_id == gdrv->driver_id)
42 return 1; 42 return 1;
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
75 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 75 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
76 76
77 mutex_lock(&gdev->reg_mutex); 77 mutex_lock(&gdev->reg_mutex);
78 __ccwgroup_remove_symlinks(gdev); 78 if (device_is_registered(&gdev->dev)) {
79 device_unregister(dev); 79 __ccwgroup_remove_symlinks(gdev);
80 device_unregister(dev);
81 }
80 mutex_unlock(&gdev->reg_mutex); 82 mutex_unlock(&gdev->reg_mutex);
81} 83}
82 84
@@ -111,7 +113,7 @@ ccwgroup_release (struct device *dev)
111 gdev = to_ccwgroupdev(dev); 113 gdev = to_ccwgroupdev(dev);
112 114
113 for (i = 0; i < gdev->count; i++) { 115 for (i = 0; i < gdev->count; i++) {
114 gdev->cdev[i]->dev.driver_data = NULL; 116 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
115 put_device(&gdev->cdev[i]->dev); 117 put_device(&gdev->cdev[i]->dev);
116 } 118 }
117 kfree(gdev); 119 kfree(gdev);
@@ -196,11 +198,11 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
196 goto error; 198 goto error;
197 } 199 }
198 /* Don't allow a device to belong to more than one group. */ 200 /* Don't allow a device to belong to more than one group. */
199 if (gdev->cdev[i]->dev.driver_data) { 201 if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
200 rc = -EINVAL; 202 rc = -EINVAL;
201 goto error; 203 goto error;
202 } 204 }
203 gdev->cdev[i]->dev.driver_data = gdev; 205 dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
204 } 206 }
205 207
206 gdev->creator_id = creator_id; 208 gdev->creator_id = creator_id;
@@ -234,8 +236,8 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
234error: 236error:
235 for (i = 0; i < argc; i++) 237 for (i = 0; i < argc; i++)
236 if (gdev->cdev[i]) { 238 if (gdev->cdev[i]) {
237 if (gdev->cdev[i]->dev.driver_data == gdev) 239 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
238 gdev->cdev[i]->dev.driver_data = NULL; 240 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
239 put_device(&gdev->cdev[i]->dev); 241 put_device(&gdev->cdev[i]->dev);
240 } 242 }
241 mutex_unlock(&gdev->reg_mutex); 243 mutex_unlock(&gdev->reg_mutex);
@@ -408,6 +410,7 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
408 /* register our new driver with the core */ 410 /* register our new driver with the core */
409 cdriver->driver.bus = &ccwgroup_bus_type; 411 cdriver->driver.bus = &ccwgroup_bus_type;
410 cdriver->driver.name = cdriver->name; 412 cdriver->driver.name = cdriver->name;
413 cdriver->driver.owner = cdriver->owner;
411 414
412 return driver_register(&cdriver->driver); 415 return driver_register(&cdriver->driver);
413} 416}
@@ -463,8 +466,8 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
463{ 466{
464 struct ccwgroup_device *gdev; 467 struct ccwgroup_device *gdev;
465 468
466 if (cdev->dev.driver_data) { 469 gdev = dev_get_drvdata(&cdev->dev);
467 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 470 if (gdev) {
468 if (get_device(&gdev->dev)) { 471 if (get_device(&gdev->dev)) {
469 mutex_lock(&gdev->reg_mutex); 472 mutex_lock(&gdev->reg_mutex);
470 if (device_is_registered(&gdev->dev)) 473 if (device_is_registered(&gdev->dev))
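The driver_data hunks above are mechanical one-for-one substitutions of the accessor helpers for the open-coded field, e.g. (sketch, not additional patch content):

	dev_set_drvdata(&cdev->dev, gdev);	/* was: cdev->dev.driver_data = gdev; */
	gdev = dev_get_drvdata(&cdev->dev);	/* was: gdev = cdev->dev.driver_data; */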
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 597c0c76a2ad..e7ba16a74ef7 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -89,7 +89,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
89 /* Copy data */ 89 /* Copy data */
90 ret = 0; 90 ret = 0;
91 memset(ssd, 0, sizeof(struct chsc_ssd_info)); 91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
92 if ((ssd_area->st != 0) && (ssd_area->st != 2)) 92 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
93 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
93 goto out_free; 94 goto out_free;
94 ssd->path_mask = ssd_area->path_mask; 95 ssd->path_mask = ssd_area->path_mask;
95 ssd->fla_valid_mask = ssd_area->fla_valid_mask; 96 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
@@ -132,20 +133,16 @@ static void terminate_internal_io(struct subchannel *sch)
132 device_set_intretry(sch); 133 device_set_intretry(sch);
133 /* Call handler. */ 134 /* Call handler. */
134 if (sch->driver && sch->driver->termination) 135 if (sch->driver && sch->driver->termination)
135 sch->driver->termination(&sch->dev); 136 sch->driver->termination(sch);
136} 137}
137 138
138static int 139static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
139s390_subchannel_remove_chpid(struct device *dev, void *data)
140{ 140{
141 int j; 141 int j;
142 int mask; 142 int mask;
143 struct subchannel *sch; 143 struct chp_id *chpid = data;
144 struct chp_id *chpid;
145 struct schib schib; 144 struct schib schib;
146 145
147 sch = to_subchannel(dev);
148 chpid = data;
149 for (j = 0; j < 8; j++) { 146 for (j = 0; j < 8; j++) {
150 mask = 0x80 >> j; 147 mask = 0x80 >> j;
151 if ((sch->schib.pmcw.pim & mask) && 148 if ((sch->schib.pmcw.pim & mask) &&
@@ -158,7 +155,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
158 spin_lock_irq(sch->lock); 155 spin_lock_irq(sch->lock);
159 156
160 stsch(sch->schid, &schib); 157 stsch(sch->schid, &schib);
161 if (!schib.pmcw.dnv) 158 if (!css_sch_is_valid(&schib))
162 goto out_unreg; 159 goto out_unreg;
163 memcpy(&sch->schib, &schib, sizeof(struct schib)); 160 memcpy(&sch->schib, &schib, sizeof(struct schib));
164 /* Check for single path devices. */ 161 /* Check for single path devices. */
@@ -172,12 +169,12 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
172 terminate_internal_io(sch); 169 terminate_internal_io(sch);
173 /* Re-start path verification. */ 170 /* Re-start path verification. */
174 if (sch->driver && sch->driver->verify) 171 if (sch->driver && sch->driver->verify)
175 sch->driver->verify(&sch->dev); 172 sch->driver->verify(sch);
176 } 173 }
177 } else { 174 } else {
178 /* trigger path verification. */ 175 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify) 176 if (sch->driver && sch->driver->verify)
180 sch->driver->verify(&sch->dev); 177 sch->driver->verify(sch);
181 else if (sch->lpm == mask) 178 else if (sch->lpm == mask)
182 goto out_unreg; 179 goto out_unreg;
183 } 180 }
@@ -201,12 +198,10 @@ void chsc_chp_offline(struct chp_id chpid)
201 198
202 if (chp_get_status(chpid) <= 0) 199 if (chp_get_status(chpid) <= 0)
203 return; 200 return;
204 bus_for_each_dev(&css_bus_type, NULL, &chpid, 201 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
205 s390_subchannel_remove_chpid);
206} 202}
207 203
208static int 204static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
209s390_process_res_acc_new_sch(struct subchannel_id schid)
210{ 205{
211 struct schib schib; 206 struct schib schib;
212 /* 207 /*
@@ -252,18 +247,10 @@ static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
252 return 0; 247 return 0;
253} 248}
254 249
255static int 250static int __s390_process_res_acc(struct subchannel *sch, void *data)
256__s390_process_res_acc(struct subchannel_id schid, void *data)
257{ 251{
258 int chp_mask, old_lpm; 252 int chp_mask, old_lpm;
259 struct res_acc_data *res_data; 253 struct res_acc_data *res_data = data;
260 struct subchannel *sch;
261
262 res_data = data;
263 sch = get_subchannel_by_schid(schid);
264 if (!sch)
265 /* Check if a subchannel is newly available. */
266 return s390_process_res_acc_new_sch(schid);
267 254
268 spin_lock_irq(sch->lock); 255 spin_lock_irq(sch->lock);
269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); 256 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
@@ -279,10 +266,10 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
279 if (!old_lpm && sch->lpm) 266 if (!old_lpm && sch->lpm)
280 device_trigger_reprobe(sch); 267 device_trigger_reprobe(sch);
281 else if (sch->driver && sch->driver->verify) 268 else if (sch->driver && sch->driver->verify)
282 sch->driver->verify(&sch->dev); 269 sch->driver->verify(sch);
283out: 270out:
284 spin_unlock_irq(sch->lock); 271 spin_unlock_irq(sch->lock);
285 put_device(&sch->dev); 272
286 return 0; 273 return 0;
287} 274}
288 275
@@ -305,7 +292,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
305 * The more information we have (info), the less scanning 292 * The more information we have (info), the less scanning
306 * we will have to do. 293
307 */ 294 */
308 for_each_subchannel(__s390_process_res_acc, res_data); 295 for_each_subchannel_staged(__s390_process_res_acc,
296 s390_process_res_acc_new_sch, res_data);
309} 297}
310 298
311static int 299static int
@@ -499,8 +487,7 @@ void chsc_process_crw(void)
499 } while (sei_area->flags & 0x80); 487 } while (sei_area->flags & 0x80);
500} 488}
501 489
502static int 490static int __chp_add_new_sch(struct subchannel_id schid, void *data)
503__chp_add_new_sch(struct subchannel_id schid)
504{ 491{
505 struct schib schib; 492 struct schib schib;
506 493
@@ -514,45 +501,37 @@ __chp_add_new_sch(struct subchannel_id schid)
514} 501}
515 502
516 503
517static int 504static int __chp_add(struct subchannel *sch, void *data)
518__chp_add(struct subchannel_id schid, void *data)
519{ 505{
520 int i, mask; 506 int i, mask;
521 struct chp_id *chpid; 507 struct chp_id *chpid = data;
522 struct subchannel *sch; 508
523
524 chpid = data;
525 sch = get_subchannel_by_schid(schid);
526 if (!sch)
527 /* Check if the subchannel is now available. */
528 return __chp_add_new_sch(schid);
529 spin_lock_irq(sch->lock); 509 spin_lock_irq(sch->lock);
530 for (i=0; i<8; i++) { 510 for (i=0; i<8; i++) {
531 mask = 0x80 >> i; 511 mask = 0x80 >> i;
532 if ((sch->schib.pmcw.pim & mask) && 512 if ((sch->schib.pmcw.pim & mask) &&
533 (sch->schib.pmcw.chpid[i] == chpid->id)) { 513 (sch->schib.pmcw.chpid[i] == chpid->id))
534 if (stsch(sch->schid, &sch->schib) != 0) {
535 /* Endgame. */
536 spin_unlock_irq(sch->lock);
537 return -ENXIO;
538 }
539 break; 514 break;
540 }
541 } 515 }
542 if (i==8) { 516 if (i==8) {
543 spin_unlock_irq(sch->lock); 517 spin_unlock_irq(sch->lock);
544 return 0; 518 return 0;
545 } 519 }
520 if (stsch(sch->schid, &sch->schib)) {
521 spin_unlock_irq(sch->lock);
522 css_schedule_eval(sch->schid);
523 return 0;
524 }
546 sch->lpm = ((sch->schib.pmcw.pim & 525 sch->lpm = ((sch->schib.pmcw.pim &
547 sch->schib.pmcw.pam & 526 sch->schib.pmcw.pam &
548 sch->schib.pmcw.pom) 527 sch->schib.pmcw.pom)
549 | mask) & sch->opm; 528 | mask) & sch->opm;
550 529
551 if (sch->driver && sch->driver->verify) 530 if (sch->driver && sch->driver->verify)
552 sch->driver->verify(&sch->dev); 531 sch->driver->verify(sch);
553 532
554 spin_unlock_irq(sch->lock); 533 spin_unlock_irq(sch->lock);
555 put_device(&sch->dev); 534
556 return 0; 535 return 0;
557} 536}
558 537
@@ -564,7 +543,8 @@ void chsc_chp_online(struct chp_id chpid)
564 CIO_TRACE_EVENT(2, dbf_txt); 543 CIO_TRACE_EVENT(2, dbf_txt);
565 544
566 if (chp_get_status(chpid) != 0) 545 if (chp_get_status(chpid) != 0)
567 for_each_subchannel(__chp_add, &chpid); 546 for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
547 &chpid);
568} 548}
569 549
570static void __s390_subchannel_vary_chpid(struct subchannel *sch, 550static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -589,7 +569,7 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
589 if (!old_lpm) 569 if (!old_lpm)
590 device_trigger_reprobe(sch); 570 device_trigger_reprobe(sch);
591 else if (sch->driver && sch->driver->verify) 571 else if (sch->driver && sch->driver->verify)
592 sch->driver->verify(&sch->dev); 572 sch->driver->verify(sch);
593 break; 573 break;
594 } 574 }
595 sch->opm &= ~mask; 575 sch->opm &= ~mask;
@@ -603,37 +583,29 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
603 terminate_internal_io(sch); 583 terminate_internal_io(sch);
604 /* Re-start path verification. */ 584 /* Re-start path verification. */
605 if (sch->driver && sch->driver->verify) 585 if (sch->driver && sch->driver->verify)
606 sch->driver->verify(&sch->dev); 586 sch->driver->verify(sch);
607 } 587 }
608 } else if (!sch->lpm) { 588 } else if (!sch->lpm) {
609 if (device_trigger_verify(sch) != 0) 589 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid); 590 css_schedule_eval(sch->schid);
611 } else if (sch->driver && sch->driver->verify) 591 } else if (sch->driver && sch->driver->verify)
612 sch->driver->verify(&sch->dev); 592 sch->driver->verify(sch);
613 break; 593 break;
614 } 594 }
615 spin_unlock_irqrestore(sch->lock, flags); 595 spin_unlock_irqrestore(sch->lock, flags);
616} 596}
617 597
618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) 598static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
619{ 599{
620 struct subchannel *sch; 600 struct chp_id *chpid = data;
621 struct chp_id *chpid;
622
623 sch = to_subchannel(dev);
624 chpid = data;
625 601
626 __s390_subchannel_vary_chpid(sch, *chpid, 0); 602 __s390_subchannel_vary_chpid(sch, *chpid, 0);
627 return 0; 603 return 0;
628} 604}
629 605
630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) 606static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
631{ 607{
632 struct subchannel *sch; 608 struct chp_id *chpid = data;
633 struct chp_id *chpid;
634
635 sch = to_subchannel(dev);
636 chpid = data;
637 609
638 __s390_subchannel_vary_chpid(sch, *chpid, 1); 610 __s390_subchannel_vary_chpid(sch, *chpid, 1);
639 return 0; 611 return 0;
@@ -643,13 +615,7 @@ static int
643__s390_vary_chpid_on(struct subchannel_id schid, void *data) 615__s390_vary_chpid_on(struct subchannel_id schid, void *data)
644{ 616{
645 struct schib schib; 617 struct schib schib;
646 struct subchannel *sch;
647 618
648 sch = get_subchannel_by_schid(schid);
649 if (sch) {
650 put_device(&sch->dev);
651 return 0;
652 }
653 if (stsch_err(schid, &schib)) 619 if (stsch_err(schid, &schib))
654 /* We're through */ 620 /* We're through */
655 return -ENXIO; 621 return -ENXIO;
@@ -669,12 +635,13 @@ int chsc_chp_vary(struct chp_id chpid, int on)
669 * Redo PathVerification on the devices the chpid connects to 635 * Redo PathVerification on the devices the chpid connects to
670 */ 636 */
671 637
672 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
673 s390_subchannel_vary_chpid_on :
674 s390_subchannel_vary_chpid_off);
675 if (on) 638 if (on)
676 /* Scan for new devices on varied on path. */ 639 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
677 for_each_subchannel(__s390_vary_chpid_on, NULL); 640 __s390_vary_chpid_on, &chpid);
641 else
642 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
643 NULL, &chpid);
644
678 return 0; 645 return 0;
679} 646}
680 647
@@ -1075,7 +1042,7 @@ chsc_determine_css_characteristics(void)
1075 1042
1076 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1043 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1077 if (!scsc_area) { 1044 if (!scsc_area) {
1078 CIO_MSG_EVENT(0, "Was not able to determine available" 1045 CIO_MSG_EVENT(0, "Was not able to determine available "
1079 "CHSCs due to no memory.\n"); 1046 "CHSCs due to no memory.\n");
1080 return -ENOMEM; 1047 return -ENOMEM;
1081 } 1048 }
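The conversions in this file illustrate the contract of the new for_each_subchannel_staged() traversal: one callback for subchannels that are already registered, an optional second one for bare subchannel IDs with no device behind them. A sketch inferred from the converted call sites; the names are illustrative:

static int my_sch_cb(struct subchannel *sch, void *data)
{
	/* called for each registered subchannel; the core has already
	 * resolved the ID and holds the device reference */
	return 0;
}

static int my_new_cb(struct subchannel_id schid, void *data)
{
	/* called only for IDs without a registered subchannel, e.g. to
	 * pick up newly available hardware */
	return 0;
}

	/* the second callback may be NULL when new IDs are of no interest */
	for_each_subchannel_staged(my_sch_cb, my_new_cb, NULL);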
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 46905345159e..60590a12d529 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -23,11 +23,12 @@
23#include <asm/reset.h> 23#include <asm/reset.h>
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include "airq.h" 26#include <asm/airq.h>
27#include "cio.h" 27#include "cio.h"
28#include "css.h" 28#include "css.h"
29#include "chsc.h" 29#include "chsc.h"
30#include "ioasm.h" 30#include "ioasm.h"
31#include "io_sch.h"
31#include "blacklist.h" 32#include "blacklist.h"
32#include "cio_debug.h" 33#include "cio_debug.h"
33#include "chp.h" 34#include "chp.h"
@@ -56,39 +57,37 @@ __setup ("cio_msg=", cio_setup);
56 57
57/* 58/*
58 * Function: cio_debug_init 59 * Function: cio_debug_init
59 * Initializes three debug logs (under /proc/s390dbf) for common I/O: 60 * Initializes three debug logs for common I/O:
60 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on 61 * - cio_msg logs generic cio messages
61 * - cio_trace logs the calling of different functions 62 * - cio_trace logs the calling of different functions
62 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on 63 * - cio_crw logs machine check related cio messages
63 * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
64 */ 64 */
65static int __init 65static int __init cio_debug_init(void)
66cio_debug_init (void)
67{ 66{
68 cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long)); 67 cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
69 if (!cio_debug_msg_id) 68 if (!cio_debug_msg_id)
70 goto out_unregister; 69 goto out_unregister;
71 debug_register_view (cio_debug_msg_id, &debug_sprintf_view); 70 debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
72 debug_set_level (cio_debug_msg_id, 2); 71 debug_set_level(cio_debug_msg_id, 2);
73 cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 16); 72 cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
74 if (!cio_debug_trace_id) 73 if (!cio_debug_trace_id)
75 goto out_unregister; 74 goto out_unregister;
76 debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view); 75 debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
77 debug_set_level (cio_debug_trace_id, 2); 76 debug_set_level(cio_debug_trace_id, 2);
78 cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long)); 77 cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
79 if (!cio_debug_crw_id) 78 if (!cio_debug_crw_id)
80 goto out_unregister; 79 goto out_unregister;
81 debug_register_view (cio_debug_crw_id, &debug_sprintf_view); 80 debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
82 debug_set_level (cio_debug_crw_id, 2); 81 debug_set_level(cio_debug_crw_id, 4);
83 return 0; 82 return 0;
84 83
85out_unregister: 84out_unregister:
86 if (cio_debug_msg_id) 85 if (cio_debug_msg_id)
87 debug_unregister (cio_debug_msg_id); 86 debug_unregister(cio_debug_msg_id);
88 if (cio_debug_trace_id) 87 if (cio_debug_trace_id)
89 debug_unregister (cio_debug_trace_id); 88 debug_unregister(cio_debug_trace_id);
90 if (cio_debug_crw_id) 89 if (cio_debug_crw_id)
91 debug_unregister (cio_debug_crw_id); 90 debug_unregister(cio_debug_crw_id);
92 printk(KERN_WARNING"cio: could not initialize debugging\n"); 91 printk(KERN_WARNING"cio: could not initialize debugging\n");
93 return -1; 92 return -1;
94} 93}
@@ -147,7 +146,7 @@ cio_tpi(void)
147 spin_lock(sch->lock); 146 spin_lock(sch->lock);
148 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 147 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
149 if (sch->driver && sch->driver->irq) 148 if (sch->driver && sch->driver->irq)
150 sch->driver->irq(&sch->dev); 149 sch->driver->irq(sch);
151 spin_unlock(sch->lock); 150 spin_unlock(sch->lock);
152 irq_exit (); 151 irq_exit ();
153 _local_bh_enable(); 152 _local_bh_enable();
@@ -184,33 +183,35 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
184{ 183{
185 char dbf_txt[15]; 184 char dbf_txt[15];
186 int ccode; 185 int ccode;
186 struct orb *orb;
187 187
188 CIO_TRACE_EVENT (4, "stIO"); 188 CIO_TRACE_EVENT(4, "stIO");
189 CIO_TRACE_EVENT (4, sch->dev.bus_id); 189 CIO_TRACE_EVENT(4, sch->dev.bus_id);
190 190
191 orb = &to_io_private(sch)->orb;
191 /* sch is always under 2G. */ 192 /* sch is always under 2G. */
192 sch->orb.intparm = (__u32)(unsigned long)sch; 193 orb->intparm = (u32)(addr_t)sch;
193 sch->orb.fmt = 1; 194 orb->fmt = 1;
194 195
195 sch->orb.pfch = sch->options.prefetch == 0; 196 orb->pfch = sch->options.prefetch == 0;
196 sch->orb.spnd = sch->options.suspend; 197 orb->spnd = sch->options.suspend;
197 sch->orb.ssic = sch->options.suspend && sch->options.inter; 198 orb->ssic = sch->options.suspend && sch->options.inter;
198 sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; 199 orb->lpm = (lpm != 0) ? lpm : sch->lpm;
199#ifdef CONFIG_64BIT 200#ifdef CONFIG_64BIT
200 /* 201 /*
201 * for 64 bit we always support 64 bit IDAWs with 4k page size only 202 * for 64 bit we always support 64 bit IDAWs with 4k page size only
202 */ 203 */
203 sch->orb.c64 = 1; 204 orb->c64 = 1;
204 sch->orb.i2k = 0; 205 orb->i2k = 0;
205#endif 206#endif
206 sch->orb.key = key >> 4; 207 orb->key = key >> 4;
207 /* issue "Start Subchannel" */ 208 /* issue "Start Subchannel" */
208 sch->orb.cpa = (__u32) __pa (cpa); 209 orb->cpa = (__u32) __pa(cpa);
209 ccode = ssch (sch->schid, &sch->orb); 210 ccode = ssch(sch->schid, orb);
210 211
211 /* process condition code */ 212 /* process condition code */
212 sprintf (dbf_txt, "ccode:%d", ccode); 213 sprintf(dbf_txt, "ccode:%d", ccode);
213 CIO_TRACE_EVENT (4, dbf_txt); 214 CIO_TRACE_EVENT(4, dbf_txt);
214 215
215 switch (ccode) { 216 switch (ccode) {
216 case 0: 217 case 0:
@@ -405,8 +406,8 @@ cio_modify (struct subchannel *sch)
405/* 406/*
406 * Enable subchannel. 407 * Enable subchannel.
407 */ 408 */
408int 409int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
409cio_enable_subchannel (struct subchannel *sch, unsigned int isc) 410 u32 intparm)
410{ 411{
411 char dbf_txt[15]; 412 char dbf_txt[15];
412 int ccode; 413 int ccode;
@@ -425,7 +426,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
425 for (retry = 5, ret = 0; retry > 0; retry--) { 426 for (retry = 5, ret = 0; retry > 0; retry--) {
426 sch->schib.pmcw.ena = 1; 427 sch->schib.pmcw.ena = 1;
427 sch->schib.pmcw.isc = isc; 428 sch->schib.pmcw.isc = isc;
428 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 429 sch->schib.pmcw.intparm = intparm;
429 ret = cio_modify(sch); 430 ret = cio_modify(sch);
430 if (ret == -ENODEV) 431 if (ret == -ENODEV)
431 break; 432 break;
@@ -567,7 +568,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
567 */ 568 */
568 if (sch->st != 0) { 569 if (sch->st != 0) {
569 CIO_DEBUG(KERN_INFO, 0, 570 CIO_DEBUG(KERN_INFO, 0,
570 "cio: Subchannel 0.%x.%04x reports " 571 "Subchannel 0.%x.%04x reports "
571 "non-I/O subchannel type %04X\n", 572 "non-I/O subchannel type %04X\n",
572 sch->schid.ssid, sch->schid.sch_no, sch->st); 573 sch->schid.ssid, sch->schid.sch_no, sch->st);
573 /* We stop here for non-io subchannels. */ 574 /* We stop here for non-io subchannels. */
@@ -576,11 +577,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
576 } 577 }
577 578
578 /* Initialization for io subchannels. */ 579 /* Initialization for io subchannels. */
579 if (!sch->schib.pmcw.dnv) { 580 if (!css_sch_is_valid(&sch->schib)) {
580 /* io subchannel but device number is invalid. */
581 err = -ENODEV; 581 err = -ENODEV;
582 goto out; 582 goto out;
583 } 583 }
584
584 /* Devno is valid. */ 585 /* Devno is valid. */
585 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { 586 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
586 /* 587 /*
@@ -600,7 +601,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
600 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
601 602
602 CIO_DEBUG(KERN_INFO, 0, 603 CIO_DEBUG(KERN_INFO, 0,
603 "cio: Detected device %04x on subchannel 0.%x.%04X" 604 "Detected device %04x on subchannel 0.%x.%04X"
604 " - PIM = %02X, PAM = %02X, POM = %02X\n", 605 " - PIM = %02X, PAM = %02X, POM = %02X\n",
605 sch->schib.pmcw.dev, sch->schid.ssid, 606 sch->schib.pmcw.dev, sch->schid.ssid,
606 sch->schid.sch_no, sch->schib.pmcw.pim, 607 sch->schid.sch_no, sch->schib.pmcw.pim,
@@ -680,7 +681,7 @@ do_IRQ (struct pt_regs *regs)
680 sizeof (irb->scsw)); 681 sizeof (irb->scsw));
681 /* Call interrupt handler if there is one. */ 682 /* Call interrupt handler if there is one. */
682 if (sch->driver && sch->driver->irq) 683 if (sch->driver && sch->driver->irq)
683 sch->driver->irq(&sch->dev); 684 sch->driver->irq(sch);
684 } 685 }
685 if (sch) 686 if (sch)
686 spin_unlock(sch->lock); 687 spin_unlock(sch->lock);
@@ -698,8 +699,14 @@ do_IRQ (struct pt_regs *regs)
698 699
699#ifdef CONFIG_CCW_CONSOLE 700#ifdef CONFIG_CCW_CONSOLE
700static struct subchannel console_subchannel; 701static struct subchannel console_subchannel;
702static struct io_subchannel_private console_priv;
701static int console_subchannel_in_use; 703static int console_subchannel_in_use;
702 704
705void *cio_get_console_priv(void)
706{
707 return &console_priv;
708}
709
703/* 710/*
704 * busy wait for the next interrupt on the console 711 * busy wait for the next interrupt on the console
705 */ 712 */
@@ -738,9 +745,9 @@ cio_test_for_console(struct subchannel_id schid, void *data)
738{ 745{
739 if (stsch_err(schid, &console_subchannel.schib) != 0) 746 if (stsch_err(schid, &console_subchannel.schib) != 0)
740 return -ENXIO; 747 return -ENXIO;
741 if (console_subchannel.schib.pmcw.dnv && 748 if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
742 console_subchannel.schib.pmcw.dev == 749 console_subchannel.schib.pmcw.dnv &&
743 console_devno) { 750 (console_subchannel.schib.pmcw.dev == console_devno)) {
744 console_irq = schid.sch_no; 751 console_irq = schid.sch_no;
745 return 1; /* found */ 752 return 1; /* found */
746 } 753 }
@@ -758,6 +765,7 @@ cio_get_console_sch_no(void)
758 /* VM provided us with the irq number of the console. */ 765 /* VM provided us with the irq number of the console. */
759 schid.sch_no = console_irq; 766 schid.sch_no = console_irq;
760 if (stsch(schid, &console_subchannel.schib) != 0 || 767 if (stsch(schid, &console_subchannel.schib) != 0 ||
768 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
761 !console_subchannel.schib.pmcw.dnv) 769 !console_subchannel.schib.pmcw.dnv)
762 return -1; 770 return -1;
763 console_devno = console_subchannel.schib.pmcw.dev; 771 console_devno = console_subchannel.schib.pmcw.dev;
@@ -804,7 +812,7 @@ cio_probe_console(void)
804 ctl_set_bit(6, 24); 812 ctl_set_bit(6, 24);
805 console_subchannel.schib.pmcw.isc = 7; 813 console_subchannel.schib.pmcw.isc = 7;
806 console_subchannel.schib.pmcw.intparm = 814 console_subchannel.schib.pmcw.intparm =
807 (__u32)(unsigned long)&console_subchannel; 815 (u32)(addr_t)&console_subchannel;
808 ret = cio_modify(&console_subchannel); 816 ret = cio_modify(&console_subchannel);
809 if (ret) { 817 if (ret) {
810 console_subchannel_in_use = 0; 818 console_subchannel_in_use = 0;
@@ -1022,7 +1030,7 @@ static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
1022 1030
1023 if (stsch_reset(schid, &schib)) 1031 if (stsch_reset(schid, &schib))
1024 return -ENXIO; 1032 return -ENXIO;
1025 if (schib.pmcw.dnv && 1033 if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
1026 (schib.pmcw.dev == match_id->devid.devno) && 1034 (schib.pmcw.dev == match_id->devid.devno) &&
1027 (schid.ssid == match_id->devid.ssid)) { 1035 (schid.ssid == match_id->devid.ssid)) {
1028 match_id->schid = schid; 1036 match_id->schid = schid;
@@ -1068,6 +1076,8 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1068 return -ENODEV; 1076 return -ENODEV;
1069 if (stsch(schid, &schib)) 1077 if (stsch(schid, &schib))
1070 return -ENODEV; 1078 return -ENODEV;
1079 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
1080 return -ENODEV;
1071 if (!schib.pmcw.dnv) 1081 if (!schib.pmcw.dnv)
1072 return -ENODEV; 1082 return -ENODEV;
1073 iplinfo->devno = schib.pmcw.dev; 1083 iplinfo->devno = schib.pmcw.dev;
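With the changed prototype, cio_enable_subchannel() no longer hardwires the subchannel address as interruption parameter; the caller passes it in. A hedged example of the updated call, mirroring the intparm convention visible in cio_start_key() (the isc value 3 is a placeholder):

	/* old: cio_enable_subchannel(sch, isc);
	 * new: the caller decides what the hardware stores as intparm */
	ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch);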
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 7446c39951a7..52afa4c784de 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -11,32 +11,32 @@
11 * path management control word 11 * path management control word
12 */ 12 */
13struct pmcw { 13struct pmcw {
14 __u32 intparm; /* interruption parameter */ 14 u32 intparm; /* interruption parameter */
15 __u32 qf : 1; /* qdio facility */ 15 u32 qf : 1; /* qdio facility */
16 __u32 res0 : 1; /* reserved zeros */ 16 u32 res0 : 1; /* reserved zeros */
17 __u32 isc : 3; /* interruption subclass */ 17 u32 isc : 3; /* interruption subclass */
18 __u32 res5 : 3; /* reserved zeros */ 18 u32 res5 : 3; /* reserved zeros */
19 __u32 ena : 1; /* enabled */ 19 u32 ena : 1; /* enabled */
20 __u32 lm : 2; /* limit mode */ 20 u32 lm : 2; /* limit mode */
21 __u32 mme : 2; /* measurement-mode enable */ 21 u32 mme : 2; /* measurement-mode enable */
22 __u32 mp : 1; /* multipath mode */ 22 u32 mp : 1; /* multipath mode */
23 __u32 tf : 1; /* timing facility */ 23 u32 tf : 1; /* timing facility */
24 __u32 dnv : 1; /* device number valid */ 24 u32 dnv : 1; /* device number valid */
25 __u32 dev : 16; /* device number */ 25 u32 dev : 16; /* device number */
26 __u8 lpm; /* logical path mask */ 26 u8 lpm; /* logical path mask */
27 __u8 pnom; /* path not operational mask */ 27 u8 pnom; /* path not operational mask */
28 __u8 lpum; /* last path used mask */ 28 u8 lpum; /* last path used mask */
29 __u8 pim; /* path installed mask */ 29 u8 pim; /* path installed mask */
30 __u16 mbi; /* measurement-block index */ 30 u16 mbi; /* measurement-block index */
31 __u8 pom; /* path operational mask */ 31 u8 pom; /* path operational mask */
32 __u8 pam; /* path available mask */ 32 u8 pam; /* path available mask */
33 __u8 chpid[8]; /* CHPID 0-7 (if available) */ 33 u8 chpid[8]; /* CHPID 0-7 (if available) */
34 __u32 unused1 : 8; /* reserved zeros */ 34 u32 unused1 : 8; /* reserved zeros */
35 __u32 st : 3; /* subchannel type */ 35 u32 st : 3; /* subchannel type */
36 __u32 unused2 : 18; /* reserved zeros */ 36 u32 unused2 : 18; /* reserved zeros */
37 __u32 mbfc : 1; /* measurement block format control */ 37 u32 mbfc : 1; /* measurement block format control */
38 __u32 xmwme : 1; /* extended measurement word mode enable */ 38 u32 xmwme : 1; /* extended measurement word mode enable */
39 __u32 csense : 1; /* concurrent sense; can be enabled ...*/ 39 u32 csense : 1; /* concurrent sense; can be enabled ...*/
40 /* ... per MSCH, however, if facility */ 40 /* ... per MSCH, however, if facility */
41 /* ... is not installed, this results */ 41 /* ... is not installed, this results */
42 /* ... in an operand exception. */ 42 /* ... in an operand exception. */
@@ -52,31 +52,6 @@ struct schib {
52 __u8 mda[4]; /* model dependent area */ 52 __u8 mda[4]; /* model dependent area */
53} __attribute__ ((packed,aligned(4))); 53} __attribute__ ((packed,aligned(4)));
54 54
55/*
56 * operation request block
57 */
58struct orb {
59 __u32 intparm; /* interruption parameter */
60 __u32 key : 4; /* flags, like key, suspend control, etc. */
61 __u32 spnd : 1; /* suspend control */
62 __u32 res1 : 1; /* reserved */
63 __u32 mod : 1; /* modification control */
64 __u32 sync : 1; /* synchronize control */
65 __u32 fmt : 1; /* format control */
66 __u32 pfch : 1; /* prefetch control */
67 __u32 isic : 1; /* initial-status-interruption control */
68 __u32 alcc : 1; /* address-limit-checking control */
69 __u32 ssic : 1; /* suppress-suspended-interr. control */
70 __u32 res2 : 1; /* reserved */
71 __u32 c64 : 1; /* IDAW/QDIO 64 bit control */
72 __u32 i2k : 1; /* IDAW 2/4kB block size control */
73 __u32 lpm : 8; /* logical path mask */
74 __u32 ils : 1; /* incorrect length */
75 __u32 zero : 6; /* reserved zeros */
76 __u32 orbx : 1; /* ORB extension control */
77 __u32 cpa; /* channel program address */
78} __attribute__ ((packed,aligned(4)));
79
80/* subchannel data structure used by I/O subroutines */ 55/* subchannel data structure used by I/O subroutines */
81struct subchannel { 56struct subchannel {
82 struct subchannel_id schid; 57 struct subchannel_id schid;
@@ -85,7 +60,7 @@ struct subchannel {
85 enum { 60 enum {
86 SUBCHANNEL_TYPE_IO = 0, 61 SUBCHANNEL_TYPE_IO = 0,
87 SUBCHANNEL_TYPE_CHSC = 1, 62 SUBCHANNEL_TYPE_CHSC = 1,
88 SUBCHANNEL_TYPE_MESSAGE = 2, 63 SUBCHANNEL_TYPE_MSG = 2,
89 SUBCHANNEL_TYPE_ADM = 3, 64 SUBCHANNEL_TYPE_ADM = 3,
90 } st; /* subchannel type */ 65 } st; /* subchannel type */
91 66
@@ -99,11 +74,10 @@ struct subchannel {
99 __u8 lpm; /* logical path mask */ 74 __u8 lpm; /* logical path mask */
100 __u8 opm; /* operational path mask */ 75 __u8 opm; /* operational path mask */
101 struct schib schib; /* subchannel information block */ 76 struct schib schib; /* subchannel information block */
102 struct orb orb; /* operation request block */
103 struct ccw1 sense_ccw; /* static ccw for sense command */
104 struct chsc_ssd_info ssd_info; /* subchannel description */ 77 struct chsc_ssd_info ssd_info; /* subchannel description */
105 struct device dev; /* entry in device tree */ 78 struct device dev; /* entry in device tree */
106 struct css_driver *driver; 79 struct css_driver *driver;
80 void *private; /* private per subchannel type data */
107} __attribute__ ((aligned(8))); 81} __attribute__ ((aligned(8)));
108 82
109#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ 83#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
@@ -111,7 +85,7 @@ struct subchannel {
111#define to_subchannel(n) container_of(n, struct subchannel, dev) 85#define to_subchannel(n) container_of(n, struct subchannel, dev)
112 86
113extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); 87extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
114extern int cio_enable_subchannel (struct subchannel *, unsigned int); 88extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32);
115extern int cio_disable_subchannel (struct subchannel *); 89extern int cio_disable_subchannel (struct subchannel *);
116extern int cio_cancel (struct subchannel *); 90extern int cio_cancel (struct subchannel *);
117extern int cio_clear (struct subchannel *); 91extern int cio_clear (struct subchannel *);
@@ -125,6 +99,7 @@ extern int cio_get_options (struct subchannel *);
125extern int cio_modify (struct subchannel *); 99extern int cio_modify (struct subchannel *);
126 100
127int cio_create_sch_lock(struct subchannel *); 101int cio_create_sch_lock(struct subchannel *);
102void do_adapter_IO(void);
128 103
129/* Use with care. */ 104/* Use with care. */
130#ifdef CONFIG_CCW_CONSOLE 105#ifdef CONFIG_CCW_CONSOLE
@@ -133,10 +108,12 @@ extern void cio_release_console(void);
133extern int cio_is_console(struct subchannel_id); 108extern int cio_is_console(struct subchannel_id);
134extern struct subchannel *cio_get_console_subchannel(void); 109extern struct subchannel *cio_get_console_subchannel(void);
135extern spinlock_t * cio_get_console_lock(void); 110extern spinlock_t * cio_get_console_lock(void);
111extern void *cio_get_console_priv(void);
136#else 112#else
137#define cio_is_console(schid) 0 113#define cio_is_console(schid) 0
138#define cio_get_console_subchannel() NULL 114#define cio_get_console_subchannel() NULL
139#define cio_get_console_lock() NULL; 115#define cio_get_console_lock() NULL
116#define cio_get_console_priv() NULL
140#endif 117#endif
141 118
142extern int cio_show_msg; 119extern int cio_show_msg;
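The hunks above drop the ORB and sense CCW out of struct subchannel and widen cio_enable_subchannel() so the interruption parameter is passed explicitly. A minimal sketch of a caller under the new signature, following the convention visible in the device_fsm.c hunks further down (the subchannel address itself serves as intparm); enable_for_io() is a hypothetical helper, not part of this patch:

        static int enable_for_io(struct subchannel *sch)
        {
                /*
                 * The subchannel address doubles as the interruption
                 * parameter; the cast to u32 is safe because subchannels
                 * are always allocated below 2G.
                 */
                return cio_enable_subchannel(sch, sch->schib.pmcw.isc,
                                             (u32)(addr_t)sch);
        }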
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index c9bf8989930f..d7429ef6c666 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -8,20 +8,19 @@ extern debug_info_t *cio_debug_msg_id;
8extern debug_info_t *cio_debug_trace_id; 8extern debug_info_t *cio_debug_trace_id;
9extern debug_info_t *cio_debug_crw_id; 9extern debug_info_t *cio_debug_crw_id;
10 10
11#define CIO_TRACE_EVENT(imp, txt) do { \ 11#define CIO_TRACE_EVENT(imp, txt) do { \
12 debug_text_event(cio_debug_trace_id, imp, txt); \ 12 debug_text_event(cio_debug_trace_id, imp, txt); \
13 } while (0) 13 } while (0)
14 14
15#define CIO_MSG_EVENT(imp, args...) do { \ 15#define CIO_MSG_EVENT(imp, args...) do { \
16 debug_sprintf_event(cio_debug_msg_id, imp , ##args); \ 16 debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
17 } while (0) 17 } while (0)
18 18
19#define CIO_CRW_EVENT(imp, args...) do { \ 19#define CIO_CRW_EVENT(imp, args...) do { \
20 debug_sprintf_event(cio_debug_crw_id, imp , ##args); \ 20 debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
21 } while (0) 21 } while (0)
22 22
23static inline void 23static inline void CIO_HEX_EVENT(int level, void *data, int length)
24CIO_HEX_EVENT(int level, void *data, int length)
25{ 24{
26 if (unlikely(!cio_debug_trace_id)) 25 if (unlikely(!cio_debug_trace_id))
27 return; 26 return;
@@ -32,9 +31,10 @@ CIO_HEX_EVENT(int level, void *data, int length)
32 } 31 }
33} 32}
34 33
35#define CIO_DEBUG(printk_level,event_level,msg...) ({ \ 34#define CIO_DEBUG(printk_level, event_level, msg...) do { \
36 if (cio_show_msg) printk(printk_level msg); \ 35 if (cio_show_msg) \
37 CIO_MSG_EVENT (event_level, msg); \ 36 printk(printk_level "cio: " msg); \
38}) 37 CIO_MSG_EVENT(event_level, msg); \
38 } while (0)
39 39
40#endif 40#endif
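CIO_DEBUG also changes from a statement expression to a do/while block and now prepends "cio: " itself, so call sites drop the prefix from their format strings (the device_fsm.c hunks below do exactly that). A hedged sketch of a call site under the reworked macro; report_boxed() is hypothetical:

        static void report_boxed(struct ccw_device *cdev, struct subchannel *sch)
        {
                /* The "cio: " prefix now comes from the macro, not the caller. */
                CIO_DEBUG(KERN_WARNING, 2,
                          "Boxed device %04x on subchannel %04x\n",
                          cdev->private->dev_id.devno, sch->schid.sch_no);
        }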
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c3df2cd009a4..3b45bbe6cce0 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -51,6 +51,62 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
51 return ret; 51 return ret;
52} 52}
53 53
54struct cb_data {
55 void *data;
56 struct idset *set;
57 int (*fn_known_sch)(struct subchannel *, void *);
58 int (*fn_unknown_sch)(struct subchannel_id, void *);
59};
60
61static int call_fn_known_sch(struct device *dev, void *data)
62{
63 struct subchannel *sch = to_subchannel(dev);
64 struct cb_data *cb = data;
65 int rc = 0;
66
67 idset_sch_del(cb->set, sch->schid);
68 if (cb->fn_known_sch)
69 rc = cb->fn_known_sch(sch, cb->data);
70 return rc;
71}
72
73static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
74{
75 struct cb_data *cb = data;
76 int rc = 0;
77
78 if (idset_sch_contains(cb->set, schid))
79 rc = cb->fn_unknown_sch(schid, cb->data);
80 return rc;
81}
82
83int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
84 int (*fn_unknown)(struct subchannel_id,
85 void *), void *data)
86{
87 struct cb_data cb;
88 int rc;
89
90 cb.set = idset_sch_new();
91 if (!cb.set)
92 return -ENOMEM;
93 idset_fill(cb.set);
94 cb.data = data;
95 cb.fn_known_sch = fn_known;
96 cb.fn_unknown_sch = fn_unknown;
97 /* Process registered subchannels. */
98 rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
99 if (rc)
100 goto out;
101 /* Process unregistered subchannels. */
102 if (fn_unknown)
103 rc = for_each_subchannel(call_fn_unknown_sch, &cb);
104out:
105 idset_free(cb.set);
106
107 return rc;
108}
109
54static struct subchannel * 110static struct subchannel *
55css_alloc_subchannel(struct subchannel_id schid) 111css_alloc_subchannel(struct subchannel_id schid)
56{ 112{
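A sketch of the contract of the newly added for_each_subchannel_staged(): registered subchannels are visited first and deleted from the idset, so only the leftover subchannel ids reach the unknown-subchannel callback and nothing is processed twice. Both callbacks below are hypothetical; a nonzero return value aborts the respective walk:

        static int eval_known(struct subchannel *sch, void *data)
        {
                /* Runs once per registered subchannel. */
                return 0;
        }

        static int eval_unknown(struct subchannel_id schid, void *data)
        {
                /* Runs for every subchannel id not consumed by the first pass. */
                return 0;
        }

        static int scan_all_subchannels(void)
        {
                return for_each_subchannel_staged(eval_known, eval_unknown, NULL);
        }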
@@ -77,7 +133,7 @@ css_alloc_subchannel(struct subchannel_id schid)
77 * This is fine even on 64bit since the subchannel is always located 133 * This is fine even on 64bit since the subchannel is always located
78 * under 2G. 134 * under 2G.
79 */ 135 */
80 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 136 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
81 ret = cio_modify(sch); 137 ret = cio_modify(sch);
82 if (ret) { 138 if (ret) {
83 kfree(sch->lock); 139 kfree(sch->lock);
@@ -237,11 +293,25 @@ get_subchannel_by_schid(struct subchannel_id schid)
237 return dev ? to_subchannel(dev) : NULL; 293 return dev ? to_subchannel(dev) : NULL;
238} 294}
239 295
296/**
297 * css_sch_is_valid() - check if a subchannel is valid
298 * @schib: subchannel information block for the subchannel
299 */
300int css_sch_is_valid(struct schib *schib)
301{
302 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
303 return 0;
304 return 1;
305}
306EXPORT_SYMBOL_GPL(css_sch_is_valid);
307
240static int css_get_subchannel_status(struct subchannel *sch) 308static int css_get_subchannel_status(struct subchannel *sch)
241{ 309{
242 struct schib schib; 310 struct schib schib;
243 311
244 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) 312 if (stsch(sch->schid, &schib))
313 return CIO_GONE;
314 if (!css_sch_is_valid(&schib))
245 return CIO_GONE; 315 return CIO_GONE;
246 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) 316 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
247 return CIO_REVALIDATE; 317 return CIO_REVALIDATE;
@@ -293,7 +363,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
293 action = UNREGISTER; 363 action = UNREGISTER;
294 if (sch->driver && sch->driver->notify) { 364 if (sch->driver && sch->driver->notify) {
295 spin_unlock_irqrestore(sch->lock, flags); 365 spin_unlock_irqrestore(sch->lock, flags);
296 ret = sch->driver->notify(&sch->dev, event); 366 ret = sch->driver->notify(sch, event);
297 spin_lock_irqsave(sch->lock, flags); 367 spin_lock_irqsave(sch->lock, flags);
298 if (ret) 368 if (ret)
299 action = NONE; 369 action = NONE;
@@ -349,7 +419,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
349 /* Will be done on the slow path. */ 419 /* Will be done on the slow path. */
350 return -EAGAIN; 420 return -EAGAIN;
351 } 421 }
352 if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { 422 if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
353 /* Unusable - ignore. */ 423 /* Unusable - ignore. */
354 return 0; 424 return 0;
355 } 425 }
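css_sch_is_valid() centralizes the check that used to be open-coded against pmcw.dnv: an I/O subchannel without a valid device number is unusable, while other subchannel types pass. A hedged restatement of the resulting idiom, modeled on the css_evaluate_new_subchannel() hunk below; is_usable() is a hypothetical wrapper:

        static int is_usable(struct subchannel_id schid)
        {
                struct schib schib;

                if (stsch_err(schid, &schib))
                        return 0;       /* subchannel is gone */
                return css_sch_is_valid(&schib);
        }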
@@ -388,20 +458,56 @@ static int __init slow_subchannel_init(void)
388 return 0; 458 return 0;
389} 459}
390 460
391static void css_slow_path_func(struct work_struct *unused) 461static int slow_eval_known_fn(struct subchannel *sch, void *data)
392{ 462{
393 struct subchannel_id schid; 463 int eval;
464 int rc;
394 465
395 CIO_TRACE_EVENT(4, "slowpath");
396 spin_lock_irq(&slow_subchannel_lock); 466 spin_lock_irq(&slow_subchannel_lock);
397 init_subchannel_id(&schid); 467 eval = idset_sch_contains(slow_subchannel_set, sch->schid);
398 while (idset_sch_get_first(slow_subchannel_set, &schid)) { 468 idset_sch_del(slow_subchannel_set, sch->schid);
399 idset_sch_del(slow_subchannel_set, schid); 469 spin_unlock_irq(&slow_subchannel_lock);
400 spin_unlock_irq(&slow_subchannel_lock); 470 if (eval) {
401 css_evaluate_subchannel(schid, 1); 471 rc = css_evaluate_known_subchannel(sch, 1);
402 spin_lock_irq(&slow_subchannel_lock); 472 if (rc == -EAGAIN)
473 css_schedule_eval(sch->schid);
403 } 474 }
475 return 0;
476}
477
478static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
479{
480 int eval;
481 int rc = 0;
482
483 spin_lock_irq(&slow_subchannel_lock);
484 eval = idset_sch_contains(slow_subchannel_set, schid);
485 idset_sch_del(slow_subchannel_set, schid);
404 spin_unlock_irq(&slow_subchannel_lock); 486 spin_unlock_irq(&slow_subchannel_lock);
487 if (eval) {
488 rc = css_evaluate_new_subchannel(schid, 1);
489 switch (rc) {
490 case -EAGAIN:
491 css_schedule_eval(schid);
492 rc = 0;
493 break;
494 case -ENXIO:
495 case -ENOMEM:
496 case -EIO:
497 /* These should abort looping */
498 break;
499 default:
500 rc = 0;
501 }
502 }
503 return rc;
504}
505
506static void css_slow_path_func(struct work_struct *unused)
507{
508 CIO_TRACE_EVENT(4, "slowpath");
509 for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
510 NULL);
405} 511}
406 512
407static DECLARE_WORK(slow_path_work, css_slow_path_func); 513static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -430,7 +536,6 @@ void css_schedule_eval_all(void)
430/* Reprobe subchannel if unregistered. */ 536/* Reprobe subchannel if unregistered. */
431static int reprobe_subchannel(struct subchannel_id schid, void *data) 537static int reprobe_subchannel(struct subchannel_id schid, void *data)
432{ 538{
433 struct subchannel *sch;
434 int ret; 539 int ret;
435 540
436 CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n", 541 CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
@@ -438,13 +543,6 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
438 if (need_reprobe) 543 if (need_reprobe)
439 return -EAGAIN; 544 return -EAGAIN;
440 545
441 sch = get_subchannel_by_schid(schid);
442 if (sch) {
443 /* Already known. */
444 put_device(&sch->dev);
445 return 0;
446 }
447
448 ret = css_probe_device(schid); 546 ret = css_probe_device(schid);
449 switch (ret) { 547 switch (ret) {
450 case 0: 548 case 0:
@@ -472,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
472 /* Make sure initial subchannel scan is done. */ 570 /* Make sure initial subchannel scan is done. */
473 wait_event(ccw_device_init_wq, 571 wait_event(ccw_device_init_wq,
474 atomic_read(&ccw_device_init_count) == 0); 572 atomic_read(&ccw_device_init_count) == 0);
475 ret = for_each_subchannel(reprobe_subchannel, NULL); 573 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
476 574
477 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 575 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
478 need_reprobe); 576 need_reprobe);
@@ -787,8 +885,8 @@ int sch_is_pseudo_sch(struct subchannel *sch)
787static int 885static int
788css_bus_match (struct device *dev, struct device_driver *drv) 886css_bus_match (struct device *dev, struct device_driver *drv)
789{ 887{
790 struct subchannel *sch = container_of (dev, struct subchannel, dev); 888 struct subchannel *sch = to_subchannel(dev);
791 struct css_driver *driver = container_of (drv, struct css_driver, drv); 889 struct css_driver *driver = to_cssdriver(drv);
792 890
793 if (sch->st == driver->subchannel_type) 891 if (sch->st == driver->subchannel_type)
794 return 1; 892 return 1;
@@ -796,32 +894,36 @@ css_bus_match (struct device *dev, struct device_driver *drv)
796 return 0; 894 return 0;
797} 895}
798 896
799static int 897static int css_probe(struct device *dev)
800css_probe (struct device *dev)
801{ 898{
802 struct subchannel *sch; 899 struct subchannel *sch;
900 int ret;
803 901
804 sch = to_subchannel(dev); 902 sch = to_subchannel(dev);
805 sch->driver = container_of (dev->driver, struct css_driver, drv); 903 sch->driver = to_cssdriver(dev->driver);
806 return (sch->driver->probe ? sch->driver->probe(sch) : 0); 904 ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
905 if (ret)
906 sch->driver = NULL;
907 return ret;
807} 908}
808 909
809static int 910static int css_remove(struct device *dev)
810css_remove (struct device *dev)
811{ 911{
812 struct subchannel *sch; 912 struct subchannel *sch;
913 int ret;
813 914
814 sch = to_subchannel(dev); 915 sch = to_subchannel(dev);
815 return (sch->driver->remove ? sch->driver->remove(sch) : 0); 916 ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
917 sch->driver = NULL;
918 return ret;
816} 919}
817 920
818static void 921static void css_shutdown(struct device *dev)
819css_shutdown (struct device *dev)
820{ 922{
821 struct subchannel *sch; 923 struct subchannel *sch;
822 924
823 sch = to_subchannel(dev); 925 sch = to_subchannel(dev);
824 if (sch->driver->shutdown) 926 if (sch->driver && sch->driver->shutdown)
825 sch->driver->shutdown(sch); 927 sch->driver->shutdown(sch);
826} 928}
827 929
@@ -833,6 +935,34 @@ struct bus_type css_bus_type = {
833 .shutdown = css_shutdown, 935 .shutdown = css_shutdown,
834}; 936};
835 937
938/**
939 * css_driver_register - register a css driver
940 * @cdrv: css driver to register
941 *
942 * This is mainly a wrapper around driver_register that sets name
943 * and bus_type in the embedded struct device_driver correctly.
944 */
945int css_driver_register(struct css_driver *cdrv)
946{
947 cdrv->drv.name = cdrv->name;
948 cdrv->drv.bus = &css_bus_type;
949 cdrv->drv.owner = cdrv->owner;
950 return driver_register(&cdrv->drv);
951}
952EXPORT_SYMBOL_GPL(css_driver_register);
953
954/**
955 * css_driver_unregister - unregister a css driver
956 * @cdrv: css driver to unregister
957 *
958 * This is a wrapper around driver_unregister.
959 */
960void css_driver_unregister(struct css_driver *cdrv)
961{
962 driver_unregister(&cdrv->drv);
963}
964EXPORT_SYMBOL_GPL(css_driver_unregister);
965
836subsys_initcall(init_channel_subsystem); 966subsys_initcall(init_channel_subsystem);
837 967
838MODULE_LICENSE("GPL"); 968MODULE_LICENSE("GPL");
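With css_driver_register()/css_driver_unregister(), subchannel drivers no longer fill in the embedded device_driver by hand. A minimal, hypothetical driver skeleton under the new interface; the io_subchannel_driver conversion in device.c below is the real in-tree instance:

        static struct css_driver example_css_driver = {
                .owner           = THIS_MODULE,
                .subchannel_type = SUBCHANNEL_TYPE_IO,
                .name            = "example",  /* copied to drv.name by the helper */
        };

        static int __init example_init(void)
        {
                return css_driver_register(&example_css_driver);
        }

        static void __exit example_exit(void)
        {
                css_driver_unregister(&example_css_driver);
        }

        module_init(example_init);
        module_exit(example_exit);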
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 81215ef32435..b70554523552 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -58,64 +58,6 @@ struct pgid {
58 __u32 tod_high; /* high word TOD clock */ 58 __u32 tod_high; /* high word TOD clock */
59} __attribute__ ((packed)); 59} __attribute__ ((packed));
60 60
61#define MAX_CIWS 8
62
63/*
64 * sense-id response buffer layout
65 */
66struct senseid {
67 /* common part */
68 __u8 reserved; /* always 0x'FF' */
69 __u16 cu_type; /* control unit type */
70 __u8 cu_model; /* control unit model */
71 __u16 dev_type; /* device type */
72 __u8 dev_model; /* device model */
73 __u8 unused; /* padding byte */
74 /* extended part */
75 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
76} __attribute__ ((packed,aligned(4)));
77
78struct ccw_device_private {
79 struct ccw_device *cdev;
80 struct subchannel *sch;
81 int state; /* device state */
82 atomic_t onoff;
83 unsigned long registered;
84 struct ccw_dev_id dev_id; /* device id */
85 struct subchannel_id schid; /* subchannel number */
86 __u8 imask; /* lpm mask for SNID/SID/SPGID */
87 int iretry; /* retry counter SNID/SID/SPGID */
88 struct {
89 unsigned int fast:1; /* post with "channel end" */
90 unsigned int repall:1; /* report every interrupt status */
91 unsigned int pgroup:1; /* do path grouping */
92 unsigned int force:1; /* allow forced online */
93 } __attribute__ ((packed)) options;
94 struct {
95 unsigned int pgid_single:1; /* use single path for Set PGID */
96 unsigned int esid:1; /* Ext. SenseID supported by HW */
97 unsigned int dosense:1; /* delayed SENSE required */
98 unsigned int doverify:1; /* delayed path verification */
99 unsigned int donotify:1; /* call notify function */
100 unsigned int recog_done:1; /* dev. recog. complete */
101 unsigned int fake_irb:1; /* deliver faked irb */
102 unsigned int intretry:1; /* retry internal operation */
103 } __attribute__((packed)) flags;
104 unsigned long intparm; /* user interruption parameter */
105 struct qdio_irq *qdio_data;
106 struct irb irb; /* device status */
107 struct senseid senseid; /* SenseID info */
108 struct pgid pgid[8]; /* path group IDs per chpid*/
109 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
110 struct work_struct kick_work;
111 wait_queue_head_t wait_q;
112 struct timer_list timer;
113 void *cmb; /* measurement information */
114 struct list_head cmb_list; /* list of measured devices */
115 u64 cmb_start_time; /* clock value of cmb reset */
116 void *cmb_wait; /* deferred cmb enable/disable */
117};
118
119/* 61/*
120 * A css driver handles all subchannels of one type. 62 * A css driver handles all subchannels of one type.
121 * Currently, we only care about I/O subchannels (type 0), these 63 * Currently, we only care about I/O subchannels (type 0), these
@@ -123,25 +65,35 @@ struct ccw_device_private {
123 */ 65 */
124struct subchannel; 66struct subchannel;
125struct css_driver { 67struct css_driver {
68 struct module *owner;
126 unsigned int subchannel_type; 69 unsigned int subchannel_type;
127 struct device_driver drv; 70 struct device_driver drv;
128 void (*irq)(struct device *); 71 void (*irq)(struct subchannel *);
129 int (*notify)(struct device *, int); 72 int (*notify)(struct subchannel *, int);
130 void (*verify)(struct device *); 73 void (*verify)(struct subchannel *);
131 void (*termination)(struct device *); 74 void (*termination)(struct subchannel *);
132 int (*probe)(struct subchannel *); 75 int (*probe)(struct subchannel *);
133 int (*remove)(struct subchannel *); 76 int (*remove)(struct subchannel *);
134 void (*shutdown)(struct subchannel *); 77 void (*shutdown)(struct subchannel *);
78 const char *name;
135}; 79};
136 80
81#define to_cssdriver(n) container_of(n, struct css_driver, drv)
82
137/* 83/*
138 * all css_drivers have the css_bus_type 84 * all css_drivers have the css_bus_type
139 */ 85 */
140extern struct bus_type css_bus_type; 86extern struct bus_type css_bus_type;
141 87
88extern int css_driver_register(struct css_driver *);
89extern void css_driver_unregister(struct css_driver *);
90
142extern void css_sch_device_unregister(struct subchannel *); 91extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 92extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 93extern int css_init_done;
94int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
95 int (*fn_unknown)(struct subchannel_id,
96 void *), void *data);
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 97extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern void css_process_crw(int, int); 98extern void css_process_crw(int, int);
147extern void css_reiterate_subchannels(void); 99extern void css_reiterate_subchannels(void);
@@ -188,6 +140,8 @@ void css_schedule_eval(struct subchannel_id schid);
188void css_schedule_eval_all(void); 140void css_schedule_eval_all(void);
189 141
190int sch_is_pseudo_sch(struct subchannel *); 142int sch_is_pseudo_sch(struct subchannel *);
143struct schib;
144int css_sch_is_valid(struct schib *);
191 145
192extern struct workqueue_struct *slow_path_wq; 146extern struct workqueue_struct *slow_path_wq;
193 147
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 74f6b539974a..d35dc3f25d06 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -17,6 +17,7 @@
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/timer.h>
20 21
21#include <asm/ccwdev.h> 22#include <asm/ccwdev.h>
22#include <asm/cio.h> 23#include <asm/cio.h>
@@ -28,6 +29,12 @@
28#include "css.h" 29#include "css.h"
29#include "device.h" 30#include "device.h"
30#include "ioasm.h" 31#include "ioasm.h"
32#include "io_sch.h"
33
34static struct timer_list recovery_timer;
35static spinlock_t recovery_lock;
36static int recovery_phase;
37static const unsigned long recovery_delay[] = { 3, 30, 300 };
31 38
32/******************* bus type handling ***********************/ 39/******************* bus type handling ***********************/
33 40
@@ -115,19 +122,18 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
115 122
116struct bus_type ccw_bus_type; 123struct bus_type ccw_bus_type;
117 124
118static int io_subchannel_probe (struct subchannel *); 125static void io_subchannel_irq(struct subchannel *);
119static int io_subchannel_remove (struct subchannel *); 126static int io_subchannel_probe(struct subchannel *);
120static int io_subchannel_notify(struct device *, int); 127static int io_subchannel_remove(struct subchannel *);
121static void io_subchannel_verify(struct device *); 128static int io_subchannel_notify(struct subchannel *, int);
122static void io_subchannel_ioterm(struct device *); 129static void io_subchannel_verify(struct subchannel *);
130static void io_subchannel_ioterm(struct subchannel *);
123static void io_subchannel_shutdown(struct subchannel *); 131static void io_subchannel_shutdown(struct subchannel *);
124 132
125static struct css_driver io_subchannel_driver = { 133static struct css_driver io_subchannel_driver = {
134 .owner = THIS_MODULE,
126 .subchannel_type = SUBCHANNEL_TYPE_IO, 135 .subchannel_type = SUBCHANNEL_TYPE_IO,
127 .drv = { 136 .name = "io_subchannel",
128 .name = "io_subchannel",
129 .bus = &css_bus_type,
130 },
131 .irq = io_subchannel_irq, 137 .irq = io_subchannel_irq,
132 .notify = io_subchannel_notify, 138 .notify = io_subchannel_notify,
133 .verify = io_subchannel_verify, 139 .verify = io_subchannel_verify,
@@ -142,6 +148,8 @@ struct workqueue_struct *ccw_device_notify_work;
142wait_queue_head_t ccw_device_init_wq; 148wait_queue_head_t ccw_device_init_wq;
143atomic_t ccw_device_init_count; 149atomic_t ccw_device_init_count;
144 150
151static void recovery_func(unsigned long data);
152
145static int __init 153static int __init
146init_ccw_bus_type (void) 154init_ccw_bus_type (void)
147{ 155{
@@ -149,6 +157,7 @@ init_ccw_bus_type (void)
149 157
150 init_waitqueue_head(&ccw_device_init_wq); 158 init_waitqueue_head(&ccw_device_init_wq);
151 atomic_set(&ccw_device_init_count, 0); 159 atomic_set(&ccw_device_init_count, 0);
160 setup_timer(&recovery_timer, recovery_func, 0);
152 161
153 ccw_device_work = create_singlethread_workqueue("cio"); 162 ccw_device_work = create_singlethread_workqueue("cio");
154 if (!ccw_device_work) 163 if (!ccw_device_work)
@@ -166,7 +175,8 @@ init_ccw_bus_type (void)
166 if ((ret = bus_register (&ccw_bus_type))) 175 if ((ret = bus_register (&ccw_bus_type)))
167 goto out_err; 176 goto out_err;
168 177
169 if ((ret = driver_register(&io_subchannel_driver.drv))) 178 ret = css_driver_register(&io_subchannel_driver);
179 if (ret)
170 goto out_err; 180 goto out_err;
171 181
172 wait_event(ccw_device_init_wq, 182 wait_event(ccw_device_init_wq,
@@ -186,7 +196,7 @@ out_err:
186static void __exit 196static void __exit
187cleanup_ccw_bus_type (void) 197cleanup_ccw_bus_type (void)
188{ 198{
189 driver_unregister(&io_subchannel_driver.drv); 199 css_driver_unregister(&io_subchannel_driver);
190 bus_unregister(&ccw_bus_type); 200 bus_unregister(&ccw_bus_type);
191 destroy_workqueue(ccw_device_notify_work); 201 destroy_workqueue(ccw_device_notify_work);
192 destroy_workqueue(ccw_device_work); 202 destroy_workqueue(ccw_device_work);
@@ -773,7 +783,7 @@ static void sch_attach_device(struct subchannel *sch,
773{ 783{
774 css_update_ssd_info(sch); 784 css_update_ssd_info(sch);
775 spin_lock_irq(sch->lock); 785 spin_lock_irq(sch->lock);
776 sch->dev.driver_data = cdev; 786 sch_set_cdev(sch, cdev);
777 cdev->private->schid = sch->schid; 787 cdev->private->schid = sch->schid;
778 cdev->ccwlock = sch->lock; 788 cdev->ccwlock = sch->lock;
779 device_trigger_reprobe(sch); 789 device_trigger_reprobe(sch);
@@ -795,7 +805,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
795 put_device(&other_sch->dev); 805 put_device(&other_sch->dev);
796 return; 806 return;
797 } 807 }
798 other_sch->dev.driver_data = NULL; 808 sch_set_cdev(other_sch, NULL);
799 /* No need to keep a subchannel without ccw device around. */ 809 /* No need to keep a subchannel without ccw device around. */
800 css_sch_device_unregister(other_sch); 810 css_sch_device_unregister(other_sch);
801 put_device(&other_sch->dev); 811 put_device(&other_sch->dev);
@@ -831,12 +841,12 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
831 return; 841 return;
832 } 842 }
833 spin_lock_irq(sch->lock); 843 spin_lock_irq(sch->lock);
834 sch->dev.driver_data = cdev; 844 sch_set_cdev(sch, cdev);
835 spin_unlock_irq(sch->lock); 845 spin_unlock_irq(sch->lock);
836 /* Start recognition for the new ccw device. */ 846 /* Start recognition for the new ccw device. */
837 if (io_subchannel_recog(cdev, sch)) { 847 if (io_subchannel_recog(cdev, sch)) {
838 spin_lock_irq(sch->lock); 848 spin_lock_irq(sch->lock);
839 sch->dev.driver_data = NULL; 849 sch_set_cdev(sch, NULL);
840 spin_unlock_irq(sch->lock); 850 spin_unlock_irq(sch->lock);
841 if (cdev->dev.release) 851 if (cdev->dev.release)
842 cdev->dev.release(&cdev->dev); 852 cdev->dev.release(&cdev->dev);
@@ -940,7 +950,7 @@ io_subchannel_register(struct work_struct *work)
940 cdev->private->dev_id.devno, ret); 950 cdev->private->dev_id.devno, ret);
941 put_device(&cdev->dev); 951 put_device(&cdev->dev);
942 spin_lock_irqsave(sch->lock, flags); 952 spin_lock_irqsave(sch->lock, flags);
943 sch->dev.driver_data = NULL; 953 sch_set_cdev(sch, NULL);
944 spin_unlock_irqrestore(sch->lock, flags); 954 spin_unlock_irqrestore(sch->lock, flags);
945 kfree (cdev->private); 955 kfree (cdev->private);
946 kfree (cdev); 956 kfree (cdev);
@@ -1022,7 +1032,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1022 int rc; 1032 int rc;
1023 struct ccw_device_private *priv; 1033 struct ccw_device_private *priv;
1024 1034
1025 sch->dev.driver_data = cdev; 1035 sch_set_cdev(sch, cdev);
1026 sch->driver = &io_subchannel_driver; 1036 sch->driver = &io_subchannel_driver;
1027 cdev->ccwlock = sch->lock; 1037 cdev->ccwlock = sch->lock;
1028 1038
@@ -1082,7 +1092,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
1082 } 1092 }
1083 if (former_parent) { 1093 if (former_parent) {
1084 spin_lock_irq(former_parent->lock); 1094 spin_lock_irq(former_parent->lock);
1085 former_parent->dev.driver_data = NULL; 1095 sch_set_cdev(former_parent, NULL);
1086 spin_unlock_irq(former_parent->lock); 1096 spin_unlock_irq(former_parent->lock);
1087 css_sch_device_unregister(former_parent); 1097 css_sch_device_unregister(former_parent);
1088 /* Reset intparm to zeroes. */ 1098 /* Reset intparm to zeroes. */
@@ -1096,6 +1106,18 @@ out:
1096 put_device(&cdev->dev); 1106 put_device(&cdev->dev);
1097} 1107}
1098 1108
1109static void io_subchannel_irq(struct subchannel *sch)
1110{
1111 struct ccw_device *cdev;
1112
1113 cdev = sch_get_cdev(sch);
1114
1115 CIO_TRACE_EVENT(3, "IRQ");
1116 CIO_TRACE_EVENT(3, sch->dev.bus_id);
1117 if (cdev)
1118 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1119}
1120
1099static int 1121static int
1100io_subchannel_probe (struct subchannel *sch) 1122io_subchannel_probe (struct subchannel *sch)
1101{ 1123{
@@ -1104,13 +1126,13 @@ io_subchannel_probe (struct subchannel *sch)
1104 unsigned long flags; 1126 unsigned long flags;
1105 struct ccw_dev_id dev_id; 1127 struct ccw_dev_id dev_id;
1106 1128
1107 if (sch->dev.driver_data) { 1129 cdev = sch_get_cdev(sch);
1130 if (cdev) {
1108 /* 1131 /*
1109 * This subchannel already has an associated ccw_device. 1132 * This subchannel already has an associated ccw_device.
1110 * Register it and exit. This happens for all early 1133 * Register it and exit. This happens for all early
 1111 * devices, e.g. the console. 1134
1112 */ 1135 */
1113 cdev = sch->dev.driver_data;
1114 cdev->dev.groups = ccwdev_attr_groups; 1136 cdev->dev.groups = ccwdev_attr_groups;
1115 device_initialize(&cdev->dev); 1137 device_initialize(&cdev->dev);
1116 ccw_device_register(cdev); 1138 ccw_device_register(cdev);
@@ -1132,6 +1154,11 @@ io_subchannel_probe (struct subchannel *sch)
1132 */ 1154 */
1133 dev_id.devno = sch->schib.pmcw.dev; 1155 dev_id.devno = sch->schib.pmcw.dev;
1134 dev_id.ssid = sch->schid.ssid; 1156 dev_id.ssid = sch->schid.ssid;
1157 /* Allocate I/O subchannel private data. */
1158 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1159 GFP_KERNEL | GFP_DMA);
1160 if (!sch->private)
1161 return -ENOMEM;
1135 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1162 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1136 if (!cdev) 1163 if (!cdev)
1137 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1164 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1149,16 +1176,18 @@ io_subchannel_probe (struct subchannel *sch)
1149 return 0; 1176 return 0;
1150 } 1177 }
1151 cdev = io_subchannel_create_ccwdev(sch); 1178 cdev = io_subchannel_create_ccwdev(sch);
1152 if (IS_ERR(cdev)) 1179 if (IS_ERR(cdev)) {
1180 kfree(sch->private);
1153 return PTR_ERR(cdev); 1181 return PTR_ERR(cdev);
1154 1182 }
1155 rc = io_subchannel_recog(cdev, sch); 1183 rc = io_subchannel_recog(cdev, sch);
1156 if (rc) { 1184 if (rc) {
1157 spin_lock_irqsave(sch->lock, flags); 1185 spin_lock_irqsave(sch->lock, flags);
1158 sch->dev.driver_data = NULL; 1186 sch_set_cdev(sch, NULL);
1159 spin_unlock_irqrestore(sch->lock, flags); 1187 spin_unlock_irqrestore(sch->lock, flags);
1160 if (cdev->dev.release) 1188 if (cdev->dev.release)
1161 cdev->dev.release(&cdev->dev); 1189 cdev->dev.release(&cdev->dev);
1190 kfree(sch->private);
1162 } 1191 }
1163 1192
1164 return rc; 1193 return rc;
@@ -1170,25 +1199,25 @@ io_subchannel_remove (struct subchannel *sch)
1170 struct ccw_device *cdev; 1199 struct ccw_device *cdev;
1171 unsigned long flags; 1200 unsigned long flags;
1172 1201
1173 if (!sch->dev.driver_data) 1202 cdev = sch_get_cdev(sch);
1203 if (!cdev)
1174 return 0; 1204 return 0;
1175 cdev = sch->dev.driver_data;
1176 /* Set ccw device to not operational and drop reference. */ 1205 /* Set ccw device to not operational and drop reference. */
1177 spin_lock_irqsave(cdev->ccwlock, flags); 1206 spin_lock_irqsave(cdev->ccwlock, flags);
1178 sch->dev.driver_data = NULL; 1207 sch_set_cdev(sch, NULL);
1179 cdev->private->state = DEV_STATE_NOT_OPER; 1208 cdev->private->state = DEV_STATE_NOT_OPER;
1180 spin_unlock_irqrestore(cdev->ccwlock, flags); 1209 spin_unlock_irqrestore(cdev->ccwlock, flags);
1181 ccw_device_unregister(cdev); 1210 ccw_device_unregister(cdev);
1182 put_device(&cdev->dev); 1211 put_device(&cdev->dev);
1212 kfree(sch->private);
1183 return 0; 1213 return 0;
1184} 1214}
1185 1215
1186static int 1216static int io_subchannel_notify(struct subchannel *sch, int event)
1187io_subchannel_notify(struct device *dev, int event)
1188{ 1217{
1189 struct ccw_device *cdev; 1218 struct ccw_device *cdev;
1190 1219
1191 cdev = dev->driver_data; 1220 cdev = sch_get_cdev(sch);
1192 if (!cdev) 1221 if (!cdev)
1193 return 0; 1222 return 0;
1194 if (!cdev->drv) 1223 if (!cdev->drv)
@@ -1198,22 +1227,20 @@ io_subchannel_notify(struct device *dev, int event)
1198 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 1227 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
1199} 1228}
1200 1229
1201static void 1230static void io_subchannel_verify(struct subchannel *sch)
1202io_subchannel_verify(struct device *dev)
1203{ 1231{
1204 struct ccw_device *cdev; 1232 struct ccw_device *cdev;
1205 1233
1206 cdev = dev->driver_data; 1234 cdev = sch_get_cdev(sch);
1207 if (cdev) 1235 if (cdev)
1208 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1236 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1209} 1237}
1210 1238
1211static void 1239static void io_subchannel_ioterm(struct subchannel *sch)
1212io_subchannel_ioterm(struct device *dev)
1213{ 1240{
1214 struct ccw_device *cdev; 1241 struct ccw_device *cdev;
1215 1242
1216 cdev = dev->driver_data; 1243 cdev = sch_get_cdev(sch);
1217 if (!cdev) 1244 if (!cdev)
1218 return; 1245 return;
1219 /* Internal I/O will be retried by the interrupt handler. */ 1246 /* Internal I/O will be retried by the interrupt handler. */
@@ -1231,7 +1258,7 @@ io_subchannel_shutdown(struct subchannel *sch)
1231 struct ccw_device *cdev; 1258 struct ccw_device *cdev;
1232 int ret; 1259 int ret;
1233 1260
1234 cdev = sch->dev.driver_data; 1261 cdev = sch_get_cdev(sch);
1235 1262
1236 if (cio_is_console(sch->schid)) 1263 if (cio_is_console(sch->schid))
1237 return; 1264 return;
@@ -1271,6 +1298,9 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
1271{ 1298{
1272 int rc; 1299 int rc;
1273 1300
1301 /* Attach subchannel private data. */
1302 sch->private = cio_get_console_priv();
1303 memset(sch->private, 0, sizeof(struct io_subchannel_private));
1274 /* Initialize the ccw_device structure. */ 1304 /* Initialize the ccw_device structure. */
1275 cdev->dev.parent= &sch->dev; 1305 cdev->dev.parent= &sch->dev;
1276 rc = io_subchannel_recog(cdev, sch); 1306 rc = io_subchannel_recog(cdev, sch);
@@ -1456,6 +1486,7 @@ int ccw_driver_register(struct ccw_driver *cdriver)
1456 1486
1457 drv->bus = &ccw_bus_type; 1487 drv->bus = &ccw_bus_type;
1458 drv->name = cdriver->name; 1488 drv->name = cdriver->name;
1489 drv->owner = cdriver->owner;
1459 1490
1460 return driver_register(drv); 1491 return driver_register(drv);
1461} 1492}
@@ -1481,6 +1512,60 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
1481 return sch->schid; 1512 return sch->schid;
1482} 1513}
1483 1514
1515static int recovery_check(struct device *dev, void *data)
1516{
1517 struct ccw_device *cdev = to_ccwdev(dev);
1518 int *redo = data;
1519
1520 spin_lock_irq(cdev->ccwlock);
1521 switch (cdev->private->state) {
1522 case DEV_STATE_DISCONNECTED:
1523 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1524 cdev->private->dev_id.ssid,
1525 cdev->private->dev_id.devno);
1526 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1527 *redo = 1;
1528 break;
1529 case DEV_STATE_DISCONNECTED_SENSE_ID:
1530 *redo = 1;
1531 break;
1532 }
1533 spin_unlock_irq(cdev->ccwlock);
1534
1535 return 0;
1536}
1537
1538static void recovery_func(unsigned long data)
1539{
1540 int redo = 0;
1541
1542 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1543 if (redo) {
1544 spin_lock_irq(&recovery_lock);
1545 if (!timer_pending(&recovery_timer)) {
1546 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1547 recovery_phase++;
1548 mod_timer(&recovery_timer, jiffies +
1549 recovery_delay[recovery_phase] * HZ);
1550 }
1551 spin_unlock_irq(&recovery_lock);
1552 } else
1553 CIO_MSG_EVENT(2, "recovery: end\n");
1554}
1555
1556void ccw_device_schedule_recovery(void)
1557{
1558 unsigned long flags;
1559
1560 CIO_MSG_EVENT(2, "recovery: schedule\n");
1561 spin_lock_irqsave(&recovery_lock, flags);
1562 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1563 recovery_phase = 0;
1564 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1565 }
1566 spin_unlock_irqrestore(&recovery_lock, flags);
1567}
1568
1484MODULE_LICENSE("GPL"); 1569MODULE_LICENSE("GPL");
1485EXPORT_SYMBOL(ccw_device_set_online); 1570EXPORT_SYMBOL(ccw_device_set_online);
1486EXPORT_SYMBOL(ccw_device_set_offline); 1571EXPORT_SYMBOL(ccw_device_set_offline);
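The new recovery machinery retries disconnected devices with a growing delay, stepping through the 3, 30 and 300 second entries of recovery_delay[] and then staying at the last one. A small sketch restating the arithmetic of recovery_func() above; next_recovery_jiffies() is hypothetical:

        static unsigned long next_recovery_jiffies(int phase)
        {
                static const unsigned long delay[] = { 3, 30, 300 };    /* seconds */

                /* Clamp to the last entry, as recovery_func() does. */
                if (phase >= ARRAY_SIZE(delay))
                        phase = ARRAY_SIZE(delay) - 1;
                return jiffies + delay[phase] * HZ;
        }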
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0d4089600439..d40a2ffaa000 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -5,6 +5,8 @@
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7 7
8#include "io_sch.h"
9
8/* 10/*
9 * states of the device statemachine 11 * states of the device statemachine
10 */ 12 */
@@ -74,7 +76,6 @@ extern struct workqueue_struct *ccw_device_notify_work;
74extern wait_queue_head_t ccw_device_init_wq; 76extern wait_queue_head_t ccw_device_init_wq;
75extern atomic_t ccw_device_init_count; 77extern atomic_t ccw_device_init_count;
76 78
77void io_subchannel_irq (struct device *pdev);
78void io_subchannel_recog_done(struct ccw_device *cdev); 79void io_subchannel_recog_done(struct ccw_device *cdev);
79 80
80int ccw_device_cancel_halt_clear(struct ccw_device *); 81int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -87,6 +88,8 @@ int ccw_device_recognition(struct ccw_device *);
87int ccw_device_online(struct ccw_device *); 88int ccw_device_online(struct ccw_device *);
88int ccw_device_offline(struct ccw_device *); 89int ccw_device_offline(struct ccw_device *);
89 90
91void ccw_device_schedule_recovery(void);
92
90/* Function prototypes for device status and basic sense stuff. */ 93/* Function prototypes for device status and basic sense stuff. */
91void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 94void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
92void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); 95void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
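device.h now pulls in io_sch.h, whose sch_get_cdev()/sch_set_cdev() wrappers (plain dev_get_drvdata()/dev_set_drvdata() underneath) replace every raw sch->dev.driver_data access in this patch. A hedged sketch of the common fetch-and-bail idiom, modeled on device_is_online() in the device_fsm.c hunk below; the function name is hypothetical:

        static int sch_device_is_online(struct subchannel *sch)
        {
                struct ccw_device *cdev;

                cdev = sch_get_cdev(sch);
                if (!cdev)
                        return 0;       /* no ccw device bound to this sch */
                return cdev->private->state == DEV_STATE_ONLINE;
        }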
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index bfad421cda66..4b92c84fb438 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -25,14 +25,16 @@
25#include "ioasm.h" 25#include "ioasm.h"
26#include "chp.h" 26#include "chp.h"
27 27
28static int timeout_log_enabled;
29
28int 30int
29device_is_online(struct subchannel *sch) 31device_is_online(struct subchannel *sch)
30{ 32{
31 struct ccw_device *cdev; 33 struct ccw_device *cdev;
32 34
33 if (!sch->dev.driver_data) 35 cdev = sch_get_cdev(sch);
36 if (!cdev)
34 return 0; 37 return 0;
35 cdev = sch->dev.driver_data;
36 return (cdev->private->state == DEV_STATE_ONLINE); 38 return (cdev->private->state == DEV_STATE_ONLINE);
37} 39}
38 40
@@ -41,9 +43,9 @@ device_is_disconnected(struct subchannel *sch)
41{ 43{
42 struct ccw_device *cdev; 44 struct ccw_device *cdev;
43 45
44 if (!sch->dev.driver_data) 46 cdev = sch_get_cdev(sch);
47 if (!cdev)
45 return 0; 48 return 0;
46 cdev = sch->dev.driver_data;
47 return (cdev->private->state == DEV_STATE_DISCONNECTED || 49 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
48 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); 50 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
49} 51}
@@ -53,19 +55,21 @@ device_set_disconnected(struct subchannel *sch)
53{ 55{
54 struct ccw_device *cdev; 56 struct ccw_device *cdev;
55 57
56 if (!sch->dev.driver_data) 58 cdev = sch_get_cdev(sch);
59 if (!cdev)
57 return; 60 return;
58 cdev = sch->dev.driver_data;
59 ccw_device_set_timeout(cdev, 0); 61 ccw_device_set_timeout(cdev, 0);
60 cdev->private->flags.fake_irb = 0; 62 cdev->private->flags.fake_irb = 0;
61 cdev->private->state = DEV_STATE_DISCONNECTED; 63 cdev->private->state = DEV_STATE_DISCONNECTED;
64 if (cdev->online)
65 ccw_device_schedule_recovery();
62} 66}
63 67
64void device_set_intretry(struct subchannel *sch) 68void device_set_intretry(struct subchannel *sch)
65{ 69{
66 struct ccw_device *cdev; 70 struct ccw_device *cdev;
67 71
68 cdev = sch->dev.driver_data; 72 cdev = sch_get_cdev(sch);
69 if (!cdev) 73 if (!cdev)
70 return; 74 return;
71 cdev->private->flags.intretry = 1; 75 cdev->private->flags.intretry = 1;
@@ -75,13 +79,62 @@ int device_trigger_verify(struct subchannel *sch)
75{ 79{
76 struct ccw_device *cdev; 80 struct ccw_device *cdev;
77 81
78 cdev = sch->dev.driver_data; 82 cdev = sch_get_cdev(sch);
79 if (!cdev || !cdev->online) 83 if (!cdev || !cdev->online)
80 return -EINVAL; 84 return -EINVAL;
81 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 85 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
82 return 0; 86 return 0;
83} 87}
84 88
89static int __init ccw_timeout_log_setup(char *unused)
90{
91 timeout_log_enabled = 1;
92 return 1;
93}
94
95__setup("ccw_timeout_log", ccw_timeout_log_setup);
96
97static void ccw_timeout_log(struct ccw_device *cdev)
98{
99 struct schib schib;
100 struct subchannel *sch;
101 struct io_subchannel_private *private;
102 int cc;
103
104 sch = to_subchannel(cdev->dev.parent);
105 private = to_io_private(sch);
106 cc = stsch(sch->schid, &schib);
107
108 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
109 "device information:\n", get_clock());
110 printk(KERN_WARNING "cio: orb:\n");
111 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
112 &private->orb, sizeof(private->orb), 0);
113 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
114 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
115 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
116 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
117
118 if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
119 (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
120 printk(KERN_WARNING "cio: last channel program (intern):\n");
121 else
122 printk(KERN_WARNING "cio: last channel program:\n");
123
124 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
125 (void *)(addr_t)private->orb.cpa,
126 sizeof(struct ccw1), 0);
127 printk(KERN_WARNING "cio: ccw device state: %d\n",
128 cdev->private->state);
129 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
130 printk(KERN_WARNING "cio: schib:\n");
131 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
132 &schib, sizeof(schib), 0);
133 printk(KERN_WARNING "cio: ccw device flags:\n");
134 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
135 &cdev->private->flags, sizeof(cdev->private->flags), 0);
136}
137
85/* 138/*
86 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. 139 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
87 */ 140 */
@@ -92,6 +145,8 @@ ccw_device_timeout(unsigned long data)
92 145
93 cdev = (struct ccw_device *) data; 146 cdev = (struct ccw_device *) data;
94 spin_lock_irq(cdev->ccwlock); 147 spin_lock_irq(cdev->ccwlock);
148 if (timeout_log_enabled)
149 ccw_timeout_log(cdev);
95 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT); 150 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
96 spin_unlock_irq(cdev->ccwlock); 151 spin_unlock_irq(cdev->ccwlock);
97} 152}
@@ -122,9 +177,9 @@ device_kill_pending_timer(struct subchannel *sch)
122{ 177{
123 struct ccw_device *cdev; 178 struct ccw_device *cdev;
124 179
125 if (!sch->dev.driver_data) 180 cdev = sch_get_cdev(sch);
181 if (!cdev)
126 return; 182 return;
127 cdev = sch->dev.driver_data;
128 ccw_device_set_timeout(cdev, 0); 183 ccw_device_set_timeout(cdev, 0);
129} 184}
130 185
@@ -268,7 +323,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
268 switch (state) { 323 switch (state) {
269 case DEV_STATE_NOT_OPER: 324 case DEV_STATE_NOT_OPER:
270 CIO_DEBUG(KERN_WARNING, 2, 325 CIO_DEBUG(KERN_WARNING, 2,
271 "cio: SenseID : unknown device %04x on subchannel " 326 "SenseID : unknown device %04x on subchannel "
272 "0.%x.%04x\n", cdev->private->dev_id.devno, 327 "0.%x.%04x\n", cdev->private->dev_id.devno,
273 sch->schid.ssid, sch->schid.sch_no); 328 sch->schid.ssid, sch->schid.sch_no);
274 break; 329 break;
@@ -294,7 +349,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
294 } 349 }
295 /* Issue device info message. */ 350 /* Issue device info message. */
296 CIO_DEBUG(KERN_INFO, 2, 351 CIO_DEBUG(KERN_INFO, 2,
297 "cio: SenseID : device 0.%x.%04x reports: " 352 "SenseID : device 0.%x.%04x reports: "
298 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 353 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
299 "%04X/%02X\n", 354 "%04X/%02X\n",
300 cdev->private->dev_id.ssid, 355 cdev->private->dev_id.ssid,
@@ -304,7 +359,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
304 break; 359 break;
305 case DEV_STATE_BOXED: 360 case DEV_STATE_BOXED:
306 CIO_DEBUG(KERN_WARNING, 2, 361 CIO_DEBUG(KERN_WARNING, 2,
307 "cio: SenseID : boxed device %04x on subchannel " 362 "SenseID : boxed device %04x on subchannel "
308 "0.%x.%04x\n", cdev->private->dev_id.devno, 363 "0.%x.%04x\n", cdev->private->dev_id.devno,
309 sch->schid.ssid, sch->schid.sch_no); 364 sch->schid.ssid, sch->schid.sch_no);
310 break; 365 break;
@@ -349,7 +404,7 @@ ccw_device_oper_notify(struct work_struct *work)
349 sch = to_subchannel(cdev->dev.parent); 404 sch = to_subchannel(cdev->dev.parent);
350 if (sch->driver && sch->driver->notify) { 405 if (sch->driver && sch->driver->notify) {
351 spin_unlock_irqrestore(cdev->ccwlock, flags); 406 spin_unlock_irqrestore(cdev->ccwlock, flags);
352 ret = sch->driver->notify(&sch->dev, CIO_OPER); 407 ret = sch->driver->notify(sch, CIO_OPER);
353 spin_lock_irqsave(cdev->ccwlock, flags); 408 spin_lock_irqsave(cdev->ccwlock, flags);
354 } else 409 } else
355 ret = 0; 410 ret = 0;
@@ -389,7 +444,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
389 444
390 if (state == DEV_STATE_BOXED) 445 if (state == DEV_STATE_BOXED)
391 CIO_DEBUG(KERN_WARNING, 2, 446 CIO_DEBUG(KERN_WARNING, 2,
392 "cio: Boxed device %04x on subchannel %04x\n", 447 "Boxed device %04x on subchannel %04x\n",
393 cdev->private->dev_id.devno, sch->schid.sch_no); 448 cdev->private->dev_id.devno, sch->schid.sch_no);
394 449
395 if (cdev->private->flags.donotify) { 450 if (cdev->private->flags.donotify) {
@@ -500,7 +555,8 @@ ccw_device_recognition(struct ccw_device *cdev)
500 (cdev->private->state != DEV_STATE_BOXED)) 555 (cdev->private->state != DEV_STATE_BOXED))
501 return -EINVAL; 556 return -EINVAL;
502 sch = to_subchannel(cdev->dev.parent); 557 sch = to_subchannel(cdev->dev.parent);
503 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); 558 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
559 (u32)(addr_t)sch);
504 if (ret != 0) 560 if (ret != 0)
505 /* Couldn't enable the subchannel for i/o. Sick device. */ 561 /* Couldn't enable the subchannel for i/o. Sick device. */
506 return ret; 562 return ret;
@@ -587,9 +643,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
587 default: 643 default:
588 /* Reset oper notify indication after verify error. */ 644 /* Reset oper notify indication after verify error. */
589 cdev->private->flags.donotify = 0; 645 cdev->private->flags.donotify = 0;
590 if (cdev->online) 646 if (cdev->online) {
647 ccw_device_set_timeout(cdev, 0);
591 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 648 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
592 else 649 } else
593 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 650 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
594 break; 651 break;
595 } 652 }
@@ -610,7 +667,8 @@ ccw_device_online(struct ccw_device *cdev)
610 sch = to_subchannel(cdev->dev.parent); 667 sch = to_subchannel(cdev->dev.parent);
611 if (css_init_done && !get_device(&cdev->dev)) 668 if (css_init_done && !get_device(&cdev->dev))
612 return -ENODEV; 669 return -ENODEV;
613 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); 670 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
671 (u32)(addr_t)sch);
614 if (ret != 0) { 672 if (ret != 0) {
615 /* Couldn't enable the subchannel for i/o. Sick device. */ 673 /* Couldn't enable the subchannel for i/o. Sick device. */
616 if (ret == -ENODEV) 674 if (ret == -ENODEV)
@@ -937,7 +995,7 @@ void device_kill_io(struct subchannel *sch)
937 int ret; 995 int ret;
938 struct ccw_device *cdev; 996 struct ccw_device *cdev;
939 997
940 cdev = sch->dev.driver_data; 998 cdev = sch_get_cdev(sch);
941 ret = ccw_device_cancel_halt_clear(cdev); 999 ret = ccw_device_cancel_halt_clear(cdev);
942 if (ret == -EBUSY) { 1000 if (ret == -EBUSY) {
943 ccw_device_set_timeout(cdev, 3*HZ); 1001 ccw_device_set_timeout(cdev, 3*HZ);
@@ -990,7 +1048,8 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
990 struct subchannel *sch; 1048 struct subchannel *sch;
991 1049
992 sch = to_subchannel(cdev->dev.parent); 1050 sch = to_subchannel(cdev->dev.parent);
993 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0) 1051 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc,
1052 (u32)(addr_t)sch) != 0)
994 /* Couldn't enable the subchannel for i/o. Sick device. */ 1053 /* Couldn't enable the subchannel for i/o. Sick device. */
995 return; 1054 return;
996 1055
@@ -1006,9 +1065,9 @@ device_trigger_reprobe(struct subchannel *sch)
1006{ 1065{
1007 struct ccw_device *cdev; 1066 struct ccw_device *cdev;
1008 1067
1009 if (!sch->dev.driver_data) 1068 cdev = sch_get_cdev(sch);
1069 if (!cdev)
1010 return; 1070 return;
1011 cdev = sch->dev.driver_data;
1012 if (cdev->private->state != DEV_STATE_DISCONNECTED) 1071 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1013 return; 1072 return;
1014 1073
@@ -1028,7 +1087,7 @@ device_trigger_reprobe(struct subchannel *sch)
1028 sch->schib.pmcw.ena = 0; 1087 sch->schib.pmcw.ena = 0;
1029 if ((sch->lpm & (sch->lpm - 1)) != 0) 1088 if ((sch->lpm & (sch->lpm - 1)) != 0)
1030 sch->schib.pmcw.mp = 1; 1089 sch->schib.pmcw.mp = 1;
1031 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 1090 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
 1032 /* We should also update ssd info, but this has to wait. */ 1091
1033 /* Check if this is another device which appeared on the same sch. */ 1092 /* Check if this is another device which appeared on the same sch. */
1034 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1093 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
@@ -1223,21 +1282,4 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1223 }, 1282 },
1224}; 1283};
1225 1284
1226/*
1227 * io_subchannel_irq is called for "real" interrupts or for status
1228 * pending conditions on msch.
1229 */
1230void
1231io_subchannel_irq (struct device *pdev)
1232{
1233 struct ccw_device *cdev;
1234
1235 cdev = to_subchannel(pdev)->dev.driver_data;
1236
1237 CIO_TRACE_EVENT (3, "IRQ");
1238 CIO_TRACE_EVENT (3, pdev->bus_id);
1239 if (cdev)
1240 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1241}
1242
1243EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1285EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
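ccw_timeout_log is wired up through __setup(), so booting with ccw_timeout_log on the kernel command line flips the flag before any device timer can fire. A generic sketch of that boot-parameter pattern with hypothetical names:

        static int example_flag;

        static int __init example_flag_setup(char *unused)
        {
                example_flag = 1;
                return 1;       /* 1 tells the boot code the option was consumed */
        }
        __setup("example_flag", example_flag_setup);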
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 156f3f9786b5..918b8b89cf9a 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -24,6 +24,7 @@
24#include "css.h" 24#include "css.h"
25#include "device.h" 25#include "device.h"
26#include "ioasm.h" 26#include "ioasm.h"
27#include "io_sch.h"
27 28
28/* 29/*
29 * Input : 30 * Input :
@@ -219,11 +220,13 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
219 return -EAGAIN; 220 return -EAGAIN;
220 } 221 }
221 if (irb->scsw.cc == 3) { 222 if (irb->scsw.cc == 3) {
222 if ((sch->orb.lpm & 223 u8 lpm;
223 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 224
225 lpm = to_io_private(sch)->orb.lpm;
226 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
224 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " 227 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
225 "on subchannel 0.%x.%04x is " 228 "on subchannel 0.%x.%04x is "
226 "'not operational'\n", sch->orb.lpm, 229 "'not operational'\n", lpm,
227 cdev->private->dev_id.devno, 230 cdev->private->dev_id.devno,
228 sch->schid.ssid, sch->schid.sch_no); 231 sch->schid.ssid, sch->schid.sch_no);
229 return -EACCES; 232 return -EACCES;
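Since the ORB now lives in io_subchannel_private, the cc==3 paths here and in the device_pgid.c hunk below fetch the last-used logical path mask through to_io_private() instead of reading it from struct subchannel. A hedged one-liner restating the accessor; read_last_lpm() is hypothetical:

        static u8 read_last_lpm(struct subchannel *sch)
        {
                /* to_io_private() casts sch->private to io_subchannel_private. */
                return to_io_private(sch)->orb.lpm;
        }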
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7fd2dadc3297..49b58eb0fab8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -501,7 +501,7 @@ ccw_device_stlck(struct ccw_device *cdev)
501 return -ENOMEM; 501 return -ENOMEM;
502 } 502 }
503 spin_lock_irqsave(sch->lock, flags); 503 spin_lock_irqsave(sch->lock, flags);
504 ret = cio_enable_subchannel(sch, 3); 504 ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch);
505 if (ret) 505 if (ret)
506 goto out_unlock; 506 goto out_unlock;
507 /* 507 /*
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index cb1879a96818..c52449a1f9fc 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
22#include "css.h" 22#include "css.h"
23#include "device.h" 23#include "device.h"
24#include "ioasm.h" 24#include "ioasm.h"
25#include "io_sch.h"
25 26
26/* 27/*
27 * Helper function called from interrupt context to decide whether an 28 * Helper function called from interrupt context to decide whether an
@@ -155,10 +156,13 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
155 return -EAGAIN; 156 return -EAGAIN;
156 } 157 }
157 if (irb->scsw.cc == 3) { 158 if (irb->scsw.cc == 3) {
159 u8 lpm;
160
161 lpm = to_io_private(sch)->orb.lpm;
158 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
159 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
160 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
161 sch->schid.sch_no, sch->orb.lpm); 165 sch->schid.sch_no, lpm);
162 return -EACCES; 166 return -EACCES;
163 } 167 }
164 i = 8 - ffs(cdev->private->imask); 168 i = 8 - ffs(cdev->private->imask);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index aa96e6752592..ebe0848cfe33 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -20,6 +20,7 @@
20#include "css.h" 20#include "css.h"
21#include "device.h" 21#include "device.h"
22#include "ioasm.h" 22#include "ioasm.h"
23#include "io_sch.h"
23 24
24/* 25/*
25 * Check for any kind of channel or interface control check but don't 26 * Check for any kind of channel or interface control check but don't
@@ -310,6 +311,7 @@ int
310ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) 311ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
311{ 312{
312 struct subchannel *sch; 313 struct subchannel *sch;
314 struct ccw1 *sense_ccw;
313 315
314 sch = to_subchannel(cdev->dev.parent); 316 sch = to_subchannel(cdev->dev.parent);
315 317
@@ -326,15 +328,16 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
326 /* 328 /*
327 * We have ending status but no sense information. Do a basic sense. 329 * We have ending status but no sense information. Do a basic sense.
328 */ 330 */
329 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; 331 sense_ccw = &to_io_private(sch)->sense_ccw;
330 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); 332 sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
331 sch->sense_ccw.count = SENSE_MAX_COUNT; 333 sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
332 sch->sense_ccw.flags = CCW_FLAG_SLI; 334 sense_ccw->count = SENSE_MAX_COUNT;
335 sense_ccw->flags = CCW_FLAG_SLI;
333 336
334 /* Reset internal retry indication. */ 337 /* Reset internal retry indication. */
335 cdev->private->flags.intretry = 0; 338 cdev->private->flags.intretry = 0;
336 339
337 return cio_start (sch, &sch->sense_ccw, 0xff); 340 return cio_start(sch, sense_ccw, 0xff);
338} 341}
339 342
340/* 343/*
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
new file mode 100644
index 000000000000..8c613160bfce
--- /dev/null
+++ b/drivers/s390/cio/io_sch.h
@@ -0,0 +1,163 @@
1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H
3
4#include "schid.h"
5
6/*
7 * operation request block
8 */
9struct orb {
10 u32 intparm; /* interruption parameter */
11 u32 key : 4; /* flags, like key, suspend control, etc. */
12 u32 spnd : 1; /* suspend control */
13 u32 res1 : 1; /* reserved */
14 u32 mod : 1; /* modification control */
15 u32 sync : 1; /* synchronize control */
16 u32 fmt : 1; /* format control */
17 u32 pfch : 1; /* prefetch control */
18 u32 isic : 1; /* initial-status-interruption control */
19 u32 alcc : 1; /* address-limit-checking control */
20 u32 ssic : 1; /* suppress-suspended-interr. control */
21 u32 res2 : 1; /* reserved */
22 u32 c64 : 1; /* IDAW/QDIO 64 bit control */
23 u32 i2k : 1; /* IDAW 2/4kB block size control */
24 u32 lpm : 8; /* logical path mask */
25 u32 ils : 1; /* incorrect length */
26 u32 zero : 6; /* reserved zeros */
27 u32 orbx : 1; /* ORB extension control */
28 u32 cpa; /* channel program address */
29} __attribute__ ((packed, aligned(4)));
30
31struct io_subchannel_private {
32 struct orb orb; /* operation request block */
33 struct ccw1 sense_ccw; /* static ccw for sense command */
34} __attribute__ ((aligned(8)));
35
36#define to_io_private(n) ((struct io_subchannel_private *)n->private)
37#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
38#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
39
40#define MAX_CIWS 8
41
42/*
43 * sense-id response buffer layout
44 */
45struct senseid {
46 /* common part */
47 u8 reserved; /* always 0x'FF' */
48 u16 cu_type; /* control unit type */
49 u8 cu_model; /* control unit model */
50 u16 dev_type; /* device type */
51 u8 dev_model; /* device model */
52 u8 unused; /* padding byte */
53 /* extended part */
54 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
55} __attribute__ ((packed, aligned(4)));
56
57struct ccw_device_private {
58 struct ccw_device *cdev;
59 struct subchannel *sch;
60 int state; /* device state */
61 atomic_t onoff;
62 unsigned long registered;
63 struct ccw_dev_id dev_id; /* device id */
64 struct subchannel_id schid; /* subchannel number */
65 u8 imask; /* lpm mask for SNID/SID/SPGID */
66 int iretry; /* retry counter SNID/SID/SPGID */
67 struct {
68 unsigned int fast:1; /* post with "channel end" */
69 unsigned int repall:1; /* report every interrupt status */
70 unsigned int pgroup:1; /* do path grouping */
71 unsigned int force:1; /* allow forced online */
72 } __attribute__ ((packed)) options;
73 struct {
74 unsigned int pgid_single:1; /* use single path for Set PGID */
75 unsigned int esid:1; /* Ext. SenseID supported by HW */
76 unsigned int dosense:1; /* delayed SENSE required */
77 unsigned int doverify:1; /* delayed path verification */
78 unsigned int donotify:1; /* call notify function */
79 unsigned int recog_done:1; /* dev. recog. complete */
80 unsigned int fake_irb:1; /* deliver faked irb */
81 unsigned int intretry:1; /* retry internal operation */
82 } __attribute__((packed)) flags;
83 unsigned long intparm; /* user interruption parameter */
84 struct qdio_irq *qdio_data;
85 struct irb irb; /* device status */
86 struct senseid senseid; /* SenseID info */
87 struct pgid pgid[8]; /* path group IDs per chpid*/
88 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
89 struct work_struct kick_work;
90 wait_queue_head_t wait_q;
91 struct timer_list timer;
92 void *cmb; /* measurement information */
93 struct list_head cmb_list; /* list of measured devices */
94 u64 cmb_start_time; /* clock value of cmb reset */
95 void *cmb_wait; /* deferred cmb enable/disable */
96};
97
98static inline int ssch(struct subchannel_id schid, volatile struct orb *addr)
99{
100 register struct subchannel_id reg1 asm("1") = schid;
101 int ccode;
102
103 asm volatile(
104 " ssch 0(%2)\n"
105 " ipm %0\n"
106 " srl %0,28"
107 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
108 return ccode;
109}
110
111static inline int rsch(struct subchannel_id schid)
112{
113 register struct subchannel_id reg1 asm("1") = schid;
114 int ccode;
115
116 asm volatile(
117 " rsch\n"
118 " ipm %0\n"
119 " srl %0,28"
120 : "=d" (ccode) : "d" (reg1) : "cc");
121 return ccode;
122}
123
124static inline int csch(struct subchannel_id schid)
125{
126 register struct subchannel_id reg1 asm("1") = schid;
127 int ccode;
128
129 asm volatile(
130 " csch\n"
131 " ipm %0\n"
132 " srl %0,28"
133 : "=d" (ccode) : "d" (reg1) : "cc");
134 return ccode;
135}
136
137static inline int hsch(struct subchannel_id schid)
138{
139 register struct subchannel_id reg1 asm("1") = schid;
140 int ccode;
141
142 asm volatile(
143 " hsch\n"
144 " ipm %0\n"
145 " srl %0,28"
146 : "=d" (ccode) : "d" (reg1) : "cc");
147 return ccode;
148}
149
150static inline int xsch(struct subchannel_id schid)
151{
152 register struct subchannel_id reg1 asm("1") = schid;
153 int ccode;
154
155 asm volatile(
156 " .insn rre,0xb2760000,%1,0\n"
157 " ipm %0\n"
158 " srl %0,28"
159 : "=d" (ccode) : "d" (reg1) : "cc");
160 return ccode;
161}
162
163#endif
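
[Every helper in this new header ends with the same two instructions: ipm copies the PSW condition code and program mask into the top byte of the result word, and srl ...,28 shifts so the two condition-code bits become the return value. A hedged C restatement of what that pair computes; the function name is ours:]

#include <stdio.h>

static inline int cc_from_ipm_word(unsigned int w)
{
	/* ipm sets the word's top byte to 00|CC|program-mask, so a
	 * 28-bit right shift leaves just the condition code 0..3. */
	return w >> 28;
}

int main(void)
{
	/* e.g. CC 1, program mask 0100: ipm would yield 0x14000000 */
	printf("cc = %d\n", cc_from_ipm_word(0x14000000u));
	return 0;
}
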
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 7153dd959082..652ea3625f9d 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -109,72 +109,6 @@ static inline int tpi( volatile struct tpi_info *addr)
109 return ccode; 109 return ccode;
110} 110}
111 111
112static inline int ssch(struct subchannel_id schid,
113 volatile struct orb *addr)
114{
115 register struct subchannel_id reg1 asm ("1") = schid;
116 int ccode;
117
118 asm volatile(
119 " ssch 0(%2)\n"
120 " ipm %0\n"
121 " srl %0,28"
122 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
123 return ccode;
124}
125
126static inline int rsch(struct subchannel_id schid)
127{
128 register struct subchannel_id reg1 asm ("1") = schid;
129 int ccode;
130
131 asm volatile(
132 " rsch\n"
133 " ipm %0\n"
134 " srl %0,28"
135 : "=d" (ccode) : "d" (reg1) : "cc");
136 return ccode;
137}
138
139static inline int csch(struct subchannel_id schid)
140{
141 register struct subchannel_id reg1 asm ("1") = schid;
142 int ccode;
143
144 asm volatile(
145 " csch\n"
146 " ipm %0\n"
147 " srl %0,28"
148 : "=d" (ccode) : "d" (reg1) : "cc");
149 return ccode;
150}
151
152static inline int hsch(struct subchannel_id schid)
153{
154 register struct subchannel_id reg1 asm ("1") = schid;
155 int ccode;
156
157 asm volatile(
158 " hsch\n"
159 " ipm %0\n"
160 " srl %0,28"
161 : "=d" (ccode) : "d" (reg1) : "cc");
162 return ccode;
163}
164
165static inline int xsch(struct subchannel_id schid)
166{
167 register struct subchannel_id reg1 asm ("1") = schid;
168 int ccode;
169
170 asm volatile(
171 " .insn rre,0xb2760000,%1,0\n"
172 " ipm %0\n"
173 " srl %0,28"
174 : "=d" (ccode) : "d" (reg1) : "cc");
175 return ccode;
176}
177
178static inline int chsc(void *chsc_area) 112static inline int chsc(void *chsc_area)
179{ 113{
180 typedef struct { char _[4096]; } addr_type; 114 typedef struct { char _[4096]; } addr_type;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 40a3208c7cf3..e2a781b6b21d 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -48,11 +48,11 @@
48#include <asm/debug.h> 48#include <asm/debug.h>
49#include <asm/s390_rdev.h> 49#include <asm/s390_rdev.h>
50#include <asm/qdio.h> 50#include <asm/qdio.h>
51#include <asm/airq.h>
51 52
52#include "cio.h" 53#include "cio.h"
53#include "css.h" 54#include "css.h"
54#include "device.h" 55#include "device.h"
55#include "airq.h"
56#include "qdio.h" 56#include "qdio.h"
57#include "ioasm.h" 57#include "ioasm.h"
58#include "chsc.h" 58#include "chsc.h"
@@ -96,7 +96,7 @@ static debug_info_t *qdio_dbf_slsb_in;
96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change 96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */ 97 during a while loop */
98static DEFINE_SPINLOCK(ttiq_list_lock); 98static DEFINE_SPINLOCK(ttiq_list_lock);
99static int register_thinint_result; 99static void *tiqdio_ind;
100static void tiqdio_tl(unsigned long); 100static void tiqdio_tl(unsigned long);
101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); 101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102 102
@@ -399,7 +399,7 @@ qdio_get_indicator(void)
399{ 399{
400 int i; 400 int i;
401 401
402 for (i=1;i<INDICATORS_PER_CACHELINE;i++) 402 for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
403 if (!indicator_used[i]) { 403 if (!indicator_used[i]) {
404 indicator_used[i]=1; 404 indicator_used[i]=1;
405 return indicators+i; 405 return indicators+i;
@@ -1408,8 +1408,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1408 if (q->hydra_gives_outbound_pcis) { 1408 if (q->hydra_gives_outbound_pcis) {
1409 if (!q->siga_sync_done_on_thinints) { 1409 if (!q->siga_sync_done_on_thinints) {
1410 SYNC_MEMORY_ALL; 1410 SYNC_MEMORY_ALL;
1411 } else if ((!q->siga_sync_done_on_outb_tis)&& 1411 } else if (!q->siga_sync_done_on_outb_tis) {
1412 (q->hydra_gives_outbound_pcis)) {
1413 SYNC_MEMORY_ALL_OUTB; 1412 SYNC_MEMORY_ALL_OUTB;
1414 } 1413 }
1415 } else { 1414 } else {
@@ -1911,8 +1910,7 @@ qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1911 } 1910 }
1912} 1911}
1913 1912
1914static int 1913static void tiqdio_thinint_handler(void *ind, void *drv_data)
1915tiqdio_thinint_handler(void)
1916{ 1914{
1917 QDIO_DBF_TEXT4(0,trace,"thin_int"); 1915 QDIO_DBF_TEXT4(0,trace,"thin_int");
1918 1916
@@ -1925,7 +1923,6 @@ tiqdio_thinint_handler(void)
1925 tiqdio_clear_global_summary(); 1923 tiqdio_clear_global_summary();
1926 1924
1927 tiqdio_inbound_checks(); 1925 tiqdio_inbound_checks();
1928 return 0;
1929} 1926}
1930 1927
1931static void 1928static void
@@ -2445,7 +2442,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2445 real_addr_dev_st_chg_ind=0; 2442 real_addr_dev_st_chg_ind=0;
2446 } else { 2443 } else {
2447 real_addr_local_summary_bit= 2444 real_addr_local_summary_bit=
2448 virt_to_phys((volatile void *)indicators); 2445 virt_to_phys((volatile void *)tiqdio_ind);
2449 real_addr_dev_st_chg_ind= 2446 real_addr_dev_st_chg_ind=
2450 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); 2447 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2451 } 2448 }
@@ -3740,23 +3737,25 @@ static void
3740tiqdio_register_thinints(void) 3737tiqdio_register_thinints(void)
3741{ 3738{
3742 char dbf_text[20]; 3739 char dbf_text[20];
3743 register_thinint_result= 3740
3744 s390_register_adapter_interrupt(&tiqdio_thinint_handler); 3741 tiqdio_ind =
3745 if (register_thinint_result) { 3742 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
3746 sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff)); 3743 if (IS_ERR(tiqdio_ind)) {
3744 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3747 QDIO_DBF_TEXT0(0,setup,dbf_text); 3745 QDIO_DBF_TEXT0(0,setup,dbf_text);
3748 QDIO_PRINT_ERR("failed to register adapter handler " \ 3746 QDIO_PRINT_ERR("failed to register adapter handler " \
3749 "(rc=%i).\nAdapter interrupts might " \ 3747 "(rc=%li).\nAdapter interrupts might " \
3750 "not work. Continuing.\n", 3748 "not work. Continuing.\n",
3751 register_thinint_result); 3749 PTR_ERR(tiqdio_ind));
3750 tiqdio_ind = NULL;
3752 } 3751 }
3753} 3752}
3754 3753
3755static void 3754static void
3756tiqdio_unregister_thinints(void) 3755tiqdio_unregister_thinints(void)
3757{ 3756{
3758 if (!register_thinint_result) 3757 if (tiqdio_ind)
3759 s390_unregister_adapter_interrupt(&tiqdio_thinint_handler); 3758 s390_unregister_adapter_interrupt(tiqdio_ind);
3760} 3759}
3761 3760
3762static int 3761static int
@@ -3768,8 +3767,8 @@ qdio_get_qdio_memory(void)
3768 for (i=1;i<INDICATORS_PER_CACHELINE;i++) 3767 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3769 indicator_used[i]=0; 3768 indicator_used[i]=0;
3770 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), 3769 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3771 GFP_KERNEL); 3770 GFP_KERNEL);
3772 if (!indicators) 3771 if (!indicators)
3773 return -ENOMEM; 3772 return -ENOMEM;
3774 return 0; 3773 return 0;
3775} 3774}
@@ -3780,7 +3779,6 @@ qdio_release_qdio_memory(void)
3780 kfree(indicators); 3779 kfree(indicators);
3781} 3780}
3782 3781
3783
3784static void 3782static void
3785qdio_unregister_dbf_views(void) 3783qdio_unregister_dbf_views(void)
3786{ 3784{
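
[tiqdio_register_thinints() above switches from an int result to the kernel's ERR_PTR convention: s390_register_adapter_interrupt() now hands back either the indicator pointer or an encoded errno, distinguished with IS_ERR()/PTR_ERR(), and the same pointer is what tiqdio_unregister_thinints() passes back. A self-contained user-space model of that convention; register_indicator_model is a stand-in, not the kernel API:]

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for s390_register_adapter_interrupt(): returns the summary
 * indicator on success, an encoded errno on failure. */
static void *register_indicator_model(int fail)
{
	static unsigned int indicator;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&indicator;
}

int main(void)
{
	void *ind = register_indicator_model(1);

	if (IS_ERR(ind)) {	/* mirrors the tiqdio_register_thinints() check */
		printf("regthn failed: %ld\n", PTR_ERR(ind));
		ind = NULL;	/* unregister is later skipped when NULL */
	}
	return 0;
}
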
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 6d7aad18f6f0..37870e4e938e 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -57,7 +57,7 @@
57 of the queue to 0 */ 57 of the queue to 0 */
58 58
59#define QDIO_ESTABLISH_TIMEOUT (1*HZ) 59#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
60#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10) 60#define QDIO_ACTIVATE_TIMEOUT (5*HZ)
61#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) 61#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
62#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) 62#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
63#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) 63#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
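
[The QDIO_ACTIVATE_TIMEOUT change above is a real fix, not a cleanup: (5*HZ)>>10 truncates to zero ticks for any HZ below 205, so the activation wait could expire immediately. A two-line check; the HZ value here is illustrative:]

#include <stdio.h>

#define HZ 100	/* illustrative; a common value on s390 configs of the era */

int main(void)
{
	printf("old timeout: %d ticks, new timeout: %d ticks\n",
	       (5 * HZ) >> 10, 5 * HZ);	/* 0 vs. 500 */
	return 0;
}
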
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 3561982749e3..c3076217871e 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -2416,7 +2416,7 @@ init_ccw_bk(struct net_device *dev)
2416 privptr->p_buff_pages_perwrite); 2416 privptr->p_buff_pages_perwrite);
2417#endif 2417#endif
2418 if (p_buff==NULL) { 2418 if (p_buff==NULL) {
2419 printk(KERN_INFO "%s:%s __get_free_pages" 2419 printk(KERN_INFO "%s:%s __get_free_pages "
2420 "for writes buf failed : get is for %d pages\n", 2420 "for writes buf failed : get is for %d pages\n",
2421 dev->name, 2421 dev->name,
2422 __FUNCTION__, 2422 __FUNCTION__,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0fd663b23d76..7bfe8d707a34 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1115,7 +1115,7 @@ list_modified:
1115 rc = lcs_send_setipm(card, ipm); 1115 rc = lcs_send_setipm(card, ipm);
1116 spin_lock_irqsave(&card->ipm_lock, flags); 1116 spin_lock_irqsave(&card->ipm_lock, flags);
1117 if (rc) { 1117 if (rc) {
1118 PRINT_INFO("Adding multicast address failed." 1118 PRINT_INFO("Adding multicast address failed. "
1119 "Table possibly full!\n"); 1119 "Table possibly full!\n");
1120 /* store ipm in failed list -> will be added 1120 /* store ipm in failed list -> will be added
1121 * to ipm_list again, so a retry will be done 1121 * to ipm_list again, so a retry will be done
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index d6e93f15440e..f3d893cfe61d 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -198,8 +198,7 @@ struct iucv_connection {
198/** 198/**
199 * Linked list of all connection structs. 199 * Linked list of all connection structs.
200 */ 200 */
201static struct list_head iucv_connection_list = 201static LIST_HEAD(iucv_connection_list);
202 LIST_HEAD_INIT(iucv_connection_list);
203static DEFINE_RWLOCK(iucv_connection_rwlock); 202static DEFINE_RWLOCK(iucv_connection_rwlock);
204 203
205/** 204/**
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
index f1ff165a5e05..46ecd03a597e 100644
--- a/drivers/s390/net/qeth_proc.c
+++ b/drivers/s390/net/qeth_proc.c
@@ -146,7 +146,7 @@ qeth_procfile_seq_show(struct seq_file *s, void *it)
146 return 0; 146 return 0;
147} 147}
148 148
149static struct seq_operations qeth_procfile_seq_ops = { 149static const struct seq_operations qeth_procfile_seq_ops = {
150 .start = qeth_procfile_seq_start, 150 .start = qeth_procfile_seq_start,
151 .stop = qeth_procfile_seq_stop, 151 .stop = qeth_procfile_seq_stop,
152 .next = qeth_procfile_seq_next, 152 .next = qeth_procfile_seq_next,
@@ -264,7 +264,7 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
264 return 0; 264 return 0;
265} 265}
266 266
267static struct seq_operations qeth_perf_procfile_seq_ops = { 267static const struct seq_operations qeth_perf_procfile_seq_ops = {
268 .start = qeth_procfile_seq_start, 268 .start = qeth_procfile_seq_start,
269 .stop = qeth_procfile_seq_stop, 269 .stop = qeth_procfile_seq_stop,
270 .next = qeth_procfile_seq_next, 270 .next = qeth_procfile_seq_next,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 47bb47b48581..8735a415a116 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
42static struct iucv_path *smsg_path; 42static struct iucv_path *smsg_path;
43 43
44static DEFINE_SPINLOCK(smsg_list_lock); 44static DEFINE_SPINLOCK(smsg_list_lock);
45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); 45static LIST_HEAD(smsg_list);
46 46
47static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); 47static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
48static void smsg_message_pending(struct iucv_path *, struct iucv_message *); 48static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4f86c0e12961..2dc8110ebf74 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1286,7 +1286,7 @@ zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1286 * note: no lock in subsequent strategy routines 1286 * note: no lock in subsequent strategy routines
1287 * (this allows these routine to call schedule, e.g. 1287 * (this allows these routine to call schedule, e.g.
1288 * kmalloc with such flags or qdio_initialize & friends) 1288 * kmalloc with such flags or qdio_initialize & friends)
1289 * Note: in case of timeout, the seperate strategies will fail 1289 * Note: in case of timeout, the separate strategies will fail
1290 * anyhow. No need for a special action. Even worse, a nameserver 1290 * anyhow. No need for a special action. Even worse, a nameserver
1291 * failure would not wake up waiting ports without the call. 1291 * failure would not wake up waiting ports without the call.
1292 */ 1292 */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index fe57941ab55d..e45f85f7c7ed 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -502,7 +502,7 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
502 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 502 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
503 break; 503 break;
504 case FSF_SQ_NO_RECOM: 504 case FSF_SQ_NO_RECOM:
505 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a" 505 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
506 "problem on the adapter %s " 506 "problem on the adapter %s "
507 "Stopping all operations on this adapter. ", 507 "Stopping all operations on this adapter. ",
508 zfcp_get_busid_by_adapter(fsf_req->adapter)); 508 zfcp_get_busid_by_adapter(fsf_req->adapter));
@@ -813,7 +813,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
813 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 813 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
814 814
815 if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) { 815 if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
816 ZFCP_LOG_NORMAL("bug: Reopen port indication received for" 816 ZFCP_LOG_NORMAL("bug: Reopen port indication received for "
817 "nonexisting port with d_id 0x%06x on " 817 "nonexisting port with d_id 0x%06x on "
818 "adapter %s. Ignored.\n", 818 "adapter %s. Ignored.\n",
819 status_buffer->d_id & ZFCP_DID_MASK, 819 status_buffer->d_id & ZFCP_DID_MASK,
@@ -2281,7 +2281,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
2281 &lock_flags, &fsf_req); 2281 &lock_flags, &fsf_req);
2282 if (retval) { 2282 if (retval) {
2283 ZFCP_LOG_INFO("error: Out of resources. Could not create an " 2283 ZFCP_LOG_INFO("error: Out of resources. Could not create an "
2284 "exchange port data request for" 2284 "exchange port data request for "
2285 "the adapter %s.\n", 2285 "the adapter %s.\n",
2286 zfcp_get_busid_by_adapter(adapter)); 2286 zfcp_get_busid_by_adapter(adapter));
2287 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2287 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
@@ -2340,7 +2340,7 @@ zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
2340 0, NULL, &lock_flags, &fsf_req); 2340 0, NULL, &lock_flags, &fsf_req);
2341 if (retval) { 2341 if (retval) {
2342 ZFCP_LOG_INFO("error: Out of resources. Could not create an " 2342 ZFCP_LOG_INFO("error: Out of resources. Could not create an "
2343 "exchange port data request for" 2343 "exchange port data request for "
2344 "the adapter %s.\n", 2344 "the adapter %s.\n",
2345 zfcp_get_busid_by_adapter(adapter)); 2345 zfcp_get_busid_by_adapter(adapter));
2346 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2346 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
@@ -4725,7 +4725,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4725 /* allocate new FSF request */ 4725 /* allocate new FSF request */
4726 fsf_req = zfcp_fsf_req_alloc(pool, req_flags); 4726 fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
4727 if (unlikely(NULL == fsf_req)) { 4727 if (unlikely(NULL == fsf_req)) {
4728 ZFCP_LOG_DEBUG("error: Could not put an FSF request into" 4728 ZFCP_LOG_DEBUG("error: Could not put an FSF request into "
4729 "the outbound (send) queue.\n"); 4729 "the outbound (send) queue.\n");
4730 ret = -ENOMEM; 4730 ret = -ENOMEM;
4731 goto failed_fsf_req; 4731 goto failed_fsf_req;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 51d92b196ee7..22fdc17e0d0e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -529,7 +529,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
529 529
530 530
531/** 531/**
532 * zfcp_qdio_sbale_fill - set address and lenght in current SBALE 532 * zfcp_qdio_sbale_fill - set address and length in current SBALE
533 * on request_queue 533 * on request_queue
534 */ 534 */
535static void 535static void
diff --git a/include/asm-s390/airq.h b/include/asm-s390/airq.h
new file mode 100644
index 000000000000..41d028cb52a4
--- /dev/null
+++ b/include/asm-s390/airq.h
@@ -0,0 +1,19 @@
1/*
2 * include/asm-s390/airq.h
3 *
4 * Copyright IBM Corp. 2002,2007
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 * Arnd Bergmann <arndb@de.ibm.com>
8 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
9 */
10
11#ifndef _ASM_S390_AIRQ_H
12#define _ASM_S390_AIRQ_H
13
14typedef void (*adapter_int_handler_t)(void *, void *);
15
16void *s390_register_adapter_interrupt(adapter_int_handler_t, void *);
17void s390_unregister_adapter_interrupt(void *);
18
19#endif /* _ASM_S390_AIRQ_H */
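
[The adapter_int_handler_t contract declared above is what the qdio.c hunks earlier in this diff adopt: the handler is invoked with the indicator it was registered with plus an opaque driver token. A runnable user-space model of that contract; everything with a _model suffix is ours, not the kernel API:]

#include <stdio.h>

typedef void (*adapter_int_handler_model_t)(void *, void *);

static adapter_int_handler_model_t registered_handler;
static void *registered_drv_data;
static unsigned int indicator_model;

/* Models s390_register_adapter_interrupt(): remembers the handler and
 * token, returns the indicator the caller must keep for unregister. */
static void *register_model(adapter_int_handler_model_t h, void *drv_data)
{
	registered_handler = h;
	registered_drv_data = drv_data;
	return &indicator_model;
}

static void simulate_adapter_interrupt(void)
{
	registered_handler(&indicator_model, registered_drv_data);
}

static void thinint_handler_model(void *ind, void *drv_data)
{
	(void)drv_data;
	printf("thin interrupt, indicator at %p\n", ind);
}

int main(void)
{
	void *ind = register_model(thinint_handler_model, NULL);

	simulate_adapter_interrupt();
	(void)ind;		/* would be handed to the unregister call */
	return 0;
}
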
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 2f08c16e44ad..123b557c3ff4 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -24,8 +24,8 @@
24 * @fmt: format 24 * @fmt: format
25 * @pfch: prefetch 25 * @pfch: prefetch
26 * @isic: initial-status interruption control 26 * @isic: initial-status interruption control
27 * @alcc: adress-limit checking control 27 * @alcc: address-limit checking control
28 * @ssi: supress-suspended interruption 28 * @ssi: suppress-suspended interruption
29 * @zcc: zero condition code 29 * @zcc: zero condition code
30 * @ectl: extended control 30 * @ectl: extended control
31 * @pno: path not operational 31 * @pno: path not operational
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index 604f68fa6f56..3f002e13d024 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -105,7 +105,7 @@ typedef struct dasd_information_t {
105} dasd_information_t; 105} dasd_information_t;
106 106
107/* 107/*
108 * Read Subsystem Data - Perfomance Statistics 108 * Read Subsystem Data - Performance Statistics
109 */ 109 */
110typedef struct dasd_rssd_perf_stats_t { 110typedef struct dasd_rssd_perf_stats_t {
111 unsigned char invalid:1; 111 unsigned char invalid:1;
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 2c40fd3a137f..c1b2e50392bb 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -83,6 +83,8 @@ extern u32 dump_prefix_page;
83extern unsigned int zfcpdump_prefix_array[]; 83extern unsigned int zfcpdump_prefix_array[];
84 84
85extern void do_reipl(void); 85extern void do_reipl(void);
86extern void do_halt(void);
87extern void do_poff(void);
86extern void ipl_save_parameters(void); 88extern void ipl_save_parameters(void);
87 89
88enum { 90enum {
@@ -118,7 +120,7 @@ struct ipl_info
118}; 120};
119 121
120extern struct ipl_info ipl_info; 122extern struct ipl_info ipl_info;
121extern void setup_ipl_info(void); 123extern void setup_ipl(void);
122 124
123/* 125/*
124 * DIAG 308 support 126 * DIAG 308 support
@@ -141,6 +143,10 @@ enum diag308_opt {
141 DIAG308_IPL_OPT_DUMP = 0x20, 143 DIAG308_IPL_OPT_DUMP = 0x20,
142}; 144};
143 145
146enum diag308_flags {
147 DIAG308_FLAGS_LP_VALID = 0x80,
148};
149
144enum diag308_rc { 150enum diag308_rc {
145 DIAG308_RC_OK = 1, 151 DIAG308_RC_OK = 1,
146}; 152};
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 05b842126b99..a77d4ba3c8eb 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -12,10 +12,15 @@
12#include <asm/pgalloc.h> 12#include <asm/pgalloc.h>
13#include <asm-generic/mm_hooks.h> 13#include <asm-generic/mm_hooks.h>
14 14
15/* 15static inline int init_new_context(struct task_struct *tsk,
16 * get a new mmu context.. S390 don't know about contexts. 16 struct mm_struct *mm)
17 */ 17{
18#define init_new_context(tsk,mm) 0 18 mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
19#ifdef CONFIG_64BIT
20 mm->context |= _ASCE_TYPE_REGION3;
21#endif
22 return 0;
23}
19 24
20#define destroy_context(mm) do { } while (0) 25#define destroy_context(mm) do { } while (0)
21 26
@@ -27,19 +32,11 @@
27 32
28static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) 33static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
29{ 34{
30 pgd_t *pgd = mm->pgd; 35 S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
31 unsigned long asce_bits;
32
33 /* Calculate asce bits from the first pgd table entry. */
34 asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
35#ifdef CONFIG_64BIT
36 asce_bits |= _ASCE_TYPE_REGION3;
37#endif
38 S390_lowcore.user_asce = asce_bits | __pa(pgd);
39 if (switch_amode) { 36 if (switch_amode) {
40 /* Load primary space page table origin. */ 37 /* Load primary space page table origin. */
41 pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd; 38 pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
42 S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd); 39 S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
43 asm volatile(LCTL_OPCODE" 1,1,%0\n" 40 asm volatile(LCTL_OPCODE" 1,1,%0\n"
44 : : "m" (S390_lowcore.user_exec_asce) ); 41 : : "m" (S390_lowcore.user_exec_asce) );
45 } else 42 } else
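
[The mmu_context.h rework above computes the invariant ASCE flag bits once in init_new_context() and caches them in mm->context, so update_mm() on the context-switch path only ORs in the page-table origin. A user-space model of that caching; the constants are illustrative, not the real _ASCE_* values:]

#include <stdio.h>

#define ASCE_TABLE_LENGTH	0x03UL	/* illustrative */
#define ASCE_USER_BITS		0x50UL	/* illustrative */

struct mm_model {
	unsigned long context;	/* cached flag bits */
	unsigned long pgd_phys;	/* stands in for __pa(mm->pgd) */
};

static void init_new_context_model(struct mm_model *mm)
{
	/* Done once at mm creation, as in init_new_context() above. */
	mm->context = ASCE_TABLE_LENGTH | ASCE_USER_BITS;
}

static unsigned long update_mm_model(const struct mm_model *mm)
{
	/* Hot path: one OR instead of recomputing the bits. */
	return mm->context | mm->pgd_phys;
}

int main(void)
{
	struct mm_model mm = { .pgd_phys = 0x1000 };

	init_new_context_model(&mm);
	printf("user_asce = %#lx\n", update_mm_model(&mm));
	return 0;
}
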
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 1f530f8a6280..79b9eab1a0c7 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -104,41 +104,27 @@ extern char empty_zero_page[PAGE_SIZE];
104 104
105#ifndef __ASSEMBLY__ 105#ifndef __ASSEMBLY__
106/* 106/*
107 * Just any arbitrary offset to the start of the vmalloc VM area: the 107 * The vmalloc area will always be on the topmost area of the kernel
108 * current 8MB value just means that there will be a 8MB "hole" after the 108 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
109 * physical memory until the kernel virtual memory starts. That means that 109 * which should be enough for any sane case.
110 * any out-of-bounds memory accesses will hopefully be caught. 110 * By putting vmalloc at the top, we maximise the gap between physical
111 * The vmalloc() routines leaves a hole of 4kB between each vmalloced 111 * memory and vmalloc to catch misplaced memory accesses. As a side
112 * area for the same reason. ;) 112 * effect, this also makes sure that 64 bit module code cannot be used
113 * vmalloc area starts at 4GB to prevent syscall table entry exchanging 113 * as system call address.
114 * from modules.
115 */
116extern unsigned long vmalloc_end;
117
118#ifdef CONFIG_64BIT
119#define VMALLOC_ADDR (max(0x100000000UL, (unsigned long) high_memory))
120#else
121#define VMALLOC_ADDR ((unsigned long) high_memory)
122#endif
123#define VMALLOC_OFFSET (8*1024*1024)
124#define VMALLOC_START ((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
125#define VMALLOC_END vmalloc_end
126
127/*
128 * We need some free virtual space to be able to do vmalloc.
129 * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
130 * area. On a machine with 2GB memory we make sure that we
131 * have at least 128MB free space for vmalloc. On a machine
132 * with 4TB we make sure we have at least 128GB.
133 */ 114 */
134#ifndef __s390x__ 115#ifndef __s390x__
135#define VMALLOC_MIN_SIZE 0x8000000UL 116#define VMALLOC_START 0x78000000UL
136#define VMALLOC_END_INIT 0x80000000UL 117#define VMALLOC_END 0x7e000000UL
118#define VMEM_MAP_MAX 0x80000000UL
137#else /* __s390x__ */ 119#else /* __s390x__ */
138#define VMALLOC_MIN_SIZE 0x2000000000UL 120#define VMALLOC_START 0x3e000000000UL
139#define VMALLOC_END_INIT 0x40000000000UL 121#define VMALLOC_END 0x3e040000000UL
122#define VMEM_MAP_MAX 0x40000000000UL
140#endif /* __s390x__ */ 123#endif /* __s390x__ */
141 124
125#define VMEM_MAP ((struct page *) VMALLOC_END)
126#define VMEM_MAP_SIZE ((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
127
142/* 128/*
143 * A 31 bit pagetable entry of S390 has following format: 129 * A 31 bit pagetable entry of S390 has following format:
144 * | PFRA | | OS | 130 * | PFRA | | OS |
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 21d40a19355e..c86b982aef5a 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -59,9 +59,6 @@ extern void s390_adjust_jiffies(void);
59extern void print_cpu_info(struct cpuinfo_S390 *); 59extern void print_cpu_info(struct cpuinfo_S390 *);
60extern int get_cpu_capability(unsigned int *); 60extern int get_cpu_capability(unsigned int *);
61 61
62/* Lazy FPU handling on uni-processor */
63extern struct task_struct *last_task_used_math;
64
65/* 62/*
66 * User space process size: 2GB for 31 bit, 4TB for 64 bit. 63 * User space process size: 2GB for 31 bit, 4TB for 64 bit.
67 */ 64 */
@@ -95,7 +92,6 @@ struct thread_struct {
95 unsigned long ksp; /* kernel stack pointer */ 92 unsigned long ksp; /* kernel stack pointer */
96 mm_segment_t mm_segment; 93 mm_segment_t mm_segment;
97 unsigned long prot_addr; /* address of protection-excep. */ 94 unsigned long prot_addr; /* address of protection-excep. */
98 unsigned int error_code; /* error-code of last prog-excep. */
99 unsigned int trap_no; 95 unsigned int trap_no;
100 per_struct per_info; 96 per_struct per_info;
101 /* Used to give failing instruction back to user for ieee exceptions */ 97 /* Used to give failing instruction back to user for ieee exceptions */
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 332ee73688fc..61f6952f2e35 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -465,6 +465,14 @@ struct user_regs_struct
465#ifdef __KERNEL__ 465#ifdef __KERNEL__
466#define __ARCH_SYS_PTRACE 1 466#define __ARCH_SYS_PTRACE 1
467 467
468/*
469 * These are defined as per linux/ptrace.h, which see.
470 */
471#define arch_has_single_step() (1)
472struct task_struct;
473extern void user_enable_single_step(struct task_struct *);
474extern void user_disable_single_step(struct task_struct *);
475
468#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) 476#define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
469#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) 477#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
470#define regs_return_value(regs)((regs)->gprs[2]) 478#define regs_return_value(regs)((regs)->gprs[2])
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 74db1dc10a7d..4b8ff55f680e 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -184,7 +184,7 @@ struct qdr {
184#endif /* QDIO_32_BIT */ 184#endif /* QDIO_32_BIT */
185 unsigned long qiba; /* queue-information-block address */ 185 unsigned long qiba; /* queue-information-block address */
186 unsigned int res8; /* reserved */ 186 unsigned int res8; /* reserved */
187 unsigned int qkey : 4; /* queue-informatio-block key */ 187 unsigned int qkey : 4; /* queue-information-block key */
188 unsigned int res9 : 28; /* reserved */ 188 unsigned int res9 : 28; /* reserved */
189/* union _qd {*/ /* why this? */ 189/* union _qd {*/ /* why this? */
190 struct qdesfmt0 qdf0[126]; 190 struct qdesfmt0 qdf0[126];
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 90f4eccaa290..9d2a17971805 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -91,8 +91,8 @@ struct rw_semaphore {
91#endif 91#endif
92 92
93#define __RWSEM_INITIALIZER(name) \ 93#define __RWSEM_INITIALIZER(name) \
94{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ 94 { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
95 __RWSEM_DEP_MAP_INIT(name) } 95 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
96 96
97#define DECLARE_RWSEM(name) \ 97#define DECLARE_RWSEM(name) \
98 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 98 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index cb9faf1ea5cf..b5f2843013a3 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -27,7 +27,25 @@ struct sclp_ipl_info {
27 char loadparm[LOADPARM_LEN]; 27 char loadparm[LOADPARM_LEN];
28}; 28};
29 29
30void sclp_readinfo_early(void); 30struct sclp_cpu_entry {
31 u8 address;
32 u8 reserved0[13];
33 u8 type;
34 u8 reserved1;
35} __attribute__((packed));
36
37struct sclp_cpu_info {
38 unsigned int configured;
39 unsigned int standby;
40 unsigned int combined;
41 int has_cpu_type;
42 struct sclp_cpu_entry cpu[255];
43};
44
45int sclp_get_cpu_info(struct sclp_cpu_info *info);
46int sclp_cpu_configure(u8 cpu);
47int sclp_cpu_deconfigure(u8 cpu);
48void sclp_read_info_early(void);
31void sclp_facilities_detect(void); 49void sclp_facilities_detect(void);
32unsigned long long sclp_memory_detect(void); 50unsigned long long sclp_memory_detect(void);
33int sclp_sdias_blk_count(void); 51int sclp_sdias_blk_count(void);
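
[The new sclp_cpu_info block above reserves room for 255 entries of 16 packed bytes each (1 + 13 + 1 + 1). A quick size check under that stated layout; the _model name is ours:]

#include <stdint.h>
#include <stdio.h>

struct sclp_cpu_entry_model {
	uint8_t address;
	uint8_t reserved0[13];
	uint8_t type;
	uint8_t reserved1;
} __attribute__((packed));

int main(void)
{
	printf("entry: %zu bytes, cpu[255]: %zu bytes\n",
	       sizeof(struct sclp_cpu_entry_model),		/* 16 */
	       sizeof(struct sclp_cpu_entry_model[255]));	/* 4080 */
	return 0;
}
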
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 07708c07701e..c7b74326a527 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -35,8 +35,6 @@ extern void machine_restart_smp(char *);
35extern void machine_halt_smp(void); 35extern void machine_halt_smp(void);
36extern void machine_power_off_smp(void); 36extern void machine_power_off_smp(void);
37 37
38extern void smp_setup_cpu_possible_map(void);
39
40#define NO_PROC_ID 0xFF /* No processor magic marker */ 38#define NO_PROC_ID 0xFF /* No processor magic marker */
41 39
42/* 40/*
@@ -92,6 +90,8 @@ extern void __cpu_die (unsigned int cpu);
92extern void cpu_die (void) __attribute__ ((noreturn)); 90extern void cpu_die (void) __attribute__ ((noreturn));
93extern int __cpu_up (unsigned int cpu); 91extern int __cpu_up (unsigned int cpu);
94 92
93extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
94 void *info, int wait);
95#endif 95#endif
96 96
97#ifndef CONFIG_SMP 97#ifndef CONFIG_SMP
@@ -103,7 +103,6 @@ static inline void smp_send_stop(void)
103 103
104#define hard_smp_processor_id() 0 104#define hard_smp_processor_id() 0
105#define smp_cpu_not_running(cpu) 1 105#define smp_cpu_not_running(cpu) 1
106#define smp_setup_cpu_possible_map() do { } while (0)
107#endif 106#endif
108 107
109extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; 108extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 3fd43826fd0b..df84ae96915f 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -53,44 +53,48 @@ _raw_compare_and_swap(volatile unsigned int *lock,
53 */ 53 */
54 54
55#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) 55#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
56#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
57#define __raw_spin_unlock_wait(lock) \ 56#define __raw_spin_unlock_wait(lock) \
58 do { while (__raw_spin_is_locked(lock)) \ 57 do { while (__raw_spin_is_locked(lock)) \
59 _raw_spin_relax(lock); } while (0) 58 _raw_spin_relax(lock); } while (0)
60 59
61extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc); 60extern void _raw_spin_lock_wait(raw_spinlock_t *);
62extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc); 61extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
62extern int _raw_spin_trylock_retry(raw_spinlock_t *);
63extern void _raw_spin_relax(raw_spinlock_t *lock); 63extern void _raw_spin_relax(raw_spinlock_t *lock);
64 64
65static inline void __raw_spin_lock(raw_spinlock_t *lp) 65static inline void __raw_spin_lock(raw_spinlock_t *lp)
66{ 66{
67 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
68 int old; 67 int old;
69 68
70 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 69 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
71 if (likely(old == 0)) { 70 if (likely(old == 0))
72 lp->owner_pc = pc;
73 return; 71 return;
74 } 72 _raw_spin_lock_wait(lp);
75 _raw_spin_lock_wait(lp, pc); 73}
74
75static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
76 unsigned long flags)
77{
78 int old;
79
80 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
81 if (likely(old == 0))
82 return;
83 _raw_spin_lock_wait_flags(lp, flags);
76} 84}
77 85
78static inline int __raw_spin_trylock(raw_spinlock_t *lp) 86static inline int __raw_spin_trylock(raw_spinlock_t *lp)
79{ 87{
80 unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
81 int old; 88 int old;
82 89
83 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 90 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
84 if (likely(old == 0)) { 91 if (likely(old == 0))
85 lp->owner_pc = pc;
86 return 1; 92 return 1;
87 } 93 return _raw_spin_trylock_retry(lp);
88 return _raw_spin_trylock_retry(lp, pc);
89} 94}
90 95
91static inline void __raw_spin_unlock(raw_spinlock_t *lp) 96static inline void __raw_spin_unlock(raw_spinlock_t *lp)
92{ 97{
93 lp->owner_pc = 0;
94 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); 98 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
95} 99}
96 100
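
[With owner_pc gone (see spinlock_types.h below), the lock word is just owner_cpu and the fast path is a single compare-and-swap; the new _raw_spin_lock_wait_flags() slow path exists so spin_lock_irqsave() can restore interrupts from the saved flags while spinning. A portable user-space model of the fast path, with C11 atomics standing in for _raw_compare_and_swap():]

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint owner_cpu;

static void spin_lock_model(unsigned int self)
{
	unsigned int expected;

	for (;;) {
		expected = 0;
		/* Fast path: grab the free lock with one CAS, storing
		 * ~cpu so that cpu 0 still yields a nonzero owner. */
		if (atomic_compare_exchange_weak(&owner_cpu, &expected,
						 ~self))
			return;
		/* Slow path: the kernel spins in _raw_spin_lock_wait()
		 * or, with saved flags, _raw_spin_lock_wait_flags(). */
	}
}

static void spin_unlock_model(void)
{
	atomic_store(&owner_cpu, 0);
}

int main(void)
{
	spin_lock_model(1);
	printf("locked by %#x\n", atomic_load(&owner_cpu));
	spin_unlock_model();
	return 0;
}
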
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
index b7ac13f7aa37..654abc40de04 100644
--- a/include/asm-s390/spinlock_types.h
+++ b/include/asm-s390/spinlock_types.h
@@ -7,7 +7,6 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int owner_cpu; 9 volatile unsigned int owner_cpu;
10 volatile unsigned int owner_pc;
11} __attribute__ ((aligned (4))) raw_spinlock_t; 10} __attribute__ ((aligned (4))) raw_spinlock_t;
12 11
13#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index a69bd2490d52..70fa5ae58180 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -42,11 +42,11 @@ static inline void __tlb_flush_global(void)
42/* 42/*
43 * Flush all tlb entries of a page table on all cpus. 43 * Flush all tlb entries of a page table on all cpus.
44 */ 44 */
45static inline void __tlb_flush_idte(pgd_t *pgd) 45static inline void __tlb_flush_idte(unsigned long asce)
46{ 46{
47 asm volatile( 47 asm volatile(
48 " .insn rrf,0xb98e0000,0,%0,%1,0" 48 " .insn rrf,0xb98e0000,0,%0,%1,0"
49 : : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" ); 49 : : "a" (2048), "a" (asce) : "cc" );
50} 50}
51 51
52static inline void __tlb_flush_mm(struct mm_struct * mm) 52static inline void __tlb_flush_mm(struct mm_struct * mm)
@@ -61,11 +61,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
61 * only ran on the local cpu. 61 * only ran on the local cpu.
62 */ 62 */
63 if (MACHINE_HAS_IDTE) { 63 if (MACHINE_HAS_IDTE) {
64 pgd_t *shadow_pgd = get_shadow_table(mm->pgd); 64 pgd_t *shadow = get_shadow_table(mm->pgd);
65 65
66 if (shadow_pgd) 66 if (shadow)
67 __tlb_flush_idte(shadow_pgd); 67 __tlb_flush_idte((unsigned long) shadow | mm->context);
68 __tlb_flush_idte(mm->pgd); 68 __tlb_flush_idte((unsigned long) mm->pgd | mm->context);
69 return; 69 return;
70 } 70 }
71 preempt_disable(); 71 preempt_disable();
@@ -106,9 +106,23 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
106 */ 106 */
107#define flush_tlb() do { } while (0) 107#define flush_tlb() do { } while (0)
108#define flush_tlb_all() do { } while (0) 108#define flush_tlb_all() do { } while (0)
109#define flush_tlb_mm(mm) __tlb_flush_mm_cond(mm)
110#define flush_tlb_page(vma, addr) do { } while (0) 109#define flush_tlb_page(vma, addr) do { } while (0)
111#define flush_tlb_range(vma, start, end) __tlb_flush_mm_cond(mm) 110
112#define flush_tlb_kernel_range(start, end) __tlb_flush_mm(&init_mm) 111static inline void flush_tlb_mm(struct mm_struct *mm)
112{
113 __tlb_flush_mm_cond(mm);
114}
115
116static inline void flush_tlb_range(struct vm_area_struct *vma,
117 unsigned long start, unsigned long end)
118{
119 __tlb_flush_mm_cond(vma->vm_mm);
120}
121
122static inline void flush_tlb_kernel_range(unsigned long start,
123 unsigned long end)
124{
125 __tlb_flush_mm(&init_mm);
126}
113 127
114#endif /* _S390_TLBFLUSH_H */ 128#endif /* _S390_TLBFLUSH_H */
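
[Turning these macros into inline functions is more than style: the old flush_tlb_range() expanded to __tlb_flush_mm_cond(mm), silently capturing whatever variable named mm happened to be in scope at the call site instead of using vma->vm_mm. The inline versions make the argument flow explicit and type-checked. A minimal demonstration of that macro pitfall; all names are illustrative:]

#include <stdio.h>

static void flush_model(int mm)
{
	printf("flushing mm %d\n", mm);
}

/* Old-style macro: ignores its vma argument and captures the caller's
 * local "mm" -- exactly the hazard the inline functions remove. */
#define flush_range_old(vma, start, end) flush_model(mm)

int main(void)
{
	int mm = 42;	/* unrelated local that merely shares the name */
	int vma = 7;	/* what the macro should have used */

	flush_range_old(vma, 0, 0);	/* prints 42, not 7 */
	(void)vma;
	return 0;
}
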
diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h
index a5dada617751..f228f1b86877 100644
--- a/include/asm-s390/zcrypt.h
+++ b/include/asm-s390/zcrypt.h
@@ -117,7 +117,7 @@ struct CPRBX {
117 unsigned char padx004[16 - sizeof (char *)]; 117 unsigned char padx004[16 - sizeof (char *)];
118 unsigned char * req_extb; /* request extension block 'addr'*/ 118 unsigned char * req_extb; /* request extension block 'addr'*/
119 unsigned char padx005[16 - sizeof (char *)]; 119 unsigned char padx005[16 - sizeof (char *)];
120 unsigned char * rpl_extb; /* reply extension block 'addres'*/ 120 unsigned char * rpl_extb; /* reply extension block 'address'*/
121 unsigned short ccp_rtcode; /* server return code */ 121 unsigned short ccp_rtcode; /* server return code */
122 unsigned short ccp_rscode; /* server reason code */ 122 unsigned short ccp_rscode; /* server reason code */
123 unsigned int mac_data_len; /* Mac Data Length */ 123 unsigned int mac_data_len; /* Mac Data Length */
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index a68425a5cc1d..d8a5558a47b4 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1,6 +1,5 @@
1#include <linux/stat.h> 1#include <linux/stat.h>
2#include <linux/sysctl.h> 2#include <linux/sysctl.h>
3#include "../arch/s390/appldata/appldata.h"
4#include "../fs/xfs/linux-2.6/xfs_sysctl.h" 3#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
5#include <linux/sunrpc/debug.h> 4#include <linux/sunrpc/debug.h>
6#include <linux/string.h> 5#include <linux/string.h>